// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * test virtio server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
#include "vhost.h"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small packets.
 */
#define VHOST_TEST_PKT_WEIGHT 256

enum {
	VHOST_TEST_VQ = 0,
	VHOST_TEST_VQ_MAX = 1,
};

struct vhost_test {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};

/* Expects to always be run from the workqueue, which acts as the
 * read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
	struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
	unsigned out, in;
	int head;
	size_t len, total_len = 0;
	void *private;

	mutex_lock(&vq->mutex);
	private = vq->private_data;
	if (!private) {
		mutex_unlock(&vq->mutex);
		return;
	}

	vhost_disable_notify(&n->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&n->dev, vq))) {
				vhost_disable_notify(&n->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
			       out, in);
			break;
		}
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected 0 len for TX\n");
			break;
		}
		/* Consume the descriptor without writing anything back. */
		vhost_add_used_and_signal(&n->dev, vq, head, 0);
		total_len += len;
		if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
			break;
	}

	mutex_unlock(&vq->mutex);
}

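/* Kick handler: run by the vhost worker whenever the vq's kick eventfd
 * is signalled; recovers the owning device and services the ring. */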
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

	handle_vq(n);
}

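/* Allocate per-open device state and initialize the vhost core with our
 * single test virtqueue and the byte/packet fairness weights above. */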
static int vhost_test_open(struct inode *inode, struct file *f)
{
	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;

	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(n);
		return -ENOMEM;
	}

	dev = &n->dev;
	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
		       VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);

	f->private_data = n;

	return 0;
}

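/* Detach and return the vq's private_data under the vq mutex, so the
 * handler sees NULL and stops processing descriptors. */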
static void *vhost_test_stop_vq(struct vhost_test *n,
				struct vhost_virtqueue *vq)
{
	void *private;

	mutex_lock(&vq->mutex);
	private = vq->private_data;
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);
	return private;
}

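/* Stop the (only) test vq and hand its old private_data to the caller. */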
static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}

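/* Wait for any work already queued for the vq to finish running. */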
static void vhost_test_flush_vq(struct vhost_test *n, int index)
{
	vhost_poll_flush(&n->vqs[index].poll);
}

static void vhost_test_flush(struct vhost_test *n)
{
	vhost_test_flush_vq(n, VHOST_TEST_VQ);
}

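/* Tear down on the last close of /dev/vhost-test: stop the vq, flush
 * outstanding work, and release everything vhost_test_open set up. */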
static int vhost_test_release(struct inode *inode, struct file *f)
{
	struct vhost_test *n = f->private_data;
	void *private;

	vhost_test_stop(n, &private);
	vhost_test_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_test_flush(n);
	/* Free the vq pointer array allocated in vhost_test_open. */
	kfree(n->dev.vqs);
	kfree(n);
	return 0;
}

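/* VHOST_TEST_RUN backend: test != 0 starts the vq by publishing a
 * non-NULL private_data, test == 0 stops it again. */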
static long vhost_test_run(struct vhost_test *n, int test)
{
	void *priv, *oldpriv;
	struct vhost_virtqueue *vq;
	int r, index;

	if (test < 0 || test > 1)
		return -EINVAL;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	for (index = 0; index < n->dev.nvqs; ++index) {
		/* Verify that ring has been setup correctly. */
		if (!vhost_vq_access_ok(&n->vqs[index])) {
			r = -EFAULT;
			goto err;
		}
	}

	for (index = 0; index < n->dev.nvqs; ++index) {
		vq = n->vqs + index;
		mutex_lock(&vq->mutex);
		priv = test ? n : NULL;

		/* Start (or stop) the vq by swapping in the new private_data. */
		oldpriv = vq->private_data;
		vq->private_data = priv;

		r = vhost_vq_init_access(&n->vqs[index]);

		mutex_unlock(&vq->mutex);

		if (r)
			goto err;

		if (oldpriv) {
			vhost_test_flush_vq(n, index);
		}
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

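/* VHOST_RESET_OWNER: stop the vq and return the device to its
 * pre-VHOST_SET_OWNER state with a fresh memory mapping table. */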
static long vhost_test_reset_owner(struct vhost_test *n)
{
	void *priv = NULL;
	long err;
	struct vhost_umem *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_test_stop(n, &priv);
	vhost_test_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
done:
	mutex_unlock(&n->dev.mutex);
	return err;
}

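/* Record the feature bits acked by userspace; refuse VHOST_F_LOG_ALL
 * unless the dirty log that was set up is actually accessible. */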
static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
	struct vhost_virtqueue *vq;

	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	vq = &n->vqs[VHOST_TEST_VQ];
	mutex_lock(&vq->mutex);
	vq->acked_features = features;
	mutex_unlock(&vq->mutex);
	mutex_unlock(&n->dev.mutex);
	return 0;
}

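/* Device-specific ioctls; anything we don't handle is forwarded to the
 * generic vhost_dev_ioctl()/vhost_vring_ioctl() helpers.
 *
 * A minimal userspace sketch of driving this device (an assumed test
 * harness, not part of this module; ring/memory setup and error
 * handling omitted):
 *
 *	int fd = open("/dev/vhost-test", O_RDWR);
 *	int on = 1, off = 0;
 *
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	// ... VHOST_SET_MEM_TABLE, VHOST_SET_VRING_* setup here ...
 *	ioctl(fd, VHOST_TEST_RUN, &on);	 // start servicing the test vq
 *	ioctl(fd, VHOST_TEST_RUN, &off); // stop it again
 *	close(fd);
 */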
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	struct vhost_test *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	int test;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_TEST_RUN:
		if (copy_from_user(&test, argp, sizeof test))
			return -EFAULT;
		return vhost_test_run(n, test);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		return vhost_test_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_test_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		vhost_test_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_test_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_test_release,
	.unlocked_ioctl = vhost_test_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_test_compat_ioctl,
#endif
	.open           = vhost_test_open,
	.llseek         = noop_llseek,
};

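/* Expose the device as /dev/vhost-test with a dynamically allocated misc
 * minor; module_misc_device() supplies the module init/exit boilerplate. */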
static struct miscdevice vhost_test_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "vhost-test",
	.fops  = &vhost_test_fops,
};
module_misc_device(vhost_test_misc);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");