v5.9
// SPDX-License-Identifier: GPL-2.0
/* Simple test of virtio code, entirely in userspace. */
#define _GNU_SOURCE
#include <sched.h>
#include <err.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/uaccess.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <fcntl.h>

/* Hooks consumed by the userspace shims: the "user" memory window, plus
 * overrides that make kmalloc() hand back a chosen pointer and make kfree()
 * ignore indirect tables allocated inside that window. */
#define USER_MEM (1024*1024)
void *__user_addr_min, *__user_addr_max;
void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
static u64 user_addr_offset;

#define RINGSIZE 256
#define ALIGN 4096

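/* Stub callbacks for tests where no host notification or guest callback must
 * ever fire; reaching either one is a bug, so bail out hard. */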
static bool never_notify_host(struct virtqueue *vq)
{
	abort();
}

static void never_callback_guest(struct virtqueue *vq)
{
	abort();
}

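/* getrange callbacks tell vringh how a "guest" address maps into memory the
 * host may touch; this one hands back the whole user window in one range. */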
static bool getrange_iov(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
	if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
		return false;
	if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
		return false;

	r->start = (u64)(unsigned long)__user_addr_min - user_addr_offset;
	r->end_incl = (u64)(unsigned long)__user_addr_max - 1 - user_addr_offset;
	r->offset = user_addr_offset;
	return true;
}

/* We return single byte ranges. */
static bool getrange_slow(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
	if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
		return false;
	if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
		return false;

	r->start = addr;
	r->end_incl = r->start;
	r->offset = user_addr_offset;
	return true;
}

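/* Guest side of the parallel test: the virtio device plus the pipe fd used
 * to kick the host process, and a count of the kicks we send. */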
struct guest_virtio_device {
	struct virtio_device vdev;
	int to_host_fd;
	unsigned long notifies;
};

static bool parallel_notify_host(struct virtqueue *vq)
{
	int rc;
	struct guest_virtio_device *gvdev;

	gvdev = container_of(vq->vdev, struct guest_virtio_device, vdev);
	rc = write(gvdev->to_host_fd, "", 1);
	if (rc < 0)
		return false;
	gvdev->notifies++;
	return true;
}

static bool no_notify_host(struct virtqueue *vq)
{
	return true;
}

#define NUM_XFERS (10000000)

/* We aim for two "distant" cpus. */
static void find_cpus(unsigned int *first, unsigned int *last)
{
	unsigned int i;

	*first = -1U;
	*last = 0;
	for (i = 0; i < 4096; i++) {
		cpu_set_t set;
		CPU_ZERO(&set);
		CPU_SET(i, &set);
		if (sched_setaffinity(getpid(), sizeof(set), &set) == 0) {
			if (i < *first)
				*first = i;
			if (i > *last)
				*last = i;
		}
	}
}

/* Open-coded avail-head fetch for fast mode: returns 1 and sets *head on
 * success, 0 if the ring is empty, or a negative error from get_user(). */
static inline int vringh_get_head(struct vringh *vrh, u16 *head)
{
	u16 avail_idx, i;
	int err;

	err = get_user(avail_idx, &vrh->vring.avail->idx);
	if (err)
		return err;

	if (vrh->last_avail_idx == avail_idx)
		return 0;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = vrh->last_avail_idx & (vrh->vring.num - 1);

	err = get_user(*head, &vrh->vring.avail->ring[i]);
	if (err)
		return err;

	vrh->last_avail_idx++;
	return 1;
}

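/* Fork a host (parent, using vringh) and a guest (child, using a virtqueue)
 * that share one file mapping at different addresses, push NUM_XFERS buffers
 * between them, and signal each other over a pair of pipes. */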
static int parallel_test(u64 features,
			 bool (*getrange)(struct vringh *vrh,
					  u64 addr, struct vringh_range *r),
			 bool fast_vringh)
{
	void *host_map, *guest_map;
	int fd, mapsize, to_guest[2], to_host[2];
	unsigned long xfers = 0, notifies = 0, receives = 0;
	unsigned int first_cpu, last_cpu;
	cpu_set_t cpu_set;
	char buf[128];

	/* Create real file to mmap. */
	fd = open("/tmp/vringh_test-file", O_RDWR|O_CREAT|O_TRUNC, 0600);
	if (fd < 0)
		err(1, "Opening /tmp/vringh_test-file");

	/* Extra room at the end for some data, and indirects */
	mapsize = vring_size(RINGSIZE, ALIGN)
		+ RINGSIZE * 2 * sizeof(int)
		+ RINGSIZE * 6 * sizeof(struct vring_desc);
	mapsize = (mapsize + getpagesize() - 1) & ~(getpagesize() - 1);
	ftruncate(fd, mapsize);

	/* Parent and child use separate addresses, to check our mapping logic! */
	host_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	guest_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

	pipe(to_guest);
	pipe(to_host);

	CPU_ZERO(&cpu_set);
	find_cpus(&first_cpu, &last_cpu);
	printf("Using CPUS %u and %u\n", first_cpu, last_cpu);
	fflush(stdout);

	if (fork() != 0) {
		struct vringh vrh;
		int status, err, rlen = 0;
		char rbuf[5];

		/* We are the host: never access guest addresses! */
		munmap(guest_map, mapsize);

		__user_addr_min = host_map;
		__user_addr_max = __user_addr_min + mapsize;
		user_addr_offset = host_map - guest_map;
		assert(user_addr_offset);

		close(to_guest[0]);
		close(to_host[1]);

		vring_init(&vrh.vring, RINGSIZE, host_map, ALIGN);
		vringh_init_user(&vrh, features, RINGSIZE, true,
				 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);
		CPU_SET(first_cpu, &cpu_set);
		if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
			errx(1, "Could not set affinity to cpu %u", first_cpu);

		while (xfers < NUM_XFERS) {
			struct iovec host_riov[2], host_wiov[2];
			struct vringh_iov riov, wiov;
			u16 head, written;

			if (fast_vringh) {
				for (;;) {
					err = vringh_get_head(&vrh, &head);
					if (err != 0)
						break;
					err = vringh_need_notify_user(&vrh);
					if (err < 0)
						errx(1, "vringh_need_notify_user: %i",
						     err);
					if (err) {
						write(to_guest[1], "", 1);
						notifies++;
					}
				}
				if (err != 1)
					errx(1, "vringh_get_head");
				written = 0;
				goto complete;
			} else {
				vringh_iov_init(&riov,
						host_riov,
						ARRAY_SIZE(host_riov));
				vringh_iov_init(&wiov,
						host_wiov,
						ARRAY_SIZE(host_wiov));

				err = vringh_getdesc_user(&vrh, &riov, &wiov,
							  getrange, &head);
			}
			if (err == 0) {
				err = vringh_need_notify_user(&vrh);
				if (err < 0)
					errx(1, "vringh_need_notify_user: %i",
					     err);
				if (err) {
					write(to_guest[1], "", 1);
					notifies++;
				}

				if (!vringh_notify_enable_user(&vrh))
					continue;

				/* Swallow all notifies at once. */
				if (read(to_host[0], buf, sizeof(buf)) < 1)
					break;

				vringh_notify_disable_user(&vrh);
				receives++;
				continue;
			}
			if (err != 1)
				errx(1, "vringh_getdesc_user: %i", err);

			/* We simply copy bytes. */
			if (riov.used) {
				rlen = vringh_iov_pull_user(&riov, rbuf,
							    sizeof(rbuf));
				if (rlen != 4)
					errx(1, "vringh_iov_pull_user: %i",
					     rlen);
				assert(riov.i == riov.used);
				written = 0;
			} else {
				err = vringh_iov_push_user(&wiov, rbuf, rlen);
				if (err != rlen)
					errx(1, "vringh_iov_push_user: %i",
					     err);
				assert(wiov.i == wiov.used);
				written = err;
			}
		complete:
			xfers++;

			err = vringh_complete_user(&vrh, head, written);
			if (err != 0)
				errx(1, "vringh_complete_user: %i", err);
		}

		err = vringh_need_notify_user(&vrh);
		if (err < 0)
			errx(1, "vringh_need_notify_user: %i", err);
		if (err) {
			write(to_guest[1], "", 1);
			notifies++;
		}
		wait(&status);
		if (!WIFEXITED(status))
			errx(1, "Child died with signal %i?", WTERMSIG(status));
		if (WEXITSTATUS(status) != 0)
			errx(1, "Child exited %i?", WEXITSTATUS(status));
		printf("Host: notified %lu, pinged %lu\n", notifies, receives);
		return 0;
	} else {
		struct guest_virtio_device gvdev;
		struct virtqueue *vq;
		unsigned int *data;
		struct vring_desc *indirects;
		unsigned int finished = 0;

		/* We pass sg[]s pointing into here, but we need RINGSIZE+1 */
		data = guest_map + vring_size(RINGSIZE, ALIGN);
		indirects = (void *)data + (RINGSIZE + 1) * 2 * sizeof(int);

		/* We are the guest. */
		munmap(host_map, mapsize);

		close(to_guest[1]);
		close(to_host[0]);

		gvdev.vdev.features = features;
		INIT_LIST_HEAD(&gvdev.vdev.vqs);
		gvdev.to_host_fd = to_host[1];
		gvdev.notifies = 0;

		/* Run the guest on the other "distant" cpu, away from the host. */
		CPU_SET(last_cpu, &cpu_set);
		if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
			err(1, "Could not set affinity to cpu %u", last_cpu);

		vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &gvdev.vdev, true,
					 false, guest_map,
					 fast_vringh ? no_notify_host
					 : parallel_notify_host,
					 never_callback_guest, "guest vq");

		/* Don't kfree indirects. */
		__kfree_ignore_start = indirects;
		__kfree_ignore_end = indirects + RINGSIZE * 6;

		while (xfers < NUM_XFERS) {
			struct scatterlist sg[4];
			unsigned int num_sg, len;
			int *dbuf, err;
			bool output = !(xfers % 2);

			/* Consume bufs. */
			while ((dbuf = virtqueue_get_buf(vq, &len)) != NULL) {
				if (len == 4)
					assert(*dbuf == finished - 1);
				else if (!fast_vringh)
					assert(*dbuf == finished);
				finished++;
			}

			/* Produce a buffer. */
			dbuf = data + (xfers % (RINGSIZE + 1));

			if (output)
				*dbuf = xfers;
			else
				*dbuf = -1;

			switch ((xfers / sizeof(*dbuf)) % 4) {
			case 0:
				/* Nasty three-element sg list. */
				sg_init_table(sg, num_sg = 3);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 2);
				sg_set_buf(&sg[2], (void *)dbuf + 3, 1);
				break;
			case 1:
				sg_init_table(sg, num_sg = 2);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 3);
				break;
			case 2:
				sg_init_table(sg, num_sg = 1);
				sg_set_buf(&sg[0], (void *)dbuf, 4);
				break;
			case 3:
				sg_init_table(sg, num_sg = 4);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 1);
				sg_set_buf(&sg[2], (void *)dbuf + 2, 1);
				sg_set_buf(&sg[3], (void *)dbuf + 3, 1);
				break;
			}

			/* May allocate an indirect, so force it to allocate
			 * user addr */
			__kmalloc_fake = indirects + (xfers % RINGSIZE) * 4;
			if (output)
				err = virtqueue_add_outbuf(vq, sg, num_sg, dbuf,
							   GFP_KERNEL);
			else
				err = virtqueue_add_inbuf(vq, sg, num_sg,
							  dbuf, GFP_KERNEL);

			if (err == -ENOSPC) {
				if (!virtqueue_enable_cb_delayed(vq))
					continue;
				/* Swallow all notifies at once. */
				if (read(to_guest[0], buf, sizeof(buf)) < 1)
					break;

				receives++;
				virtqueue_disable_cb(vq);
				continue;
			}

			if (err)
				errx(1, "virtqueue_add_in/outbuf: %i", err);

			xfers++;
			virtqueue_kick(vq);
		}

		/* Any extra? */
		while (finished != xfers) {
			int *dbuf;
			unsigned int len;

			/* Consume bufs. */
			dbuf = virtqueue_get_buf(vq, &len);
			if (dbuf) {
				if (len == 4)
					assert(*dbuf == finished - 1);
				else
					assert(len == 0);
				finished++;
				continue;
			}

			if (!virtqueue_enable_cb_delayed(vq))
				continue;
			if (read(to_guest[0], buf, sizeof(buf)) < 1)
				break;

			receives++;
			virtqueue_disable_cb(vq);
		}

		printf("Guest: notified %lu, pinged %lu\n",
		       gvdev.notifies, receives);
		vring_del_virtqueue(vq);
		return 0;
	}
}

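/* Single-process tests: drive the guest virtqueue and the host vringh over
 * the same memory, exercising plain, huge and hand-built indirect
 * descriptors. */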
int main(int argc, char *argv[])
{
	struct virtio_device vdev;
	struct virtqueue *vq;
	struct vringh vrh;
	struct scatterlist guest_sg[RINGSIZE], *sgs[2];
	struct iovec host_riov[2], host_wiov[2];
	struct vringh_iov riov, wiov;
	struct vring_used_elem used[RINGSIZE];
	char buf[28];
	u16 head;
	int err;
	unsigned i;
	void *ret;
	bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r);
	bool fast_vringh = false, parallel = false;

	getrange = getrange_iov;
	vdev.features = 0;
	INIT_LIST_HEAD(&vdev.vqs);

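	/* --indirect and --eventidx set the matching ring feature bits,
	 * --virtio-1 selects the modern little-endian layout, --slow-range
	 * translates addresses one byte at a time, --fast-vringh uses the
	 * open-coded head fetch above, and --parallel runs the forked
	 * host/guest test instead. */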
	while (argv[1]) {
		if (strcmp(argv[1], "--indirect") == 0)
			__virtio_set_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC);
		else if (strcmp(argv[1], "--eventidx") == 0)
			__virtio_set_bit(&vdev, VIRTIO_RING_F_EVENT_IDX);
		else if (strcmp(argv[1], "--virtio-1") == 0)
			__virtio_set_bit(&vdev, VIRTIO_F_VERSION_1);
		else if (strcmp(argv[1], "--slow-range") == 0)
			getrange = getrange_slow;
		else if (strcmp(argv[1], "--fast-vringh") == 0)
			fast_vringh = true;
		else if (strcmp(argv[1], "--parallel") == 0)
			parallel = true;
		else
			errx(1, "Unknown arg %s", argv[1]);
		argv++;
	}

	if (parallel)
		return parallel_test(vdev.features, getrange, fast_vringh);

	if (posix_memalign(&__user_addr_min, PAGE_SIZE, USER_MEM) != 0)
		abort();
	__user_addr_max = __user_addr_min + USER_MEM;
	memset(__user_addr_min, 0, vring_size(RINGSIZE, ALIGN));

	/* Set up guest side. */
	vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true, false,
				 __user_addr_min,
				 never_notify_host, never_callback_guest,
				 "guest vq");

	/* Set up host side. */
	vring_init(&vrh.vring, RINGSIZE, __user_addr_min, ALIGN);
	vringh_init_user(&vrh, vdev.features, RINGSIZE, true,
			 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);

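	/* What follows: poll an empty ring, pass one small read+write
	 * descriptor pair, then a huge descriptor spanning the whole ring,
	 * complete a batch via the multi interface, and finally check a
	 * weird hand-built indirect chain. */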
	/* No descriptor to get yet... */
	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 0)
		errx(1, "vringh_getdesc_user: %i", err);

	/* Guest puts in a descriptor. */
	memcpy(__user_addr_max - 1, "a", 1);
	sg_init_table(guest_sg, 1);
	sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
	sg_init_table(guest_sg+1, 1);
	sg_set_buf(&guest_sg[1], __user_addr_max - 3, 2);
	sgs[0] = &guest_sg[0];
	sgs[1] = &guest_sg[1];

	/* May allocate an indirect, so force it to allocate user addr */
	__kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	err = virtqueue_add_sgs(vq, sgs, 1, 1, &err, GFP_KERNEL);
	if (err)
		errx(1, "virtqueue_add_sgs: %i", err);
	__kmalloc_fake = NULL;

	/* Host retrieves it. */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 1)
		errx(1, "vringh_getdesc_user: %i", err);

	assert(riov.used == 1);
	assert(riov.iov[0].iov_base == __user_addr_max - 1);
	assert(riov.iov[0].iov_len == 1);
	if (getrange != getrange_slow) {
		assert(wiov.used == 1);
		assert(wiov.iov[0].iov_base == __user_addr_max - 3);
		assert(wiov.iov[0].iov_len == 2);
	} else {
		assert(wiov.used == 2);
		assert(wiov.iov[0].iov_base == __user_addr_max - 3);
		assert(wiov.iov[0].iov_len == 1);
		assert(wiov.iov[1].iov_base == __user_addr_max - 2);
		assert(wiov.iov[1].iov_len == 1);
	}

	err = vringh_iov_pull_user(&riov, buf, 5);
	if (err != 1)
		errx(1, "vringh_iov_pull_user: %i", err);
	assert(buf[0] == 'a');
	assert(riov.i == 1);
	assert(vringh_iov_pull_user(&riov, buf, 5) == 0);

	memcpy(buf, "bcdef", 5);
	err = vringh_iov_push_user(&wiov, buf, 5);
	if (err != 2)
		errx(1, "vringh_iov_push_user: %i", err);
	assert(memcmp(__user_addr_max - 3, "bc", 2) == 0);
	assert(wiov.i == wiov.used);
	assert(vringh_iov_push_user(&wiov, buf, 5) == 0);

	/* Host is done. */
	err = vringh_complete_user(&vrh, head, err);
	if (err != 0)
		errx(1, "vringh_complete_user: %i", err);

	/* Guest should see used token now. */
	__kfree_ignore_start = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	__kfree_ignore_end = __kfree_ignore_start + 1;
	ret = virtqueue_get_buf(vq, &i);
	if (ret != &err)
		errx(1, "virtqueue_get_buf: %p", ret);
	assert(i == 2);

	/* Guest puts in a huge descriptor. */
	sg_init_table(guest_sg, RINGSIZE);
	for (i = 0; i < RINGSIZE; i++) {
		sg_set_buf(&guest_sg[i],
			   __user_addr_max - USER_MEM/4, USER_MEM/4);
	}

	/* Fill contents with recognisable garbage. */
	for (i = 0; i < USER_MEM/4; i++)
		((char *)__user_addr_max - USER_MEM/4)[i] = i;

	/* This will allocate an indirect, so force it to allocate user addr */
	__kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	err = virtqueue_add_outbuf(vq, guest_sg, RINGSIZE, &err, GFP_KERNEL);
	if (err)
		errx(1, "virtqueue_add_outbuf (large): %i", err);
	__kmalloc_fake = NULL;

	/* Host picks it up (allocates new iov). */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 1)
		errx(1, "vringh_getdesc_user: %i", err);

	assert(riov.max_num & VRINGH_IOV_ALLOCATED);
	assert(riov.iov != host_riov);
	if (getrange != getrange_slow)
		assert(riov.used == RINGSIZE);
	else
		assert(riov.used == RINGSIZE * USER_MEM/4);

	assert(!(wiov.max_num & VRINGH_IOV_ALLOCATED));
	assert(wiov.used == 0);

	/* Pull data back out (in odd chunks), should be as expected. */
	for (i = 0; i < RINGSIZE * USER_MEM/4; i += 3) {
		err = vringh_iov_pull_user(&riov, buf, 3);
		if (err != 3 && i + err != RINGSIZE * USER_MEM/4)
			errx(1, "vringh_iov_pull_user large: %i", err);
		assert(buf[0] == (char)i);
		assert(err < 2 || buf[1] == (char)(i + 1));
		assert(err < 3 || buf[2] == (char)(i + 2));
	}
	assert(riov.i == riov.used);
	vringh_iov_cleanup(&riov);
	vringh_iov_cleanup(&wiov);

	/* Complete using multi interface, just because we can. */
	used[0].id = head;
	used[0].len = 0;
	err = vringh_complete_multi_user(&vrh, used, 1);
	if (err)
		errx(1, "vringh_complete_multi_user(1): %i", err);

	/* Free up those descriptors. */
	ret = virtqueue_get_buf(vq, &i);
	if (ret != &err)
		errx(1, "virtqueue_get_buf: %p", ret);

	/* Add lots of descriptors. */
	sg_init_table(guest_sg, 1);
	sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
	for (i = 0; i < RINGSIZE; i++) {
		err = virtqueue_add_outbuf(vq, guest_sg, 1, &err, GFP_KERNEL);
		if (err)
			errx(1, "virtqueue_add_outbuf (multiple): %i", err);
	}

	/* Now get many, and consume them all at once. */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	for (i = 0; i < RINGSIZE; i++) {
		err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
		if (err != 1)
			errx(1, "vringh_getdesc_user: %i", err);
		used[i].id = head;
		used[i].len = 0;
	}
	/* Make sure it wraps around ring, to test! */
	assert(vrh.vring.used->idx % RINGSIZE != 0);
	err = vringh_complete_multi_user(&vrh, used, RINGSIZE);
	if (err)
		errx(1, "vringh_complete_multi_user: %i", err);

	/* Free those buffers. */
	for (i = 0; i < RINGSIZE; i++) {
		unsigned len;
		assert(virtqueue_get_buf(vq, &len) != NULL);
	}

	/* Test weird (but legal!) indirect. */
	if (__virtio_test_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
		char *data = __user_addr_max - USER_MEM/4;
		struct vring_desc *d = __user_addr_max - USER_MEM/2;
		struct vring vring;

		/* Force creation of direct, which we modify. */
		__virtio_clear_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC);
		vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true,
					 false, __user_addr_min,
					 never_notify_host,
					 never_callback_guest,
					 "guest vq");

		sg_init_table(guest_sg, 4);
		sg_set_buf(&guest_sg[0], d, sizeof(*d)*2);
		sg_set_buf(&guest_sg[1], d + 2, sizeof(*d)*1);
		sg_set_buf(&guest_sg[2], data + 6, 4);
		sg_set_buf(&guest_sg[3], d + 3, sizeof(*d)*3);

		err = virtqueue_add_outbuf(vq, guest_sg, 4, &err, GFP_KERNEL);
		if (err)
			errx(1, "virtqueue_add_outbuf (indirect): %i", err);

		vring_init(&vring, RINGSIZE, __user_addr_min, ALIGN);

		/* They're used in order, but double-check... */
		assert(vring.desc[0].addr == (unsigned long)d);
		assert(vring.desc[1].addr == (unsigned long)(d+2));
		assert(vring.desc[2].addr == (unsigned long)data + 6);
		assert(vring.desc[3].addr == (unsigned long)(d+3));
		vring.desc[0].flags |= VRING_DESC_F_INDIRECT;
		vring.desc[1].flags |= VRING_DESC_F_INDIRECT;
		vring.desc[3].flags |= VRING_DESC_F_INDIRECT;

		/* First indirect */
		d[0].addr = (unsigned long)data;
		d[0].len = 1;
		d[0].flags = VRING_DESC_F_NEXT;
		d[0].next = 1;
		d[1].addr = (unsigned long)data + 1;
		d[1].len = 2;
		d[1].flags = 0;

		/* Second indirect */
		d[2].addr = (unsigned long)data + 3;
		d[2].len = 3;
		d[2].flags = 0;

		/* Third indirect */
		d[3].addr = (unsigned long)data + 10;
		d[3].len = 5;
		d[3].flags = VRING_DESC_F_NEXT;
		d[3].next = 1;
		d[4].addr = (unsigned long)data + 15;
		d[4].len = 6;
		d[4].flags = VRING_DESC_F_NEXT;
		d[4].next = 2;
		d[5].addr = (unsigned long)data + 21;
		d[5].len = 7;
		d[5].flags = 0;

		/* Host picks it up (allocates new iov). */
		vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
		vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

		err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
		if (err != 1)
			errx(1, "vringh_getdesc_user: %i", err);

		if (head != 0)
			errx(1, "vringh_getdesc_user: head %i not 0", head);

		assert(riov.max_num & VRINGH_IOV_ALLOCATED);
		if (getrange != getrange_slow)
			assert(riov.used == 7);
		else
			assert(riov.used == 28);
		err = vringh_iov_pull_user(&riov, buf, 29);
		assert(err == 28);

		/* Data should be linear. */
		for (i = 0; i < err; i++)
			assert(buf[i] == i);
		vringh_iov_cleanup(&riov);
	}

	/* Don't leak memory... */
	vring_del_virtqueue(vq);
	free(__user_addr_min);

	return 0;
}
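The test is built by the userspace harness in tools/virtio (running make there is expected to produce a vringh_test binary) and is then invoked with any combination of the flags above, for example --parallel together with --indirect and --eventidx.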