/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
	struct list_head list;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

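/* Look up the vhost_vsock instance that owns @guest_cid.  The caller must
 * hold vhost_vsock_lock; vhost_vsock_get() below is the locking wrapper.
 */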
static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	list_for_each_entry(vsock, &vhost_vsock_list, list) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	spin_lock_bh(&vhost_vsock_lock);
	vsock = __vhost_vsock_get(guest_cid);
	spin_unlock_bh(&vhost_vsock_lock);

	return vsock;
}

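/* Drain vsock->send_pkt_list into the guest's RX virtqueue: for each queued
 * packet, grab a descriptor chain, copy the header and payload into the
 * guest buffers, and signal the guest.  Runs from send_pkt_work and from the
 * RX kick handler.
 */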
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		len = iov_length(&vq->iov[out], in);
		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
		if (nbytes != pkt->len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
		added = true;

		if (pkt->reply) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we have resources to resume tx processing? */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}

		/* Deliver to monitoring devices all correctly transmitted
		 * packets.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		virtio_transport_free_pkt(pkt);
	}
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

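/* Queue a packet for delivery to the guest identified by hdr.dst_cid and
 * kick the worker.  Returns the packet length on success, or -ENODEV (and
 * frees the packet) if no vhost_vsock instance owns that CID.
 */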
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
	return len;
}

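/* Discard any packets still queued for @vsk, e.g. when the socket is being
 * torn down.  If queued replies were dropped, re-arm the TX virtqueue in
 * case it had been throttled by vhost_vsock_more_replies().
 */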
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	LIST_HEAD(freeme);

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		return -ENODEV;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	return 0;
}

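/* Build a virtio_vsock_pkt from the guest TX descriptor chain currently in
 * vq->iov: copy the header, validate the payload length, then copy the
 * payload.  Returns NULL on malformed input or allocation failure.
 */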
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

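/* Guest->host path: pop packets off the TX virtqueue, hand correctly
 * addressed ones to the virtio_transport core, and stop early if too many
 * replies are already pending.
 */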
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	for (;;) {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
		added = true;
	}

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

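/* Host->guest path: the guest has refilled the RX virtqueue, so try again to
 * deliver any packets still sitting on send_pkt_list.
 */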
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);

	spin_lock_bh(&vhost_vsock_lock);
	list_add_tail(&vsock->list, &vhost_vsock_list);
	spin_unlock_bh(&vhost_vsock_lock);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		sk->sk_state = SS_UNCONNECTED;
		sk->sk_err = ECONNRESET;
		sk->sk_error_report(sk);
	}
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	spin_lock_bh(&vhost_vsock_lock);
	list_del(&vsock->list);
	spin_unlock_bh(&vhost_vsock_lock);

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	spin_lock_bh(&vhost_vsock_lock);
	other = __vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		spin_unlock_bh(&vhost_vsock_lock);
		return -EADDRINUSE;
	}
	vsock->guest_cid = guest_cid;
	spin_unlock_bh(&vhost_vsock_lock);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

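/* Device ioctl handler.  A VMM typically drives this device roughly as
 * follows (sketch, not part of this file): open /dev/vhost-vsock, issue
 * VHOST_SET_OWNER and the usual vring setup ioctls (handled by the generic
 * vhost code in the default case below), then VHOST_VSOCK_SET_GUEST_CID to
 * assign the guest's CID and VHOST_VSOCK_SET_RUNNING with 1 to start the
 * device.
 */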
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_vsock_dev_compat_ioctl(struct file *f, unsigned int ioctl,
					 unsigned long arg)
{
	return vhost_vsock_dev_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek		= noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_vsock_dev_compat_ioctl,
#endif
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");