drivers/vhost/vhost.h at v3.1
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/skbuff.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>

/* This is for zerocopy, used buffer len is set to 1 when lower device DMA
 * done */
#define VHOST_DMA_DONE_LEN	1
#define VHOST_DMA_CLEAR_LEN	0

struct vhost_device;

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

struct vhost_work {
	struct list_head	  node;
	vhost_work_fn_t		  fn;
	wait_queue_head_t	  done;
	int			  flushing;
	unsigned		  queue_seq;
	unsigned		  done_seq;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table                table;
	wait_queue_head_t        *wqh;
	wait_queue_t              wait;
	struct vhost_work	  work;
	unsigned long		  mask;
	struct vhost_dev	 *dev;
};

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev);
void vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);

struct vhost_log {
	u64 addr;
	u64 len;
};

struct vhost_virtqueue;

struct vhost_ubuf_ref {
	struct kref kref;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *, bool zcopy);
void vhost_ubuf_put(struct vhost_ubuf_ref *);
void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *);

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	struct file *kick;
	struct file *call;
	struct file *error;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether the signalled_used value above is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	/* hdr is used to store the virtio header.
	 * Since each iovec has >= 1 byte length, we never need more than
	 * header length entries to store the header. */
	struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
	struct iovec *indirect;
	size_t vhost_hlen;
	size_t sock_hlen;
	struct vring_used_elem *heads;
	/* We use a kind of RCU to access private pointer.
	 * All readers access it from worker, which makes it possible to
	 * flush the vhost_work instead of synchronize_rcu. Therefore readers do
	 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
	 * vhost_work execution acts instead of rcu_read_lock() and the end of
	 * vhost_work execution acts instead of rcu_read_unlock().
	 * Writers use virtqueue mutex. */
	void __rcu *private_data;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* first used idx for DMA done zerocopy buffers */
	int done_idx;
	/* an array of userspace buffers info */
	struct ubuf_info *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_ubuf_ref *ubufs;
};

struct vhost_dev {
	/* Readers use RCU to access memory table pointer
	 * log base pointer and features.
	 * Writers use mutex below.*/
	struct vhost_memory __rcu *memory;
	struct mm_struct *mm;
	struct mutex mutex;
	unsigned acked_features;
	struct vhost_virtqueue *vqs;
	int nvqs;
	struct file *log_file;
	struct eventfd_ctx *log_ctx;
	spinlock_t work_lock;
	struct list_head work_list;
	struct task_struct *worker;
};

long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
long vhost_dev_check_owner(struct vhost_dev *);
long vhost_dev_reset_owner(struct vhost_dev *);
void vhost_dev_cleanup(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);

int vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

int vhost_init_used(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
			       struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);
void vhost_zerocopy_callback(void *arg);
int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq);

#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);       \
		if ((vq)->error_ctx)                               \
				eventfd_signal((vq)->error_ctx, 1);\
	} while (0)

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			 (1ULL << VIRTIO_NET_F_MRG_RXBUF),
};

static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
{
	unsigned acked_features;

	/* TODO: check that we are running from vhost_worker or dev mutex is
	 * held? */
	acked_features = rcu_dereference_index_check(dev->acked_features, 1);
	return acked_features & (1 << bit);
}

void vhost_enable_zcopy(int vq);

#endif
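
A note on how the v3.1 interface above is meant to be consumed: a backend (vhost-net is the in-tree user here) embeds a vhost_dev plus its vhost_virtqueue array, assigns handle_kick for each queue before calling vhost_dev_init() so the core can wire it into the virtqueue's vhost_poll, then drains the ring from the kick handler with vhost_get_vq_desc() and completes buffers with vhost_add_used_and_signal(). The sketch below is only a rough illustration against the declarations above; struct my_backend, its layout and the empty "consume" step are hypothetical, not code from the kernel tree.

/* Illustrative sketch only (hypothetical backend, v3.1 API as declared above).
 * Assumes the usual kernel headers in addition to vhost.h itself. */
struct my_backend {
	struct vhost_dev dev;
	struct vhost_virtqueue vq;	/* vq.handle_kick = my_handle_kick */
};

static void my_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct my_backend *b = container_of(vq->dev, struct my_backend, dev);
	unsigned int out, in;
	int head;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&b->dev, vq);

	for (;;) {
		/* Map the next available descriptor chain into vq->iov. */
		head = vhost_get_vq_desc(&b->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (unlikely(head < 0)) {
			vq_err(vq, "descriptor fetch failed: %d\n", head);
			break;
		}
		if (head == vq->num) {
			/* Ring empty: re-enable guest notifications, then
			 * re-check to close the race with a new kick. */
			if (unlikely(vhost_enable_notify(&b->dev, vq))) {
				vhost_disable_notify(&b->dev, vq);
				continue;
			}
			break;
		}

		/* ... consume or fill vq->iov[0 .. out + in) here ... */

		/* Return the buffer to the used ring and signal the call
		 * eventfd if the guest asked for it. */
		vhost_add_used_and_signal(&b->dev, vq, head, 0);
	}

	mutex_unlock(&vq->mutex);
}
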
drivers/vhost/vhost.h at v4.6
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

struct vhost_work {
	struct list_head	  node;
	vhost_work_fn_t		  fn;
	wait_queue_head_t	  done;
	int			  flushing;
	unsigned		  queue_seq;
	unsigned		  done_seq;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table                table;
	wait_queue_head_t        *wqh;
	wait_queue_t              wait;
	struct vhost_work	  work;
	unsigned long		  mask;
	struct vhost_dev	 *dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);

struct vhost_log {
	u64 addr;
	u64 len;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	struct file *kick;
	struct file *call;
	struct file *error;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether the signalled_used value above is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_memory *memory;
	void *private_data;
	u64 acked_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};

struct vhost_dev {
	struct vhost_memory *memory;
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct file *log_file;
	struct eventfd_ctx *log_ctx;
	spinlock_t work_lock;
	struct list_head work_list;
	struct task_struct *worker;
};

void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_memory *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
			       struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);

#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);       \
		if ((vq)->error_ctx)                               \
				eventfd_signal((vq)->error_ctx, 1);\
	} while (0)

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
#endif
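
Relative to v3.1, the v4.6 interface moves per-queue state out of vhost_dev: private_data and acked_features now live in vhost_virtqueue under the vq mutex (the RCU scheme is gone), the memory table is a plain pointer rather than __rcu, vhost_get_vq_desc() no longer takes a vhost_dev argument, vhost_init_used() has become vhost_vq_init_access(), feature checks are per-virtqueue, and guest-visible ring fields go through the vhost16/32/64 accessors so legacy, cross-endian and VIRTIO_F_VERSION_1 rings are handled by the same code. The helper below is a hypothetical illustration of those accessors (my_desc_len is not a kernel function); it assumes linux/uaccess.h for copy_from_user().

/* Illustrative sketch only: read one descriptor's length in CPU endianness. */
static int my_desc_len(struct vhost_virtqueue *vq, unsigned int i, u32 *len)
{
	struct vring_desc desc;

	/* The descriptor table sits in guest memory in guest (virtio)
	 * endianness, so it is copied in and converted at the point of use. */
	if (copy_from_user(&desc, vq->desc + i, sizeof(desc)))
		return -EFAULT;

	/* vhost_is_little_endian() folds in both VIRTIO_F_VERSION_1
	 * (vq->is_le) and, when CONFIG_VHOST_CROSS_ENDIAN_LEGACY is set,
	 * the userspace-requested override (vq->user_be). */
	*len = vhost32_to_cpu(vq, desc.len);
	return 0;
}
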