drivers/vhost/vhost.h (v6.8)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
#include <linux/irqbypass.h>

struct vhost_work;
struct vhost_task;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node	node;
	vhost_work_fn_t		fn;
	unsigned long		flags;
};

struct vhost_worker {
	struct vhost_task	*vtsk;
	/* Used to serialize device wide flushing with worker swapping. */
	struct mutex		mutex;
	struct llist_head	work_list;
	u64			kcov_handle;
	u32			id;
	int			attachment_cnt;
};
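Here vhost_work is the unit of deferred execution and vhost_worker is the vhost_task context that drains work_list. A minimal sketch of defining and queueing one work item, using vhost_work_init() and vhost_vq_work_queue() declared further down; the my_backend naming is hypothetical:

/* Hypothetical backend embedding a vhost_work item. */
struct my_backend {
	struct vhost_virtqueue *vq;
	struct vhost_work work;
};

static void my_work_fn(struct vhost_work *work)
{
	struct my_backend *b = container_of(work, struct my_backend, work);
	/* Runs on the vhost worker task, serialized with other queued work. */
	(void)b;
}

static void my_backend_setup(struct my_backend *b)
{
	vhost_work_init(&b->work, my_work_fn);
}

static void my_backend_kick(struct my_backend *b)
{
	/* VHOST_WORK_QUEUED in work->flags dedups repeat queueing; a false
	 * return presumably means no worker is attached to the vq yet. */
	if (!vhost_vq_work_queue(b->vq, &b->work))
		pr_debug("vq has no worker attached\n");
}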
/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table		table;
	wait_queue_head_t	*wqh;
	wait_queue_entry_t	wait;
	struct vhost_work	work;
	__poll_t		mask;
	struct vhost_dev	*dev;
	struct vhost_virtqueue	*vq;
};

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev,
		     struct vhost_virtqueue *vq);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_dev_flush(struct vhost_dev *dev);
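The poll helpers arm a waitqueue callback on a file so that readiness queues poll->work on the vq's worker instead of running in the waker's context. A hedged sketch of the lifecycle, with hypothetical backend functions:

static int my_start_polling(struct vhost_poll *poll, struct vhost_virtqueue *vq,
			    struct vhost_dev *dev, struct file *sock_file)
{
	/* EPOLLIN: queue vq->handle_kick whenever the file becomes readable. */
	vhost_poll_init(poll, vq->handle_kick, EPOLLIN, dev, vq);
	return vhost_poll_start(poll, sock_file);	/* may fail */
}

static void my_stop_polling(struct vhost_poll *poll, struct vhost_dev *dev)
{
	vhost_poll_stop(poll);		/* detach from the file's waitqueue */
	vhost_dev_flush(dev);		/* wait for already-queued work to finish */
}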
struct vhost_log {
	u64 addr;
	u64 len;
};

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

struct vhost_vring_call {
	struct eventfd_ctx *ctx;
	struct irq_bypass_producer producer;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;
	struct vhost_worker __rcu *worker;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	vring_desc_t __user *desc;
	vring_avail_t __user *avail;
	vring_used_t __user *used;
	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct vhost_vring_call call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	void *private_data;
	u64 acked_features;
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
	struct iovec log_iov[64];

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};
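Per the comments above, last_avail_idx and last_used_idx pack a 15-bit ring index plus a wrap-counter bit when VIRTIO_F_RING_PACKED is negotiated. Illustrative helpers (not part of vhost.h) showing how such a field decomposes:

/* Illustrative only: split a packed-ring index field into its parts. */
static inline u16 packed_idx(u16 v)
{
	return v & 0x7fff;	/* ring position, limited to 0x7fff */
}

static inline bool packed_wrap_counter(u16 v)
{
	return v & 0x8000;	/* high bit is the wrap counter */
}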
struct vhost_msg_node {
	union {
		struct vhost_msg msg;
		struct vhost_msg_v2 msg_v2;
	};
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
	struct xarray worker_xa;
	bool use_worker;
	int (*msg_handler)(struct vhost_dev *dev, u32 asid,
			   struct vhost_iotlb_msg *msg);
};

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev, u32 asid,
				       struct vhost_iotlb_msg *msg));
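A sketch of device setup against this signature, loosely modeled on drivers/vhost/net.c; the my_dev shape and the limit values are illustrative assumptions, not taken from this header:

struct my_dev {
	struct vhost_dev dev;
	struct vhost_virtqueue vq;
};

static int my_dev_open(struct my_dev *d)
{
	struct vhost_virtqueue **vqs;

	vqs = kcalloc(1, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		return -ENOMEM;
	vqs[0] = &d->vq;
	d->vq.handle_kick = NULL;	/* a real backend installs its handler here */
	/* iov_limit/weight/byte_weight bound work done per worker pass;
	 * the values below are illustrative. */
	vhost_dev_init(&d->dev, vqs, 1, UIO_MAXIOV, 64, 1 << 20,
		       true /* use_worker */, NULL /* msg_handler */);
	return 0;
}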
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
			void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
void vhost_clear_msg(struct vhost_dev *dev);

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
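vhost_get_vq_desc() returns a descriptor head, vq->num when the ring is empty, or a negative errno. A hedged sketch of the canonical handle_kick loop built from these calls, with the actual buffer processing elided:

static void my_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq =
		container_of(work, struct vhost_virtqueue, poll.work);
	unsigned int out, in;
	int head, len = 0, pkts = 0;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(vq->dev, vq);
	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;			/* error */
		if (head == vq->num) {		/* ring empty */
			if (unlikely(vhost_enable_notify(vq->dev, vq))) {
				vhost_disable_notify(vq->dev, vq);
				continue;	/* buffers raced in; retry */
			}
			break;
		}
		/* ... process vq->iov[0 .. out+in) here, set len ... */
		vhost_add_used_and_signal(vq->dev, vq, head, len);
		if (unlikely(vhost_exceeds_weight(vq, ++pkts, len)))
			break;			/* requeues the work and yields */
	}
	mutex_unlock(&vq->mutex);
}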
void vhost_vq_flush(struct vhost_virtqueue *vq);
bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
bool vhost_vq_has_work(struct vhost_virtqueue *vq);
bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
			       struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len,
		    struct iovec *iov, int count);
int vq_meta_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features);
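The msg-node API is how the kernel reports IOTLB misses to userspace through the char device. A sketch of that flow under the assumption that the v2 message format was negotiated; the real logic lives in vhost.c and also handles the v1 layout:

static int my_report_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int perm)
{
	struct vhost_msg_node *node = vhost_new_msg(vq, VHOST_IOTLB_MISS);

	if (!node)
		return -ENOMEM;
	node->msg_v2.type = VHOST_IOTLB_MSG_V2;	/* assuming v2 was negotiated */
	node->msg_v2.iotlb.type = VHOST_IOTLB_MISS;
	node->msg_v2.iotlb.iova = iova;
	node->msg_v2.iotlb.perm = perm;
	/* Enqueueing on dev->read_list also wakes poll/read waiters, which
	 * then answer with a VHOST_IOTLB_UPDATE write. */
	vhost_enqueue_msg(vq->dev, &vq->dev->read_list, node);
	return 0;
}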
__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			    poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d);

void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
			  struct vhost_iotlb_map *map);

#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);       \
		if ((vq)->error_ctx)                               \
				eventfd_signal((vq)->error_ctx);\
	} while (0)
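vq_err() logs through pr_debug() and, if userspace registered an error eventfd, signals vq->error_ctx so the problem is visible outside the kernel log. Typical guard (function name hypothetical):

static int my_check_head(struct vhost_virtqueue *vq, unsigned int head)
{
	if (unlikely(head >= vq->num)) {
		/* Logged via pr_debug; also signals vq->error_ctx if set. */
		vq_err(vq, "invalid descriptor head %u >= %u\n", head, vq->num);
		return -EINVAL;
	}
	return 0;
}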
enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

/**
 * vhost_vq_set_backend - Set backend.
 *
 * @vq: Virtqueue.
 * @private_data: The private data.
 *
 * Context: Need to call with vq->mutex acquired.
 */
static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
					void *private_data)
{
	vq->private_data = private_data;
}

/**
 * vhost_vq_get_backend - Get backend.
 *
 * @vq: Virtqueue.
 *
 * Context: Need to call with vq->mutex acquired.
 * Return: Private data previously set with vhost_vq_set_backend.
 */
static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
	return vq->private_data;
}
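A small sketch of honoring the locking contract in the kernel-doc above when swapping a backend (function name hypothetical):

static void *my_swap_backend(struct vhost_virtqueue *vq, void *new_backend)
{
	void *old;

	mutex_lock(&vq->mutex);
	old = vhost_vq_get_backend(vq);
	vhost_vq_set_backend(vq, new_backend);
	mutex_unlock(&vq->mutex);
	return old;	/* caller drains and releases the old backend */
}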
static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}
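Example check; vhost itself uses this pattern, for instance when deciding ring endianness from VIRTIO_F_VERSION_1 (the helper name below is hypothetical):

static inline bool my_vq_is_modern(struct vhost_virtqueue *vq)
{
	return vhost_has_feature(vq, VIRTIO_F_VERSION_1);
}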
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
#endif
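Ring fields are guest-endian (__virtio16/32/64 types); these accessors pick the right byte order per queue via vhost_is_little_endian(). A sketch of parsing a locally copied descriptor; the function name is hypothetical and the copy from vq->desc is elided:

static void my_parse_desc(struct vhost_virtqueue *vq,
			  const struct vring_desc *desc)
{
	u64 addr  = vhost64_to_cpu(vq, desc->addr);
	u32 len   = vhost32_to_cpu(vq, desc->len);
	u16 flags = vhost16_to_cpu(vq, desc->flags);

	pr_debug("desc: addr %llx len %u flags %x\n", addr, len, flags);
}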
drivers/vhost/vhost.h (v4.6)
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

struct vhost_work {
	struct list_head	  node;
	vhost_work_fn_t		  fn;
	wait_queue_head_t	  done;
	int			  flushing;
	unsigned		  queue_seq;
	unsigned		  done_seq;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table		  table;
	wait_queue_head_t	 *wqh;
	wait_queue_t		  wait;
	struct vhost_work	  work;
	unsigned long		  mask;
	struct vhost_dev	 *dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
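Note what differs from v6.8: there is no device-wide vhost_dev_flush() yet; flushing is per poll or per work item, sequenced by the queue_seq/done_seq counters in vhost_work above. A hedged teardown sketch:

static void my_teardown(struct vhost_dev *dev, struct vhost_poll *poll,
			struct vhost_work *extra_work)
{
	vhost_poll_stop(poll);			/* detach from the file */
	vhost_poll_flush(poll);			/* flushes poll->work */
	vhost_work_flush(dev, extra_work);	/* waits for done_seq to catch up */
}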
 
struct vhost_log {
	u64 addr;
	u64 len;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	struct file *kick;
	struct file *call;
	struct file *error;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_memory *memory;
	void *private_data;
	u64 acked_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};
struct vhost_dev {
	struct vhost_memory *memory;
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct file *log_file;
	struct eventfd_ctx *log_ctx;
	spinlock_t work_lock;
	struct list_head work_list;
	struct task_struct *worker;
};
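In this era each device owns a single worker kthread (dev->worker), fed through dev->work_list under dev->work_lock; there is no iov/weight limiting and no per-vq worker. Setup is correspondingly simpler (my_dev as in the v6.8 sketch above; the kthread itself is created when userspace issues VHOST_SET_OWNER):

static int my_dev_open_legacy(struct my_dev *d)
{
	struct vhost_virtqueue **vqs;

	vqs = kcalloc(1, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		return -ENOMEM;
	vqs[0] = &d->vq;
	/* No limits to pass; the worker starts later via vhost_dev_set_owner(). */
	vhost_dev_init(&d->dev, vqs, 1);
	return 0;
}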
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_memory *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
			       struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);

#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);       \
		if ((vq)->error_ctx)                               \
				eventfd_signal((vq)->error_ctx, 1);\
	} while (0)
enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
#endif