v5.9
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
#include <linux/irqbypass.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node	  node;
	vhost_work_fn_t		  fn;
	unsigned long		  flags;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table		  table;
	wait_queue_head_t	 *wqh;
	wait_queue_entry_t	  wait;
	struct vhost_work	  work;
	__poll_t		  mask;
	struct vhost_dev	 *dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
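
/*
 * Illustrative sketch (not part of this header): a backend wires a handler
 * into the worker thread with vhost_poll_init() and arms it on a file with
 * vhost_poll_start(); my_handle_kick and "file" below are hypothetical.
 *
 *	static void my_handle_kick(struct vhost_work *work)
 *	{
 *		struct vhost_virtqueue *vq =
 *			container_of(work, struct vhost_virtqueue, poll.work);
 *		// ... handle the guest kick under vq->mutex ...
 *	}
 *
 *	vhost_poll_init(&vq->poll, my_handle_kick, EPOLLIN, &dev);
 *	vhost_poll_start(&vq->poll, file);   // watch an eventfd or socket
 *	vhost_poll_queue(&vq->poll);         // or queue the work directly
 *	vhost_poll_flush(&vq->poll);         // wait for queued work to finish
 *	vhost_poll_stop(&vq->poll);
 */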

struct vhost_log {
	u64 addr;
	u64 len;
};

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

struct vhost_vring_call {
	struct eventfd_ctx *ctx;
	struct irq_bypass_producer producer;
	spinlock_t ctx_lock;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	vring_desc_t __user *desc;
	vring_avail_t __user *avail;
	vring_used_t __user *used;
	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct vhost_vring_call call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;
	/* Whether signalled_used is valid. */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	void *private_data;
	u64 acked_features;
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};

struct vhost_msg_node {
	union {
		struct vhost_msg msg;
		struct vhost_msg_v2 msg_v2;
	};
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct llist_head work_list;
	struct task_struct *worker;
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
	u64 kcov_handle;
	bool use_worker;
	int (*msg_handler)(struct vhost_dev *dev,
			   struct vhost_iotlb_msg *msg);
};

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev,
				       struct vhost_iotlb_msg *msg));
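
/*
 * Illustrative sketch (not from the kernel tree): typical device setup,
 * loosely following drivers such as vhost-net. The names my_dev, my_vqs,
 * vq_rx/vq_tx and MY_WEIGHT/MY_BYTE_WEIGHT are hypothetical placeholders;
 * the weight arguments bound how much work one worker iteration may do
 * before rescheduling.
 *
 *	struct vhost_virtqueue *my_vqs[2] = { &vq_rx, &vq_tx };
 *	long r;
 *
 *	vhost_dev_init(&my_dev, my_vqs, 2, UIO_MAXIOV,
 *		       MY_WEIGHT, MY_BYTE_WEIGHT,
 *		       true, NULL);         // use_worker = true, no msg_handler
 *	r = vhost_dev_set_owner(&my_dev);   // binds to caller's mm, spawns worker
 */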
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
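
/*
 * Illustrative sketch of the canonical descriptor loop, modeled on the
 * style of simple backends like vhost/test.c; error paths are abbreviated
 * and "process" stands in for real backend work.
 *
 *	for (;;) {
 *		int head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					     &out, &in, NULL, NULL);
 *		if (unlikely(head < 0))
 *			break;                      // descriptor error
 *		if (head == vq->num) {              // ring empty
 *			if (unlikely(vhost_enable_notify(&dev, vq))) {
 *				vhost_disable_notify(&dev, vq);
 *				continue;           // more buffers raced in
 *			}
 *			break;
 *		}
 *		// ... process vq->iov[0 .. out + in) ...
 *		vhost_add_used_and_signal(&dev, vq, head, len);
 *	}
 */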

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len,
		    struct iovec *iov, int count);
int vq_meta_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features);

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);
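
/*
 * Flow note: together these form the device <-> userspace IOTLB channel.
 * On a translation miss the device side builds a VHOST_IOTLB_MISS message
 * with vhost_new_msg() and queues it with vhost_enqueue_msg(); userspace
 * reads it via vhost_chr_read_iter() and replies with a VHOST_IOTLB_UPDATE
 * through vhost_chr_write_iter(), which populates the iotlb.
 */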

void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
			  struct vhost_iotlb_map *map);

#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);              \
		if ((vq)->error_ctx)                               \
			eventfd_signal((vq)->error_ctx, 1);        \
	} while (0)

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

/**
 * vhost_vq_set_backend - Set backend.
 *
 * @vq:            Virtqueue.
 * @private_data:  The private data.
 *
 * Context: Need to call with vq->mutex acquired.
 */
static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
					void *private_data)
{
	vq->private_data = private_data;
}

/**
 * vhost_vq_get_backend - Get backend.
 *
 * @vq:            Virtqueue.
 *
 * Context: Need to call with vq->mutex acquired.
 * Return: Private data previously set with vhost_vq_set_backend.
 */
static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
	return vq->private_data;
}
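
/*
 * Usage note: the backend stashes its per-queue state here (vhost-net,
 * for example, stores the backend socket). Both helpers must run under
 * vq->mutex:
 *
 *	mutex_lock(&vq->mutex);
 *	vhost_vq_set_backend(vq, sock);      // attach
 *	// ...
 *	sock = vhost_vq_get_backend(vq);     // retrieve later
 *	mutex_unlock(&vq->mutex);
 */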

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}
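
/*
 * Illustrative check: feature-conditional paths branch on the negotiated
 * bits, e.g. event-index interrupt suppression (VIRTIO_RING_F_EVENT_IDX is
 * one of the VHOST_FEATURES bits above):
 *
 *	if (vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX))
 *		// ... consult used_event/avail_event before signalling ...
 */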

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
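
/*
 * Illustrative sketch: ring fields live in virtio (possibly foreign)
 * endianness, so every access goes through these helpers. A minimal
 * example fetching the available index with a plain get_user(); the real
 * code in vhost.c goes through IOTLB-aware accessors instead:
 *
 *	__virtio16 idx;
 *
 *	if (get_user(idx, &vq->avail->idx))
 *		return -EFAULT;
 *	vq->avail_idx = vhost16_to_cpu(vq, idx);
 */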
#endif
v4.10.11
 
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node	  node;
	vhost_work_fn_t		  fn;
	wait_queue_head_t	  done;
	int			  flushing;
	unsigned		  queue_seq;
	unsigned		  done_seq;
	unsigned long		  flags;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table		  table;
	wait_queue_head_t	 *wqh;
	wait_queue_t		  wait;
	struct vhost_work	  work;
	unsigned long		  mask;
	struct vhost_dev	 *dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);

struct vhost_log {
	u64 addr;
	u64 len;
};

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

struct vhost_umem_node {
	struct rb_node rb;
	struct list_head link;
	__u64 start;
	__u64 last;
	__u64 size;
	__u64 userspace_addr;
	__u32 perm;
	__u32 flags_padding;
	__u64 __subtree_last;
};

struct vhost_umem {
	struct rb_root umem_tree;
	struct list_head umem_list;
	int numem;
};
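
/*
 * Note: START()/LAST() feed the generic interval-tree template; vhost.c of
 * this era instantiates it along the lines of
 *
 *	INTERVAL_TREE_DEFINE(struct vhost_umem_node, rb, __u64,
 *			     __subtree_last, START, LAST, static inline,
 *			     vhost_umem_interval_tree);
 *
 * so a lookup such as
 * vhost_umem_interval_tree_iter_first(&umem->umem_tree, addr, addr + len - 1)
 * finds the umem node covering a guest address range.
 */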

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	struct file *kick;
	struct file *call;
	struct file *error;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Last used event we've seen */
	u16 last_used_event;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used is valid. */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_umem *umem;
	struct vhost_umem *iotlb;
	void *private_data;
	u64 acked_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};

struct vhost_msg_node {
	struct vhost_msg msg;
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct file *log_file;
	struct eventfd_ctx *log_ctx;
	struct llist_head work_list;
	struct task_struct *worker;
	struct vhost_umem *umem;
	struct vhost_umem *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
};

void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_umem *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);
int vq_iotlb_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			    poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);

#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);              \
		if ((vq)->error_ctx)                               \
			eventfd_signal((vq)->error_ctx, 1);        \
	} while (0)

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
#endif