v6.13.7
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
#include <linux/irqbypass.h>

struct vhost_work;
struct vhost_task;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node	node;
	vhost_work_fn_t		fn;
	unsigned long		flags;
};

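/*
 * A vhost worker wraps a vhost_task kernel thread that drains work_list.
 * A device can own several workers (tracked in vhost_dev->worker_xa by
 * id); each virtqueue is attached to one worker, and attachment_cnt
 * counts the virtqueues currently attached to it.
 */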
struct vhost_worker {
	struct vhost_task	*vtsk;
	struct vhost_dev	*dev;
	/* Used to serialize device wide flushing with worker swapping. */
	struct mutex		mutex;
	struct llist_head	work_list;
	u64			kcov_handle;
	u32			id;
	int			attachment_cnt;
	bool			killed;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table		table;
	wait_queue_head_t	*wqh;
	wait_queue_entry_t	wait;
	struct vhost_work	work;
	__poll_t		mask;
	struct vhost_dev	*dev;
	struct vhost_virtqueue	*vq;
};

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev,
		     struct vhost_virtqueue *vq);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_dev_flush(struct vhost_dev *dev);

struct vhost_log {
	u64 addr;
	u64 len;
};

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

struct vhost_vring_call {
	struct eventfd_ctx *ctx;
	struct irq_bypass_producer producer;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;
	struct vhost_worker __rcu *worker;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	vring_desc_t __user *desc;
	vring_avail_t __user *avail;
	vring_used_t __user *used;
	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct vhost_vring_call call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* True if signalled_used is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	void *private_data;
	u64 acked_features;
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
	struct iovec log_iov[64];

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};

struct vhost_msg_node {
  union {
	  struct vhost_msg msg;
	  struct vhost_msg_v2 msg_v2;
  };
  struct vhost_virtqueue *vq;
  struct list_head node;
};

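/*
 * Per-device state embedded by each vhost backend (net, scsi, vsock,
 * vdpa, ...). It owns the virtqueues, the IOTLB used for address
 * translation, and the workers that execute queued vhost_work.
 */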
struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
	struct xarray worker_xa;
	bool use_worker;
	int (*msg_handler)(struct vhost_dev *dev, u32 asid,
			   struct vhost_iotlb_msg *msg);
};

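/*
 * Device lifecycle helpers. The weight and byte_weight passed to
 * vhost_dev_init() bound how many buffers/bytes a single work-function
 * invocation may process before returning; backends enforce this in
 * their handle_kick loops via vhost_exceeds_weight().
 */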
bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev, u32 asid,
				       struct vhost_iotlb_msg *msg));
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
			void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
void vhost_clear_msg(struct vhost_dev *dev);

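/*
 * Descriptor processing. A rough sketch of how a backend's handle_kick
 * work function typically drives this API (illustrative only: local
 * variable declarations and error handling are omitted, and
 * consume_iov() is a stand-in for the backend's actual data path):
 *
 *	mutex_lock(&vq->mutex);
 *	vhost_disable_notify(dev, vq);
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (unlikely(head < 0))
 *			break;
 *		if (head == vq->num) {
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;
 *			}
 *			break;
 *		}
 *		len = consume_iov(vq->iov, out, in);
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *		total_len += len;
 *		if (unlikely(vhost_exceeds_weight(vq, ++pkts, total_len)))
 *			break;
 *	}
 *	mutex_unlock(&vq->mutex);
 */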
int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
bool vhost_vq_has_work(struct vhost_virtqueue *vq);
bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
			       struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len,
		    struct iovec *iov, int count);
int vq_meta_prefetch(struct vhost_virtqueue *vq);

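/*
 * IOTLB miss/update messaging with userspace: nodes queued on
 * dev->read_list are delivered through the character device
 * (vhost_chr_read_iter()/vhost_chr_poll()), and replies come back
 * through vhost_chr_write_iter().
 */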
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features);

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			    poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d);

void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
			  struct vhost_iotlb_map *map);

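/*
 * Report a virtqueue error: log it at debug level and, if set up,
 * signal the error eventfd so userspace notices.
 */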
#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);       \
		if ((vq)->error_ctx)                               \
				eventfd_signal((vq)->error_ctx);\
	} while (0)

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

/**
 * vhost_vq_set_backend - Set backend.
 *
 * @vq:           Virtqueue.
 * @private_data: The private data.
 *
 * Context: Need to call with vq->mutex acquired.
 */
static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
					void *private_data)
{
	vq->private_data = private_data;
}

/**
 * vhost_vq_get_backend - Get backend.
 *
 * @vq:           Virtqueue.
 *
 * Context: Need to call with vq->mutex acquired.
 * Return: Private data previously set with vhost_vq_set_backend.
 */
static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
	return vq->private_data;
}

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
#endif
v5.14.15
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
#include <linux/irqbypass.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node	node;
	vhost_work_fn_t		fn;
	unsigned long		flags;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table		table;
	wait_queue_head_t	*wqh;
	wait_queue_entry_t	wait;
	struct vhost_work	work;
	__poll_t		mask;
	struct vhost_dev	*dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_dev_flush(struct vhost_dev *dev);

struct vhost_log {
	u64 addr;
	u64 len;
};

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

struct vhost_vring_call {
	struct eventfd_ctx *ctx;
	struct irq_bypass_producer producer;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	vring_desc_t __user *desc;
	vring_avail_t __user *avail;
	vring_used_t __user *used;
	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct vhost_vring_call call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* True if signalled_used is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	void *private_data;
	u64 acked_features;
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
	struct iovec log_iov[64];

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};

struct vhost_msg_node {
  union {
	  struct vhost_msg msg;
	  struct vhost_msg_v2 msg_v2;
  };
  struct vhost_virtqueue *vq;
  struct list_head node;
};

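/*
 * In this version a device has a single worker: work items are queued
 * on dev->work_list and drained by the dev->worker kthread.
 */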
struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct llist_head work_list;
	struct task_struct *worker;
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
	u64 kcov_handle;
	bool use_worker;
	int (*msg_handler)(struct vhost_dev *dev,
			   struct vhost_iotlb_msg *msg);
};

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev,
				       struct vhost_iotlb_msg *msg));
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
			       struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len,
		    struct iovec *iov, int count);
int vq_meta_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features);

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			    poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);

void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
			  struct vhost_iotlb_map *map);

#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);       \
		if ((vq)->error_ctx)                               \
				eventfd_signal((vq)->error_ctx, 1);\
	} while (0)

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

/**
 * vhost_vq_set_backend - Set backend.
 *
 * @vq:           Virtqueue.
 * @private_data: The private data.
 *
 * Context: Need to call with vq->mutex acquired.
 */
static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
					void *private_data)
{
	vq->private_data = private_data;
}

/**
 * vhost_vq_get_backend - Get backend.
 *
 * @vq:           Virtqueue.
 *
 * Context: Need to call with vq->mutex acquired.
 * Return: Private data previously set with vhost_vq_set_backend.
 */
static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
	return vq->private_data;
}

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
#endif