#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/skbuff.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>

/* This is for zerocopy: the used buffer len is set to 1 once the lower
 * device has completed the DMA. */
#define VHOST_DMA_DONE_LEN	1
#define VHOST_DMA_CLEAR_LEN	0
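
/*
 * Illustrative sketch (not part of the original header): a zerocopy
 * completion callback would typically mark the pending used entry with
 * VHOST_DMA_DONE_LEN so the worker can later flush it to the used ring.
 * The ubuf_info field usage here is an assumption made for the example.
 */
#if 0	/* example only */
static void example_zerocopy_done(struct ubuf_info *ubuf)
{
	struct vhost_ubuf_ref *ubufs = ubuf->ctx;	/* assumed: ctx points at the vq's ubuf ref */
	struct vhost_virtqueue *vq = ubufs->vq;

	/* ubuf->desc was recorded at submission time as the heads[] slot */
	vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
}
#endif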

struct vhost_device;

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

struct vhost_work {
	struct list_head node;
	vhost_work_fn_t fn;
	wait_queue_head_t done;
	int flushing;
	unsigned queue_seq;
	unsigned done_seq;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table table;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct vhost_work work;
	unsigned long mask;
	struct vhost_dev *dev;
};

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev);
void vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
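
/*
 * Illustrative sketch (not in the original header): a backend such as
 * vhost-net initializes a vhost_poll with the handler that should run in
 * the worker thread, then arms it on the file it wants to watch.  The
 * names example_handle_rx/example_sock_file are placeholders.
 */
#if 0	/* example only */
static void example_handle_rx(struct vhost_work *work)
{
	struct vhost_poll *poll = container_of(work, struct vhost_poll, work);
	/* runs in the vhost worker thread context */
}

static void example_setup(struct vhost_dev *dev, struct file *example_sock_file)
{
	static struct vhost_poll poll;

	vhost_poll_init(&poll, example_handle_rx, POLLIN, dev);
	vhost_poll_start(&poll, example_sock_file);	/* queue work when readable */
	/* ... later ... */
	vhost_poll_stop(&poll);
	vhost_poll_flush(&poll);	/* wait for any queued work to finish */
}
#endif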

struct vhost_log {
	u64 addr;
	u64 len;
};

struct vhost_virtqueue;

struct vhost_ubuf_ref {
	struct kref kref;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *, bool zcopy);
void vhost_ubuf_put(struct vhost_ubuf_ref *);
void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *);

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	struct file *kick;
	struct file *call;
	struct file *error;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or on timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether the signalled_used value above is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	/* hdr is used to store the virtio header.
	 * Since each iovec has >= 1 byte length, we never need more than
	 * header length entries to store the header. */
	struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
	struct iovec *indirect;
	size_t vhost_hlen;
	size_t sock_hlen;
	struct vring_used_elem *heads;
	/* We use a kind of RCU to access private pointer.
	 * All readers access it from worker, which makes it possible to
	 * flush the vhost_work instead of synchronize_rcu. Therefore readers do
	 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
	 * vhost_work execution acts instead of rcu_read_lock() and the end of
	 * vhost_work execution acts instead of rcu_read_unlock().
	 * Writers use virtqueue mutex. */
	void __rcu *private_data;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* first used idx for DMA done zerocopy buffers */
	int done_idx;
	/* an array of userspace buffers info */
	struct ubuf_info *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_ubuf_ref *ubufs;
};

struct vhost_dev {
	/* Readers use RCU to access the memory table pointer,
	 * log base pointer and features.
	 * Writers use the mutex below. */
	struct vhost_memory __rcu *memory;
	struct mm_struct *mm;
	struct mutex mutex;
	unsigned acked_features;
	struct vhost_virtqueue *vqs;
	int nvqs;
	struct file *log_file;
	struct eventfd_ctx *log_ctx;
	spinlock_t work_lock;
	struct list_head work_list;
	struct task_struct *worker;
};

long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
long vhost_dev_check_owner(struct vhost_dev *);
long vhost_dev_reset_owner(struct vhost_dev *);
void vhost_dev_cleanup(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);

int vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

int vhost_init_used(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
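
/*
 * Illustrative sketch (not in the original header) of how a kick handler
 * typically consumes descriptors with the helpers above: pop a descriptor
 * chain, process it, then return it to the used ring and signal the guest.
 * The actual I/O and most error handling are elided.
 */
#if 0	/* example only */
static void example_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_dev *dev = vq->dev;
	unsigned int out, in;
	int head;

	mutex_lock(&vq->mutex);
	for (;;) {
		head = vhost_get_vq_desc(dev, vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (unlikely(head < 0))
			break;			/* bad descriptor */
		if (head == vq->num) {
			/* ring empty: re-enable guest notifications and stop */
			if (unlikely(vhost_enable_notify(dev, vq))) {
				vhost_disable_notify(dev, vq);
				continue;	/* more buffers raced in, retry */
			}
			break;
		}
		/* ... process vq->iov[0 .. out + in) here ... */
		vhost_add_used_and_signal(dev, vq, head, 0);
	}
	mutex_unlock(&vq->mutex);
}
#endif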

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);
void vhost_zerocopy_callback(void *arg);
int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq);

#define vq_err(vq, fmt, ...) do {				\
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);		\
		if ((vq)->error_ctx)				\
			eventfd_signal((vq)->error_ctx, 1);	\
	} while (0)
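
/*
 * Illustrative usage (not in the original header): vq_err() logs the message
 * with pr_debug() and, when an error eventfd is attached to the virtqueue,
 * also notifies userspace through it.  The fragment below assumes it sits in
 * a handler where head came back from vhost_get_vq_desc().
 */
#if 0	/* example only */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Descriptor index %u out of range (ring size %u)\n",
		       head, vq->num);
		return -EINVAL;
	}
#endif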

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			 (1ULL << VIRTIO_NET_F_MRG_RXBUF),
};

static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
{
	unsigned acked_features;

	/* TODO: check that we are running from vhost_worker or dev mutex is
	 * held? */
	acked_features = rcu_dereference_index_check(dev->acked_features, 1);
	return acked_features & (1 << bit);
}
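
/*
 * Illustrative usage (not in the original header): vhost-net style code
 * picks the virtio-net header size depending on which features the guest
 * has acked.  Treat this as a sketch of the pattern, not the exact driver
 * logic.
 */
#if 0	/* example only */
	size_t vhost_hlen;

	if (vhost_has_feature(dev, VIRTIO_NET_F_MRG_RXBUF))
		vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		vhost_hlen = sizeof(struct virtio_net_hdr);
#endif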

void vhost_enable_zcopy(int vq);

#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node node;
	vhost_work_fn_t fn;
	unsigned long flags;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table table;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct vhost_work work;
	__poll_t mask;
	struct vhost_dev *dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);
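
/*
 * Illustrative sketch (not in the original header): a stand-alone work item
 * is initialized once and then queued to the device's worker thread; the
 * function runs from that kthread.  Names are placeholders.
 */
#if 0	/* example only */
static struct vhost_work example_work;

static void example_work_fn(struct vhost_work *work)
{
	/* executes in dev->worker context */
}

static void example_kick_worker(struct vhost_dev *dev)
{
	vhost_work_init(&example_work, example_work_fn);
	vhost_work_queue(dev, &example_work);
}
#endif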

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);

struct vhost_log {
	u64 addr;
	u64 len;
};

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

struct vhost_umem_node {
	struct rb_node rb;
	struct list_head link;
	__u64 start;
	__u64 last;
	__u64 size;
	__u64 userspace_addr;
	__u32 perm;
	__u32 flags_padding;
	__u64 __subtree_last;
};

struct vhost_umem {
	struct rb_root_cached umem_tree;
	struct list_head umem_list;
	int numem;
};
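
/*
 * Illustrative sketch (not in the original header): each vhost_umem_node
 * maps the guest/IOVA range [start, last] onto a userspace virtual address,
 * so a hit translates as shown below.  The helper name is a placeholder;
 * the real lookup walks the interval tree held in struct vhost_umem.
 */
#if 0	/* example only */
static void __user *example_umem_translate(const struct vhost_umem_node *node,
					   u64 addr)
{
	if (addr < node->start || addr > node->last)
		return NULL;	/* not covered by this node */
	return (void __user *)(unsigned long)
		(node->userspace_addr + (addr - node->start));
}
#endif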

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or on timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether the signalled_used value above is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_umem *umem;
	struct vhost_umem *iotlb;
	void *private_data;
	u64 acked_features;
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};

struct vhost_msg_node {
	union {
		struct vhost_msg msg;
		struct vhost_msg_v2 msg_v2;
	};
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct llist_head work_list;
	struct task_struct *worker;
	struct vhost_umem *umem;
	struct vhost_umem *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
};

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight);
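
/*
 * Illustrative sketch (not in the original header): a backend allocates its
 * virtqueues and hands them to vhost_dev_init() together with the iov limit
 * and the per-run work weights.  The EXAMPLE_* limits are made-up values
 * for this sketch, not the constants any real backend uses.
 */
#if 0	/* example only */
#define EXAMPLE_WEIGHT		256		/* max descriptor chains per work run */
#define EXAMPLE_BYTE_WEIGHT	(1 << 19)	/* max bytes per work run */

static void example_dev_setup(struct vhost_dev *dev,
			      struct vhost_virtqueue **vqs, int nvqs)
{
	vhost_dev_init(dev, vqs, nvqs, UIO_MAXIOV,
		       EXAMPLE_WEIGHT, EXAMPLE_BYTE_WEIGHT);
}
#endif
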
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_umem *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
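
/*
 * Illustrative sketch (not in the original header): with this newer API the
 * device argument is gone from vhost_get_vq_desc(), and long-running
 * handlers bound each run with vhost_exceeds_weight().  Processing of the
 * descriptor chain itself is elided.
 */
#if 0	/* example only */
	unsigned int out, in;
	int head, pkts = 0;
	size_t total_len = 0;

	do {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0 || head == vq->num)
			break;		/* error or ring empty */
		/* ... consume vq->iov here, add the chain's length to total_len ... */
		vhost_add_used_and_signal(vq->dev, vq, head, 0);
	} while (!vhost_exceeds_weight(vq, ++pkts, total_len));
#endif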

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len,
		    struct iovec *iov, int count);
int vq_meta_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
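
/*
 * Illustrative sketch (not in the original header): when the device lacks a
 * translation, it can build an IOTLB miss message and queue it on
 * dev->read_list for userspace to pick up through the chardev read path.
 * The field usage below follows the vhost_msg_v2 uapi layout but is only a
 * sketch of the pattern.
 */
#if 0	/* example only */
static int example_report_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node;

	node = vhost_new_msg(vq, VHOST_IOTLB_MSG_V2);
	if (!node)
		return -ENOMEM;

	node->msg_v2.iotlb.iova = iova;
	node->msg_v2.iotlb.perm = access;
	node->msg_v2.iotlb.type = VHOST_IOTLB_MISS;

	vhost_enqueue_msg(dev, &dev->read_list, node);
	return 0;
}
#endif
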
__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);

#define vq_err(vq, fmt, ...) do {				\
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);		\
		if ((vq)->error_ctx)				\
			eventfd_signal((vq)->error_ctx, 1);	\
	} while (0)

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
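
/*
 * Illustrative sketch (not in the original header): ring fields live in
 * guest/userspace memory in virtio byte order, so they are read with a
 * user-copy primitive and converted through the accessors above.
 */
#if 0	/* example only */
static int example_read_avail_idx(struct vhost_virtqueue *vq, u16 *idx)
{
	__virtio16 avail_idx;

	if (get_user(avail_idx, &vq->avail->idx))
		return -EFAULT;		/* userspace mapping went away */
	*idx = vhost16_to_cpu(vq, avail_idx);
	return 0;
}
#endif
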
#endif