v3.1
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>

/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio does not use). */
#define virtio_mb() smp_mb()
#define virtio_rmb() smp_rmb()
#define virtio_wmb() smp_wmb()
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration. So fall back to mandatory barriers instead. */
#define virtio_mb() mb()
#define virtio_rmb() rmb()
#define virtio_wmb() wmb()
#endif

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

int virtqueue_add_buf_gfp(struct virtqueue *_vq,
			  struct scatterlist sg[],
			  unsigned int out,
			  unsigned int in,
			  void *data,
			  gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync).  FIXME: avoid modulus here? */
	avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
	vq->vring.avail->ring[avail] = head;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return vq->num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
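/*
 * Illustrative sketch, not part of the original file: a minimal driver-side
 * send path using the API above.  The token `msg`, its length `len`, and an
 * already-initialized `vq` are hypothetical; error handling is reduced to
 * the ring-full case.
 */
#if 0	/* example only */
static int example_send(struct virtqueue *vq, void *msg, unsigned int len)
{
	struct scatterlist sg;
	int err;

	/* One readable (out) segment for the host, no writable (in) ones. */
	sg_init_one(&sg, msg, len);
	err = virtqueue_add_buf_gfp(vq, &sg, 1, 0, msg, GFP_KERNEL);
	if (err < 0)
		return err;	/* e.g. -ENOSPC: ring full */

	/* Tell the host there is new work. */
	virtqueue_kick(vq);
	return 0;
}
#endif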

void virtqueue_kick(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	START_USE(vq);
	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb();

	old = vq->vring.avail->idx;
	new = vq->vring.avail->idx = old + vq->num_added;
	vq->num_added = 0;

	/* Need to update avail index before checking if we should notify */
	virtio_mb();

	if (vq->event ?
	    vring_need_event(vring_avail_event(&vq->vring), new, old) :
	    !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
		/* Prod other side to tell it about changes. */
		vq->notify(&vq->vq);

	END_USE(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb();

	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb();
	}

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
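/*
 * Illustrative sketch, not part of the original file: draining all used
 * buffers, typically from a virtqueue callback.  `handle_reply` is a
 * hypothetical per-driver completion handler.
 */
#if 0	/* example only */
static void example_drain(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	/* Each token is the `data` pointer passed to virtqueue_add_buf_gfp. */
	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
		handle_reply(token, len);
}
#endif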

void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = vq->last_used_idx;
	virtio_mb();
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
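/*
 * Illustrative sketch, not part of the original file: the usual race-free
 * polling loop built from disable_cb/enable_cb, similar in spirit to what
 * virtio_net does.  `process_one` is a hypothetical handler.
 */
#if 0	/* example only */
static void example_poll(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	virtqueue_disable_cb(vq);
again:
	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
		process_one(token, len);

	/* enable_cb returns false if a buffer snuck in meanwhile. */
	if (!virtqueue_enable_cb(vq)) {
		virtqueue_disable_cb(vq);
		goto again;
	}
}
#endif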

bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb();
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
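/*
 * Illustrative sketch, not part of the original file: a transport wires
 * vring_interrupt() up as its irq handler, passing the virtqueue as the
 * cookie (virtio_pci and virtio_mmio do essentially this).  `irq` and
 * `name` are hypothetical.
 */
#if 0	/* example only */
static int example_request_irq(unsigned int irq, struct virtqueue *vq,
			       const char *name)
{
	/* vring_interrupt() matches irq_handler_t and takes the vq cookie. */
	return request_irq(irq, vring_interrupt, IRQF_SHARED, name, vq);
}
#endif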

struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->notify = notify;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
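/*
 * Illustrative sketch, not part of the original file: how a transport might
 * allocate the ring pages and create a queue.  NUM and ALIGN are hypothetical
 * constants; the ring memory must be physically contiguous and zeroed.
 */
#if 0	/* example only */
static struct virtqueue *example_create(struct virtio_device *vdev,
					void (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *))
{
	void *pages = alloc_pages_exact(vring_size(NUM, ALIGN),
					GFP_KERNEL | __GFP_ZERO);

	if (!pages)
		return NULL;
	return vring_new_virtqueue(NUM, ALIGN, vdev, pages,
				   notify, callback, "requests");
}
#endif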

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

MODULE_LICENSE("GPL");
v3.5.6
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>

/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio-pci does not use). */
#define virtio_mb(vq) \
	do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
#define virtio_rmb(vq) \
	do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
#define virtio_wmb(vq) \
	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration. So fall back to mandatory barriers instead. */
#define virtio_mb(vq) mb()
#define virtio_rmb(vq) rmb()
#define virtio_wmb(vq) wmb()
#endif

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

/**
 * virtqueue_add_buf - expose buffer to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: the description of the buffer(s).
 * @out_num: the number of sg readable by other side
 * @in_num: the number of sg which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns remaining capacity of queue or a negative error
 * (ie. ENOSPC).  Note that it only really makes sense to treat all
 * positive return values as "available": indirect buffers mean that
 * we can put an entire sg[] array inside a single queue entry.
 */
int virtqueue_add_buf(struct virtqueue *_vq,
		      struct scatterlist sg[],
		      unsigned int out,
		      unsigned int in,
		      void *data,
		      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return vq->num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
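/*
 * Illustrative sketch, not part of the original file: batching several adds
 * under a lock and issuing the unserialized notify outside it, which is what
 * the kick_prepare/notify split above enables.  `sg`, `tokens`, `n` and
 * `lock` are hypothetical.
 */
#if 0	/* example only */
static void example_xmit_batch(struct virtqueue *vq, struct scatterlist sg[],
			       void *tokens[], unsigned int n,
			       spinlock_t *lock)
{
	bool kick;
	unsigned int j;

	spin_lock(lock);
	for (j = 0; j < n; j++)
		virtqueue_add_buf(vq, &sg[j], 1, 0, tokens[j], GFP_ATOMIC);
	kick = virtqueue_kick_prepare(vq);
	spin_unlock(lock);

	/* The expensive notify (often a vmexit) happens outside the lock. */
	if (kick)
		virtqueue_notify(vq);
}
#endif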

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_buf().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = vq->last_used_idx;
	virtio_mb(vq);
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
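/*
 * Illustrative sketch, not part of the original file: a completion path that
 * asks for an interrupt only once most in-flight buffers are done, cutting
 * the interrupt rate under load.  `free_one` is a hypothetical handler.
 */
#if 0	/* example only */
static void example_free_used(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	do {
		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
			free_one(token);
		/* Returns false if many buffers are still pending; re-drain. */
	} while (!virtqueue_enable_cb_delayed(vq));
}
#endif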

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_buf().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");