1// SPDX-License-Identifier: GPL-2.0-or-later
2/* Virtio ring implementation.
3 *
4 * Copyright 2007 Rusty Russell IBM Corporation
5 */
6#include <linux/virtio.h>
7#include <linux/virtio_ring.h>
8#include <linux/virtio_config.h>
9#include <linux/device.h>
10#include <linux/slab.h>
11#include <linux/module.h>
12#include <linux/hrtimer.h>
13#include <linux/dma-mapping.h>
14#include <xen/xen.h>
15
16#ifdef DEBUG
17/* For development, we want to crash whenever the ring is screwed. */
18#define BAD_RING(_vq, fmt, args...) \
19 do { \
20 dev_err(&(_vq)->vq.vdev->dev, \
21 "%s:"fmt, (_vq)->vq.name, ##args); \
22 BUG(); \
23 } while (0)
24/* Caller is supposed to guarantee no reentry. */
25#define START_USE(_vq) \
26 do { \
27 if ((_vq)->in_use) \
28 panic("%s:in_use = %i\n", \
29 (_vq)->vq.name, (_vq)->in_use); \
30 (_vq)->in_use = __LINE__; \
31 } while (0)
32#define END_USE(_vq) \
33 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
34#define LAST_ADD_TIME_UPDATE(_vq) \
35 do { \
36 ktime_t now = ktime_get(); \
37 \
38 /* No kick or get, with .1 second between? Warn. */ \
39 if ((_vq)->last_add_time_valid) \
40 WARN_ON(ktime_to_ms(ktime_sub(now, \
41 (_vq)->last_add_time)) > 100); \
42 (_vq)->last_add_time = now; \
43 (_vq)->last_add_time_valid = true; \
44 } while (0)
45#define LAST_ADD_TIME_CHECK(_vq) \
46 do { \
47 if ((_vq)->last_add_time_valid) { \
48 WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
49 (_vq)->last_add_time)) > 100); \
50 } \
51 } while (0)
52#define LAST_ADD_TIME_INVALID(_vq) \
53 ((_vq)->last_add_time_valid = false)
54#else
55#define BAD_RING(_vq, fmt, args...) \
56 do { \
57 dev_err(&_vq->vq.vdev->dev, \
58 "%s:"fmt, (_vq)->vq.name, ##args); \
59 (_vq)->broken = true; \
60 } while (0)
61#define START_USE(vq)
62#define END_USE(vq)
63#define LAST_ADD_TIME_UPDATE(vq)
64#define LAST_ADD_TIME_CHECK(vq)
65#define LAST_ADD_TIME_INVALID(vq)
66#endif
67
68struct vring_desc_state_split {
69 void *data; /* Data for callback. */
70 struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
71};
72
73struct vring_desc_state_packed {
74 void *data; /* Data for callback. */
75 struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
76 u16 num; /* Descriptor list length. */
77 u16 next; /* The next desc state in a list. */
78 u16 last; /* The last desc state in a list. */
79};
80
81struct vring_desc_extra_packed {
82 dma_addr_t addr; /* Buffer DMA addr. */
83 u32 len; /* Buffer length. */
84 u16 flags; /* Descriptor flags. */
85};
86
87struct vring_virtqueue {
88 struct virtqueue vq;
89
90 /* Is this a packed ring? */
91 bool packed_ring;
92
93 /* Is DMA API used? */
94 bool use_dma_api;
95
96 /* Can we use weak barriers? */
97 bool weak_barriers;
98
99 /* Other side has made a mess, don't try any more. */
100 bool broken;
101
102 /* Host supports indirect buffers */
103 bool indirect;
104
105 /* Host publishes avail event idx */
106 bool event;
107
108 /* Head of free buffer list. */
109 unsigned int free_head;
110 /* Number we've added since last sync. */
111 unsigned int num_added;
112
113 /* Last used index we've seen. */
114 u16 last_used_idx;
115
116 union {
117 /* Available for split ring */
118 struct {
119 /* Actual memory layout for this queue. */
120 struct vring vring;
121
122 /* Last written value to avail->flags */
123 u16 avail_flags_shadow;
124
125 /*
126 * Last written value to avail->idx in
127 * guest byte order.
128 */
129 u16 avail_idx_shadow;
130
131 /* Per-descriptor state. */
132 struct vring_desc_state_split *desc_state;
133
134 /* DMA address and size information */
135 dma_addr_t queue_dma_addr;
136 size_t queue_size_in_bytes;
137 } split;
138
139 /* Available for packed ring */
140 struct {
141 /* Actual memory layout for this queue. */
142 struct {
143 unsigned int num;
144 struct vring_packed_desc *desc;
145 struct vring_packed_desc_event *driver;
146 struct vring_packed_desc_event *device;
147 } vring;
148
149 /* Driver ring wrap counter. */
150 bool avail_wrap_counter;
151
152 /* Device ring wrap counter. */
153 bool used_wrap_counter;
154
155 /* Avail used flags. */
156 u16 avail_used_flags;
157
158 /* Index of the next avail descriptor. */
159 u16 next_avail_idx;
160
161 /*
162 * Last written value to driver->flags in
163 * guest byte order.
164 */
165 u16 event_flags_shadow;
166
167 /* Per-descriptor state. */
168 struct vring_desc_state_packed *desc_state;
169 struct vring_desc_extra_packed *desc_extra;
170
171 /* DMA address and size information */
172 dma_addr_t ring_dma_addr;
173 dma_addr_t driver_event_dma_addr;
174 dma_addr_t device_event_dma_addr;
175 size_t ring_size_in_bytes;
176 size_t event_size_in_bytes;
177 } packed;
178 };
179
180 /* How to notify other side. FIXME: commonalize hcalls! */
181 bool (*notify)(struct virtqueue *vq);
182
183 /* DMA, allocation, and size information */
184 bool we_own_ring;
185
186#ifdef DEBUG
187 /* They're supposed to lock for us. */
188 unsigned int in_use;
189
190 /* Figure out if their kicks are too delayed. */
191 bool last_add_time_valid;
192 ktime_t last_add_time;
193#endif
194};
195
196
197/*
198 * Helpers.
199 */
200
201#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
202
203static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
204 unsigned int total_sg)
205{
206 struct vring_virtqueue *vq = to_vvq(_vq);
207
208 /*
209 * If the host supports indirect descriptor tables, and we have multiple
210 * buffers, then go indirect. FIXME: tune this threshold
211 */
212 return (vq->indirect && total_sg > 1 && vq->vq.num_free);
213}
214
215/*
216 * Modern virtio devices have feature bits to specify whether they need a
217 * quirk and bypass the IOMMU. If not there, just use the DMA API.
218 *
219 * If there, the interaction between virtio and DMA API is messy.
220 *
221 * On most systems with virtio, physical addresses match bus addresses,
222 * and it doesn't particularly matter whether we use the DMA API.
223 *
224 * On some systems, including Xen and any system with a physical device
225 * that speaks virtio behind a physical IOMMU, we must use the DMA API
226 * for virtio DMA to work at all.
227 *
228 * On other systems, including SPARC and PPC64, virtio-pci devices are
229 * enumerated as though they are behind an IOMMU, but the virtio host
230 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
231 * there or somehow map everything as the identity.
232 *
233 * For the time being, we preserve historic behavior and bypass the DMA
234 * API.
235 *
236 * TODO: install a per-device DMA ops structure that does the right thing
237 * taking into account all the above quirks, and use the DMA API
238 * unconditionally on data path.
239 */
240
241static bool vring_use_dma_api(struct virtio_device *vdev)
242{
243 if (!virtio_has_dma_quirk(vdev))
244 return true;
245
246 /* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time. On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge. Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
255 if (xen_domain())
256 return true;
257
258 return false;
259}
260
261size_t virtio_max_dma_size(struct virtio_device *vdev)
262{
263 size_t max_segment_size = SIZE_MAX;
264
265 if (vring_use_dma_api(vdev))
266 max_segment_size = dma_max_mapping_size(&vdev->dev);
267
268 return max_segment_size;
269}
270EXPORT_SYMBOL_GPL(virtio_max_dma_size);
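
/*
 * Example: virtio_max_dma_size() lets a driver cap how large any single
 * buffer segment may be before it is handed to a virtqueue.  The sketch
 * below is illustrative only; a virtio-blk style request queue "q" is an
 * assumption, not part of this file:
 *
 *	u32 max_seg = min_t(size_t, virtio_max_dma_size(vdev), U32_MAX);
 *
 *	blk_queue_max_segment_size(q, max_seg);
 */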
271
272static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
273 dma_addr_t *dma_handle, gfp_t flag)
274{
275 if (vring_use_dma_api(vdev)) {
276 return dma_alloc_coherent(vdev->dev.parent, size,
277 dma_handle, flag);
278 } else {
279 void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
280
281 if (queue) {
282 phys_addr_t phys_addr = virt_to_phys(queue);
283 *dma_handle = (dma_addr_t)phys_addr;
284
			/*
			 * Sanity check: make sure we didn't truncate
			 * the address. The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine. Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
296 if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
297 free_pages_exact(queue, PAGE_ALIGN(size));
298 return NULL;
299 }
300 }
301 return queue;
302 }
303}
304
305static void vring_free_queue(struct virtio_device *vdev, size_t size,
306 void *queue, dma_addr_t dma_handle)
307{
308 if (vring_use_dma_api(vdev))
309 dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
310 else
311 free_pages_exact(queue, PAGE_ALIGN(size));
312}
313
314/*
315 * The DMA ops on various arches are rather gnarly right now, and
316 * making all of the arch DMA ops work on the vring device itself
317 * is a mess. For now, we use the parent device for DMA ops.
318 */
319static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
320{
321 return vq->vq.vdev->dev.parent;
322}
323
324/* Map one sg entry. */
325static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
326 struct scatterlist *sg,
327 enum dma_data_direction direction)
328{
329 if (!vq->use_dma_api)
330 return (dma_addr_t)sg_phys(sg);
331
332 /*
333 * We can't use dma_map_sg, because we don't use scatterlists in
334 * the way it expects (we don't guarantee that the scatterlist
335 * will exist for the lifetime of the mapping).
336 */
337 return dma_map_page(vring_dma_dev(vq),
338 sg_page(sg), sg->offset, sg->length,
339 direction);
340}
341
342static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
343 void *cpu_addr, size_t size,
344 enum dma_data_direction direction)
345{
346 if (!vq->use_dma_api)
347 return (dma_addr_t)virt_to_phys(cpu_addr);
348
349 return dma_map_single(vring_dma_dev(vq),
350 cpu_addr, size, direction);
351}
352
353static int vring_mapping_error(const struct vring_virtqueue *vq,
354 dma_addr_t addr)
355{
356 if (!vq->use_dma_api)
357 return 0;
358
359 return dma_mapping_error(vring_dma_dev(vq), addr);
360}
361
362
363/*
364 * Split ring specific functions - *_split().
365 */
366
367static void vring_unmap_one_split(const struct vring_virtqueue *vq,
368 struct vring_desc *desc)
369{
370 u16 flags;
371
372 if (!vq->use_dma_api)
373 return;
374
375 flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
376
377 if (flags & VRING_DESC_F_INDIRECT) {
378 dma_unmap_single(vring_dma_dev(vq),
379 virtio64_to_cpu(vq->vq.vdev, desc->addr),
380 virtio32_to_cpu(vq->vq.vdev, desc->len),
381 (flags & VRING_DESC_F_WRITE) ?
382 DMA_FROM_DEVICE : DMA_TO_DEVICE);
383 } else {
384 dma_unmap_page(vring_dma_dev(vq),
385 virtio64_to_cpu(vq->vq.vdev, desc->addr),
386 virtio32_to_cpu(vq->vq.vdev, desc->len),
387 (flags & VRING_DESC_F_WRITE) ?
388 DMA_FROM_DEVICE : DMA_TO_DEVICE);
389 }
390}
391
392static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
393 unsigned int total_sg,
394 gfp_t gfp)
395{
396 struct vring_desc *desc;
397 unsigned int i;
398
399 /*
400 * We require lowmem mappings for the descriptors because
401 * otherwise virt_to_phys will give us bogus addresses in the
402 * virtqueue.
403 */
404 gfp &= ~__GFP_HIGHMEM;
405
406 desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
407 if (!desc)
408 return NULL;
409
410 for (i = 0; i < total_sg; i++)
411 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
412 return desc;
413}
414
415static inline int virtqueue_add_split(struct virtqueue *_vq,
416 struct scatterlist *sgs[],
417 unsigned int total_sg,
418 unsigned int out_sgs,
419 unsigned int in_sgs,
420 void *data,
421 void *ctx,
422 gfp_t gfp)
423{
424 struct vring_virtqueue *vq = to_vvq(_vq);
425 struct scatterlist *sg;
426 struct vring_desc *desc;
427 unsigned int i, n, avail, descs_used, prev, err_idx;
428 int head;
429 bool indirect;
430
431 START_USE(vq);
432
433 BUG_ON(data == NULL);
434 BUG_ON(ctx && vq->indirect);
435
436 if (unlikely(vq->broken)) {
437 END_USE(vq);
438 return -EIO;
439 }
440
441 LAST_ADD_TIME_UPDATE(vq);
442
443 BUG_ON(total_sg == 0);
444
445 head = vq->free_head;
446
447 if (virtqueue_use_indirect(_vq, total_sg))
448 desc = alloc_indirect_split(_vq, total_sg, gfp);
449 else {
450 desc = NULL;
451 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
452 }
453
454 if (desc) {
455 /* Use a single buffer which doesn't continue */
456 indirect = true;
457 /* Set up rest to use this indirect table. */
458 i = 0;
459 descs_used = 1;
460 } else {
461 indirect = false;
462 desc = vq->split.vring.desc;
463 i = head;
464 descs_used = total_sg;
465 }
466
467 if (vq->vq.num_free < descs_used) {
468 pr_debug("Can't add buf len %i - avail = %i\n",
469 descs_used, vq->vq.num_free);
470 /* FIXME: for historical reasons, we force a notify here if
471 * there are outgoing parts to the buffer. Presumably the
472 * host should service the ring ASAP. */
473 if (out_sgs)
474 vq->notify(&vq->vq);
475 if (indirect)
476 kfree(desc);
477 END_USE(vq);
478 return -ENOSPC;
479 }
480
481 for (n = 0; n < out_sgs; n++) {
482 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
483 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
484 if (vring_mapping_error(vq, addr))
485 goto unmap_release;
486
487 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
488 desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
489 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
490 prev = i;
491 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
492 }
493 }
494 for (; n < (out_sgs + in_sgs); n++) {
495 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
496 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
497 if (vring_mapping_error(vq, addr))
498 goto unmap_release;
499
500 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
501 desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
502 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
503 prev = i;
504 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
505 }
506 }
507 /* Last one doesn't continue. */
508 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
509
510 if (indirect) {
511 /* Now that the indirect table is filled in, map it. */
512 dma_addr_t addr = vring_map_single(
513 vq, desc, total_sg * sizeof(struct vring_desc),
514 DMA_TO_DEVICE);
515 if (vring_mapping_error(vq, addr))
516 goto unmap_release;
517
518 vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
519 VRING_DESC_F_INDIRECT);
520 vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
521 addr);
522
523 vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
524 total_sg * sizeof(struct vring_desc));
525 }
526
527 /* We're using some buffers from the free list. */
528 vq->vq.num_free -= descs_used;
529
530 /* Update free pointer */
531 if (indirect)
532 vq->free_head = virtio16_to_cpu(_vq->vdev,
533 vq->split.vring.desc[head].next);
534 else
535 vq->free_head = i;
536
537 /* Store token and indirect buffer state. */
538 vq->split.desc_state[head].data = data;
539 if (indirect)
540 vq->split.desc_state[head].indir_desc = desc;
541 else
542 vq->split.desc_state[head].indir_desc = ctx;
543
544 /* Put entry in available array (but don't update avail->idx until they
545 * do sync). */
546 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
547 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
548
549 /* Descriptors and available array need to be set before we expose the
550 * new available array entries. */
551 virtio_wmb(vq->weak_barriers);
552 vq->split.avail_idx_shadow++;
553 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
554 vq->split.avail_idx_shadow);
555 vq->num_added++;
556
557 pr_debug("Added buffer head %i to %p\n", head, vq);
558 END_USE(vq);
559
560 /* This is very unlikely, but theoretically possible. Kick
561 * just in case. */
562 if (unlikely(vq->num_added == (1 << 16) - 1))
563 virtqueue_kick(_vq);
564
565 return 0;
566
567unmap_release:
568 err_idx = i;
569
570 if (indirect)
571 i = 0;
572 else
573 i = head;
574
575 for (n = 0; n < total_sg; n++) {
576 if (i == err_idx)
577 break;
578 vring_unmap_one_split(vq, &desc[i]);
579 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
580 }
581
582 if (indirect)
583 kfree(desc);
584
585 END_USE(vq);
586 return -ENOMEM;
587}
588
589static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
590{
591 struct vring_virtqueue *vq = to_vvq(_vq);
592 u16 new, old;
593 bool needs_kick;
594
595 START_USE(vq);
596 /* We need to expose available array entries before checking avail
597 * event. */
598 virtio_mb(vq->weak_barriers);
599
600 old = vq->split.avail_idx_shadow - vq->num_added;
601 new = vq->split.avail_idx_shadow;
602 vq->num_added = 0;
603
604 LAST_ADD_TIME_CHECK(vq);
605 LAST_ADD_TIME_INVALID(vq);
606
607 if (vq->event) {
608 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
609 vring_avail_event(&vq->split.vring)),
610 new, old);
611 } else {
612 needs_kick = !(vq->split.vring.used->flags &
613 cpu_to_virtio16(_vq->vdev,
614 VRING_USED_F_NO_NOTIFY));
615 }
616 END_USE(vq);
617 return needs_kick;
618}
619
620static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
621 void **ctx)
622{
623 unsigned int i, j;
624 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
625
626 /* Clear data ptr. */
627 vq->split.desc_state[head].data = NULL;
628
629 /* Put back on free list: unmap first-level descriptors and find end */
630 i = head;
631
632 while (vq->split.vring.desc[i].flags & nextflag) {
633 vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
634 i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
635 vq->vq.num_free++;
636 }
637
638 vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
639 vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
640 vq->free_head);
641 vq->free_head = head;
642
643 /* Plus final descriptor */
644 vq->vq.num_free++;
645
646 if (vq->indirect) {
647 struct vring_desc *indir_desc =
648 vq->split.desc_state[head].indir_desc;
649 u32 len;
650
651 /* Free the indirect table, if any, now that it's unmapped. */
652 if (!indir_desc)
653 return;
654
655 len = virtio32_to_cpu(vq->vq.vdev,
656 vq->split.vring.desc[head].len);
657
658 BUG_ON(!(vq->split.vring.desc[head].flags &
659 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
660 BUG_ON(len == 0 || len % sizeof(struct vring_desc));
661
662 for (j = 0; j < len / sizeof(struct vring_desc); j++)
663 vring_unmap_one_split(vq, &indir_desc[j]);
664
665 kfree(indir_desc);
666 vq->split.desc_state[head].indir_desc = NULL;
667 } else if (ctx) {
668 *ctx = vq->split.desc_state[head].indir_desc;
669 }
670}
671
672static inline bool more_used_split(const struct vring_virtqueue *vq)
673{
674 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
675 vq->split.vring.used->idx);
676}
677
678static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
679 unsigned int *len,
680 void **ctx)
681{
682 struct vring_virtqueue *vq = to_vvq(_vq);
683 void *ret;
684 unsigned int i;
685 u16 last_used;
686
687 START_USE(vq);
688
689 if (unlikely(vq->broken)) {
690 END_USE(vq);
691 return NULL;
692 }
693
694 if (!more_used_split(vq)) {
695 pr_debug("No more buffers in queue\n");
696 END_USE(vq);
697 return NULL;
698 }
699
700 /* Only get used array entries after they have been exposed by host. */
701 virtio_rmb(vq->weak_barriers);
702
703 last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
704 i = virtio32_to_cpu(_vq->vdev,
705 vq->split.vring.used->ring[last_used].id);
706 *len = virtio32_to_cpu(_vq->vdev,
707 vq->split.vring.used->ring[last_used].len);
708
709 if (unlikely(i >= vq->split.vring.num)) {
710 BAD_RING(vq, "id %u out of range\n", i);
711 return NULL;
712 }
713 if (unlikely(!vq->split.desc_state[i].data)) {
714 BAD_RING(vq, "id %u is not a head!\n", i);
715 return NULL;
716 }
717
718 /* detach_buf_split clears data, so grab it now. */
719 ret = vq->split.desc_state[i].data;
720 detach_buf_split(vq, i, ctx);
721 vq->last_used_idx++;
722 /* If we expect an interrupt for the next entry, tell host
723 * by writing event index and flush out the write before
724 * the read in the next get_buf call. */
725 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
726 virtio_store_mb(vq->weak_barriers,
727 &vring_used_event(&vq->split.vring),
728 cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
729
730 LAST_ADD_TIME_INVALID(vq);
731
732 END_USE(vq);
733 return ret;
734}
735
736static void virtqueue_disable_cb_split(struct virtqueue *_vq)
737{
738 struct vring_virtqueue *vq = to_vvq(_vq);
739
740 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
741 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
742 if (!vq->event)
743 vq->split.vring.avail->flags =
744 cpu_to_virtio16(_vq->vdev,
745 vq->split.avail_flags_shadow);
746 }
747}
748
749static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
750{
751 struct vring_virtqueue *vq = to_vvq(_vq);
752 u16 last_used_idx;
753
754 START_USE(vq);
755
756 /* We optimistically turn back on interrupts, then check if there was
757 * more to do. */
758 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
759 * either clear the flags bit or point the event index at the next
760 * entry. Always do both to keep code simple. */
761 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
762 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
763 if (!vq->event)
764 vq->split.vring.avail->flags =
765 cpu_to_virtio16(_vq->vdev,
766 vq->split.avail_flags_shadow);
767 }
768 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
769 last_used_idx = vq->last_used_idx);
770 END_USE(vq);
771 return last_used_idx;
772}
773
774static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
775{
776 struct vring_virtqueue *vq = to_vvq(_vq);
777
778 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
779 vq->split.vring.used->idx);
780}
781
782static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
783{
784 struct vring_virtqueue *vq = to_vvq(_vq);
785 u16 bufs;
786
787 START_USE(vq);
788
789 /* We optimistically turn back on interrupts, then check if there was
790 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
794 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
795 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
796 if (!vq->event)
797 vq->split.vring.avail->flags =
798 cpu_to_virtio16(_vq->vdev,
799 vq->split.avail_flags_shadow);
800 }
801 /* TODO: tune this threshold */
802 bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;
803
804 virtio_store_mb(vq->weak_barriers,
805 &vring_used_event(&vq->split.vring),
806 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
807
808 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
809 - vq->last_used_idx) > bufs)) {
810 END_USE(vq);
811 return false;
812 }
813
814 END_USE(vq);
815 return true;
816}
817
818static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
819{
820 struct vring_virtqueue *vq = to_vvq(_vq);
821 unsigned int i;
822 void *buf;
823
824 START_USE(vq);
825
826 for (i = 0; i < vq->split.vring.num; i++) {
827 if (!vq->split.desc_state[i].data)
828 continue;
829 /* detach_buf_split clears data, so grab it now. */
830 buf = vq->split.desc_state[i].data;
831 detach_buf_split(vq, i, NULL);
832 vq->split.avail_idx_shadow--;
833 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
834 vq->split.avail_idx_shadow);
835 END_USE(vq);
836 return buf;
837 }
838 /* That should have freed everything. */
839 BUG_ON(vq->vq.num_free != vq->split.vring.num);
840
841 END_USE(vq);
842 return NULL;
843}
844
845static struct virtqueue *vring_create_virtqueue_split(
846 unsigned int index,
847 unsigned int num,
848 unsigned int vring_align,
849 struct virtio_device *vdev,
850 bool weak_barriers,
851 bool may_reduce_num,
852 bool context,
853 bool (*notify)(struct virtqueue *),
854 void (*callback)(struct virtqueue *),
855 const char *name)
856{
857 struct virtqueue *vq;
858 void *queue = NULL;
859 dma_addr_t dma_addr;
860 size_t queue_size_in_bytes;
861 struct vring vring;
862
863 /* We assume num is a power of 2. */
864 if (num & (num - 1)) {
865 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
866 return NULL;
867 }
868
869 /* TODO: allocate each queue chunk individually */
870 for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
871 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
872 &dma_addr,
873 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
874 if (queue)
875 break;
876 if (!may_reduce_num)
877 return NULL;
878 }
879
880 if (!num)
881 return NULL;
882
883 if (!queue) {
884 /* Try to get a single page. You are my only hope! */
885 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
886 &dma_addr, GFP_KERNEL|__GFP_ZERO);
887 }
888 if (!queue)
889 return NULL;
890
891 queue_size_in_bytes = vring_size(num, vring_align);
892 vring_init(&vring, num, queue, vring_align);
893
894 vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
895 notify, callback, name);
896 if (!vq) {
897 vring_free_queue(vdev, queue_size_in_bytes, queue,
898 dma_addr);
899 return NULL;
900 }
901
902 to_vvq(vq)->split.queue_dma_addr = dma_addr;
903 to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
904 to_vvq(vq)->we_own_ring = true;
905
906 return vq;
907}
908
909
910/*
911 * Packed ring specific functions - *_packed().
912 */
913
914static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
915 struct vring_desc_extra_packed *state)
916{
917 u16 flags;
918
919 if (!vq->use_dma_api)
920 return;
921
922 flags = state->flags;
923
924 if (flags & VRING_DESC_F_INDIRECT) {
925 dma_unmap_single(vring_dma_dev(vq),
926 state->addr, state->len,
927 (flags & VRING_DESC_F_WRITE) ?
928 DMA_FROM_DEVICE : DMA_TO_DEVICE);
929 } else {
930 dma_unmap_page(vring_dma_dev(vq),
931 state->addr, state->len,
932 (flags & VRING_DESC_F_WRITE) ?
933 DMA_FROM_DEVICE : DMA_TO_DEVICE);
934 }
935}
936
937static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
938 struct vring_packed_desc *desc)
939{
940 u16 flags;
941
942 if (!vq->use_dma_api)
943 return;
944
945 flags = le16_to_cpu(desc->flags);
946
947 if (flags & VRING_DESC_F_INDIRECT) {
948 dma_unmap_single(vring_dma_dev(vq),
949 le64_to_cpu(desc->addr),
950 le32_to_cpu(desc->len),
951 (flags & VRING_DESC_F_WRITE) ?
952 DMA_FROM_DEVICE : DMA_TO_DEVICE);
953 } else {
954 dma_unmap_page(vring_dma_dev(vq),
955 le64_to_cpu(desc->addr),
956 le32_to_cpu(desc->len),
957 (flags & VRING_DESC_F_WRITE) ?
958 DMA_FROM_DEVICE : DMA_TO_DEVICE);
959 }
960}
961
962static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
963 gfp_t gfp)
964{
965 struct vring_packed_desc *desc;
966
967 /*
968 * We require lowmem mappings for the descriptors because
969 * otherwise virt_to_phys will give us bogus addresses in the
970 * virtqueue.
971 */
972 gfp &= ~__GFP_HIGHMEM;
973
974 desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
975
976 return desc;
977}
978
979static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
980 struct scatterlist *sgs[],
981 unsigned int total_sg,
982 unsigned int out_sgs,
983 unsigned int in_sgs,
984 void *data,
985 gfp_t gfp)
986{
987 struct vring_packed_desc *desc;
988 struct scatterlist *sg;
989 unsigned int i, n, err_idx;
990 u16 head, id;
991 dma_addr_t addr;
992
993 head = vq->packed.next_avail_idx;
	desc = alloc_indirect_packed(total_sg, gfp);
	if (!desc) {
		END_USE(vq);
		return -ENOMEM;
	}
995
996 if (unlikely(vq->vq.num_free < 1)) {
997 pr_debug("Can't add buf len 1 - avail = 0\n");
998 kfree(desc);
999 END_USE(vq);
1000 return -ENOSPC;
1001 }
1002
1003 i = 0;
1004 id = vq->free_head;
1005 BUG_ON(id == vq->packed.vring.num);
1006
1007 for (n = 0; n < out_sgs + in_sgs; n++) {
1008 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
1009 addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1010 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1011 if (vring_mapping_error(vq, addr))
1012 goto unmap_release;
1013
1014 desc[i].flags = cpu_to_le16(n < out_sgs ?
1015 0 : VRING_DESC_F_WRITE);
1016 desc[i].addr = cpu_to_le64(addr);
1017 desc[i].len = cpu_to_le32(sg->length);
1018 i++;
1019 }
1020 }
1021
1022 /* Now that the indirect table is filled in, map it. */
1023 addr = vring_map_single(vq, desc,
1024 total_sg * sizeof(struct vring_packed_desc),
1025 DMA_TO_DEVICE);
1026 if (vring_mapping_error(vq, addr))
1027 goto unmap_release;
1028
1029 vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
1030 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
1031 sizeof(struct vring_packed_desc));
1032 vq->packed.vring.desc[head].id = cpu_to_le16(id);
1033
1034 if (vq->use_dma_api) {
1035 vq->packed.desc_extra[id].addr = addr;
1036 vq->packed.desc_extra[id].len = total_sg *
1037 sizeof(struct vring_packed_desc);
1038 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
1039 vq->packed.avail_used_flags;
1040 }
1041
1042 /*
1043 * A driver MUST NOT make the first descriptor in the list
1044 * available before all subsequent descriptors comprising
1045 * the list are made available.
1046 */
1047 virtio_wmb(vq->weak_barriers);
1048 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
1049 vq->packed.avail_used_flags);
1050
1051 /* We're using some buffers from the free list. */
1052 vq->vq.num_free -= 1;
1053
1054 /* Update free pointer */
1055 n = head + 1;
1056 if (n >= vq->packed.vring.num) {
1057 n = 0;
1058 vq->packed.avail_wrap_counter ^= 1;
1059 vq->packed.avail_used_flags ^=
1060 1 << VRING_PACKED_DESC_F_AVAIL |
1061 1 << VRING_PACKED_DESC_F_USED;
1062 }
1063 vq->packed.next_avail_idx = n;
1064 vq->free_head = vq->packed.desc_state[id].next;
1065
1066 /* Store token and indirect buffer state. */
1067 vq->packed.desc_state[id].num = 1;
1068 vq->packed.desc_state[id].data = data;
1069 vq->packed.desc_state[id].indir_desc = desc;
1070 vq->packed.desc_state[id].last = id;
1071
1072 vq->num_added += 1;
1073
1074 pr_debug("Added buffer head %i to %p\n", head, vq);
1075 END_USE(vq);
1076
1077 return 0;
1078
1079unmap_release:
1080 err_idx = i;
1081
1082 for (i = 0; i < err_idx; i++)
1083 vring_unmap_desc_packed(vq, &desc[i]);
1084
1085 kfree(desc);
1086
1087 END_USE(vq);
1088 return -ENOMEM;
1089}
1090
1091static inline int virtqueue_add_packed(struct virtqueue *_vq,
1092 struct scatterlist *sgs[],
1093 unsigned int total_sg,
1094 unsigned int out_sgs,
1095 unsigned int in_sgs,
1096 void *data,
1097 void *ctx,
1098 gfp_t gfp)
1099{
1100 struct vring_virtqueue *vq = to_vvq(_vq);
1101 struct vring_packed_desc *desc;
1102 struct scatterlist *sg;
1103 unsigned int i, n, c, descs_used, err_idx;
1104 __le16 head_flags, flags;
1105 u16 head, id, prev, curr, avail_used_flags;
1106
1107 START_USE(vq);
1108
1109 BUG_ON(data == NULL);
1110 BUG_ON(ctx && vq->indirect);
1111
1112 if (unlikely(vq->broken)) {
1113 END_USE(vq);
1114 return -EIO;
1115 }
1116
1117 LAST_ADD_TIME_UPDATE(vq);
1118
1119 BUG_ON(total_sg == 0);
1120
1121 if (virtqueue_use_indirect(_vq, total_sg))
1122 return virtqueue_add_indirect_packed(vq, sgs, total_sg,
1123 out_sgs, in_sgs, data, gfp);
1124
1125 head = vq->packed.next_avail_idx;
1126 avail_used_flags = vq->packed.avail_used_flags;
1127
1128 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
1129
1130 desc = vq->packed.vring.desc;
1131 i = head;
1132 descs_used = total_sg;
1133
1134 if (unlikely(vq->vq.num_free < descs_used)) {
1135 pr_debug("Can't add buf len %i - avail = %i\n",
1136 descs_used, vq->vq.num_free);
1137 END_USE(vq);
1138 return -ENOSPC;
1139 }
1140
1141 id = vq->free_head;
1142 BUG_ON(id == vq->packed.vring.num);
1143
1144 curr = id;
1145 c = 0;
1146 for (n = 0; n < out_sgs + in_sgs; n++) {
1147 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
1148 dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1149 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1150 if (vring_mapping_error(vq, addr))
1151 goto unmap_release;
1152
1153 flags = cpu_to_le16(vq->packed.avail_used_flags |
1154 (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
1155 (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
1156 if (i == head)
1157 head_flags = flags;
1158 else
1159 desc[i].flags = flags;
1160
1161 desc[i].addr = cpu_to_le64(addr);
1162 desc[i].len = cpu_to_le32(sg->length);
1163 desc[i].id = cpu_to_le16(id);
1164
1165 if (unlikely(vq->use_dma_api)) {
1166 vq->packed.desc_extra[curr].addr = addr;
1167 vq->packed.desc_extra[curr].len = sg->length;
1168 vq->packed.desc_extra[curr].flags =
1169 le16_to_cpu(flags);
1170 }
1171 prev = curr;
1172 curr = vq->packed.desc_state[curr].next;
1173
1174 if ((unlikely(++i >= vq->packed.vring.num))) {
1175 i = 0;
1176 vq->packed.avail_used_flags ^=
1177 1 << VRING_PACKED_DESC_F_AVAIL |
1178 1 << VRING_PACKED_DESC_F_USED;
1179 }
1180 }
1181 }
1182
1183 if (i < head)
1184 vq->packed.avail_wrap_counter ^= 1;
1185
1186 /* We're using some buffers from the free list. */
1187 vq->vq.num_free -= descs_used;
1188
1189 /* Update free pointer */
1190 vq->packed.next_avail_idx = i;
1191 vq->free_head = curr;
1192
1193 /* Store token. */
1194 vq->packed.desc_state[id].num = descs_used;
1195 vq->packed.desc_state[id].data = data;
1196 vq->packed.desc_state[id].indir_desc = ctx;
1197 vq->packed.desc_state[id].last = prev;
1198
1199 /*
1200 * A driver MUST NOT make the first descriptor in the list
1201 * available before all subsequent descriptors comprising
1202 * the list are made available.
1203 */
1204 virtio_wmb(vq->weak_barriers);
1205 vq->packed.vring.desc[head].flags = head_flags;
1206 vq->num_added += descs_used;
1207
1208 pr_debug("Added buffer head %i to %p\n", head, vq);
1209 END_USE(vq);
1210
1211 return 0;
1212
1213unmap_release:
1214 err_idx = i;
1215 i = head;
1216
1217 vq->packed.avail_used_flags = avail_used_flags;
1218
1219 for (n = 0; n < total_sg; n++) {
1220 if (i == err_idx)
1221 break;
1222 vring_unmap_desc_packed(vq, &desc[i]);
1223 i++;
1224 if (i >= vq->packed.vring.num)
1225 i = 0;
1226 }
1227
1228 END_USE(vq);
1229 return -EIO;
1230}
1231
1232static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
1233{
1234 struct vring_virtqueue *vq = to_vvq(_vq);
1235 u16 new, old, off_wrap, flags, wrap_counter, event_idx;
1236 bool needs_kick;
1237 union {
1238 struct {
1239 __le16 off_wrap;
1240 __le16 flags;
1241 };
1242 u32 u32;
1243 } snapshot;
1244
1245 START_USE(vq);
1246
1247 /*
1248 * We need to expose the new flags value before checking notification
1249 * suppressions.
1250 */
1251 virtio_mb(vq->weak_barriers);
1252
1253 old = vq->packed.next_avail_idx - vq->num_added;
1254 new = vq->packed.next_avail_idx;
1255 vq->num_added = 0;
1256
1257 snapshot.u32 = *(u32 *)vq->packed.vring.device;
1258 flags = le16_to_cpu(snapshot.flags);
1259
1260 LAST_ADD_TIME_CHECK(vq);
1261 LAST_ADD_TIME_INVALID(vq);
1262
1263 if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
1264 needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
1265 goto out;
1266 }
1267
1268 off_wrap = le16_to_cpu(snapshot.off_wrap);
1269
1270 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1271 event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1272 if (wrap_counter != vq->packed.avail_wrap_counter)
1273 event_idx -= vq->packed.vring.num;
1274
1275 needs_kick = vring_need_event(event_idx, new, old);
1276out:
1277 END_USE(vq);
1278 return needs_kick;
1279}
1280
1281static void detach_buf_packed(struct vring_virtqueue *vq,
1282 unsigned int id, void **ctx)
1283{
1284 struct vring_desc_state_packed *state = NULL;
1285 struct vring_packed_desc *desc;
1286 unsigned int i, curr;
1287
1288 state = &vq->packed.desc_state[id];
1289
1290 /* Clear data ptr. */
1291 state->data = NULL;
1292
1293 vq->packed.desc_state[state->last].next = vq->free_head;
1294 vq->free_head = id;
1295 vq->vq.num_free += state->num;
1296
1297 if (unlikely(vq->use_dma_api)) {
1298 curr = id;
1299 for (i = 0; i < state->num; i++) {
1300 vring_unmap_state_packed(vq,
1301 &vq->packed.desc_extra[curr]);
1302 curr = vq->packed.desc_state[curr].next;
1303 }
1304 }
1305
1306 if (vq->indirect) {
1307 u32 len;
1308
1309 /* Free the indirect table, if any, now that it's unmapped. */
1310 desc = state->indir_desc;
1311 if (!desc)
1312 return;
1313
1314 if (vq->use_dma_api) {
1315 len = vq->packed.desc_extra[id].len;
1316 for (i = 0; i < len / sizeof(struct vring_packed_desc);
1317 i++)
1318 vring_unmap_desc_packed(vq, &desc[i]);
1319 }
1320 kfree(desc);
1321 state->indir_desc = NULL;
1322 } else if (ctx) {
1323 *ctx = state->indir_desc;
1324 }
1325}
1326
1327static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
1328 u16 idx, bool used_wrap_counter)
1329{
1330 bool avail, used;
1331 u16 flags;
1332
1333 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
1334 avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
1335 used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
1336
1337 return avail == used && used == used_wrap_counter;
1338}
1339
1340static inline bool more_used_packed(const struct vring_virtqueue *vq)
1341{
1342 return is_used_desc_packed(vq, vq->last_used_idx,
1343 vq->packed.used_wrap_counter);
1344}
1345
1346static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
1347 unsigned int *len,
1348 void **ctx)
1349{
1350 struct vring_virtqueue *vq = to_vvq(_vq);
1351 u16 last_used, id;
1352 void *ret;
1353
1354 START_USE(vq);
1355
1356 if (unlikely(vq->broken)) {
1357 END_USE(vq);
1358 return NULL;
1359 }
1360
1361 if (!more_used_packed(vq)) {
1362 pr_debug("No more buffers in queue\n");
1363 END_USE(vq);
1364 return NULL;
1365 }
1366
1367 /* Only get used elements after they have been exposed by host. */
1368 virtio_rmb(vq->weak_barriers);
1369
1370 last_used = vq->last_used_idx;
1371 id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
1372 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
1373
1374 if (unlikely(id >= vq->packed.vring.num)) {
1375 BAD_RING(vq, "id %u out of range\n", id);
1376 return NULL;
1377 }
1378 if (unlikely(!vq->packed.desc_state[id].data)) {
1379 BAD_RING(vq, "id %u is not a head!\n", id);
1380 return NULL;
1381 }
1382
1383 /* detach_buf_packed clears data, so grab it now. */
1384 ret = vq->packed.desc_state[id].data;
1385 detach_buf_packed(vq, id, ctx);
1386
1387 vq->last_used_idx += vq->packed.desc_state[id].num;
1388 if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
1389 vq->last_used_idx -= vq->packed.vring.num;
1390 vq->packed.used_wrap_counter ^= 1;
1391 }
1392
1393 /*
1394 * If we expect an interrupt for the next entry, tell host
1395 * by writing event index and flush out the write before
1396 * the read in the next get_buf call.
1397 */
1398 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1399 virtio_store_mb(vq->weak_barriers,
1400 &vq->packed.vring.driver->off_wrap,
1401 cpu_to_le16(vq->last_used_idx |
1402 (vq->packed.used_wrap_counter <<
1403 VRING_PACKED_EVENT_F_WRAP_CTR)));
1404
1405 LAST_ADD_TIME_INVALID(vq);
1406
1407 END_USE(vq);
1408 return ret;
1409}
1410
1411static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
1412{
1413 struct vring_virtqueue *vq = to_vvq(_vq);
1414
1415 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
1416 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1417 vq->packed.vring.driver->flags =
1418 cpu_to_le16(vq->packed.event_flags_shadow);
1419 }
1420}
1421
1422static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
1423{
1424 struct vring_virtqueue *vq = to_vvq(_vq);
1425
1426 START_USE(vq);
1427
1428 /*
1429 * We optimistically turn back on interrupts, then check if there was
1430 * more to do.
1431 */
1432
1433 if (vq->event) {
1434 vq->packed.vring.driver->off_wrap =
1435 cpu_to_le16(vq->last_used_idx |
1436 (vq->packed.used_wrap_counter <<
1437 VRING_PACKED_EVENT_F_WRAP_CTR));
1438 /*
1439 * We need to update event offset and event wrap
1440 * counter first before updating event flags.
1441 */
1442 virtio_wmb(vq->weak_barriers);
1443 }
1444
1445 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1446 vq->packed.event_flags_shadow = vq->event ?
1447 VRING_PACKED_EVENT_FLAG_DESC :
1448 VRING_PACKED_EVENT_FLAG_ENABLE;
1449 vq->packed.vring.driver->flags =
1450 cpu_to_le16(vq->packed.event_flags_shadow);
1451 }
1452
1453 END_USE(vq);
1454 return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
1455 VRING_PACKED_EVENT_F_WRAP_CTR);
1456}
1457
1458static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
1459{
1460 struct vring_virtqueue *vq = to_vvq(_vq);
1461 bool wrap_counter;
1462 u16 used_idx;
1463
1464 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1465 used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1466
1467 return is_used_desc_packed(vq, used_idx, wrap_counter);
1468}
1469
1470static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
1471{
1472 struct vring_virtqueue *vq = to_vvq(_vq);
1473 u16 used_idx, wrap_counter;
1474 u16 bufs;
1475
1476 START_USE(vq);
1477
1478 /*
1479 * We optimistically turn back on interrupts, then check if there was
1480 * more to do.
1481 */
1482
1483 if (vq->event) {
1484 /* TODO: tune this threshold */
1485 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
1486 wrap_counter = vq->packed.used_wrap_counter;
1487
1488 used_idx = vq->last_used_idx + bufs;
1489 if (used_idx >= vq->packed.vring.num) {
1490 used_idx -= vq->packed.vring.num;
1491 wrap_counter ^= 1;
1492 }
1493
1494 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1495 (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1496
1497 /*
1498 * We need to update event offset and event wrap
1499 * counter first before updating event flags.
1500 */
1501 virtio_wmb(vq->weak_barriers);
1502 }
1503
1504 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1505 vq->packed.event_flags_shadow = vq->event ?
1506 VRING_PACKED_EVENT_FLAG_DESC :
1507 VRING_PACKED_EVENT_FLAG_ENABLE;
1508 vq->packed.vring.driver->flags =
1509 cpu_to_le16(vq->packed.event_flags_shadow);
1510 }
1511
1512 /*
1513 * We need to update event suppression structure first
1514 * before re-checking for more used buffers.
1515 */
1516 virtio_mb(vq->weak_barriers);
1517
1518 if (is_used_desc_packed(vq,
1519 vq->last_used_idx,
1520 vq->packed.used_wrap_counter)) {
1521 END_USE(vq);
1522 return false;
1523 }
1524
1525 END_USE(vq);
1526 return true;
1527}
1528
1529static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
1530{
1531 struct vring_virtqueue *vq = to_vvq(_vq);
1532 unsigned int i;
1533 void *buf;
1534
1535 START_USE(vq);
1536
1537 for (i = 0; i < vq->packed.vring.num; i++) {
1538 if (!vq->packed.desc_state[i].data)
1539 continue;
1540 /* detach_buf clears data, so grab it now. */
1541 buf = vq->packed.desc_state[i].data;
1542 detach_buf_packed(vq, i, NULL);
1543 END_USE(vq);
1544 return buf;
1545 }
1546 /* That should have freed everything. */
1547 BUG_ON(vq->vq.num_free != vq->packed.vring.num);
1548
1549 END_USE(vq);
1550 return NULL;
1551}
1552
1553static struct virtqueue *vring_create_virtqueue_packed(
1554 unsigned int index,
1555 unsigned int num,
1556 unsigned int vring_align,
1557 struct virtio_device *vdev,
1558 bool weak_barriers,
1559 bool may_reduce_num,
1560 bool context,
1561 bool (*notify)(struct virtqueue *),
1562 void (*callback)(struct virtqueue *),
1563 const char *name)
1564{
1565 struct vring_virtqueue *vq;
1566 struct vring_packed_desc *ring;
1567 struct vring_packed_desc_event *driver, *device;
1568 dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
1569 size_t ring_size_in_bytes, event_size_in_bytes;
1570 unsigned int i;
1571
1572 ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
1573
1574 ring = vring_alloc_queue(vdev, ring_size_in_bytes,
1575 &ring_dma_addr,
1576 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1577 if (!ring)
1578 goto err_ring;
1579
1580 event_size_in_bytes = sizeof(struct vring_packed_desc_event);
1581
1582 driver = vring_alloc_queue(vdev, event_size_in_bytes,
1583 &driver_event_dma_addr,
1584 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1585 if (!driver)
1586 goto err_driver;
1587
1588 device = vring_alloc_queue(vdev, event_size_in_bytes,
1589 &device_event_dma_addr,
1590 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1591 if (!device)
1592 goto err_device;
1593
1594 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
1595 if (!vq)
1596 goto err_vq;
1597
1598 vq->vq.callback = callback;
1599 vq->vq.vdev = vdev;
1600 vq->vq.name = name;
1601 vq->vq.num_free = num;
1602 vq->vq.index = index;
1603 vq->we_own_ring = true;
1604 vq->notify = notify;
1605 vq->weak_barriers = weak_barriers;
1606 vq->broken = false;
1607 vq->last_used_idx = 0;
1608 vq->num_added = 0;
1609 vq->packed_ring = true;
1610 vq->use_dma_api = vring_use_dma_api(vdev);
1611 list_add_tail(&vq->vq.list, &vdev->vqs);
1612#ifdef DEBUG
1613 vq->in_use = false;
1614 vq->last_add_time_valid = false;
1615#endif
1616
1617 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
1618 !context;
1619 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1620
1621 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
1622 vq->weak_barriers = false;
1623
1624 vq->packed.ring_dma_addr = ring_dma_addr;
1625 vq->packed.driver_event_dma_addr = driver_event_dma_addr;
1626 vq->packed.device_event_dma_addr = device_event_dma_addr;
1627
1628 vq->packed.ring_size_in_bytes = ring_size_in_bytes;
1629 vq->packed.event_size_in_bytes = event_size_in_bytes;
1630
1631 vq->packed.vring.num = num;
1632 vq->packed.vring.desc = ring;
1633 vq->packed.vring.driver = driver;
1634 vq->packed.vring.device = device;
1635
1636 vq->packed.next_avail_idx = 0;
1637 vq->packed.avail_wrap_counter = 1;
1638 vq->packed.used_wrap_counter = 1;
1639 vq->packed.event_flags_shadow = 0;
1640 vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
1641
1642 vq->packed.desc_state = kmalloc_array(num,
1643 sizeof(struct vring_desc_state_packed),
1644 GFP_KERNEL);
1645 if (!vq->packed.desc_state)
1646 goto err_desc_state;
1647
1648 memset(vq->packed.desc_state, 0,
1649 num * sizeof(struct vring_desc_state_packed));
1650
1651 /* Put everything in free lists. */
1652 vq->free_head = 0;
1653 for (i = 0; i < num-1; i++)
1654 vq->packed.desc_state[i].next = i + 1;
1655
1656 vq->packed.desc_extra = kmalloc_array(num,
1657 sizeof(struct vring_desc_extra_packed),
1658 GFP_KERNEL);
1659 if (!vq->packed.desc_extra)
1660 goto err_desc_extra;
1661
1662 memset(vq->packed.desc_extra, 0,
1663 num * sizeof(struct vring_desc_extra_packed));
1664
1665 /* No callback? Tell other side not to bother us. */
1666 if (!callback) {
1667 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1668 vq->packed.vring.driver->flags =
1669 cpu_to_le16(vq->packed.event_flags_shadow);
1670 }
1671
1672 return &vq->vq;
1673
1674err_desc_extra:
1675 kfree(vq->packed.desc_state);
1676err_desc_state:
1677 kfree(vq);
1678err_vq:
	vring_free_queue(vdev, event_size_in_bytes, device,
			 device_event_dma_addr);
err_device:
	vring_free_queue(vdev, event_size_in_bytes, driver,
			 driver_event_dma_addr);
1682err_driver:
1683 vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
1684err_ring:
1685 return NULL;
1686}
1687
1688
1689/*
1690 * Generic functions and exported symbols.
1691 */
1692
1693static inline int virtqueue_add(struct virtqueue *_vq,
1694 struct scatterlist *sgs[],
1695 unsigned int total_sg,
1696 unsigned int out_sgs,
1697 unsigned int in_sgs,
1698 void *data,
1699 void *ctx,
1700 gfp_t gfp)
1701{
1702 struct vring_virtqueue *vq = to_vvq(_vq);
1703
1704 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
1705 out_sgs, in_sgs, data, ctx, gfp) :
1706 virtqueue_add_split(_vq, sgs, total_sg,
1707 out_sgs, in_sgs, data, ctx, gfp);
1708}
1709
1710/**
1711 * virtqueue_add_sgs - expose buffers to other end
1712 * @_vq: the struct virtqueue we're talking about.
1713 * @sgs: array of terminated scatterlists.
1714 * @out_sgs: the number of scatterlists readable by other side
1715 * @in_sgs: the number of scatterlists which are writable (after readable ones)
1716 * @data: the token identifying the buffer.
1717 * @gfp: how to do memory allocations (if necessary).
1718 *
1719 * Caller must ensure we don't call this with other virtqueue operations
1720 * at the same time (except where noted).
1721 *
1722 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
1723 */
1724int virtqueue_add_sgs(struct virtqueue *_vq,
1725 struct scatterlist *sgs[],
1726 unsigned int out_sgs,
1727 unsigned int in_sgs,
1728 void *data,
1729 gfp_t gfp)
1730{
1731 unsigned int i, total_sg = 0;
1732
1733 /* Count them first. */
1734 for (i = 0; i < out_sgs + in_sgs; i++) {
1735 struct scatterlist *sg;
1736
1737 for (sg = sgs[i]; sg; sg = sg_next(sg))
1738 total_sg++;
1739 }
1740 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
1741 data, NULL, gfp);
1742}
1743EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
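
/*
 * Example: a request/response style driver typically builds one scatterlist
 * per part of the request and passes the array to virtqueue_add_sgs(), with
 * all device-readable parts before the device-writable ones.  This is an
 * illustrative sketch; "req", its fields and "vq" are hypothetical driver
 * state, not defined by this file:
 *
 *	struct scatterlist hdr, status, *sgs[3];
 *	unsigned int out = 0, in = 0;
 *	int err;
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sgs[out++] = &hdr;
 *	sgs[out++] = req->data_sg;
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[out + in++] = &status;
 *
 *	err = virtqueue_add_sgs(vq, sgs, out, in, req, GFP_ATOMIC);
 *	if (err)
 *		return err;
 */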
1744
1745/**
1746 * virtqueue_add_outbuf - expose output buffers to other end
1747 * @vq: the struct virtqueue we're talking about.
1748 * @sg: scatterlist (must be well-formed and terminated!)
1749 * @num: the number of entries in @sg readable by other side
1750 * @data: the token identifying the buffer.
1751 * @gfp: how to do memory allocations (if necessary).
1752 *
1753 * Caller must ensure we don't call this with other virtqueue operations
1754 * at the same time (except where noted).
1755 *
1756 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
1757 */
1758int virtqueue_add_outbuf(struct virtqueue *vq,
1759 struct scatterlist *sg, unsigned int num,
1760 void *data,
1761 gfp_t gfp)
1762{
1763 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
1764}
1765EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
1766
1767/**
1768 * virtqueue_add_inbuf - expose input buffers to other end
1769 * @vq: the struct virtqueue we're talking about.
1770 * @sg: scatterlist (must be well-formed and terminated!)
1771 * @num: the number of entries in @sg writable by other side
1772 * @data: the token identifying the buffer.
1773 * @gfp: how to do memory allocations (if necessary).
1774 *
1775 * Caller must ensure we don't call this with other virtqueue operations
1776 * at the same time (except where noted).
1777 *
1778 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
1779 */
1780int virtqueue_add_inbuf(struct virtqueue *vq,
1781 struct scatterlist *sg, unsigned int num,
1782 void *data,
1783 gfp_t gfp)
1784{
1785 return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
1786}
1787EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
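
/*
 * Example: a receive path normally keeps the ring topped up with
 * device-writable buffers and kicks once per batch.  Illustrative sketch;
 * RX_BUF_LEN and rx_vq are hypothetical driver names:
 *
 *	struct scatterlist sg;
 *	void *buf;
 *
 *	do {
 *		buf = kmalloc(RX_BUF_LEN, GFP_ATOMIC);
 *		if (!buf)
 *			break;
 *		sg_init_one(&sg, buf, RX_BUF_LEN);
 *		if (virtqueue_add_inbuf(rx_vq, &sg, 1, buf, GFP_ATOMIC)) {
 *			kfree(buf);
 *			break;
 *		}
 *	} while (rx_vq->num_free);
 *	virtqueue_kick(rx_vq);
 */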
1788
1789/**
1790 * virtqueue_add_inbuf_ctx - expose input buffers to other end
1791 * @vq: the struct virtqueue we're talking about.
1792 * @sg: scatterlist (must be well-formed and terminated!)
1793 * @num: the number of entries in @sg writable by other side
1794 * @data: the token identifying the buffer.
1795 * @ctx: extra context for the token
1796 * @gfp: how to do memory allocations (if necessary).
1797 *
1798 * Caller must ensure we don't call this with other virtqueue operations
1799 * at the same time (except where noted).
1800 *
1801 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
1802 */
1803int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
1804 struct scatterlist *sg, unsigned int num,
1805 void *data,
1806 void *ctx,
1807 gfp_t gfp)
1808{
1809 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
1810}
1811EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
1812
1813/**
1814 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
1815 * @_vq: the struct virtqueue
1816 *
1817 * Instead of virtqueue_kick(), you can do:
1818 * if (virtqueue_kick_prepare(vq))
1819 * virtqueue_notify(vq);
1820 *
1821 * This is sometimes useful because the virtqueue_kick_prepare() needs
1822 * to be serialized, but the actual virtqueue_notify() call does not.
1823 */
1824bool virtqueue_kick_prepare(struct virtqueue *_vq)
1825{
1826 struct vring_virtqueue *vq = to_vvq(_vq);
1827
1828 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
1829 virtqueue_kick_prepare_split(_vq);
1830}
1831EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
1832
1833/**
1834 * virtqueue_notify - second half of split virtqueue_kick call.
1835 * @_vq: the struct virtqueue
1836 *
1837 * This does not need to be serialized.
1838 *
1839 * Returns false if host notify failed or queue is broken, otherwise true.
1840 */
1841bool virtqueue_notify(struct virtqueue *_vq)
1842{
1843 struct vring_virtqueue *vq = to_vvq(_vq);
1844
1845 if (unlikely(vq->broken))
1846 return false;
1847
1848 /* Prod other side to tell it about changes. */
1849 if (!vq->notify(_vq)) {
1850 vq->broken = true;
1851 return false;
1852 }
1853 return true;
1854}
1855EXPORT_SYMBOL_GPL(virtqueue_notify);
1856
1857/**
1858 * virtqueue_kick - update after add_buf
1859 * @vq: the struct virtqueue
1860 *
1861 * After one or more virtqueue_add_* calls, invoke this to kick
1862 * the other side.
1863 *
1864 * Caller must ensure we don't call this with other virtqueue
1865 * operations at the same time (except where noted).
1866 *
1867 * Returns false if kick failed, otherwise true.
1868 */
1869bool virtqueue_kick(struct virtqueue *vq)
1870{
1871 if (virtqueue_kick_prepare(vq))
1872 return virtqueue_notify(vq);
1873 return true;
1874}
1875EXPORT_SYMBOL_GPL(virtqueue_kick);
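
/*
 * Example: the kick_prepare/notify split lets a driver drop its queue lock
 * before the potentially expensive notification (often a VM exit).
 * Illustrative sketch; tx_lock, tx_vq, sg, nents and token are the driver's
 * own (hypothetical) state:
 *
 *	unsigned long flags;
 *	bool kick;
 *	int err;
 *
 *	spin_lock_irqsave(&tx_lock, flags);
 *	err = virtqueue_add_outbuf(tx_vq, sg, nents, token, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(tx_vq);
 *	spin_unlock_irqrestore(&tx_lock, flags);
 *
 *	if (kick)
 *		virtqueue_notify(tx_vq);
 */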
1876
1877/**
1878 * virtqueue_get_buf - get the next used buffer
1879 * @_vq: the struct virtqueue we're talking about.
1880 * @len: the length written into the buffer
1881 * @ctx: extra context for the token
1882 *
1883 * If the device wrote data into the buffer, @len will be set to the
1884 * amount written. This means you don't need to clear the buffer
1885 * beforehand to ensure there's no data leakage in the case of short
1886 * writes.
1887 *
1888 * Caller must ensure we don't call this with other virtqueue
1889 * operations at the same time (except where noted).
1890 *
1891 * Returns NULL if there are no used buffers, or the "data" token
1892 * handed to virtqueue_add_*().
1893 */
1894void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
1895 void **ctx)
1896{
1897 struct vring_virtqueue *vq = to_vvq(_vq);
1898
1899 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
1900 virtqueue_get_buf_ctx_split(_vq, len, ctx);
1901}
1902EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
1903
1904void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
1905{
1906 return virtqueue_get_buf_ctx(_vq, len, NULL);
1907}
1908EXPORT_SYMBOL_GPL(virtqueue_get_buf);
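
/*
 * Example: a virtqueue callback usually drains all completed buffers in one
 * pass, since a single interrupt may cover several used entries.
 * Illustrative sketch; complete_request() is a hypothetical driver helper:
 *
 *	static void my_vq_done(struct virtqueue *vq)
 *	{
 *		unsigned int len;
 *		void *token;
 *
 *		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *			complete_request(token, len);
 *	}
 */
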
/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->packed_ring)
		virtqueue_disable_cb_packed(_vq);
	else
		virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
				 virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	virtio_mb(vq->weak_barriers);
	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
				 virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
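/*
 * Illustrative only: how virtqueue_enable_cb_prepare() and virtqueue_poll()
 * are meant to be paired when a driver finishes a polling loop. The function
 * name is hypothetical and the snippet is not compiled as part of this file.
 */
#if 0
static bool example_finish_polling(struct virtqueue *vq)
{
	unsigned int opaque;

	/* Re-enable callbacks and snapshot the queue state... */
	opaque = virtqueue_enable_cb_prepare(vq);

	/* ...then check whether more buffers arrived in the meantime. */
	if (virtqueue_poll(vq, opaque)) {
		/* Raced with the device: keep polling instead. */
		virtqueue_disable_cb(vq);
		return false;
	}
	return true;
}
#endif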

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
				 virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
				 virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
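/*
 * Illustrative only: a device-removal sketch showing how buffers that were
 * never consumed are usually reclaimed once the queue has been stopped.
 * The function name and the assumption that tokens are kmalloc'ed buffers
 * are hypothetical; the snippet is not compiled as part of this file.
 */
#if 0
static void example_free_unused(struct virtqueue *vq)
{
	void *buf;

	/* The queue must no longer be active when this runs. */
	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		kfree(buf);
}
#endif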

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
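/*
 * Illustrative only: transports typically wire vring_interrupt() up as the
 * (possibly shared) interrupt handler for a queue, roughly as below. The
 * irq number and device name are hypothetical and the snippet is not
 * compiled as part of this file.
 */
#if 0
static int example_request_vq_irq(unsigned int irq, struct virtqueue *vq)
{
	/* vring_interrupt() returns IRQ_NONE when the vq has no work,
	 * which is what makes IRQF_SHARED safe here. */
	return request_irq(irq, vring_interrupt, IRQF_SHARED,
			   "example-virtio", vq);
}
#endif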

/* Only available for split ring */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->packed_ring = false;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	vq->use_dma_api = vring_use_dma_api(vdev);
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
		vq->weak_barriers = false;

	vq->split.queue_dma_addr = 0;
	vq->split.queue_size_in_bytes = 0;

	vq->split.vring = vring;
	vq->split.avail_flags_shadow = 0;
	vq->split.avail_idx_shadow = 0;

	/* No callback? Tell other side not to bother us. */
	if (!callback) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
					vq->split.avail_flags_shadow);
	}

	vq->split.desc_state = kmalloc_array(vring.num,
			sizeof(struct vring_desc_state_split), GFP_KERNEL);
	if (!vq->split.desc_state) {
		kfree(vq);
		return NULL;
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num - 1; i++)
		vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->split.desc_state, 0, vring.num *
			sizeof(struct vring_desc_state_split));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return vring_create_virtqueue_packed(index, num, vring_align,
				vdev, weak_barriers, may_reduce_num,
				context, notify, callback, name);

	return vring_create_virtqueue_split(index, num, vring_align,
			vdev, weak_barriers, may_reduce_num,
			context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
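/*
 * Illustrative only: the shape of a transport-side call into
 * vring_create_virtqueue(). The notify hook, callback, queue size, alignment
 * and names are all hypothetical; the snippet is not compiled as part of
 * this file.
 */
#if 0
static bool example_notify(struct virtqueue *vq)
{
	/* A real transport pokes a doorbell register here. */
	return true;
}

static void example_callback(struct virtqueue *vq)
{
	/* A real driver reclaims used buffers here. */
}

static struct virtqueue *example_setup_vq(struct virtio_device *vdev)
{
	return vring_create_virtqueue(0, 256, SMP_CACHE_BYTES, vdev,
				      true,  /* weak_barriers */
				      true,  /* may_reduce_num */
				      false, /* context */
				      example_notify, example_callback,
				      "example-vq");
}
#endif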

/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		if (vq->packed_ring) {
			vring_free_queue(vq->vq.vdev,
					 vq->packed.ring_size_in_bytes,
					 vq->packed.vring.desc,
					 vq->packed.ring_dma_addr);

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.driver,
					 vq->packed.driver_event_dma_addr);

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.device,
					 vq->packed.device_event_dma_addr);

			kfree(vq->packed.desc_state);
			kfree(vq->packed.desc_extra);
		} else {
			vring_free_queue(vq->vq.vdev,
					 vq->split.queue_size_in_bytes,
					 vq->split.vring.desc,
					 vq->split.queue_dma_addr);
		}
	}
	if (!vq->packed_ring)
		kfree(vq->split.desc_state);
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_ACCESS_PLATFORM:
			break;
		case VIRTIO_F_RING_PACKED:
			break;
		case VIRTIO_F_ORDER_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
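/*
 * Illustrative only: transports usually call vring_transport_features() from
 * their finalize_features hook so that transport bits the ring code doesn't
 * understand are cleared before the device is told which features were
 * accepted. The function name is hypothetical and the snippet is not
 * compiled as part of this file.
 */
#if 0
static int example_finalize_features(struct virtio_device *vdev)
{
	/* Drop transport feature bits this ring implementation can't honour. */
	vring_transport_features(vdev);
	return 0;
}
#endif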

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring. This is mainly used for boasting to
 * userspace. Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
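/*
 * Illustrative only: drivers commonly size per-buffer bookkeeping from the
 * vring size, along these lines. The helper name is hypothetical and the
 * snippet is not compiled as part of this file.
 */
#if 0
static void **example_alloc_tokens(struct virtqueue *vq)
{
	/* One slot per ring entry, i.e. per potentially in-flight buffer. */
	return kcalloc(virtqueue_get_vring_size(vq), sizeof(void *),
		       GFP_KERNEL);
}
#endif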

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover. You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.ring_dma_addr;

	return vq->split.queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.driver_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.device_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
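/*
 * Illustrative only: a transport uses the three accessors above to tell the
 * device where the ring lives, conceptually as below. The register layout,
 * offsets and function name are hypothetical and the snippet is not compiled
 * as part of this file.
 */
#if 0
static void example_program_queue(void __iomem *regs, struct virtqueue *vq)
{
	u64 desc  = virtqueue_get_desc_addr(vq);
	u64 avail = virtqueue_get_avail_addr(vq);
	u64 used  = virtqueue_get_used_addr(vq);

	/* Hypothetical 32-bit register pairs for each address. */
	writel(lower_32_bits(desc),  regs + 0x00);
	writel(upper_32_bits(desc),  regs + 0x04);
	writel(lower_32_bits(avail), regs + 0x08);
	writel(upper_32_bits(avail), regs + 0x0c);
	writel(lower_32_bits(used),  regs + 0x10);
	writel(upper_32_bits(used),  regs + 0x14);
}
#endif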

/* Only available for split ring */
const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");