/*
 * NOTE(review): the following navigation text ("Linux Audio", "Check our
 * new training course", "Loading...", "Note: File does not exist in v3.1.")
 * is residue from the web code-browser this file was captured from; it is
 * not part of the source.
 */
  1/*
  2 * Copyright 2014 Canonical
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: Andreas Pokorny
 23 */
 24
 25#include <drm/drm_prime.h>
 26#include <linux/virtio_dma_buf.h>
 27
 28#include "virtgpu_drv.h"
 29
 30static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
 31				   uuid_t *uuid)
 32{
 33	struct drm_gem_object *obj = buf->priv;
 34	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
 35	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
 36
 37	wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
 38	if (bo->uuid_state != STATE_OK)
 39		return -ENODEV;
 40
 41	uuid_copy(uuid, &bo->uuid);
 42
 43	return 0;
 44}
 45
 46static struct sg_table *
 47virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
 48			enum dma_data_direction dir)
 49{
 50	struct drm_gem_object *obj = attach->dmabuf->priv;
 51	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
 52
 53	if (virtio_gpu_is_vram(bo))
 54		return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);
 55
 56	return drm_gem_map_dma_buf(attach, dir);
 57}
 58
 59static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 60				      struct sg_table *sgt,
 61				      enum dma_data_direction dir)
 62{
 63	struct drm_gem_object *obj = attach->dmabuf->priv;
 64	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
 65
 66	if (virtio_gpu_is_vram(bo)) {
 67		virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
 68		return;
 69	}
 70
 71	drm_gem_unmap_dma_buf(attach, sgt, dir);
 72}
 73
/*
 * dma-buf ops for buffers exported by virtio-gpu.  Generic DRM PRIME
 * helpers are reused for attach/detach/release/mmap/vmap; map_dma_buf
 * and unmap_dma_buf are overridden so VRAM objects take the virtio-gpu
 * specific path, and get_uuid exposes the host-assigned resource UUID.
 */
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops =  {
	.ops = {
		.cache_sgt_mapping = true,
		.attach = virtio_dma_buf_attach,
		.detach = drm_gem_map_detach,
		.map_dma_buf = virtgpu_gem_map_dma_buf,
		.unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
		.release = drm_gem_dmabuf_release,
		.mmap = drm_gem_dmabuf_mmap,
		.vmap = drm_gem_dmabuf_vmap,
		.vunmap = drm_gem_dmabuf_vunmap,
	},
	.device_attach = drm_gem_map_attach,
	.get_uuid = virtgpu_virtio_get_uuid,
};
 89
 90int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
 91				    struct virtio_gpu_object *bo)
 92{
 93	struct virtio_gpu_object_array *objs;
 94
 95	objs = virtio_gpu_array_alloc(1);
 96	if (!objs)
 97		return -ENOMEM;
 98
 99	virtio_gpu_array_add_obj(objs, &bo->base.base);
100
101	return virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
102}
103
104struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
105					 int flags)
106{
107	struct dma_buf *buf;
108	struct drm_device *dev = obj->dev;
109	struct virtio_gpu_device *vgdev = dev->dev_private;
110	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
111	int ret = 0;
112	bool blob = bo->host3d_blob || bo->guest_blob;
113	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
114
115	if (!blob) {
116		if (vgdev->has_resource_assign_uuid) {
117			ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
118			if (ret)
119				return ERR_PTR(ret);
120
121			virtio_gpu_notify(vgdev);
122		} else {
123			bo->uuid_state = STATE_ERR;
124		}
125	} else if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)) {
126		bo->uuid_state = STATE_ERR;
127	}
128
129	exp_info.ops = &virtgpu_dmabuf_ops.ops;
130	exp_info.size = obj->size;
131	exp_info.flags = flags;
132	exp_info.priv = obj;
133	exp_info.resv = obj->resv;
134
135	buf = virtio_dma_buf_export(&exp_info);
136	if (IS_ERR(buf))
137		return buf;
138
139	drm_dev_get(dev);
140	drm_gem_object_get(obj);
141
142	return buf;
143}
144
145struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
146						struct dma_buf *buf)
147{
148	struct drm_gem_object *obj;
149
150	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
151		obj = buf->priv;
152		if (obj->dev == dev) {
153			/*
154			 * Importing dmabuf exported from our own gem increases
155			 * refcount on gem itself instead of f_count of dmabuf.
156			 */
157			drm_gem_object_get(obj);
158			return obj;
159		}
160	}
161
162	return drm_gem_prime_import(dev, buf);
163}
164
/*
 * Importing foreign sg-tables is not supported: unconditionally
 * returns -ENODEV, so only self-exported dma-bufs (handled in
 * virtgpu_gem_prime_import) can be imported.
 */
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	return ERR_PTR(-ENODEV);
}