/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

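/*
 * Allocate a host resource ID. Returned IDs start at 1; 0 is reserved
 * and never handed out. With the virglrenderer workaround enabled the
 * IDs come from a monotonically increasing sequence counter and are
 * intentionally never reused; otherwise they are recycled via the IDA.
 */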
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				      uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that. virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);
		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

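/*
 * Release a resource ID for reuse. Deliberately a no-op while the
 * virglrenderer workaround is active, since sequence numbers must not
 * be handed out twice.
 */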
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround) {
		ida_free(&vgdev->resource_ida, id - 1);
	}
}

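/*
 * Final teardown of a buffer object: give back its resource ID and free
 * the guest-side backing. For shmem objects this unmaps and frees the
 * scatter/gather table and unpins the pages; for host-visible VRAM
 * objects it returns the carved-out region to the drm_mm range manager.
 */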
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		if (shmem->pages) {
			if (shmem->mapped) {
				dma_unmap_sgtable(vgdev->vdev->dev.parent,
						  shmem->pages, DMA_TO_DEVICE, 0);
				shmem->mapped = 0;
			}

			sg_free_table(shmem->pages);
			kfree(shmem->pages);
			shmem->pages = NULL;
			drm_gem_shmem_unpin(&bo->base.base);
		}

		drm_gem_shmem_free_object(&bo->base.base);
	} else if (virtio_gpu_is_vram(bo)) {
		struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

		spin_lock(&vgdev->host_visible_lock);
		if (drm_mm_node_allocated(&vram->vram_node))
			drm_mm_remove_node(&vram->vram_node);

		spin_unlock(&vgdev->host_visible_lock);

		drm_gem_free_mmap_offset(&vram->base.base.base);
		drm_gem_object_release(&vram->base.base.base);
		kfree(vram);
	}
}

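/*
 * GEM .free callback. If the resource was ever created on the host, an
 * unref command must be sent first and the actual cleanup is deferred
 * to that command's completion handler; otherwise the object can be
 * torn down immediately.
 */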
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

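/*
 * GEM object callbacks for shmem-backed objects. Everything except
 * free/open/close/export is delegated to the generic shmem helpers;
 * the table also doubles as the type marker checked by
 * virtio_gpu_is_shmem() below.
 */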
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,

	.print_info = drm_gem_shmem_print_info,
	.export = virtgpu_gem_prime_export,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

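/* An object is shmem-backed iff it uses the shmem function table. */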
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}

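/*
 * GEM gem_create_object hook: allocate the driver-private wrapper and
 * point the embedded GEM object at the shmem function table. The shmem
 * helper core finishes the initialization and turns a NULL return into
 * -ENOMEM.
 */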
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	return &dshmem->base;
}

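/*
 * Pin the object's pages and translate its scatter/gather table into
 * the virtio_gpu_mem_entry array (address + length pairs) that resource
 * attach commands hand to the host. With the DMA API in use the entries
 * carry DMA addresses of the mapped (possibly coalesced) segments;
 * otherwise they carry physical addresses.
 */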
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;

	ret = drm_gem_shmem_pin(&bo->base.base);
	if (ret < 0)
		return -EINVAL;

	/*
	 * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
	 * drm_gem_shmem_get_pages_sgt because virtio has its own set of
	 * dma-ops. This is discouraged for other drivers, but should be fine
	 * since virtio_gpu doesn't support dma-buf import from other devices.
	 */
	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
	if (!shmem->pages) {
		drm_gem_shmem_unpin(&bo->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		ret = dma_map_sgtable(vgdev->vdev->dev.parent,
				      shmem->pages, DMA_TO_DEVICE, 0);
		if (ret)
			return ret;
		*nents = shmem->mapped = shmem->pages->nents;
	} else {
		*nents = shmem->pages->orig_nents;
	}

	*ents = kvmalloc_array(*nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	if (use_dma_api) {
		for_each_sgtable_dma_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
			(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
			(*ents)[si].padding = 0;
		}
	} else {
		for_each_sgtable_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
			(*ents)[si].length = cpu_to_le32(sg->length);
			(*ents)[si].padding = 0;
		}
	}

	return 0;
}

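/*
 * Create a buffer object and the matching host resource. A resource ID
 * is reserved, the guest pages are set up, and the appropriate create
 * command (blob, 3D, or 2D) is queued; non-blob resources additionally
 * get their backing store attached. If @fence is non-NULL the create
 * command is fenced so the host completes it before the object is used.
 */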
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0) {
		virtio_gpu_array_put_free(objs);
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	if (params->blob) {
		if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
			bo->guest_blob = true;

		virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
						    ents, nents);
	} else if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	}

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free_object(&shmem_obj->base);
	return ret;
}
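/*
 * Usage sketch (illustrative, not part of this file): a caller such as
 * the dumb-buffer ioctl path is expected to fill in
 * virtio_gpu_object_params and call virtio_gpu_object_create() roughly
 * like this; field names follow the ones used above and error handling
 * is abbreviated.
 *
 *	struct virtio_gpu_object_params params = { 0 };
 *	struct virtio_gpu_object *obj;
 *	int ret;
 *
 *	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
 *	params.width  = args->width;
 *	params.height = args->height;
 *	params.size   = args->size;
 *	params.dumb   = true;
 *	ret = virtio_gpu_object_create(vgdev, &params, &obj, NULL);
 *	if (ret)
 *		return ret;
 */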