// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>

#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>

#include <xen/balloon.h>
#include <xen/xen.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* set for imported PRIME buffers */
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

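/* Allocate an array of page pointers large enough to back buf_size bytes. */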
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

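/* Back a userspace mmap() of the buffer by inserting all of its pages. */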
static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
					 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	int ret;

	vma->vm_ops = gem_obj->funcs->vm_ops;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vm_flags_mod(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
	vma->vm_pgoff = 0;

	/*
	 * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * The vm_operations_struct.fault handler would normally be called on
	 * the first CPU access to the mapping. GPUs access the memory without
	 * going through the CPU, so insert all pages now to keep both CPU and
	 * GPU accesses working.
	 *
	 * FIXME: as all pages are inserted here, no .fault handler should
	 * ever be called, so none is provided.
	 */
	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
	if (ret < 0)
		DRM_ERROR("Failed to map pages into vma: %d\n", ret);

	return ret;
}

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
	.free = xen_drm_front_gem_free_object_unlocked,
	.get_sg_table = xen_drm_front_gem_get_sg_table,
	.vmap = xen_drm_front_gem_prime_vmap,
	.vunmap = xen_drm_front_gem_prime_vunmap,
	.mmap = xen_drm_front_gem_object_mmap,
	.vm_ops = &xen_drm_drv_vm_ops,
};

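/* Allocate a xen_gem_object and initialize its embedded GEM object. */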
static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	xen_obj->base.funcs = &xen_drm_front_gem_object_funcs;

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

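/*
 * Create a buffer object: if the backend allocates the buffer, only grab
 * ballooned pages to map its grant references into; otherwise allocate
 * regular shmem-backed pages that can be shared with the backend.
 */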
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
						  xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}
	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}

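/*
 * Release a buffer object: imported PRIME buffers are torn down through
 * drm_prime_gem_destroy(), while locally allocated ones return their pages
 * to the balloon or to shmem, depending on who allocated them.
 */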
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				xen_free_unpopulated_pages(xen_obj->num_pages,
							   xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(gem_obj->dev,
				     xen_obj->pages, xen_obj->num_pages);
}

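/*
 * Import a PRIME buffer: build the pages array from the scatter-gather
 * table of the attached dma-buf and tell the backend about the new
 * display buffer.
 */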
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
					 xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, sgt->sgl->offset,
					xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->orig_nents);

	return &xen_obj->base;
}

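/* Map the buffer's pages into a contiguous kernel virtual range for PRIME. */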
int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
				 struct iosys_map *map)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	void *vaddr;

	if (!xen_obj->pages)
		return -ENOMEM;

	/*
	 * Please see the comment in xen_drm_front_gem_object_mmap() on
	 * mapping and attributes.
	 */
	vaddr = vmap(xen_obj->pages, xen_obj->num_pages,
		     VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;
	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    struct iosys_map *map)
{
	vunmap(map->vaddr);
}