v6.8: drivers/gpu/drm/xen/xen_drm_front_gem.c
// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>

#include <xen/balloon.h>
#include <xen/xen.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* this is for imported PRIME buffer */
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
					 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	int ret;

	vma->vm_ops = gem_obj->funcs->vm_ops;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vm_flags_mod(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
	vma->vm_pgoff = 0;

	/*
	 * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * The vm_operations_struct.fault handler would be called on first
	 * CPU access to the mapping. For GPUs this isn't the case, because
	 * the CPU doesn't touch the memory. Insert all pages now, so both
	 * CPU and GPU are happy.
	 *
	 * FIXME: since all pages are inserted here, the .fault handler must
	 * never be called, so none is provided.
	 */
	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
	if (ret < 0)
		DRM_ERROR("Failed to map pages into vma: %d\n", ret);

	return ret;
}

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open           = drm_gem_vm_open,
	.close          = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
	.free = xen_drm_front_gem_free_object_unlocked,
	.get_sg_table = xen_drm_front_gem_get_sg_table,
	.vmap = xen_drm_front_gem_prime_vmap,
	.vunmap = xen_drm_front_gem_prime_vunmap,
	.mmap = xen_drm_front_gem_object_mmap,
	.vm_ops = &xen_drm_drv_vm_ops,
};

static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	xen_obj->base.funcs = &xen_drm_front_gem_object_funcs;

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
						  xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}
	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}

void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				xen_free_unpopulated_pages(xen_obj->num_pages,
							   xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(gem_obj->dev,
				     xen_obj->pages, xen_obj->num_pages);
}

struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
					 xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, sgt->sgl->offset,
					xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->orig_nents);

	return &xen_obj->base;
}

int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
				 struct iosys_map *map)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	void *vaddr;

	if (!xen_obj->pages)
		return -ENOMEM;

	/*
	 * Please see the comment in xen_drm_front_gem_object_mmap() on
	 * mapping and attributes.
	 */
	vaddr = vmap(xen_obj->pages, xen_obj->num_pages,
		     VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;
	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    struct iosys_map *map)
{
	vunmap(map->vaddr);
}
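
For context, the helpers above are not called directly by the DRM core: they are reached through the drm_gem_object_funcs table and the driver's dumb-buffer hook in xen_drm_front.c. The following is a minimal, hypothetical sketch (example_dumb_create is an invented name, not the driver's actual function) of how a dumb_create implementation could be built on xen_drm_front_gem_create(); the real wiring may differ in detail.

/*
 * Hypothetical sketch: a dumb_create hook built on the GEM helpers above.
 * Not the actual xen_drm_front.c implementation.
 */
static int example_dumb_create(struct drm_file *filp, struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *obj;
	int ret;

	/* Derive pitch and size from the requested geometry. */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	/* Allocates backing pages (or ballooned pages for be_alloc). */
	obj = xen_drm_front_gem_create(dev, args->size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Publish a handle only once the object is fully constructed. */
	ret = drm_gem_handle_create(filp, obj, &args->handle);

	/* The handle now holds a reference; drop the creation reference. */
	drm_gem_object_put(obj);

	return ret;
}
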
v5.4: drivers/gpu/drm/xen/xen_drm_front_gem.c
// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>

#include <xen/balloon.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* this is for imported PRIME buffer */
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = alloc_xenballooned_pages(xen_obj->num_pages,
					       xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}
	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR_OR_NULL(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}

void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				free_xenballooned_pages(xen_obj->num_pages,
							xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
}

struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
					       NULL, xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->nents);

	return &xen_obj->base;
}

static int gem_mmap_obj(struct xen_gem_object *xen_obj,
			struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_pgoff = 0;
	/*
	 * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * The vm_operations_struct.fault handler would be called on first
	 * CPU access to the mapping. For GPUs this isn't the case, because
	 * the CPU doesn't touch the memory. Insert all pages now, so both
	 * CPU and GPU are happy.
	 *
	 * FIXME: since all pages are inserted here, the .fault handler must
	 * never be called, so none is provided.
	 */
	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
	if (ret < 0)
		DRM_ERROR("Failed to map pages into vma: %d\n", ret);

	return ret;
}

int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret < 0)
		return ret;

	gem_obj = vma->vm_private_data;
	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}

void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return NULL;

	/* Please see comment in gem_mmap_obj on mapping and attributes. */
	return vmap(xen_obj->pages, xen_obj->num_pages,
		    VM_MAP, PAGE_KERNEL);
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    void *vaddr)
{
	vunmap(vaddr);
}

int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
				 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	int ret;

	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
	if (ret < 0)
		return ret;

	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}
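
Comparing the two listings, the caller-visible changes between v5.4 and v6.8 are: the prime vmap interface moved from returning a raw kernel pointer to filling a struct iosys_map; drm_prime_pages_to_sg() gained a struct drm_device argument; drm_prime_sg_to_page_addr_arrays() became drm_prime_sg_to_page_array(); the ballooned-page helpers were replaced by the unpopulated-pages API; and mmap handling moved from file-operation hooks into drm_gem_object_funcs. A hedged caller-side sketch of the vmap difference, assuming gem_obj is a valid GEM object from this driver (illustrative fragment, not driver code):

/* v5.4 style: the helper returns the mapping directly (NULL on failure). */
void *vaddr = xen_drm_front_gem_prime_vmap(gem_obj);
if (!vaddr)
	return -ENOMEM;
/* ... access the buffer through vaddr ... */
xen_drm_front_gem_prime_vunmap(gem_obj, vaddr);

/* v6.8 style: the helper fills a struct iosys_map and returns an errno. */
struct iosys_map map;
int ret = xen_drm_front_gem_prime_vmap(gem_obj, &map);
if (ret)
	return ret;
/* ... access the buffer through map.vaddr ... */
xen_drm_front_gem_prime_vunmap(gem_obj, &map);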