lima_gem.c (Linux v6.2)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/mm.h>
#include <linux/iosys-map.h>
#include <linux/sync_file.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-mapping.h>

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include <drm/lima_drm.h>

#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_vm.h"

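/*
 * Editorial note (not in the kernel source): lima_heap_alloc() grows a heap
 * BO on demand. The backing starts at lima_heap_init_nr_pages pages and
 * doubles on each call, capped at the BO's full size; once the cap is
 * reached it returns -ENOSPC. Only the newly needed shmem pages are faulted
 * in, a fresh sg table covering [0, new_size) is built and DMA-mapped to
 * replace the old one, and, when a VM is given, only the pages from
 * old_size onward are mapped into the GPU address space.
 */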
int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
{
	struct page **pages;
	struct address_space *mapping = bo->base.base.filp->f_mapping;
	struct device *dev = bo->base.base.dev->dev;
	size_t old_size = bo->heap_size;
	size_t new_size = bo->heap_size ? bo->heap_size * 2 :
		(lima_heap_init_nr_pages << PAGE_SHIFT);
	struct sg_table sgt;
	int i, ret;

	if (bo->heap_size >= bo->base.base.size)
		return -ENOSPC;

	new_size = min(new_size, bo->base.base.size);

	mutex_lock(&bo->base.pages_lock);

	if (bo->base.pages) {
		pages = bo->base.pages;
	} else {
		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			mutex_unlock(&bo->base.pages_lock);
			return -ENOMEM;
		}

		bo->base.pages = pages;
		bo->base.pages_use_count = 1;

		mapping_set_unevictable(mapping);
	}

	for (i = old_size >> PAGE_SHIFT; i < new_size >> PAGE_SHIFT; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);

		if (IS_ERR(page)) {
			mutex_unlock(&bo->base.pages_lock);
			return PTR_ERR(page);
		}
		pages[i] = page;
	}

	mutex_unlock(&bo->base.pages_lock);

	ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
					new_size, GFP_KERNEL);
	if (ret)
		return ret;

	if (bo->base.sgt) {
		dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
		sg_free_table(bo->base.sgt);
	} else {
		bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
		if (!bo->base.sgt) {
			sg_free_table(&sgt);
			return -ENOMEM;
		}
	}

	ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(&sgt);
		kfree(bo->base.sgt);
		bo->base.sgt = NULL;
		return ret;
	}

	*bo->base.sgt = sgt;

	if (vm) {
		ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
		if (ret)
			return ret;
	}

	bo->heap_size = new_size;
	return 0;
}

int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
			   u32 size, u32 flags, u32 *handle)
{
	int err;
	gfp_t mask;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	struct lima_bo *bo;
	bool is_heap = flags & LIMA_BO_FLAG_HEAP;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	obj = &shmem->base;

	/* Mali Utgard GPUs only support a 32-bit address space */
	mask = mapping_gfp_mask(obj->filp->f_mapping);
	mask &= ~__GFP_HIGHMEM;
	mask |= __GFP_DMA32;
	mapping_set_gfp_mask(obj->filp->f_mapping, mask);

	if (is_heap) {
		bo = to_lima_bo(obj);
		err = lima_heap_alloc(bo, NULL);
		if (err)
			goto out;
	} else {
		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem);

		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto out;
		}
	}

	err = drm_gem_handle_create(file, obj, handle);

out:
	/* drop the reference from allocation; the handle holds it now */
	drm_gem_object_put(obj);

	return err;
}

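/*
 * Editorial sketch (not part of the kernel source): minimal userspace use of
 * the path above through the lima uAPI. It assumes the header from
 * include/uapi/drm/lima_drm.h is on the include path and that the render
 * node is the hypothetical /dev/dri/renderD128. Note that a heap BO created
 * this way cannot be mmap'ed; its size only caps how far the kernel may
 * grow it.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/lima_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR); /* hypothetical node */
	struct drm_lima_gem_create req = {
		.size = 4 << 20,            /* cap: how far the heap may grow */
		.flags = LIMA_BO_FLAG_HEAP, /* request a grow-on-demand heap BO */
	};

	if (fd < 0 || ioctl(fd, DRM_IOCTL_LIMA_GEM_CREATE, &req) < 0) {
		perror("lima gem create");
		return 1;
	}

	printf("heap bo handle: %u\n", req.handle);
	return 0;
}
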
static void lima_gem_free_object(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (!list_empty(&bo->va))
		dev_err(obj->dev->dev, "lima gem free bo still has va\n");

	drm_gem_shmem_free(&bo->base);
}

static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	return lima_vm_bo_add(vm, bo, true);
}

static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	lima_vm_bo_del(vm, bo);
}

static int lima_gem_pin(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_pin(&bo->base);
}

static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_vmap(&bo->base, map);
}

static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_mmap(&bo->base, vma);
}

static const struct drm_gem_object_funcs lima_gem_funcs = {
	.free = lima_gem_free_object,
	.open = lima_gem_object_open,
	.close = lima_gem_object_close,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = lima_gem_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = lima_gem_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = lima_gem_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

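/*
 * Editorial note (not in the kernel source): heap BOs are populated lazily
 * by lima_heap_alloc(), so the wrappers above reject pin, vmap and mmap on
 * them with -EINVAL; all remaining operations are delegated to the generic
 * drm_gem_shmem helpers listed in the table.
 */
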
struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
{
	struct lima_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_init(&bo->lock);
	INIT_LIST_HEAD(&bo->va);
	bo->base.map_wc = true;
	bo->base.base.funcs = &lima_gem_funcs;

	return &bo->base.base;
}

int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
	struct drm_gem_object *obj;
	struct lima_bo *bo;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	bo = to_lima_bo(obj);

	*va = lima_vm_get_va(vm, bo);

	*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_put(obj);
	return 0;
}

static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
			    bool write, bool explicit)
{
	int err;

	err = dma_resv_reserve_fences(lima_bo_resv(bo), 1);
	if (err)
		return err;

	/* explicit sync uses user-passed dependency fences */
	if (explicit)
		return 0;

	return drm_sched_job_add_implicit_dependencies(&task->base,
						       &bo->base.base,
						       write);
}

static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
		struct dma_fence *fence = NULL;

		if (!submit->in_sync[i])
			continue;

		err = drm_syncobj_find_fence(file, submit->in_sync[i],
					     0, 0, &fence);
		if (err)
			return err;

		err = drm_sched_job_add_dependency(&submit->task->base, fence);
		if (err) {
			dma_fence_put(fence);
			return err;
		}
	}

	return 0;
}

int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
{
	int i, err = 0;
	struct ww_acquire_ctx ctx;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;
	struct drm_syncobj *out_sync = NULL;
	struct dma_fence *fence;
	struct lima_bo **bos = submit->lbos;

	if (submit->out_sync) {
		out_sync = drm_syncobj_find(file, submit->out_sync);
		if (!out_sync)
			return -ENOENT;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct lima_bo *bo;

		obj = drm_gem_object_lookup(file, submit->bos[i].handle);
		if (!obj) {
			err = -ENOENT;
			goto err_out0;
		}

		bo = to_lima_bo(obj);

		/* increase the refcount of the GPU VA mapping so it cannot be
		 * unmapped while the task executes; it is decreased when the
		 * task is done
		 */
		err = lima_vm_bo_add(vm, bo, false);
		if (err) {
			drm_gem_object_put(obj);
			goto err_out0;
		}

		bos[i] = bo;
	}

	err = drm_gem_lock_reservations((struct drm_gem_object **)bos,
					submit->nr_bos, &ctx);
	if (err)
		goto err_out0;

	err = lima_sched_task_init(
		submit->task, submit->ctx->context + submit->pipe,
		bos, submit->nr_bos, vm);
	if (err)
		goto err_out1;

	err = lima_gem_add_deps(file, submit);
	if (err)
		goto err_out2;

	for (i = 0; i < submit->nr_bos; i++) {
		err = lima_gem_sync_bo(
			submit->task, bos[i],
			submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
			submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
		if (err)
			goto err_out2;
	}

	fence = lima_sched_context_queue_task(submit->task);

	for (i = 0; i < submit->nr_bos; i++) {
		dma_resv_add_fence(lima_bo_resv(bos[i]), fence,
				   submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE ?
				   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
	}

	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);

	for (i = 0; i < submit->nr_bos; i++)
		drm_gem_object_put(&bos[i]->base.base);

	if (out_sync) {
		drm_syncobj_replace_fence(out_sync, fence);
		drm_syncobj_put(out_sync);
	}

	dma_fence_put(fence);

	return 0;

err_out2:
	lima_sched_task_fini(submit->task);
err_out1:
	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);
err_out0:
	for (i = 0; i < submit->nr_bos; i++) {
		if (!bos[i])
			break;
		lima_vm_bo_del(vm, bos[i]);
		drm_gem_object_put(&bos[i]->base.base);
	}
	if (out_sync)
		drm_syncobj_put(out_sync);
	return err;
}

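/*
 * Editorial note (not in the kernel source): lima_gem_submit() above follows
 * the common DRM submit pattern: look up each BO handle and take an extra
 * GPU VA mapping reference, ww-lock all reservation objects with
 * drm_gem_lock_reservations(), initialize the scheduler task, collect
 * explicit syncobj dependencies and (unless LIMA_SUBMIT_FLAG_EXPLICIT_FENCE
 * is set) implicit ones from each BO's reservation object, queue the task,
 * publish the returned fence to every BO and to the optional out_sync
 * syncobj, and finally drop the locks and handle references.
 */
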
int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
{
	bool write = op & LIMA_GEM_WAIT_WRITE;
	long ret, timeout;

	if (!op)
		return 0;

	timeout = drm_timeout_abs_to_jiffies(timeout_ns);

	ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
	if (ret == -ETIME)
		ret = timeout ? -ETIMEDOUT : -EBUSY;

	return ret;
}
lima_gem.c (Linux v5.14.15)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/mm.h>
#include <linux/sync_file.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-mapping.h>

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include <drm/lima_drm.h>

#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_vm.h"

int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
{
	struct page **pages;
	struct address_space *mapping = bo->base.base.filp->f_mapping;
	struct device *dev = bo->base.base.dev->dev;
	size_t old_size = bo->heap_size;
	size_t new_size = bo->heap_size ? bo->heap_size * 2 :
		(lima_heap_init_nr_pages << PAGE_SHIFT);
	struct sg_table sgt;
	int i, ret;

	if (bo->heap_size >= bo->base.base.size)
		return -ENOSPC;

	new_size = min(new_size, bo->base.base.size);

	mutex_lock(&bo->base.pages_lock);

	if (bo->base.pages) {
		pages = bo->base.pages;
	} else {
		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			mutex_unlock(&bo->base.pages_lock);
			return -ENOMEM;
		}

		bo->base.pages = pages;
		bo->base.pages_use_count = 1;

		mapping_set_unevictable(mapping);
	}

	for (i = old_size >> PAGE_SHIFT; i < new_size >> PAGE_SHIFT; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);

		if (IS_ERR(page)) {
			mutex_unlock(&bo->base.pages_lock);
			return PTR_ERR(page);
		}
		pages[i] = page;
	}

	mutex_unlock(&bo->base.pages_lock);

	ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
					new_size, GFP_KERNEL);
	if (ret)
		return ret;

	if (bo->base.sgt) {
		dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
		sg_free_table(bo->base.sgt);
	} else {
		bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
		if (!bo->base.sgt) {
			sg_free_table(&sgt);
			return -ENOMEM;
		}
	}

	ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(&sgt);
		kfree(bo->base.sgt);
		bo->base.sgt = NULL;
		return ret;
	}

	*bo->base.sgt = sgt;

	if (vm) {
		ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
		if (ret)
			return ret;
	}

	bo->heap_size = new_size;
	return 0;
}

int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
			   u32 size, u32 flags, u32 *handle)
{
	int err;
	gfp_t mask;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	struct lima_bo *bo;
	bool is_heap = flags & LIMA_BO_FLAG_HEAP;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	obj = &shmem->base;

	/* Mali Utgard GPUs only support a 32-bit address space */
	mask = mapping_gfp_mask(obj->filp->f_mapping);
	mask &= ~__GFP_HIGHMEM;
	mask |= __GFP_DMA32;
	mapping_set_gfp_mask(obj->filp->f_mapping, mask);

	if (is_heap) {
		bo = to_lima_bo(obj);
		err = lima_heap_alloc(bo, NULL);
		if (err)
			goto out;
	} else {
		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);

		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto out;
		}
	}

	err = drm_gem_handle_create(file, obj, handle);

out:
	/* drop the reference from allocation; the handle holds it now */
	drm_gem_object_put(obj);

	return err;
}

static void lima_gem_free_object(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (!list_empty(&bo->va))
		dev_err(obj->dev->dev, "lima gem free bo still has va\n");

	drm_gem_shmem_free_object(obj);
}

static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	return lima_vm_bo_add(vm, bo, true);
}

static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	lima_vm_bo_del(vm, bo);
}

static int lima_gem_pin(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_pin(obj);
}

static int lima_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_vmap(obj, map);
}

static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_mmap(obj, vma);
}

static const struct drm_gem_object_funcs lima_gem_funcs = {
	.free = lima_gem_free_object,
	.open = lima_gem_object_open,
	.close = lima_gem_object_close,
	.print_info = drm_gem_shmem_print_info,
	.pin = lima_gem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = lima_gem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = lima_gem_mmap,
};

struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
{
	struct lima_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	mutex_init(&bo->lock);
	INIT_LIST_HEAD(&bo->va);
	bo->base.map_wc = true;
	bo->base.base.funcs = &lima_gem_funcs;

	return &bo->base.base;
}

int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
	struct drm_gem_object *obj;
	struct lima_bo *bo;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	bo = to_lima_bo(obj);

	*va = lima_vm_get_va(vm, bo);

	*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_put(obj);
	return 0;
}

static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
			    bool write, bool explicit)
{
	int err = 0;

	if (!write) {
		err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
		if (err)
			return err;
	}

	/* explicit sync uses user-passed dependency fences */
	if (explicit)
		return 0;

	return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
}

static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
		struct dma_fence *fence = NULL;

		if (!submit->in_sync[i])
			continue;

		err = drm_syncobj_find_fence(file, submit->in_sync[i],
					     0, 0, &fence);
		if (err)
			return err;

		err = drm_gem_fence_array_add(&submit->task->deps, fence);
		if (err) {
			dma_fence_put(fence);
			return err;
		}
	}

	return 0;
}

int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
{
	int i, err = 0;
	struct ww_acquire_ctx ctx;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;
	struct drm_syncobj *out_sync = NULL;
	struct dma_fence *fence;
	struct lima_bo **bos = submit->lbos;

	if (submit->out_sync) {
		out_sync = drm_syncobj_find(file, submit->out_sync);
		if (!out_sync)
			return -ENOENT;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct lima_bo *bo;

		obj = drm_gem_object_lookup(file, submit->bos[i].handle);
		if (!obj) {
			err = -ENOENT;
			goto err_out0;
		}

		bo = to_lima_bo(obj);

		/* increase the refcount of the GPU VA mapping so it cannot be
		 * unmapped while the task executes; it is decreased when the
		 * task is done
		 */
		err = lima_vm_bo_add(vm, bo, false);
		if (err) {
			drm_gem_object_put(obj);
			goto err_out0;
		}

		bos[i] = bo;
	}

	err = drm_gem_lock_reservations((struct drm_gem_object **)bos,
					submit->nr_bos, &ctx);
	if (err)
		goto err_out0;

	err = lima_sched_task_init(
		submit->task, submit->ctx->context + submit->pipe,
		bos, submit->nr_bos, vm);
	if (err)
		goto err_out1;

	err = lima_gem_add_deps(file, submit);
	if (err)
		goto err_out2;

	for (i = 0; i < submit->nr_bos; i++) {
		err = lima_gem_sync_bo(
			submit->task, bos[i],
			submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
			submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
		if (err)
			goto err_out2;
	}

	fence = lima_sched_context_queue_task(
		submit->ctx->context + submit->pipe, submit->task);

	for (i = 0; i < submit->nr_bos; i++) {
		if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
			dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence);
		else
			dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence);
	}

	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);

	for (i = 0; i < submit->nr_bos; i++)
		drm_gem_object_put(&bos[i]->base.base);

	if (out_sync) {
		drm_syncobj_replace_fence(out_sync, fence);
		drm_syncobj_put(out_sync);
	}

	dma_fence_put(fence);

	return 0;

err_out2:
	lima_sched_task_fini(submit->task);
err_out1:
	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);
err_out0:
	for (i = 0; i < submit->nr_bos; i++) {
		if (!bos[i])
			break;
		lima_vm_bo_del(vm, bos[i]);
		drm_gem_object_put(&bos[i]->base.base);
	}
	if (out_sync)
		drm_syncobj_put(out_sync);
	return err;
}

int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
{
	bool write = op & LIMA_GEM_WAIT_WRITE;
	long ret, timeout;

	if (!op)
		return 0;

	timeout = drm_timeout_abs_to_jiffies(timeout_ns);

	ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
	if (ret == -ETIME)
		ret = timeout ? -ETIMEDOUT : -EBUSY;

	return ret;
}
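
Notable API differences between the v5.14.15 and v6.2 versions of this file:

- vmap moved from struct dma_buf_map to struct iosys_map (linux/iosys-map.h).
- The drm_gem_shmem helpers wired into lima_gem_funcs became the
  drm_gem_shmem_object_* wrappers, drm_gem_shmem_free_object() became
  drm_gem_shmem_free(), and an explicit .vm_ops = &drm_gem_shmem_vm_ops
  entry was added.
- lima_gem_create_object() returns ERR_PTR(-ENOMEM) instead of NULL on
  allocation failure.
- lima_gem_sync_bo() reserves a fence slot unconditionally with
  dma_resv_reserve_fences() instead of calling dma_resv_reserve_shared()
  only for readers.
- Dependencies are tracked by the scheduler via drm_sched_job_add_dependency()
  and drm_sched_job_add_implicit_dependencies() rather than the
  drm_gem_fence_array_add() helpers and task->deps.
- Completed fences are published with dma_resv_add_fence() plus a
  DMA_RESV_USAGE_WRITE/READ usage argument instead of the
  dma_resv_add_excl_fence()/dma_resv_add_shared_fence() pair.
- lima_sched_context_queue_task() no longer takes the context argument.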