Linux Audio

Check our new training course

Loading...
Linux v4.6 — drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c (first copy)
  1/*
  2 * Copyright (C) 2015 Etnaviv Project
  3 *
  4 * This program is free software; you can redistribute it and/or modify it
  5 * under the terms of the GNU General Public License version 2 as published by
  6 * the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope that it will be useful, but WITHOUT
  9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 11 * more details.
 12 *
 13 * You should have received a copy of the GNU General Public License along with
 14 * this program.  If not, see <http://www.gnu.org/licenses/>.
 15 */
 16
 
 17#include <linux/reservation.h>
 
 
 18#include "etnaviv_drv.h"
 19#include "etnaviv_gpu.h"
 20#include "etnaviv_gem.h"
 
 
 21
 22/*
 23 * Cmdstream submission:
 24 */
 25
 26#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
 27/* make sure these don't conflict w/ ETNAVIV_SUBMIT_BO_x */
 28#define BO_LOCKED   0x4000
 29#define BO_PINNED   0x2000
 30
 31static inline void __user *to_user_ptr(u64 address)
 32{
 33	return (void __user *)(uintptr_t)address;
 34}
 35
 36static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
 37		struct etnaviv_gpu *gpu, size_t nr)
 38{
 39	struct etnaviv_gem_submit *submit;
 40	size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit));
 41
 42	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 43	if (submit) {
 44		submit->dev = dev;
 45		submit->gpu = gpu;
 46
 47		/* initially, until copy_from_user() and bo lookup succeeds: */
 48		submit->nr_bos = 0;
 49
 50		ww_acquire_init(&submit->ticket, &reservation_ww_class);
 
 
 
 
 
 51	}
 
 
 
 
 52
 53	return submit;
 54}
 55
/*
 * Resolve the userspace-supplied GEM handles in submit_bos[] into
 * referenced object pointers in submit->bos[].  On any error, nr_bos
 * records how many entries were populated so cleanup only drops the
 * references actually taken.  Returns 0 or -EINVAL on bad flags/handle.
 */
 56static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
 57	struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
 58	unsigned nr_bos)
 59{
 60	struct drm_etnaviv_gem_submit_bo *bo;
 61	unsigned i;
 62	int ret = 0;
 63
 64	spin_lock(&file->table_lock);
 65
 66	for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
 67		struct drm_gem_object *obj;
 68
 69		if (bo->flags & BO_INVALID_FLAGS) {
 70			DRM_ERROR("invalid flags: %x\n", bo->flags);
 71			ret = -EINVAL;
 72			goto out_unlock;
 73		}
 74
 75		submit->bos[i].flags = bo->flags;
 76
 77		/* normally use drm_gem_object_lookup(), but for bulk lookup
 78		 * all under single table_lock just hit object_idr directly:
 79		 */
 80		obj = idr_find(&file->object_idr, bo->handle);
 81		if (!obj) {
 82			DRM_ERROR("invalid handle %u at index %u\n",
 83				  bo->handle, i);
 84			ret = -EINVAL;
 85			goto out_unlock;
 86		}
 87
 88		/*
 89		 * Take a refcount on the object. The file table lock
 90		 * prevents the object_idr's refcount on this being dropped.
 91		 */
 92		drm_gem_object_reference(obj);
 93
 94		submit->bos[i].obj = to_etnaviv_bo(obj);
 95	}
 96
 97out_unlock:
	/* i == number of slots successfully filled (partial on error) */
 98	submit->nr_bos = i;
 99	spin_unlock(&file->table_lock);
100
101	return ret;
102}
103
/*
 * Drop the reservation ww_mutex on BO i if this submit holds it; the
 * BO_LOCKED bookkeeping flag makes repeated calls safe no-ops.
 */
104static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
105{
106	if (submit->bos[i].flags & BO_LOCKED) {
107		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
108
109		ww_mutex_unlock(&etnaviv_obj->resv->lock);
110		submit->bos[i].flags &= ~BO_LOCKED;
111	}
112}
113
/*
 * Lock the reservation object of every BO in the submit under the
 * wound/wait ticket taken in submit_create().  On -EDEADLK (we were
 * wounded) all locks are dropped, the contended BO is slow-locked, and
 * the whole set is retried; BO_LOCKED marks which entries we hold so
 * nothing is unlocked or relocked twice.  Returns 0 or a negative errno.
 */
114static int submit_lock_objects(struct etnaviv_gem_submit *submit)

115{
116	int contended, slow_locked = -1, i, ret = 0;
117
118retry:
119	for (i = 0; i < submit->nr_bos; i++) {
120		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
121
122		if (slow_locked == i)
123			slow_locked = -1;
124
125		contended = i;
126
127		if (!(submit->bos[i].flags & BO_LOCKED)) {
128			ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
129					&submit->ticket);
			/* -EALREADY: same BO listed twice by userspace */
130			if (ret == -EALREADY)
131				DRM_ERROR("BO at index %u already on submit list\n",
132					  i);
133			if (ret)
134				goto fail;
135			submit->bos[i].flags |= BO_LOCKED;
136		}
137	}
138
139	ww_acquire_done(&submit->ticket);
140
141	return 0;
142
143fail:
	/* unwind everything locked so far, including index 0 */
144	for (; i >= 0; i--)
145		submit_unlock_object(submit, i);
146
	/* a slow-locked BO at index 0 is always covered by the loop above */
147	if (slow_locked > 0)
148		submit_unlock_object(submit, slow_locked);
149
150	if (ret == -EDEADLK) {
151		struct etnaviv_gem_object *etnaviv_obj;
152
153		etnaviv_obj = submit->bos[contended].obj;
154
155		/* we lost out in a seqno race, lock and retry.. */
156		ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
157				&submit->ticket);
158		if (!ret) {
159			submit->bos[contended].flags |= BO_LOCKED;
160			slow_locked = contended;
161			goto retry;
162		}
163	}
164
165	return ret;
166}
167
168static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
169{
170	unsigned int context = submit->gpu->fence_context;
171	int i, ret = 0;
172
173	for (i = 0; i < submit->nr_bos; i++) {
174		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
175		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
176
177		ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write);
178		if (ret)
179			break;
180	}
181
182	return ret;
183}
184
185static void submit_unpin_objects(struct etnaviv_gem_submit *submit)
186{
187	int i;
188
189	for (i = 0; i < submit->nr_bos; i++) {
190		if (submit->bos[i].flags & BO_PINNED)
191			etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
 
 
 
 
 
 
192
193		submit->bos[i].mapping = NULL;
194		submit->bos[i].flags &= ~BO_PINNED;
195	}
196}
197
/*
 * Acquire a GPU VRAM mapping for every BO in the submit, recording it
 * in bos[i].mapping and marking the slot BO_PINNED.  Stops at the first
 * failure; already-pinned entries are released by submit_unpin_objects().
 */
198static int submit_pin_objects(struct etnaviv_gem_submit *submit)
199{
200	int i, ret = 0;
201
202	for (i = 0; i < submit->nr_bos; i++) {
203		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
204		struct etnaviv_vram_mapping *mapping;
205
206		mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
207						  submit->gpu);
208		if (IS_ERR(mapping)) {
209			ret = PTR_ERR(mapping);
210			break;
211		}

212
213		submit->bos[i].flags |= BO_PINNED;
214		submit->bos[i].mapping = mapping;
215	}
216
217	return ret;
218}
219
/*
 * Bounds-checked lookup of submit BO slot idx; stores the slot pointer
 * in *bo.  Returns 0, or -EINVAL if idx is out of range (userspace
 * reloc indices are untrusted).
 */
220static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
221	struct etnaviv_gem_submit_bo **bo)
222{
223	if (idx >= submit->nr_bos) {
224		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
225				idx, submit->nr_bos);
226		return -EINVAL;
227	}
228
229	*bo = &submit->bos[idx];
230
231	return 0;
232}
233
234/* process the reloc's and patch up the cmdstream as needed: */
235static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
236		u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
237		u32 nr_relocs)
238{
239	u32 i, last_offset = 0;
240	u32 *ptr = stream;
241	int ret;
242
243	for (i = 0; i < nr_relocs; i++) {
244		const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
245		struct etnaviv_gem_submit_bo *bo;
246		u32 off;
247
248		if (unlikely(r->flags)) {
249			DRM_ERROR("invalid reloc flags\n");
250			return -EINVAL;
251		}
252
253		if (r->submit_offset % 4) {
254			DRM_ERROR("non-aligned reloc offset: %u\n",
255				  r->submit_offset);
256			return -EINVAL;
257		}
258
259		/* offset in dwords: */
260		off = r->submit_offset / 4;
261
262		if ((off >= size ) ||
263				(off < last_offset)) {
264			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
265			return -EINVAL;
266		}
267
268		ret = submit_bo(submit, r->reloc_idx, &bo);
269		if (ret)
270			return ret;
271
272		if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
273			DRM_ERROR("relocation %u outside object", i);
274			return -EINVAL;
275		}
276
277		ptr[off] = bo->mapping->iova + r->reloc_offset;
278
279		last_offset = off;
280	}
281
282	return 0;
283}
284
285static void submit_cleanup(struct etnaviv_gem_submit *submit)
 
286{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
287	unsigned i;
288
 
 
 
 
 
 
289	for (i = 0; i < submit->nr_bos; i++) {
290		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
291
 
 
 
 
 
 
 
 
 
292		submit_unlock_object(submit, i);
293		drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
294	}
295
296	ww_acquire_fini(&submit->ticket);
 
 
 
 
 
 
 
 
 
 
 
297	kfree(submit);
298}
299
 
 
 
 
 
300int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
301		struct drm_file *file)
302{
 
303	struct etnaviv_drm_private *priv = dev->dev_private;
304	struct drm_etnaviv_gem_submit *args = data;
305	struct drm_etnaviv_gem_submit_reloc *relocs;
 
306	struct drm_etnaviv_gem_submit_bo *bos;
307	struct etnaviv_gem_submit *submit;
308	struct etnaviv_cmdbuf *cmdbuf;
309	struct etnaviv_gpu *gpu;
 
 
 
310	void *stream;
311	int ret;
312
313	if (args->pipe >= ETNA_MAX_PIPES)
314		return -EINVAL;
315
316	gpu = priv->gpu[args->pipe];
317	if (!gpu)
318		return -ENXIO;
319
320	if (args->stream_size % 4) {
321		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
322			  args->stream_size);
323		return -EINVAL;
324	}
325
326	if (args->exec_state != ETNA_PIPE_3D &&
327	    args->exec_state != ETNA_PIPE_2D &&
328	    args->exec_state != ETNA_PIPE_VG) {
329		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
330		return -EINVAL;
331	}
332
 
 
 
 
 
333	/*
334	 * Copy the command submission and bo array to kernel space in
335	 * one go, and do this outside of any locks.
336	 */
337	bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
338	relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
339	stream = drm_malloc_ab(1, args->stream_size);
340	cmdbuf = etnaviv_gpu_cmdbuf_new(gpu, ALIGN(args->stream_size, 8) + 8,
341					args->nr_bos);
342	if (!bos || !relocs || !stream || !cmdbuf) {
343		ret = -ENOMEM;
344		goto err_submit_cmds;
345	}
346
347	cmdbuf->exec_state = args->exec_state;
348	cmdbuf->ctx = file->driver_priv;
349
350	ret = copy_from_user(bos, to_user_ptr(args->bos),
351			     args->nr_bos * sizeof(*bos));
352	if (ret) {
353		ret = -EFAULT;
354		goto err_submit_cmds;
355	}
356
357	ret = copy_from_user(relocs, to_user_ptr(args->relocs),
358			     args->nr_relocs * sizeof(*relocs));
359	if (ret) {
360		ret = -EFAULT;
361		goto err_submit_cmds;
362	}
363
364	ret = copy_from_user(stream, to_user_ptr(args->stream),
 
 
 
 
 
 
 
365			     args->stream_size);
366	if (ret) {
367		ret = -EFAULT;
368		goto err_submit_cmds;
369	}
370
371	submit = submit_create(dev, gpu, args->nr_bos);
 
 
 
 
 
 
 
 
 
 
372	if (!submit) {
373		ret = -ENOMEM;
374		goto err_submit_cmds;
375	}
376
377	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
 
378	if (ret)
379		goto err_submit_objects;
380
381	ret = submit_lock_objects(submit);
 
 
 
 
382	if (ret)
383		goto err_submit_objects;
384
385	if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
386				      relocs, args->nr_relocs)) {
387		ret = -EINVAL;
388		goto err_submit_objects;
389	}
390
391	ret = submit_fence_sync(submit);
392	if (ret)
393		goto err_submit_objects;
 
 
 
 
394
395	ret = submit_pin_objects(submit);
396	if (ret)
397		goto out;
398
399	ret = submit_reloc(submit, stream, args->stream_size / 4,
400			   relocs, args->nr_relocs);
401	if (ret)
402		goto out;
 
 
 
 
 
 
 
 
 
 
403
404	memcpy(cmdbuf->vaddr, stream, args->stream_size);
405	cmdbuf->user_size = ALIGN(args->stream_size, 8);
 
406
407	ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
408	if (ret == 0)
409		cmdbuf = NULL;
410
411	args->fence = submit->fence;
412
413out:
414	submit_unpin_objects(submit);
 
 
 
 
 
 
 
 
 
 
 
 
415
416	/*
417	 * If we're returning -EAGAIN, it may be due to the userptr code
418	 * wanting to run its workqueue outside of any locks. Flush our
419	 * workqueue to ensure that it is run in a timely manner.
420	 */
421	if (ret == -EAGAIN)
422		flush_workqueue(priv->wq);
423
424err_submit_objects:
425	submit_cleanup(submit);
 
 
 
426
427err_submit_cmds:
428	/* if we still own the cmdbuf */
429	if (cmdbuf)
430		etnaviv_gpu_cmdbuf_free(cmdbuf);
431	if (stream)
432		drm_free_large(stream);
433	if (bos)
434		drm_free_large(bos);
435	if (relocs)
436		drm_free_large(relocs);
 
 
437
438	return ret;
439}
Linux v4.17 — drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c (second copy, same file at a later kernel release)
  1/*
  2 * Copyright (C) 2015 Etnaviv Project
  3 *
  4 * This program is free software; you can redistribute it and/or modify it
  5 * under the terms of the GNU General Public License version 2 as published by
  6 * the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope that it will be useful, but WITHOUT
  9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 11 * more details.
 12 *
 13 * You should have received a copy of the GNU General Public License along with
 14 * this program.  If not, see <http://www.gnu.org/licenses/>.
 15 */
 16
 17#include <linux/dma-fence-array.h>
 18#include <linux/reservation.h>
 19#include <linux/sync_file.h>
 20#include "etnaviv_cmdbuf.h"
 21#include "etnaviv_drv.h"
 22#include "etnaviv_gpu.h"
 23#include "etnaviv_gem.h"
 24#include "etnaviv_perfmon.h"
 25#include "etnaviv_sched.h"
 26
 27/*
 28 * Cmdstream submission:
 29 */
 30
 31#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
 32/* make sure these don't conflict w/ ETNAVIV_SUBMIT_BO_x */
 33#define BO_LOCKED   0x4000
 34#define BO_PINNED   0x2000
 35
 
 
 
 
 
 36static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
 37		struct etnaviv_gpu *gpu, size_t nr_bos, size_t nr_pmrs)
 38{
 39	struct etnaviv_gem_submit *submit;
 40	size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));
 
 
 
 
 
 41
 42	submit = kzalloc(sz, GFP_KERNEL);
 43	if (!submit)
 44		return NULL;
 45
 46	submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request),
 47			       GFP_KERNEL);
 48	if (!submit->pmrs) {
 49		kfree(submit);
 50		return NULL;
 51	}
 52	submit->nr_pmrs = nr_pmrs;
 53
 54	submit->gpu = gpu;
 55	kref_init(&submit->refcount);
 56
 57	return submit;
 58}
 59
/*
 * Resolve the userspace-supplied GEM handles in submit_bos[] into
 * referenced object pointers in submit->bos[].  On any error, nr_bos
 * records how many entries were populated so cleanup only drops the
 * references actually taken.  Returns 0 or -EINVAL on bad flags/handle.
 */
 60static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
 61	struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
 62	unsigned nr_bos)
 63{
 64	struct drm_etnaviv_gem_submit_bo *bo;
 65	unsigned i;
 66	int ret = 0;
 67
 68	spin_lock(&file->table_lock);
 69
 70	for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
 71		struct drm_gem_object *obj;
 72
 73		if (bo->flags & BO_INVALID_FLAGS) {
 74			DRM_ERROR("invalid flags: %x\n", bo->flags);
 75			ret = -EINVAL;
 76			goto out_unlock;
 77		}
 78
 79		submit->bos[i].flags = bo->flags;
 80
 81		/* normally use drm_gem_object_lookup(), but for bulk lookup
 82		 * all under single table_lock just hit object_idr directly:
 83		 */
 84		obj = idr_find(&file->object_idr, bo->handle);
 85		if (!obj) {
 86			DRM_ERROR("invalid handle %u at index %u\n",
 87				  bo->handle, i);
 88			ret = -EINVAL;
 89			goto out_unlock;
 90		}
 91
 92		/*
 93		 * Take a refcount on the object. The file table lock
 94		 * prevents the object_idr's refcount on this being dropped.
 95		 */
 96		drm_gem_object_get(obj);
 97
 98		submit->bos[i].obj = to_etnaviv_bo(obj);
 99	}
100
101out_unlock:
	/* i == number of slots successfully filled (partial on error) */
102	submit->nr_bos = i;
103	spin_unlock(&file->table_lock);
104
105	return ret;
106}
107
108static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
109{
110	if (submit->bos[i].flags & BO_LOCKED) {
111		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
112
113		ww_mutex_unlock(&etnaviv_obj->resv->lock);
114		submit->bos[i].flags &= ~BO_LOCKED;
115	}
116}
117
/*
 * Lock the reservation object of every BO in the submit under the
 * caller-provided wound/wait ticket.  On -EDEADLK (we were wounded) all
 * locks are dropped, the contended BO is slow-locked, and the whole set
 * is retried; BO_LOCKED marks which entries we hold so nothing is
 * unlocked or relocked twice.  Returns 0 or a negative errno.
 */
118static int submit_lock_objects(struct etnaviv_gem_submit *submit,
119		struct ww_acquire_ctx *ticket)
120{
121	int contended, slow_locked = -1, i, ret = 0;
122
123retry:
124	for (i = 0; i < submit->nr_bos; i++) {
125		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
126
127		if (slow_locked == i)
128			slow_locked = -1;
129
130		contended = i;
131
132		if (!(submit->bos[i].flags & BO_LOCKED)) {
133			ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
134							  ticket);
			/* -EALREADY: same BO listed twice by userspace */
135			if (ret == -EALREADY)
136				DRM_ERROR("BO at index %u already on submit list\n",
137					  i);
138			if (ret)
139				goto fail;
140			submit->bos[i].flags |= BO_LOCKED;
141		}
142	}
143
144	ww_acquire_done(ticket);
145
146	return 0;
147
148fail:
	/* unwind everything locked so far, including index 0 */
149	for (; i >= 0; i--)
150		submit_unlock_object(submit, i);
151
	/* a slow-locked BO at index 0 is always covered by the loop above */
152	if (slow_locked > 0)
153		submit_unlock_object(submit, slow_locked);
154
155	if (ret == -EDEADLK) {
156		struct etnaviv_gem_object *etnaviv_obj;
157
158		etnaviv_obj = submit->bos[contended].obj;
159
160		/* we lost out in a seqno race, lock and retry.. */
161		ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
162						       ticket);
163		if (!ret) {
164			submit->bos[contended].flags |= BO_LOCKED;
165			slow_locked = contended;
166			goto retry;
167		}
168	}
169
170	return ret;
171}
172
/*
 * Collect the implicit-sync dependencies for each BO from its
 * reservation object: readers reserve a shared-fence slot and depend on
 * the exclusive fence; writers depend on all fences.  Skipped entirely
 * per-BO when the submit uses ETNA_SUBMIT_NO_IMPLICIT (explicit sync).
 * Must be called with the BOs locked.  Returns 0 or a negative errno.
 */
173static int submit_fence_sync(struct etnaviv_gem_submit *submit)
174{

175	int i, ret = 0;
176
177	for (i = 0; i < submit->nr_bos; i++) {
178		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
179		struct reservation_object *robj = bo->obj->resv;
180
		/* reader: make room for our shared fence up front */
181		if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
182			ret = reservation_object_reserve_shared(robj);
183			if (ret)
184				return ret;
185		}
186
187		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
188			continue;
189
190		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
191			ret = reservation_object_get_fences_rcu(robj, &bo->excl,
192								&bo->nr_shared,
193								&bo->shared);
194			if (ret)
195				return ret;
196		} else {
197			bo->excl = reservation_object_get_excl_rcu(robj);
198		}
199



200	}
201
202	return ret;
203}
204
205static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
206{
207	int i;
208
209	for (i = 0; i < submit->nr_bos; i++) {
210		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
211
212		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
213			reservation_object_add_excl_fence(etnaviv_obj->resv,
214							  submit->out_fence);
215		else
216			reservation_object_add_shared_fence(etnaviv_obj->resv,
217							    submit->out_fence);
218
219		submit_unlock_object(submit, i);
 
220	}
221}
222
/*
 * Acquire a GPU VRAM mapping for every BO in the submit, bump the BO's
 * gpu_active count, and mark the slot BO_PINNED.  Stops at the first
 * failure; already-pinned entries are released in submit_cleanup().
 */
223static int submit_pin_objects(struct etnaviv_gem_submit *submit)
224{
225	int i, ret = 0;
226
227	for (i = 0; i < submit->nr_bos; i++) {
228		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
229		struct etnaviv_vram_mapping *mapping;
230
231		mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
232						  submit->gpu);
233		if (IS_ERR(mapping)) {
234			ret = PTR_ERR(mapping);
235			break;
236		}
237	atomic_inc(&etnaviv_obj->gpu_active);
238
239	submit->bos[i].flags |= BO_PINNED;
240	submit->bos[i].mapping = mapping;
241	}
242
243	return ret;
244}
245
246static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
247	struct etnaviv_gem_submit_bo **bo)
248{
249	if (idx >= submit->nr_bos) {
250		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
251				idx, submit->nr_bos);
252		return -EINVAL;
253	}
254
255	*bo = &submit->bos[idx];
256
257	return 0;
258}
259
260/* process the reloc's and patch up the cmdstream as needed: */
/*
 * For each relocation entry, validate it against the (untrusted)
 * userspace input and write the BO's GPU address plus reloc_offset into
 * the stream at submit_offset.  @size is the stream length in dwords.
 * Returns 0 or -EINVAL on malformed relocation data.
 */
261static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
262		u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
263		u32 nr_relocs)
264{
265	u32 i, last_offset = 0;
266	u32 *ptr = stream;
267	int ret;
268
269	for (i = 0; i < nr_relocs; i++) {
270		const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
271		struct etnaviv_gem_submit_bo *bo;
272		u32 off;
273
274		if (unlikely(r->flags)) {
275			DRM_ERROR("invalid reloc flags\n");
276			return -EINVAL;
277		}
278
279		if (r->submit_offset % 4) {
280			DRM_ERROR("non-aligned reloc offset: %u\n",
281				  r->submit_offset);
282			return -EINVAL;
283		}
284
285		/* offset in dwords: */
286		off = r->submit_offset / 4;
287
		/* relocations must stay in-bounds and monotonically ordered */
288		if ((off >= size ) ||
289				(off < last_offset)) {
290			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
291			return -EINVAL;
292		}
293
294		ret = submit_bo(submit, r->reloc_idx, &bo);
295		if (ret)
296			return ret;
297
		/* the patched u32 must fit entirely inside the target BO */
298		if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
299			DRM_ERROR("relocation %u outside object\n", i);
300			return -EINVAL;
301		}
302
303		ptr[off] = bo->mapping->iova + r->reloc_offset;
304
305		last_offset = off;
306	}
307
308	return 0;
309}
310
/*
 * Validate the userspace performance monitor requests and copy them
 * into submit->pmrs[] (sized nr_pmrs by submit_create()).  Returns 0 or
 * -EINVAL on a bad BO index, offset, flags, or domain/signal.
 */
311static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
312		u32 exec_state, const struct drm_etnaviv_gem_submit_pmr *pmrs)
313{
314	u32 i;
315
316	for (i = 0; i < submit->nr_pmrs; i++) {
317		const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
318		struct etnaviv_gem_submit_bo *bo;
319		int ret;
320
321		ret = submit_bo(submit, r->read_idx, &bo);
322		if (ret)
323			return ret;
324
325		/* at offset 0 a sequence number gets stored used for userspace sync */
326		if (r->read_offset == 0) {
327			DRM_ERROR("perfmon request: offset is 0");
328			return -EINVAL;
329		}
330
331		if (r->read_offset >= bo->obj->base.size - sizeof(u32)) {
332			DRM_ERROR("perfmon request: offset %u outside object", i);
333			return -EINVAL;
334		}
335
336		if (r->flags & ~(ETNA_PM_PROCESS_PRE | ETNA_PM_PROCESS_POST)) {
337			DRM_ERROR("perfmon request: flags are not valid");
338			return -EINVAL;
339		}
340
341		if (etnaviv_pm_req_validate(r, exec_state)) {
342			DRM_ERROR("perfmon request: domain or signal not valid");
343			return -EINVAL;
344		}
345
346		submit->pmrs[i].flags = r->flags;
347		submit->pmrs[i].domain = r->domain;
348		submit->pmrs[i].signal = r->signal;
349		submit->pmrs[i].sequence = r->sequence;
350		submit->pmrs[i].offset = r->read_offset;
		/* NOTE(review): etnaviv_gem_vmap() result is not checked for
		 * NULL here — presumably the BO is already vmapped/pinnable at
		 * this point, but worth confirming against the vmap path. */
351		submit->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
352	}
353
354	return 0;
355}
356
/*
 * kref release callback: tear down a submit once the last reference is
 * dropped.  Releases the runtime-PM reference, the cmdbuf suballocation,
 * every BO's pin/lock/GEM reference, the out-fence IDR entry, the
 * in/out fences, and finally the pmr array and the submit itself.
 */
357static void submit_cleanup(struct kref *kref)
358{
359	struct etnaviv_gem_submit *submit =
360			container_of(kref, struct etnaviv_gem_submit, refcount);
361	unsigned i;
362
363	if (submit->runtime_resumed)
364		pm_runtime_put_autosuspend(submit->gpu->dev);
365
366	if (submit->cmdbuf.suballoc)
367		etnaviv_cmdbuf_free(&submit->cmdbuf);
368
369	for (i = 0; i < submit->nr_bos; i++) {
370		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
371
372		/* unpin all objects */
373		if (submit->bos[i].flags & BO_PINNED) {
374			etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
375			atomic_dec(&etnaviv_obj->gpu_active);
376			submit->bos[i].mapping = NULL;
377			submit->bos[i].flags &= ~BO_PINNED;
378		}
379
380		/* if the GPU submit failed, objects might still be locked */
381		submit_unlock_object(submit, i);
382		drm_gem_object_put_unlocked(&etnaviv_obj->base);
383	}
384
	/* wake anyone waiting for a fence slot / submit completion */
385	wake_up_all(&submit->gpu->fence_event);
386
387	if (submit->in_fence)
388		dma_fence_put(submit->in_fence);
389	if (submit->out_fence) {
390		/* first remove from IDR, so fence can not be found anymore */
391		mutex_lock(&submit->gpu->fence_idr_lock);
392		idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
393		mutex_unlock(&submit->gpu->fence_idr_lock);
394		dma_fence_put(submit->out_fence);
395	}
396	kfree(submit->pmrs);
397	kfree(submit);
398}
399
/*
 * Drop one reference on the submit; submit_cleanup() runs when the
 * last reference goes away.
 */
400void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
401{
402	kref_put(&submit->refcount, submit_cleanup);
403}
404
405int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
406		struct drm_file *file)
407{
408	struct etnaviv_file_private *ctx = file->driver_priv;
409	struct etnaviv_drm_private *priv = dev->dev_private;
410	struct drm_etnaviv_gem_submit *args = data;
411	struct drm_etnaviv_gem_submit_reloc *relocs;
412	struct drm_etnaviv_gem_submit_pmr *pmrs;
413	struct drm_etnaviv_gem_submit_bo *bos;
414	struct etnaviv_gem_submit *submit;
 
415	struct etnaviv_gpu *gpu;
416	struct sync_file *sync_file = NULL;
417	struct ww_acquire_ctx ticket;
418	int out_fence_fd = -1;
419	void *stream;
420	int ret;
421
422	if (args->pipe >= ETNA_MAX_PIPES)
423		return -EINVAL;
424
425	gpu = priv->gpu[args->pipe];
426	if (!gpu)
427		return -ENXIO;
428
429	if (args->stream_size % 4) {
430		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
431			  args->stream_size);
432		return -EINVAL;
433	}
434
435	if (args->exec_state != ETNA_PIPE_3D &&
436	    args->exec_state != ETNA_PIPE_2D &&
437	    args->exec_state != ETNA_PIPE_VG) {
438		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
439		return -EINVAL;
440	}
441
442	if (args->flags & ~ETNA_SUBMIT_FLAGS) {
443		DRM_ERROR("invalid flags: 0x%x\n", args->flags);
444		return -EINVAL;
445	}
446
447	/*
448	 * Copy the command submission and bo array to kernel space in
449	 * one go, and do this outside of any locks.
450	 */
451	bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
452	relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
453	pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
454	stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
455	if (!bos || !relocs || !pmrs || !stream) {
 
456		ret = -ENOMEM;
457		goto err_submit_cmds;
458	}
459
460	ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
 
 
 
461			     args->nr_bos * sizeof(*bos));
462	if (ret) {
463		ret = -EFAULT;
464		goto err_submit_cmds;
465	}
466
467	ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
468			     args->nr_relocs * sizeof(*relocs));
469	if (ret) {
470		ret = -EFAULT;
471		goto err_submit_cmds;
472	}
473
474	ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
475			     args->nr_pmrs * sizeof(*pmrs));
476	if (ret) {
477		ret = -EFAULT;
478		goto err_submit_cmds;
479	}
480
481	ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
482			     args->stream_size);
483	if (ret) {
484		ret = -EFAULT;
485		goto err_submit_cmds;
486	}
487
488	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
489		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
490		if (out_fence_fd < 0) {
491			ret = out_fence_fd;
492			goto err_submit_cmds;
493		}
494	}
495
496	ww_acquire_init(&ticket, &reservation_ww_class);
497
498	submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
499	if (!submit) {
500		ret = -ENOMEM;
501		goto err_submit_ww_acquire;
502	}
503
504	ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &submit->cmdbuf,
505				  ALIGN(args->stream_size, 8) + 8);
506	if (ret)
507		goto err_submit_objects;
508
509	submit->cmdbuf.ctx = file->driver_priv;
510	submit->exec_state = args->exec_state;
511	submit->flags = args->flags;
512
513	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
514	if (ret)
515		goto err_submit_objects;
516
517	if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
518				      relocs, args->nr_relocs)) {
519		ret = -EINVAL;
520		goto err_submit_objects;
521	}
522
523	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
524		submit->in_fence = sync_file_get_fence(args->fence_fd);
525		if (!submit->in_fence) {
526			ret = -EINVAL;
527			goto err_submit_objects;
528		}
529	}
530
531	ret = submit_pin_objects(submit);
532	if (ret)
533		goto err_submit_objects;
534
535	ret = submit_reloc(submit, stream, args->stream_size / 4,
536			   relocs, args->nr_relocs);
537	if (ret)
538		goto err_submit_objects;
539
540	ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
541	if (ret)
542		goto err_submit_objects;
543
544	memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
545
546	ret = submit_lock_objects(submit, &ticket);
547	if (ret)
548		goto err_submit_objects;
549
550	ret = submit_fence_sync(submit);
551	if (ret)
552		goto err_submit_objects;
553
554	ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
555	if (ret)
556		goto err_submit_objects;
557
558	submit_attach_object_fences(submit);
559
560	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
561		/*
562		 * This can be improved: ideally we want to allocate the sync
563		 * file before kicking off the GPU job and just attach the
564		 * fence to the sync file here, eliminating the ENOMEM
565		 * possibility at this stage.
566		 */
567		sync_file = sync_file_create(submit->out_fence);
568		if (!sync_file) {
569			ret = -ENOMEM;
570			goto err_submit_objects;
571		}
572		fd_install(out_fence_fd, sync_file->file);
573	}
574
575	args->fence_fd = out_fence_fd;
576	args->fence = submit->out_fence_id;
 
 
 
 
 
577
578err_submit_objects:
579	etnaviv_submit_put(submit);
580
581err_submit_ww_acquire:
582	ww_acquire_fini(&ticket);
583
584err_submit_cmds:
585	if (ret && (out_fence_fd >= 0))
586		put_unused_fd(out_fence_fd);
 
587	if (stream)
588		kvfree(stream);
589	if (bos)
590		kvfree(bos);
591	if (relocs)
592		kvfree(relocs);
593	if (pmrs)
594		kvfree(pmrs);
595
596	return ret;
597}