/*
 * drivers/gpu/drm/msm/msm_gem_submit.c
 * Copy 1 of 2: as shipped in Linux v4.6 (the v4.10.11 copy follows below).
 */
  1/*
  2 * Copyright (C) 2013 Red Hat
  3 * Author: Rob Clark <robdclark@gmail.com>
  4 *
  5 * This program is free software; you can redistribute it and/or modify it
  6 * under the terms of the GNU General Public License version 2 as published by
  7 * the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it will be useful, but WITHOUT
 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 12 * more details.
 13 *
 14 * You should have received a copy of the GNU General Public License along with
 15 * this program.  If not, see <http://www.gnu.org/licenses/>.
 16 */
 17
 
 
 18#include "msm_drv.h"
 19#include "msm_gpu.h"
 20#include "msm_gem.h"
 21
 22/*
 23 * Cmdstream submission:
 24 */
 25
 26/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
 27#define BO_VALID    0x8000
 28#define BO_LOCKED   0x4000
 29#define BO_PINNED   0x2000
 30
 31static inline void __user *to_user_ptr(u64 address)
 32{
 33	return (void __user *)(uintptr_t)address;
 34}
 35
 36static struct msm_gem_submit *submit_create(struct drm_device *dev,
 37		struct msm_gpu *gpu, int nr)
 38{
 39	struct msm_gem_submit *submit;
 40	int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
 
 41
 42	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 43	if (submit) {
 44		submit->dev = dev;
 45		submit->gpu = gpu;
 46
 47		/* initially, until copy_from_user() and bo lookup succeeds: */
 48		submit->nr_bos = 0;
 49		submit->nr_cmds = 0;
 50
 51		INIT_LIST_HEAD(&submit->bo_list);
 52		ww_acquire_init(&submit->ticket, &reservation_ww_class);
 53	}
 
 
 
 
 
 
 
 
 
 
 54
 55	return submit;
 56}
 57
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 58static int submit_lookup_objects(struct msm_gem_submit *submit,
 59		struct drm_msm_gem_submit *args, struct drm_file *file)
 60{
 61	unsigned i;
 62	int ret = 0;
 63
 64	spin_lock(&file->table_lock);
 
 65
 66	for (i = 0; i < args->nr_bos; i++) {
 67		struct drm_msm_gem_submit_bo submit_bo;
 68		struct drm_gem_object *obj;
 69		struct msm_gem_object *msm_obj;
 70		void __user *userptr =
 71			to_user_ptr(args->bos + (i * sizeof(submit_bo)));
 72
 73		ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
 74		if (ret) {
 75			ret = -EFAULT;
 76			goto out_unlock;
 
 
 
 
 
 
 
 
 
 
 77		}
 78
 79		if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
 
 80			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
 81			ret = -EINVAL;
 82			goto out_unlock;
 83		}
 84
 85		submit->bos[i].flags = submit_bo.flags;
 86		/* in validate_objects() we figure out if this is true: */
 87		submit->bos[i].iova  = submit_bo.presumed;
 88
 89		/* normally use drm_gem_object_lookup(), but for bulk lookup
 90		 * all under single table_lock just hit object_idr directly:
 91		 */
 92		obj = idr_find(&file->object_idr, submit_bo.handle);
 93		if (!obj) {
 94			DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
 95			ret = -EINVAL;
 96			goto out_unlock;
 97		}
 98
 99		msm_obj = to_msm_bo(obj);
100
101		if (!list_empty(&msm_obj->submit_entry)) {
102			DRM_ERROR("handle %u at index %u already on submit list\n",
103					submit_bo.handle, i);
104			ret = -EINVAL;
105			goto out_unlock;
106		}
107
108		drm_gem_object_reference(obj);
109
110		submit->bos[i].obj = msm_obj;
111
112		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
113	}
114
115out_unlock:
116	submit->nr_bos = i;
117	spin_unlock(&file->table_lock);
118
 
 
 
119	return ret;
120}
121
/*
 * Undo the per-bo state set up during validation: drop the iova pin and
 * the reservation lock if this entry holds them.  If the presumed iova
 * was never confirmed (BO_VALID not set), zero it — presumably so a
 * stale address is not reported back later; confirm against callers.
 */
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
	struct msm_gem_object *msm_obj = submit->bos[i].obj;

	if (submit->bos[i].flags & BO_PINNED)
		msm_gem_put_iova(&msm_obj->base, submit->gpu->id);

	if (submit->bos[i].flags & BO_LOCKED)
		ww_mutex_unlock(&msm_obj->resv->lock);

	if (!(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;

	/* only the lock/pin state is cleared; BO_VALID is left as-is: */
	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
}
137
/* This is where we make sure all the bo's are reserved and pin'd: */
/*
 * Wound/wait locking dance: lock every bo's reservation under
 * submit->ticket and pin its iova.  On -EDEADLK, back off (unlock
 * everything locked so far), take the contended lock with the slow-path
 * primitive, and retry the whole loop.
 */
static int submit_validate_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	/* assume every presumed iova is valid until one proves otherwise: */
	submit->valid = true;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint32_t iova;

		/* once the loop reaches the bo locked by the slow path,
		 * forget it so the fail path won't unlock it twice:
		 */
		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
					&submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}

		/* if locking succeeded, pin bo: */
		ret = msm_gem_get_iova_locked(&msm_obj->base,
				submit->gpu->id, &iova);

		/* this would break the logic in the fail path.. there is no
		 * reason for this to happen, but just to be on the safe side
		 * let's notice if this starts happening in the future:
		 */
		WARN_ON(ret == -EDEADLK);

		if (ret)
			goto fail;

		submit->bos[i].flags |= BO_PINNED;

		if (iova == submit->bos[i].iova) {
			/* userspace's presumed address matches reality */
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = iova;
			submit->bos[i].flags &= ~BO_VALID;
			/* at least one address changed, so the cmdstream
			 * will need reloc processing:
			 */
			submit->valid = false;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	/* unwind everything locked/pinned so far, including entry i: */
	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i);

	/* a slow-locked bo past the failure point is not covered above: */
	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
213
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
214static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
215		struct msm_gem_object **obj, uint32_t *iova, bool *valid)
216{
217	if (idx >= submit->nr_bos) {
218		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
219				idx, submit->nr_bos);
220		return -EINVAL;
221	}
222
223	if (obj)
224		*obj = submit->bos[idx].obj;
225	if (iova)
226		*iova = submit->bos[idx].iova;
227	if (valid)
228		*valid = !!(submit->bos[idx].flags & BO_VALID);
229
230	return 0;
231}
232
/* process the reloc's and patch up the cmdstream as needed: */
/*
 * For each reloc entry: read the descriptor from userspace, validate the
 * offset (dword-aligned, in bounds, sorted ascending), then patch the
 * mapped cmdstream word with the bo's actual iova (shifted and OR'd as
 * requested).  Entries whose presumed address was already correct are
 * skipped.
 */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_vaddr_locked(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc;
		void __user *userptr =
			to_user_ptr(relocs + (i * sizeof(submit_reloc)));
		uint32_t iova, off;
		bool valid;

		ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
		if (ret)
			/* copy_from_user() returns bytes-not-copied */
			return -EFAULT;

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			return -EINVAL;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		/* relocs must be sorted by offset; the same check also
		 * bounds the write into ptr[] below:
		 */
		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			return -EINVAL;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			return ret;

		/* presumed address already correct, nothing to patch: */
		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

	return 0;
}
304
/*
 * Release per-bo state for every looked-up bo: unlock/unpin, take it off
 * the submit list, and drop the reference taken in
 * submit_lookup_objects().  Also ends the ww acquire context.
 *
 * @fail is accepted but not used by this implementation.
 */
static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
{
	unsigned i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_unlock_unpin_bo(submit, i);
		list_del_init(&msm_obj->submit_entry);
		drm_gem_object_unreference(&msm_obj->base);
	}

	ww_acquire_fini(&submit->ticket);
}
318
/*
 * GEM_SUBMIT ioctl: build a submit from the userspace bo and cmd tables,
 * validate everything, apply relocs if needed, and hand the cmdstream to
 * the GPU.  The resulting fence is returned through args->fence.
 *
 * Everything after submit_create() runs under dev->struct_mutex.
 */
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit;
	struct msm_gpu *gpu = priv->gpu;
	unsigned i;
	int ret;

	if (!gpu)
		return -ENXIO;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	if (args->nr_cmds > MAX_CMDS)
		return -EINVAL;

	submit = submit_create(dev, gpu, args->nr_bos);
	if (!submit)
		return -ENOMEM;

	mutex_lock(&dev->struct_mutex);

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_validate_objects(submit);
	if (ret)
		goto out;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
		struct msm_gem_object *msm_obj;
		uint32_t iova;

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			/* copy_from_user() returns bytes-not-copied */
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_cmd.submit_idx,
				&msm_obj, &iova, NULL);
		if (ret)
			goto out;

		if (submit_cmd.size % 4) {
			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
					submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		/* NOTE(review): size and submit_offset are both u32 from
		 * userspace; their sum can wrap and slip past this bound
		 * check — confirm whether the target kernel needs the
		 * 64-bit-math hardening here.
		 */
		if ((submit_cmd.size + submit_cmd.submit_offset) >=
				msm_obj->base.size) {
			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
		submit->cmd[i].idx  = submit_cmd.submit_idx;

		/* if every presumed iova was correct, relocs don't need
		 * to be applied:
		 */
		if (submit->valid)
			continue;

		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
				submit_cmd.nr_relocs, submit_cmd.relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;

	ret = msm_gpu_submit(gpu, submit, ctx);

	args->fence = submit->fence;

out:
	submit_cleanup(submit, !!ret);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/*
 * drivers/gpu/drm/msm/msm_gem_submit.c
 * Copy 2 of 2: as shipped in Linux v4.10.11.
 */
  1/*
  2 * Copyright (C) 2013 Red Hat
  3 * Author: Rob Clark <robdclark@gmail.com>
  4 *
  5 * This program is free software; you can redistribute it and/or modify it
  6 * under the terms of the GNU General Public License version 2 as published by
  7 * the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it will be useful, but WITHOUT
 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 12 * more details.
 13 *
 14 * You should have received a copy of the GNU General Public License along with
 15 * this program.  If not, see <http://www.gnu.org/licenses/>.
 16 */
 17
 18#include <linux/sync_file.h>
 19
 20#include "msm_drv.h"
 21#include "msm_gpu.h"
 22#include "msm_gem.h"
 23
 24/*
 25 * Cmdstream submission:
 26 */
 27
 28/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
 29#define BO_VALID    0x8000   /* is current addr in cmdstream correct/valid? */
 30#define BO_LOCKED   0x4000
 31#define BO_PINNED   0x2000
 32
 
 
 
 
 
 33static struct msm_gem_submit *submit_create(struct drm_device *dev,
 34		struct msm_gpu *gpu, int nr_bos, int nr_cmds)
 35{
 36	struct msm_gem_submit *submit;
 37	int sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
 38			(nr_cmds * sizeof(*submit->cmd));
 39
 40	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 41	if (!submit)
 42		return NULL;
 
 
 
 
 
 43
 44	submit->dev = dev;
 45	submit->gpu = gpu;
 46	submit->fence = NULL;
 47	submit->pid = get_pid(task_pid(current));
 48	submit->cmd = (void *)&submit->bos[nr_bos];
 49
 50	/* initially, until copy_from_user() and bo lookup succeeds: */
 51	submit->nr_bos = 0;
 52	submit->nr_cmds = 0;
 53
 54	INIT_LIST_HEAD(&submit->node);
 55	INIT_LIST_HEAD(&submit->bo_list);
 56	ww_acquire_init(&submit->ticket, &reservation_ww_class);
 57
 58	return submit;
 59}
 60
/*
 * Final teardown of a submit: drop the fence reference, unlink
 * submit->node from whatever list it is on, release the pid reference
 * taken in submit_create(), then free the allocation itself.
 */
void msm_gem_submit_free(struct msm_gem_submit *submit)
{
	dma_fence_put(submit->fence);
	list_del(&submit->node);
	put_pid(submit->pid);
	kfree(submit);
}
 68
/*
 * Non-faulting user copy for use under pagefault_disable(): fails fast
 * instead of sleeping when the user page is not resident.
 *
 * NOTE(review): the return type is unsigned long, so -EFAULT comes back
 * as a large positive value.  The caller only tests for non-zero, which
 * works, but the result must never be compared against negative errnos.
 */
static inline unsigned long __must_check
copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		return __copy_from_user_inatomic(to, from, n);
	return -EFAULT;
}
 76
 77static int submit_lookup_objects(struct msm_gem_submit *submit,
 78		struct drm_msm_gem_submit *args, struct drm_file *file)
 79{
 80	unsigned i;
 81	int ret = 0;
 82
 83	spin_lock(&file->table_lock);
 84	pagefault_disable();
 85
 86	for (i = 0; i < args->nr_bos; i++) {
 87		struct drm_msm_gem_submit_bo submit_bo;
 88		struct drm_gem_object *obj;
 89		struct msm_gem_object *msm_obj;
 90		void __user *userptr =
 91			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
 92
 93		/* make sure we don't have garbage flags, in case we hit
 94		 * error path before flags is initialized:
 95		 */
 96		submit->bos[i].flags = 0;
 97
 98		ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
 99		if (unlikely(ret)) {
100			pagefault_enable();
101			spin_unlock(&file->table_lock);
102			ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
103			if (ret)
104				goto out;
105			spin_lock(&file->table_lock);
106			pagefault_disable();
107		}
108
109		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
110			!(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
111			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
112			ret = -EINVAL;
113			goto out_unlock;
114		}
115
116		submit->bos[i].flags = submit_bo.flags;
117		/* in validate_objects() we figure out if this is true: */
118		submit->bos[i].iova  = submit_bo.presumed;
119
120		/* normally use drm_gem_object_lookup(), but for bulk lookup
121		 * all under single table_lock just hit object_idr directly:
122		 */
123		obj = idr_find(&file->object_idr, submit_bo.handle);
124		if (!obj) {
125			DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
126			ret = -EINVAL;
127			goto out_unlock;
128		}
129
130		msm_obj = to_msm_bo(obj);
131
132		if (!list_empty(&msm_obj->submit_entry)) {
133			DRM_ERROR("handle %u at index %u already on submit list\n",
134					submit_bo.handle, i);
135			ret = -EINVAL;
136			goto out_unlock;
137		}
138
139		drm_gem_object_reference(obj);
140
141		submit->bos[i].obj = msm_obj;
142
143		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
144	}
145
146out_unlock:
147	pagefault_enable();
148	spin_unlock(&file->table_lock);
149
150out:
151	submit->nr_bos = i;
152
153	return ret;
154}
155
/*
 * Undo the per-bo state set up during lock/pin: drop the iova pin and
 * the reservation lock if this entry holds them.  If the presumed iova
 * was never confirmed (BO_VALID not set), zero it — presumably so a
 * stale address is not reported back later; confirm against callers.
 */
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
	struct msm_gem_object *msm_obj = submit->bos[i].obj;

	if (submit->bos[i].flags & BO_PINNED)
		msm_gem_put_iova(&msm_obj->base, submit->gpu->id);

	if (submit->bos[i].flags & BO_LOCKED)
		ww_mutex_unlock(&msm_obj->resv->lock);

	if (!(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;

	/* only the lock/pin state is cleared; BO_VALID is left as-is: */
	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
}
171
/* This is where we make sure all the bo's are reserved and pin'd: */
/*
 * Wound/wait locking dance: take every bo's reservation lock under
 * submit->ticket.  On -EDEADLK, back off (unlock everything locked so
 * far), take the contended lock with the slow-path primitive, and retry
 * the whole loop.  Pinning happens later in submit_pin_objects().
 */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		/* once the loop reaches the bo locked by the slow path,
		 * forget it so the fail path won't unlock it twice:
		 */
		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
					&submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	/* unwind everything locked so far, including entry i: */
	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i);

	/* a slow-locked bo past the failure point is not covered above: */
	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
220
221static int submit_fence_sync(struct msm_gem_submit *submit)
222{
223	int i, ret = 0;
224
225	for (i = 0; i < submit->nr_bos; i++) {
226		struct msm_gem_object *msm_obj = submit->bos[i].obj;
227		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
228
229		ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
230		if (ret)
231			break;
232	}
233
234	return ret;
235}
236
/*
 * Pin each bo's iova for the GPU and (re)compute submit->valid: true
 * only if every bo's userspace-presumed iova matched the real one, in
 * which case reloc processing can be skipped entirely.
 *
 * On failure, entries pinned so far keep BO_PINNED set and are undone
 * via submit_unlock_unpin_bo() on the caller's cleanup path.
 */
static int submit_pin_objects(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	submit->valid = true;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* if locking succeeded, pin bo: */
		ret = msm_gem_get_iova_locked(&msm_obj->base,
				submit->gpu->id, &iova);

		if (ret)
			break;

		submit->bos[i].flags |= BO_PINNED;

		if (iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = iova;
			/* iova changed, so address in cmdstream is not valid: */
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	return ret;
}
268
269static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
270		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
271{
272	if (idx >= submit->nr_bos) {
273		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
274				idx, submit->nr_bos);
275		return -EINVAL;
276	}
277
278	if (obj)
279		*obj = submit->bos[idx].obj;
280	if (iova)
281		*iova = submit->bos[idx].iova;
282	if (valid)
283		*valid = !!(submit->bos[idx].flags & BO_VALID);
284
285	return 0;
286}
287
288/* process the reloc's and patch up the cmdstream as needed: */
289static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
290		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
291{
292	uint32_t i, last_offset = 0;
293	uint32_t *ptr;
294	int ret = 0;
295
296	if (offset % 4) {
297		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
298		return -EINVAL;
299	}
300
301	/* For now, just map the entire thing.  Eventually we probably
302	 * to do it page-by-page, w/ kmap() if not vmap()d..
303	 */
304	ptr = msm_gem_get_vaddr_locked(&obj->base);
305
306	if (IS_ERR(ptr)) {
307		ret = PTR_ERR(ptr);
308		DBG("failed to map: %d", ret);
309		return ret;
310	}
311
312	for (i = 0; i < nr_relocs; i++) {
313		struct drm_msm_gem_submit_reloc submit_reloc;
314		void __user *userptr =
315			u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
316		uint32_t off;
317		uint64_t iova;
318		bool valid;
319
320		ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
321		if (ret)
322			goto out;
323
324		if (submit_reloc.submit_offset % 4) {
325			DRM_ERROR("non-aligned reloc offset: %u\n",
326					submit_reloc.submit_offset);
327			ret = -EINVAL;
328			goto out;
329		}
330
331		/* offset in dwords: */
332		off = submit_reloc.submit_offset / 4;
333
334		if ((off >= (obj->base.size / 4)) ||
335				(off < last_offset)) {
336			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
337			ret = -EINVAL;
338			goto out;
339		}
340
341		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
342		if (ret)
343			goto out;
344
345		if (valid)
346			continue;
347
348		iova += submit_reloc.reloc_offset;
349
350		if (submit_reloc.shift < 0)
351			iova >>= -submit_reloc.shift;
352		else
353			iova <<= submit_reloc.shift;
354
355		ptr[off] = iova | submit_reloc.or;
356
357		last_offset = off;
358	}
359
360out:
361	msm_gem_put_vaddr_locked(&obj->base);
362
363	return ret;
364}
365
/*
 * Release per-bo state for every looked-up bo (unlock/unpin, remove from
 * the submit list, drop the lookup reference) and end the ww acquire
 * context.  Does not free the submit itself — see msm_gem_submit_free().
 */
static void submit_cleanup(struct msm_gem_submit *submit)
{
	unsigned i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_unlock_unpin_bo(submit, i);
		list_del_init(&msm_obj->submit_entry);
		drm_gem_object_unreference(&msm_obj->base);
	}

	ww_acquire_fini(&submit->ticket);
}
379
380int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
381		struct drm_file *file)
382{
383	struct msm_drm_private *priv = dev->dev_private;
384	struct drm_msm_gem_submit *args = data;
385	struct msm_file_private *ctx = file->driver_priv;
386	struct msm_gem_submit *submit;
387	struct msm_gpu *gpu = priv->gpu;
388	struct dma_fence *in_fence = NULL;
389	struct sync_file *sync_file = NULL;
390	int out_fence_fd = -1;
391	unsigned i;
392	int ret;
393
394	if (!gpu)
395		return -ENXIO;
396
397	/* for now, we just have 3d pipe.. eventually this would need to
398	 * be more clever to dispatch to appropriate gpu module:
399	 */
400	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
401		return -EINVAL;
402
403	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
404		return -EINVAL;
405
406	ret = mutex_lock_interruptible(&dev->struct_mutex);
407	if (ret)
408		return ret;
409
410	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
411		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
412		if (out_fence_fd < 0) {
413			ret = out_fence_fd;
414			goto out_unlock;
415		}
416	}
417	priv->struct_mutex_task = current;
418
419	submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
420	if (!submit) {
421		ret = -ENOMEM;
422		goto out_unlock;
423	}
424
425	ret = submit_lookup_objects(submit, args, file);
426	if (ret)
427		goto out;
428
429	ret = submit_lock_objects(submit);
430	if (ret)
431		goto out;
432
433	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
434		in_fence = sync_file_get_fence(args->fence_fd);
435
436		if (!in_fence) {
437			ret = -EINVAL;
438			goto out;
439		}
440
441		/* TODO if we get an array-fence due to userspace merging multiple
442		 * fences, we need a way to determine if all the backing fences
443		 * are from our own context..
444		 */
445
446		if (in_fence->context != gpu->fctx->context) {
447			ret = dma_fence_wait(in_fence, true);
448			if (ret)
449				goto out;
450		}
451
452	}
453
454	if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
455		ret = submit_fence_sync(submit);
456		if (ret)
457			goto out;
458	}
459
460	ret = submit_pin_objects(submit);
461	if (ret)
462		goto out;
463
464	for (i = 0; i < args->nr_cmds; i++) {
465		struct drm_msm_gem_submit_cmd submit_cmd;
466		void __user *userptr =
467			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
468		struct msm_gem_object *msm_obj;
469		uint64_t iova;
470
471		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
472		if (ret) {
473			ret = -EFAULT;
474			goto out;
475		}
476
477		/* validate input from userspace: */
478		switch (submit_cmd.type) {
479		case MSM_SUBMIT_CMD_BUF:
480		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
481		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
482			break;
483		default:
484			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
485			ret = -EINVAL;
486			goto out;
487		}
488
489		ret = submit_bo(submit, submit_cmd.submit_idx,
490				&msm_obj, &iova, NULL);
491		if (ret)
492			goto out;
493
494		if (submit_cmd.size % 4) {
495			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
496					submit_cmd.size);
497			ret = -EINVAL;
498			goto out;
499		}
500
501		if ((submit_cmd.size + submit_cmd.submit_offset) >=
502				msm_obj->base.size) {
503			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
504			ret = -EINVAL;
505			goto out;
506		}
507
508		submit->cmd[i].type = submit_cmd.type;
509		submit->cmd[i].size = submit_cmd.size / 4;
510		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
511		submit->cmd[i].idx  = submit_cmd.submit_idx;
512
513		if (submit->valid)
514			continue;
515
516		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
517				submit_cmd.nr_relocs, submit_cmd.relocs);
518		if (ret)
519			goto out;
520	}
521
522	submit->nr_cmds = i;
523
524	submit->fence = msm_fence_alloc(gpu->fctx);
525	if (IS_ERR(submit->fence)) {
526		ret = PTR_ERR(submit->fence);
527		submit->fence = NULL;
528		goto out;
529	}
530
531	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
532		sync_file = sync_file_create(submit->fence);
533		if (!sync_file) {
534			ret = -ENOMEM;
535			goto out;
536		}
537	}
538
539	msm_gpu_submit(gpu, submit, ctx);
540
541	args->fence = submit->fence->seqno;
542
543	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
544		fd_install(out_fence_fd, sync_file->file);
545		args->fence_fd = out_fence_fd;
546	}
547
548out:
549	if (in_fence)
550		dma_fence_put(in_fence);
551	submit_cleanup(submit);
552	if (ret)
553		msm_gem_submit_free(submit);
554out_unlock:
555	if (ret && (out_fence_fd >= 0))
556		put_unused_fd(out_fence_fd);
557	priv->struct_mutex_task = NULL;
558	mutex_unlock(&dev->struct_mutex);
559	return ret;
560}