[Scraped viewer chrome removed of meaning: site navigation/banner text.]
The content below is drivers/gpu/drm/msm/msm_gem_submit.c as of Linux v4.6;
a second copy of the same file as of v4.17 follows later for comparison.
v4.6
  1/*
  2 * Copyright (C) 2013 Red Hat
  3 * Author: Rob Clark <robdclark@gmail.com>
  4 *
  5 * This program is free software; you can redistribute it and/or modify it
  6 * under the terms of the GNU General Public License version 2 as published by
  7 * the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it will be useful, but WITHOUT
 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 12 * more details.
 13 *
 14 * You should have received a copy of the GNU General Public License along with
 15 * this program.  If not, see <http://www.gnu.org/licenses/>.
 16 */
 17
 
 
 18#include "msm_drv.h"
 19#include "msm_gpu.h"
 20#include "msm_gem.h"
 21
 22/*
 23 * Cmdstream submission:
 24 */
 25
 26/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
 27#define BO_VALID    0x8000
 28#define BO_LOCKED   0x4000
 29#define BO_PINNED   0x2000
 30
 31static inline void __user *to_user_ptr(u64 address)
 32{
 33	return (void __user *)(uintptr_t)address;
 34}
 35
 36static struct msm_gem_submit *submit_create(struct drm_device *dev,
 37		struct msm_gpu *gpu, int nr)
 
 38{
 39	struct msm_gem_submit *submit;
 40	int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
 
 41
 42	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 43	if (submit) {
 44		submit->dev = dev;
 45		submit->gpu = gpu;
 46
 47		/* initially, until copy_from_user() and bo lookup succeeds: */
 48		submit->nr_bos = 0;
 49		submit->nr_cmds = 0;
 50
 51		INIT_LIST_HEAD(&submit->bo_list);
 52		ww_acquire_init(&submit->ticket, &reservation_ww_class);
 53	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 54
 55	return submit;
 56}
 57
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 58static int submit_lookup_objects(struct msm_gem_submit *submit,
 59		struct drm_msm_gem_submit *args, struct drm_file *file)
 60{
 61	unsigned i;
 62	int ret = 0;
 63
 64	spin_lock(&file->table_lock);
 
 65
 66	for (i = 0; i < args->nr_bos; i++) {
 67		struct drm_msm_gem_submit_bo submit_bo;
 68		struct drm_gem_object *obj;
 69		struct msm_gem_object *msm_obj;
 70		void __user *userptr =
 71			to_user_ptr(args->bos + (i * sizeof(submit_bo)));
 72
 73		ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
 74		if (ret) {
 75			ret = -EFAULT;
 76			goto out_unlock;
 
 
 
 
 
 
 
 
 
 
 77		}
 78
 79		if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
 
 80			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
 81			ret = -EINVAL;
 82			goto out_unlock;
 83		}
 84
 85		submit->bos[i].flags = submit_bo.flags;
 86		/* in validate_objects() we figure out if this is true: */
 87		submit->bos[i].iova  = submit_bo.presumed;
 88
 89		/* normally use drm_gem_object_lookup(), but for bulk lookup
 90		 * all under single table_lock just hit object_idr directly:
 91		 */
 92		obj = idr_find(&file->object_idr, submit_bo.handle);
 93		if (!obj) {
 94			DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
 95			ret = -EINVAL;
 96			goto out_unlock;
 97		}
 98
 99		msm_obj = to_msm_bo(obj);
100
101		if (!list_empty(&msm_obj->submit_entry)) {
102			DRM_ERROR("handle %u at index %u already on submit list\n",
103					submit_bo.handle, i);
104			ret = -EINVAL;
105			goto out_unlock;
106		}
107
108		drm_gem_object_reference(obj);
109
110		submit->bos[i].obj = msm_obj;
111
112		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
113	}
114
115out_unlock:
116	submit->nr_bos = i;
117	spin_unlock(&file->table_lock);
118
 
 
 
119	return ret;
120}
121
/*
 * Undo the per-bo state set up by submit_validate_objects() for entry 'i':
 * release the iova pin (BO_PINNED) and the reservation ww_mutex
 * (BO_LOCKED), then clear those two flags.  If the iova was never
 * confirmed (BO_VALID not set) it is zeroed so a later retry re-resolves
 * the address instead of trusting a stale presumed value.
 */
122static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)

123{
124	struct msm_gem_object *msm_obj = submit->bos[i].obj;
125
126	if (submit->bos[i].flags & BO_PINNED)
127		msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
128
129	if (submit->bos[i].flags & BO_LOCKED)
130		ww_mutex_unlock(&msm_obj->resv->lock);
131
132	if (!(submit->bos[i].flags & BO_VALID))
133		submit->bos[i].iova = 0;
134
	/* note: BO_VALID itself is deliberately left set/clear as-is */
135	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
136}
137
/*
 * Reserve (ww_mutex-lock) and pin every bo in the submit.  On -EDEADLK
 * all locks taken so far are backed off, the contended lock is taken via
 * the slow path, remembered in 'slow_locked', and the whole loop retries.
 * On success every bo is locked+pinned and submit->valid records whether
 * all userspace-presumed iovas matched (if so, relocs can be skipped).
 */
138/* This is where we make sure all the bo's are reserved and pin'd: */
139static int submit_validate_objects(struct msm_gem_submit *submit)
140{
141	int contended, slow_locked = -1, i, ret = 0;
142
143retry:
144	submit->valid = true;
145
146	for (i = 0; i < submit->nr_bos; i++) {
147		struct msm_gem_object *msm_obj = submit->bos[i].obj;
148		uint32_t iova;
149
		/* this bo was already locked by the slow path on a prior pass */
150		if (slow_locked == i)
151			slow_locked = -1;
152
153		contended = i;
154
155		if (!(submit->bos[i].flags & BO_LOCKED)) {
156			ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
157					&submit->ticket);
158			if (ret)
159				goto fail;
160			submit->bos[i].flags |= BO_LOCKED;
161		}
162
163

164		/* if locking succeeded, pin bo: */
165		ret = msm_gem_get_iova_locked(&msm_obj->base,
166				submit->gpu->id, &iova);
167
168		/* this would break the logic in the fail path.. there is no
169		 * reason for this to happen, but just to be on the safe side
170		 * let's notice if this starts happening in the future:
171		 */
172		WARN_ON(ret == -EDEADLK);
173
174		if (ret)
175			goto fail;
176
177		submit->bos[i].flags |= BO_PINNED;
178
179		if (iova == submit->bos[i].iova) {
180			submit->bos[i].flags |= BO_VALID;
181		} else {
182			submit->bos[i].iova = iova;
183			submit->bos[i].flags &= ~BO_VALID;
184			submit->valid = false;
185		}
186	}
187
188	ww_acquire_done(&submit->ticket);
189
190	return 0;
191
192fail:
	/* back off: release everything up to and including the failing index */
193	for (; i >= 0; i--)
194		submit_unlock_unpin_bo(submit, i);
195
	/* index 0 is already covered by the countdown loop above */
196	if (slow_locked > 0)
197		submit_unlock_unpin_bo(submit, slow_locked);
198
199	if (ret == -EDEADLK) {
200		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
201		/* we lost out in a seqno race, lock and retry.. */
202		ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
203				&submit->ticket);
204		if (!ret) {
205			submit->bos[contended].flags |= BO_LOCKED;
206			slow_locked = contended;
207			goto retry;
208		}
209	}
210
211	return ret;
212}
213
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
214static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
215		struct msm_gem_object **obj, uint32_t *iova, bool *valid)
216{
217	if (idx >= submit->nr_bos) {
218		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
219				idx, submit->nr_bos);
220		return -EINVAL;
221	}
222
223	if (obj)
224		*obj = submit->bos[idx].obj;
225	if (iova)
226		*iova = submit->bos[idx].iova;
227	if (valid)
228		*valid = !!(submit->bos[idx].flags & BO_VALID);
229
230	return 0;
231}
232
/*
 * Apply userspace relocs to a cmdstream bo: each reloc names a dword
 * offset in 'obj' that is rewritten with the resolved iova of another bo
 * (optionally shifted, then OR'd with a constant).  Offsets must be
 * dword-aligned and sorted ascending (off >= last_offset).  The bo is
 * vmap'd whole; the caller (msm_ioctl_gem_submit) holds struct_mutex,
 * matching the _locked vaddr helper used here.
 */
233/* process the reloc's and patch up the cmdstream as needed: */
234static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
235		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
236{
237	uint32_t i, last_offset = 0;
238	uint32_t *ptr;
239	int ret;
240
241	if (offset % 4) {
242		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
243		return -EINVAL;
244	}
245
246	/* For now, just map the entire thing.  Eventually we probably
247	 * to do it page-by-page, w/ kmap() if not vmap()d..
248	 */
249	ptr = msm_gem_vaddr_locked(&obj->base);
250
251	if (IS_ERR(ptr)) {
252		ret = PTR_ERR(ptr);
253		DBG("failed to map: %d", ret);
254		return ret;
255	}
256
257	for (i = 0; i < nr_relocs; i++) {
258		struct drm_msm_gem_submit_reloc submit_reloc;
259		void __user *userptr =
260			to_user_ptr(relocs + (i * sizeof(submit_reloc)));
261		uint32_t iova, off;

262		bool valid;
263
264		ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
265		if (ret)
266			return -EFAULT;

267
268		if (submit_reloc.submit_offset % 4) {
269			DRM_ERROR("non-aligned reloc offset: %u\n",
270					submit_reloc.submit_offset);
271			return -EINVAL;

272		}
273
274		/* offset in dwords: */
275		off = submit_reloc.submit_offset / 4;
276
277		if ((off >= (obj->base.size / 4)) ||
278				(off < last_offset)) {
279			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
280			return -EINVAL;

281		}
282
283		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
284		if (ret)
285			return ret;
286
		/* nothing to patch if userspace's presumed iova was right */
287		if (valid)
288			continue;
289
290		iova += submit_reloc.reloc_offset;
291
292		if (submit_reloc.shift < 0)
293			iova >>= -submit_reloc.shift;
294		else
295			iova <<= submit_reloc.shift;
296
297		ptr[off] = iova | submit_reloc.or;
298
299		last_offset = off;
300	}
301
302	return 0;



303}
304
/*
 * Release everything submit_lookup_objects()/submit_validate_objects()
 * took: unpin+unlock each bo, unlink it from the submit's bo list, drop
 * its gem reference, and finish the ww acquire context.
 * NOTE(review): the 'fail' parameter is unused in this version — success
 * and failure cleanup take the identical path here.
 */
305static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
306{
307	unsigned i;
308
309	for (i = 0; i < submit->nr_bos; i++) {
310		struct msm_gem_object *msm_obj = submit->bos[i].obj;
311		submit_unlock_unpin_bo(submit, i);
312		list_del_init(&msm_obj->submit_entry);
313		drm_gem_object_unreference(&msm_obj->base);
314	}
315
316	ww_acquire_fini(&submit->ticket);
317}
318
319int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
320		struct drm_file *file)
321{
322	struct msm_drm_private *priv = dev->dev_private;
323	struct drm_msm_gem_submit *args = data;
324	struct msm_file_private *ctx = file->driver_priv;
325	struct msm_gem_submit *submit;
326	struct msm_gpu *gpu = priv->gpu;
 
 
 
 
 
327	unsigned i;
328	int ret;
329
330	if (!gpu)
331		return -ENXIO;
332
333	/* for now, we just have 3d pipe.. eventually this would need to
334	 * be more clever to dispatch to appropriate gpu module:
335	 */
336	if (args->pipe != MSM_PIPE_3D0)
337		return -EINVAL;
338
339	if (args->nr_cmds > MAX_CMDS)
340		return -EINVAL;
341
342	submit = submit_create(dev, gpu, args->nr_bos);
343	if (!submit)
344		return -ENOMEM;
 
 
 
 
 
 
345
346	mutex_lock(&dev->struct_mutex);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
347
348	ret = submit_lookup_objects(submit, args, file);
349	if (ret)
350		goto out;
351
352	ret = submit_validate_objects(submit);
 
 
 
 
 
 
 
 
353	if (ret)
354		goto out;
355
356	for (i = 0; i < args->nr_cmds; i++) {
357		struct drm_msm_gem_submit_cmd submit_cmd;
358		void __user *userptr =
359			to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
360		struct msm_gem_object *msm_obj;
361		uint32_t iova;
362
363		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
364		if (ret) {
365			ret = -EFAULT;
366			goto out;
367		}
368
369		/* validate input from userspace: */
370		switch (submit_cmd.type) {
371		case MSM_SUBMIT_CMD_BUF:
372		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
373		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
374			break;
375		default:
376			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
377			ret = -EINVAL;
378			goto out;
379		}
380
381		ret = submit_bo(submit, submit_cmd.submit_idx,
382				&msm_obj, &iova, NULL);
383		if (ret)
384			goto out;
385
386		if (submit_cmd.size % 4) {
387			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
388					submit_cmd.size);
389			ret = -EINVAL;
390			goto out;
391		}
392
393		if ((submit_cmd.size + submit_cmd.submit_offset) >=
394				msm_obj->base.size) {
 
395			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
396			ret = -EINVAL;
397			goto out;
398		}
399
400		submit->cmd[i].type = submit_cmd.type;
401		submit->cmd[i].size = submit_cmd.size / 4;
402		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
403		submit->cmd[i].idx  = submit_cmd.submit_idx;
404
405		if (submit->valid)
406			continue;
407
408		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
409				submit_cmd.nr_relocs, submit_cmd.relocs);
410		if (ret)
411			goto out;
412	}
413
414	submit->nr_cmds = i;
415
416	ret = msm_gpu_submit(gpu, submit, ctx);
 
 
 
 
 
417
418	args->fence = submit->fence;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
419
420out:
421	submit_cleanup(submit, !!ret);
 
 
 
 
 
 
 
422	mutex_unlock(&dev->struct_mutex);
423	return ret;
424}
v4.17 — second copy: the same file as of Linux v4.17 follows below.
  1/*
  2 * Copyright (C) 2013 Red Hat
  3 * Author: Rob Clark <robdclark@gmail.com>
  4 *
  5 * This program is free software; you can redistribute it and/or modify it
  6 * under the terms of the GNU General Public License version 2 as published by
  7 * the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it will be useful, but WITHOUT
 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 12 * more details.
 13 *
 14 * You should have received a copy of the GNU General Public License along with
 15 * this program.  If not, see <http://www.gnu.org/licenses/>.
 16 */
 17
 18#include <linux/sync_file.h>
 19
 20#include "msm_drv.h"
 21#include "msm_gpu.h"
 22#include "msm_gem.h"
 23
 24/*
 25 * Cmdstream submission:
 26 */
 27
 28/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
 29#define BO_VALID    0x8000   /* is current addr in cmdstream correct/valid? */
 30#define BO_LOCKED   0x4000
 31#define BO_PINNED   0x2000
 32
 
 
 
 
 
/*
 * Allocate one buffer holding the submit plus its bos[] table and cmd[]
 * table (cmd points just past bos[nr_bos] in the same allocation).  The
 * size is computed in 64-bit and checked against SIZE_MAX so userspace-
 * supplied counts cannot overflow a 32-bit size_t.  Returns NULL on
 * overflow or allocation failure.  The submit takes over the caller's
 * queue reference (dropped in msm_gem_submit_free()).
 */
 33static struct msm_gem_submit *submit_create(struct drm_device *dev,
 34		struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue,
 35		uint32_t nr_bos, uint32_t nr_cmds)
 36{
 37	struct msm_gem_submit *submit;
 38	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
 39		((u64)nr_cmds * sizeof(submit->cmd[0]));
 40
 41	if (sz > SIZE_MAX)
 42		return NULL;






 43
 44	submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
 45	if (!submit)
 46		return NULL;
 47
 48	submit->dev = dev;
 49	submit->gpu = gpu;
 50	submit->fence = NULL;
 51	submit->pid = get_pid(task_pid(current));
	/* cmd table lives immediately after the bos table in this alloc: */
 52	submit->cmd = (void *)&submit->bos[nr_bos];
 53	submit->queue = queue;
 54	submit->ring = gpu->rb[queue->prio];
 55
 56	/* initially, until copy_from_user() and bo lookup succeeds: */
 57	submit->nr_bos = 0;
 58	submit->nr_cmds = 0;
 59
 60	INIT_LIST_HEAD(&submit->node);
 61	INIT_LIST_HEAD(&submit->bo_list);
 62	ww_acquire_init(&submit->ticket, &reservation_ww_class);
 63
 64	return submit;
 65}
 66
/*
 * Free a submit and drop the references it owns: its fence, its pid, and
 * its submitqueue; also unlinks it from whatever list its node is on.
 */
 67void msm_gem_submit_free(struct msm_gem_submit *submit)
 68{
 69	dma_fence_put(submit->fence);
 70	list_del(&submit->node);
 71	put_pid(submit->pid);
 72	msm_submitqueue_put(submit->queue);
 73
 74	kfree(submit);
 75}
 76
/*
 * Non-faulting copy_from_user() variant, callable with spinlocks held /
 * pagefaults disabled: it fails rather than faulting pages in.
 * NOTE(review): returns -EFAULT through an unsigned long; callers here
 * only test for non-zero, but this differs from the "bytes not copied"
 * convention of __copy_from_user_inatomic() itself.
 */
 77static inline unsigned long __must_check
 78copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 79{
 80	if (access_ok(VERIFY_READ, from, n))
 81		return __copy_from_user_inatomic(to, from, n);
 82	return -EFAULT;
 83}
 84
/*
 * Copy the bo table from userspace and resolve gem handles to objects,
 * all under a single table_lock (with pagefaults disabled so the
 * non-faulting copy is legal).  If that copy fails, the lock is dropped,
 * a normal faulting copy is done, and the lock is re-taken.  On error
 * submit->nr_bos is set to the number of entries actually initialized so
 * submit_cleanup() releases exactly those references.
 */
 85static int submit_lookup_objects(struct msm_gem_submit *submit,
 86		struct drm_msm_gem_submit *args, struct drm_file *file)
 87{
 88	unsigned i;
 89	int ret = 0;
 90
 91	spin_lock(&file->table_lock);
 92	pagefault_disable();
 93
 94	for (i = 0; i < args->nr_bos; i++) {
 95		struct drm_msm_gem_submit_bo submit_bo;
 96		struct drm_gem_object *obj;
 97		struct msm_gem_object *msm_obj;
 98		void __user *userptr =
 99			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
100
101		/* make sure we don't have garbage flags, in case we hit
102		 * error path before flags is initialized:
103		 */
104		submit->bos[i].flags = 0;
105
		/* fast path first; fall back to a faulting copy if it misses */
106		if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) {
107			pagefault_enable();
108			spin_unlock(&file->table_lock);
109			if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
110				ret = -EFAULT;
111				goto out;
112			}
113			spin_lock(&file->table_lock);
114			pagefault_disable();
115		}
116
		/* exactly READ and/or WRITE must be set, nothing else: */
117		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
118			!(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
119			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
120			ret = -EINVAL;
121			goto out_unlock;
122		}
123
124		submit->bos[i].flags = submit_bo.flags;
125		/* in validate_objects() we figure out if this is true: */
126		submit->bos[i].iova  = submit_bo.presumed;
127
128		/* normally use drm_gem_object_lookup(), but for bulk lookup
129		 * all under single table_lock just hit object_idr directly:
130		 */
131		obj = idr_find(&file->object_idr, submit_bo.handle);
132		if (!obj) {
133			DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
134			ret = -EINVAL;
135			goto out_unlock;
136		}
137
138		msm_obj = to_msm_bo(obj);
139
140		if (!list_empty(&msm_obj->submit_entry)) {
141			DRM_ERROR("handle %u at index %u already on submit list\n",
142					submit_bo.handle, i);
143			ret = -EINVAL;
144			goto out_unlock;
145		}
146
147		drm_gem_object_reference(obj);
148
149		submit->bos[i].obj = msm_obj;
150
151		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
152	}
153
154out_unlock:
155	pagefault_enable();
156	spin_unlock(&file->table_lock);
157
158out:
159	submit->nr_bos = i;
160
161	return ret;
162}
163
/*
 * Release entry i's iova pin and reservation lock and clear those flags.
 * Only in the ww backoff path (backoff=true) is a never-validated iova
 * zeroed so a retry re-resolves it; normal cleanup keeps the value as a
 * presumed-iova hint for the next submit.
 */
164static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
165		int i, bool backoff)
166{
167	struct msm_gem_object *msm_obj = submit->bos[i].obj;
168
169	if (submit->bos[i].flags & BO_PINNED)
170		msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
171
172	if (submit->bos[i].flags & BO_LOCKED)
173		ww_mutex_unlock(&msm_obj->resv->lock);
174
175	if (backoff && !(submit->bos[i].flags & BO_VALID))
176		submit->bos[i].iova = 0;
177
178	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
179}
180
/*
 * Take the reservation ww_mutex of every bo in the submit.  On -EDEADLK
 * all locks taken so far are unwound, the contended lock is acquired via
 * the slow path, remembered in 'slow_locked', and the loop retries;
 * the retry pass skips re-locking that index (BO_LOCKED already set).
 */
181/* This is where we make sure all the bo's are reserved and pin'd: */
182static int submit_lock_objects(struct msm_gem_submit *submit)
183{
184	int contended, slow_locked = -1, i, ret = 0;
185
186retry:


187	for (i = 0; i < submit->nr_bos; i++) {
188		struct msm_gem_object *msm_obj = submit->bos[i].obj;

189
		/* this bo was already locked by the slow path on a prior pass */
190		if (slow_locked == i)
191			slow_locked = -1;
192
193		contended = i;
194
195		if (!(submit->bos[i].flags & BO_LOCKED)) {
196			ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
197					&submit->ticket);
198			if (ret)
199				goto fail;
200			submit->bos[i].flags |= BO_LOCKED;
201		}
























202	}
203
204	ww_acquire_done(&submit->ticket);
205
206	return 0;
207
208fail:
	/* back off: release everything up to and including the failing index */
209	for (; i >= 0; i--)
210		submit_unlock_unpin_bo(submit, i, true);
211
	/* index 0 is already covered by the countdown loop above */
212	if (slow_locked > 0)
213		submit_unlock_unpin_bo(submit, slow_locked, true);
214
215	if (ret == -EDEADLK) {
216		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
217		/* we lost out in a seqno race, lock and retry.. */
218		ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
219				&submit->ticket);
220		if (!ret) {
221			submit->bos[contended].flags |= BO_LOCKED;
222			slow_locked = contended;
223			goto retry;
224		}
225	}
226
227	return ret;
228}
229
/*
 * Implicit-fencing step: for each read-only bo, reserve a shared-fence
 * slot (a can-fail operation that must precede adding the fence later);
 * then, unless no_implicit, sync the bo against the ring's fence context
 * via msm_gem_sync_object().  Returns first error encountered.
 */
230static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
231{
232	int i, ret = 0;
233
234	for (i = 0; i < submit->nr_bos; i++) {
235		struct msm_gem_object *msm_obj = submit->bos[i].obj;
236		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
237
238		if (!write) {
239			/* NOTE: _reserve_shared() must happen before
240			 * _add_shared_fence(), which makes this a slightly
241			 * strange place to call it.  OTOH this is a
242			 * convenient can-fail point to hook it in.
243			 */
244			ret = reservation_object_reserve_shared(msm_obj->resv);
245			if (ret)
246				return ret;
247		}
248
249		if (no_implicit)
250			continue;
251
252		ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
253			write);
254		if (ret)
255			break;
256	}
257
258	return ret;
259}
260
/*
 * Pin each (already-locked) bo to get its GPU iova.  If it matches the
 * userspace-presumed iova, mark the entry BO_VALID; otherwise record the
 * real iova and clear submit->valid so relocs get applied later.
 */
261static int submit_pin_objects(struct msm_gem_submit *submit)
262{
263	int i, ret = 0;
264
265	submit->valid = true;
266
267	for (i = 0; i < submit->nr_bos; i++) {
268		struct msm_gem_object *msm_obj = submit->bos[i].obj;
269		uint64_t iova;
270
271		/* if locking succeeded, pin bo: */
272		ret = msm_gem_get_iova(&msm_obj->base,
273				submit->gpu->aspace, &iova);
274
275		if (ret)
276			break;
277
278		submit->bos[i].flags |= BO_PINNED;
279
280		if (iova == submit->bos[i].iova) {
281			submit->bos[i].flags |= BO_VALID;
282		} else {
283			submit->bos[i].iova = iova;
284			/* iova changed, so address in cmdstream is not valid: */
285			submit->bos[i].flags &= ~BO_VALID;
286			submit->valid = false;
287		}
288	}
289
290	return ret;
291}
292
293static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
294		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
295{
296	if (idx >= submit->nr_bos) {
297		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
298				idx, submit->nr_bos);
299		return -EINVAL;
300	}
301
302	if (obj)
303		*obj = submit->bos[idx].obj;
304	if (iova)
305		*iova = submit->bos[idx].iova;
306	if (valid)
307		*valid = !!(submit->bos[idx].flags & BO_VALID);
308
309	return 0;
310}
311
/*
 * Apply userspace relocs to a cmdstream bo: each reloc names a dword
 * offset in 'obj' that is rewritten with the resolved iova of another bo
 * (optionally shifted, then OR'd with a constant).  Offsets must be
 * dword-aligned and sorted ascending (off >= last_offset).  The whole bo
 * is mapped via get_vaddr and always unmapped on exit via the out label.
 */
312/* process the reloc's and patch up the cmdstream as needed: */
313static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
314		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
315{
316	uint32_t i, last_offset = 0;
317	uint32_t *ptr;
318	int ret = 0;
319
320	if (offset % 4) {
321		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
322		return -EINVAL;
323	}
324
325	/* For now, just map the entire thing.  Eventually we probably
326	 * to do it page-by-page, w/ kmap() if not vmap()d..
327	 */
328	ptr = msm_gem_get_vaddr(&obj->base);
329
330	if (IS_ERR(ptr)) {
331		ret = PTR_ERR(ptr);
332		DBG("failed to map: %d", ret);
333		return ret;
334	}
335
336	for (i = 0; i < nr_relocs; i++) {
337		struct drm_msm_gem_submit_reloc submit_reloc;
338		void __user *userptr =
339			u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
340		uint32_t off;
341		uint64_t iova;
342		bool valid;
343
344		if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) {
345			ret = -EFAULT;
346			goto out;
347		}
348
349		if (submit_reloc.submit_offset % 4) {
350			DRM_ERROR("non-aligned reloc offset: %u\n",
351					submit_reloc.submit_offset);
352			ret = -EINVAL;
353			goto out;
354		}
355
356		/* offset in dwords: */
357		off = submit_reloc.submit_offset / 4;
358
359		if ((off >= (obj->base.size / 4)) ||
360				(off < last_offset)) {
361			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
362			ret = -EINVAL;
363			goto out;
364		}
365
366		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
367		if (ret)
368			goto out;
369
		/* nothing to patch if userspace's presumed iova was right */
370		if (valid)
371			continue;
372
373		iova += submit_reloc.reloc_offset;
374
375		if (submit_reloc.shift < 0)
376			iova >>= -submit_reloc.shift;
377		else
378			iova <<= submit_reloc.shift;
379
380		ptr[off] = iova | submit_reloc.or;
381
382		last_offset = off;
383	}
384
385out:
386	msm_gem_put_vaddr(&obj->base);
387
388	return ret;
389}
390
/*
 * Release everything the lookup/lock/pin phases took: unpin+unlock each
 * bo (backoff=false keeps validated iova hints), unlink it from the
 * submit's bo list, drop its gem reference, and finish the ww acquire
 * context.  Runs on both the success and failure paths of the ioctl.
 */
391static void submit_cleanup(struct msm_gem_submit *submit)
392{
393	unsigned i;
394
395	for (i = 0; i < submit->nr_bos; i++) {
396		struct msm_gem_object *msm_obj = submit->bos[i].obj;
397		submit_unlock_unpin_bo(submit, i, false);
398		list_del_init(&msm_obj->submit_entry);
399		drm_gem_object_unreference(&msm_obj->base);
400	}
401
402	ww_acquire_fini(&submit->ticket);
403}
404
405int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
406		struct drm_file *file)
407{
408	struct msm_drm_private *priv = dev->dev_private;
409	struct drm_msm_gem_submit *args = data;
410	struct msm_file_private *ctx = file->driver_priv;
411	struct msm_gem_submit *submit;
412	struct msm_gpu *gpu = priv->gpu;
413	struct dma_fence *in_fence = NULL;
414	struct sync_file *sync_file = NULL;
415	struct msm_gpu_submitqueue *queue;
416	struct msm_ringbuffer *ring;
417	int out_fence_fd = -1;
418	unsigned i;
419	int ret;
420
421	if (!gpu)
422		return -ENXIO;
423
424	/* for now, we just have 3d pipe.. eventually this would need to
425	 * be more clever to dispatch to appropriate gpu module:
426	 */
427	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
428		return -EINVAL;
429
430	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
431		return -EINVAL;
432
433	if (args->flags & MSM_SUBMIT_SUDO) {
434		if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
435		    !capable(CAP_SYS_RAWIO))
436			return -EINVAL;
437	}
438
439	queue = msm_submitqueue_get(ctx, args->queueid);
440	if (!queue)
441		return -ENOENT;
442
443	ring = gpu->rb[queue->prio];
444
445	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
446		in_fence = sync_file_get_fence(args->fence_fd);
447
448		if (!in_fence)
449			return -EINVAL;
450
451		/*
452		 * Wait if the fence is from a foreign context, or if the fence
453		 * array contains any fence from a foreign context.
454		 */
455		if (!dma_fence_match_context(in_fence, ring->fctx->context)) {
456			ret = dma_fence_wait(in_fence, true);
457			if (ret)
458				return ret;
459		}
460	}
461
462	ret = mutex_lock_interruptible(&dev->struct_mutex);
463	if (ret)
464		return ret;
465
466	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
467		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
468		if (out_fence_fd < 0) {
469			ret = out_fence_fd;
470			goto out_unlock;
471		}
472	}
473
474	submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
475	if (!submit) {
476		ret = -ENOMEM;
477		goto out_unlock;
478	}
479
480	if (args->flags & MSM_SUBMIT_SUDO)
481		submit->in_rb = true;
482
483	ret = submit_lookup_objects(submit, args, file);
484	if (ret)
485		goto out;
486
487	ret = submit_lock_objects(submit);
488	if (ret)
489		goto out;
490
491	ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
492	if (ret)
493		goto out;
494
495	ret = submit_pin_objects(submit);
496	if (ret)
497		goto out;
498
499	for (i = 0; i < args->nr_cmds; i++) {
500		struct drm_msm_gem_submit_cmd submit_cmd;
501		void __user *userptr =
502			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
503		struct msm_gem_object *msm_obj;
504		uint64_t iova;
505
506		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
507		if (ret) {
508			ret = -EFAULT;
509			goto out;
510		}
511
512		/* validate input from userspace: */
513		switch (submit_cmd.type) {
514		case MSM_SUBMIT_CMD_BUF:
515		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
516		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
517			break;
518		default:
519			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
520			ret = -EINVAL;
521			goto out;
522		}
523
524		ret = submit_bo(submit, submit_cmd.submit_idx,
525				&msm_obj, &iova, NULL);
526		if (ret)
527			goto out;
528
529		if (submit_cmd.size % 4) {
530			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
531					submit_cmd.size);
532			ret = -EINVAL;
533			goto out;
534		}
535
536		if (!submit_cmd.size ||
537			((submit_cmd.size + submit_cmd.submit_offset) >
538				msm_obj->base.size)) {
539			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
540			ret = -EINVAL;
541			goto out;
542		}
543
544		submit->cmd[i].type = submit_cmd.type;
545		submit->cmd[i].size = submit_cmd.size / 4;
546		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
547		submit->cmd[i].idx  = submit_cmd.submit_idx;
548
549		if (submit->valid)
550			continue;
551
552		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
553				submit_cmd.nr_relocs, submit_cmd.relocs);
554		if (ret)
555			goto out;
556	}
557
558	submit->nr_cmds = i;
559
560	submit->fence = msm_fence_alloc(ring->fctx);
561	if (IS_ERR(submit->fence)) {
562		ret = PTR_ERR(submit->fence);
563		submit->fence = NULL;
564		goto out;
565	}
566
567	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
568		sync_file = sync_file_create(submit->fence);
569		if (!sync_file) {
570			ret = -ENOMEM;
571			goto out;
572		}
573	}
574
575	msm_gpu_submit(gpu, submit, ctx);
576
577	args->fence = submit->fence->seqno;
578
579	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
580		fd_install(out_fence_fd, sync_file->file);
581		args->fence_fd = out_fence_fd;
582	}
583
584out:
585	if (in_fence)
586		dma_fence_put(in_fence);
587	submit_cleanup(submit);
588	if (ret)
589		msm_gem_submit_free(submit);
590out_unlock:
591	if (ret && (out_fence_fd >= 0))
592		put_unused_fd(out_fence_fd);
593	mutex_unlock(&dev->struct_mutex);
594	return ret;
595}