v4.6 (drivers/gpu/drm/msm/msm_gem_submit.c):
 
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"

/*
 * Cmdstream submission:
 */

/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID    0x8000
#define BO_LOCKED   0x4000
#define BO_PINNED   0x2000

static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}

static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu, int nr)
{
	struct msm_gem_submit *submit;
	int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));

	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (submit) {
		submit->dev = dev;
		submit->gpu = gpu;

		/* initially, until copy_from_user() and bo lookup succeeds: */
		submit->nr_bos = 0;
		submit->nr_cmds = 0;

		INIT_LIST_HEAD(&submit->bo_list);
		ww_acquire_init(&submit->ticket, &reservation_ww_class);
	}

	return submit;
}

static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		struct drm_gem_object *obj;
		struct msm_gem_object *msm_obj;
		void __user *userptr =
			to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
		if (ret) {
			ret = -EFAULT;
			goto out_unlock;
		}

		if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = submit_bo.flags;
		/* in validate_objects() we figure out if this is true: */
		submit->bos[i].iova  = submit_bo.presumed;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit_bo.handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		msm_obj = to_msm_bo(obj);

		if (!list_empty(&msm_obj->submit_entry)) {
			DRM_ERROR("handle %u at index %u already on submit list\n",
					submit_bo.handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_reference(obj);

		submit->bos[i].obj = msm_obj;

		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
	}

out_unlock:
	submit->nr_bos = i;
	spin_unlock(&file->table_lock);

	return ret;
}

static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
	struct msm_gem_object *msm_obj = submit->bos[i].obj;

	if (submit->bos[i].flags & BO_PINNED)
		msm_gem_put_iova(&msm_obj->base, submit->gpu->id);

	if (submit->bos[i].flags & BO_LOCKED)
		ww_mutex_unlock(&msm_obj->resv->lock);

	if (!(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;

	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
}

/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_validate_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	submit->valid = true;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint32_t iova;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
					&submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}

		/* if locking succeeded, pin bo: */
		ret = msm_gem_get_iova_locked(&msm_obj->base,
				submit->gpu->id, &iova);

		/* this would break the logic in the fail path.. there is no
		 * reason for this to happen, but just to be on the safe side
		 * let's notice if this starts happening in the future:
		 */
		WARN_ON(ret == -EDEADLK);

		if (ret)
			goto fail;

		submit->bos[i].flags |= BO_PINNED;

		if (iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = iova;
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i);

	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
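
The retry loop above is the standard wound/wait locking protocol described in Documentation/locking/ww-mutex-design.rst: take every reservation lock under one acquire ticket, and if a lock attempt returns -EDEADLK, drop everything, sleep on the contended lock, and start over with that lock already held. A minimal standalone sketch of the same dance, using a hypothetical struct object with an embedded ww_mutex rather than the msm structures (illustrative only):

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(sketch_ww_class);

struct object {
	struct ww_mutex lock;
};

static int lock_all(struct object **objs, int n)
{
	struct ww_acquire_ctx ctx;
	int i, contended = -1, ret;

	ww_acquire_init(&ctx, &sketch_ww_class);
retry:
	for (i = 0; i < n; i++) {
		if (i == contended)	/* already taken by the slow path below */
			continue;

		ret = ww_mutex_lock(&objs[i]->lock, &ctx);
		if (ret == -EDEADLK) {
			int failed = i;

			/* we lost a seqno race: back off completely.. */
			while (--i >= 0)
				ww_mutex_unlock(&objs[i]->lock);
			if (contended > failed)
				ww_mutex_unlock(&objs[contended]->lock);

			/* ..then sleep on the contended lock before
			 * retrying, so the older transaction that wounded
			 * us can finish first:
			 */
			contended = failed;
			ww_mutex_lock_slow(&objs[contended]->lock, &ctx);
			goto retry;
		}
	}

	ww_acquire_done(&ctx);	/* no more locks will be taken under ctx */
	return 0;	/* caller unlocks each lock, then ww_acquire_fini(&ctx) */
}

submit_validate_objects() is the interruptible variant of this pattern: it uses ww_mutex_lock_interruptible()/ww_mutex_lock_slow_interruptible(), tracks what it holds in the per-bo BO_LOCKED flag, and folds the unpinning of already-pinned buffers into the same backoff path.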

static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint32_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;
	if (valid)
		*valid = !!(submit->bos[idx].flags & BO_VALID);

	return 0;
}

/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably want
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_vaddr_locked(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc;
		void __user *userptr =
			to_user_ptr(relocs + (i * sizeof(submit_reloc)));
		uint32_t iova, off;
		bool valid;

		ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
		if (ret)
			return -EFAULT;

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			return -EINVAL;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			return -EINVAL;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			return ret;

		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

	return 0;
}
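
Each entry consumed above is a struct drm_msm_gem_submit_reloc from include/uapi/drm/msm_drm.h. As a hedged illustration (the indices and offsets are invented for the example), a reloc that patches dword 100 of the cmdstream with the address of the submit's third bo plus 0x1000 would look like:

	struct drm_msm_gem_submit_reloc reloc = {
		.submit_offset = 100 * 4,	/* byte offset into cmdstream bo, 4-byte aligned */
		.or            = 0,		/* bits OR'd into the patched dword */
		.shift         = 0,		/* <0 shifts the iova right, >0 left */
		.reloc_idx     = 2,		/* index into the submit's bos[] table */
		.reloc_offset  = 0x1000,	/* byte offset from start of that bo */
	};

With these values the loop stores ((iova + 0x1000) << 0) | 0 into ptr[100], and it only does so when that bo's presumed address turned out to be wrong (BO_VALID clear); otherwise the reloc is skipped.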

static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
{
	unsigned i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_unlock_unpin_bo(submit, i);
		list_del_init(&msm_obj->submit_entry);
		drm_gem_object_unreference(&msm_obj->base);
	}

	ww_acquire_fini(&submit->ticket);
}

int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit;
	struct msm_gpu *gpu = priv->gpu;
	unsigned i;
	int ret;

	if (!gpu)
		return -ENXIO;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	if (args->nr_cmds > MAX_CMDS)
		return -EINVAL;

	submit = submit_create(dev, gpu, args->nr_bos);
	if (!submit)
		return -ENOMEM;

	mutex_lock(&dev->struct_mutex);

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_validate_objects(submit);
	if (ret)
		goto out;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
		struct msm_gem_object *msm_obj;
		uint32_t iova;

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_cmd.submit_idx,
				&msm_obj, &iova, NULL);
		if (ret)
			goto out;

		if (submit_cmd.size % 4) {
			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
					submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		if ((submit_cmd.size + submit_cmd.submit_offset) >=
				msm_obj->base.size) {
			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
		submit->cmd[i].idx  = submit_cmd.submit_idx;

		if (submit->valid)
			continue;

		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
				submit_cmd.nr_relocs, submit_cmd.relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;

	ret = msm_gpu_submit(gpu, submit, ctx);

	args->fence = submit->fence;

out:
	submit_cleanup(submit, !!ret);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
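
For context, a hedged sketch of the matching v4.6-era userspace call (error handling elided; bo_handle and its size are assumed to come from earlier MSM_GEM_NEW/MSM_GEM_INFO ioctls, and cmds_bytes must be 4-byte aligned and smaller than the bo):

#include <stdint.h>
#include <sys/ioctl.h>
#include "msm_drm.h"	/* uapi header, shipped with libdrm */

static int submit_one(int fd, uint32_t bo_handle, uint32_t cmds_bytes)
{
	struct drm_msm_gem_submit_bo bo = {
		.flags    = MSM_SUBMIT_BO_READ,
		.handle   = bo_handle,
		.presumed = 0,	/* kernel patches the real iova via relocs */
	};
	struct drm_msm_gem_submit_cmd cmd = {
		.type          = MSM_SUBMIT_CMD_BUF,
		.submit_idx    = 0,	/* index of the cmdstream bo in bos[] */
		.submit_offset = 0,
		.size          = cmds_bytes,
		.nr_relocs     = 0,
	};
	struct drm_msm_gem_submit req = {
		.pipe    = MSM_PIPE_3D0,
		.nr_bos  = 1,
		.nr_cmds = 1,
		.bos     = (uintptr_t)&bo,
		.cmds    = (uintptr_t)&cmd,
	};

	if (ioctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, &req))
		return -1;

	/* req.fence can now be passed to DRM_IOCTL_MSM_WAIT_FENCE */
	return (int)req.fence;
}
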
v6.8 (drivers/gpu/drm/msm/msm_gem_submit.c):

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"

/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable
 * error msgs for debugging, but we don't spam dmesg by default
 */
#define SUBMIT_ERROR(submit, fmt, ...) \
	DRM_DEV_DEBUG_DRIVER((submit)->dev->dev, fmt, ##__VA_ARGS__)

/*
 * Cmdstream submission:
 */

static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
		uint32_t nr_cmds)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_gem_submit *submit;
	uint64_t sz;
	int ret;

	sz = struct_size(submit, bos, nr_bos) +
			((u64)nr_cmds * sizeof(submit->cmd[0]));

	if (sz > SIZE_MAX)
		return ERR_PTR(-ENOMEM);

	submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
	if (!submit)
		return ERR_PTR(-ENOMEM);

	submit->hw_fence = msm_fence_alloc();
	if (IS_ERR(submit->hw_fence)) {
		ret = PTR_ERR(submit->hw_fence);
		kfree(submit);
		return ERR_PTR(ret);
	}

	ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue);
	if (ret) {
		kfree(submit->hw_fence);
		kfree(submit);
		return ERR_PTR(ret);
	}

	kref_init(&submit->ref);
	submit->dev = dev;
	submit->aspace = queue->ctx->aspace;
	submit->gpu = gpu;
	submit->cmd = (void *)&submit->bos[nr_bos];
	submit->queue = queue;
	submit->pid = get_pid(task_pid(current));
	submit->ring = gpu->rb[queue->ring_nr];
	submit->fault_dumped = false;

	/* Get a unique identifier for the submission for logging purposes */
	submit->ident = atomic_inc_return(&ident) - 1;

	INIT_LIST_HEAD(&submit->node);

	return submit;
}
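
Note the contrast with the v4.6 allocation, which computed sizeof(*submit) + (nr * sizeof(submit->bos[0])) in a plain int and could wrap for large nr. struct_size() comes from <linux/overflow.h> and saturates instead of wrapping; roughly (an illustration of the semantics, not the exact macro body):

	/* struct_size(submit, bos, nr_bos) behaves approximately like: */
	sz = size_add(sizeof(*submit),				/* header */
		      size_mul(nr_bos, sizeof(submit->bos[0])));	/* flex array */

	/* size_add()/size_mul() clamp to SIZE_MAX on overflow, and no
	 * allocator will satisfy a SIZE_MAX request, so a hostile nr_bos
	 * cannot produce a short allocation that later stores into
	 * submit->bos[i] would overrun.  The explicit "sz > SIZE_MAX"
	 * check above guards the extra 64-bit nr_cmds term on 32-bit.
	 */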

void __msm_gem_submit_destroy(struct kref *kref)
{
	struct msm_gem_submit *submit =
			container_of(kref, struct msm_gem_submit, ref);
	unsigned i;

	if (submit->fence_id) {
		spin_lock(&submit->queue->idr_lock);
		idr_remove(&submit->queue->fence_idr, submit->fence_id);
		spin_unlock(&submit->queue->idr_lock);
	}

	dma_fence_put(submit->user_fence);

	/*
	 * If the submit is freed before msm_job_run(), then hw_fence is
	 * just some pre-allocated memory, not a reference counted fence.
	 * Once the job runs and the hw_fence is initialized, it will
	 * have a refcount of at least one, since the submit holds a ref
	 * to the hw_fence.
	 */
	if (kref_read(&submit->hw_fence->refcount) == 0) {
		kfree(submit->hw_fence);
	} else {
		dma_fence_put(submit->hw_fence);
	}

	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);

	for (i = 0; i < submit->nr_cmds; i++)
		kfree(submit->cmd[i].relocs);

	kfree(submit);
}

static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
			ret = -EFAULT;
			i = 0;
			goto out;
		}

/* at least one of READ and/or WRITE flags should be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
			!(submit_bo.flags & MANDATORY_FLAGS)) {
			SUBMIT_ERROR(submit, "invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			i = 0;
			goto out;
		}

		submit->bos[i].handle = submit_bo.handle;
		submit->bos[i].flags = submit_bo.flags;
	}

	spin_lock(&file->table_lock);

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_gem_object *obj;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit->bos[i].handle);
		if (!obj) {
			SUBMIT_ERROR(submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_get(obj);

		submit->bos[i].obj = obj;
	}

out_unlock:
	spin_unlock(&file->table_lock);

out:
	submit->nr_bos = i;

	return ret;
}

static int submit_lookup_cmds(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	size_t sz;
	int ret = 0;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			SUBMIT_ERROR(submit, "invalid type: %08x\n", submit_cmd.type);
			return -EINVAL;
		}

		if (submit_cmd.size % 4) {
			SUBMIT_ERROR(submit, "non-aligned cmdstream buffer size: %u\n",
				     submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].offset = submit_cmd.submit_offset / 4;
		submit->cmd[i].idx  = submit_cmd.submit_idx;
		submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;

		userptr = u64_to_user_ptr(submit_cmd.relocs);

		sz = array_size(submit_cmd.nr_relocs,
				sizeof(struct drm_msm_gem_submit_reloc));
		/* check for overflow: */
		if (sz == SIZE_MAX) {
			ret = -ENOMEM;
			goto out;
		}
		submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
		if (!submit->cmd[i].relocs) {
			ret = -ENOMEM;
			goto out;
		}
		ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}
	}

out:
	return ret;
}

/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int ret;

	drm_exec_init(&submit->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, submit->nr_bos);

	drm_exec_until_all_locked (&submit->exec) {
		for (unsigned i = 0; i < submit->nr_bos; i++) {
			struct drm_gem_object *obj = submit->bos[i].obj;
			ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
			drm_exec_retry_on_contention(&submit->exec);
			if (ret)
				goto error;
		}
	}

	return 0;

error:
	return ret;
}
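
This replaces the open-coded ww_mutex retry dance of v4.6's submit_validate_objects(): drm_exec bundles the acquire context with the backoff-and-retry loop. The general shape of the API, as a hedged standalone sketch (hypothetical objs[] array, not the msm code):

#include <drm/drm_exec.h>

static int lock_all(struct drm_gem_object **objs, unsigned int n)
{
	struct drm_exec exec;
	unsigned int i;
	int ret;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, n);
	drm_exec_until_all_locked(&exec) {
		for (i = 0; i < n; i++) {
			/* lock the reservation and reserve one fence slot: */
			ret = drm_exec_prepare_obj(&exec, objs[i], 1);
			/* on contention, this unwinds every lock taken so
			 * far and restarts the until_all_locked block: */
			drm_exec_retry_on_contention(&exec);
			if (ret) {
				drm_exec_fini(&exec);	/* drops held locks */
				return ret;
			}
		}
	}

	/* success: all objs stay locked until drm_exec_fini() */
	return 0;
}

In submit_lock_objects() the error path deliberately leaves the locks held; submit_cleanup() calls drm_exec_fini() later, once the rest of the ioctl has run its course.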

static int submit_fence_sync(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		/* Otherwise userspace can ask for implicit sync to be
		 * disabled on specific buffers.  This is useful for internal
		 * usermode driver managed buffers, suballocation, etc.
		 */
		if (submit->bos[i].flags & MSM_SUBMIT_BO_NO_IMPLICIT)
			continue;

		ret = drm_sched_job_add_implicit_dependencies(&submit->base,
							      obj,
							      write);
		if (ret)
			break;
	}

	return ret;
}

static int submit_pin_objects(struct msm_gem_submit *submit)
{
	struct msm_drm_private *priv = submit->dev->dev_private;
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;
		struct msm_gem_vma *vma;

		/* if locking succeeded, pin bo: */
		vma = msm_gem_get_vma_locked(obj, submit->aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			break;
		}

		ret = msm_gem_pin_vma_locked(obj, vma);
		if (ret)
			break;

		submit->bos[i].iova = vma->iova;
	}

	/*
	 * A second loop while holding the LRU lock (a) avoids acquiring/dropping
	 * the LRU lock for each individual bo, while (b) avoiding holding the
	 * LRU lock while calling msm_gem_pin_vma_locked() (which could trigger
	 * get_pages() which could trigger reclaim.. and if we held the LRU lock
	 * could trigger deadlock with the shrinker).
	 */
	mutex_lock(&priv->lru.lock);
	for (i = 0; i < submit->nr_bos; i++) {
		msm_gem_pin_obj_locked(submit->bos[i].obj);
	}
	mutex_unlock(&priv->lru.lock);

	submit->bos_pinned = true;

	return ret;
}

static void submit_unpin_objects(struct msm_gem_submit *submit)
{
	if (!submit->bos_pinned)
		return;

	for (int i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;

		msm_gem_unpin_locked(obj);
	}

	submit->bos_pinned = false;
}

static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					   DMA_RESV_USAGE_WRITE);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					   DMA_RESV_USAGE_READ);
	}
}

static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct drm_gem_object **obj, uint64_t *iova)
{
	if (idx >= submit->nr_bos) {
		SUBMIT_ERROR(submit, "invalid buffer index: %u (out of %u)\n",
			     idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;

	return 0;
}

/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (offset % 4) {
		SUBMIT_ERROR(submit, "non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably want
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr_locked(obj);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
		uint32_t off;
		uint64_t iova;

		if (submit_reloc.submit_offset % 4) {
			SUBMIT_ERROR(submit, "non-aligned reloc offset: %u\n",
				     submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		if ((off >= (obj->size / 4)) ||
				(off < last_offset)) {
			SUBMIT_ERROR(submit, "invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova);
		if (ret)
			goto out;

		iova += submit_reloc.reloc_offset;

		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr_locked(obj);

	return ret;
}

/* Cleanup submit at end of ioctl.  In the error case, this also drops
 * references, unpins, and drops active refcnt.  In the non-error case,
 * this is done when the submit is retired.
 */
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
	if (error) {
		submit_unpin_objects(submit);
		/* job wasn't enqueued to scheduler, so early retirement: */
		msm_submit_retire(submit);
	}

	if (submit->exec.objects)
		drm_exec_fini(&submit->exec);
}

void msm_submit_retire(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;

		drm_gem_object_put(obj);
	}
}

struct msm_submit_post_dep {
	struct drm_syncobj *syncobj;
	uint64_t point;
	struct dma_fence_chain *chain;
};

static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
                                           struct drm_file *file,
                                           uint64_t in_syncobjs_addr,
                                           uint32_t nr_in_syncobjs,
                                           size_t syncobj_stride)
{
	struct drm_syncobj **syncobjs = NULL;
	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
	int ret = 0;
	uint32_t i, j;

	syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
	                   GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!syncobjs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_in_syncobjs; ++i) {
		uint64_t address = in_syncobjs_addr + i * syncobj_stride;

		if (copy_from_user(&syncobj_desc,
			           u64_to_user_ptr(address),
			           min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		if (syncobj_desc.point &&
		    !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
			ret = -EOPNOTSUPP;
			break;
		}

		if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
			ret = -EINVAL;
			break;
		}

		ret = drm_sched_job_add_syncobj_dependency(&submit->base, file,
							   syncobj_desc.handle, syncobj_desc.point);
		if (ret)
			break;

		if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
			syncobjs[i] =
				drm_syncobj_find(file, syncobj_desc.handle);
			if (!syncobjs[i]) {
				ret = -EINVAL;
				break;
			}
		}
	}

	if (ret) {
		for (j = 0; j <= i; ++j) {
			if (syncobjs[j])
				drm_syncobj_put(syncobjs[j]);
		}
		kfree(syncobjs);
		return ERR_PTR(ret);
	}
	return syncobjs;
}
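
On the userspace side, each element of args->in_syncobjs and args->out_syncobjs is a struct drm_msm_gem_submit_syncobj. A hedged sketch of wiring one wait syncobj and one signal syncobj into a submission (wait_handle/signal_handle are hypothetical handles obtained earlier via DRM_IOCTL_SYNCOBJ_CREATE, and the descriptor arrays must stay alive across the ioctl):

#include <stdint.h>
#include "msm_drm.h"	/* uapi header, shipped with libdrm */

static void add_syncobjs(struct drm_msm_gem_submit *req,
			 struct drm_msm_gem_submit_syncobj *in,
			 struct drm_msm_gem_submit_syncobj *out,
			 uint32_t wait_handle, uint32_t signal_handle)
{
	in->handle = wait_handle;
	in->flags  = MSM_SUBMIT_SYNCOBJ_RESET;	/* drop fence once consumed */
	in->point  = 0;				/* nonzero = timeline point */

	out->handle = signal_handle;
	out->flags  = 0;
	out->point  = 0;

	req->flags          |= MSM_SUBMIT_SYNCOBJ_IN | MSM_SUBMIT_SYNCOBJ_OUT;
	req->in_syncobjs     = (uintptr_t)in;
	req->nr_in_syncobjs  = 1;
	req->out_syncobjs    = (uintptr_t)out;
	req->nr_out_syncobjs = 1;
	req->syncobj_stride  = sizeof(*in);
}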

static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
                               uint32_t nr_syncobjs)
{
	uint32_t i;

	for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
		if (syncobjs[i])
			drm_syncobj_replace_fence(syncobjs[i], NULL);
	}
}

static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
                                                       struct drm_file *file,
                                                       uint64_t syncobjs_addr,
                                                       uint32_t nr_syncobjs,
                                                       size_t syncobj_stride)
{
	struct msm_submit_post_dep *post_deps;
	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
	int ret = 0;
	uint32_t i, j;

	post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
			    GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!post_deps)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_syncobjs; ++i) {
		uint64_t address = syncobjs_addr + i * syncobj_stride;

		if (copy_from_user(&syncobj_desc,
			           u64_to_user_ptr(address),
			           min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		post_deps[i].point = syncobj_desc.point;

		if (syncobj_desc.flags) {
			ret = -EINVAL;
			break;
		}

		if (syncobj_desc.point) {
			if (!drm_core_check_feature(dev,
			                            DRIVER_SYNCOBJ_TIMELINE)) {
				ret = -EOPNOTSUPP;
				break;
			}

			post_deps[i].chain = dma_fence_chain_alloc();
			if (!post_deps[i].chain) {
				ret = -ENOMEM;
				break;
			}
		}

		post_deps[i].syncobj =
			drm_syncobj_find(file, syncobj_desc.handle);
		if (!post_deps[i].syncobj) {
			ret = -EINVAL;
			break;
		}
	}

	if (ret) {
		for (j = 0; j <= i; ++j) {
			dma_fence_chain_free(post_deps[j].chain);
			if (post_deps[j].syncobj)
				drm_syncobj_put(post_deps[j].syncobj);
		}

		kfree(post_deps);
		return ERR_PTR(ret);
	}

	return post_deps;
}

static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
                                  uint32_t count, struct dma_fence *fence)
{
	uint32_t i;

	for (i = 0; post_deps && i < count; ++i) {
		if (post_deps[i].chain) {
			drm_syncobj_add_point(post_deps[i].syncobj,
			                      post_deps[i].chain,
			                      fence, post_deps[i].point);
			post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(post_deps[i].syncobj,
			                          fence);
		}
	}
}

int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit = NULL;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_gpu_submitqueue *queue;
	struct msm_ringbuffer *ring;
	struct msm_submit_post_dep *post_deps = NULL;
	struct drm_syncobj **syncobjs_to_reset = NULL;
	int out_fence_fd = -1;
	unsigned i;
	int ret;

	if (!gpu)
		return -ENXIO;

	if (args->pad)
		return -EINVAL;

	if (unlikely(!ctx->aspace) && !capable(CAP_SYS_RAWIO)) {
		DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n");
		return -EPERM;
	}

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
		return -EINVAL;

	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
		return -EINVAL;

	if (args->flags & MSM_SUBMIT_SUDO) {
		if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
		    !capable(CAP_SYS_RAWIO))
			return -EINVAL;
	}

	queue = msm_submitqueue_get(ctx, args->queueid);
	if (!queue)
		return -ENOENT;

	ring = gpu->rb[queue->ring_nr];

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto out_post_unlock;
		}
	}

	submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
	if (IS_ERR(submit)) {
		ret = PTR_ERR(submit);
		goto out_post_unlock;
	}

	trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
		args->nr_bos, args->nr_cmds);

	ret = mutex_lock_interruptible(&queue->lock);
	if (ret)
		goto out_post_unlock;

	if (args->flags & MSM_SUBMIT_SUDO)
		submit->in_rb = true;

	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(args->fence_fd);

		if (!in_fence) {
			ret = -EINVAL;
			goto out_unlock;
		}

		ret = drm_sched_job_add_dependency(&submit->base, in_fence);
		if (ret)
			goto out_unlock;
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
		syncobjs_to_reset = msm_parse_deps(submit, file,
		                                   args->in_syncobjs,
		                                   args->nr_in_syncobjs,
		                                   args->syncobj_stride);
		if (IS_ERR(syncobjs_to_reset)) {
			ret = PTR_ERR(syncobjs_to_reset);
			goto out_unlock;
		}
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
		post_deps = msm_parse_post_deps(dev, file,
		                                args->out_syncobjs,
		                                args->nr_out_syncobjs,
		                                args->syncobj_stride);
		if (IS_ERR(post_deps)) {
			ret = PTR_ERR(post_deps);
			goto out_unlock;
		}
	}

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_lookup_cmds(submit, args, file);
	if (ret)
		goto out;

	/* copy_*_user while holding a ww ticket upsets lockdep */
	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
		ret = submit_fence_sync(submit);
		if (ret)
			goto out;
	}

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_gem_object *obj;
		uint64_t iova;

		ret = submit_bo(submit, submit->cmd[i].idx, &obj, &iova);
		if (ret)
			goto out;

		if (!submit->cmd[i].size ||
			((submit->cmd[i].size + submit->cmd[i].offset) >
				obj->size / 4)) {
			SUBMIT_ERROR(submit, "invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);

		if (likely(!submit->cmd[i].nr_relocs))
			continue;

		if (!gpu->allow_relocs) {
			SUBMIT_ERROR(submit, "relocs not allowed\n");
			ret = -EINVAL;
			goto out;
		}

		ret = submit_reloc(submit, obj, submit->cmd[i].offset * 4,
				submit->cmd[i].nr_relocs, submit->cmd[i].relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;

	idr_preload(GFP_KERNEL);

	spin_lock(&queue->idr_lock);

	/*
	 * If using userspace provided seqno fence, validate that the id
	 * is available before arming sched job.  Since access to fence_idr
	 * is serialized on the queue lock, the slot should be still avail
	 * after the job is armed
	 */
	if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
			(!args->fence || idr_find(&queue->fence_idr, args->fence))) {
		spin_unlock(&queue->idr_lock);
		idr_preload_end();
		ret = -EINVAL;
		goto out;
	}

	drm_sched_job_arm(&submit->base);

	submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);

	if (args->flags & MSM_SUBMIT_FENCE_SN_IN) {
		/*
		 * Userspace has assigned the seqno fence that it wants
		 * us to use.  It is an error to pick a fence sequence
		 * number that is not available.
		 */
		submit->fence_id = args->fence;
		ret = idr_alloc_u32(&queue->fence_idr, submit->user_fence,
				    &submit->fence_id, submit->fence_id,
				    GFP_NOWAIT);
		/*
		 * We've already validated that the fence_id slot is valid,
		 * so if idr_alloc_u32 failed, it is a kernel bug
		 */
		WARN_ON(ret);
	} else {
		/*
		 * Allocate an id which can be used by WAIT_FENCE ioctl to map
		 * back to the underlying fence.
		 */
		submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
						    submit->user_fence, 1,
						    INT_MAX, GFP_NOWAIT);
	}

	spin_unlock(&queue->idr_lock);
	idr_preload_end();

	if (submit->fence_id < 0) {
		ret = submit->fence_id;
		submit->fence_id = 0;
	}

	if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		struct sync_file *sync_file = sync_file_create(submit->user_fence);
		if (!sync_file) {
			ret = -ENOMEM;
		} else {
			fd_install(out_fence_fd, sync_file->file);
			args->fence_fd = out_fence_fd;
		}
	}

	if (ret)
		goto out;

	submit_attach_object_fences(submit);

	/* The scheduler owns a ref now: */
	msm_gem_submit_get(submit);

	msm_rd_dump_submit(priv->rd, submit, NULL);

	drm_sched_entity_push_job(&submit->base);

	args->fence = submit->fence_id;
	queue->last_fence = submit->fence_id;

	msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
	msm_process_post_deps(post_deps, args->nr_out_syncobjs,
	                      submit->user_fence);

out:
	submit_cleanup(submit, !!ret);
out_unlock:
	mutex_unlock(&queue->lock);
out_post_unlock:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);

	if (!IS_ERR_OR_NULL(submit)) {
		msm_gem_submit_put(submit);
	} else {
		/*
		 * If the submit hasn't yet taken ownership of the queue
		 * then we need to drop the reference ourself:
		 */
		msm_submitqueue_put(queue);
	}
	if (!IS_ERR_OR_NULL(post_deps)) {
		for (i = 0; i < args->nr_out_syncobjs; ++i) {
			kfree(post_deps[i].chain);
			drm_syncobj_put(post_deps[i].syncobj);
		}
		kfree(post_deps);
	}

	if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
		for (i = 0; i < args->nr_in_syncobjs; ++i) {
			if (syncobjs_to_reset[i])
				drm_syncobj_put(syncobjs_to_reset[i]);
		}
		kfree(syncobjs_to_reset);
	}

	return ret;
}
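
Downstream of a successful return, args->fence (and optionally the sync_file fd) is how userspace observes completion. A hedged sketch of both paths (req is the drm_msm_gem_submit that just succeeded, fd the DRM device fd):

#include <poll.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include "msm_drm.h"	/* uapi header, shipped with libdrm */

static int wait_submit(int fd, const struct drm_msm_gem_submit *req)
{
	/* option 1: wait on the per-queue seqno fence id: */
	struct drm_msm_wait_fence wait = {
		.fence   = req->fence,
		.queueid = req->queueid,
		.timeout = { .tv_sec = 1, .tv_nsec = 0 },
	};

	if (req->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		/* option 2: the returned fence fd is pollable like any
		 * other sync_file: */
		struct pollfd pfd = { .fd = req->fence_fd, .events = POLLIN };
		return poll(&pfd, 1, 1000) == 1 ? 0 : -1;
	}

	return ioctl(fd, DRM_IOCTL_MSM_WAIT_FENCE, &wait);
}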