drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c (v5.9)
  1/*
  2 * Copyright 2008 Advanced Micro Devices, Inc.
  3 * Copyright 2008 Red Hat Inc.
  4 * Copyright 2009 Jerome Glisse.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the "Software"),
  8 * to deal in the Software without restriction, including without limitation
  9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 10 * and/or sell copies of the Software, and to permit persons to whom the
 11 * Software is furnished to do so, subject to the following conditions:
 12 *
 13 * The above copyright notice and this permission notice shall be included in
 14 * all copies or substantial portions of the Software.
 15 *
 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 22 * OTHER DEALINGS IN THE SOFTWARE.
 23 *
 24 * Authors: Dave Airlie
 25 *          Alex Deucher
 26 *          Jerome Glisse
 27 *          Christian König
 28 */
 29#include <linux/seq_file.h>
 30#include <linux/slab.h>
 31
 32#include <drm/amdgpu_drm.h>
 33#include <drm/drm_debugfs.h>
 34
 35#include "amdgpu.h"
 36#include "atom.h"
 37#include "amdgpu_trace.h"
 38
 39#define AMDGPU_IB_TEST_TIMEOUT	msecs_to_jiffies(1000)
 40#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT	msecs_to_jiffies(2000)
 41
 42/*
 43 * IB
 44 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 45 * commands are stored.  You can put a pointer to the IB in the
 46 * command ring and the hw will fetch the commands from the IB
 47 * and execute them.  Generally userspace acceleration drivers
 48 * produce command buffers which are sent to the kernel and
 49 * put in IBs for execution by the requested ring.
 50 */
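A minimal sketch of the lifecycle this comment describes, using the functions defined below in this file and assuming adev and ring pointers are already in scope (the pool choice, size, and fill are illustrative, not taken from this file):

	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	int r;

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DELAYED, &ib);
	if (r)
		return r;

	/* fill ib.ptr[] with ring-specific command packets */
	ib.length_dw = 16;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (!r)
		dma_fence_wait_timeout(f, false, AMDGPU_IB_TEST_TIMEOUT);

	amdgpu_ib_free(adev, &ib, f);
	dma_fence_put(f);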
 51
 52/**
 53 * amdgpu_ib_get - request an IB (Indirect Buffer)
 54 *
 55 * @adev: amdgpu_device pointer
 56 * @size: requested IB size
 57 * @ib: IB object returned
 58 *
 59 * Request an IB (all asics).  IBs are allocated using the
 60 * suballocator.
 61 * Returns 0 on success, error on failure.
 62 */
 63int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 64		  unsigned size, enum amdgpu_ib_pool_type pool_type,
 65		  struct amdgpu_ib *ib)
 66{
 67	int r;
 68
 69	if (size) {
 70		r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
 71				      &ib->sa_bo, size, 256);
 72		if (r) {
 73			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
 74			return r;
 75		}
 76
 77		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
 78
 79		if (!vm)
 80			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
 81	}
 82
 83	return 0;
 84}
 85
 86/**
 87 * amdgpu_ib_free - free an IB (Indirect Buffer)
 88 *
 89 * @adev: amdgpu_device pointer
 90 * @ib: IB object to free
 91 * @f: the fence the SA bo must wait on before the IB allocation is reused
 92 *
 93 * Free an IB (all asics).
 94 */
 95void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
 96		    struct dma_fence *f)
 97{
 98	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
 99}
100
101/**
102 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
103 *
104 * @ring: ring the IBs are scheduled on
105 * @num_ibs: number of IBs to schedule
106 * @ibs: IB objects to schedule
107 * @f: fence created during this submission
108 *
109 * Schedule an IB on the associated ring (all asics).
110 * Returns 0 on success, error on failure.
111 *
112 * On SI, there are two parallel engines fed from the primary ring,
113 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
114 * resource descriptors have moved to memory, the CE allows you to
115 * prime the caches while the DE is updating register state so that
116 * the resource descriptors will already be in cache when the draw is
117 * processed.  To accomplish this, the userspace driver submits two
118 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
119 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
120 * to SI there was just a DE IB.
121 */
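A rough sketch of the two-IB submission described above, as the kernel-side ibs[] array might look (AMDGPU_IB_FLAG_CE and AMDGPU_IB_FLAG_PREAMBLE come from the amdgpu uAPI header and the preamble handling later in this function; everything else is illustrative):

	struct amdgpu_ib ibs[2] = {};
	struct dma_fence *f = NULL;
	int r;

	/* ibs[0]: CONST_IB for the CE, marked as a preamble */
	ibs[0].flags = AMDGPU_IB_FLAG_CE | AMDGPU_IB_FLAG_PREAMBLE;
	/* ibs[1]: DE IB carrying the actual draw commands */
	ibs[1].flags = 0;

	r = amdgpu_ib_schedule(ring, 2, ibs, job, &f);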
122int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
123		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
124		       struct dma_fence **f)
125{
126	struct amdgpu_device *adev = ring->adev;
127	struct amdgpu_ib *ib = &ibs[0];
128	struct dma_fence *tmp = NULL;
129	bool skip_preamble, need_ctx_switch;
130	unsigned patch_offset = ~0;
131	struct amdgpu_vm *vm;
132	uint64_t fence_ctx;
133	uint32_t status = 0, alloc_size;
134	unsigned fence_flags = 0;
135	bool secure;
136
137	unsigned i;
138	int r = 0;
139	bool need_pipe_sync = false;
140
141	if (num_ibs == 0)
142		return -EINVAL;
143
144	/* ring tests don't use a job */
145	if (job) {
146		vm = job->vm;
147		fence_ctx = job->base.s_fence ?
148			job->base.s_fence->scheduled.context : 0;
149	} else {
150		vm = NULL;
151		fence_ctx = 0;
152	}
153
154	if (!ring->sched.ready) {
155		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
156		return -EINVAL;
157	}
158
159	if (vm && !job->vmid) {
160		dev_err(adev->dev, "VM IB without ID\n");
161		return -EINVAL;
162	}
163
164	if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
165	    (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)) {
166		dev_err(adev->dev, "secure submissions not supported on compute rings\n");
167		return -EINVAL;
168	}
169
170	alloc_size = ring->funcs->emit_frame_size + num_ibs *
171		ring->funcs->emit_ib_size;
172
173	r = amdgpu_ring_alloc(ring, alloc_size);
174	if (r) {
175		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
176		return r;
177	}
178
179	need_ctx_switch = ring->current_ctx != fence_ctx;
180	if (ring->funcs->emit_pipeline_sync && job &&
181	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
182	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
183	     amdgpu_vm_need_pipeline_sync(ring, job))) {
184		need_pipe_sync = true;
185
186		if (tmp)
187			trace_amdgpu_ib_pipe_sync(job, tmp);
188
189		dma_fence_put(tmp);
190	}
191
192	if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
193		ring->funcs->emit_mem_sync(ring);
194
195	if (ring->funcs->insert_start)
196		ring->funcs->insert_start(ring);
197
198	if (job) {
199		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
200		if (r) {
201			amdgpu_ring_undo(ring);
202			return r;
203		}
204	}
205
206	if (job && ring->funcs->init_cond_exec)
207		patch_offset = amdgpu_ring_init_cond_exec(ring);
208
209#ifdef CONFIG_X86_64
210	if (!(adev->flags & AMD_IS_APU))
211#endif
212	{
213		if (ring->funcs->emit_hdp_flush)
214			amdgpu_ring_emit_hdp_flush(ring);
215		else
216			amdgpu_asic_flush_hdp(adev, ring);
217	}
218
219	if (need_ctx_switch)
220		status |= AMDGPU_HAVE_CTX_SWITCH;
221
222	skip_preamble = ring->current_ctx == fence_ctx;
223	if (job && ring->funcs->emit_cntxcntl) {
224		status |= job->preamble_status;
225		status |= job->preemption_status;
226		amdgpu_ring_emit_cntxcntl(ring, status);
227	}
228
 229	/* Set up the initial TMZ (trusted memory zone) state and emit it.
 230	 */
231	secure = false;
232	if (job && ring->funcs->emit_frame_cntl) {
233		secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
234		amdgpu_ring_emit_frame_cntl(ring, true, secure);
235	}
236
237	for (i = 0; i < num_ibs; ++i) {
238		ib = &ibs[i];
239
240		/* drop preamble IBs if we don't have a context switch */
241		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
242		    skip_preamble &&
243		    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
244		    !amdgpu_mcbp &&
245		    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
246			continue;
247
248		if (job && ring->funcs->emit_frame_cntl) {
249			if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
250				amdgpu_ring_emit_frame_cntl(ring, false, secure);
251				secure = !secure;
252				amdgpu_ring_emit_frame_cntl(ring, true, secure);
253			}
254		}
255
256		amdgpu_ring_emit_ib(ring, job, ib, status);
257		status &= ~AMDGPU_HAVE_CTX_SWITCH;
258	}
259
260	if (job && ring->funcs->emit_frame_cntl)
261		amdgpu_ring_emit_frame_cntl(ring, false, secure);
262
263#ifdef CONFIG_X86_64
264	if (!(adev->flags & AMD_IS_APU))
265#endif
266		amdgpu_asic_invalidate_hdp(adev, ring);
267
268	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
269		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
270
271	/* wrap the last IB with fence */
272	if (job && job->uf_addr) {
273		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
274				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
275	}
276
277	r = amdgpu_fence_emit(ring, f, fence_flags);
278	if (r) {
279		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
280		if (job && job->vmid)
281			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
282		amdgpu_ring_undo(ring);
283		return r;
284	}
285
286	if (ring->funcs->insert_end)
287		ring->funcs->insert_end(ring);
288
289	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
290		amdgpu_ring_patch_cond_exec(ring, patch_offset);
291
292	ring->current_ctx = fence_ctx;
293	if (vm && ring->funcs->emit_switch_buffer)
294		amdgpu_ring_emit_switch_buffer(ring);
295	amdgpu_ring_commit(ring);
296	return 0;
297}
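To make the frame-control (TMZ) toggling in the IB loop above concrete: for a submission whose IBs carry the secure flags {secure, clear, secure}, the ring sees emit_frame_cntl(start, secure), ibs[0], emit_frame_cntl(end, secure), emit_frame_cntl(start, clear), ibs[1], emit_frame_cntl(end, clear), emit_frame_cntl(start, secure), ibs[2], and finally emit_frame_cntl(end, secure) after the loop.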
298
299/**
300 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
301 *
302 * @adev: amdgpu_device pointer
303 *
304 * Initialize the suballocators that manage the pools of memory
305 * for use as IBs (all asics).
306 * Returns 0 on success, error on failure.
307 */
308int amdgpu_ib_pool_init(struct amdgpu_device *adev)
309{
310	unsigned size;
311	int r, i;
312
313	if (adev->ib_pool_ready)
314		return 0;
315
316	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
317		if (i == AMDGPU_IB_POOL_DIRECT)
318			size = PAGE_SIZE * 2;
319		else
320			size = AMDGPU_IB_POOL_SIZE;
321
322		r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
323					      size, AMDGPU_GPU_PAGE_SIZE,
324					      AMDGPU_GEM_DOMAIN_GTT);
325		if (r)
326			goto error;
327	}
328	adev->ib_pool_ready = true;
329
330	return 0;
331
332error:
333	while (i--)
334		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
335	return r;
336}
337
338/**
339 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
340 *
341 * @adev: amdgpu_device pointer
342 *
343 * Tear down the suballocators that manage the pools of memory
344 * for use as IBs (all asics).
345 */
346void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
347{
348	int i;
349
350	if (!adev->ib_pool_ready)
351		return;
352
353	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
354		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
355	adev->ib_pool_ready = false;
356}
357
358/**
359 * amdgpu_ib_ring_tests - test IBs on the rings
360 *
361 * @adev: amdgpu_device pointer
362 *
363 * Test an IB (Indirect Buffer) on each ring.
364 * If the test fails, disable the ring.
365 * Returns 0 on success, error if the primary GFX ring
366 * IB test fails.
367 */
368int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
369{
370	long tmo_gfx, tmo_mm;
371	int r, ret = 0;
 372	unsigned i;
373
374	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
375	if (amdgpu_sriov_vf(adev)) {
 376		/* On the hypervisor side, the MM engines are not scheduled together
 377		 * with the CP and SDMA engines, so even in exclusive mode an MM
 378		 * engine could still be running on another VF.  The IB test timeout
 379		 * for MM engines under SR-IOV therefore has to be long; 8 seconds
 380		 * should be enough for the MM engine to come back to this VF.
381		 */
382		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
383	}
384
385	if (amdgpu_sriov_runtime(adev)) {
 386		/* The CP and SDMA engines are scheduled together, so the timeout
 387		 * has to be wide enough to cover the time spent waiting for them
 388		 * to come back while the VF is in SR-IOV runtime mode.
 389		 */
390		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
391	} else if (adev->gmc.xgmi.hive_id) {
392		tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
393	}
394
395	for (i = 0; i < adev->num_rings; ++i) {
396		struct amdgpu_ring *ring = adev->rings[i];
397		long tmo;
398
399		/* KIQ rings don't have an IB test because we never submit IBs
400		 * to them and they have no interrupt support.
401		 */
402		if (!ring->sched.ready || !ring->funcs->test_ib)
403			continue;
404
 405		/* MM engines need more time */
406		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
407			ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
408			ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
409			ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
410			ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
411			ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
412			tmo = tmo_mm;
413		else
414			tmo = tmo_gfx;
415
416		r = amdgpu_ring_test_ib(ring, tmo);
417		if (!r) {
418			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
419				      ring->name);
420			continue;
421		}
422
423		ring->sched.ready = false;
424		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
425			  ring->name, r);
426
427		if (ring == &adev->gfx.gfx_ring[0]) {
428			/* oh, oh, that's really bad */
429			adev->accel_working = false;
430			return r;
431
432		} else {
433			ret = r;
434		}
435	}
436	return ret;
437}
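With the defines at the top of the file, the per-ring timeout works out to 1 second by default, 2 seconds for GFX rings on an XGMI hive, and 8 seconds for MM rings under SR-IOV (and for the CP/SDMA rings as well when in SR-IOV runtime mode). The call site is not part of this file; a rough sketch of how a caller might use it (the surrounding context is an assumption):

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		dev_err(adev->dev, "IB ring tests failed (%d).\n", r);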
438
439/*
440 * Debugfs info
441 */
442#if defined(CONFIG_DEBUG_FS)
443
444static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
445{
446	struct drm_info_node *node = (struct drm_info_node *) m->private;
447	struct drm_device *dev = node->minor->dev;
448	struct amdgpu_device *adev = dev->dev_private;
449
450	seq_printf(m, "--------------------- DELAYED --------------------- \n");
451	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
452				     m);
453	seq_printf(m, "-------------------- IMMEDIATE -------------------- \n");
454	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
455				     m);
456	seq_printf(m, "--------------------- DIRECT ---------------------- \n");
457	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);
458
459	return 0;
460}
461
462static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
463	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
464};
465
466#endif
467
468int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
469{
470#if defined(CONFIG_DEBUG_FS)
471	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list,
472					ARRAY_SIZE(amdgpu_debugfs_sa_list));
473#else
474	return 0;
475#endif
476}
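Once registered, the suballocator state dumped by amdgpu_debugfs_sa_info() can be read from userspace through the DRM debugfs directory, typically something like /sys/kernel/debug/dri/0/amdgpu_sa_info (the exact minor number varies; the path is an assumption based on the standard DRM debugfs layout, not something stated in this file).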
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c (v4.17)
  1/*
  2 * Copyright 2008 Advanced Micro Devices, Inc.
  3 * Copyright 2008 Red Hat Inc.
  4 * Copyright 2009 Jerome Glisse.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the "Software"),
  8 * to deal in the Software without restriction, including without limitation
  9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 10 * and/or sell copies of the Software, and to permit persons to whom the
 11 * Software is furnished to do so, subject to the following conditions:
 12 *
 13 * The above copyright notice and this permission notice shall be included in
 14 * all copies or substantial portions of the Software.
 15 *
 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 22 * OTHER DEALINGS IN THE SOFTWARE.
 23 *
 24 * Authors: Dave Airlie
 25 *          Alex Deucher
 26 *          Jerome Glisse
 27 *          Christian König
 28 */
 29#include <linux/seq_file.h>
 30#include <linux/slab.h>
 31#include <drm/drmP.h>
 32#include <drm/amdgpu_drm.h>
 33#include "amdgpu.h"
 34#include "atom.h"
 35
 36#define AMDGPU_IB_TEST_TIMEOUT	msecs_to_jiffies(1000)
 37
 38/*
 39 * IB
 40 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 41 * commands are stored.  You can put a pointer to the IB in the
 42 * command ring and the hw will fetch the commands from the IB
 43 * and execute them.  Generally userspace acceleration drivers
 44 * produce command buffers which are sent to the kernel and
 45 * put in IBs for execution by the requested ring.
 46 */
 47static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
 48
 49/**
 50 * amdgpu_ib_get - request an IB (Indirect Buffer)
 51 *
 52 * @adev: amdgpu_device pointer
 53 * @size: requested IB size
 54 * @ib: IB object returned
 55 *
 56 * Request an IB (all asics).  IBs are allocated using the
 57 * suballocator.
 58 * Returns 0 on success, error on failure.
 59 */
 60int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 61		  unsigned size, struct amdgpu_ib *ib)
 62{
 63	int r;
 64
 65	if (size) {
 66		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
 67				      &ib->sa_bo, size, 256);
 68		if (r) {
 69			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
 70			return r;
 71		}
 72
 73		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
 74
 75		if (!vm)
 76			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
 77	}
 78
 79	return 0;
 80}
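Note the older API here: in v4.17 there is a single IB suballocator (adev->ring_tmp_bo) rather than the per-type pools of v5.9, so there is no pool_type argument yet. A call in this version would look like (sketch, size illustrative):

	r = amdgpu_ib_get(adev, NULL, 256, &ib);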
 81
 82/**
 83 * amdgpu_ib_free - free an IB (Indirect Buffer)
 84 *
 85 * @adev: amdgpu_device pointer
 86 * @ib: IB object to free
 87 * @f: the fence the SA bo must wait on before the IB allocation is reused
 88 *
 89 * Free an IB (all asics).
 90 */
 91void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
 92		    struct dma_fence *f)
 93{
 94	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
 95}
 96
 97/**
 98 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 99 *
100 * @ring: ring the IBs are scheduled on
101 * @num_ibs: number of IBs to schedule
102 * @ibs: IB objects to schedule
103 * @f: fence created during this submission
104 *
105 * Schedule an IB on the associated ring (all asics).
106 * Returns 0 on success, error on failure.
107 *
108 * On SI, there are two parallel engines fed from the primary ring,
109 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
110 * resource descriptors have moved to memory, the CE allows you to
111 * prime the caches while the DE is updating register state so that
112 * the resource descriptors will already be in cache when the draw is
113 * processed.  To accomplish this, the userspace driver submits two
114 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
115 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
116 * to SI there was just a DE IB.
117 */
118int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
119		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
120		       struct dma_fence **f)
121{
122	struct amdgpu_device *adev = ring->adev;
123	struct amdgpu_ib *ib = &ibs[0];
124	struct dma_fence *tmp = NULL;
125	bool skip_preamble, need_ctx_switch;
126	unsigned patch_offset = ~0;
127	struct amdgpu_vm *vm;
128	uint64_t fence_ctx;
129	uint32_t status = 0, alloc_size;
130
131	unsigned i;
132	int r = 0;
133	bool need_pipe_sync = false;
134
135	if (num_ibs == 0)
136		return -EINVAL;
137
138	/* ring tests don't use a job */
139	if (job) {
140		vm = job->vm;
141		fence_ctx = job->fence_ctx;
142	} else {
143		vm = NULL;
144		fence_ctx = 0;
145	}
146
147	if (!ring->ready) {
148		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
149		return -EINVAL;
150	}
151
152	if (vm && !job->vmid) {
153		dev_err(adev->dev, "VM IB without ID\n");
154		return -EINVAL;
155	}
156
157	alloc_size = ring->funcs->emit_frame_size + num_ibs *
158		ring->funcs->emit_ib_size;
159
160	r = amdgpu_ring_alloc(ring, alloc_size);
161	if (r) {
162		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
163		return r;
164	}
165
166	if (ring->funcs->emit_pipeline_sync && job &&
167	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
168	     amdgpu_vm_need_pipeline_sync(ring, job))) {
169		need_pipe_sync = true;
170		dma_fence_put(tmp);
171	}
172
173	if (ring->funcs->insert_start)
174		ring->funcs->insert_start(ring);
175
176	if (job) {
177		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
178		if (r) {
179			amdgpu_ring_undo(ring);
180			return r;
181		}
182	}
183
184	if (job && ring->funcs->init_cond_exec)
185		patch_offset = amdgpu_ring_init_cond_exec(ring);
186
187#ifdef CONFIG_X86_64
188	if (!(adev->flags & AMD_IS_APU))
189#endif
190	{
191		if (ring->funcs->emit_hdp_flush)
192			amdgpu_ring_emit_hdp_flush(ring);
193		else
194			amdgpu_asic_flush_hdp(adev, ring);
195	}
196
197	skip_preamble = ring->current_ctx == fence_ctx;
198	need_ctx_switch = ring->current_ctx != fence_ctx;
199	if (job && ring->funcs->emit_cntxcntl) {
200		if (need_ctx_switch)
201			status |= AMDGPU_HAVE_CTX_SWITCH;
202		status |= job->preamble_status;
203
204		amdgpu_ring_emit_cntxcntl(ring, status);
205	}
206
207	for (i = 0; i < num_ibs; ++i) {
208		ib = &ibs[i];
209
210		/* drop preamble IBs if we don't have a context switch */
211		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
212			skip_preamble &&
213			!(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
214			!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
215			continue;
216
217		amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
218				    need_ctx_switch);
219		need_ctx_switch = false;
220	}
221
222	if (ring->funcs->emit_tmz)
223		amdgpu_ring_emit_tmz(ring, false);
224
225#ifdef CONFIG_X86_64
226	if (!(adev->flags & AMD_IS_APU))
227#endif
228		amdgpu_asic_invalidate_hdp(adev, ring);
229
230	r = amdgpu_fence_emit(ring, f);
231	if (r) {
232		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
233		if (job && job->vmid)
234			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
235		amdgpu_ring_undo(ring);
236		return r;
237	}
238
239	if (ring->funcs->insert_end)
240		ring->funcs->insert_end(ring);
241
242	/* wrap the last IB with fence */
243	if (job && job->uf_addr) {
244		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
245				       AMDGPU_FENCE_FLAG_64BIT);
246	}
247
248	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
249		amdgpu_ring_patch_cond_exec(ring, patch_offset);
250
251	ring->current_ctx = fence_ctx;
252	if (vm && ring->funcs->emit_switch_buffer)
253		amdgpu_ring_emit_switch_buffer(ring);
254	amdgpu_ring_commit(ring);
255	return 0;
256}
257
258/**
259 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
260 *
261 * @adev: amdgpu_device pointer
262 *
263 * Initialize the suballocator to manage a pool of memory
264 * for use as IBs (all asics).
265 * Returns 0 on success, error on failure.
266 */
267int amdgpu_ib_pool_init(struct amdgpu_device *adev)
268{
269	int r;
270
271	if (adev->ib_pool_ready) {
272		return 0;
273	}
274	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
275				      AMDGPU_IB_POOL_SIZE*64*1024,
276				      AMDGPU_GPU_PAGE_SIZE,
277				      AMDGPU_GEM_DOMAIN_GTT);
278	if (r) {
279		return r;
280	}
281
282	adev->ib_pool_ready = true;
283	if (amdgpu_debugfs_sa_init(adev)) {
284		dev_err(adev->dev, "failed to register debugfs file for SA\n");
285	}
286	return 0;
287}
288
289/**
290 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
291 *
292 * @adev: amdgpu_device pointer
293 *
294 * Tear down the suballocator managing the pool of memory
295 * for use as IBs (all asics).
296 */
297void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
298{
299	if (adev->ib_pool_ready) {
300		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
301		adev->ib_pool_ready = false;
302	}
303}
304
305/**
306 * amdgpu_ib_ring_tests - test IBs on the rings
307 *
308 * @adev: amdgpu_device pointer
309 *
310 * Test an IB (Indirect Buffer) on each ring.
311 * If the test fails, disable the ring.
312 * Returns 0 on success, error if the primary GFX ring
313 * IB test fails.
314 */
315int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
316{
317	unsigned i;
318	int r, ret = 0;
319	long tmo_gfx, tmo_mm;
320
321	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
322	if (amdgpu_sriov_vf(adev)) {
 323		/* On the hypervisor side, the MM engines are not scheduled together
 324		 * with the CP and SDMA engines, so even in exclusive mode an MM
 325		 * engine could still be running on another VF.  The IB test timeout
 326		 * for MM engines under SR-IOV therefore has to be long; 8 seconds
 327		 * should be enough for the MM engine to come back to this VF.
328		 */
329		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
330	}
331
332	if (amdgpu_sriov_runtime(adev)) {
 333		/* The CP and SDMA engines are scheduled together, so the timeout
 334		 * has to be wide enough to cover the time spent waiting for them
 335		 * to come back while the VF is in SR-IOV runtime mode.
 336		 */
337		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
338	}
339
340	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
341		struct amdgpu_ring *ring = adev->rings[i];
342		long tmo;
343
344		if (!ring || !ring->ready)
345			continue;
346
 347		/* MM engines need more time */
348		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
349			ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
350			ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
351			ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
352			ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
353			tmo = tmo_mm;
354		else
355			tmo = tmo_gfx;
356
357		r = amdgpu_ring_test_ib(ring, tmo);
358		if (r) {
359			ring->ready = false;
360
361			if (ring == &adev->gfx.gfx_ring[0]) {
362				/* oh, oh, that's really bad */
363				DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
364				adev->accel_working = false;
365				return r;
366
367			} else {
368				/* still not good, but we can live with it */
369				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
370				ret = r;
371			}
372		}
373	}
374	return ret;
375}
376
377/*
378 * Debugfs info
379 */
380#if defined(CONFIG_DEBUG_FS)
381
382static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
383{
384	struct drm_info_node *node = (struct drm_info_node *) m->private;
385	struct drm_device *dev = node->minor->dev;
386	struct amdgpu_device *adev = dev->dev_private;
387
388	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);
389
390	return 0;
391
392}
393
394static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
395	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
396};
397
398#endif
399
400static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
401{
402#if defined(CONFIG_DEBUG_FS)
403	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
404#else
405	return 0;
406#endif
407}