drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c (Linux v4.17)

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
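
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): the submission flow described by the comment above. Reserve
 * space on the ring, write command dwords, then commit to publish the
 * new wptr to the GPU. The packets below are placeholder NOPs; real
 * callers emit engine-specific packets.
 */
static void __maybe_unused amdgpu_ring_example_submit(struct amdgpu_ring *ring)
{
	unsigned i;

	if (amdgpu_ring_alloc(ring, 4))		/* reserve 4 dwords */
		return;

	for (i = 0; i < 4; i++)			/* placeholder packets */
		amdgpu_ring_write(ring, ring->funcs->nop);

	amdgpu_ring_commit(ring);	/* bump wptr so the GPU fetches them */
}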

static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring);
static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);

	return 0;
}
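
/*
 * Worked example (editor's addition): with align_mask = 0xf (16-dword
 * alignment), a request for ndw = 25 becomes (25 + 15) & ~15 = 32, so
 * amdgpu_ring_commit() always has room to pad to the next boundary.
 */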

/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->funcs->align_mask)
		ib->ptr[ib->length_dw++] = ring->funcs->nop;
}

/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count %= ring->funcs->align_mask + 1;
	ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);

	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ)
		amdgpu_ring_lru_touch(ring->adev, ring);
}
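
/*
 * Worked example (editor's addition): with align_mask = 0xf, if
 * wptr & 0xf == 10 then count = 16 - 10 = 6 NOPs are inserted; if the
 * wptr is already aligned, count = 16 % 16 = 0 and nothing is padded.
 */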

/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

/**
 * amdgpu_ring_priority_put - restore a ring's priority
 *
 * @ring: amdgpu_ring structure holding the information
 * @priority: target priority
 *
 * Release a request for executing at @priority
 */
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
			      enum drm_sched_priority priority)
{
	int i;

	if (!ring->funcs->set_priority)
		return;

	if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
		return;

	/* no need to restore if the job is already at the lowest priority */
	if (priority == DRM_SCHED_PRIORITY_NORMAL)
		return;

	mutex_lock(&ring->priority_mutex);
	/* something higher prio is executing, no need to decay */
	if (ring->priority > priority)
		goto out_unlock;

	/* decay priority to the next level with a job available */
	for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		if (i == DRM_SCHED_PRIORITY_NORMAL
				|| atomic_read(&ring->num_jobs[i])) {
			ring->priority = i;
			ring->funcs->set_priority(ring, i);
			break;
		}
	}

out_unlock:
	mutex_unlock(&ring->priority_mutex);
}

/**
 * amdgpu_ring_priority_get - change the ring's priority
 *
 * @ring: amdgpu_ring structure holding the information
 * @priority: target priority
 *
 * Request a ring's priority to be raised to @priority (refcounted).
 */
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
			      enum drm_sched_priority priority)
{
	if (!ring->funcs->set_priority)
		return;

	atomic_inc(&ring->num_jobs[priority]);

	mutex_lock(&ring->priority_mutex);
	if (priority <= ring->priority)
		goto out_unlock;

	ring->priority = priority;
	ring->funcs->set_priority(ring, priority);

out_unlock:
	mutex_unlock(&ring->priority_mutex);
}
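
/*
 * Usage note (editor's addition): get/put are refcounted per priority
 * level. A job requests elevation with amdgpu_ring_priority_get()
 * before it runs and releases it with amdgpu_ring_priority_put() when
 * it completes; the ring only decays back toward
 * DRM_SCHED_PRIORITY_NORMAL once num_jobs[] for the elevated level
 * drops to zero.
 */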

/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned irq_type)
{
	int r, i;
	int sched_hw_submission = amdgpu_sched_hw_submission;

	/* Set the hw submission limit higher for KIQ because
	 * it's used for a number of gfx/compute tasks by both
	 * KFD and KGD which may have outstanding fences and
	 * it doesn't really use the gpu scheduler anyway;
	 * KIQ tasks get submitted directly to the ring.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;
		r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission);
		if (r)
			return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
		return r;
	}
	ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
	ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);

	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;
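
	/*
	 * Worked example (editor's addition): with max_dw = 1024 and
	 * sched_hw_submission = 2 this is roundup_pow_of_two(1024 * 4 * 2)
	 * = 8192 bytes, giving buf_mask = 8192 / 4 - 1 = 0x7ff.
	 */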
 
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)&ring->ring);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}

	ring->max_dw = max_dw;
	ring->priority = DRM_SCHED_PRIORITY_NORMAL;
	mutex_init(&ring->priority_mutex);
	INIT_LIST_HEAD(&ring->lru_list);
	amdgpu_ring_lru_touch(adev, ring);

	for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
		atomic_set(&ring->num_jobs[i], 0);

	if (amdgpu_debugfs_ring_init(adev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}

	return 0;
}

/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	ring->ready = false;

	/* Not to finish a ring which is not initialized */
	if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
		return;

	amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

	amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
	amdgpu_device_wb_free(ring->adev, ring->fence_offs);

	amdgpu_bo_free_kernel(&ring->ring_obj,
			      &ring->gpu_addr,
			      (void **)&ring->ring);

	amdgpu_debugfs_ring_fini(ring);

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;

	ring->adev->rings[ring->idx] = NULL;
}

static void amdgpu_ring_lru_touch_locked(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring)
{
	/* list_move_tail handles the case where ring isn't part of the list */
	list_move_tail(&ring->lru_list, &adev->ring_lru_list);
}

static bool amdgpu_ring_is_blacklisted(struct amdgpu_ring *ring,
				       int *blacklist, int num_blacklist)
{
	int i;

	for (i = 0; i < num_blacklist; i++) {
		if (ring->idx == blacklist[i])
			return true;
	}

	return false;
}

/**
 * amdgpu_ring_lru_get - get the least recently used ring for a HW IP block
 *
 * @adev: amdgpu_device pointer
 * @type: amdgpu_ring_type enum
 * @blacklist: blacklisted ring ids array
 * @num_blacklist: number of entries in @blacklist
 * @lru_pipe_order: find a ring from the least recently used pipe
 * @ring: output ring
 *
 * Retrieve the amdgpu_ring structure for the least recently used ring of
 * a specific IP block (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
			int *blacklist,	int num_blacklist,
			bool lru_pipe_order, struct amdgpu_ring **ring)
{
	struct amdgpu_ring *entry;

	/* List is sorted in LRU order, find first entry corresponding
	 * to the desired HW IP */
	*ring = NULL;
	spin_lock(&adev->ring_lru_list_lock);
	list_for_each_entry(entry, &adev->ring_lru_list, lru_list) {
		if (entry->funcs->type != type)
			continue;

		if (amdgpu_ring_is_blacklisted(entry, blacklist, num_blacklist))
			continue;

		if (!*ring) {
			*ring = entry;

			/* We are done for ring LRU */
			if (!lru_pipe_order)
				break;
		}

		/* Move all rings on the same pipe to the end of the list */
		if (entry->pipe == (*ring)->pipe)
			amdgpu_ring_lru_touch_locked(adev, entry);
	}

	/* Move the ring we found to the end of the list */
	if (*ring)
		amdgpu_ring_lru_touch_locked(adev, *ring);

	spin_unlock(&adev->ring_lru_list_lock);

	if (!*ring) {
		DRM_ERROR("Ring LRU contains no entries for ring type:%d\n", type);
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_ring_lru_touch - mark a ring as recently being used
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to touch
 *
 * Move @ring to the tail of the lru list
 */
void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	spin_lock(&adev->ring_lru_list_lock);
	amdgpu_ring_lru_touch_locked(adev, ring);
	spin_unlock(&adev->ring_lru_list_lock);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

/* Layout of file is 12 bytes consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n-words of ring data
 */
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	int r, i;
	uint32_t value, result, early[3];

	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	while (size) {
		if (*pos >= (ring->ring_size + 12))
			return result;

		value = ring->ring[(*pos - 12)/4];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

	return result;
}
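
/*
 * Worked example (editor's addition): a 4-byte read at offset 0 returns
 * the masked rptr, offset 4 the masked hardware wptr, offset 8 the
 * driver's wptr copy; offsets 12 and up return ring contents, so the
 * file size is ring_size + 12 bytes.
 */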

static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};

#endif

static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_ring_%s", ring->name);

	ent = debugfs_create_file(name,
				  S_IFREG | S_IRUGO, root,
				  ring, &amdgpu_debugfs_ring_fops);
	if (!ent)
		return -ENOMEM;

	i_size_write(ent->d_inode, ring->ring_size + 12);
	ring->ent = ent;
#endif
	return 0;
}

static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_remove(ring->ent);
#endif
}

drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c (Linux v6.2)

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);

	return 0;
}

/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->funcs->align_mask)
		ib->ptr[ib->length_dw++] = ring->funcs->nop;
}

/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count %= ring->funcs->align_mask + 1;
	ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

#define amdgpu_ring_get_gpu_addr(ring, offset)				\
	(ring->is_mes_queue ?						\
	 (ring->mes_ctx->meta_data_gpu_addr + offset) :			\
	 (ring->adev->wb.gpu_addr + offset * 4))

#define amdgpu_ring_get_cpu_addr(ring, offset)				\
	(ring->is_mes_queue ?						\
	 (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
	 (&ring->adev->wb.wb[offset]))
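
/*
 * Editor's note: for a regular (non-MES) ring these macros resolve to
 * the device writeback area, e.g. amdgpu_ring_get_gpu_addr(ring,
 * ring->rptr_offs) yields ring->adev->wb.gpu_addr + rptr_offs * 4; MES
 * queues resolve the same offsets inside their per-queue metadata
 * buffer instead.
 */
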
/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * @hw_prio: ring priority (NORMAL/HIGH)
 * @sched_score: optional score atomic shared with other schedulers
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score)
{
	int r;
	int sched_hw_submission = amdgpu_sched_hw_submission;
	u32 *num_sched;
	u32 hw_ip;

	/* Set the hw submission limit higher for KIQ because
	 * it's used for a number of gfx/compute tasks by both
	 * KFD and KGD which may have outstanding fences and
	 * it doesn't really use the gpu scheduler anyway;
	 * KIQ tasks get submitted directly to the ring.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);
	else if (ring == &adev->sdma.instance[0].page)
		sched_hw_submission = 256;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->num_hw_submission = sched_hw_submission;
		ring->sched_score = sched_score;
		ring->vmid_wait = dma_fence_get_stub();

		if (!ring->is_mes_queue) {
			ring->idx = adev->num_rings++;
			adev->rings[ring->idx] = ring;
		}

		r = amdgpu_fence_driver_init_ring(ring);
		if (r)
			return r;
	}

	if (ring->is_mes_queue) {
		ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_RPTR_OFFS);
		ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_WPTR_OFFS);
		ring->fence_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_FENCE_OFFS);
		ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_TRAIL_FENCE_OFFS);
		ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_COND_EXE_OFFS);
	} else {
		r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->fence_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
			return r;
		}
	}

	ring->fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->fence_offs);
	ring->fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->fence_offs);

	ring->rptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->rptr_offs);
	ring->rptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->rptr_offs);

	ring->wptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->wptr_offs);
	ring->wptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->wptr_offs);

	ring->trail_fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->trail_fence_offs);
	ring->trail_fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->trail_fence_offs);

	ring->cond_exe_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->cond_exe_offs);
	ring->cond_exe_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->cond_exe_offs);

	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);

	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;

	/* Allocate ring buffer */
	if (ring->is_mes_queue) {
		int offset = 0;

		BUG_ON(ring->ring_size > PAGE_SIZE*4);

		offset = amdgpu_mes_ctx_get_offs(ring,
					 AMDGPU_MES_CTX_RING_OFFS);
		ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
		amdgpu_ring_clear_ring(ring);

	} else if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)&ring->ring);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}

	ring->max_dw = max_dw;
	ring->hw_prio = hw_prio;

	if (!ring->no_scheduler) {
		hw_ip = ring->funcs->type;
		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
			&ring->sched;
	}

	return 0;
}

/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{

	/* Not to finish a ring which is not initialized */
	if (!(ring->adev) ||
	    (!ring->is_mes_queue && !(ring->adev->rings[ring->idx])))
		return;

	ring->sched.ready = false;

	if (!ring->is_mes_queue) {
		amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
		amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

		amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
		amdgpu_device_wb_free(ring->adev, ring->fence_offs);

		amdgpu_bo_free_kernel(&ring->ring_obj,
				      &ring->gpu_addr,
				      (void **)&ring->ring);
	}

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;
	ring->me = 0;

	if (!ring->is_mes_queue)
		ring->adev->rings[ring->idx] = NULL;
}

/**
 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 *
 * @ring: ring to write to
 * @reg0: register to write
 * @reg1: register to wait on
 * @ref: reference value to write/wait on
 * @mask: mask to wait on
 *
 * Helper for rings that don't support write and wait in a
 * single oneshot packet.
 */
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
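
/*
 * Editor's note: this emits two packets, a register write of @ref to
 * @reg0 followed by a wait until (@reg1 & @mask) == @mask. The wait
 * compares against @mask rather than @ref, so it is suited to polling
 * for status bits to become set after the write.
 */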

/**
 * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
 *
 * @ring: ring to try the recovery on
 * @vmid: VMID we try to get going again
 * @fence: timedout fence
 *
 * Tries to get a ring proceeding again when it is stuck.
 * Returns true if @fence signaled before the recovery deadline.
 */
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence)
{
	ktime_t deadline = ktime_add_us(ktime_get(), 10000);

	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
		return false;

	atomic_inc(&ring->adev->gpu_reset_counter);
	while (!dma_fence_is_signaled(fence) &&
	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
		ring->funcs->soft_recovery(ring, vmid);

	return dma_fence_is_signaled(fence);
}
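
/*
 * Editor's note: the deadline above gives the engine 10 ms
 * (ktime_add_us(ktime_get(), 10000)) to make progress. The ring's
 * soft_recovery() hook is invoked repeatedly on the stuck VMID until
 * the timed-out fence signals or the deadline expires, avoiding a full
 * GPU reset when it succeeds.
 */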

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

/* Layout of file is 12 bytes consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n-words of ring data
 */
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	int r, i;
	uint32_t value, result, early[3];

	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	while (size) {
		if (*pos >= (ring->ring_size + 12))
			return result;

		value = ring->ring[(*pos - 12)/4];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

	return result;
}

static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};

#endif

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_ring_%s", ring->name);
	debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring,
				 &amdgpu_debugfs_ring_fops,
				 ring->ring_size + 12);

#endif
}

/**
 * amdgpu_ring_test_helper - test the ring and set the sched readiness status
 *
 * @ring: ring to test
 *
 * Tests the ring and sets the scheduler readiness status accordingly.
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);
	else
		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
			      ring->name);

	ring->sched.ready = !r;
	return r;
}
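
/*
 * Usage note (editor's addition): IP blocks typically call this from
 * their hardware init paths to validate a ring; on success the GPU
 * scheduler attached to the ring is marked ready so jobs may be pushed
 * to it, while on failure it stays disabled.
 */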

static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
				    struct amdgpu_mqd_prop *prop)
{
	struct amdgpu_device *adev = ring->adev;

	memset(prop, 0, sizeof(*prop));

	prop->mqd_gpu_addr = ring->mqd_gpu_addr;
	prop->hqd_base_gpu_addr = ring->gpu_addr;
	prop->rptr_gpu_addr = ring->rptr_gpu_addr;
	prop->wptr_gpu_addr = ring->wptr_gpu_addr;
	prop->queue_size = ring->ring_size;
	prop->eop_gpu_addr = ring->eop_gpu_addr;
	prop->use_doorbell = ring->use_doorbell;
	prop->doorbell_index = ring->doorbell_index;

	/* The map_queues packet doesn't need to activate the queue,
	 * so only KIQ needs to set this field.
	 */
	prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;

	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
	     amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) ||
	    (ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
	     amdgpu_gfx_is_high_priority_graphics_queue(adev, ring))) {
		prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
		prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
	}
}

int amdgpu_ring_init_mqd(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_mqd *mqd_mgr;
	struct amdgpu_mqd_prop prop;

	amdgpu_ring_to_mqd_prop(ring, &prop);

	ring->wptr = 0;

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		mqd_mgr = &adev->mqds[AMDGPU_HW_IP_COMPUTE];
	else
		mqd_mgr = &adev->mqds[ring->funcs->type];

	return mqd_mgr->init_mqd(adev, ring->mqd_ptr, &prop);
}

void amdgpu_ring_ib_begin(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_begin(ring);
}

void amdgpu_ring_ib_end(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_end(ring);
}