/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers. Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written. When the
 * pointers are equal, the ring is idle. When the host
 * writes commands to the ring buffer, it increments the
 * wptr. The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
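
/*
 * A minimal sketch of the flow described above (illustrative only, not a
 * real call site; "packet" stands for some command dword and error
 * handling is trimmed): amdgpu_ring_alloc() reserves space,
 * amdgpu_ring_write() fills in command dwords and amdgpu_ring_commit()
 * pads the ring and bumps the wptr:
 *
 *	r = amdgpu_ring_alloc(ring, 32);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, packet);
 *	amdgpu_ring_commit(ring);
 */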

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	/* Align requested size with padding so amdgpu_ring_commit can
	 * pad safely */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
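	/* e.g. with align_mask = 7 (an 8 dw fetch granularity), a request
	 * of ndw = 25 is rounded up to 32 by the statement above */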

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);

	return 0;
}

/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->funcs->align_mask)
		ib->ptr[ib->length_dw++] = ring->funcs->nop;
}

/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count %= ring->funcs->align_mask + 1;
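	/* e.g. align_mask = 7 and wptr = 13: count = 8 - 5 = 3, so three
	 * NOPs bring the wptr to 16, a whole fetch of 8 dwords; if the
	 * wptr is already aligned, the modulo above makes count 0 */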
	ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

#define amdgpu_ring_get_gpu_addr(ring, offset)				\
	(ring->is_mes_queue ?						\
	 (ring->mes_ctx->meta_data_gpu_addr + offset) :			\
	 (ring->adev->wb.gpu_addr + offset * 4))

#define amdgpu_ring_get_cpu_addr(ring, offset)				\
	(ring->is_mes_queue ?						\
	 (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) :	\
	 (&ring->adev->wb.wb[offset]))
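
/* Note the unit difference above: for MES queues the offset is a byte
 * offset into the queue's metadata buffer, while for regular rings it is
 * a dword index into the device writeback area (hence the "* 4" and the
 * array indexing). */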

/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * @hw_prio: ring priority (NORMAL/HIGH)
 * @sched_score: optional score atomic shared with other schedulers
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score)
{
	int r;
	int sched_hw_submission = amdgpu_sched_hw_submission;
	u32 *num_sched;
	u32 hw_ip;

	/* Set the hw submission limit higher for KIQ because
	 * it's used for a number of gfx/compute tasks by both
	 * KFD and KGD which may have outstanding fences and
	 * it doesn't really use the gpu scheduler anyway;
	 * KIQ tasks get submitted directly to the ring.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);
	else if (ring == &adev->sdma.instance[0].page)
		sched_hw_submission = 256;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->num_hw_submission = sched_hw_submission;
		ring->sched_score = sched_score;
		ring->vmid_wait = dma_fence_get_stub();

		if (!ring->is_mes_queue) {
			ring->idx = adev->num_rings++;
			adev->rings[ring->idx] = ring;
		}

		r = amdgpu_fence_driver_init_ring(ring);
		if (r)
			return r;
	}

	if (ring->is_mes_queue) {
		ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring,
							  AMDGPU_MES_CTX_RPTR_OFFS);
		ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring,
							  AMDGPU_MES_CTX_WPTR_OFFS);
		ring->fence_offs = amdgpu_mes_ctx_get_offs(ring,
							   AMDGPU_MES_CTX_FENCE_OFFS);
		ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring,
								 AMDGPU_MES_CTX_TRAIL_FENCE_OFFS);
		ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring,
							      AMDGPU_MES_CTX_COND_EXE_OFFS);
	} else {
		r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->fence_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
			return r;
		}
	}

	ring->fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->fence_offs);
	ring->fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->fence_offs);

	ring->rptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->rptr_offs);
	ring->rptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->rptr_offs);

	ring->wptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->wptr_offs);
	ring->wptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->wptr_offs);

	ring->trail_fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->trail_fence_offs);
	ring->trail_fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->trail_fence_offs);

	ring->cond_exe_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->cond_exe_offs);
	ring->cond_exe_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->cond_exe_offs);

	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);

	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;
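	/* e.g. max_dw = 1024 with sched_hw_submission = 2 yields a
	 * ring_size of 8192 bytes and a buf_mask of 0x7ff (the dword
	 * index wraps at 2048); without 64-bit pointer support the
	 * ptr_mask equals the buf_mask */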

	/* Allocate ring buffer */
	if (ring->is_mes_queue) {
		int offset = 0;

		BUG_ON(ring->ring_size > PAGE_SIZE * 4);

		offset = amdgpu_mes_ctx_get_offs(ring,
						 AMDGPU_MES_CTX_RING_OFFS);
		ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
		amdgpu_ring_clear_ring(ring);

	} else if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)&ring->ring);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}

	ring->max_dw = max_dw;
	ring->hw_prio = hw_prio;

	if (!ring->no_scheduler) {
		hw_ip = ring->funcs->type;
		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
			&ring->sched;
	}

	return 0;
}

/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	/* Don't tear down a ring that was never initialized */
	if (!(ring->adev) ||
	    (!ring->is_mes_queue && !(ring->adev->rings[ring->idx])))
		return;

	ring->sched.ready = false;

	if (!ring->is_mes_queue) {
		amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
		amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

		amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
		amdgpu_device_wb_free(ring->adev, ring->fence_offs);

		amdgpu_bo_free_kernel(&ring->ring_obj,
				      &ring->gpu_addr,
				      (void **)&ring->ring);
	}

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;
	ring->me = 0;

	if (!ring->is_mes_queue)
		ring->adev->rings[ring->idx] = NULL;
}

/**
 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 *
 * @ring: ring to write to
 * @reg0: register to write
 * @reg1: register to wait on
 * @ref: reference value to write/wait on
 * @mask: mask to wait on
 *
 * Helper for rings that don't support write and wait in a
 * single oneshot packet.
 */
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

/**
 * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
 *
 * @ring: ring to try the recovery on
 * @vmid: VMID we try to get going again
 * @fence: timed out fence
 *
 * Tries to get a ring proceeding again when it is stuck.
 */
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence)
{
	ktime_t deadline = ktime_add_us(ktime_get(), 10000);

	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
		return false;

	atomic_inc(&ring->adev->gpu_reset_counter);
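	/* Busy-wait for up to 10 ms: each pass asks the ring's backend to
	 * nudge the stuck VMID (the gfx backends, for instance, kill the
	 * offending waves), then rechecks whether the fence has signaled. */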
	while (!dma_fence_is_signaled(fence) &&
	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
		ring->funcs->soft_recovery(ring, vmid);

	return dma_fence_is_signaled(fence);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

/* Layout of file is 12 bytes consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n-words of ring data
 */
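
/* The file is registered under the DRM minor's debugfs root, so it can
 * typically be dumped with something like the following (ring name and
 * minor number vary per system, the path shown is hypothetical):
 *
 *	od -A d -t x4 /sys/kernel/debug/dri/0/amdgpu_ring_gfx | head
 *
 * with the first three dwords being the pointers listed above. */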
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	int r, i;
	uint32_t value, result, early[3];

	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	while (size) {
		if (*pos >= (ring->ring_size + 12))
			return result;

		value = ring->ring[(*pos - 12) / 4];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

	return result;
}

static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};

#endif

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_ring_%s", ring->name);
	debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring,
				 &amdgpu_debugfs_ring_fops,
				 ring->ring_size + 12);

#endif
}

/**
 * amdgpu_ring_test_helper - test the ring and set sched readiness status
 *
 * @ring: ring to test
 *
 * Tests the ring and sets the scheduler readiness status accordingly.
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);
	else
		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
			      ring->name);

	ring->sched.ready = !r;
	return r;
}

static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
				    struct amdgpu_mqd_prop *prop)
{
	struct amdgpu_device *adev = ring->adev;

	memset(prop, 0, sizeof(*prop));

	prop->mqd_gpu_addr = ring->mqd_gpu_addr;
	prop->hqd_base_gpu_addr = ring->gpu_addr;
	prop->rptr_gpu_addr = ring->rptr_gpu_addr;
	prop->wptr_gpu_addr = ring->wptr_gpu_addr;
	prop->queue_size = ring->ring_size;
	prop->eop_gpu_addr = ring->eop_gpu_addr;
	prop->use_doorbell = ring->use_doorbell;
	prop->doorbell_index = ring->doorbell_index;

	/* The map_queues packet doesn't need to activate the queue,
	 * so only KIQ needs to set this field.
	 */
	prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;

	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
	     amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) ||
	    (ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
	     amdgpu_gfx_is_high_priority_graphics_queue(adev, ring))) {
		prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
		prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
	}
}

int amdgpu_ring_init_mqd(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_mqd *mqd_mgr;
	struct amdgpu_mqd_prop prop;

	amdgpu_ring_to_mqd_prop(ring, &prop);

	ring->wptr = 0;

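	/* KIQ is a compute queue under the hood, so it borrows the compute
	 * MQD manager below; every other ring type indexes mqds[] by its
	 * own type */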
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		mqd_mgr = &adev->mqds[AMDGPU_HW_IP_COMPUTE];
	else
		mqd_mgr = &adev->mqds[ring->funcs->type];

	return mqd_mgr->init_mqd(adev, ring->mqd_ptr, &prop);
}

void amdgpu_ring_ib_begin(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_begin(ring);
}

void amdgpu_ring_ib_end(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_end(ring);
}