/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)

/*
 * GPU GFX IP block helper functions.
 */

int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue)
{
	int bit = 0;

	bit += mec * adev->gfx.mec.num_pipe_per_mec
		* adev->gfx.mec.num_queue_per_pipe;
	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
	bit += queue;

	return bit;
}

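/*
 * Inverse of amdgpu_gfx_mec_queue_to_bit(): decompose a linear queue bit
 * back into (mec, pipe, queue) coordinates.  As a worked example (assuming
 * 4 pipes per MEC and 8 queues per pipe), (mec 1, pipe 2, queue 3) maps to
 * bit 1 * 4 * 8 + 2 * 8 + 3 = 51, and bit 51 decomposes back into
 * queue = 51 % 8 = 3, pipe = (51 / 8) % 4 = 2, mec = (51 / 8) / 4 = 1.
 */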
void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
					int *mec, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
		/ adev->gfx.mec.num_pipe_per_mec;
}

bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec.queue_bitmap);
}

int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
			       int me, int pipe, int queue)
{
	int bit = 0;

	bit += me * adev->gfx.me.num_pipe_per_me
		* adev->gfx.me.num_queue_per_pipe;
	bit += pipe * adev->gfx.me.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
				int *me, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.me.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.me.num_queue_per_pipe)
		% adev->gfx.me.num_pipe_per_me;
	*me = (bit / adev->gfx.me.num_queue_per_pipe)
		/ adev->gfx.me.num_pipe_per_me;
}

bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
			adev->gfx.me.queue_bitmap);
}

/**
 * amdgpu_gfx_scratch_get - Allocate a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
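 *
 * A worked example of the free_mask bookkeeping (illustrative values only):
 * with num_reg = 4 and free_mask = 0b1010, ffs() returns 2, so bit 1 is
 * claimed, *reg becomes reg_base + 1 and free_mask is left as 0b1000; the
 * matching amdgpu_gfx_scratch_free() call simply sets that bit again.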
 */
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
{
	int i;

	i = ffs(adev->gfx.scratch.free_mask);
	if (i != 0 && i <= adev->gfx.scratch.num_reg) {
		i--;
		adev->gfx.scratch.free_mask &= ~(1u << i);
		*reg = adev->gfx.scratch.reg_base + i;
		return 0;
	}
	return -EINVAL;
}

/**
 * amdgpu_gfx_scratch_free - Free a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics)
 */
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
{
	adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base);
}

/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
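 *
 * The parameter is a comma-separated list of "se.sh.cu" triples, e.g.
 * amdgpu.disable_cu=0.0.4,0.1.7 requests CU 4 of SE0/SH0 and CU 7 of
 * SE0/SH1 to be disabled, i.e. mask[0 * max_sh + 0] |= 1 << 4 and
 * mask[0 * max_sh + 1] |= 1 << 7.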
 */
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
{
	unsigned se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}

static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
{
	if (amdgpu_compute_multipipe != -1) {
		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
		return amdgpu_compute_multipipe == 1;
	}

	/* FIXME: spreading the queues across pipes causes perf regressions
	 * on POLARIS11 compute workloads */
	if (adev->asic_type == CHIP_POLARIS11)
		return false;

	return adev->gfx.mec.num_mec > 1;
}

bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
					       int queue)
{
	/* Policy: make queue 0 of each pipe the high priority compute queue */
	return (queue == 0);
}

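/*
 * Select which compute (MEC) queues amdgpu itself will expose as rings by
 * setting their bits in mec.queue_bitmap; queues left unset stay available
 * to other users such as the KIQ (see amdgpu_gfx_kiq_acquire() below).
 * As an illustration, assuming the common layout of 4 pipes per MEC with
 * 8 queues per pipe: the multipipe policy sets bits 0-1, 8-9, 16-17 and
 * 24-25 (queues 0 and 1 of every MEC0 pipe), while the single-pipe policy
 * sets bits 0-7 (all queues of MEC0 pipe 0).
 */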
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe, mec;
	bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);

	/* policy for amdgpu compute queue ownership */
	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		queue = i % adev->gfx.mec.num_queue_per_pipe;
		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
			% adev->gfx.mec.num_pipe_per_mec;
		mec = (i / adev->gfx.mec.num_queue_per_pipe)
			/ adev->gfx.mec.num_pipe_per_mec;

		/* we've run out of HW */
		if (mec >= adev->gfx.mec.num_mec)
			break;

		if (multipipe_policy) {
			/* policy: amdgpu owns the first two queues of the first MEC */
			if (mec == 0 && queue < 2)
				set_bit(i, adev->gfx.mec.queue_bitmap);
		} else {
			/* policy: amdgpu owns all queues in the first pipe */
			if (mec == 0 && pipe == 0)
				set_bit(i, adev->gfx.mec.queue_bitmap);
		}
	}

	/* update the number of active compute rings */
	adev->gfx.num_compute_rings =
		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* If you hit this case and edited the policy, you probably just
	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
}

void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, me;

	for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
		queue = i % adev->gfx.me.num_queue_per_pipe;
		me = (i / adev->gfx.me.num_queue_per_pipe)
			/ adev->gfx.me.num_pipe_per_me;

		if (me >= adev->gfx.me.num_me)
			break;
		/* policy: amdgpu owns the first queue per pipe at this stage;
		 * this will be extended to multiple queues per pipe later */
		if (me == 0 && queue < 1)
			set_bit(i, adev->gfx.me.queue_bitmap);
	}

	/* update the number of active graphics rings */
	adev->gfx.num_gfx_rings =
		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}

static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	while (--queue_bit >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
			continue;

		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
		 *    can only be issued on queue 0.
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}

int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.kiq;

	r = amdgpu_gfx_kiq_acquire(adev, ring);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	ring->no_scheduler = true;
	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024,
			     irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
			     AMDGPU_RING_PRIO_DEFAULT);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
	amdgpu_ring_fini(ring);
}

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}

int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned hpd_size)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}

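/*
 * An MQD (memory queue descriptor) holds the hardware state of one queue:
 * ring buffer address, doorbell, priority and so on.  The helper below
 * allocates one MQD buffer per KIQ/KGQ/KCQ ring, plus (where noted) a
 * CPU-side backup copy that the IP-specific code typically uses to restore
 * the descriptor contents across resets and resume.
 */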
/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned mqd_size)
{
	struct amdgpu_ring *ring = NULL;
	int r, i;

	/* create MQD for KIQ */
	ring = &adev->gfx.kiq.ring;
	if (!ring->mqd_obj) {
		/* Originally the KIQ MQD was placed in the GTT domain, but for SRIOV
		 * the VRAM domain is a must, otherwise the hypervisor's SAVE_VF step
		 * fails after the driver is unloaded (the MQD is deallocated and the
		 * GART unbound).  To avoid diverging, use the VRAM domain for the
		 * KIQ MQD on both SRIOV and bare-metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
	}

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		/* create MQD for each KGQ */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (!ring->mqd_obj) {
				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
							    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
				if (r) {
					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
					return r;
				}

				/* prepare MQD backup */
				adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
				if (!adev->gfx.me.mqd_backup[i])
					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
			}
		}
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[i])
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
		}
	}

	return 0;
}

void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int i;

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			kfree(adev->gfx.me.mqd_backup[i]);
			amdgpu_bo_free_kernel(&ring->mqd_obj,
					      &ring->mqd_gpu_addr,
					      &ring->mqd_ptr);
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &adev->gfx.kiq.ring;
	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}

int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
					adev->gfx.num_compute_rings))
		return -ENOMEM;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
					   RESET_QUEUES, 0, 0);

	return amdgpu_ring_test_helper(kiq_ring);
}

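/*
 * Translate a bit position in our AMDGPU_MAX_COMPUTE_QUEUES-sized queue
 * bitmap into the bit position expected by the KIQ SET_RESOURCES packet,
 * whose queue mask is laid out with a fixed 4 pipes per MEC and 8 queues
 * per pipe (hence the hard-coded mec * 4 * 8 + pipe * 8 + queue below).
 */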
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					      int queue_bit)
{
	int mec, pipe, queue;
	int set_resource_bit = 0;

	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;

	return set_resource_bit;
}

int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	uint64_t queue_mask = 0;
	int r, i;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
		return -EINVAL;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
	}

	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
		 kiq_ring->queue);

	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
					adev->gfx.num_compute_rings +
					kiq->pmf->set_resources_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);

	r = amdgpu_ring_test_helper(kiq_ring);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}

/**
 * amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true = enable gfx off feature, false = disable gfx off feature
 *
 * 1. gfx off feature will be enabled by gfx ip after gfx cg/pg is enabled.
 * 2. other clients can send requests to disable the gfx off feature; such
 *    requests should be honored.
 * 3. other clients can cancel their request to disable the gfx off feature.
 * 4. other clients should not request enabling the gfx off feature before
 *    having requested to disable it.
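 *
 * In other words, gfx_off_req_count acts as a disable refcount: every
 * amdgpu_gfx_off_ctrl(adev, false) call increments it, every
 * amdgpu_gfx_off_ctrl(adev, true) call decrements it, and GFXOFF is only
 * (re)armed, after a GFX_OFF_DELAY_ENABLE delay, once the count drops back
 * to zero.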
 */
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (!enable)
		adev->gfx.gfx_off_req_count++;
	else if (adev->gfx.gfx_off_req_count > 0)
		adev->gfx.gfx_off_req_count--;

	if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
		schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
	} else if (!enable && adev->gfx.gfx_off_state) {
		if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false))
			adev->gfx.gfx_off_state = false;
	}

	mutex_unlock(&adev->gfx.gfx_off_mutex);
}

int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = smu_get_status_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
{
	int r;
	struct ras_fs_if fs_info = {
		.sysfs_name = "gfx_err_count",
	};
	struct ras_ih_if ih_info = {
		.cb = amdgpu_gfx_process_ras_data_cb,
	};

	if (!adev->gfx.ras_if) {
		adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->gfx.ras_if)
			return -ENOMEM;
		adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
		adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->gfx.ras_if->sub_block_index = 0;
		strcpy(adev->gfx.ras_if->name, "gfx");
	}
	fs_info.head = ih_info.head = *adev->gfx.ras_if;

	r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
				 &fs_info, &ih_info);
	if (r)
		goto free;

	if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
		r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
		if (r)
			goto late_fini;
	} else {
		/* free gfx ras_if if ras is not supported */
		r = 0;
		goto free;
	}

	return 0;
late_fini:
	amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info);
free:
	kfree(adev->gfx.ras_if);
	adev->gfx.ras_if = NULL;
	return r;
}

void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
			adev->gfx.ras_if) {
		struct ras_common_if *ras_if = adev->gfx.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
			.cb = amdgpu_gfx_process_ras_data_cb,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}

int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry)
{
	/* TODO: a UE (uncorrectable error) will trigger an interrupt.
	 *
	 * When "Full RAS" is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood
	 */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->gfx.funcs->query_ras_error_count)
			adev->gfx.funcs->query_ras_error_count(adev, err_data);
		amdgpu_ras_reset_gpu(adev);
	}
	return AMDGPU_RAS_SUCCESS;
}

int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

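/*
 * Register access through the KIQ: instead of touching MMIO directly (which
 * is not always possible, e.g. under SR-IOV), amdgpu_kiq_rreg() emits a read
 * packet on the KIQ ring that asks the CP to copy the register value into a
 * writeback (wb) slot, then polls the accompanying fence before reading the
 * value back from adev->wb.wb[].  amdgpu_kiq_wreg() below follows the same
 * pattern for writes.
 */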
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq, reg_val_offs = 0, value = 0;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
		pr_err("critical bug! too many kiq readers\n");
		goto failed_unlock;
	}
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore in the gpu reset case, because waiting here may
	 * block the gpu_recover() routine forever, e.g. this virt_kiq_rreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_rreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * also don't wait anymore for IRQ context
	 */
	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
		goto failed_kiq_read;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	mb();
	value = adev->wb.wb[reg_val_offs];
	amdgpu_device_wb_free(adev, reg_val_offs);
	return value;

failed_undo:
	amdgpu_ring_undo(ring);
failed_unlock:
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
	if (reg_val_offs)
		amdgpu_device_wb_free(adev, reg_val_offs);
	pr_err("failed to read reg:%x\n", reg);
	return ~0;
}

void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore in the gpu reset case, because waiting here may
	 * block the gpu_recover() routine forever, e.g. this virt_kiq_wreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_wreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * also don't wait anymore for IRQ context
	 */
	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
		goto failed_kiq_write;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
	pr_err("failed to write reg:%x\n", reg);
}