1/*
2 * Copyright 2021 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/pci.h>
25
26#include "amdgpu.h"
27#include "amdgpu_ih.h"
28
29#include "oss/osssys_6_0_0_offset.h"
30#include "oss/osssys_6_0_0_sh_mask.h"
31
32#include "soc15_common.h"
33#include "ih_v6_0.h"
34
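/* Upper bound on doorbell re-writes when re-arming a possibly lost IRQ, see ih_v6_0_irq_rearm(). */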
35#define MAX_REARM_RETRY 10
36
37static void ih_v6_0_set_interrupt_funcs(struct amdgpu_device *adev);
38
39/**
40 * ih_v6_0_init_register_offset - Initialize register offset for ih rings
41 *
42 * @adev: amdgpu_device pointer
43 *
44 * Initialize the register offsets for the ih rings (IH_V6_0).
45 */
46static void ih_v6_0_init_register_offset(struct amdgpu_device *adev)
47{
48 struct amdgpu_ih_regs *ih_regs;
49
50 /* ih ring 2 is removed;
51 * ih ring 0 and ih ring 1 are available */
52 if (adev->irq.ih.ring_size) {
53 ih_regs = &adev->irq.ih.ih_regs;
54 ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE);
55 ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI);
56 ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL);
57 ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR);
58 ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR);
59 ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR);
60 ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_LO);
61 ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_HI);
62 ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
63 }
64
65 if (adev->irq.ih1.ring_size) {
66 ih_regs = &adev->irq.ih1.ih_regs;
67 ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_RING1);
68 ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI_RING1);
69 ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL_RING1);
70 ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_RING1);
71 ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR_RING1);
72 ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR_RING1);
73 ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
74 }
75}
76
77/**
78 * force_update_wptr_for_self_int - Force update the wptr for self interrupt
79 *
80 * @adev: amdgpu_device pointer
81 * @threshold: threshold to trigger the wptr reporting
82 * @timeout: timeout to trigger the wptr reporting
83 * @enabled: Enable/disable timeout flush mechanism
84 *
85 * threshold input range: 0 ~ 15, default 0,
86 * real_threshold = 2^threshold
87 * timeout input range: 0 ~ 20, default 8,
88 * real_timeout = (2^timeout) * 1024 / (socclk_freq)
89 *
90 * Force update wptr for self interrupt ( >= SIENNA_CICHLID).
91 */
92static void
93force_update_wptr_for_self_int(struct amdgpu_device *adev,
94 u32 threshold, u32 timeout, bool enabled)
95{
96 u32 ih_cntl, ih_rb_cntl;
97
98 ih_cntl = RREG32_SOC15(OSSSYS, 0, regIH_CNTL2);
99 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1);
100
101 ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
102 SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
103 ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
104 SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
105 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
106 RB_USED_INT_THRESHOLD, threshold);
107
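	/* Under SR-IOV with indirect IH register access, the ring-1 RB_CNTL write
	 * has to go through the PSP; bail out on failure so IH_CNTL2 is not
	 * programmed against a threshold that never took effect.
	 */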
108 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
109 if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
110 return;
111 } else {
112 WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
113 }
114
115 WREG32_SOC15(OSSSYS, 0, regIH_CNTL2, ih_cntl);
116}
117
118/**
119 * ih_v6_0_toggle_ring_interrupts - toggle the interrupt ring buffer
120 *
121 * @adev: amdgpu_device pointer
122 * @ih: amdgpu_ih_ring pointer
123 * @enable: true - enable the interrupts, false - disable the interrupts
124 *
125 * Toggle the interrupt ring buffer (IH_V6_0)
126 */
127static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev,
128 struct amdgpu_ih_ring *ih,
129 bool enable)
130{
131 struct amdgpu_ih_regs *ih_regs;
132 uint32_t tmp;
133
134 ih_regs = &ih->ih_regs;
135
136 tmp = RREG32(ih_regs->ih_rb_cntl);
137 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
138 /* enable_intr field is only valid in ring0 */
139 if (ih == &adev->irq.ih)
140 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
141
142 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
143 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
144 return -ETIMEDOUT;
145 } else {
146 WREG32(ih_regs->ih_rb_cntl, tmp);
147 }
148
149 if (enable) {
150 ih->enabled = true;
151 } else {
152 /* set rptr, wptr to 0 */
153 WREG32(ih_regs->ih_rb_rptr, 0);
154 WREG32(ih_regs->ih_rb_wptr, 0);
155 ih->enabled = false;
156 ih->rptr = 0;
157 }
158
159 return 0;
160}
161
162/**
163 * ih_v6_0_toggle_interrupts - Toggle all the available interrupt ring buffers
164 *
165 * @adev: amdgpu_device pointer
166 * @enable: enable or disable interrupt ring buffers
167 *
168 * Toggle all the available interrupt ring buffers (IH_V6_0).
169 */
170static int ih_v6_0_toggle_interrupts(struct amdgpu_device *adev, bool enable)
171{
172 struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
173 int i;
174 int r;
175
176 for (i = 0; i < ARRAY_SIZE(ih); i++) {
177 if (ih[i]->ring_size) {
178 r = ih_v6_0_toggle_ring_interrupts(adev, ih[i], enable);
179 if (r)
180 return r;
181 }
182 }
183
184 return 0;
185}
186
187static uint32_t ih_v6_0_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
188{
189 int rb_bufsz = order_base_2(ih->ring_size / 4);
190
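	/* MC_SPACE selects how the ring base address is interpreted: a physical
	 * (bus) address when use_bus_addr is set, a GPU virtual address otherwise.
	 */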
191 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
192 MC_SPACE, ih->use_bus_addr ? 2 : 4);
193 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
194 WPTR_OVERFLOW_CLEAR, 1);
195 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
196 WPTR_OVERFLOW_ENABLE, 1);
197 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
198 /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
199 * value is written to memory
200 */
201 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
202 WPTR_WRITEBACK_ENABLE, 1);
203 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
204 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
205 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
206
207 return ih_rb_cntl;
208}
209
210static uint32_t ih_v6_0_doorbell_rptr(struct amdgpu_ih_ring *ih)
211{
212 u32 ih_doorbell_rtpr = 0;
213
214 if (ih->use_doorbell) {
215 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
216 IH_DOORBELL_RPTR, OFFSET,
217 ih->doorbell_index);
218 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
219 IH_DOORBELL_RPTR,
220 ENABLE, 1);
221 } else {
222 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
223 IH_DOORBELL_RPTR,
224 ENABLE, 0);
225 }
226 return ih_doorbell_rtpr;
227}
228
229/**
230 * ih_v6_0_enable_ring - enable an ih ring buffer
231 *
232 * @adev: amdgpu_device pointer
233 * @ih: amdgpu_ih_ring pointer
234 *
235 * Enable an ih ring buffer (IH_V6_0)
236 */
237static int ih_v6_0_enable_ring(struct amdgpu_device *adev,
238 struct amdgpu_ih_ring *ih)
239{
240 struct amdgpu_ih_regs *ih_regs;
241 uint32_t tmp;
242
243 ih_regs = &ih->ih_regs;
244
245 /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
246 WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
247 WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
248
249 tmp = RREG32(ih_regs->ih_rb_cntl);
250 tmp = ih_v6_0_rb_cntl(ih, tmp);
251 if (ih == &adev->irq.ih)
252 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
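	/* Ring 1 is not allowed to overflow: the overflow flag is disabled and
	 * full-drain is enabled so the ring stalls instead of overwriting entries.
	 */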
253 if (ih == &adev->irq.ih1) {
254 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
255 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
256 }
257
258 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
259 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
260 DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
261 return -ETIMEDOUT;
262 }
263 } else {
264 WREG32(ih_regs->ih_rb_cntl, tmp);
265 }
266
267 if (ih == &adev->irq.ih) {
268 /* set the ih ring 0 writeback address whether it's enabled or not */
269 WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
270 WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
271 }
272
273 /* set rptr, wptr to 0 */
274 WREG32(ih_regs->ih_rb_wptr, 0);
275 WREG32(ih_regs->ih_rb_rptr, 0);
276
277 WREG32(ih_regs->ih_doorbell_rptr, ih_v6_0_doorbell_rptr(ih));
278
279 return 0;
280}
281
282/**
283 * ih_v6_0_irq_init - init and enable the interrupt ring
284 *
285 * @adev: amdgpu_device pointer
286 *
287 * Allocate a ring buffer for the interrupt controller,
288 * enable the RLC, disable interrupts, enable the IH
289 * ring buffer and enable it.
290 * Called at device load and resume.
291 * Returns 0 for success, errors for failure.
292 */
293static int ih_v6_0_irq_init(struct amdgpu_device *adev)
294{
295 struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
296 u32 ih_chicken;
297 u32 tmp;
298 int ret;
299 int i;
300
301 /* disable irqs */
302 ret = ih_v6_0_toggle_interrupts(adev, false);
303 if (ret)
304 return ret;
305
306 adev->nbio.funcs->ih_control(adev);
307
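	/* With non-PSP firmware loading the IH ring lives at a bus (guest
	 * physical) address, so IH_CHICKEN has to allow GPA addressing before
	 * the rings are enabled.
	 */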
308 if (unlikely((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
309 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO))) {
310 if (ih[0]->use_bus_addr) {
311 ih_chicken = RREG32_SOC15(OSSSYS, 0, regIH_CHICKEN);
312 ih_chicken = REG_SET_FIELD(ih_chicken,
313 IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
314 WREG32_SOC15(OSSSYS, 0, regIH_CHICKEN, ih_chicken);
315 }
316 }
317
318 for (i = 0; i < ARRAY_SIZE(ih); i++) {
319 if (ih[i]->ring_size) {
320 ret = ih_v6_0_enable_ring(adev, ih[i]);
321 if (ret)
322 return ret;
323 }
324 }
325
326 /* update doorbell range for ih ring 0 */
327 adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
328 ih[0]->doorbell_index);
329
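	/* Mark IH client 18 as a storm client and enable interrupt flood
	 * control so one noisy client cannot flood the ring.
	 */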
330 tmp = RREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL);
331 tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
332 CLIENT18_IS_STORM_CLIENT, 1);
333 WREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL, tmp);
334
335 tmp = RREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL);
336 tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
337 WREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL, tmp);
338
339 /* GC/MMHUB UTCL2 page fault interrupts are configured as
340 * MSI storm capable interrupts by default. The delay is
341 * used to avoid the ISR being called too frequently
342 * when page faults happen on several contiguous pages
343 * and thus avoid an MSI storm */
344 tmp = RREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL);
345 tmp = REG_SET_FIELD(tmp, IH_MSI_STORM_CTRL,
346 DELAY, 3);
347 WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);
348
349 pci_set_master(adev->pdev);
350
351 /* enable interrupts */
352 ret = ih_v6_0_toggle_interrupts(adev, true);
353 if (ret)
354 return ret;
355 /* enable wptr force update for self int */
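	/* threshold 0 -> 2^0 ring entries, timeout 8 -> (2^8 * 1024) / socclk,
	 * per the ranges documented above force_update_wptr_for_self_int().
	 */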
356 force_update_wptr_for_self_int(adev, 0, 8, true);
357
358 if (adev->irq.ih_soft.ring_size)
359 adev->irq.ih_soft.enabled = true;
360
361 return 0;
362}
363
364/**
365 * ih_v6_0_irq_disable - disable interrupts
366 *
367 * @adev: amdgpu_device pointer
368 *
369 * Disable interrupts on the hw.
370 */
371static void ih_v6_0_irq_disable(struct amdgpu_device *adev)
372{
373 force_update_wptr_for_self_int(adev, 0, 8, false);
374 ih_v6_0_toggle_interrupts(adev, false);
375
376 /* Wait and acknowledge irq */
377 mdelay(1);
378}
379
380/**
381 * ih_v6_0_get_wptr - get the IH ring buffer wptr
382 *
383 * @adev: amdgpu_device pointer
384 * @ih: amdgpu_ih_ring pointer
385 *
386 * Get the IH ring buffer wptr from either the register
387 * or the writeback memory buffer. Also check for
388 * ring buffer overflow and deal with it.
389 * Returns the value of the wptr.
390 */
391static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
392 struct amdgpu_ih_ring *ih)
393{
394 u32 wptr, tmp;
395 struct amdgpu_ih_regs *ih_regs;
396
397 wptr = le32_to_cpu(*ih->wptr_cpu);
398 ih_regs = &ih->ih_regs;
399
400 if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
401 goto out;
402
403 wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
404 if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
405 goto out;
406 wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
407
408 /* When a ring buffer overflow happen start parsing interrupt
409 * from the last not overwritten vector (wptr + 32). Hopefully
410 * this should allow us to catch up.
411 */
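	/* Each IV ring entry is 32 bytes, so stepping by 32 skips exactly one entry. */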
412 tmp = (wptr + 32) & ih->ptr_mask;
413 dev_warn(adev->dev, "IH ring buffer overflow "
414 "(0x%08X, 0x%08X, 0x%08X)\n",
415 wptr, ih->rptr, tmp);
416 ih->rptr = tmp;
417
418 tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
419 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
420 WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
421
422 /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
423 * can be detected.
424 */
425 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
426 WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
427out:
428 return (wptr & ih->ptr_mask);
429}
430
431/**
432 * ih_v6_0_irq_rearm - rearm IRQ if lost
433 *
434 * @adev: amdgpu_device pointer
435 * @ih: amdgpu_ih_ring pointer
436 *
437 */
438static void ih_v6_0_irq_rearm(struct amdgpu_device *adev,
439 struct amdgpu_ih_ring *ih)
440{
441 uint32_t v = 0;
442 uint32_t i = 0;
443 struct amdgpu_ih_regs *ih_regs;
444
445 ih_regs = &ih->ih_regs;
446
447 /* Rearm IRQ / re-write doorbell if doorbell write is lost */
448 for (i = 0; i < MAX_REARM_RETRY; i++) {
449 v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
450 if ((v < ih->ring_size) && (v != ih->rptr))
451 WDOORBELL32(ih->doorbell_index, ih->rptr);
452 else
453 break;
454 }
455}
456
457/**
458 * ih_v6_0_set_rptr - set the IH ring buffer rptr
459 *
460 * @adev: amdgpu_device pointer
461 * @ih: amdgpu_ih_ring pointer
462 *
463 * Set the IH ring buffer rptr.
464 */
465static void ih_v6_0_set_rptr(struct amdgpu_device *adev,
466 struct amdgpu_ih_ring *ih)
467{
468 struct amdgpu_ih_regs *ih_regs;
469
470 if (ih->use_doorbell) {
471 /* XXX check if swapping is necessary on BE */
472 *ih->rptr_cpu = ih->rptr;
473 WDOORBELL32(ih->doorbell_index, ih->rptr);
474
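		/* Doorbell writes can be lost while an SR-IOV VF is switched out,
		 * so double-check that the rptr update was actually seen.
		 */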
475 if (amdgpu_sriov_vf(adev))
476 ih_v6_0_irq_rearm(adev, ih);
477 } else {
478 ih_regs = &ih->ih_regs;
479 WREG32(ih_regs->ih_rb_rptr, ih->rptr);
480 }
481}
482
483/**
484 * ih_v6_0_self_irq - dispatch work for ring 1
485 *
486 * @adev: amdgpu_device pointer
487 * @source: irq source
488 * @entry: IV with WPTR update
489 *
490 * Update the WPTR from the IV and schedule work to handle the entries.
491 */
492static int ih_v6_0_self_irq(struct amdgpu_device *adev,
493 struct amdgpu_irq_src *source,
494 struct amdgpu_iv_entry *entry)
495{
496 uint32_t wptr = cpu_to_le32(entry->src_data[0]);
497
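	/* Only ring 1 generates self interrupts here; ring 2 was removed on IH 6.0. */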
498 switch (entry->ring_id) {
499 case 1:
500 *adev->irq.ih1.wptr_cpu = wptr;
501 schedule_work(&adev->irq.ih1_work);
502 break;
503 default:
504 break;
505 }
506 return 0;
507}
508
509static const struct amdgpu_irq_src_funcs ih_v6_0_self_irq_funcs = {
510 .process = ih_v6_0_self_irq,
511};
512
513static void ih_v6_0_set_self_irq_funcs(struct amdgpu_device *adev)
514{
515 adev->irq.self_irq.num_types = 0;
516 adev->irq.self_irq.funcs = &ih_v6_0_self_irq_funcs;
517}
518
519static int ih_v6_0_early_init(void *handle)
520{
521 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
522
523 ih_v6_0_set_interrupt_funcs(adev);
524 ih_v6_0_set_self_irq_funcs(adev);
525 return 0;
526}
527
528static int ih_v6_0_sw_init(void *handle)
529{
530 int r;
531 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
532 bool use_bus_addr;
533
534 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
535 &adev->irq.self_irq);
536
537 if (r)
538 return r;
539
540 /* use gpu virtual address for ih ring
541 * until ih_chicken is programmed to allow
542 * using the bus address for the ih ring by the psp bl */
543 use_bus_addr =
544 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
545 r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
546 if (r)
547 return r;
548
549 adev->irq.ih.use_doorbell = true;
550 adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
551
552 adev->irq.ih1.ring_size = 0;
553 adev->irq.ih2.ring_size = 0;
554
555 /* initialize ih control register offset */
556 ih_v6_0_init_register_offset(adev);
557
558 r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
559 if (r)
560 return r;
561
562 r = amdgpu_irq_init(adev);
563
564 return r;
565}
566
567static int ih_v6_0_sw_fini(void *handle)
568{
569 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
570
571 amdgpu_irq_fini_sw(adev);
572
573 return 0;
574}
575
576static int ih_v6_0_hw_init(void *handle)
577{
578 int r;
579 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
580
581 r = ih_v6_0_irq_init(adev);
582 if (r)
583 return r;
584
585 return 0;
586}
587
588static int ih_v6_0_hw_fini(void *handle)
589{
590 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
591
592 ih_v6_0_irq_disable(adev);
593
594 return 0;
595}
596
597static int ih_v6_0_suspend(void *handle)
598{
599 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
600
601 return ih_v6_0_hw_fini(adev);
602}
603
604static int ih_v6_0_resume(void *handle)
605{
606 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
607
608 return ih_v6_0_hw_init(adev);
609}
610
611static bool ih_v6_0_is_idle(void *handle)
612{
613 /* todo */
614 return true;
615}
616
617static int ih_v6_0_wait_for_idle(void *handle)
618{
619 /* todo */
620 return -ETIMEDOUT;
621}
622
623static int ih_v6_0_soft_reset(void *handle)
624{
625 /* todo */
626 return 0;
627}
628
629static void ih_v6_0_update_clockgating_state(struct amdgpu_device *adev,
630 bool enable)
631{
632 uint32_t data, def, field_val;
633
634 if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
635 def = data = RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL);
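		/* Clearing the soft-override bits lets the IH clocks gate
		 * dynamically; setting them forces the clocks on, i.e. CG off.
		 */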
636 field_val = enable ? 0 : 1;
637 data = REG_SET_FIELD(data, IH_CLK_CTRL,
638 DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
639 data = REG_SET_FIELD(data, IH_CLK_CTRL,
640 OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
641 data = REG_SET_FIELD(data, IH_CLK_CTRL,
642 LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
643 data = REG_SET_FIELD(data, IH_CLK_CTRL,
644 DYN_CLK_SOFT_OVERRIDE, field_val);
645 data = REG_SET_FIELD(data, IH_CLK_CTRL,
646 REG_CLK_SOFT_OVERRIDE, field_val);
647 if (def != data)
648 WREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL, data);
649 }
650}
651
652static int ih_v6_0_set_clockgating_state(void *handle,
653 enum amd_clockgating_state state)
654{
655 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
656
657 ih_v6_0_update_clockgating_state(adev,
658 state == AMD_CG_STATE_GATE);
659 return 0;
660}
661
662static void ih_v6_0_update_ih_mem_power_gating(struct amdgpu_device *adev,
663 bool enable)
664{
665 uint32_t ih_mem_pwr_cntl;
666
667 /* Disable ih sram power cntl before switch powergating mode */
668 ih_mem_pwr_cntl = RREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL);
669 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
670 IH_BUFFER_MEM_POWER_CTRL_EN, 0);
671 WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
672
673 /* It is recommended to set mem powergating mode to DS mode */
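	/* LS/DS/SD select light-sleep, deep-sleep and shut-down memory power
	 * modes respectively; only DS is enabled below.
	 */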
674 if (enable) {
675 /* mem power mode */
676 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
677 IH_BUFFER_MEM_POWER_LS_EN, 0);
678 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
679 IH_BUFFER_MEM_POWER_DS_EN, 1);
680 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
681 IH_BUFFER_MEM_POWER_SD_EN, 0);
682 /* cam mem power mode */
683 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
684 IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
685 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
686 IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 1);
687 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
688 IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
689 /* re-enable power cntl */
690 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
691 IH_BUFFER_MEM_POWER_CTRL_EN, 1);
692 } else {
693 /* mem power mode */
694 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
695 IH_BUFFER_MEM_POWER_LS_EN, 0);
696 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
697 IH_BUFFER_MEM_POWER_DS_EN, 0);
698 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
699 IH_BUFFER_MEM_POWER_SD_EN, 0);
700 /* cam mem power mode */
701 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
702 IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
703 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
704 IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 0);
705 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
706 IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
707 /* re-enable power cntl*/
708 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
709 IH_BUFFER_MEM_POWER_CTRL_EN, 1);
710 }
711
712 WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
713}
714
715static int ih_v6_0_set_powergating_state(void *handle,
716 enum amd_powergating_state state)
717{
718 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
719 bool enable = (state == AMD_PG_STATE_GATE);
720
721 if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
722 ih_v6_0_update_ih_mem_power_gating(adev, enable);
723
724 return 0;
725}
726
727static void ih_v6_0_get_clockgating_state(void *handle, u64 *flags)
728{
729 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
730
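	/* IH_CLK_CTRL reads back zero only when no clock soft override is set,
	 * which is how active IH clock gating is detected here.
	 */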
731 if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
732 *flags |= AMD_CG_SUPPORT_IH_CG;
733}
734
735static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
736 .name = "ih_v6_0",
737 .early_init = ih_v6_0_early_init,
738 .late_init = NULL,
739 .sw_init = ih_v6_0_sw_init,
740 .sw_fini = ih_v6_0_sw_fini,
741 .hw_init = ih_v6_0_hw_init,
742 .hw_fini = ih_v6_0_hw_fini,
743 .suspend = ih_v6_0_suspend,
744 .resume = ih_v6_0_resume,
745 .is_idle = ih_v6_0_is_idle,
746 .wait_for_idle = ih_v6_0_wait_for_idle,
747 .soft_reset = ih_v6_0_soft_reset,
748 .set_clockgating_state = ih_v6_0_set_clockgating_state,
749 .set_powergating_state = ih_v6_0_set_powergating_state,
750 .get_clockgating_state = ih_v6_0_get_clockgating_state,
751};
752
753static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
754 .get_wptr = ih_v6_0_get_wptr,
755 .decode_iv = amdgpu_ih_decode_iv_helper,
756 .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
757 .set_rptr = ih_v6_0_set_rptr
758};
759
760static void ih_v6_0_set_interrupt_funcs(struct amdgpu_device *adev)
761{
762 adev->irq.ih_funcs = &ih_v6_0_funcs;
763}
764
765const struct amdgpu_ip_block_version ih_v6_0_ip_block = {
766 .type = AMD_IP_BLOCK_TYPE_IH,
767 .major = 6,
768 .minor = 0,
769 .rev = 0,
770 .funcs = &ih_v6_0_ip_funcs,
771};
1/*
2 * Copyright 2021 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/pci.h>
25
26#include "amdgpu.h"
27#include "amdgpu_ih.h"
28
29#include "oss/osssys_6_0_0_offset.h"
30#include "oss/osssys_6_0_0_sh_mask.h"
31
32#include "soc15_common.h"
33#include "ih_v6_0.h"
34
35#define MAX_REARM_RETRY 10
36
37static void ih_v6_0_set_interrupt_funcs(struct amdgpu_device *adev);
38
39/**
40 * ih_v6_0_init_register_offset - Initialize register offset for ih rings
41 *
42 * @adev: amdgpu_device pointer
43 *
44 * Initialize the register offsets for the ih rings (IH_V6_0).
45 */
46static void ih_v6_0_init_register_offset(struct amdgpu_device *adev)
47{
48 struct amdgpu_ih_regs *ih_regs;
49
50 /* ih ring 2 is removed;
51 * ih ring 0 and ih ring 1 are available */
52 if (adev->irq.ih.ring_size) {
53 ih_regs = &adev->irq.ih.ih_regs;
54 ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE);
55 ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI);
56 ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL);
57 ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR);
58 ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR);
59 ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR);
60 ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_LO);
61 ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_HI);
62 ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
63 }
64
65 if (adev->irq.ih1.ring_size) {
66 ih_regs = &adev->irq.ih1.ih_regs;
67 ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_RING1);
68 ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI_RING1);
69 ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL_RING1);
70 ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_RING1);
71 ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR_RING1);
72 ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR_RING1);
73 ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
74 }
75}
76
77/**
78 * force_update_wptr_for_self_int - Force update the wptr for self interrupt
79 *
80 * @adev: amdgpu_device pointer
81 * @threshold: threshold to trigger the wptr reporting
82 * @timeout: timeout to trigger the wptr reporting
83 * @enabled: Enable/disable timeout flush mechanism
84 *
85 * threshold input range: 0 ~ 15, default 0,
86 * real_threshold = 2^threshold
87 * timeout input range: 0 ~ 20, default 8,
88 * real_timeout = (2^timeout) * 1024 / (socclk_freq)
89 *
90 * Force update wptr for self interrupt ( >= SIENNA_CICHLID).
91 */
92static void
93force_update_wptr_for_self_int(struct amdgpu_device *adev,
94 u32 threshold, u32 timeout, bool enabled)
95{
96 u32 ih_cntl, ih_rb_cntl;
97
98 ih_cntl = RREG32_SOC15(OSSSYS, 0, regIH_CNTL2);
99 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1);
100
101 ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
102 SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
103 ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
104 SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
105 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
106 RB_USED_INT_THRESHOLD, threshold);
107
108 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
109 if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
110 return;
111 } else {
112 WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
113 }
114
115 WREG32_SOC15(OSSSYS, 0, regIH_CNTL2, ih_cntl);
116}
117
118/**
119 * ih_v6_0_toggle_ring_interrupts - toggle the interrupt ring buffer
120 *
121 * @adev: amdgpu_device pointer
122 * @ih: amdgpu_ih_ring pointer
123 * @enable: true - enable the interrupts, false - disable the interrupts
124 *
125 * Toggle the interrupt ring buffer (IH_V6_0)
126 */
127static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev,
128 struct amdgpu_ih_ring *ih,
129 bool enable)
130{
131 struct amdgpu_ih_regs *ih_regs;
132 uint32_t tmp;
133
134 ih_regs = &ih->ih_regs;
135
136 tmp = RREG32(ih_regs->ih_rb_cntl);
137 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
138
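	/* On enable, clear any stale overflow status first: WPTR_OVERFLOW_CLEAR
	 * latches on a 0 -> 1 transition, hence the explicit 0, 1, 0 sequence below.
	 */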
139 if (enable) {
140 /* Unset the CLEAR_OVERFLOW bit to make sure the next step
141 * is switching the bit from 0 to 1
142 */
143 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
144 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
145 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
146 return -ETIMEDOUT;
147 } else {
148 WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
149 }
150
151 /* Clear RB_OVERFLOW bit */
152 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
153 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
154 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
155 return -ETIMEDOUT;
156 } else {
157 WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
158 }
159
160 /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
161 * can be detected.
162 */
163 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
164 }
165
166 /* enable_intr field is only valid in ring0 */
167 if (ih == &adev->irq.ih)
168 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
169
170 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
171 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
172 return -ETIMEDOUT;
173 } else {
174 WREG32(ih_regs->ih_rb_cntl, tmp);
175 }
176
177 if (enable) {
178 ih->enabled = true;
179 } else {
180 /* set rptr, wptr to 0 */
181 WREG32(ih_regs->ih_rb_rptr, 0);
182 WREG32(ih_regs->ih_rb_wptr, 0);
183 ih->enabled = false;
184 ih->rptr = 0;
185 }
186
187 return 0;
188}
189
190/**
191 * ih_v6_0_toggle_interrupts - Toggle all the available interrupt ring buffers
192 *
193 * @adev: amdgpu_device pointer
194 * @enable: enable or disable interrupt ring buffers
195 *
196 * Toggle all the available interrupt ring buffers (IH_V6_0).
197 */
198static int ih_v6_0_toggle_interrupts(struct amdgpu_device *adev, bool enable)
199{
200 struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
201 int i;
202 int r;
203
204 for (i = 0; i < ARRAY_SIZE(ih); i++) {
205 if (ih[i]->ring_size) {
206 r = ih_v6_0_toggle_ring_interrupts(adev, ih[i], enable);
207 if (r)
208 return r;
209 }
210 }
211
212 return 0;
213}
214
215static uint32_t ih_v6_0_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
216{
217 int rb_bufsz = order_base_2(ih->ring_size / 4);
218
219 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
220 MC_SPACE, ih->use_bus_addr ? 2 : 4);
221 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
222 WPTR_OVERFLOW_CLEAR, 1);
223 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
224 WPTR_OVERFLOW_ENABLE, 1);
225 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
226 /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
227 * value is written to memory
228 */
229 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
230 WPTR_WRITEBACK_ENABLE, 1);
231 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
232 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
233 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
234
235 return ih_rb_cntl;
236}
237
238static uint32_t ih_v6_0_doorbell_rptr(struct amdgpu_ih_ring *ih)
239{
240 u32 ih_doorbell_rtpr = 0;
241
242 if (ih->use_doorbell) {
243 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
244 IH_DOORBELL_RPTR, OFFSET,
245 ih->doorbell_index);
246 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
247 IH_DOORBELL_RPTR,
248 ENABLE, 1);
249 } else {
250 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
251 IH_DOORBELL_RPTR,
252 ENABLE, 0);
253 }
254 return ih_doorbell_rtpr;
255}
256
257/**
258 * ih_v6_0_enable_ring - enable an ih ring buffer
259 *
260 * @adev: amdgpu_device pointer
261 * @ih: amdgpu_ih_ring pointer
262 *
263 * Enable an ih ring buffer (IH_V6_0)
264 */
265static int ih_v6_0_enable_ring(struct amdgpu_device *adev,
266 struct amdgpu_ih_ring *ih)
267{
268 struct amdgpu_ih_regs *ih_regs;
269 uint32_t tmp;
270
271 ih_regs = &ih->ih_regs;
272
273 /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
274 WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
275 WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
276
277 tmp = RREG32(ih_regs->ih_rb_cntl);
278 tmp = ih_v6_0_rb_cntl(ih, tmp);
279 if (ih == &adev->irq.ih)
280 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
281 if (ih == &adev->irq.ih1) {
282 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
283 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
284 }
285
286 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
287 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
288 DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
289 return -ETIMEDOUT;
290 }
291 } else {
292 WREG32(ih_regs->ih_rb_cntl, tmp);
293 }
294
295 if (ih == &adev->irq.ih) {
296 /* set the ih ring 0 writeback address whether it's enabled or not */
297 WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
298 WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
299 }
300
301 /* set rptr, wptr to 0 */
302 WREG32(ih_regs->ih_rb_wptr, 0);
303 WREG32(ih_regs->ih_rb_rptr, 0);
304
305 WREG32(ih_regs->ih_doorbell_rptr, ih_v6_0_doorbell_rptr(ih));
306
307 return 0;
308}
309
310/**
311 * ih_v6_0_irq_init - init and enable the interrupt ring
312 *
313 * @adev: amdgpu_device pointer
314 *
315 * Allocate a ring buffer for the interrupt controller,
316 * enable the RLC, disable interrupts, enable the IH
317 * ring buffer and enable it.
318 * Called at device load and resume.
319 * Returns 0 for success, errors for failure.
320 */
321static int ih_v6_0_irq_init(struct amdgpu_device *adev)
322{
323 struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
324 u32 ih_chicken;
325 u32 tmp;
326 int ret;
327 int i;
328
329 /* disable irqs */
330 ret = ih_v6_0_toggle_interrupts(adev, false);
331 if (ret)
332 return ret;
333
334 adev->nbio.funcs->ih_control(adev);
335
336 if (unlikely((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
337 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO))) {
338 if (ih[0]->use_bus_addr) {
339 ih_chicken = RREG32_SOC15(OSSSYS, 0, regIH_CHICKEN);
340 ih_chicken = REG_SET_FIELD(ih_chicken,
341 IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
342 WREG32_SOC15(OSSSYS, 0, regIH_CHICKEN, ih_chicken);
343 }
344 }
345
346 for (i = 0; i < ARRAY_SIZE(ih); i++) {
347 if (ih[i]->ring_size) {
348 ret = ih_v6_0_enable_ring(adev, ih[i]);
349 if (ret)
350 return ret;
351 }
352 }
353
354 /* update doorbell range for ih ring 0 */
355 adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
356 ih[0]->doorbell_index);
357
358 tmp = RREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL);
359 tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
360 CLIENT18_IS_STORM_CLIENT, 1);
361 WREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL, tmp);
362
363 tmp = RREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL);
364 tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
365 WREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL, tmp);
366
367 /* GC/MMHUB UTCL2 page fault interrupts are configured as
368 * MSI storm capable interrupts by default. The delay is
369 * used to avoid the ISR being called too frequently
370 * when page faults happen on several contiguous pages
371 * and thus avoid an MSI storm */
372 tmp = RREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL);
373 tmp = REG_SET_FIELD(tmp, IH_MSI_STORM_CTRL,
374 DELAY, 3);
375 WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);
376
377 /* Redirect the interrupts to IH RB1 for dGPU */
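	/* IVs matching the client/source programmed below (client 0xa, source 0x0,
	 * with source-id matching enabled) are steered to ring 1 instead of ring 0.
	 */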
378 if (adev->irq.ih1.ring_size) {
379 tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX);
380 tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0);
381 WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp);
382
383 tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA);
384 tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa);
385 tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0);
386 tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA,
387 SOURCE_ID_MATCH_ENABLE, 0x1);
388
389 WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp);
390 }
391
392 pci_set_master(adev->pdev);
393
394 /* enable interrupts */
395 ret = ih_v6_0_toggle_interrupts(adev, true);
396 if (ret)
397 return ret;
398 /* enable wptr force update for self int */
399 force_update_wptr_for_self_int(adev, 0, 8, true);
400
401 if (adev->irq.ih_soft.ring_size)
402 adev->irq.ih_soft.enabled = true;
403
404 return 0;
405}
406
407/**
408 * ih_v6_0_irq_disable - disable interrupts
409 *
410 * @adev: amdgpu_device pointer
411 *
412 * Disable interrupts on the hw.
413 */
414static void ih_v6_0_irq_disable(struct amdgpu_device *adev)
415{
416 force_update_wptr_for_self_int(adev, 0, 8, false);
417 ih_v6_0_toggle_interrupts(adev, false);
418
419 /* Wait and acknowledge irq */
420 mdelay(1);
421}
422
423/**
424 * ih_v6_0_get_wptr - get the IH ring buffer wptr
425 *
426 * @adev: amdgpu_device pointer
427 * @ih: amdgpu_ih_ring pointer
428 *
429 * Get the IH ring buffer wptr from either the register
430 * or the writeback memory buffer. Also check for
431 * ring buffer overflow and deal with it.
432 * Returns the value of the wptr.
433 */
434static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
435 struct amdgpu_ih_ring *ih)
436{
437 u32 wptr, tmp;
438 struct amdgpu_ih_regs *ih_regs;
439
440 wptr = le32_to_cpu(*ih->wptr_cpu);
441 ih_regs = &ih->ih_regs;
442
443 if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
444 goto out;
445
446 wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
447 if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
448 goto out;
449 wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
450
451 /* When a ring buffer overflow happen start parsing interrupt
452 * from the last not overwritten vector (wptr + 32). Hopefully
453 * this should allow us to catch up.
454 */
455 tmp = (wptr + 32) & ih->ptr_mask;
456 dev_warn(adev->dev, "IH ring buffer overflow "
457 "(0x%08X, 0x%08X, 0x%08X)\n",
458 wptr, ih->rptr, tmp);
459 ih->rptr = tmp;
460
461 tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
462 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
463 WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
464
465 /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
466 * can be detected.
467 */
468 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
469 WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
470out:
471 return (wptr & ih->ptr_mask);
472}
473
474/**
475 * ih_v6_0_irq_rearm - rearm IRQ if lost
476 *
477 * @adev: amdgpu_device pointer
478 * @ih: amdgpu_ih_ring pointer
479 *
480 */
481static void ih_v6_0_irq_rearm(struct amdgpu_device *adev,
482 struct amdgpu_ih_ring *ih)
483{
484 uint32_t v = 0;
485 uint32_t i = 0;
486 struct amdgpu_ih_regs *ih_regs;
487
488 ih_regs = &ih->ih_regs;
489
490 /* Rearm IRQ / re-write doorbell if doorbell write is lost */
491 for (i = 0; i < MAX_REARM_RETRY; i++) {
492 v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
493 if ((v < ih->ring_size) && (v != ih->rptr))
494 WDOORBELL32(ih->doorbell_index, ih->rptr);
495 else
496 break;
497 }
498}
499
500/**
501 * ih_v6_0_set_rptr - set the IH ring buffer rptr
502 *
503 * @adev: amdgpu_device pointer
504 * @ih: amdgpu_ih_ring pointer
505 *
506 * Set the IH ring buffer rptr.
507 */
508static void ih_v6_0_set_rptr(struct amdgpu_device *adev,
509 struct amdgpu_ih_ring *ih)
510{
511 struct amdgpu_ih_regs *ih_regs;
512
513 if (ih->use_doorbell) {
514 /* XXX check if swapping is necessary on BE */
515 *ih->rptr_cpu = ih->rptr;
516 WDOORBELL32(ih->doorbell_index, ih->rptr);
517
518 if (amdgpu_sriov_vf(adev))
519 ih_v6_0_irq_rearm(adev, ih);
520 } else {
521 ih_regs = &ih->ih_regs;
522 WREG32(ih_regs->ih_rb_rptr, ih->rptr);
523 }
524}
525
526/**
527 * ih_v6_0_self_irq - dispatch work for ring 1
528 *
529 * @adev: amdgpu_device pointer
530 * @source: irq source
531 * @entry: IV with WPTR update
532 *
533 * Update the WPTR from the IV and schedule work to handle the entries.
534 */
535static int ih_v6_0_self_irq(struct amdgpu_device *adev,
536 struct amdgpu_irq_src *source,
537 struct amdgpu_iv_entry *entry)
538{
539 uint32_t wptr = cpu_to_le32(entry->src_data[0]);
540
541 switch (entry->ring_id) {
542 case 1:
543 *adev->irq.ih1.wptr_cpu = wptr;
544 schedule_work(&adev->irq.ih1_work);
545 break;
546 default:
547 break;
548 }
549 return 0;
550}
551
552static const struct amdgpu_irq_src_funcs ih_v6_0_self_irq_funcs = {
553 .process = ih_v6_0_self_irq,
554};
555
556static void ih_v6_0_set_self_irq_funcs(struct amdgpu_device *adev)
557{
558 adev->irq.self_irq.num_types = 0;
559 adev->irq.self_irq.funcs = &ih_v6_0_self_irq_funcs;
560}
561
562static int ih_v6_0_early_init(struct amdgpu_ip_block *ip_block)
563{
564 struct amdgpu_device *adev = ip_block->adev;
565
566 ih_v6_0_set_interrupt_funcs(adev);
567 ih_v6_0_set_self_irq_funcs(adev);
568 return 0;
569}
570
571static int ih_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
572{
573 int r;
574 struct amdgpu_device *adev = ip_block->adev;
575 bool use_bus_addr;
576
577 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
578 &adev->irq.self_irq);
579
580 if (r)
581 return r;
582
583 /* use gpu virtual address for ih ring
584 * until ih_chicken is programmed to allow
585 * using the bus address for the ih ring by the psp bl */
586 use_bus_addr =
587 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
588 r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
589 if (r)
590 return r;
591
592 adev->irq.ih.use_doorbell = true;
593 adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
594
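	/* dGPUs also get IH ring 1; its doorbell slot follows ring 0's
	 * (the << 1 presumably converts the qword-based doorbell index to the
	 * dword offset the hardware expects).
	 */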
595 if (!(adev->flags & AMD_IS_APU)) {
596 r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE,
597 use_bus_addr);
598 if (r)
599 return r;
600
601 adev->irq.ih1.use_doorbell = true;
602 adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
603 }
604
605 /* initialize ih control register offset */
606 ih_v6_0_init_register_offset(adev);
607
608 r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
609 if (r)
610 return r;
611
612 r = amdgpu_irq_init(adev);
613
614 return r;
615}
616
617static int ih_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
618{
619 struct amdgpu_device *adev = ip_block->adev;
620
621 amdgpu_irq_fini_sw(adev);
622
623 return 0;
624}
625
626static int ih_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
627{
628 int r;
629 struct amdgpu_device *adev = ip_block->adev;
630
631 r = ih_v6_0_irq_init(adev);
632 if (r)
633 return r;
634
635 return 0;
636}
637
638static int ih_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
639{
640 ih_v6_0_irq_disable(ip_block->adev);
641
642 return 0;
643}
644
645static int ih_v6_0_suspend(struct amdgpu_ip_block *ip_block)
646{
647 return ih_v6_0_hw_fini(ip_block);
648}
649
650static int ih_v6_0_resume(struct amdgpu_ip_block *ip_block)
651{
652 return ih_v6_0_hw_init(ip_block);
653}
654
655static bool ih_v6_0_is_idle(void *handle)
656{
657 /* todo */
658 return true;
659}
660
661static int ih_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
662{
663 /* todo */
664 return -ETIMEDOUT;
665}
666
667static int ih_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
668{
669 /* todo */
670 return 0;
671}
672
673static void ih_v6_0_update_clockgating_state(struct amdgpu_device *adev,
674 bool enable)
675{
676 uint32_t data, def, field_val;
677
678 if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
679 def = data = RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL);
680 field_val = enable ? 0 : 1;
681 data = REG_SET_FIELD(data, IH_CLK_CTRL,
682 DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
683 data = REG_SET_FIELD(data, IH_CLK_CTRL,
684 OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
685 data = REG_SET_FIELD(data, IH_CLK_CTRL,
686 LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
687 data = REG_SET_FIELD(data, IH_CLK_CTRL,
688 DYN_CLK_SOFT_OVERRIDE, field_val);
689 data = REG_SET_FIELD(data, IH_CLK_CTRL,
690 REG_CLK_SOFT_OVERRIDE, field_val);
691 if (def != data)
692 WREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL, data);
693 }
694}
695
696static int ih_v6_0_set_clockgating_state(void *handle,
697 enum amd_clockgating_state state)
698{
699 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
700
701 ih_v6_0_update_clockgating_state(adev,
702 state == AMD_CG_STATE_GATE);
703 return 0;
704}
705
706static void ih_v6_0_update_ih_mem_power_gating(struct amdgpu_device *adev,
707 bool enable)
708{
709 uint32_t ih_mem_pwr_cntl;
710
711 /* Disable ih sram power cntl before switch powergating mode */
712 ih_mem_pwr_cntl = RREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL);
713 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
714 IH_BUFFER_MEM_POWER_CTRL_EN, 0);
715 WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
716
717 /* It is recommended to set mem powergating mode to DS mode */
718 if (enable) {
719 /* mem power mode */
720 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
721 IH_BUFFER_MEM_POWER_LS_EN, 0);
722 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
723 IH_BUFFER_MEM_POWER_DS_EN, 1);
724 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
725 IH_BUFFER_MEM_POWER_SD_EN, 0);
726 /* cam mem power mode */
727 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
728 IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
729 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
730 IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 1);
731 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
732 IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
733 /* re-enable power cntl */
734 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
735 IH_BUFFER_MEM_POWER_CTRL_EN, 1);
736 } else {
737 /* mem power mode */
738 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
739 IH_BUFFER_MEM_POWER_LS_EN, 0);
740 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
741 IH_BUFFER_MEM_POWER_DS_EN, 0);
742 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
743 IH_BUFFER_MEM_POWER_SD_EN, 0);
744 /* cam mem power mode */
745 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
746 IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
747 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
748 IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 0);
749 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
750 IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
751 /* re-enable power cntl*/
752 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
753 IH_BUFFER_MEM_POWER_CTRL_EN, 1);
754 }
755
756 WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
757}
758
759static int ih_v6_0_set_powergating_state(void *handle,
760 enum amd_powergating_state state)
761{
762 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
763 bool enable = (state == AMD_PG_STATE_GATE);
764
765 if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
766 ih_v6_0_update_ih_mem_power_gating(adev, enable);
767
768 return 0;
769}
770
771static void ih_v6_0_get_clockgating_state(void *handle, u64 *flags)
772{
773 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
774
775 if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
776 *flags |= AMD_CG_SUPPORT_IH_CG;
777}
778
779static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
780 .name = "ih_v6_0",
781 .early_init = ih_v6_0_early_init,
782 .sw_init = ih_v6_0_sw_init,
783 .sw_fini = ih_v6_0_sw_fini,
784 .hw_init = ih_v6_0_hw_init,
785 .hw_fini = ih_v6_0_hw_fini,
786 .suspend = ih_v6_0_suspend,
787 .resume = ih_v6_0_resume,
788 .is_idle = ih_v6_0_is_idle,
789 .wait_for_idle = ih_v6_0_wait_for_idle,
790 .soft_reset = ih_v6_0_soft_reset,
791 .set_clockgating_state = ih_v6_0_set_clockgating_state,
792 .set_powergating_state = ih_v6_0_set_powergating_state,
793 .get_clockgating_state = ih_v6_0_get_clockgating_state,
794};
795
796static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
797 .get_wptr = ih_v6_0_get_wptr,
798 .decode_iv = amdgpu_ih_decode_iv_helper,
799 .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
800 .set_rptr = ih_v6_0_set_rptr
801};
802
803static void ih_v6_0_set_interrupt_funcs(struct amdgpu_device *adev)
804{
805 adev->irq.ih_funcs = &ih_v6_0_funcs;
806}
807
808const struct amdgpu_ip_block_version ih_v6_0_ip_block = {
809 .type = AMD_IP_BLOCK_TYPE_IH,
810 .major = 6,
811 .minor = 0,
812 .rev = 0,
813 .funcs = &ih_v6_0_ip_funcs,
814};