1/*
2 * Copyright 2023 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/pci.h>
25
26#include "amdgpu.h"
27#include "amdgpu_ih.h"
28
29#include "oss/osssys_7_0_0_offset.h"
30#include "oss/osssys_7_0_0_sh_mask.h"
31
32#include "soc15_common.h"
33#include "ih_v7_0.h"
34
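/* Maximum number of times ih_v7_0_irq_rearm() re-writes the ring doorbell
 * when a doorbell write appears to have been lost.
 */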
35#define MAX_REARM_RETRY 10
36
37static void ih_v7_0_set_interrupt_funcs(struct amdgpu_device *adev);
38
39/**
40 * ih_v7_0_init_register_offset - Initialize register offset for ih rings
41 *
42 * @adev: amdgpu_device pointer
43 *
44 * Initialize the register offsets for the ih rings (IH_V7_0).
45 */
46static void ih_v7_0_init_register_offset(struct amdgpu_device *adev)
47{
48 struct amdgpu_ih_regs *ih_regs;
49
50 /* ih ring 2 is removed;
51 * only ih ring 0 and ih ring 1 are available */
52 if (adev->irq.ih.ring_size) {
53 ih_regs = &adev->irq.ih.ih_regs;
54 ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE);
55 ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI);
56 ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL);
57 ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR);
58 ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR);
59 ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR);
60 ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_LO);
61 ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_HI);
62 ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
63 }
64
65 if (adev->irq.ih1.ring_size) {
66 ih_regs = &adev->irq.ih1.ih_regs;
67 ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_RING1);
68 ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI_RING1);
69 ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL_RING1);
70 ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_RING1);
71 ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR_RING1);
72 ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR_RING1);
73 ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
74 }
75}
76
77/**
78 * force_update_wptr_for_self_int - Force update the wptr for self interrupt
79 *
80 * @adev: amdgpu_device pointer
81 * @threshold: threshold to trigger the wptr reporting
82 * @timeout: timeout to trigger the wptr reporting
83 * @enabled: Enable/disable timeout flush mechanism
84 *
85 * threshold input range: 0 ~ 15, default 0,
86 * real_threshold = 2^threshold
87 * timeout input range: 0 ~ 20, default 8,
88 * real_timeout = (2^timeout) * 1024 / (socclk_freq)
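 * (e.g. with the default timeout of 8, the wptr flush period is
 * 2^8 * 1024 = 262144 socclk cycles)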
89 *
90 * Force update wptr for self interrupt (IH_V7_0).
91 */
92static void
93force_update_wptr_for_self_int(struct amdgpu_device *adev,
94 u32 threshold, u32 timeout, bool enabled)
95{
96 u32 ih_cntl, ih_rb_cntl;
97
98 ih_cntl = RREG32_SOC15(OSSSYS, 0, regIH_CNTL2);
99 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1);
100
101 ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
102 SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
103 ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
104 SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
105 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
106 RB_USED_INT_THRESHOLD, threshold);
107
108 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
109 if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
110 return;
111 } else {
112 WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
113 }
114
115 WREG32_SOC15(OSSSYS, 0, regIH_CNTL2, ih_cntl);
116}
117
118/**
119 * ih_v7_0_toggle_ring_interrupts - toggle the interrupt ring buffer
120 *
121 * @adev: amdgpu_device pointer
122 * @ih: amdgpu_ih_ring pointer
123 * @enable: true - enable the interrupts, false - disable the interrupts
124 *
125 * Toggle the interrupt ring buffer (IH_V7_0)
126 */
127static int ih_v7_0_toggle_ring_interrupts(struct amdgpu_device *adev,
128 struct amdgpu_ih_ring *ih,
129 bool enable)
130{
131 struct amdgpu_ih_regs *ih_regs;
132 uint32_t tmp;
133
134 ih_regs = &ih->ih_regs;
135
136 tmp = RREG32(ih_regs->ih_rb_cntl);
137 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
138 /* enable_intr field is only valid in ring0 */
139 if (ih == &adev->irq.ih)
140 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
141
142 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
143 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
144 return -ETIMEDOUT;
145 } else {
146 WREG32(ih_regs->ih_rb_cntl, tmp);
147 }
148
149 if (enable) {
150 ih->enabled = true;
151 } else {
152 /* set rptr, wptr to 0 */
153 WREG32(ih_regs->ih_rb_rptr, 0);
154 WREG32(ih_regs->ih_rb_wptr, 0);
155 ih->enabled = false;
156 ih->rptr = 0;
157 }
158
159 return 0;
160}
161
162/**
163 * ih_v7_0_toggle_interrupts - Toggle all the available interrupt ring buffers
164 *
165 * @adev: amdgpu_device pointer
166 * @enable: enable or disable interrupt ring buffers
167 *
168 * Toggle all the available interrupt ring buffers (IH_V7_0).
169 */
170static int ih_v7_0_toggle_interrupts(struct amdgpu_device *adev, bool enable)
171{
172 struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
173 int i;
174 int r;
175
176 for (i = 0; i < ARRAY_SIZE(ih); i++) {
177 if (ih[i]->ring_size) {
178 r = ih_v7_0_toggle_ring_interrupts(adev, ih[i], enable);
179 if (r)
180 return r;
181 }
182 }
183
184 return 0;
185}
186
187static uint32_t ih_v7_0_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
188{
189 int rb_bufsz = order_base_2(ih->ring_size / 4);
190
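	/* RB_SIZE takes log2 of the ring size in dwords; MC_SPACE selects how
	 * the ring base is decoded: a bus (physical) address when use_bus_addr
	 * is set, a GPU virtual address otherwise.
	 */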
191 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
192 MC_SPACE, ih->use_bus_addr ? 2 : 4);
193 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
194 WPTR_OVERFLOW_CLEAR, 1);
195 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
196 WPTR_OVERFLOW_ENABLE, 1);
197 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
198 /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
199 * value is written to memory
200 */
201 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
202 WPTR_WRITEBACK_ENABLE, 1);
203 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
204 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
205 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
206
207 return ih_rb_cntl;
208}
209
210static uint32_t ih_v7_0_doorbell_rptr(struct amdgpu_ih_ring *ih)
211{
212 u32 ih_doorbell_rtpr = 0;
213
214 if (ih->use_doorbell) {
215 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
216 IH_DOORBELL_RPTR, OFFSET,
217 ih->doorbell_index);
218 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
219 IH_DOORBELL_RPTR,
220 ENABLE, 1);
221 } else {
222 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
223 IH_DOORBELL_RPTR,
224 ENABLE, 0);
225 }
226 return ih_doorbell_rtpr;
227}
228
229/**
230 * ih_v7_0_enable_ring - enable an ih ring buffer
231 *
232 * @adev: amdgpu_device pointer
233 * @ih: amdgpu_ih_ring pointer
234 *
235 * Enable an ih ring buffer (IH_V7_0)
236 */
237static int ih_v7_0_enable_ring(struct amdgpu_device *adev,
238 struct amdgpu_ih_ring *ih)
239{
240 struct amdgpu_ih_regs *ih_regs;
241 uint32_t tmp;
242
243 ih_regs = &ih->ih_regs;
244
245 /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
246 WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
247 WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
248
249 tmp = RREG32(ih_regs->ih_rb_cntl);
250 tmp = ih_v7_0_rb_cntl(ih, tmp);
251 if (ih == &adev->irq.ih)
252 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
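	/* ring 1 is expected to drain (drop) new entries when full rather than
	 * wrap, so wptr overflow reporting is left disabled for it.
	 */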
253 if (ih == &adev->irq.ih1) {
254 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
255 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
256 }
257
258 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
259 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
260 DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
261 return -ETIMEDOUT;
262 }
263 } else {
264 WREG32(ih_regs->ih_rb_cntl, tmp);
265 }
266
267 if (ih == &adev->irq.ih) {
268 /* set the ih ring 0 writeback address whether it's enabled or not */
269 WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
270 WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
271 }
272
273 /* set rptr, wptr to 0 */
274 WREG32(ih_regs->ih_rb_wptr, 0);
275 WREG32(ih_regs->ih_rb_rptr, 0);
276
277 WREG32(ih_regs->ih_doorbell_rptr, ih_v7_0_doorbell_rptr(ih));
278
279 return 0;
280}
281
282/**
283 * ih_v7_0_irq_init - init and enable the interrupt ring
284 *
285 * @adev: amdgpu_device pointer
286 *
287 * Allocate a ring buffer for the interrupt controller,
288 * enable the RLC, disable interrupts, enable the IH
289 * ring buffer and enable it.
290 * Called at device load and resume.
291 * Returns 0 for success, errors for failure.
292 */
293static int ih_v7_0_irq_init(struct amdgpu_device *adev)
294{
295 struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
296 u32 ih_chicken;
297 u32 tmp;
298 int ret;
299 int i;
300
301 /* disable irqs */
302 ret = ih_v7_0_toggle_interrupts(adev, false);
303 if (ret)
304 return ret;
305
306 adev->nbio.funcs->ih_control(adev);
307
308 if (unlikely((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
309 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO))) {
310 if (ih[0]->use_bus_addr) {
311 ih_chicken = RREG32_SOC15(OSSSYS, 0, regIH_CHICKEN);
312 ih_chicken = REG_SET_FIELD(ih_chicken,
313 IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
314 WREG32_SOC15(OSSSYS, 0, regIH_CHICKEN, ih_chicken);
315 }
316 }
317
318 for (i = 0; i < ARRAY_SIZE(ih); i++) {
319 if (ih[i]->ring_size) {
320 ret = ih_v7_0_enable_ring(adev, ih[i]);
321 if (ret)
322 return ret;
323 }
324 }
325
326 /* update doorbell range for ih ring 0 */
327 adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
328 ih[0]->doorbell_index);
329
330 tmp = RREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL);
331 tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
332 CLIENT18_IS_STORM_CLIENT, 1);
333 WREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL, tmp);
334
335 tmp = RREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL);
336 tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
337 WREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL, tmp);
338
339 /* GC/MMHUB UTCL2 page fault interrupts are configured as
340 * MSI storm capable interrupts by default. The delay is
341 * used to avoid the ISR being called too frequently
342 * when page faults happen on several contiguous pages,
343 * and thus avoid an MSI storm */
344 tmp = RREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL);
345 tmp = REG_SET_FIELD(tmp, IH_MSI_STORM_CTRL,
346 DELAY, 3);
347 WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);
348
349 /* Redirect the interrupts to IH RB1 for dGPU */
350 if (adev->irq.ih1.ring_size) {
351 tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX);
352 tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0);
353 WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp);
354
355 tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA);
356 tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa);
357 tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0);
358 tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA,
359 SOURCE_ID_MATCH_ENABLE, 0x1);
360
361 WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp);
362 }
363
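	/* bus mastering is required for the IH to DMA wptr writebacks and
	 * MSI messages to the host.
	 */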
364 pci_set_master(adev->pdev);
365
366 /* enable interrupts */
367 ret = ih_v7_0_toggle_interrupts(adev, true);
368 if (ret)
369 return ret;
370 /* enable wptr force update for self int */
371 force_update_wptr_for_self_int(adev, 0, 8, true);
372
373 if (adev->irq.ih_soft.ring_size)
374 adev->irq.ih_soft.enabled = true;
375
376 return 0;
377}
378
379/**
380 * ih_v7_0_irq_disable - disable interrupts
381 *
382 * @adev: amdgpu_device pointer
383 *
384 * Disable interrupts on the hw.
385 */
386static void ih_v7_0_irq_disable(struct amdgpu_device *adev)
387{
388 force_update_wptr_for_self_int(adev, 0, 8, false);
389 ih_v7_0_toggle_interrupts(adev, false);
390
391 /* Wait and acknowledge irq */
392 mdelay(1);
393}
394
395/**
396 * ih_v7_0_get_wptr() - get the IH ring buffer wptr
397 *
398 * @adev: amdgpu_device pointer
399 * @ih: IH ring buffer to fetch wptr
400 *
401 * Get the IH ring buffer wptr from either the register
402 * or the writeback memory buffer. Also check for
403 * ring buffer overflow and deal with it.
404 * Returns the value of the wptr.
405 */
406static u32 ih_v7_0_get_wptr(struct amdgpu_device *adev,
407 struct amdgpu_ih_ring *ih)
408{
409 u32 wptr, tmp;
410 struct amdgpu_ih_regs *ih_regs;
411
412 wptr = le32_to_cpu(*ih->wptr_cpu);
413 ih_regs = &ih->ih_regs;
414
415 if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
416 goto out;
417
418 wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
419 if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
420 goto out;
421 wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
422
423 /* When a ring buffer overflow happens, start parsing interrupts
424 * from the last vector that was not overwritten (wptr + 32).
425 * Hopefully this should allow us to catch up.
426 */
427 tmp = (wptr + 32) & ih->ptr_mask;
428 dev_warn(adev->dev, "IH ring buffer overflow "
429 "(0x%08X, 0x%08X, 0x%08X)\n",
430 wptr, ih->rptr, tmp);
431 ih->rptr = tmp;
432
433 tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
434 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
435 WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
436
437 /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
438 * can be detected.
439 */
440 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
441 WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
442out:
443 return (wptr & ih->ptr_mask);
444}
445
446/**
447 * ih_v7_0_irq_rearm - rearm IRQ if lost
448 *
449 * @adev: amdgpu_device pointer
450 * @ih: IH ring to match
451 *
452 */
453static void ih_v7_0_irq_rearm(struct amdgpu_device *adev,
454 struct amdgpu_ih_ring *ih)
455{
456 uint32_t v = 0;
457 uint32_t i = 0;
458 struct amdgpu_ih_regs *ih_regs;
459
460 ih_regs = &ih->ih_regs;
461
462 /* Rearm IRQ / re-write doorbell if doorbell write is lost */
463 for (i = 0; i < MAX_REARM_RETRY; i++) {
464 v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
465 if ((v < ih->ring_size) && (v != ih->rptr))
466 WDOORBELL32(ih->doorbell_index, ih->rptr);
467 else
468 break;
469 }
470}
471
472/**
473 * ih_v7_0_set_rptr - set the IH ring buffer rptr
474 *
475 * @adev: amdgpu_device pointer
476 * @ih: IH ring buffer to set rptr
477 */
478static void ih_v7_0_set_rptr(struct amdgpu_device *adev,
479 struct amdgpu_ih_ring *ih)
480{
481 struct amdgpu_ih_regs *ih_regs;
482
483 if (ih->use_doorbell) {
484 /* XXX check if swapping is necessary on BE */
485 *ih->rptr_cpu = ih->rptr;
486 WDOORBELL32(ih->doorbell_index, ih->rptr);
487
488 if (amdgpu_sriov_vf(adev))
489 ih_v7_0_irq_rearm(adev, ih);
490 } else {
491 ih_regs = &ih->ih_regs;
492 WREG32(ih_regs->ih_rb_rptr, ih->rptr);
493 }
494}
495
496/**
497 * ih_v7_0_self_irq - dispatch work for ring 1
498 *
499 * @adev: amdgpu_device pointer
500 * @source: irq source
501 * @entry: IV with WPTR update
502 *
503 * Update the WPTR from the IV and schedule work to handle the entries.
504 */
505static int ih_v7_0_self_irq(struct amdgpu_device *adev,
506 struct amdgpu_irq_src *source,
507 struct amdgpu_iv_entry *entry)
508{
509 uint32_t wptr = cpu_to_le32(entry->src_data[0]);
510
511 switch (entry->ring_id) {
512 case 1:
513 *adev->irq.ih1.wptr_cpu = wptr;
514 schedule_work(&adev->irq.ih1_work);
515 break;
516 default: break;
517 }
518 return 0;
519}
520
521static const struct amdgpu_irq_src_funcs ih_v7_0_self_irq_funcs = {
522 .process = ih_v7_0_self_irq,
523};
524
525static void ih_v7_0_set_self_irq_funcs(struct amdgpu_device *adev)
526{
527 adev->irq.self_irq.num_types = 0;
528 adev->irq.self_irq.funcs = &ih_v7_0_self_irq_funcs;
529}
530
531static int ih_v7_0_early_init(struct amdgpu_ip_block *ip_block)
532{
533 struct amdgpu_device *adev = ip_block->adev;
534
535 ih_v7_0_set_interrupt_funcs(adev);
536 ih_v7_0_set_self_irq_funcs(adev);
537 return 0;
538}
539
540static int ih_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
541{
542 int r;
543 struct amdgpu_device *adev = ip_block->adev;
544 bool use_bus_addr;
545
546 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
547 &adev->irq.self_irq);
548
549 if (r)
550 return r;
551
552 /* use the gpu virtual address for the ih ring
553 * until IH_CHICKEN is programmed by the psp bl to allow
554 * using a bus address for the ih ring */
555 use_bus_addr =
556 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
557 r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
558 if (r)
559 return r;
560
561 adev->irq.ih.use_doorbell = true;
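	/* doorbell_index is assigned in 64-bit doorbell units; shift by 1 to
	 * get the 32-bit dword index used for the doorbell write.
	 */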
562 adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
563
564 if (!(adev->flags & AMD_IS_APU)) {
565 r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE,
566 use_bus_addr);
567 if (r)
568 return r;
569
570 adev->irq.ih1.use_doorbell = true;
571 adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
572 }
573
574 /* initialize ih control register offset */
575 ih_v7_0_init_register_offset(adev);
576
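	/* the soft IH ring has no hardware backing; amdgpu_irq_delegate() uses
	 * it to re-route IVs for processing outside the hard IRQ path.
	 */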
577 r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
578 if (r)
579 return r;
580
581 r = amdgpu_irq_init(adev);
582
583 return r;
584}
585
586static int ih_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
587{
588 struct amdgpu_device *adev = ip_block->adev;
589
590 amdgpu_irq_fini_sw(adev);
591
592 return 0;
593}
594
595static int ih_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
596{
597 int r;
598 struct amdgpu_device *adev = ip_block->adev;
599
600 r = ih_v7_0_irq_init(adev);
601 if (r)
602 return r;
603
604 return 0;
605}
606
607static int ih_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
608{
609 ih_v7_0_irq_disable(ip_block->adev);
610
611 return 0;
612}
613
614static int ih_v7_0_suspend(struct amdgpu_ip_block *ip_block)
615{
616 return ih_v7_0_hw_fini(ip_block);
617}
618
619static int ih_v7_0_resume(struct amdgpu_ip_block *ip_block)
620{
621 return ih_v7_0_hw_init(ip_block);
622}
623
624static bool ih_v7_0_is_idle(void *handle)
625{
626 /* todo */
627 return true;
628}
629
630static int ih_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
631{
632 /* todo */
633 return -ETIMEDOUT;
634}
635
636static int ih_v7_0_soft_reset(struct amdgpu_ip_block *ip_block)
637{
638 /* todo */
639 return 0;
640}
641
642static void ih_v7_0_update_clockgating_state(struct amdgpu_device *adev,
643 bool enable)
644{
645 uint32_t data, def, field_val;
646
647 if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
648 def = data = RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL);
649 field_val = enable ? 0 : 1;
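		/* a soft override of 0 lets the clock be gated; 1 forces it on */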
650 data = REG_SET_FIELD(data, IH_CLK_CTRL,
651 DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
652 data = REG_SET_FIELD(data, IH_CLK_CTRL,
653 OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
654 data = REG_SET_FIELD(data, IH_CLK_CTRL,
655 LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
656 data = REG_SET_FIELD(data, IH_CLK_CTRL,
657 DYN_CLK_SOFT_OVERRIDE, field_val);
658 data = REG_SET_FIELD(data, IH_CLK_CTRL,
659 REG_CLK_SOFT_OVERRIDE, field_val);
660 if (def != data)
661 WREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL, data);
662 }
663
664 return;
665}
666
667static int ih_v7_0_set_clockgating_state(void *handle,
668 enum amd_clockgating_state state)
669{
670 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
671
672 ih_v7_0_update_clockgating_state(adev,
673 state == AMD_CG_STATE_GATE);
674 return 0;
675}
676
677static void ih_v7_0_update_ih_mem_power_gating(struct amdgpu_device *adev,
678 bool enable)
679{
680 uint32_t ih_mem_pwr_cntl;
681
682 /* Disable ih sram power cntl before switching powergating mode */
683 ih_mem_pwr_cntl = RREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL);
684 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
685 IH_BUFFER_MEM_POWER_CTRL_EN, 0);
686 WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
687
688 /* It is recommended to set mem powergating mode to DS mode */
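	/* LS = light sleep, DS = deep sleep, SD = shutdown */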
689 if (enable) {
690 /* mem power mode */
691 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
692 IH_BUFFER_MEM_POWER_LS_EN, 0);
693 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
694 IH_BUFFER_MEM_POWER_DS_EN, 1);
695 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
696 IH_BUFFER_MEM_POWER_SD_EN, 0);
697 /* cam mem power mode */
698 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
699 IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
700 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
701 IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 1);
702 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
703 IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
704 /* re-enable power cntl */
705 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
706 IH_BUFFER_MEM_POWER_CTRL_EN, 1);
707 } else {
708 /* mem power mode */
709 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
710 IH_BUFFER_MEM_POWER_LS_EN, 0);
711 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
712 IH_BUFFER_MEM_POWER_DS_EN, 0);
713 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
714 IH_BUFFER_MEM_POWER_SD_EN, 0);
715 /* cam mem power mode */
716 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
717 IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
718 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
719 IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 0);
720 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
721 IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
722 /* re-enable power cntl */
723 ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
724 IH_BUFFER_MEM_POWER_CTRL_EN, 1);
725 }
726
727 WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
728}
729
730static int ih_v7_0_set_powergating_state(void *handle,
731 enum amd_powergating_state state)
732{
733 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
734 bool enable = (state == AMD_PG_STATE_GATE);
735
736 if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
737 ih_v7_0_update_ih_mem_power_gating(adev, enable);
738
739 return 0;
740}
741
742static void ih_v7_0_get_clockgating_state(void *handle, u64 *flags)
743{
744 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
745
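	/* all soft-override bits clear implies IH clock gating is active */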
746 if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
747 *flags |= AMD_CG_SUPPORT_IH_CG;
748
749 return;
750}
751
752static const struct amd_ip_funcs ih_v7_0_ip_funcs = {
753 .name = "ih_v7_0",
754 .early_init = ih_v7_0_early_init,
755 .sw_init = ih_v7_0_sw_init,
756 .sw_fini = ih_v7_0_sw_fini,
757 .hw_init = ih_v7_0_hw_init,
758 .hw_fini = ih_v7_0_hw_fini,
759 .suspend = ih_v7_0_suspend,
760 .resume = ih_v7_0_resume,
761 .is_idle = ih_v7_0_is_idle,
762 .wait_for_idle = ih_v7_0_wait_for_idle,
763 .soft_reset = ih_v7_0_soft_reset,
764 .set_clockgating_state = ih_v7_0_set_clockgating_state,
765 .set_powergating_state = ih_v7_0_set_powergating_state,
766 .get_clockgating_state = ih_v7_0_get_clockgating_state,
767};
768
769static const struct amdgpu_ih_funcs ih_v7_0_funcs = {
770 .get_wptr = ih_v7_0_get_wptr,
771 .decode_iv = amdgpu_ih_decode_iv_helper,
772 .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
773 .set_rptr = ih_v7_0_set_rptr
774};
775
776static void ih_v7_0_set_interrupt_funcs(struct amdgpu_device *adev)
777{
778 adev->irq.ih_funcs = &ih_v7_0_funcs;
779}
780
781const struct amdgpu_ip_block_version ih_v7_0_ip_block =
782{
783 .type = AMD_IP_BLOCK_TYPE_IH,
784 .major = 7,
785 .minor = 0,
786 .rev = 0,
787 .funcs = &ih_v7_0_ip_funcs,
788};