/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"
#include "umc_v8_7.h"

#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "gfxhub_v2_1.h"
#include "mmhub_v2_0.h"
#include "mmhub_v2_3.h"
#include "athub_v2_0.h"
#include "athub_v2_1.h"

#include "amdgpu_reset.h"

#if 0
static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
{
	/* TODO add golden setting for hdp */
};
#endif

static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

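/* Enable or disable VM fault reporting on both the MM and GFX hubs. */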
static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
		break;
	default:
		break;
	}

	return 0;
}

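/*
 * Handle a VM fault interrupt: filter and delegate retry faults, latch the
 * L2 protection fault status and log the faulting address, client and task.
 */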
static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	struct amdgpu_task_info task_info;
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault) {
		/* Returning 1 here also prevents sending the IV to the KFD */

		/* Process it only if it's the first fault for this address */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
			return 1;
	}

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
		    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (!printk_ratelimit())
		return 0;

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
		"for process %s pid %d thread %s pid %d)\n",
		entry->vmid_src ? "mmhub" : "gfxhub",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, " in page starting at address 0x%016llx from client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	if (!amdgpu_sriov_vf(adev))
		hub->vmhub_funcs->print_l2_protection_fault_status(adev,
								   status);

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,
	.process = gmc_v10_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
	.set = gmc_v10_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
	}
}

/**
 * gmc_v10_0_use_invalidate_semaphore - judge whether to use semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)));
}

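/* Read the ATC VMID-to-PASID mapping register; returns true if the entry is valid. */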
static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		       + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 maps the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

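/*
 * Flush the TLB of one hub through MMIO using invalidation engine 17,
 * optionally guarded by the invalidation semaphore.
 */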
static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;
	unsigned char hub_ip = 0;

	hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
		 GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPU VM invalidate acknowledge state may be lost across a
	 * power-gating off cycle, so add a semaphore acquire before the
	 * invalidation and a semaphore release after it to avoid entering
	 * the power-gated state and work around the issue.
	 */

	/* TODO: Debugging with the semaphore for GFXHUB still needs to continue. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
						hub->eng_distance * eng, hub_ip);

			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
			  hub->eng_distance * eng,
			  inv_req, hub_ip);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if ((vmhub == AMDGPU_GFXHUB_0) &&
	    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
		RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
				  hub->eng_distance * eng, hub_ip);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
					hub->eng_distance * eng, hub_ip);

		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: Debugging with the semaphore for GFXHUB still needs to continue. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
				  hub->eng_distance * eng, 0, hub_ip);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub);
}

/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: vmhub type
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence;
	struct amdgpu_job *job;

	int r;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* For SRIOV run time, the driver shouldn't access the register
	 * through MMIO. Directly use the KIQ to do the VM invalidation instead.
	 */
	if (adev->gfx.kiq.ring.sched.ready && !adev->enable_mes &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_domain->sem)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);

		up_read(&adev->reset_domain->sem);
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);

	if (vmhub == AMDGPU_MMHUB_0) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	BUG_ON(vmhub != AMDGPU_GFXHUB_0);

	if (!adev->mman.buffer_funcs_enabled ||
	    !adev->ib_pool_ready ||
	    amdgpu_in_reset(adev) ||
	    ring->sched.ready == false) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	/* The SDMA on Navi has a bug which can theoretically result in memory
	 * corruption if an invalidation happens at the same time as a VA
	 * translation. Avoid this by doing the invalidation from the SDMA
	 * itself.
	 */
	r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
				     &job);
	if (r)
		goto error_alloc;

	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
	job->vm_needs_flush = true;
	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	fence = amdgpu_job_submit(job);

	mutex_unlock(&adev->mman.gtt_window_lock);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return;

error_alloc:
	mutex_unlock(&adev->mman.gtt_window_lock);
	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}

/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB()
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					      pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {

		ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
								&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v10_0_flush_gpu_tlb(adev, vmid,
								i, flush_type);
			} else {
				gmc_v10_0_flush_gpu_tlb(adev, vmid,
							AMDGPU_GFXHUB_0, flush_type);
			}
			if (!adev->enable_mes)
				break;
		}
	}

	return 0;
}

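/* Emit the page directory update and TLB invalidation for a VMID on this ring's hub. */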
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPU VM invalidate acknowledge state may be lost across a
	 * power-gating off cycle, so add a semaphore acquire before the
	 * invalidation and a semaphore release after it to avoid entering
	 * the power-gated state and work around the issue.
	 */

	/* TODO: Debugging with the semaphore for GFXHUB still needs to continue. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: Debugging with the semaphore for GFXHUB still needs to continue. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

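/* Emit a write that updates the IH VMID-to-PASID lookup table for this ring's hub. */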
static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* MES fw manages IH_VMID_x_LUT updating */
	if (ring->is_mes_queue)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on NAVI 10:
 * 63:59 reserved
 * 58 reserved and for sienna_cichlid is used for MALL noalloc
 * 57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

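/* Translate the UAPI AMDGPU_VM_MTYPE_* flags into NV10 PTE memory-type bits. */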
static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

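/* Adjust a PDE address and its flags for the requested page table level. */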
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

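/* Compute the PTE flags for a mapping, honoring PRT and BO coherency flags. */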
static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	*flags &= ~AMDGPU_PTE_NOALLOC;
	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
			       AMDGPU_GEM_CREATE_UNCACHED))
		*flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
			 AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
}

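/* Return the size, in bytes, of the framebuffer allocation reserved by the VBIOS. */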
static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}

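/* Set up the UMC callbacks and register the UMC RAS block where applicable. */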
static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(8, 7, 0):
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v8_7_ras;
		break;
	default:
		break;
	}
	if (adev->umc.ras) {
		amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);

		strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
		adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
		adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;

		/* If no special ras_late_init function is defined, use the default ras_late_init */
		if (!adev->umc.ras->ras_block.ras_late_init)
			adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

		/* If no special ras_cb function is defined, use the default ras_cb */
		if (!adev->umc.ras->ras_block.ras_cb)
			adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
	}
}


static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 4, 0):
	case IP_VERSION(2, 4, 1):
		adev->mmhub.funcs = &mmhub_v2_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v2_0_funcs;
		break;
	}
}

static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
		break;
	}
}


static int gmc_v10_0_early_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_mmhub_funcs(adev);
	gmc_v10_0_set_gfxhub_funcs(adev);
	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);
	gmc_v10_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	r = amdgpu_gmc_ras_early_init(adev);
	if (r)
		return r;

	return 0;
}

static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

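/* Place the VRAM, GART and AGP apertures within the GPU's physical address space. */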
static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->gfxhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* add the xgmi offset of the physical node */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* VRAM size in bytes (the register reports the size in MB) */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->ip_versions[GC_HWIP][0]) {
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case IP_VERSION(10, 3, 1):   /* DCE SG support */
		case IP_VERSION(10, 3, 3):   /* DCE SG support */
		case IP_VERSION(10, 3, 6):   /* DCE SG support */
		case IP_VERSION(10, 3, 7):   /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

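/* Allocate the GART table in VRAM and set the default GART PTE flags. */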
static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

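/*
 * Software init: query VRAM parameters, set the VM and MALL sizes for the
 * GC IP version, register the fault/ECC interrupt sources and bring up the
 * memory manager, GART and VM manager.
 */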
static int gmc_v10_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
		adev->gmc.vram_width = 64;
	} else if (amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
						      &vram_width, &vram_type, &vram_vendor);
		adev->gmc.vram_width = vram_width;

		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 3, 0):
		adev->gmc.mall_size = 128 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 2):
		adev->gmc.mall_size = 96 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 4):
		adev->gmc.mall_size = 32 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 5):
		adev->gmc.mall_size = 16 * 1024 * 1024;
		break;
	default:
		adev->gmc.mall_size = 0;
		break;
	}

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		adev->num_vmhubs = 2;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Navi10/Navi14/Navi12,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->init_registers(adev);

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v10_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	/*
	 * Harvestable groups in gc_utcl2 need to be programmed before any GFX block
	 * register setup within GMC, otherwise the system hangs when harvesting SAs.
	 */
	if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
		adev->gfxhub.funcs->utcl2_harvest(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1) {
		r = amdgpu_gmc_vram_checking(adev);
		if (r)
			return r;
	}

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);

	return 0;
}

static int gmc_v10_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v10_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * The issue of MMHUB being unable to disconnect from DF with MMHUB
	 * clock gating disabled is a new problem observed on DF 3.0.3; with
	 * the same suspend sequence, no issue has been seen on the DF 3.0.2
	 * series platforms.
	 */
	if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) {
		dev_dbg(adev->dev, "keep mmhub clock gating enabled for s0ix\n");
		return 0;
	}

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
		return athub_v2_1_set_clockgating(adev, state);
	else
		return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3) ||
	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 4))
		return;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
		athub_v2_1_get_clockgating(adev, flags);
	else
		athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};
1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/firmware.h>
24#include <linux/pci.h>
25#include "amdgpu.h"
26#include "amdgpu_atomfirmware.h"
27#include "gmc_v10_0.h"
28
29#include "hdp/hdp_5_0_0_offset.h"
30#include "hdp/hdp_5_0_0_sh_mask.h"
31#include "gc/gc_10_1_0_sh_mask.h"
32#include "mmhub/mmhub_2_0_0_sh_mask.h"
33#include "athub/athub_2_0_0_sh_mask.h"
34#include "athub/athub_2_0_0_offset.h"
35#include "dcn/dcn_2_0_0_offset.h"
36#include "dcn/dcn_2_0_0_sh_mask.h"
37#include "oss/osssys_5_0_0_offset.h"
38#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
39#include "navi10_enum.h"
40
41#include "soc15.h"
42#include "soc15d.h"
43#include "soc15_common.h"
44
45#include "nbio_v2_3.h"
46
47#include "gfxhub_v2_0.h"
48#include "gfxhub_v2_1.h"
49#include "mmhub_v2_0.h"
50#include "athub_v2_0.h"
51#include "athub_v2_1.h"
52
53#if 0
54static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
55{
56 /* TODO add golden setting for hdp */
57};
58#endif
59
60static int
61gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
62 struct amdgpu_irq_src *src, unsigned type,
63 enum amdgpu_interrupt_state state)
64{
65 struct amdgpu_vmhub *hub;
66 u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i;
67
68 bits[AMDGPU_GFXHUB_0] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
69 GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
70 GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
71 GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
72 GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
73 GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
74 GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
75
76 bits[AMDGPU_MMHUB_0] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
77 MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
78 MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
79 MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
80 MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
81 MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
82 MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
83
84 switch (state) {
85 case AMDGPU_IRQ_STATE_DISABLE:
86 /* MM HUB */
87 hub = &adev->vmhub[AMDGPU_MMHUB_0];
88 for (i = 0; i < 16; i++) {
89 reg = hub->vm_context0_cntl + hub->ctx_distance * i;
90 tmp = RREG32(reg);
91 tmp &= ~bits[AMDGPU_MMHUB_0];
92 WREG32(reg, tmp);
93 }
94
95 /* GFX HUB */
96 hub = &adev->vmhub[AMDGPU_GFXHUB_0];
97 for (i = 0; i < 16; i++) {
98 reg = hub->vm_context0_cntl + hub->ctx_distance * i;
99 tmp = RREG32(reg);
100 tmp &= ~bits[AMDGPU_GFXHUB_0];
101 WREG32(reg, tmp);
102 }
103 break;
104 case AMDGPU_IRQ_STATE_ENABLE:
105 /* MM HUB */
106 hub = &adev->vmhub[AMDGPU_MMHUB_0];
107 for (i = 0; i < 16; i++) {
108 reg = hub->vm_context0_cntl + hub->ctx_distance * i;
109 tmp = RREG32(reg);
110 tmp |= bits[AMDGPU_MMHUB_0];
111 WREG32(reg, tmp);
112 }
113
114 /* GFX HUB */
115 hub = &adev->vmhub[AMDGPU_GFXHUB_0];
116 for (i = 0; i < 16; i++) {
117 reg = hub->vm_context0_cntl + hub->ctx_distance * i;
118 tmp = RREG32(reg);
119 tmp |= bits[AMDGPU_GFXHUB_0];
120 WREG32(reg, tmp);
121 }
122 break;
123 default:
124 break;
125 }
126
127 return 0;
128}
129
130static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
131 struct amdgpu_irq_src *source,
132 struct amdgpu_iv_entry *entry)
133{
134 struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
135 uint32_t status = 0;
136 u64 addr;
137
138 addr = (u64)entry->src_data[0] << 12;
139 addr |= ((u64)entry->src_data[1] & 0xf) << 44;
140
141 if (!amdgpu_sriov_vf(adev)) {
142 /*
143 * Issue a dummy read to wait for the status register to
144 * be updated to avoid reading an incorrect value due to
145 * the new fast GRBM interface.
146 */
147 if (entry->vmid_src == AMDGPU_GFXHUB_0)
148 RREG32(hub->vm_l2_pro_fault_status);
149
150 status = RREG32(hub->vm_l2_pro_fault_status);
151 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
152 }
153
154 if (printk_ratelimit()) {
155 struct amdgpu_task_info task_info;
156
157 memset(&task_info, 0, sizeof(struct amdgpu_task_info));
158 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
159
160 dev_err(adev->dev,
161 "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
162 "for process %s pid %d thread %s pid %d)\n",
163 entry->vmid_src ? "mmhub" : "gfxhub",
164 entry->src_id, entry->ring_id, entry->vmid,
165 entry->pasid, task_info.process_name, task_info.tgid,
166 task_info.task_name, task_info.pid);
167 dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
168 addr, entry->client_id);
169 if (!amdgpu_sriov_vf(adev)) {
170 dev_err(adev->dev,
171 "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
172 status);
173 dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
174 REG_GET_FIELD(status,
175 GCVM_L2_PROTECTION_FAULT_STATUS, CID));
176 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
177 REG_GET_FIELD(status,
178 GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
179 dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
180 REG_GET_FIELD(status,
181 GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
182 dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
183 REG_GET_FIELD(status,
184 GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
185 dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
186 REG_GET_FIELD(status,
187 GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
188 dev_err(adev->dev, "\t RW: 0x%lx\n",
189 REG_GET_FIELD(status,
190 GCVM_L2_PROTECTION_FAULT_STATUS, RW));
191 }
192 }
193
194 return 0;
195}
196
197static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
198 .set = gmc_v10_0_vm_fault_interrupt_state,
199 .process = gmc_v10_0_process_interrupt,
200};
201
202static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
203{
204 adev->gmc.vm_fault.num_types = 1;
205 adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
206}
207
208static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
209 uint32_t flush_type)
210{
211 u32 req = 0;
212
213 /* invalidate using legacy mode on vmid*/
214 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
215 PER_VMID_INVALIDATE_REQ, 1 << vmid);
216 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
217 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
218 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
219 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
220 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
221 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
222 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
223 CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
224
225 return req;
226}
227
228/**
229 * gmc_v10_0_use_invalidate_semaphore - judge whether to use semaphore
230 *
231 * @adev: amdgpu_device pointer
232 * @vmhub: vmhub type
233 *
234 */
235static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
236 uint32_t vmhub)
237{
238 return ((vmhub == AMDGPU_MMHUB_0 ||
239 vmhub == AMDGPU_MMHUB_1) &&
240 (!amdgpu_sriov_vf(adev)));
241}
242
243static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
244 struct amdgpu_device *adev,
245 uint8_t vmid, uint16_t *p_pasid)
246{
247 uint32_t value;
248
249 value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
250 + vmid);
251 *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
252
253 return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
254}
255
256/*
257 * GART
258 * VMID 0 is the physical GPU addresses as used by the kernel.
259 * VMIDs 1-15 are used for userspace clients and are handled
260 * by the amdgpu vm/hsa code.
261 */
262
263static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
264 unsigned int vmhub, uint32_t flush_type)
265{
266 bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
267 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
268 u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
269 u32 tmp;
270 /* Use register 17 for GART */
271 const unsigned eng = 17;
272 unsigned int i;
273
274 spin_lock(&adev->gmc.invalidate_lock);
275 /*
276 * It may lose gpuvm invalidate acknowldege state across power-gating
277 * off cycle, add semaphore acquire before invalidation and semaphore
278 * release after invalidation to avoid entering power gated state
279 * to WA the Issue
280 */
281
282 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
283 if (use_semaphore) {
284 for (i = 0; i < adev->usec_timeout; i++) {
285 /* a read return value of 1 means semaphore acuqire */
286 tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
287 hub->eng_distance * eng);
288 if (tmp & 0x1)
289 break;
290 udelay(1);
291 }
292
293 if (i >= adev->usec_timeout)
294 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
295 }
296
297 WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
298
299 /*
300 * Issue a dummy read to wait for the ACK register to be cleared
301 * to avoid a false ACK due to the new fast GRBM interface.
302 */
303 if (vmhub == AMDGPU_GFXHUB_0)
304 RREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng);
305
306 /* Wait for ACK with a delay.*/
307 for (i = 0; i < adev->usec_timeout; i++) {
308 tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
309 hub->eng_distance * eng);
310 tmp &= 1 << vmid;
311 if (tmp)
312 break;
313
314 udelay(1);
315 }
316
317 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
318 if (use_semaphore)
319 /*
320 * add semaphore release after invalidation,
321 * write with 0 means semaphore release
322 */
323 WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
324 hub->eng_distance * eng, 0);
325
326 spin_unlock(&adev->gmc.invalidate_lock);
327
328 if (i < adev->usec_timeout)
329 return;
330
331 DRM_ERROR("Timeout waiting for VM flush ACK!\n");
332}
333
334/**
335 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
336 *
337 * @adev: amdgpu_device pointer
338 * @vmid: vm instance to flush
339 *
340 * Flush the TLB for the requested page table.
341 */
342static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
343 uint32_t vmhub, uint32_t flush_type)
344{
345 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
346 struct dma_fence *fence;
347 struct amdgpu_job *job;
348
349 int r;
350
351 /* flush hdp cache */
352 adev->nbio.funcs->hdp_flush(adev, NULL);
353
354 /* For SRIOV run time, driver shouldn't access the register through MMIO
355 * Directly use kiq to do the vm invalidation instead
356 */
357 if (adev->gfx.kiq.ring.sched.ready &&
358 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
359 !adev->in_gpu_reset) {
360
361 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
362 const unsigned eng = 17;
363 u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
364 u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
365 u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
366
367 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
368 1 << vmid);
369 return;
370 }
371
372 mutex_lock(&adev->mman.gtt_window_lock);
373
374 if (vmhub == AMDGPU_MMHUB_0) {
375 gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
376 mutex_unlock(&adev->mman.gtt_window_lock);
377 return;
378 }
379
380 BUG_ON(vmhub != AMDGPU_GFXHUB_0);
381
382 if (!adev->mman.buffer_funcs_enabled ||
383 !adev->ib_pool_ready ||
384 adev->in_gpu_reset ||
385 ring->sched.ready == false) {
386 gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
387 mutex_unlock(&adev->mman.gtt_window_lock);
388 return;
389 }
390
391 /* The SDMA on Navi has a bug which can theoretically result in memory
392 * corruption if an invalidation happens at the same time as an VA
393 * translation. Avoid this by doing the invalidation from the SDMA
394 * itself.
395 */
396 r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
397 &job);
398 if (r)
399 goto error_alloc;
400
401 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
402 job->vm_needs_flush = true;
403 job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
404 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
405 r = amdgpu_job_submit(job, &adev->mman.entity,
406 AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
407 if (r)
408 goto error_submit;
409
410 mutex_unlock(&adev->mman.gtt_window_lock);
411
412 dma_fence_wait(fence, false);
413 dma_fence_put(fence);
414
415 return;
416
417error_submit:
418 amdgpu_job_free(job);
419
420error_alloc:
421 mutex_unlock(&adev->mman.gtt_window_lock);
422 DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
423}
424
425/**
426 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
427 *
428 * @adev: amdgpu_device pointer
429 * @pasid: pasid to be flush
430 *
431 * Flush the TLB for the requested pasid.
432 */
433static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
434 uint16_t pasid, uint32_t flush_type,
435 bool all_hub)
436{
437 int vmid, i;
438 signed long r;
439 uint32_t seq;
440 uint16_t queried_pasid;
441 bool ret;
442 struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
443 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
444
445 if (amdgpu_emu_mode == 0 && ring->sched.ready) {
446 spin_lock(&adev->gfx.kiq.ring_lock);
447 /* 2 dwords flush + 8 dwords fence */
448 amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
449 kiq->pmf->kiq_invalidate_tlbs(ring,
450 pasid, flush_type, all_hub);
451 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
452 if (r) {
453 amdgpu_ring_undo(ring);
454 spin_unlock(&adev->gfx.kiq.ring_lock);
455 return -ETIME;
456 }
457
458 amdgpu_ring_commit(ring);
459 spin_unlock(&adev->gfx.kiq.ring_lock);
460 r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
461 if (r < 1) {
462 DRM_ERROR("wait for kiq fence error: %ld.\n", r);
463 return -ETIME;
464 }
465
466 return 0;
467 }
468
469 for (vmid = 1; vmid < 16; vmid++) {
470
471 ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
472 &queried_pasid);
473 if (ret && queried_pasid == pasid) {
474 if (all_hub) {
475 for (i = 0; i < adev->num_vmhubs; i++)
476 gmc_v10_0_flush_gpu_tlb(adev, vmid,
477 i, flush_type);
478 } else {
479 gmc_v10_0_flush_gpu_tlb(adev, vmid,
480 AMDGPU_GFXHUB_0, flush_type);
481 }
482 break;
483 }
484 }
485
486 return 0;
487}
488
489static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
490 unsigned vmid, uint64_t pd_addr)
491{
492 bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
493 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
494 uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
495 unsigned eng = ring->vm_inv_eng;
496
497 /*
498 * It may lose gpuvm invalidate acknowldege state across power-gating
499 * off cycle, add semaphore acquire before invalidation and semaphore
500 * release after invalidation to avoid entering power gated state
501 * to WA the Issue
502 */
503
504 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
505 if (use_semaphore)
506 /* a read return value of 1 means semaphore acuqire */
507 amdgpu_ring_emit_reg_wait(ring,
508 hub->vm_inv_eng0_sem +
509 hub->eng_distance * eng, 0x1, 0x1);
510
511 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
512 (hub->ctx_addr_distance * vmid),
513 lower_32_bits(pd_addr));
514
515 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
516 (hub->ctx_addr_distance * vmid),
517 upper_32_bits(pd_addr));
518
519 amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
520 hub->eng_distance * eng,
521 hub->vm_inv_eng0_ack +
522 hub->eng_distance * eng,
523 req, 1 << vmid);
524
525 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
526 if (use_semaphore)
527 /*
528 * add semaphore release after invalidation,
529 * write with 0 means semaphore release
530 */
531 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
532 hub->eng_distance * eng, 0);
533
534 return pd_addr;
535}
536
537static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
538 unsigned pasid)
539{
540 struct amdgpu_device *adev = ring->adev;
541 uint32_t reg;
542
543 if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
544 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
545 else
546 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
547
548 amdgpu_ring_emit_wreg(ring, reg, pasid);
549}
550
551/*
552 * PTE format on NAVI 10:
553 * 63:59 reserved
554 * 58:57 reserved
555 * 56 F
556 * 55 L
557 * 54 reserved
558 * 53:52 SW
559 * 51 T
560 * 50:48 mtype
561 * 47:12 4k physical page base address
562 * 11:7 fragment
563 * 6 write
564 * 5 read
565 * 4 exe
566 * 3 Z
567 * 2 snooped
568 * 1 system
569 * 0 valid
570 *
571 * PDE format on NAVI 10:
572 * 63:59 block fragment size
573 * 58:55 reserved
574 * 54 P
575 * 53:48 reserved
576 * 47:6 physical base address of PD or PTE
577 * 5:3 reserved
578 * 2 C
579 * 1 system
580 * 0 valid
581 */
582
583static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
584{
585 switch (flags) {
586 case AMDGPU_VM_MTYPE_DEFAULT:
587 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
588 case AMDGPU_VM_MTYPE_NC:
589 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
590 case AMDGPU_VM_MTYPE_WC:
591 return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
592 case AMDGPU_VM_MTYPE_CC:
593 return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
594 case AMDGPU_VM_MTYPE_UC:
595 return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
596 default:
597 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
598 }
599}
600
601static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
602 uint64_t *addr, uint64_t *flags)
603{
604 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
605 *addr = adev->vm_manager.vram_base_offset + *addr -
606 adev->gmc.vram_start;
607 BUG_ON(*addr & 0xFFFF00000000003FULL);
608
609 if (!adev->gmc.translate_further)
610 return;
611
612 if (level == AMDGPU_VM_PDB1) {
613 /* Set the block fragment size */
614 if (!(*flags & AMDGPU_PDE_PTE))
615 *flags |= AMDGPU_PDE_BFS(0x9);
616
617 } else if (level == AMDGPU_VM_PDB0) {
618 if (*flags & AMDGPU_PDE_PTE)
619 *flags &= ~AMDGPU_PDE_PTE;
620 else
621 *flags |= AMDGPU_PTE_TF;
622 }
623}
624
625static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
626 struct amdgpu_bo_va_mapping *mapping,
627 uint64_t *flags)
628{
629 *flags &= ~AMDGPU_PTE_EXECUTABLE;
630 *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
631
632 *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
633 *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
634
635 if (mapping->flags & AMDGPU_PTE_PRT) {
636 *flags |= AMDGPU_PTE_PRT;
637 *flags |= AMDGPU_PTE_SNOOPED;
638 *flags |= AMDGPU_PTE_LOG;
639 *flags |= AMDGPU_PTE_SYSTEM;
640 *flags &= ~AMDGPU_PTE_VALID;
641 }
642}
643
644static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
645 .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
646 .flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
647 .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
648 .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
649 .map_mtype = gmc_v10_0_map_mtype,
650 .get_vm_pde = gmc_v10_0_get_vm_pde,
651 .get_vm_pte = gmc_v10_0_get_vm_pte
652};
653
654static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
655{
656 if (adev->gmc.gmc_funcs == NULL)
657 adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
658}
659
660static int gmc_v10_0_early_init(void *handle)
661{
662 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
663
664 gmc_v10_0_set_gmc_funcs(adev);
665 gmc_v10_0_set_irq_funcs(adev);
666
667 adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
668 adev->gmc.shared_aperture_end =
669 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
670 adev->gmc.private_aperture_start = 0x1000000000000000ULL;
671 adev->gmc.private_aperture_end =
672 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
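/*
 * For illustration: each aperture spans 4 GB (4ULL << 30), so the
 * shared aperture covers 0x2000000000000000 - 0x20000000FFFFFFFF and
 * the private aperture covers 0x1000000000000000 - 0x10000000FFFFFFFF.
 */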
673
674 return 0;
675}
676
677static int gmc_v10_0_late_init(void *handle)
678{
679 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
680 int r;
681
682 amdgpu_bo_late_init(adev);
683
684 r = amdgpu_gmc_allocate_vm_inv_eng(adev);
685 if (r)
686 return r;
687
688 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
689}
690
691static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
692 struct amdgpu_gmc *mc)
693{
694 u64 base = 0;
695
696 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
697 adev->asic_type == CHIP_NAVY_FLOUNDER)
698 base = gfxhub_v2_1_get_fb_location(adev);
699 else
700 base = gfxhub_v2_0_get_fb_location(adev);
701
702 /* add the xgmi offset of the physical node */
703 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
704
705 amdgpu_gmc_vram_location(adev, &adev->gmc, base);
706 amdgpu_gmc_gart_location(adev, mc);
707
708 /* base offset of vram pages */
709 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
710 adev->asic_type == CHIP_NAVY_FLOUNDER)
711 adev->vm_manager.vram_base_offset = gfxhub_v2_1_get_mc_fb_offset(adev);
712 else
713 adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev);
714
715 /* add the xgmi offset of the physical node */
716 adev->vm_manager.vram_base_offset +=
717 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
718}
719
720/**
721 * gmc_v10_0_mc_init - initialize the memory controller driver params
722 *
723 * @adev: amdgpu_device pointer
724 *
725 * Look up the amount of vram, vram width, and decide how to place
726 * vram and gart within the GPU's physical address space.
727 * Returns 0 for success.
728 */
729static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
730{
731 int r;
732
733 /* get_memsize() returns the VRAM size in MB */
734 adev->gmc.mc_vram_size =
735 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
736 adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
737
738 if (!(adev->flags & AMD_IS_APU)) {
739 r = amdgpu_device_resize_fb_bar(adev);
740 if (r)
741 return r;
742 }
743 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
744 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
745
746 /* In case the PCI BAR is larger than the actual amount of vram */
747 adev->gmc.visible_vram_size = adev->gmc.aper_size;
748 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
749 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
750
751 /* set the gart size */
752 if (amdgpu_gart_size == -1) {
753 switch (adev->asic_type) {
754 case CHIP_NAVI10:
755 case CHIP_NAVI14:
756 case CHIP_NAVI12:
757 case CHIP_SIENNA_CICHLID:
758 case CHIP_NAVY_FLOUNDER:
759 default:
760 adev->gmc.gart_size = 512ULL << 20;
761 break;
762 }
763 } else
764 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
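/*
 * Usage note: amdgpu_gart_size comes from the amdgpu.gart_size module
 * parameter in MB, so booting with e.g. amdgpu.gart_size=1024 selects a
 * 1 GB GART instead of the 512 MB default above; -1 keeps the default.
 */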
765
766 gmc_v10_0_vram_gtt_location(adev, &adev->gmc);
767
768 return 0;
769}
770
771static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
772{
773 int r;
774
775 if (adev->gart.bo) {
776 WARN(1, "NAVI10 PCIE GART already initialized\n");
777 return 0;
778 }
779
780 /* Initialize common gart structure */
781 r = amdgpu_gart_init(adev);
782 if (r)
783 return r;
784
785 adev->gart.table_size = adev->gart.num_gpu_pages * 8;
786 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
787 AMDGPU_PTE_EXECUTABLE;
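/*
 * Worked example: with the default 512 MB GART and 4 KiB GPU pages,
 * num_gpu_pages = (512 << 20) / 4096 = 131072, so the GART table
 * itself takes 131072 * 8 = 1 MiB of VRAM, one 64-bit PTE per page
 * with the UC + EXECUTABLE default flags above.
 */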
788
789 return amdgpu_gart_table_vram_alloc(adev);
790}
791
792static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
793{
794 u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
795 unsigned size;
796
797 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
798 size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
799 } else {
800 u32 viewport;
801 u32 pitch;
802
803 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
804 pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
805 size = (REG_GET_FIELD(viewport,
806 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
807 REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
808 4);
809 }
810 /* return 0 if the pre-OS buffer uses up most of vram */
811 if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) {
812 DRM_ERROR("Warning: pre-OS buffer uses most of vram, \
813 be aware of gart table overwrite\n");
814 return 0;
815 }
816
817 return size;
818}
819
822static int gmc_v10_0_sw_init(void *handle)
823{
824 int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
825 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
826
827 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
828 adev->asic_type == CHIP_NAVY_FLOUNDER)
829 gfxhub_v2_1_init(adev);
830 else
831 gfxhub_v2_0_init(adev);
832
833 mmhub_v2_0_init(adev);
834
835 spin_lock_init(&adev->gmc.invalidate_lock);
836
837 if (adev->asic_type == CHIP_SIENNA_CICHLID && amdgpu_emu_mode == 1) {
838 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
839 adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
840 } else {
841 r = amdgpu_atomfirmware_get_vram_info(adev,
842 &vram_width, &vram_type, &vram_vendor);
843 adev->gmc.vram_width = vram_width;
844
845 adev->gmc.vram_type = vram_type;
846 adev->gmc.vram_vendor = vram_vendor;
847 }
848
849 switch (adev->asic_type) {
850 case CHIP_NAVI10:
851 case CHIP_NAVI14:
852 case CHIP_NAVI12:
853 case CHIP_SIENNA_CICHLID:
854 case CHIP_NAVY_FLOUNDER:
855 adev->num_vmhubs = 2;
856 /*
857 * To make use of 4-level page table support, the VM size is set to
858 * 256 TB (48 bit), the maximum for Navi10/Navi14/Navi12, with a
859 * block size of 512 (9 bit).
860 */
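/*
 * Illustrative arithmetic: 256 * 1024 GB = 2^48 bytes of VM space;
 * 4 KiB pages leave 36 bits of page index, and 9 bits per level gives
 * 512 entries per level, i.e. 36 / 9 = 4 translation levels.
 */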
861 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
862 break;
863 default:
864 break;
865 }
866
867 /* This is the VMC page fault interrupt. */
868 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
869 VMC_1_0__SRCID__VM_FAULT,
870 &adev->gmc.vm_fault);
871
872 if (r)
873 return r;
874
875 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
876 UTCL2_1_0__SRCID__FAULT,
877 &adev->gmc.vm_fault);
878 if (r)
879 return r;
880
881 /*
882 * Set the internal MC address mask. This is the max address of the GPU's
883 * internal address space.
884 */
885 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
886
887 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
888 if (r) {
889 dev_warn(adev->dev, "No suitable DMA available\n");
890 return r;
891 }
892
893 if (adev->gmc.xgmi.supported) {
894 r = gfxhub_v2_1_get_xgmi_info(adev);
895 if (r)
896 return r;
897 }
898
899 r = gmc_v10_0_mc_init(adev);
900 if (r)
901 return r;
902
903 adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);
904
905 /* Memory manager */
906 r = amdgpu_bo_init(adev);
907 if (r)
908 return r;
909
910 r = gmc_v10_0_gart_init(adev);
911 if (r)
912 return r;
913
914 /*
915 * Number of VMs:
916 * VMID 0 is reserved for the system,
917 * amdgpu graphics/compute will use VMIDs 1-7,
918 * amdkfd will use VMIDs 8-15.
919 */
920 adev->vm_manager.first_kfd_vmid = 8;
921
922 amdgpu_vm_manager_init(adev);
923
924 return 0;
925}
926
927/**
928 * gmc_v10_0_gart_fini - vm fini callback
929 *
930 * @adev: amdgpu_device pointer
931 *
932 * Tears down the driver GART/VM setup.
933 */
934static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
935{
936 amdgpu_gart_table_vram_free(adev);
937 amdgpu_gart_fini(adev);
938}
939
940static int gmc_v10_0_sw_fini(void *handle)
941{
942 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
943
944 amdgpu_vm_manager_fini(adev);
945 gmc_v10_0_gart_fini(adev);
946 amdgpu_gem_force_release(adev);
947 amdgpu_bo_fini(adev);
948
949 return 0;
950}
951
952static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
953{
954 switch (adev->asic_type) {
955 case CHIP_NAVI10:
956 case CHIP_NAVI14:
957 case CHIP_NAVI12:
958 case CHIP_SIENNA_CICHLID:
959 case CHIP_NAVY_FLOUNDER:
960 break;
961 default:
962 break;
963 }
964}
965
966/**
967 * gmc_v10_0_gart_enable - gart enable
968 *
969 * @adev: amdgpu_device pointer
970 */
971static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
972{
973 int r;
974 bool value;
975 u32 tmp;
976
977 if (adev->gart.bo == NULL) {
978 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
979 return -EINVAL;
980 }
981
982 r = amdgpu_gart_table_vram_pin(adev);
983 if (r)
984 return r;
985
986 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
987 adev->asic_type == CHIP_NAVY_FLOUNDER)
988 r = gfxhub_v2_1_gart_enable(adev);
989 else
990 r = gfxhub_v2_0_gart_enable(adev);
991 if (r)
992 return r;
993
994 r = mmhub_v2_0_gart_enable(adev);
995 if (r)
996 return r;
997
998 tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
999 tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
1000 WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
1001
1002 tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
1003 WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
1004
1005 /* Flush HDP after it is initialized */
1006 adev->nbio.funcs->hdp_flush(adev, NULL);
1007
1008 value = (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS);
1010
1011 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
1012 adev->asic_type == CHIP_NAVY_FLOUNDER)
1013 gfxhub_v2_1_set_fault_enable_default(adev, value);
1014 else
1015 gfxhub_v2_0_set_fault_enable_default(adev, value);
1016 mmhub_v2_0_set_fault_enable_default(adev, value);
1017 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
1018 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
1019
1020 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1021 (unsigned)(adev->gmc.gart_size >> 20),
1022 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
1023
1024 adev->gart.ready = true;
1025
1026 return 0;
1027}
1028
1029static int gmc_v10_0_hw_init(void *handle)
1030{
1031 int r;
1032 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1033
1034 /* The order of these two calls matters: program the golden settings before enabling the GART. */
1035 gmc_v10_0_init_golden_registers(adev);
1036
1037 r = gmc_v10_0_gart_enable(adev);
1038 if (r)
1039 return r;
1040
1041 return 0;
1042}
1043
1044/**
1045 * gmc_v10_0_gart_disable - gart disable
1046 *
1047 * @adev: amdgpu_device pointer
1048 *
1049 * This disables all VM page tables.
1050 */
1051static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
1052{
1053 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
1054 adev->asic_type == CHIP_NAVY_FLOUNDER)
1055 gfxhub_v2_1_gart_disable(adev);
1056 else
1057 gfxhub_v2_0_gart_disable(adev);
1058 mmhub_v2_0_gart_disable(adev);
1059 amdgpu_gart_table_vram_unpin(adev);
1060}
1061
1062static int gmc_v10_0_hw_fini(void *handle)
1063{
1064 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1065
1066 if (amdgpu_sriov_vf(adev)) {
1067 /* full access mode, so don't touch any GMC register */
1068 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
1069 return 0;
1070 }
1071
1072 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1073 gmc_v10_0_gart_disable(adev);
1074
1075 return 0;
1076}
1077
1078static int gmc_v10_0_suspend(void *handle)
1079{
1080 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1081
1082 gmc_v10_0_hw_fini(adev);
1083
1084 return 0;
1085}
1086
1087static int gmc_v10_0_resume(void *handle)
1088{
1089 int r;
1090 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1091
1092 r = gmc_v10_0_hw_init(adev);
1093 if (r)
1094 return r;
1095
1096 amdgpu_vmid_reset_all(adev);
1097
1098 return 0;
1099}
1100
1101static bool gmc_v10_0_is_idle(void *handle)
1102{
1103 /* MC is always ready in GMC v10.*/
1104 return true;
1105}
1106
1107static int gmc_v10_0_wait_for_idle(void *handle)
1108{
1109 /* There is no need to wait for MC idle in GMC v10.*/
1110 return 0;
1111}
1112
1113static int gmc_v10_0_soft_reset(void *handle)
1114{
1115 return 0;
1116}
1117
1118static int gmc_v10_0_set_clockgating_state(void *handle,
1119 enum amd_clockgating_state state)
1120{
1121 int r;
1122 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1123
1124 r = mmhub_v2_0_set_clockgating(adev, state);
1125 if (r)
1126 return r;
1127
1128 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
1129 adev->asic_type == CHIP_NAVY_FLOUNDER)
1130 return athub_v2_1_set_clockgating(adev, state);
1131 else
1132 return athub_v2_0_set_clockgating(adev, state);
1133}
1134
1135static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
1136{
1137 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1138
1139 mmhub_v2_0_get_clockgating(adev, flags);
1140
1141 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
1142 adev->asic_type == CHIP_NAVY_FLOUNDER)
1143 athub_v2_1_get_clockgating(adev, flags);
1144 else
1145 athub_v2_0_get_clockgating(adev, flags);
1146}
1147
1148static int gmc_v10_0_set_powergating_state(void *handle,
1149 enum amd_powergating_state state)
1150{
1151 return 0;
1152}
1153
1154const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
1155 .name = "gmc_v10_0",
1156 .early_init = gmc_v10_0_early_init,
1157 .late_init = gmc_v10_0_late_init,
1158 .sw_init = gmc_v10_0_sw_init,
1159 .sw_fini = gmc_v10_0_sw_fini,
1160 .hw_init = gmc_v10_0_hw_init,
1161 .hw_fini = gmc_v10_0_hw_fini,
1162 .suspend = gmc_v10_0_suspend,
1163 .resume = gmc_v10_0_resume,
1164 .is_idle = gmc_v10_0_is_idle,
1165 .wait_for_idle = gmc_v10_0_wait_for_idle,
1166 .soft_reset = gmc_v10_0_soft_reset,
1167 .set_clockgating_state = gmc_v10_0_set_clockgating_state,
1168 .set_powergating_state = gmc_v10_0_set_powergating_state,
1169 .get_clockgating_state = gmc_v10_0_get_clockgating_state,
1170};
1171
1172const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
1173{
1174 .type = AMD_IP_BLOCK_TYPE_GMC,
1175 .major = 10,
1176 .minor = 0,
1177 .rev = 0,
1178 .funcs = &gmc_v10_0_ip_funcs,
1179};