/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"
#include "umc_v8_7.h"

#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "gfxhub_v2_1.h"
#include "mmhub_v2_0.h"
#include "mmhub_v2_3.h"
#include "athub_v2_0.h"
#include "athub_v2_1.h"

#include "amdgpu_reset.h"

#if 0
static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
{
        /* TODO add golden setting for hdp */
};
#endif

static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
                                         struct amdgpu_irq_src *src,
                                         unsigned type,
                                         enum amdgpu_interrupt_state state)
{
        return 0;
}

static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *src, unsigned type,
                                   enum amdgpu_interrupt_state state)
{
        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                /* MM HUB */
                amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
                /* GFX HUB */
                amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                /* MM HUB */
                amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
                /* GFX HUB */
                amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
                break;
        default:
                break;
        }

        return 0;
}

static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *source,
                                       struct amdgpu_iv_entry *entry)
{
        bool retry_fault = !!(entry->src_data[1] & 0x80);
        bool write_fault = !!(entry->src_data[1] & 0x20);
        struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
        struct amdgpu_task_info task_info;
        uint32_t status = 0;
        u64 addr;

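        /*
         * Reassemble the 48-bit, page-aligned faulting address:
         * src_data[0] carries bits 43:12 of the page address and the
         * low nibble of src_data[1] carries bits 47:44.
         */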
        addr = (u64)entry->src_data[0] << 12;
        addr |= ((u64)entry->src_data[1] & 0xf) << 44;

        if (retry_fault) {
                /* Returning 1 here also prevents sending the IV to the KFD */

                /* Process it only if it's the first fault for this address */
                if (entry->ih != &adev->irq.ih_soft &&
                    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
                                             entry->timestamp))
                        return 1;

                /* Delegate it to a different ring if the hardware hasn't
                 * already done it.
                 */
                if (entry->ih == &adev->irq.ih) {
                        amdgpu_irq_delegate(adev, entry, 8);
                        return 1;
                }

                /* Try to handle the recoverable page faults by filling page
                 * tables
                 */
                if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
                        return 1;
        }

        if (!amdgpu_sriov_vf(adev)) {
                /*
                 * Issue a dummy read to wait for the status register to
                 * be updated to avoid reading an incorrect value due to
                 * the new fast GRBM interface.
                 */
                if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
                    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
                        RREG32(hub->vm_l2_pro_fault_status);

                status = RREG32(hub->vm_l2_pro_fault_status);
                WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
        }

        if (!printk_ratelimit())
                return 0;

        memset(&task_info, 0, sizeof(struct amdgpu_task_info));
        amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

        dev_err(adev->dev,
                "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
                "for process %s pid %d thread %s pid %d)\n",
                entry->vmid_src ? "mmhub" : "gfxhub",
                entry->src_id, entry->ring_id, entry->vmid,
                entry->pasid, task_info.process_name, task_info.tgid,
                task_info.task_name, task_info.pid);
        dev_err(adev->dev, " in page starting at address 0x%016llx from client 0x%x (%s)\n",
                addr, entry->client_id,
                soc15_ih_clientid_name[entry->client_id]);

        if (!amdgpu_sriov_vf(adev))
                hub->vmhub_funcs->print_l2_protection_fault_status(adev,
                                                                   status);

        return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
        .set = gmc_v10_0_vm_fault_interrupt_state,
        .process = gmc_v10_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
        .set = gmc_v10_0_ecc_interrupt_state,
        .process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->gmc.vm_fault.num_types = 1;
        adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;

        if (!amdgpu_sriov_vf(adev)) {
                adev->gmc.ecc_irq.num_types = 1;
                adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
        }
}

/**
 * gmc_v10_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
                                               uint32_t vmhub)
{
        return ((vmhub == AMDGPU_MMHUB_0 ||
                 vmhub == AMDGPU_MMHUB_1) &&
                (!amdgpu_sriov_vf(adev)));
}

static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
                                        struct amdgpu_device *adev,
                                        uint8_t vmid, uint16_t *p_pasid)
{
        uint32_t value;

        value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
                       + vmid);
        *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

        return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

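/*
 * gmc_v10_0_flush_vm_hub - flush the TLB of one VM hub through MMIO
 *
 * The sequence below: acquire the per-engine invalidation semaphore when
 * required (power-gating workaround), write the invalidation request for
 * @vmid on GART engine 17, poll the ACK register until the bit for @vmid
 * is set, then release the semaphore.
 */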
static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
                                   unsigned int vmhub, uint32_t flush_type)
{
        bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
        struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
        u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
        u32 tmp;
        /* Use register 17 for GART */
        const unsigned eng = 17;
        unsigned int i;
        unsigned char hub_ip = 0;

        hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
                 GC_HWIP : MMHUB_HWIP;

        spin_lock(&adev->gmc.invalidate_lock);
        /*
         * The GPUVM invalidate acknowledge state may be lost across a
         * power-gating off cycle. As a workaround, acquire a semaphore
         * before the invalidation and release it afterwards, to avoid
         * entering the power-gated state in between.
         */

        /* TODO: Debugging with the semaphore still needs to continue for the GFXHUB as well. */
        if (use_semaphore) {
                for (i = 0; i < adev->usec_timeout; i++) {
                        /* a read return value of 1 means the semaphore was acquired */
                        tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
                                                hub->eng_distance * eng, hub_ip);

                        if (tmp & 0x1)
                                break;
                        udelay(1);
                }

                if (i >= adev->usec_timeout)
                        DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
        }

        WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
                          hub->eng_distance * eng,
                          inv_req, hub_ip);

        /*
         * Issue a dummy read to wait for the ACK register to be cleared
         * to avoid a false ACK due to the new fast GRBM interface.
         */
        if ((vmhub == AMDGPU_GFXHUB_0) &&
            (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
                RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
                                  hub->eng_distance * eng, hub_ip);

        /* Wait for ACK with a delay. */
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
                                        hub->eng_distance * eng, hub_ip);

                tmp &= 1 << vmid;
                if (tmp)
                        break;

                udelay(1);
        }

        /* TODO: Debugging with the semaphore still needs to continue for the GFXHUB as well. */
        if (use_semaphore)
                /*
                 * Release the semaphore after the invalidation;
                 * writing 0 means semaphore release.
                 */
                WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
                                  hub->eng_distance * eng, 0, hub_ip);

        spin_unlock(&adev->gmc.invalidate_lock);

        if (i < adev->usec_timeout)
                return;

        DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub);
}

/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: vmhub type
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                                    uint32_t vmhub, uint32_t flush_type)
{
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        struct dma_fence *fence;
        struct amdgpu_job *job;

        int r;

        /* flush hdp cache */
        adev->hdp.funcs->flush_hdp(adev, NULL);

        /* At SRIOV run time, the driver shouldn't access registers through
         * MMIO. Use the KIQ to do the VM invalidation instead.
         */
        if (adev->gfx.kiq.ring.sched.ready && !adev->enable_mes &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
            down_read_trylock(&adev->reset_domain->sem)) {
                struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
                const unsigned eng = 17;
                u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
                u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
                u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

                amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
                                                   1 << vmid);

                up_read(&adev->reset_domain->sem);
                return;
        }

        mutex_lock(&adev->mman.gtt_window_lock);

        if (vmhub == AMDGPU_MMHUB_0) {
                gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
                mutex_unlock(&adev->mman.gtt_window_lock);
                return;
        }

        BUG_ON(vmhub != AMDGPU_GFXHUB_0);

        if (!adev->mman.buffer_funcs_enabled ||
            !adev->ib_pool_ready ||
            amdgpu_in_reset(adev) ||
            ring->sched.ready == false) {
                gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
                mutex_unlock(&adev->mman.gtt_window_lock);
                return;
        }

        /* The SDMA on Navi has a bug which can theoretically result in memory
         * corruption if an invalidation happens at the same time as a VA
         * translation. Avoid this by doing the invalidation from the SDMA
         * itself.
         */
        r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
                                     &job);
        if (r)
                goto error_alloc;

        job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
        job->vm_needs_flush = true;
        job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        fence = amdgpu_job_submit(job);

        mutex_unlock(&adev->mman.gtt_window_lock);

        dma_fence_wait(fence, false);
        dma_fence_put(fence);

        return;

error_alloc:
        mutex_unlock(&adev->mman.gtt_window_lock);
        DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}

/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB()
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
                                         uint16_t pasid, uint32_t flush_type,
                                         bool all_hub)
{
        int vmid, i;
        signed long r;
        uint32_t seq;
        uint16_t queried_pasid;
        bool ret;
        u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
        struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;

        if (amdgpu_emu_mode == 0 && ring->sched.ready) {
                spin_lock(&adev->gfx.kiq.ring_lock);
                /* 2 dwords flush + 8 dwords fence */
                amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
                kiq->pmf->kiq_invalidate_tlbs(ring,
                                              pasid, flush_type, all_hub);
                r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
                if (r) {
                        amdgpu_ring_undo(ring);
                        spin_unlock(&adev->gfx.kiq.ring_lock);
                        return -ETIME;
                }

                amdgpu_ring_commit(ring);
                spin_unlock(&adev->gfx.kiq.ring_lock);
                r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
                if (r < 1) {
                        dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
                        return -ETIME;
                }

                return 0;
        }

        for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {

                ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
                                &queried_pasid);
                if (ret && queried_pasid == pasid) {
                        if (all_hub) {
                                for (i = 0; i < adev->num_vmhubs; i++)
                                        gmc_v10_0_flush_gpu_tlb(adev, vmid,
                                                        i, flush_type);
                        } else {
                                gmc_v10_0_flush_gpu_tlb(adev, vmid,
                                                AMDGPU_GFXHUB_0, flush_type);
                        }
                        if (!adev->enable_mes)
                                break;
                }
        }

        return 0;
}

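/*
 * gmc_v10_0_emit_flush_gpu_tlb - emit a TLB flush into a ring
 *
 * Unlike gmc_v10_0_flush_gpu_tlb() above, which performs the flush through
 * direct register access, this variant emits the page table base update and
 * the invalidation request/ack wait as ring commands, so the flush executes
 * in order with the submitted work.
 */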
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                             unsigned vmid, uint64_t pd_addr)
{
        bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
        uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
        unsigned eng = ring->vm_inv_eng;

        /*
         * The GPUVM invalidate acknowledge state may be lost across a
         * power-gating off cycle. As a workaround, acquire a semaphore
         * before the invalidation and release it afterwards, to avoid
         * entering the power-gated state in between.
         */

        /* TODO: Debugging with the semaphore still needs to continue for the GFXHUB as well. */
        if (use_semaphore)
                /* a read return value of 1 means the semaphore was acquired */
                amdgpu_ring_emit_reg_wait(ring,
                                          hub->vm_inv_eng0_sem +
                                          hub->eng_distance * eng, 0x1, 0x1);

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
                              (hub->ctx_addr_distance * vmid),
                              lower_32_bits(pd_addr));

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
                              (hub->ctx_addr_distance * vmid),
                              upper_32_bits(pd_addr));

        amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
                                            hub->eng_distance * eng,
                                            hub->vm_inv_eng0_ack +
                                            hub->eng_distance * eng,
                                            req, 1 << vmid);

        /* TODO: Debugging with the semaphore still needs to continue for the GFXHUB as well. */
        if (use_semaphore)
                /*
                 * Release the semaphore after the invalidation;
                 * writing 0 means semaphore release.
                 */
                amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
                                      hub->eng_distance * eng, 0);

        return pd_addr;
}

static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
                                         unsigned pasid)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t reg;

        /* MES fw manages IH_VMID_x_LUT updating */
        if (ring->is_mes_queue)
                return;

        if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
                reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
        else
                reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

        amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on NAVI 10:
 * 63:59 reserved
 * 58 reserved and for sienna_cichlid is used for MALL noalloc
 * 57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

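/*
 * gmc_v10_0_map_mtype - translate the AMDGPU_VM_MTYPE_* flags from the VM
 * layer into the NV10 PTE mtype field (bits 50:48 in the PTE layout above).
 * Unknown values and DEFAULT fall back to MTYPE_NC.
 */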
static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
        switch (flags) {
        case AMDGPU_VM_MTYPE_DEFAULT:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_NC:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_WC:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
        case AMDGPU_VM_MTYPE_CC:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
        case AMDGPU_VM_MTYPE_UC:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
        default:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
        }
}

static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
                                 uint64_t *addr, uint64_t *flags)
{
        if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
                *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
        BUG_ON(*addr & 0xFFFF00000000003FULL);

        if (!adev->gmc.translate_further)
                return;

        if (level == AMDGPU_VM_PDB1) {
                /* Set the block fragment size */
                if (!(*flags & AMDGPU_PDE_PTE))
                        *flags |= AMDGPU_PDE_BFS(0x9);

        } else if (level == AMDGPU_VM_PDB0) {
                if (*flags & AMDGPU_PDE_PTE)
                        *flags &= ~AMDGPU_PDE_PTE;
                else
                        *flags |= AMDGPU_PTE_TF;
        }
}

static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
                                 struct amdgpu_bo_va_mapping *mapping,
                                 uint64_t *flags)
{
        struct amdgpu_bo *bo = mapping->bo_va->base.bo;

        *flags &= ~AMDGPU_PTE_EXECUTABLE;
        *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

        *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
        *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

        *flags &= ~AMDGPU_PTE_NOALLOC;
        *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

        if (mapping->flags & AMDGPU_PTE_PRT) {
                *flags |= AMDGPU_PTE_PRT;
                *flags |= AMDGPU_PTE_SNOOPED;
                *flags |= AMDGPU_PTE_LOG;
                *flags |= AMDGPU_PTE_SYSTEM;
                *flags &= ~AMDGPU_PTE_VALID;
        }

        if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
                               AMDGPU_GEM_CREATE_UNCACHED))
                *flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
                         AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
}

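/*
 * gmc_v10_0_get_vbios_fb_size - size of the framebuffer reserved by the VBIOS
 *
 * If VGA mode is enabled, a fixed allocation is reserved; otherwise the size
 * of the pre-OS scanout buffer is computed from the primary surface as
 * viewport height * surface pitch * 4 bytes per pixel.
 */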
static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
        u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
        unsigned size;

        if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
                size = AMDGPU_VBIOS_VGA_ALLOCATION;
        } else {
                u32 viewport;
                u32 pitch;

                viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
                pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
                size = (REG_GET_FIELD(viewport,
                                      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
                        REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
                        4);
        }

        return size;
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
        .flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
        .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
        .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
        .map_mtype = gmc_v10_0_map_mtype,
        .get_vm_pde = gmc_v10_0_get_vm_pde,
        .get_vm_pte = gmc_v10_0_get_vm_pte,
        .get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
        if (adev->gmc.gmc_funcs == NULL)
                adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}

static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
        switch (adev->ip_versions[UMC_HWIP][0]) {
        case IP_VERSION(8, 7, 0):
                adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
                adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
                adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
                adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
                adev->umc.ras = &umc_v8_7_ras;
                break;
        default:
                break;
        }
        if (adev->umc.ras) {
                amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);

                strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
                adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
                adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;

                /* If no special ras_late_init function is defined, use the default one */
                if (!adev->umc.ras->ras_block.ras_late_init)
                        adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

                /* If no special ras_cb function is defined, use the default one */
                if (!adev->umc.ras->ras_block.ras_cb)
                        adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
        }
}

static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
        switch (adev->ip_versions[MMHUB_HWIP][0]) {
        case IP_VERSION(2, 3, 0):
        case IP_VERSION(2, 4, 0):
        case IP_VERSION(2, 4, 1):
                adev->mmhub.funcs = &mmhub_v2_3_funcs;
                break;
        default:
                adev->mmhub.funcs = &mmhub_v2_0_funcs;
                break;
        }
}

static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(10, 3, 0):
        case IP_VERSION(10, 3, 2):
        case IP_VERSION(10, 3, 1):
        case IP_VERSION(10, 3, 4):
        case IP_VERSION(10, 3, 5):
        case IP_VERSION(10, 3, 6):
        case IP_VERSION(10, 3, 3):
        case IP_VERSION(10, 3, 7):
                adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
                break;
        default:
                adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
                break;
        }
}

static int gmc_v10_0_early_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v10_0_set_mmhub_funcs(adev);
        gmc_v10_0_set_gfxhub_funcs(adev);
        gmc_v10_0_set_gmc_funcs(adev);
        gmc_v10_0_set_irq_funcs(adev);
        gmc_v10_0_set_umc_funcs(adev);

        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
        adev->gmc.shared_aperture_end =
                adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
        adev->gmc.private_aperture_start = 0x1000000000000000ULL;
        adev->gmc.private_aperture_end =
                adev->gmc.private_aperture_start + (4ULL << 30) - 1;

        r = amdgpu_gmc_ras_early_init(adev);
        if (r)
                return r;

        return 0;
}

static int gmc_v10_0_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        r = amdgpu_gmc_allocate_vm_inv_eng(adev);
        if (r)
                return r;

        r = amdgpu_gmc_ras_late_init(adev);
        if (r)
                return r;

        return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

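/*
 * gmc_v10_0_vram_gtt_location - place the VRAM, GART and AGP apertures in
 * the GPU's address space. The VRAM base comes from the GFX hub FB location,
 * offset by the XGMI physical node where applicable.
 */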
static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
                                        struct amdgpu_gmc *mc)
{
        u64 base = 0;

        base = adev->gfxhub.funcs->get_fb_location(adev);

        /* add the xgmi offset of the physical node */
        base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;

        amdgpu_gmc_vram_location(adev, &adev->gmc, base);
        amdgpu_gmc_gart_location(adev, mc);
        amdgpu_gmc_agp_location(adev, mc);

        /* base offset of vram pages */
        adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

        /* add the xgmi offset of the physical node */
        adev->vm_manager.vram_base_offset +=
                adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
        int r;

        /* size in MB */
        adev->gmc.mc_vram_size =
                adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
        adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_device_resize_fb_bar(adev);
                if (r)
                        return r;
        }
        adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
        if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
                adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
                adev->gmc.aper_size = adev->gmc.real_vram_size;
        }
#endif

        /* In case the PCI BAR is larger than the actual amount of vram */
        adev->gmc.visible_vram_size = adev->gmc.aper_size;
        if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
                adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

        /* set the gart size */
        if (amdgpu_gart_size == -1) {
                switch (adev->ip_versions[GC_HWIP][0]) {
                default:
                        adev->gmc.gart_size = 512ULL << 20;
                        break;
                case IP_VERSION(10, 3, 1): /* DCE SG support */
                case IP_VERSION(10, 3, 3): /* DCE SG support */
                case IP_VERSION(10, 3, 6): /* DCE SG support */
                case IP_VERSION(10, 3, 7): /* DCE SG support */
                        adev->gmc.gart_size = 1024ULL << 20;
                        break;
                }
        } else {
                adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
        }

        gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

        return 0;
}

static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.bo) {
                WARN(1, "NAVI10 PCIE GART already initialized\n");
                return 0;
        }

        /* Initialize common gart structure */
        r = amdgpu_gart_init(adev);
        if (r)
                return r;

        adev->gart.table_size = adev->gart.num_gpu_pages * 8;
        adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
                                    AMDGPU_PTE_EXECUTABLE;

        return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v10_0_sw_init(void *handle)
{
        int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->gfxhub.funcs->init(adev);

        adev->mmhub.funcs->init(adev);

        spin_lock_init(&adev->gmc.invalidate_lock);

        if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
                adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
                adev->gmc.vram_width = 64;
        } else if (amdgpu_emu_mode == 1) {
                adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
                adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
        } else {
                r = amdgpu_atomfirmware_get_vram_info(adev,
                                &vram_width, &vram_type, &vram_vendor);
                adev->gmc.vram_width = vram_width;

                adev->gmc.vram_type = vram_type;
                adev->gmc.vram_vendor = vram_vendor;
        }

        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(10, 3, 0):
                adev->gmc.mall_size = 128 * 1024 * 1024;
                break;
        case IP_VERSION(10, 3, 2):
                adev->gmc.mall_size = 96 * 1024 * 1024;
                break;
        case IP_VERSION(10, 3, 4):
                adev->gmc.mall_size = 32 * 1024 * 1024;
                break;
        case IP_VERSION(10, 3, 5):
                adev->gmc.mall_size = 16 * 1024 * 1024;
                break;
        default:
                adev->gmc.mall_size = 0;
                break;
        }

        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(10, 1, 10):
        case IP_VERSION(10, 1, 1):
        case IP_VERSION(10, 1, 2):
        case IP_VERSION(10, 1, 3):
        case IP_VERSION(10, 1, 4):
        case IP_VERSION(10, 3, 0):
        case IP_VERSION(10, 3, 2):
        case IP_VERSION(10, 3, 1):
        case IP_VERSION(10, 3, 4):
        case IP_VERSION(10, 3, 5):
        case IP_VERSION(10, 3, 6):
        case IP_VERSION(10, 3, 3):
        case IP_VERSION(10, 3, 7):
                adev->num_vmhubs = 2;
                /*
                 * To fulfill 4-level page table support, set the vm size
                 * to 256TB (48 bits), the maximum for Navi10/Navi14/Navi12,
                 * with a block size of 512 (9 bits).
                 */
                amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                break;
        default:
                break;
        }

        /* This interrupt is the VMC page fault. */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
                              VMC_1_0__SRCID__VM_FAULT,
                              &adev->gmc.vm_fault);

        if (r)
                return r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
                              UTCL2_1_0__SRCID__FAULT,
                              &adev->gmc.vm_fault);
        if (r)
                return r;

        if (!amdgpu_sriov_vf(adev)) {
                /* interrupt sent to DF. */
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
                                      &adev->gmc.ecc_irq);
                if (r)
                        return r;
        }

        /*
         * Set the internal MC address mask. This is the max address of the
         * GPU's internal address space.
         */
        adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
        if (r) {
                printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
                return r;
        }

        adev->need_swiotlb = drm_need_swiotlb(44);

        r = gmc_v10_0_mc_init(adev);
        if (r)
                return r;

        amdgpu_gmc_get_vbios_allocations(adev);

        /* Memory manager */
        r = amdgpu_bo_init(adev);
        if (r)
                return r;

        r = gmc_v10_0_gart_init(adev);
        if (r)
                return r;

        /*
         * number of VMs
         * VMID 0 is reserved for System
         * amdgpu graphics/compute will use VMIDs 1-7
         * amdkfd will use VMIDs 8-15
         */
        adev->vm_manager.first_kfd_vmid = 8;

        amdgpu_vm_manager_init(adev);

        return 0;
}

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
        amdgpu_gart_table_vram_free(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_vm_manager_fini(adev);
        gmc_v10_0_gart_fini(adev);
        amdgpu_gem_force_release(adev);
        amdgpu_bo_fini(adev);

        return 0;
}

static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
        int r;
        bool value;

        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }

        amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
        r = adev->gfxhub.funcs->gart_enable(adev);
        if (r)
                return r;

        r = adev->mmhub.funcs->gart_enable(adev);
        if (r)
                return r;

        adev->hdp.funcs->init_registers(adev);

        /* Flush HDP after it is initialized */
        adev->hdp.funcs->flush_hdp(adev, NULL);

        value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
                false : true;

        adev->gfxhub.funcs->set_fault_enable_default(adev, value);
        adev->mmhub.funcs->set_fault_enable_default(adev, value);
        gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
        gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

        return 0;
}

static int gmc_v10_0_hw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* The sequence of these two function calls matters. */
        gmc_v10_0_init_golden_registers(adev);

        /*
         * Harvestable groups in gc_utcl2 need to be programmed before any GFX
         * block register setup within GMC; otherwise the system hangs when
         * harvesting SA.
         */
        if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
                adev->gfxhub.funcs->utcl2_harvest(adev);

        r = gmc_v10_0_gart_enable(adev);
        if (r)
                return r;

        if (amdgpu_emu_mode == 1) {
                r = amdgpu_gmc_vram_checking(adev);
                if (r)
                        return r;
        }

        if (adev->umc.funcs && adev->umc.funcs->init_registers)
                adev->umc.funcs->init_registers(adev);

        return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
        adev->gfxhub.funcs->gart_disable(adev);
        adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v10_0_gart_disable(adev);

        if (amdgpu_sriov_vf(adev)) {
                /* full access mode, so don't touch any GMC register */
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
                return 0;
        }

        amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

        return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v10_0_hw_fini(adev);

        return 0;
}

static int gmc_v10_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = gmc_v10_0_hw_init(adev);
        if (r)
                return r;

        amdgpu_vmid_reset_all(adev);

        return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
        /* MC is always ready in GMC v10. */
        return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
        /* There is no need to wait for MC idle in GMC v10. */
        return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
        return 0;
}

static int gmc_v10_0_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /*
         * The issue of the MMHUB failing to disconnect from the DF with MMHUB
         * clock gating disabled is a new problem observed on DF 3.0.3; the
         * same suspend sequence has not shown any issue on the DF 3.0.2
         * series platforms.
         */
        if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) {
                dev_dbg(adev->dev, "keep mmhub clock gating enabled for s0ix\n");
                return 0;
        }

        r = adev->mmhub.funcs->set_clockgating(adev, state);
        if (r)
                return r;

        if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
                return athub_v2_1_set_clockgating(adev, state);
        else
                return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3) ||
            adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 4))
                return;

        adev->mmhub.funcs->get_clockgating(adev, flags);

        if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
                athub_v2_1_get_clockgating(adev, flags);
        else
                athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
                                           enum amd_powergating_state state)
{
        return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
        .name = "gmc_v10_0",
        .early_init = gmc_v10_0_early_init,
        .late_init = gmc_v10_0_late_init,
        .sw_init = gmc_v10_0_sw_init,
        .sw_fini = gmc_v10_0_sw_fini,
        .hw_init = gmc_v10_0_hw_init,
        .hw_fini = gmc_v10_0_hw_fini,
        .suspend = gmc_v10_0_suspend,
        .resume = gmc_v10_0_resume,
        .is_idle = gmc_v10_0_is_idle,
        .wait_for_idle = gmc_v10_0_wait_for_idle,
        .soft_reset = gmc_v10_0_soft_reset,
        .set_clockgating_state = gmc_v10_0_set_clockgating_state,
        .set_powergating_state = gmc_v10_0_set_powergating_state,
        .get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 10,
        .minor = 0,
        .rev = 0,
        .funcs = &gmc_v10_0_ip_funcs,
};
1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/firmware.h>
24#include <linux/pci.h>
25#include "amdgpu.h"
26#include "amdgpu_atomfirmware.h"
27#include "gmc_v10_0.h"
28#include "umc_v8_7.h"
29
30#include "athub/athub_2_0_0_sh_mask.h"
31#include "athub/athub_2_0_0_offset.h"
32#include "dcn/dcn_2_0_0_offset.h"
33#include "dcn/dcn_2_0_0_sh_mask.h"
34#include "oss/osssys_5_0_0_offset.h"
35#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
36#include "navi10_enum.h"
37
38#include "soc15.h"
39#include "soc15d.h"
40#include "soc15_common.h"
41
42#include "nbio_v2_3.h"
43
44#include "gfxhub_v2_0.h"
45#include "gfxhub_v2_1.h"
46#include "mmhub_v2_0.h"
47#include "mmhub_v2_3.h"
48#include "athub_v2_0.h"
49#include "athub_v2_1.h"
50
51#if 0
52static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
53{
54 /* TODO add golden setting for hdp */
55};
56#endif
57
58static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
59 struct amdgpu_irq_src *src,
60 unsigned type,
61 enum amdgpu_interrupt_state state)
62{
63 return 0;
64}
65
66static int
67gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
68 struct amdgpu_irq_src *src, unsigned type,
69 enum amdgpu_interrupt_state state)
70{
71 switch (state) {
72 case AMDGPU_IRQ_STATE_DISABLE:
73 /* MM HUB */
74 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
75 /* GFX HUB */
76 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
77 break;
78 case AMDGPU_IRQ_STATE_ENABLE:
79 /* MM HUB */
80 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
81 /* GFX HUB */
82 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
83 break;
84 default:
85 break;
86 }
87
88 return 0;
89}
90
91static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
92 struct amdgpu_irq_src *source,
93 struct amdgpu_iv_entry *entry)
94{
95 bool retry_fault = !!(entry->src_data[1] & 0x80);
96 struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
97 struct amdgpu_task_info task_info;
98 uint32_t status = 0;
99 u64 addr;
100
101 addr = (u64)entry->src_data[0] << 12;
102 addr |= ((u64)entry->src_data[1] & 0xf) << 44;
103
104 if (retry_fault) {
105 /* Returning 1 here also prevents sending the IV to the KFD */
106
107 /* Process it onyl if it's the first fault for this address */
108 if (entry->ih != &adev->irq.ih_soft &&
109 amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
110 entry->timestamp))
111 return 1;
112
113 /* Delegate it to a different ring if the hardware hasn't
114 * already done it.
115 */
116 if (entry->ih == &adev->irq.ih) {
117 amdgpu_irq_delegate(adev, entry, 8);
118 return 1;
119 }
120
121 /* Try to handle the recoverable page faults by filling page
122 * tables
123 */
124 if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
125 return 1;
126 }
127
128 if (!amdgpu_sriov_vf(adev)) {
129 /*
130 * Issue a dummy read to wait for the status register to
131 * be updated to avoid reading an incorrect value due to
132 * the new fast GRBM interface.
133 */
134 if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
135 (adev->asic_type < CHIP_SIENNA_CICHLID))
136 RREG32(hub->vm_l2_pro_fault_status);
137
138 status = RREG32(hub->vm_l2_pro_fault_status);
139 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
140 }
141
142 if (!printk_ratelimit())
143 return 0;
144
145 memset(&task_info, 0, sizeof(struct amdgpu_task_info));
146 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
147
148 dev_err(adev->dev,
149 "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
150 "for process %s pid %d thread %s pid %d)\n",
151 entry->vmid_src ? "mmhub" : "gfxhub",
152 entry->src_id, entry->ring_id, entry->vmid,
153 entry->pasid, task_info.process_name, task_info.tgid,
154 task_info.task_name, task_info.pid);
155 dev_err(adev->dev, " in page starting at address 0x%016llx from client 0x%x (%s)\n",
156 addr, entry->client_id,
157 soc15_ih_clientid_name[entry->client_id]);
158
159 if (!amdgpu_sriov_vf(adev))
160 hub->vmhub_funcs->print_l2_protection_fault_status(adev,
161 status);
162
163 return 0;
164}
165
166static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
167 .set = gmc_v10_0_vm_fault_interrupt_state,
168 .process = gmc_v10_0_process_interrupt,
169};
170
171static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
172 .set = gmc_v10_0_ecc_interrupt_state,
173 .process = amdgpu_umc_process_ecc_irq,
174};
175
176static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
177{
178 adev->gmc.vm_fault.num_types = 1;
179 adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
180
181 if (!amdgpu_sriov_vf(adev)) {
182 adev->gmc.ecc_irq.num_types = 1;
183 adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
184 }
185}
186
187/**
188 * gmc_v10_0_use_invalidate_semaphore - judge whether to use semaphore
189 *
190 * @adev: amdgpu_device pointer
191 * @vmhub: vmhub type
192 *
193 */
194static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
195 uint32_t vmhub)
196{
197 return ((vmhub == AMDGPU_MMHUB_0 ||
198 vmhub == AMDGPU_MMHUB_1) &&
199 (!amdgpu_sriov_vf(adev)));
200}
201
202static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
203 struct amdgpu_device *adev,
204 uint8_t vmid, uint16_t *p_pasid)
205{
206 uint32_t value;
207
208 value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
209 + vmid);
210 *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
211
212 return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
213}
214
215/*
216 * GART
217 * VMID 0 is the physical GPU addresses as used by the kernel.
218 * VMIDs 1-15 are used for userspace clients and are handled
219 * by the amdgpu vm/hsa code.
220 */
221
222static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
223 unsigned int vmhub, uint32_t flush_type)
224{
225 bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
226 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
227 u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
228 u32 tmp;
229 /* Use register 17 for GART */
230 const unsigned eng = 17;
231 unsigned int i;
232 unsigned char hub_ip = 0;
233
234 hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
235 GC_HWIP : MMHUB_HWIP;
236
237 spin_lock(&adev->gmc.invalidate_lock);
238 /*
239 * It may lose gpuvm invalidate acknowldege state across power-gating
240 * off cycle, add semaphore acquire before invalidation and semaphore
241 * release after invalidation to avoid entering power gated state
242 * to WA the Issue
243 */
244
245 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
246 if (use_semaphore) {
247 for (i = 0; i < adev->usec_timeout; i++) {
248 /* a read return value of 1 means semaphore acuqire */
249 tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
250 hub->eng_distance * eng, hub_ip);
251
252 if (tmp & 0x1)
253 break;
254 udelay(1);
255 }
256
257 if (i >= adev->usec_timeout)
258 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
259 }
260
261 WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
262 hub->eng_distance * eng,
263 inv_req, hub_ip);
264
265 /*
266 * Issue a dummy read to wait for the ACK register to be cleared
267 * to avoid a false ACK due to the new fast GRBM interface.
268 */
269 if ((vmhub == AMDGPU_GFXHUB_0) &&
270 (adev->asic_type < CHIP_SIENNA_CICHLID))
271 RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
272 hub->eng_distance * eng, hub_ip);
273
274 /* Wait for ACK with a delay.*/
275 for (i = 0; i < adev->usec_timeout; i++) {
276 tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
277 hub->eng_distance * eng, hub_ip);
278
279 tmp &= 1 << vmid;
280 if (tmp)
281 break;
282
283 udelay(1);
284 }
285
286 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
287 if (use_semaphore)
288 /*
289 * add semaphore release after invalidation,
290 * write with 0 means semaphore release
291 */
292 WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
293 hub->eng_distance * eng, 0, hub_ip);
294
295 spin_unlock(&adev->gmc.invalidate_lock);
296
297 if (i < adev->usec_timeout)
298 return;
299
300 DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub);
301}
302
303/**
304 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
305 *
306 * @adev: amdgpu_device pointer
307 * @vmid: vm instance to flush
308 * @vmhub: vmhub type
309 * @flush_type: the flush type
310 *
311 * Flush the TLB for the requested page table.
312 */
313static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
314 uint32_t vmhub, uint32_t flush_type)
315{
316 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
317 struct dma_fence *fence;
318 struct amdgpu_job *job;
319
320 int r;
321
322 /* flush hdp cache */
323 adev->hdp.funcs->flush_hdp(adev, NULL);
324
325 /* For SRIOV run time, driver shouldn't access the register through MMIO
326 * Directly use kiq to do the vm invalidation instead
327 */
328 if (adev->gfx.kiq.ring.sched.ready &&
329 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
330 down_read_trylock(&adev->reset_sem)) {
331 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
332 const unsigned eng = 17;
333 u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
334 u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
335 u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
336
337 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
338 1 << vmid);
339
340 up_read(&adev->reset_sem);
341 return;
342 }
343
344 mutex_lock(&adev->mman.gtt_window_lock);
345
346 if (vmhub == AMDGPU_MMHUB_0) {
347 gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
348 mutex_unlock(&adev->mman.gtt_window_lock);
349 return;
350 }
351
352 BUG_ON(vmhub != AMDGPU_GFXHUB_0);
353
354 if (!adev->mman.buffer_funcs_enabled ||
355 !adev->ib_pool_ready ||
356 amdgpu_in_reset(adev) ||
357 ring->sched.ready == false) {
358 gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
359 mutex_unlock(&adev->mman.gtt_window_lock);
360 return;
361 }
362
363 /* The SDMA on Navi has a bug which can theoretically result in memory
364 * corruption if an invalidation happens at the same time as an VA
365 * translation. Avoid this by doing the invalidation from the SDMA
366 * itself.
367 */
368 r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
369 &job);
370 if (r)
371 goto error_alloc;
372
373 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
374 job->vm_needs_flush = true;
375 job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
376 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
377 r = amdgpu_job_submit(job, &adev->mman.entity,
378 AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
379 if (r)
380 goto error_submit;
381
382 mutex_unlock(&adev->mman.gtt_window_lock);
383
384 dma_fence_wait(fence, false);
385 dma_fence_put(fence);
386
387 return;
388
389error_submit:
390 amdgpu_job_free(job);
391
392error_alloc:
393 mutex_unlock(&adev->mman.gtt_window_lock);
394 DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
395}
396
397/**
398 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
399 *
400 * @adev: amdgpu_device pointer
401 * @pasid: pasid to be flush
402 * @flush_type: the flush type
403 * @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB()
404 *
405 * Flush the TLB for the requested pasid.
406 */
407static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
408 uint16_t pasid, uint32_t flush_type,
409 bool all_hub)
410{
411 int vmid, i;
412 signed long r;
413 uint32_t seq;
414 uint16_t queried_pasid;
415 bool ret;
416 struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
417 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
418
419 if (amdgpu_emu_mode == 0 && ring->sched.ready) {
420 spin_lock(&adev->gfx.kiq.ring_lock);
421 /* 2 dwords flush + 8 dwords fence */
422 amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
423 kiq->pmf->kiq_invalidate_tlbs(ring,
424 pasid, flush_type, all_hub);
425 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
426 if (r) {
427 amdgpu_ring_undo(ring);
428 spin_unlock(&adev->gfx.kiq.ring_lock);
429 return -ETIME;
430 }
431
432 amdgpu_ring_commit(ring);
433 spin_unlock(&adev->gfx.kiq.ring_lock);
434 r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
435 if (r < 1) {
436 dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
437 return -ETIME;
438 }
439
440 return 0;
441 }
442
443 for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
444
445 ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
446 &queried_pasid);
447 if (ret && queried_pasid == pasid) {
448 if (all_hub) {
449 for (i = 0; i < adev->num_vmhubs; i++)
450 gmc_v10_0_flush_gpu_tlb(adev, vmid,
451 i, flush_type);
452 } else {
453 gmc_v10_0_flush_gpu_tlb(adev, vmid,
454 AMDGPU_GFXHUB_0, flush_type);
455 }
456 break;
457 }
458 }
459
460 return 0;
461}
462
463static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
464 unsigned vmid, uint64_t pd_addr)
465{
466 bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
467 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
468 uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
469 unsigned eng = ring->vm_inv_eng;
470
471 /*
472 * It may lose gpuvm invalidate acknowldege state across power-gating
473 * off cycle, add semaphore acquire before invalidation and semaphore
474 * release after invalidation to avoid entering power gated state
475 * to WA the Issue
476 */
477
478 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
479 if (use_semaphore)
480 /* a read return value of 1 means semaphore acuqire */
481 amdgpu_ring_emit_reg_wait(ring,
482 hub->vm_inv_eng0_sem +
483 hub->eng_distance * eng, 0x1, 0x1);
484
485 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
486 (hub->ctx_addr_distance * vmid),
487 lower_32_bits(pd_addr));
488
489 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
490 (hub->ctx_addr_distance * vmid),
491 upper_32_bits(pd_addr));
492
493 amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
494 hub->eng_distance * eng,
495 hub->vm_inv_eng0_ack +
496 hub->eng_distance * eng,
497 req, 1 << vmid);
498
499 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
500 if (use_semaphore)
501 /*
502 * add semaphore release after invalidation,
503 * write with 0 means semaphore release
504 */
505 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
506 hub->eng_distance * eng, 0);
507
508 return pd_addr;
509}
510
511static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
512 unsigned pasid)
513{
514 struct amdgpu_device *adev = ring->adev;
515 uint32_t reg;
516
517 if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
518 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
519 else
520 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
521
522 amdgpu_ring_emit_wreg(ring, reg, pasid);
523}
524
525/*
526 * PTE format on NAVI 10:
527 * 63:59 reserved
528 * 58 reserved and for sienna_cichlid is used for MALL noalloc
529 * 57 reserved
530 * 56 F
531 * 55 L
532 * 54 reserved
533 * 53:52 SW
534 * 51 T
535 * 50:48 mtype
536 * 47:12 4k physical page base address
537 * 11:7 fragment
538 * 6 write
539 * 5 read
540 * 4 exe
541 * 3 Z
542 * 2 snooped
543 * 1 system
544 * 0 valid
545 *
546 * PDE format on NAVI 10:
547 * 63:59 block fragment size
548 * 58:55 reserved
549 * 54 P
550 * 53:48 reserved
551 * 47:6 physical base address of PD or PTE
552 * 5:3 reserved
553 * 2 C
554 * 1 system
555 * 0 valid
556 */
557
558static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
559{
560 switch (flags) {
561 case AMDGPU_VM_MTYPE_DEFAULT:
562 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
563 case AMDGPU_VM_MTYPE_NC:
564 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
565 case AMDGPU_VM_MTYPE_WC:
566 return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
567 case AMDGPU_VM_MTYPE_CC:
568 return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
569 case AMDGPU_VM_MTYPE_UC:
570 return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
571 default:
572 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
573 }
574}
575
576static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
577 uint64_t *addr, uint64_t *flags)
578{
579 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
580 *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
581 BUG_ON(*addr & 0xFFFF00000000003FULL);
582
583 if (!adev->gmc.translate_further)
584 return;
585
586 if (level == AMDGPU_VM_PDB1) {
587 /* Set the block fragment size */
588 if (!(*flags & AMDGPU_PDE_PTE))
589 *flags |= AMDGPU_PDE_BFS(0x9);
590
591 } else if (level == AMDGPU_VM_PDB0) {
592 if (*flags & AMDGPU_PDE_PTE)
593 *flags &= ~AMDGPU_PDE_PTE;
594 else
595 *flags |= AMDGPU_PTE_TF;
596 }
597}
598
599static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
600 struct amdgpu_bo_va_mapping *mapping,
601 uint64_t *flags)
602{
603 *flags &= ~AMDGPU_PTE_EXECUTABLE;
604 *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
605
606 *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
607 *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
608
609 if (mapping->flags & AMDGPU_PTE_PRT) {
610 *flags |= AMDGPU_PTE_PRT;
611 *flags |= AMDGPU_PTE_SNOOPED;
612 *flags |= AMDGPU_PTE_LOG;
613 *flags |= AMDGPU_PTE_SYSTEM;
614 *flags &= ~AMDGPU_PTE_VALID;
615 }
616}
617
618static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
619{
620 u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
621 unsigned size;
622
623 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
624 size = AMDGPU_VBIOS_VGA_ALLOCATION;
625 } else {
626 u32 viewport;
627 u32 pitch;
628
629 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
630 pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
631 size = (REG_GET_FIELD(viewport,
632 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
633 REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
634 4);
635 }
636
637 return size;
638}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (!adev->gmc.gmc_funcs)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}

static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
		adev->umc.ras_funcs = &umc_v8_7_ras_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		adev->mmhub.funcs = &mmhub_v2_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v2_0_funcs;
		break;
	}
}

static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
		break;
	}
}

static int gmc_v10_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_mmhub_funcs(adev);
	gmc_v10_0_set_gfxhub_funcs(adev);
	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);
	gmc_v10_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}
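
/*
 * Worked example (illustrative only): 4ULL << 30 is 4 GiB, so the
 * apertures set up above span 0x2000000000000000-0x20000000ffffffff
 * (shared) and 0x1000000000000000-0x10000000ffffffff (private).
 */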

static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = adev->gfxhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* add the xgmi offset of the physical node */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* size in MB as reported by the NBIO block */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_NAVI10:
		case CHIP_NAVI14:
		case CHIP_NAVI12:
		case CHIP_SIENNA_CICHLID:
		case CHIP_NAVY_FLOUNDER:
		case CHIP_VANGOGH:
		case CHIP_DIMGREY_CAVEFISH:
		case CHIP_BEIGE_GOBY:
		case CHIP_YELLOW_CARP:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
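
/*
 * Worked example (illustrative only): with the default amdgpu_gart_size
 * of -1, every ASIC handled here gets 512ULL << 20 = 512 MiB of GART;
 * booting with e.g. amdgpu.gartsize=1024 instead yields
 * 1024ULL << 20 = 1 GiB.
 */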

static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}
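
/*
 * Worked example (illustrative only): a 512 MiB GART of 4 KiB pages has
 * 131072 GPU pages, so at 8 bytes per PTE the table itself takes
 * 131072 * 8 = 1 MiB of VRAM.
 */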

static int gmc_v10_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
		adev->gmc.vram_width = 64;
	} else if (amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
						      &vram_width, &vram_type, &vram_vendor);
		adev->gmc.vram_width = vram_width;

		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		adev->num_vmhubs = 2;
		/*
		 * To support 4-level page tables, use a VM size of 256 TB
		 * (48 bit), the maximum for Navi10/Navi14/Navi12, with a
		 * block size of 512 entries (9 bit).
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		dev_warn(adev->dev, "No suitable DMA available\n");
		return r;
	}

	if (adev->gmc.xgmi.supported) {
		r = adev->gfxhub.funcs->get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);
	amdgpu_gmc_get_reserved_allocation(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}
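
/*
 * Worked example (illustrative only): the amdgpu_vm_adjust_size() call
 * above requests 256 * 1024 GiB = 256 TiB = 2^48 bytes of VM space over
 * four page table levels, with a 9-bit default fragment size
 * (512 x 4 KiB = 2 MiB); the 44-bit DMA mask then limits system memory
 * addressing to 2^44 = 16 TiB.
 */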

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		break;
	default:
		break;
	}
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (!adev->gart.bo) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->init_registers(adev);

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;

	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	adev->gart.ready = true;

	return 0;
}
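
/*
 * Illustrative note: with amdgpu.vm_fault_stop at its default, value is
 * true above and both hubs keep default VM fault handling; setting it
 * to AMDGPU_VM_FAULT_STOP_ALWAYS (a debug aid) clears it. The two
 * gmc_v10_0_flush_gpu_tlb() calls then invalidate VMID 0 on the MM and
 * GFX hubs so the newly programmed GART takes effect.
 */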

static int gmc_v10_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	/*
	 * Harvestable groups in gc_utcl2 need to be programmed before any GFX
	 * block register setup within GMC, or else the system hangs when
	 * harvesting SA.
	 */
	if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
		adev->gfxhub.funcs->utcl2_harvest(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);

	return 0;
}

static int gmc_v10_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v10_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
	    adev->asic_type <= CHIP_YELLOW_CARP)
		return athub_v2_1_set_clockgating(adev, state);
	else
		return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
	    adev->asic_type <= CHIP_YELLOW_CARP)
		athub_v2_1_get_clockgating(adev, flags);
	else
		athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};