Loading...
1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/firmware.h>
24#include <linux/pci.h>
25
26#include <drm/drm_cache.h>
27
28#include "amdgpu.h"
29#include "amdgpu_atomfirmware.h"
30#include "gmc_v10_0.h"
31#include "umc_v8_7.h"
32
33#include "athub/athub_2_0_0_sh_mask.h"
34#include "athub/athub_2_0_0_offset.h"
35#include "dcn/dcn_2_0_0_offset.h"
36#include "dcn/dcn_2_0_0_sh_mask.h"
37#include "oss/osssys_5_0_0_offset.h"
38#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
39#include "navi10_enum.h"
40
41#include "soc15.h"
42#include "soc15d.h"
43#include "soc15_common.h"
44
45#include "nbio_v2_3.h"
46
47#include "gfxhub_v2_0.h"
48#include "gfxhub_v2_1.h"
49#include "mmhub_v2_0.h"
50#include "mmhub_v2_3.h"
51#include "athub_v2_0.h"
52#include "athub_v2_1.h"
53
54static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
55 struct amdgpu_irq_src *src,
56 unsigned int type,
57 enum amdgpu_interrupt_state state)
58{
59 return 0;
60}
61
62static int
63gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
64 struct amdgpu_irq_src *src, unsigned int type,
65 enum amdgpu_interrupt_state state)
66{
67 switch (state) {
68 case AMDGPU_IRQ_STATE_DISABLE:
69 /* MM HUB */
70 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
71 /* GFX HUB */
72 /* This works because this interrupt is only
73 * enabled at init/resume and disabled in
74 * fini/suspend, so the overall state doesn't
75 * change over the course of suspend/resume.
76 */
77 if (!adev->in_s0ix)
78 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
79 break;
80 case AMDGPU_IRQ_STATE_ENABLE:
81 /* MM HUB */
82 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
83 /* GFX HUB */
84 /* This works because this interrupt is only
85 * enabled at init/resume and disabled in
86 * fini/suspend, so the overall state doesn't
87 * change over the course of suspend/resume.
88 */
89 if (!adev->in_s0ix)
90 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
91 break;
92 default:
93 break;
94 }
95
96 return 0;
97}
98
99static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
100 struct amdgpu_irq_src *source,
101 struct amdgpu_iv_entry *entry)
102{
103 uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ?
104 AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
105 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
106 bool retry_fault = !!(entry->src_data[1] & 0x80);
107 bool write_fault = !!(entry->src_data[1] & 0x20);
108 struct amdgpu_task_info *task_info;
109 uint32_t status = 0;
110 u64 addr;
111
112 addr = (u64)entry->src_data[0] << 12;
113 addr |= ((u64)entry->src_data[1] & 0xf) << 44;
114
115 if (retry_fault) {
116 /* Returning 1 here also prevents sending the IV to the KFD */
117
118 /* Process it onyl if it's the first fault for this address */
119 if (entry->ih != &adev->irq.ih_soft &&
120 amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
121 entry->timestamp))
122 return 1;
123
124 /* Delegate it to a different ring if the hardware hasn't
125 * already done it.
126 */
127 if (entry->ih == &adev->irq.ih) {
128 amdgpu_irq_delegate(adev, entry, 8);
129 return 1;
130 }
131
132 /* Try to handle the recoverable page faults by filling page
133 * tables
134 */
135 if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, write_fault))
136 return 1;
137 }
138
139 if (!amdgpu_sriov_vf(adev)) {
140 /*
141 * Issue a dummy read to wait for the status register to
142 * be updated to avoid reading an incorrect value due to
143 * the new fast GRBM interface.
144 */
145 if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
146 (amdgpu_ip_version(adev, GC_HWIP, 0) <
147 IP_VERSION(10, 3, 0)))
148 RREG32(hub->vm_l2_pro_fault_status);
149
150 status = RREG32(hub->vm_l2_pro_fault_status);
151 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
152
153 amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
154 entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
155 }
156
157 if (!printk_ratelimit())
158 return 0;
159
160 dev_err(adev->dev,
161 "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
162 entry->vmid_src ? "mmhub" : "gfxhub",
163 entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
164 task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
165 if (task_info) {
166 dev_err(adev->dev,
167 " in process %s pid %d thread %s pid %d\n",
168 task_info->process_name, task_info->tgid,
169 task_info->task_name, task_info->pid);
170 amdgpu_vm_put_task_info(task_info);
171 }
172
173 dev_err(adev->dev, " in page starting at address 0x%016llx from client 0x%x (%s)\n",
174 addr, entry->client_id,
175 soc15_ih_clientid_name[entry->client_id]);
176
177 if (!amdgpu_sriov_vf(adev))
178 hub->vmhub_funcs->print_l2_protection_fault_status(adev,
179 status);
180
181 return 0;
182}
183
184static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
185 .set = gmc_v10_0_vm_fault_interrupt_state,
186 .process = gmc_v10_0_process_interrupt,
187};
188
189static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
190 .set = gmc_v10_0_ecc_interrupt_state,
191 .process = amdgpu_umc_process_ecc_irq,
192};
193
194static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
195{
196 adev->gmc.vm_fault.num_types = 1;
197 adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
198
199 if (!amdgpu_sriov_vf(adev)) {
200 adev->gmc.ecc_irq.num_types = 1;
201 adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
202 }
203}
204
205/**
206 * gmc_v10_0_use_invalidate_semaphore - judge whether to use semaphore
207 *
208 * @adev: amdgpu_device pointer
209 * @vmhub: vmhub type
210 *
211 */
212static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
213 uint32_t vmhub)
214{
215 return ((vmhub == AMDGPU_MMHUB0(0)) &&
216 (!amdgpu_sriov_vf(adev)));
217}
218
219static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
220 struct amdgpu_device *adev,
221 uint8_t vmid, uint16_t *p_pasid)
222{
223 uint32_t value;
224
225 value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
226 + vmid);
227 *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
228
229 return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
230}
231
232/*
233 * GART
234 * VMID 0 is the physical GPU addresses as used by the kernel.
235 * VMIDs 1-15 are used for userspace clients and are handled
236 * by the amdgpu vm/hsa code.
237 */
238
239/**
240 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
241 *
242 * @adev: amdgpu_device pointer
243 * @vmid: vm instance to flush
244 * @vmhub: vmhub type
245 * @flush_type: the flush type
246 *
247 * Flush the TLB for the requested page table.
248 */
249static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
250 uint32_t vmhub, uint32_t flush_type)
251{
252 bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
253 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
254 u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
255 /* Use register 17 for GART */
256 const unsigned int eng = 17;
257 unsigned char hub_ip = 0;
258 u32 sem, req, ack;
259 unsigned int i;
260 u32 tmp;
261
262 sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
263 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
264 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
265
266 /* flush hdp cache */
267 adev->hdp.funcs->flush_hdp(adev, NULL);
268
269 /* This is necessary for SRIOV as well as for GFXOFF to function
270 * properly under bare metal
271 */
272 if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
273 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
274 amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
275 1 << vmid, GET_INST(GC, 0));
276 return;
277 }
278
279 /* This path is needed before KIQ/MES/GFXOFF are set up */
280 hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;
281
282 spin_lock(&adev->gmc.invalidate_lock);
283 /*
284 * It may lose gpuvm invalidate acknowldege state across power-gating
285 * off cycle, add semaphore acquire before invalidation and semaphore
286 * release after invalidation to avoid entering power gated state
287 * to WA the Issue
288 */
289
290 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
291 if (use_semaphore) {
292 for (i = 0; i < adev->usec_timeout; i++) {
293 /* a read return value of 1 means semaphore acuqire */
294 tmp = RREG32_RLC_NO_KIQ(sem, hub_ip);
295 if (tmp & 0x1)
296 break;
297 udelay(1);
298 }
299
300 if (i >= adev->usec_timeout)
301 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
302 }
303
304 WREG32_RLC_NO_KIQ(req, inv_req, hub_ip);
305
306 /*
307 * Issue a dummy read to wait for the ACK register to be cleared
308 * to avoid a false ACK due to the new fast GRBM interface.
309 */
310 if ((vmhub == AMDGPU_GFXHUB(0)) &&
311 (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 3, 0)))
312 RREG32_RLC_NO_KIQ(req, hub_ip);
313
314 /* Wait for ACK with a delay.*/
315 for (i = 0; i < adev->usec_timeout; i++) {
316 tmp = RREG32_RLC_NO_KIQ(ack, hub_ip);
317 tmp &= 1 << vmid;
318 if (tmp)
319 break;
320
321 udelay(1);
322 }
323
324 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
325 if (use_semaphore)
326 WREG32_RLC_NO_KIQ(sem, 0, hub_ip);
327
328 spin_unlock(&adev->gmc.invalidate_lock);
329
330 if (i >= adev->usec_timeout)
331 dev_err(adev->dev, "Timeout waiting for VM flush hub: %d!\n",
332 vmhub);
333}
334
335/**
336 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
337 *
338 * @adev: amdgpu_device pointer
339 * @pasid: pasid to be flush
340 * @flush_type: the flush type
341 * @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB()
342 * @inst: is used to select which instance of KIQ to use for the invalidation
343 *
344 * Flush the TLB for the requested pasid.
345 */
346static void gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
347 uint16_t pasid, uint32_t flush_type,
348 bool all_hub, uint32_t inst)
349{
350 uint16_t queried;
351 int vmid, i;
352
353 for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
354 bool valid;
355
356 valid = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
357 &queried);
358 if (!valid || queried != pasid)
359 continue;
360
361 if (all_hub) {
362 for_each_set_bit(i, adev->vmhubs_mask,
363 AMDGPU_MAX_VMHUBS)
364 gmc_v10_0_flush_gpu_tlb(adev, vmid, i,
365 flush_type);
366 } else {
367 gmc_v10_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
368 flush_type);
369 }
370 }
371}
372
373static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
374 unsigned int vmid, uint64_t pd_addr)
375{
376 bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
377 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
378 uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
379 unsigned int eng = ring->vm_inv_eng;
380
381 /*
382 * It may lose gpuvm invalidate acknowldege state across power-gating
383 * off cycle, add semaphore acquire before invalidation and semaphore
384 * release after invalidation to avoid entering power gated state
385 * to WA the Issue
386 */
387
388 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
389 if (use_semaphore)
390 /* a read return value of 1 means semaphore acuqire */
391 amdgpu_ring_emit_reg_wait(ring,
392 hub->vm_inv_eng0_sem +
393 hub->eng_distance * eng, 0x1, 0x1);
394
395 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
396 (hub->ctx_addr_distance * vmid),
397 lower_32_bits(pd_addr));
398
399 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
400 (hub->ctx_addr_distance * vmid),
401 upper_32_bits(pd_addr));
402
403 amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
404 hub->eng_distance * eng,
405 hub->vm_inv_eng0_ack +
406 hub->eng_distance * eng,
407 req, 1 << vmid);
408
409 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
410 if (use_semaphore)
411 /*
412 * add semaphore release after invalidation,
413 * write with 0 means semaphore release
414 */
415 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
416 hub->eng_distance * eng, 0);
417
418 return pd_addr;
419}
420
421static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
422 unsigned int pasid)
423{
424 struct amdgpu_device *adev = ring->adev;
425 uint32_t reg;
426
427 /* MES fw manages IH_VMID_x_LUT updating */
428 if (ring->is_mes_queue)
429 return;
430
431 if (ring->vm_hub == AMDGPU_GFXHUB(0))
432 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
433 else
434 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
435
436 amdgpu_ring_emit_wreg(ring, reg, pasid);
437}
438
439/*
440 * PTE format on NAVI 10:
441 * 63:59 reserved
442 * 58 reserved and for sienna_cichlid is used for MALL noalloc
443 * 57 reserved
444 * 56 F
445 * 55 L
446 * 54 reserved
447 * 53:52 SW
448 * 51 T
449 * 50:48 mtype
450 * 47:12 4k physical page base address
451 * 11:7 fragment
452 * 6 write
453 * 5 read
454 * 4 exe
455 * 3 Z
456 * 2 snooped
457 * 1 system
458 * 0 valid
459 *
460 * PDE format on NAVI 10:
461 * 63:59 block fragment size
462 * 58:55 reserved
463 * 54 P
464 * 53:48 reserved
465 * 47:6 physical base address of PD or PTE
466 * 5:3 reserved
467 * 2 C
468 * 1 system
469 * 0 valid
470 */
471
472static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
473{
474 switch (flags) {
475 case AMDGPU_VM_MTYPE_DEFAULT:
476 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
477 case AMDGPU_VM_MTYPE_NC:
478 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
479 case AMDGPU_VM_MTYPE_WC:
480 return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
481 case AMDGPU_VM_MTYPE_CC:
482 return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
483 case AMDGPU_VM_MTYPE_UC:
484 return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
485 default:
486 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
487 }
488}
489
490static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
491 uint64_t *addr, uint64_t *flags)
492{
493 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
494 *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
495 BUG_ON(*addr & 0xFFFF00000000003FULL);
496
497 if (!adev->gmc.translate_further)
498 return;
499
500 if (level == AMDGPU_VM_PDB1) {
501 /* Set the block fragment size */
502 if (!(*flags & AMDGPU_PDE_PTE))
503 *flags |= AMDGPU_PDE_BFS(0x9);
504
505 } else if (level == AMDGPU_VM_PDB0) {
506 if (*flags & AMDGPU_PDE_PTE)
507 *flags &= ~AMDGPU_PDE_PTE;
508 else
509 *flags |= AMDGPU_PTE_TF;
510 }
511}
512
513static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
514 struct amdgpu_bo_va_mapping *mapping,
515 uint64_t *flags)
516{
517 struct amdgpu_bo *bo = mapping->bo_va->base.bo;
518
519 *flags &= ~AMDGPU_PTE_EXECUTABLE;
520 *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
521
522 *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
523 *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
524
525 *flags &= ~AMDGPU_PTE_NOALLOC;
526 *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
527
528 if (mapping->flags & AMDGPU_PTE_PRT) {
529 *flags |= AMDGPU_PTE_PRT;
530 *flags |= AMDGPU_PTE_SNOOPED;
531 *flags |= AMDGPU_PTE_LOG;
532 *flags |= AMDGPU_PTE_SYSTEM;
533 *flags &= ~AMDGPU_PTE_VALID;
534 }
535
536 if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
537 AMDGPU_GEM_CREATE_EXT_COHERENT |
538 AMDGPU_GEM_CREATE_UNCACHED))
539 *flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
540 AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
541}
542
543static unsigned int gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
544{
545 u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
546 unsigned int size;
547
548 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
549 size = AMDGPU_VBIOS_VGA_ALLOCATION;
550 } else {
551 u32 viewport;
552 u32 pitch;
553
554 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
555 pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
556 size = (REG_GET_FIELD(viewport,
557 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
558 REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
559 4);
560 }
561
562 return size;
563}
564
565static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
566 .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
567 .flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
568 .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
569 .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
570 .map_mtype = gmc_v10_0_map_mtype,
571 .get_vm_pde = gmc_v10_0_get_vm_pde,
572 .get_vm_pte = gmc_v10_0_get_vm_pte,
573 .get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
574};
575
576static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
577{
578 if (adev->gmc.gmc_funcs == NULL)
579 adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
580}
581
582static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
583{
584 switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
585 case IP_VERSION(8, 7, 0):
586 adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
587 adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
588 adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
589 adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
590 adev->umc.retire_unit = 1;
591 adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
592 adev->umc.ras = &umc_v8_7_ras;
593 break;
594 default:
595 break;
596 }
597}
598
599static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
600{
601 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
602 case IP_VERSION(2, 3, 0):
603 case IP_VERSION(2, 4, 0):
604 case IP_VERSION(2, 4, 1):
605 adev->mmhub.funcs = &mmhub_v2_3_funcs;
606 break;
607 default:
608 adev->mmhub.funcs = &mmhub_v2_0_funcs;
609 break;
610 }
611}
612
613static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
614{
615 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
616 case IP_VERSION(10, 3, 0):
617 case IP_VERSION(10, 3, 2):
618 case IP_VERSION(10, 3, 1):
619 case IP_VERSION(10, 3, 4):
620 case IP_VERSION(10, 3, 5):
621 case IP_VERSION(10, 3, 6):
622 case IP_VERSION(10, 3, 3):
623 case IP_VERSION(10, 3, 7):
624 adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
625 break;
626 default:
627 adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
628 break;
629 }
630}
631
632
633static int gmc_v10_0_early_init(void *handle)
634{
635 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
636
637 gmc_v10_0_set_mmhub_funcs(adev);
638 gmc_v10_0_set_gfxhub_funcs(adev);
639 gmc_v10_0_set_gmc_funcs(adev);
640 gmc_v10_0_set_irq_funcs(adev);
641 gmc_v10_0_set_umc_funcs(adev);
642
643 adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
644 adev->gmc.shared_aperture_end =
645 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
646 adev->gmc.private_aperture_start = 0x1000000000000000ULL;
647 adev->gmc.private_aperture_end =
648 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
649 adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
650
651 return 0;
652}
653
654static int gmc_v10_0_late_init(void *handle)
655{
656 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
657 int r;
658
659 r = amdgpu_gmc_allocate_vm_inv_eng(adev);
660 if (r)
661 return r;
662
663 r = amdgpu_gmc_ras_late_init(adev);
664 if (r)
665 return r;
666
667 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
668}
669
670static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
671 struct amdgpu_gmc *mc)
672{
673 u64 base = 0;
674
675 base = adev->gfxhub.funcs->get_fb_location(adev);
676
677 /* add the xgmi offset of the physical node */
678 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
679
680 amdgpu_gmc_set_agp_default(adev, mc);
681 amdgpu_gmc_vram_location(adev, &adev->gmc, base);
682 amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
683 if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
684 amdgpu_gmc_agp_location(adev, mc);
685
686 /* base offset of vram pages */
687 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
688
689 /* add the xgmi offset of the physical node */
690 adev->vm_manager.vram_base_offset +=
691 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
692}
693
694/**
695 * gmc_v10_0_mc_init - initialize the memory controller driver params
696 *
697 * @adev: amdgpu_device pointer
698 *
699 * Look up the amount of vram, vram width, and decide how to place
700 * vram and gart within the GPU's physical address space.
701 * Returns 0 for success.
702 */
703static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
704{
705 int r;
706
707 /* size in MB on si */
708 adev->gmc.mc_vram_size =
709 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
710 adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
711
712 if (!(adev->flags & AMD_IS_APU)) {
713 r = amdgpu_device_resize_fb_bar(adev);
714 if (r)
715 return r;
716 }
717 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
718 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
719
720#ifdef CONFIG_X86_64
721 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
722 adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
723 adev->gmc.aper_size = adev->gmc.real_vram_size;
724 }
725#endif
726
727 adev->gmc.visible_vram_size = adev->gmc.aper_size;
728
729 /* set the gart size */
730 if (amdgpu_gart_size == -1) {
731 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
732 default:
733 adev->gmc.gart_size = 512ULL << 20;
734 break;
735 case IP_VERSION(10, 3, 1): /* DCE SG support */
736 case IP_VERSION(10, 3, 3): /* DCE SG support */
737 case IP_VERSION(10, 3, 6): /* DCE SG support */
738 case IP_VERSION(10, 3, 7): /* DCE SG support */
739 adev->gmc.gart_size = 1024ULL << 20;
740 break;
741 }
742 } else {
743 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
744 }
745
746 gmc_v10_0_vram_gtt_location(adev, &adev->gmc);
747
748 return 0;
749}
750
751static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
752{
753 int r;
754
755 if (adev->gart.bo) {
756 WARN(1, "NAVI10 PCIE GART already initialized\n");
757 return 0;
758 }
759
760 /* Initialize common gart structure */
761 r = amdgpu_gart_init(adev);
762 if (r)
763 return r;
764
765 adev->gart.table_size = adev->gart.num_gpu_pages * 8;
766 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
767 AMDGPU_PTE_EXECUTABLE;
768
769 return amdgpu_gart_table_vram_alloc(adev);
770}
771
772static int gmc_v10_0_sw_init(void *handle)
773{
774 int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
775 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
776
777 adev->gfxhub.funcs->init(adev);
778
779 adev->mmhub.funcs->init(adev);
780
781 spin_lock_init(&adev->gmc.invalidate_lock);
782
783 if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
784 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
785 adev->gmc.vram_width = 64;
786 } else if (amdgpu_emu_mode == 1) {
787 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
788 adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
789 } else {
790 r = amdgpu_atomfirmware_get_vram_info(adev,
791 &vram_width, &vram_type, &vram_vendor);
792 adev->gmc.vram_width = vram_width;
793
794 adev->gmc.vram_type = vram_type;
795 adev->gmc.vram_vendor = vram_vendor;
796 }
797
798 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
799 case IP_VERSION(10, 3, 0):
800 adev->gmc.mall_size = 128 * 1024 * 1024;
801 break;
802 case IP_VERSION(10, 3, 2):
803 adev->gmc.mall_size = 96 * 1024 * 1024;
804 break;
805 case IP_VERSION(10, 3, 4):
806 adev->gmc.mall_size = 32 * 1024 * 1024;
807 break;
808 case IP_VERSION(10, 3, 5):
809 adev->gmc.mall_size = 16 * 1024 * 1024;
810 break;
811 default:
812 adev->gmc.mall_size = 0;
813 break;
814 }
815
816 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
817 case IP_VERSION(10, 1, 10):
818 case IP_VERSION(10, 1, 1):
819 case IP_VERSION(10, 1, 2):
820 case IP_VERSION(10, 1, 3):
821 case IP_VERSION(10, 1, 4):
822 case IP_VERSION(10, 3, 0):
823 case IP_VERSION(10, 3, 2):
824 case IP_VERSION(10, 3, 1):
825 case IP_VERSION(10, 3, 4):
826 case IP_VERSION(10, 3, 5):
827 case IP_VERSION(10, 3, 6):
828 case IP_VERSION(10, 3, 3):
829 case IP_VERSION(10, 3, 7):
830 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
831 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
832 /*
833 * To fulfill 4-level page support,
834 * vm size is 256TB (48bit), maximum size of Navi10/Navi14/Navi12,
835 * block size 512 (9bit)
836 */
837 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
838 break;
839 default:
840 break;
841 }
842
843 /* This interrupt is VMC page fault.*/
844 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
845 VMC_1_0__SRCID__VM_FAULT,
846 &adev->gmc.vm_fault);
847
848 if (r)
849 return r;
850
851 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
852 UTCL2_1_0__SRCID__FAULT,
853 &adev->gmc.vm_fault);
854 if (r)
855 return r;
856
857 if (!amdgpu_sriov_vf(adev)) {
858 /* interrupt sent to DF. */
859 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
860 &adev->gmc.ecc_irq);
861 if (r)
862 return r;
863 }
864
865 /*
866 * Set the internal MC address mask This is the max address of the GPU's
867 * internal address space.
868 */
869 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
870
871 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
872 if (r) {
873 dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
874 return r;
875 }
876
877 adev->need_swiotlb = drm_need_swiotlb(44);
878
879 r = gmc_v10_0_mc_init(adev);
880 if (r)
881 return r;
882
883 amdgpu_gmc_get_vbios_allocations(adev);
884
885 /* Memory manager */
886 r = amdgpu_bo_init(adev);
887 if (r)
888 return r;
889
890 r = gmc_v10_0_gart_init(adev);
891 if (r)
892 return r;
893
894 /*
895 * number of VMs
896 * VMID 0 is reserved for System
897 * amdgpu graphics/compute will use VMIDs 1-7
898 * amdkfd will use VMIDs 8-15
899 */
900 adev->vm_manager.first_kfd_vmid = 8;
901
902 amdgpu_vm_manager_init(adev);
903
904 r = amdgpu_gmc_ras_sw_init(adev);
905 if (r)
906 return r;
907
908 return 0;
909}
910
911/**
912 * gmc_v10_0_gart_fini - vm fini callback
913 *
914 * @adev: amdgpu_device pointer
915 *
916 * Tears down the driver GART/VM setup (CIK).
917 */
918static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
919{
920 amdgpu_gart_table_vram_free(adev);
921}
922
923static int gmc_v10_0_sw_fini(void *handle)
924{
925 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
926
927 amdgpu_vm_manager_fini(adev);
928 gmc_v10_0_gart_fini(adev);
929 amdgpu_gem_force_release(adev);
930 amdgpu_bo_fini(adev);
931
932 return 0;
933}
934
935static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
936{
937}
938
939/**
940 * gmc_v10_0_gart_enable - gart enable
941 *
942 * @adev: amdgpu_device pointer
943 */
944static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
945{
946 int r;
947 bool value;
948
949 if (adev->gart.bo == NULL) {
950 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
951 return -EINVAL;
952 }
953
954 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
955
956 if (!adev->in_s0ix) {
957 r = adev->gfxhub.funcs->gart_enable(adev);
958 if (r)
959 return r;
960 }
961
962 r = adev->mmhub.funcs->gart_enable(adev);
963 if (r)
964 return r;
965
966 adev->hdp.funcs->init_registers(adev);
967
968 /* Flush HDP after it is initialized */
969 adev->hdp.funcs->flush_hdp(adev, NULL);
970
971 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
972 false : true;
973
974 if (!adev->in_s0ix)
975 adev->gfxhub.funcs->set_fault_enable_default(adev, value);
976 adev->mmhub.funcs->set_fault_enable_default(adev, value);
977 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
978 if (!adev->in_s0ix)
979 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
980
981 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
982 (unsigned int)(adev->gmc.gart_size >> 20),
983 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
984
985 return 0;
986}
987
988static int gmc_v10_0_hw_init(void *handle)
989{
990 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
991 int r;
992
993 adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;
994
995 /* The sequence of these two function calls matters.*/
996 gmc_v10_0_init_golden_registers(adev);
997
998 /*
999 * harvestable groups in gc_utcl2 need to be programmed before any GFX block
1000 * register setup within GMC, or else system hang when harvesting SA.
1001 */
1002 if (!adev->in_s0ix && adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
1003 adev->gfxhub.funcs->utcl2_harvest(adev);
1004
1005 r = gmc_v10_0_gart_enable(adev);
1006 if (r)
1007 return r;
1008
1009 if (amdgpu_emu_mode == 1) {
1010 r = amdgpu_gmc_vram_checking(adev);
1011 if (r)
1012 return r;
1013 }
1014
1015 if (adev->umc.funcs && adev->umc.funcs->init_registers)
1016 adev->umc.funcs->init_registers(adev);
1017
1018 return 0;
1019}
1020
1021/**
1022 * gmc_v10_0_gart_disable - gart disable
1023 *
1024 * @adev: amdgpu_device pointer
1025 *
1026 * This disables all VM page table.
1027 */
1028static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
1029{
1030 if (!adev->in_s0ix)
1031 adev->gfxhub.funcs->gart_disable(adev);
1032 adev->mmhub.funcs->gart_disable(adev);
1033}
1034
1035static int gmc_v10_0_hw_fini(void *handle)
1036{
1037 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1038
1039 gmc_v10_0_gart_disable(adev);
1040
1041 if (amdgpu_sriov_vf(adev)) {
1042 /* full access mode, so don't touch any GMC register */
1043 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
1044 return 0;
1045 }
1046
1047 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1048
1049 if (adev->gmc.ecc_irq.funcs &&
1050 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
1051 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
1052
1053 return 0;
1054}
1055
1056static int gmc_v10_0_suspend(void *handle)
1057{
1058 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1059
1060 gmc_v10_0_hw_fini(adev);
1061
1062 return 0;
1063}
1064
1065static int gmc_v10_0_resume(void *handle)
1066{
1067 int r;
1068 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1069
1070 r = gmc_v10_0_hw_init(adev);
1071 if (r)
1072 return r;
1073
1074 amdgpu_vmid_reset_all(adev);
1075
1076 return 0;
1077}
1078
1079static bool gmc_v10_0_is_idle(void *handle)
1080{
1081 /* MC is always ready in GMC v10.*/
1082 return true;
1083}
1084
1085static int gmc_v10_0_wait_for_idle(void *handle)
1086{
1087 /* There is no need to wait for MC idle in GMC v10.*/
1088 return 0;
1089}
1090
1091static int gmc_v10_0_soft_reset(void *handle)
1092{
1093 return 0;
1094}
1095
1096static int gmc_v10_0_set_clockgating_state(void *handle,
1097 enum amd_clockgating_state state)
1098{
1099 int r;
1100 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1101
1102 /*
1103 * The issue mmhub can't disconnect from DF with MMHUB clock gating being disabled
1104 * is a new problem observed at DF 3.0.3, however with the same suspend sequence not
1105 * seen any issue on the DF 3.0.2 series platform.
1106 */
1107 if (adev->in_s0ix &&
1108 amdgpu_ip_version(adev, DF_HWIP, 0) > IP_VERSION(3, 0, 2)) {
1109 dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n");
1110 return 0;
1111 }
1112
1113 r = adev->mmhub.funcs->set_clockgating(adev, state);
1114 if (r)
1115 return r;
1116
1117 if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0))
1118 return athub_v2_1_set_clockgating(adev, state);
1119 else
1120 return athub_v2_0_set_clockgating(adev, state);
1121}
1122
1123static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags)
1124{
1125 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1126
1127 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 3) ||
1128 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 4))
1129 return;
1130
1131 adev->mmhub.funcs->get_clockgating(adev, flags);
1132
1133 if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0))
1134 athub_v2_1_get_clockgating(adev, flags);
1135 else
1136 athub_v2_0_get_clockgating(adev, flags);
1137}
1138
1139static int gmc_v10_0_set_powergating_state(void *handle,
1140 enum amd_powergating_state state)
1141{
1142 return 0;
1143}
1144
1145const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
1146 .name = "gmc_v10_0",
1147 .early_init = gmc_v10_0_early_init,
1148 .late_init = gmc_v10_0_late_init,
1149 .sw_init = gmc_v10_0_sw_init,
1150 .sw_fini = gmc_v10_0_sw_fini,
1151 .hw_init = gmc_v10_0_hw_init,
1152 .hw_fini = gmc_v10_0_hw_fini,
1153 .suspend = gmc_v10_0_suspend,
1154 .resume = gmc_v10_0_resume,
1155 .is_idle = gmc_v10_0_is_idle,
1156 .wait_for_idle = gmc_v10_0_wait_for_idle,
1157 .soft_reset = gmc_v10_0_soft_reset,
1158 .set_clockgating_state = gmc_v10_0_set_clockgating_state,
1159 .set_powergating_state = gmc_v10_0_set_powergating_state,
1160 .get_clockgating_state = gmc_v10_0_get_clockgating_state,
1161};
1162
1163const struct amdgpu_ip_block_version gmc_v10_0_ip_block = {
1164 .type = AMD_IP_BLOCK_TYPE_GMC,
1165 .major = 10,
1166 .minor = 0,
1167 .rev = 0,
1168 .funcs = &gmc_v10_0_ip_funcs,
1169};
1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/firmware.h>
24#include <linux/pci.h>
25#include "amdgpu.h"
26#include "amdgpu_atomfirmware.h"
27#include "gmc_v10_0.h"
28
29#include "hdp/hdp_5_0_0_offset.h"
30#include "hdp/hdp_5_0_0_sh_mask.h"
31#include "gc/gc_10_1_0_sh_mask.h"
32#include "mmhub/mmhub_2_0_0_sh_mask.h"
33#include "athub/athub_2_0_0_sh_mask.h"
34#include "athub/athub_2_0_0_offset.h"
35#include "dcn/dcn_2_0_0_offset.h"
36#include "dcn/dcn_2_0_0_sh_mask.h"
37#include "oss/osssys_5_0_0_offset.h"
38#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
39#include "navi10_enum.h"
40
41#include "soc15.h"
42#include "soc15d.h"
43#include "soc15_common.h"
44
45#include "nbio_v2_3.h"
46
47#include "gfxhub_v2_0.h"
48#include "gfxhub_v2_1.h"
49#include "mmhub_v2_0.h"
50#include "athub_v2_0.h"
51#include "athub_v2_1.h"
52
53#if 0
54static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
55{
56 /* TODO add golden setting for hdp */
57};
58#endif
59
60static int
61gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
62 struct amdgpu_irq_src *src, unsigned type,
63 enum amdgpu_interrupt_state state)
64{
65 struct amdgpu_vmhub *hub;
66 u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i;
67
68 bits[AMDGPU_GFXHUB_0] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
69 GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
70 GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
71 GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
72 GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
73 GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
74 GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
75
76 bits[AMDGPU_MMHUB_0] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
77 MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
78 MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
79 MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
80 MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
81 MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
82 MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
83
84 switch (state) {
85 case AMDGPU_IRQ_STATE_DISABLE:
86 /* MM HUB */
87 hub = &adev->vmhub[AMDGPU_MMHUB_0];
88 for (i = 0; i < 16; i++) {
89 reg = hub->vm_context0_cntl + hub->ctx_distance * i;
90 tmp = RREG32(reg);
91 tmp &= ~bits[AMDGPU_MMHUB_0];
92 WREG32(reg, tmp);
93 }
94
95 /* GFX HUB */
96 hub = &adev->vmhub[AMDGPU_GFXHUB_0];
97 for (i = 0; i < 16; i++) {
98 reg = hub->vm_context0_cntl + hub->ctx_distance * i;
99 tmp = RREG32(reg);
100 tmp &= ~bits[AMDGPU_GFXHUB_0];
101 WREG32(reg, tmp);
102 }
103 break;
104 case AMDGPU_IRQ_STATE_ENABLE:
105 /* MM HUB */
106 hub = &adev->vmhub[AMDGPU_MMHUB_0];
107 for (i = 0; i < 16; i++) {
108 reg = hub->vm_context0_cntl + hub->ctx_distance * i;
109 tmp = RREG32(reg);
110 tmp |= bits[AMDGPU_MMHUB_0];
111 WREG32(reg, tmp);
112 }
113
114 /* GFX HUB */
115 hub = &adev->vmhub[AMDGPU_GFXHUB_0];
116 for (i = 0; i < 16; i++) {
117 reg = hub->vm_context0_cntl + hub->ctx_distance * i;
118 tmp = RREG32(reg);
119 tmp |= bits[AMDGPU_GFXHUB_0];
120 WREG32(reg, tmp);
121 }
122 break;
123 default:
124 break;
125 }
126
127 return 0;
128}
129
130static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
131 struct amdgpu_irq_src *source,
132 struct amdgpu_iv_entry *entry)
133{
134 struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
135 uint32_t status = 0;
136 u64 addr;
137
138 addr = (u64)entry->src_data[0] << 12;
139 addr |= ((u64)entry->src_data[1] & 0xf) << 44;
140
141 if (!amdgpu_sriov_vf(adev)) {
142 /*
143 * Issue a dummy read to wait for the status register to
144 * be updated to avoid reading an incorrect value due to
145 * the new fast GRBM interface.
146 */
147 if (entry->vmid_src == AMDGPU_GFXHUB_0)
148 RREG32(hub->vm_l2_pro_fault_status);
149
150 status = RREG32(hub->vm_l2_pro_fault_status);
151 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
152 }
153
154 if (printk_ratelimit()) {
155 struct amdgpu_task_info task_info;
156
157 memset(&task_info, 0, sizeof(struct amdgpu_task_info));
158 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
159
160 dev_err(adev->dev,
161 "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
162 "for process %s pid %d thread %s pid %d)\n",
163 entry->vmid_src ? "mmhub" : "gfxhub",
164 entry->src_id, entry->ring_id, entry->vmid,
165 entry->pasid, task_info.process_name, task_info.tgid,
166 task_info.task_name, task_info.pid);
167 dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
168 addr, entry->client_id);
169 if (!amdgpu_sriov_vf(adev)) {
170 dev_err(adev->dev,
171 "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
172 status);
173 dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
174 REG_GET_FIELD(status,
175 GCVM_L2_PROTECTION_FAULT_STATUS, CID));
176 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
177 REG_GET_FIELD(status,
178 GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
179 dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
180 REG_GET_FIELD(status,
181 GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
182 dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
183 REG_GET_FIELD(status,
184 GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
185 dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
186 REG_GET_FIELD(status,
187 GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
188 dev_err(adev->dev, "\t RW: 0x%lx\n",
189 REG_GET_FIELD(status,
190 GCVM_L2_PROTECTION_FAULT_STATUS, RW));
191 }
192 }
193
194 return 0;
195}
196
197static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
198 .set = gmc_v10_0_vm_fault_interrupt_state,
199 .process = gmc_v10_0_process_interrupt,
200};
201
202static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
203{
204 adev->gmc.vm_fault.num_types = 1;
205 adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
206}
207
208static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
209 uint32_t flush_type)
210{
211 u32 req = 0;
212
213 /* invalidate using legacy mode on vmid*/
214 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
215 PER_VMID_INVALIDATE_REQ, 1 << vmid);
216 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
217 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
218 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
219 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
220 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
221 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
222 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
223 CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
224
225 return req;
226}
227
228/**
229 * gmc_v10_0_use_invalidate_semaphore - judge whether to use semaphore
230 *
231 * @adev: amdgpu_device pointer
232 * @vmhub: vmhub type
233 *
234 */
235static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
236 uint32_t vmhub)
237{
238 return ((vmhub == AMDGPU_MMHUB_0 ||
239 vmhub == AMDGPU_MMHUB_1) &&
240 (!amdgpu_sriov_vf(adev)));
241}
242
243static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
244 struct amdgpu_device *adev,
245 uint8_t vmid, uint16_t *p_pasid)
246{
247 uint32_t value;
248
249 value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
250 + vmid);
251 *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
252
253 return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
254}
255
256/*
257 * GART
258 * VMID 0 is the physical GPU addresses as used by the kernel.
259 * VMIDs 1-15 are used for userspace clients and are handled
260 * by the amdgpu vm/hsa code.
261 */
262
263static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
264 unsigned int vmhub, uint32_t flush_type)
265{
266 bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
267 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
268 u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
269 u32 tmp;
270 /* Use register 17 for GART */
271 const unsigned eng = 17;
272 unsigned int i;
273
274 spin_lock(&adev->gmc.invalidate_lock);
275 /*
276 * It may lose gpuvm invalidate acknowldege state across power-gating
277 * off cycle, add semaphore acquire before invalidation and semaphore
278 * release after invalidation to avoid entering power gated state
279 * to WA the Issue
280 */
281
282 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
283 if (use_semaphore) {
284 for (i = 0; i < adev->usec_timeout; i++) {
285 /* a read return value of 1 means semaphore acuqire */
286 tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
287 hub->eng_distance * eng);
288 if (tmp & 0x1)
289 break;
290 udelay(1);
291 }
292
293 if (i >= adev->usec_timeout)
294 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
295 }
296
297 WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
298
299 /*
300 * Issue a dummy read to wait for the ACK register to be cleared
301 * to avoid a false ACK due to the new fast GRBM interface.
302 */
303 if (vmhub == AMDGPU_GFXHUB_0)
304 RREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng);
305
306 /* Wait for ACK with a delay.*/
307 for (i = 0; i < adev->usec_timeout; i++) {
308 tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
309 hub->eng_distance * eng);
310 tmp &= 1 << vmid;
311 if (tmp)
312 break;
313
314 udelay(1);
315 }
316
317 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
318 if (use_semaphore)
319 /*
320 * add semaphore release after invalidation,
321 * write with 0 means semaphore release
322 */
323 WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
324 hub->eng_distance * eng, 0);
325
326 spin_unlock(&adev->gmc.invalidate_lock);
327
328 if (i < adev->usec_timeout)
329 return;
330
331 DRM_ERROR("Timeout waiting for VM flush ACK!\n");
332}
333
334/**
335 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
336 *
337 * @adev: amdgpu_device pointer
338 * @vmid: vm instance to flush
339 *
340 * Flush the TLB for the requested page table.
341 */
342static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
343 uint32_t vmhub, uint32_t flush_type)
344{
345 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
346 struct dma_fence *fence;
347 struct amdgpu_job *job;
348
349 int r;
350
351 /* flush hdp cache */
352 adev->nbio.funcs->hdp_flush(adev, NULL);
353
354 /* For SRIOV run time, driver shouldn't access the register through MMIO
355 * Directly use kiq to do the vm invalidation instead
356 */
357 if (adev->gfx.kiq.ring.sched.ready &&
358 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
359 !adev->in_gpu_reset) {
360
361 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
362 const unsigned eng = 17;
363 u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
364 u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
365 u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
366
367 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
368 1 << vmid);
369 return;
370 }
371
372 mutex_lock(&adev->mman.gtt_window_lock);
373
374 if (vmhub == AMDGPU_MMHUB_0) {
375 gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
376 mutex_unlock(&adev->mman.gtt_window_lock);
377 return;
378 }
379
380 BUG_ON(vmhub != AMDGPU_GFXHUB_0);
381
382 if (!adev->mman.buffer_funcs_enabled ||
383 !adev->ib_pool_ready ||
384 adev->in_gpu_reset ||
385 ring->sched.ready == false) {
386 gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
387 mutex_unlock(&adev->mman.gtt_window_lock);
388 return;
389 }
390
391 /* The SDMA on Navi has a bug which can theoretically result in memory
392 * corruption if an invalidation happens at the same time as an VA
393 * translation. Avoid this by doing the invalidation from the SDMA
394 * itself.
395 */
396 r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
397 &job);
398 if (r)
399 goto error_alloc;
400
401 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
402 job->vm_needs_flush = true;
403 job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
404 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
405 r = amdgpu_job_submit(job, &adev->mman.entity,
406 AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
407 if (r)
408 goto error_submit;
409
410 mutex_unlock(&adev->mman.gtt_window_lock);
411
412 dma_fence_wait(fence, false);
413 dma_fence_put(fence);
414
415 return;
416
417error_submit:
418 amdgpu_job_free(job);
419
420error_alloc:
421 mutex_unlock(&adev->mman.gtt_window_lock);
422 DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
423}
424
425/**
426 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
427 *
428 * @adev: amdgpu_device pointer
429 * @pasid: pasid to be flush
430 *
431 * Flush the TLB for the requested pasid.
432 */
433static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
434 uint16_t pasid, uint32_t flush_type,
435 bool all_hub)
436{
437 int vmid, i;
438 signed long r;
439 uint32_t seq;
440 uint16_t queried_pasid;
441 bool ret;
442 struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
443 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
444
445 if (amdgpu_emu_mode == 0 && ring->sched.ready) {
446 spin_lock(&adev->gfx.kiq.ring_lock);
447 /* 2 dwords flush + 8 dwords fence */
448 amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
449 kiq->pmf->kiq_invalidate_tlbs(ring,
450 pasid, flush_type, all_hub);
451 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
452 if (r) {
453 amdgpu_ring_undo(ring);
454 spin_unlock(&adev->gfx.kiq.ring_lock);
455 return -ETIME;
456 }
457
458 amdgpu_ring_commit(ring);
459 spin_unlock(&adev->gfx.kiq.ring_lock);
460 r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
461 if (r < 1) {
462 DRM_ERROR("wait for kiq fence error: %ld.\n", r);
463 return -ETIME;
464 }
465
466 return 0;
467 }
468
469 for (vmid = 1; vmid < 16; vmid++) {
470
471 ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
472 &queried_pasid);
473 if (ret && queried_pasid == pasid) {
474 if (all_hub) {
475 for (i = 0; i < adev->num_vmhubs; i++)
476 gmc_v10_0_flush_gpu_tlb(adev, vmid,
477 i, flush_type);
478 } else {
479 gmc_v10_0_flush_gpu_tlb(adev, vmid,
480 AMDGPU_GFXHUB_0, flush_type);
481 }
482 break;
483 }
484 }
485
486 return 0;
487}
488
489static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
490 unsigned vmid, uint64_t pd_addr)
491{
492 bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
493 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
494 uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
495 unsigned eng = ring->vm_inv_eng;
496
497 /*
498 * It may lose gpuvm invalidate acknowldege state across power-gating
499 * off cycle, add semaphore acquire before invalidation and semaphore
500 * release after invalidation to avoid entering power gated state
501 * to WA the Issue
502 */
503
504 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
505 if (use_semaphore)
506 /* a read return value of 1 means semaphore acuqire */
507 amdgpu_ring_emit_reg_wait(ring,
508 hub->vm_inv_eng0_sem +
509 hub->eng_distance * eng, 0x1, 0x1);
510
511 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
512 (hub->ctx_addr_distance * vmid),
513 lower_32_bits(pd_addr));
514
515 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
516 (hub->ctx_addr_distance * vmid),
517 upper_32_bits(pd_addr));
518
519 amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
520 hub->eng_distance * eng,
521 hub->vm_inv_eng0_ack +
522 hub->eng_distance * eng,
523 req, 1 << vmid);
524
525 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
526 if (use_semaphore)
527 /*
528 * add semaphore release after invalidation,
529 * write with 0 means semaphore release
530 */
531 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
532 hub->eng_distance * eng, 0);
533
534 return pd_addr;
535}
536
537static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
538 unsigned pasid)
539{
540 struct amdgpu_device *adev = ring->adev;
541 uint32_t reg;
542
543 if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
544 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
545 else
546 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
547
548 amdgpu_ring_emit_wreg(ring, reg, pasid);
549}
550
551/*
552 * PTE format on NAVI 10:
553 * 63:59 reserved
554 * 58:57 reserved
555 * 56 F
556 * 55 L
557 * 54 reserved
558 * 53:52 SW
559 * 51 T
560 * 50:48 mtype
561 * 47:12 4k physical page base address
562 * 11:7 fragment
563 * 6 write
564 * 5 read
565 * 4 exe
566 * 3 Z
567 * 2 snooped
568 * 1 system
569 * 0 valid
570 *
571 * PDE format on NAVI 10:
572 * 63:59 block fragment size
573 * 58:55 reserved
574 * 54 P
575 * 53:48 reserved
576 * 47:6 physical base address of PD or PTE
577 * 5:3 reserved
578 * 2 C
579 * 1 system
580 * 0 valid
581 */
582
583static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
584{
585 switch (flags) {
586 case AMDGPU_VM_MTYPE_DEFAULT:
587 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
588 case AMDGPU_VM_MTYPE_NC:
589 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
590 case AMDGPU_VM_MTYPE_WC:
591 return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
592 case AMDGPU_VM_MTYPE_CC:
593 return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
594 case AMDGPU_VM_MTYPE_UC:
595 return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
596 default:
597 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
598 }
599}
600
601static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
602 uint64_t *addr, uint64_t *flags)
603{
604 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
605 *addr = adev->vm_manager.vram_base_offset + *addr -
606 adev->gmc.vram_start;
607 BUG_ON(*addr & 0xFFFF00000000003FULL);
608
609 if (!adev->gmc.translate_further)
610 return;
611
612 if (level == AMDGPU_VM_PDB1) {
613 /* Set the block fragment size */
614 if (!(*flags & AMDGPU_PDE_PTE))
615 *flags |= AMDGPU_PDE_BFS(0x9);
616
617 } else if (level == AMDGPU_VM_PDB0) {
618 if (*flags & AMDGPU_PDE_PTE)
619 *flags &= ~AMDGPU_PDE_PTE;
620 else
621 *flags |= AMDGPU_PTE_TF;
622 }
623}
624
625static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
626 struct amdgpu_bo_va_mapping *mapping,
627 uint64_t *flags)
628{
629 *flags &= ~AMDGPU_PTE_EXECUTABLE;
630 *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
631
632 *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
633 *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
634
635 if (mapping->flags & AMDGPU_PTE_PRT) {
636 *flags |= AMDGPU_PTE_PRT;
637 *flags |= AMDGPU_PTE_SNOOPED;
638 *flags |= AMDGPU_PTE_LOG;
639 *flags |= AMDGPU_PTE_SYSTEM;
640 *flags &= ~AMDGPU_PTE_VALID;
641 }
642}
643
644static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
645 .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
646 .flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
647 .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
648 .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
649 .map_mtype = gmc_v10_0_map_mtype,
650 .get_vm_pde = gmc_v10_0_get_vm_pde,
651 .get_vm_pte = gmc_v10_0_get_vm_pte
652};
653
654static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
655{
656 if (adev->gmc.gmc_funcs == NULL)
657 adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
658}
659
660static int gmc_v10_0_early_init(void *handle)
661{
662 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
663
664 gmc_v10_0_set_gmc_funcs(adev);
665 gmc_v10_0_set_irq_funcs(adev);
666
667 adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
668 adev->gmc.shared_aperture_end =
669 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
670 adev->gmc.private_aperture_start = 0x1000000000000000ULL;
671 adev->gmc.private_aperture_end =
672 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
673
674 return 0;
675}
676
677static int gmc_v10_0_late_init(void *handle)
678{
679 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
680 int r;
681
682 amdgpu_bo_late_init(adev);
683
684 r = amdgpu_gmc_allocate_vm_inv_eng(adev);
685 if (r)
686 return r;
687
688 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
689}
690
691static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
692 struct amdgpu_gmc *mc)
693{
694 u64 base = 0;
695
696 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
697 adev->asic_type == CHIP_NAVY_FLOUNDER)
698 base = gfxhub_v2_1_get_fb_location(adev);
699 else
700 base = gfxhub_v2_0_get_fb_location(adev);
701
702 /* add the xgmi offset of the physical node */
703 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
704
705 amdgpu_gmc_vram_location(adev, &adev->gmc, base);
706 amdgpu_gmc_gart_location(adev, mc);
707
708 /* base offset of vram pages */
709 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
710 adev->asic_type == CHIP_NAVY_FLOUNDER)
711 adev->vm_manager.vram_base_offset = gfxhub_v2_1_get_mc_fb_offset(adev);
712 else
713 adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev);
714
715 /* add the xgmi offset of the physical node */
716 adev->vm_manager.vram_base_offset +=
717 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
718}
719
720/**
721 * gmc_v10_0_mc_init - initialize the memory controller driver params
722 *
723 * @adev: amdgpu_device pointer
724 *
725 * Look up the amount of vram, vram width, and decide how to place
726 * vram and gart within the GPU's physical address space.
727 * Returns 0 for success.
728 */
729static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
730{
731 int r;
732
733 /* size in MB on si */
734 adev->gmc.mc_vram_size =
735 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
736 adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
737
738 if (!(adev->flags & AMD_IS_APU)) {
739 r = amdgpu_device_resize_fb_bar(adev);
740 if (r)
741 return r;
742 }
743 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
744 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
745
746 /* In case the PCI BAR is larger than the actual amount of vram */
747 adev->gmc.visible_vram_size = adev->gmc.aper_size;
748 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
749 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
750
751 /* set the gart size */
752 if (amdgpu_gart_size == -1) {
753 switch (adev->asic_type) {
754 case CHIP_NAVI10:
755 case CHIP_NAVI14:
756 case CHIP_NAVI12:
757 case CHIP_SIENNA_CICHLID:
758 case CHIP_NAVY_FLOUNDER:
759 default:
760 adev->gmc.gart_size = 512ULL << 20;
761 break;
762 }
763 } else
764 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
765
766 gmc_v10_0_vram_gtt_location(adev, &adev->gmc);
767
768 return 0;
769}
770
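/**
 * gmc_v10_0_gart_init - allocate and configure the GART page table
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the common GART structure, sizes the table at 8 bytes per
 * GPU page, and allocates it in VRAM with uncached, executable default
 * PTE flags.
 */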
771static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
772{
773 int r;
774
775 if (adev->gart.bo) {
776 WARN(1, "NAVI10 PCIE GART already initialized\n");
777 return 0;
778 }
779
780 /* Initialize common gart structure */
781 r = amdgpu_gart_init(adev);
782 if (r)
783 return r;
784
785 adev->gart.table_size = adev->gart.num_gpu_pages * 8;
786 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
787 AMDGPU_PTE_EXECUTABLE;
788
789 return amdgpu_gart_table_vram_alloc(adev);
790}
791
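/**
 * gmc_v10_0_get_vbios_fb_size - size of the VRAM still used by the VBIOS
 *
 * @adev: amdgpu_device pointer
 *
 * Estimates how much VRAM the pre-OS console is scanning out of so it can
 * be reserved as stolen memory.  With VGA emulation enabled a fixed 9 MB
 * (8 MB for the VGA emulator plus 1 MB of framebuffer) is assumed;
 * otherwise the size is viewport height * pitch * 4 bytes per pixel,
 * e.g. roughly 8 MB for a 1080-line viewport with a 1920-pixel pitch.
 * Returns 0 (with a warning) if reserving it would leave less than 8 MB
 * of VRAM free.
 */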
792static unsigned int gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
793{
794 u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
795 unsigned int size;
796
797 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
798 size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
799 } else {
800 u32 viewport;
801 u32 pitch;
802
803 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
804 pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
805 size = (REG_GET_FIELD(viewport,
806 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
807 REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
808 4);
809 }
810 /* return 0 if the pre-OS buffer uses up most of vram */
811 if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) {
812 DRM_ERROR("Warning: pre-OS buffer uses most of vram, "
813 "be aware of gart table overwrite\n");
814 return 0;
815 }
816
817 return size;
818}
819
820
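/*
 * sw_init: initialize the gfx/mm hubs, read the VRAM configuration,
 * size the VM space, register the VM fault interrupts, set the DMA
 * mask, and bring up the memory manager, GART and VM manager.
 */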
822static int gmc_v10_0_sw_init(void *handle)
823{
824 int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
825 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
826
827 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
828 adev->asic_type == CHIP_NAVY_FLOUNDER)
829 gfxhub_v2_1_init(adev);
830 else
831 gfxhub_v2_0_init(adev);
832
833 mmhub_v2_0_init(adev);
834
835 spin_lock_init(&adev->gmc.invalidate_lock);
836
837 if (adev->asic_type == CHIP_SIENNA_CICHLID && amdgpu_emu_mode == 1) {
838 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
839 adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
840 } else {
841 r = amdgpu_atomfirmware_get_vram_info(adev,
842 &vram_width, &vram_type, &vram_vendor);
843 adev->gmc.vram_width = vram_width;
844
845 adev->gmc.vram_type = vram_type;
846 adev->gmc.vram_vendor = vram_vendor;
847 }
848
849 switch (adev->asic_type) {
850 case CHIP_NAVI10:
851 case CHIP_NAVI14:
852 case CHIP_NAVI12:
853 case CHIP_SIENNA_CICHLID:
854 case CHIP_NAVY_FLOUNDER:
855 adev->num_vmhubs = 2;
856 /*
857 * To support 4-level page tables, the VM size is set to
858 * 256 TB (48 bits), the maximum for Navi10/Navi14/Navi12,
859 * with a block size of 512 (9 bits).
860 */
861 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
862 break;
863 default:
864 break;
865 }
866
867 /* This interrupt is the VMC page fault. */
868 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
869 VMC_1_0__SRCID__VM_FAULT,
870 &adev->gmc.vm_fault);
871
872 if (r)
873 return r;
874
875 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
876 UTCL2_1_0__SRCID__FAULT,
877 &adev->gmc.vm_fault);
878 if (r)
879 return r;
880
881 /*
882 * Set the internal MC address mask. This is the maximum address of the
883 * GPU's internal address space.
884 */
885 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
886
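	/* Limit DMA addressing of system memory to 44 bits */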
887 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
888 if (r) {
889 dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
890 return r;
891 }
892
893 if (adev->gmc.xgmi.supported) {
894 r = gfxhub_v2_1_get_xgmi_info(adev);
895 if (r)
896 return r;
897 }
898
899 r = gmc_v10_0_mc_init(adev);
900 if (r)
901 return r;
902
903 adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);
904
905 /* Memory manager */
906 r = amdgpu_bo_init(adev);
907 if (r)
908 return r;
909
910 r = gmc_v10_0_gart_init(adev);
911 if (r)
912 return r;
913
914 /*
915 * number of VMs
916 * VMID 0 is reserved for System
917 * amdgpu graphics/compute will use VMIDs 1-7
918 * amdkfd will use VMIDs 8-15
919 */
920 adev->vm_manager.first_kfd_vmid = 8;
921
922 amdgpu_vm_manager_init(adev);
923
924 return 0;
925}
926
927/**
928 * gmc_v10_0_gart_fini - vm fini callback
929 *
930 * @adev: amdgpu_device pointer
931 *
932 * Tears down the driver GART/VM setup (Navi 1x).
933 */
934static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
935{
936 amdgpu_gart_table_vram_free(adev);
937 amdgpu_gart_fini(adev);
938}
939
940static int gmc_v10_0_sw_fini(void *handle)
941{
942 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
943
944 amdgpu_vm_manager_fini(adev);
945 gmc_v10_0_gart_fini(adev);
946 amdgpu_gem_force_release(adev);
947 amdgpu_bo_fini(adev);
948
949 return 0;
950}
951
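/* No golden register overrides are currently applied for any of these ASICs */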
952static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
953{
954 switch (adev->asic_type) {
955 case CHIP_NAVI10:
956 case CHIP_NAVI14:
957 case CHIP_NAVI12:
958 case CHIP_SIENNA_CICHLID:
959 case CHIP_NAVY_FLOUNDER:
960 break;
961 default:
962 break;
963 }
964}
965
966/**
967 * gmc_v10_0_gart_enable - gart enable
968 *
969 * @adev: amdgpu_device pointer
970 */
971static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
972{
973 int r;
974 bool value;
975 u32 tmp;
976
977 if (adev->gart.bo == NULL) {
978 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
979 return -EINVAL;
980 }
981
982 r = amdgpu_gart_table_vram_pin(adev);
983 if (r)
984 return r;
985
986 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
987 adev->asic_type == CHIP_NAVY_FLOUNDER)
988 r = gfxhub_v2_1_gart_enable(adev);
989 else
990 r = gfxhub_v2_0_gart_enable(adev);
991 if (r)
992 return r;
993
994 r = mmhub_v2_0_gart_enable(adev);
995 if (r)
996 return r;
997
998 tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
999 tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
1000 WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
1001
1002 tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
1003 WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
1004
1005 /* Flush HDP after it is initialized */
1006 adev->nbio.funcs->hdp_flush(adev, NULL);
1007
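	/* Default VM fault handling follows the amdgpu_vm_fault_stop module parameter */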
1008 value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
1010
1011 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
1012 adev->asic_type == CHIP_NAVY_FLOUNDER)
1013 gfxhub_v2_1_set_fault_enable_default(adev, value);
1014 else
1015 gfxhub_v2_0_set_fault_enable_default(adev, value);
1016 mmhub_v2_0_set_fault_enable_default(adev, value);
1017 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
1018 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
1019
1020 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1021 (unsigned int)(adev->gmc.gart_size >> 20),
1022 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
1023
1024 adev->gart.ready = true;
1025
1026 return 0;
1027}
1028
1029static int gmc_v10_0_hw_init(void *handle)
1030{
1031 int r;
1032 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1033
1034 /* The sequence of these two function calls matters. */
1035 gmc_v10_0_init_golden_registers(adev);
1036
1037 r = gmc_v10_0_gart_enable(adev);
1038 if (r)
1039 return r;
1040
1041 return 0;
1042}
1043
1044/**
1045 * gmc_v10_0_gart_disable - gart disable
1046 *
1047 * @adev: amdgpu_device pointer
1048 *
1049 * This disables all VM page tables.
1050 */
1051static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
1052{
1053 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
1054 adev->asic_type == CHIP_NAVY_FLOUNDER)
1055 gfxhub_v2_1_gart_disable(adev);
1056 else
1057 gfxhub_v2_0_gart_disable(adev);
1058 mmhub_v2_0_gart_disable(adev);
1059 amdgpu_gart_table_vram_unpin(adev);
1060}
1061
1062static int gmc_v10_0_hw_fini(void *handle)
1063{
1064 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1065
1066 if (amdgpu_sriov_vf(adev)) {
1067 /* full access mode, so don't touch any GMC register */
1068 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
1069 return 0;
1070 }
1071
1072 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1073 gmc_v10_0_gart_disable(adev);
1074
1075 return 0;
1076}
1077
1078static int gmc_v10_0_suspend(void *handle)
1079{
1080 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1081
1082 gmc_v10_0_hw_fini(adev);
1083
1084 return 0;
1085}
1086
1087static int gmc_v10_0_resume(void *handle)
1088{
1089 int r;
1090 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1091
1092 r = gmc_v10_0_hw_init(adev);
1093 if (r)
1094 return r;
1095
1096 amdgpu_vmid_reset_all(adev);
1097
1098 return 0;
1099}
1100
1101static bool gmc_v10_0_is_idle(void *handle)
1102{
1103 /* MC is always ready in GMC v10. */
1104 return true;
1105}
1106
1107static int gmc_v10_0_wait_for_idle(void *handle)
1108{
1109 /* There is no need to wait for MC idle in GMC v10. */
1110 return 0;
1111}
1112
1113static int gmc_v10_0_soft_reset(void *handle)
1114{
1115 return 0;
1116}
1117
1118static int gmc_v10_0_set_clockgating_state(void *handle,
1119 enum amd_clockgating_state state)
1120{
1121 int r;
1122 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1123
1124 r = mmhub_v2_0_set_clockgating(adev, state);
1125 if (r)
1126 return r;
1127
1128 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
1129 adev->asic_type == CHIP_NAVY_FLOUNDER)
1130 return athub_v2_1_set_clockgating(adev, state);
1131 else
1132 return athub_v2_0_set_clockgating(adev, state);
1133}
1134
1135static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
1136{
1137 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1138
1139 mmhub_v2_0_get_clockgating(adev, flags);
1140
1141 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
1142 adev->asic_type == CHIP_NAVY_FLOUNDER)
1143 athub_v2_1_get_clockgating(adev, flags);
1144 else
1145 athub_v2_0_get_clockgating(adev, flags);
1146}
1147
1148static int gmc_v10_0_set_powergating_state(void *handle,
1149 enum amd_powergating_state state)
1150{
1151 return 0;
1152}
1153
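/* IP block callbacks registered with the amdgpu core */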
1154const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
1155 .name = "gmc_v10_0",
1156 .early_init = gmc_v10_0_early_init,
1157 .late_init = gmc_v10_0_late_init,
1158 .sw_init = gmc_v10_0_sw_init,
1159 .sw_fini = gmc_v10_0_sw_fini,
1160 .hw_init = gmc_v10_0_hw_init,
1161 .hw_fini = gmc_v10_0_hw_fini,
1162 .suspend = gmc_v10_0_suspend,
1163 .resume = gmc_v10_0_resume,
1164 .is_idle = gmc_v10_0_is_idle,
1165 .wait_for_idle = gmc_v10_0_wait_for_idle,
1166 .soft_reset = gmc_v10_0_soft_reset,
1167 .set_clockgating_state = gmc_v10_0_set_clockgating_state,
1168 .set_powergating_state = gmc_v10_0_set_powergating_state,
1169 .get_clockgating_state = gmc_v10_0_get_clockgating_state,
1170};
1171
1172const struct amdgpu_ip_block_version gmc_v10_0_ip_block = {
1174 .type = AMD_IP_BLOCK_TYPE_GMC,
1175 .major = 10,
1176 .minor = 0,
1177 .rev = 0,
1178 .funcs = &gmc_v10_0_ip_funcs,
1179};