/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "vid.h"
#include "vi.h"


static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");

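/*
 * Golden register tables are { register, AND mask, OR value } triplets
 * consumed by amdgpu_program_register_sequence(); a mask of 0xffffffff
 * writes the value directly instead of doing a read-modify-write.
 */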
static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};

static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_program_register_sequence(adev,
						 golden_settings_polaris11_a11,
						 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_program_register_sequence(adev,
						 golden_settings_polaris10_a11,
						 (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_stoney_common,
						 (const u32)ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}

static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
			     struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
			       struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		return 0;
	default: BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because hypervisor will do that
	 * for this adaptor.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
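	/* each io debug entry is an (index, data) pair of 32-bit values */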
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v8_0_mc_stop(adev, &save);
	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
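	/* MC_VM_FB_LOCATION packs vram in 16MB units: [31:16] top, [15:0] base */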
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	gmc_v8_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
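	/* memory bus width in bits = number of channels * bits per channel */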
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on vi */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* unless the user has overridden it, set the gart
	 * size equal to 1024 MB or the amount of vram, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v8_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
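	/*
	 * Example: a valid, readable, writable mapping of the 4K page at
	 * physical address 0x123456000 yields the PTE 0x123456061
	 * (write | read | valid).
	 */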
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
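	/* PAGE_TABLE_BLOCK_SIZE appears to encode log2(4K pages per page-table
	 * block) relative to the 512-entry (2^9) minimum, hence the -9 below
	 */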
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
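	/* one 8-byte PTE per GART page */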
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (VI).
 */
static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v8_0_vm_init - vi vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits vi specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v8_0_vm_fini - vi vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (VI).
 */
static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
{
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
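	/* mc_client is a four-character ASCII tag packed big-endian */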
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gart_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
	else
		return 0;
}

#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if (adev->asic_type == CHIP_FIJI)
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size is 40 bits.
	 */
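	/* amdgpu_vm_size is in GB; one GB is 1 << 18 4K pages, hence the shift */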
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read SRBM_STATUS and check the MC/VMC busy bits */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}
	if (srbm_soft_reset) {
		adev->mc.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->mc.srbm_soft_reset = 0;
		return false;
	}
}

static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->mc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev, &adev->mc.save);
	if (gmc_v8_0_wait_for_idle(adev)) {
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
	}

	return 0;
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->mc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->mc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->mc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev, &adev->mc.save);
	return 0;
}

static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
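	/* bit 0 of VM_CONTEXT1_CNTL2 (CLEAR_PROTECTION_FAULT_STATUS_ADDR)
	 * clears the latched fault address/status
	 */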
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
	}

	return 0;
}

static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/firmware.h>
24#include <drm/drmP.h>
25#include <drm/drm_cache.h>
26#include "amdgpu.h"
27#include "gmc_v8_0.h"
28#include "amdgpu_ucode.h"
29
30#include "gmc/gmc_8_1_d.h"
31#include "gmc/gmc_8_1_sh_mask.h"
32
33#include "bif/bif_5_0_d.h"
34#include "bif/bif_5_0_sh_mask.h"
35
36#include "oss/oss_3_0_d.h"
37#include "oss/oss_3_0_sh_mask.h"
38
39#include "dce/dce_10_0_d.h"
40#include "dce/dce_10_0_sh_mask.h"
41
42#include "vid.h"
43#include "vi.h"
44
45#include "amdgpu_atombios.h"
46
47
48static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
49static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
50static int gmc_v8_0_wait_for_idle(void *handle);
51
52MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
53MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
54MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
55MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
56
57static const u32 golden_settings_tonga_a11[] =
58{
59 mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
60 mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
61 mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
62 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
63 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
64 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
65 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
66};
67
68static const u32 tonga_mgcg_cgcg_init[] =
69{
70 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
71};
72
73static const u32 golden_settings_fiji_a10[] =
74{
75 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
76 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
77 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
78 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
79};
80
81static const u32 fiji_mgcg_cgcg_init[] =
82{
83 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
84};
85
86static const u32 golden_settings_polaris11_a11[] =
87{
88 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
89 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
90 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
91 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
92};
93
94static const u32 golden_settings_polaris10_a11[] =
95{
96 mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
97 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
98 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
99 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
100 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
101};
102
103static const u32 cz_mgcg_cgcg_init[] =
104{
105 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
106};
107
108static const u32 stoney_mgcg_cgcg_init[] =
109{
110 mmATC_MISC_CG, 0xffffffff, 0x000c0200,
111 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
112};
113
114static const u32 golden_settings_stoney_common[] =
115{
116 mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
117 mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
118};
119
120static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
121{
122 switch (adev->asic_type) {
123 case CHIP_FIJI:
124 amdgpu_device_program_register_sequence(adev,
125 fiji_mgcg_cgcg_init,
126 ARRAY_SIZE(fiji_mgcg_cgcg_init));
127 amdgpu_device_program_register_sequence(adev,
128 golden_settings_fiji_a10,
129 ARRAY_SIZE(golden_settings_fiji_a10));
130 break;
131 case CHIP_TONGA:
132 amdgpu_device_program_register_sequence(adev,
133 tonga_mgcg_cgcg_init,
134 ARRAY_SIZE(tonga_mgcg_cgcg_init));
135 amdgpu_device_program_register_sequence(adev,
136 golden_settings_tonga_a11,
137 ARRAY_SIZE(golden_settings_tonga_a11));
138 break;
139 case CHIP_POLARIS11:
140 case CHIP_POLARIS12:
141 amdgpu_device_program_register_sequence(adev,
142 golden_settings_polaris11_a11,
143 ARRAY_SIZE(golden_settings_polaris11_a11));
144 break;
145 case CHIP_POLARIS10:
146 amdgpu_device_program_register_sequence(adev,
147 golden_settings_polaris10_a11,
148 ARRAY_SIZE(golden_settings_polaris10_a11));
149 break;
150 case CHIP_CARRIZO:
151 amdgpu_device_program_register_sequence(adev,
152 cz_mgcg_cgcg_init,
153 ARRAY_SIZE(cz_mgcg_cgcg_init));
154 break;
155 case CHIP_STONEY:
156 amdgpu_device_program_register_sequence(adev,
157 stoney_mgcg_cgcg_init,
158 ARRAY_SIZE(stoney_mgcg_cgcg_init));
159 amdgpu_device_program_register_sequence(adev,
160 golden_settings_stoney_common,
161 ARRAY_SIZE(golden_settings_stoney_common));
162 break;
163 default:
164 break;
165 }
166}
167
168static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
169{
170 u32 blackout;
171
172 gmc_v8_0_wait_for_idle(adev);
173
174 blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
175 if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
176 /* Block CPU access */
177 WREG32(mmBIF_FB_EN, 0);
178 /* blackout the MC */
179 blackout = REG_SET_FIELD(blackout,
180 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
181 WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
182 }
183 /* wait for the MC to settle */
184 udelay(100);
185}
186
187static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
188{
189 u32 tmp;
190
191 /* unblackout the MC */
192 tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
193 tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
194 WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
195 /* allow CPU access */
196 tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
197 tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
198 WREG32(mmBIF_FB_EN, tmp);
199}
200
201/**
202 * gmc_v8_0_init_microcode - load ucode images from disk
203 *
204 * @adev: amdgpu_device pointer
205 *
206 * Use the firmware interface to load the ucode images into
207 * the driver (not loaded into hw).
208 * Returns 0 on success, error on failure.
209 */
210static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
211{
212 const char *chip_name;
213 char fw_name[30];
214 int err;
215
216 DRM_DEBUG("\n");
217
218 switch (adev->asic_type) {
219 case CHIP_TONGA:
220 chip_name = "tonga";
221 break;
222 case CHIP_POLARIS11:
223 chip_name = "polaris11";
224 break;
225 case CHIP_POLARIS10:
226 chip_name = "polaris10";
227 break;
228 case CHIP_POLARIS12:
229 chip_name = "polaris12";
230 break;
231 case CHIP_FIJI:
232 case CHIP_CARRIZO:
233 case CHIP_STONEY:
234 return 0;
235 default: BUG();
236 }
237
238 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
239 err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
240 if (err)
241 goto out;
242 err = amdgpu_ucode_validate(adev->gmc.fw);
243
244out:
245 if (err) {
246 pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
247 release_firmware(adev->gmc.fw);
248 adev->gmc.fw = NULL;
249 }
250 return err;
251}
252
253/**
254 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
255 *
256 * @adev: amdgpu_device pointer
257 *
258 * Load the GDDR MC ucode into the hw (CIK).
259 * Returns 0 on success, error on failure.
260 */
261static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
262{
263 const struct mc_firmware_header_v1_0 *hdr;
264 const __le32 *fw_data = NULL;
265 const __le32 *io_mc_regs = NULL;
266 u32 running;
267 int i, ucode_size, regs_size;
268
269 /* Skip MC ucode loading on SR-IOV capable boards.
270 * vbios does this for us in asic_init in that case.
271 * Skip MC ucode loading on VF, because hypervisor will do that
272 * for this adaptor.
273 */
274 if (amdgpu_sriov_bios(adev))
275 return 0;
276
277 if (!adev->gmc.fw)
278 return -EINVAL;
279
280 hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
281 amdgpu_ucode_print_mc_hdr(&hdr->header);
282
283 adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
284 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
285 io_mc_regs = (const __le32 *)
286 (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
287 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
288 fw_data = (const __le32 *)
289 (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
290
291 running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
292
293 if (running == 0) {
294 /* reset the engine and set to writable */
295 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
296 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
297
298 /* load mc io regs */
299 for (i = 0; i < regs_size; i++) {
300 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
301 WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
302 }
303 /* load the MC ucode */
304 for (i = 0; i < ucode_size; i++)
305 WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
306
307 /* put the engine back into the active state */
308 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
309 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
310 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
311
312 /* wait for training to complete */
313 for (i = 0; i < adev->usec_timeout; i++) {
314 if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
315 MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
316 break;
317 udelay(1);
318 }
319 for (i = 0; i < adev->usec_timeout; i++) {
320 if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
321 MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
322 break;
323 udelay(1);
324 }
325 }
326
327 return 0;
328}
329
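/**
 * gmc_v8_0_polaris_mc_load_microcode - load polaris MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */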
330static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
331{
332 const struct mc_firmware_header_v1_0 *hdr;
333 const __le32 *fw_data = NULL;
334 const __le32 *io_mc_regs = NULL;
335 u32 data, vbios_version;
336 int i, ucode_size, regs_size;
337
338 /* Skip MC ucode loading on SR-IOV capable boards;
339 * the vbios does this for us in asic_init in that case.
340 * Likewise skip it on VFs, where the hypervisor loads the
341 * ucode for this adapter.
342 */
343 if (amdgpu_sriov_bios(adev))
344 return 0;
345
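/* MC_SEQ_IO_DEBUG index 0x9F reports the MC ucode version already
 * programmed by the vbios; a value of 0 is taken here to mean there
 * is nothing for the driver to replace (interpretation assumed).
 */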
346 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
347 data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
348 vbios_version = data & 0xf;
349
350 if (vbios_version == 0)
351 return 0;
352
353 if (!adev->gmc.fw)
354 return -EINVAL;
355
356 hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
357 amdgpu_ucode_print_mc_hdr(&hdr->header);
358
359 adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
360 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
361 io_mc_regs = (const __le32 *)
362 (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
363 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
364 fw_data = (const __le32 *)
365 (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
366
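/* Clear bit 6 of MC_SEQ_MISC0 before loading; the wait loop at the
 * end of this function polls bit 7 of the same register as the
 * training-done indication (bit meanings assumed, not documented).
 */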
367 data = RREG32(mmMC_SEQ_MISC0);
368 data &= ~(0x40);
369 WREG32(mmMC_SEQ_MISC0, data);
370
371 /* load mc io regs */
372 for (i = 0; i < regs_size; i++) {
373 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
374 WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
375 }
376
377 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
378 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
379
380 /* load the MC ucode */
381 for (i = 0; i < ucode_size; i++)
382 WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
383
384 /* put the engine back into the active state */
385 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
386 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
387 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
388
389 /* wait for training to complete */
390 for (i = 0; i < adev->usec_timeout; i++) {
391 data = RREG32(mmMC_SEQ_MISC0);
392 if (data & 0x80)
393 break;
394 udelay(1);
395 }
396
397 return 0;
398}
399
400static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
401 struct amdgpu_gmc *mc)
402{
403 u64 base = 0;
404
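/* MC_VM_FB_LOCATION stores the FB base in its low 16 bits, in units
 * of 16MB, hence the shift by 24 below.
 */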
405 if (!amdgpu_sriov_vf(adev))
406 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
407 base <<= 24;
408
409 amdgpu_device_vram_location(adev, &adev->gmc, base);
410 amdgpu_device_gart_location(adev, mc);
411}
412
413/**
414 * gmc_v8_0_mc_program - program the GPU memory controller
415 *
416 * @adev: amdgpu_device pointer
417 *
418 * Set the location of vram, gart, and AGP in the GPU's
419 * physical address space (VI).
420 */
421static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
422{
423 u32 tmp;
424 int i, j;
425
426 /* Initialize HDP */
427 for (i = 0, j = 0; i < 32; i++, j += 0x6) {
428 WREG32((0xb05 + j), 0x00000000);
429 WREG32((0xb06 + j), 0x00000000);
430 WREG32((0xb07 + j), 0x00000000);
431 WREG32((0xb08 + j), 0x00000000);
432 WREG32((0xb09 + j), 0x00000000);
433 }
434 WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
435
436 if (gmc_v8_0_wait_for_idle((void *)adev))
437 dev_warn(adev->dev, "Wait for MC idle timed out!\n");
439 if (adev->mode_info.num_crtc) {
440 /* Lockout access through VGA aperture*/
441 tmp = RREG32(mmVGA_HDP_CONTROL);
442 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
443 WREG32(mmVGA_HDP_CONTROL, tmp);
444
445 /* disable VGA render */
446 tmp = RREG32(mmVGA_RENDER_CONTROL);
447 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
448 WREG32(mmVGA_RENDER_CONTROL, tmp);
449 }
450 /* Update configuration */
451 WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
452 adev->gmc.vram_start >> 12);
453 WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
454 adev->gmc.vram_end >> 12);
455 WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
456 adev->vram_scratch.gpu_addr >> 12);
457
458 if (amdgpu_sriov_vf(adev)) {
459 tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
460 tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
461 WREG32(mmMC_VM_FB_LOCATION, tmp);
462 /* XXX double check these! */
463 WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
464 WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
465 WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
466 }
467
468 WREG32(mmMC_VM_AGP_BASE, 0);
469 WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
470 WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
471 if (gmc_v8_0_wait_for_idle((void *)adev))
472 dev_warn(adev->dev, "Wait for MC idle timed out!\n");
474
475 WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
476
477 tmp = RREG32(mmHDP_MISC_CNTL);
478 tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
479 WREG32(mmHDP_MISC_CNTL, tmp);
480
481 tmp = RREG32(mmHDP_HOST_PATH_CNTL);
482 WREG32(mmHDP_HOST_PATH_CNTL, tmp);
483}
484
485/**
486 * gmc_v8_0_mc_init - initialize the memory controller driver params
487 *
488 * @adev: amdgpu_device pointer
489 *
490 * Look up the amount of vram, vram width, and decide how to place
491 * vram and gart within the GPU's physical address space (VI).
492 * Returns 0 for success.
493 */
494static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
495{
496 int r;
497
498 adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
499 if (!adev->gmc.vram_width) {
500 u32 tmp;
501 int chansize, numchan;
502
503 /* Get VRAM information */
504 tmp = RREG32(mmMC_ARB_RAMCFG);
505 if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
506 chansize = 64;
507 else
508 chansize = 32;
510 tmp = RREG32(mmMC_SHARED_CHMAP);
511 switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
512 case 0:
513 default:
514 numchan = 1;
515 break;
516 case 1:
517 numchan = 2;
518 break;
519 case 2:
520 numchan = 4;
521 break;
522 case 3:
523 numchan = 8;
524 break;
525 case 4:
526 numchan = 3;
527 break;
528 case 5:
529 numchan = 6;
530 break;
531 case 6:
532 numchan = 10;
533 break;
534 case 7:
535 numchan = 12;
536 break;
537 case 8:
538 numchan = 16;
539 break;
540 }
541 adev->gmc.vram_width = numchan * chansize;
542 }
543 /* size in MB on vi */
544 adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
545 adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
546
547 if (!(adev->flags & AMD_IS_APU)) {
548 r = amdgpu_device_resize_fb_bar(adev);
549 if (r)
550 return r;
551 }
552 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
553 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
554
555#ifdef CONFIG_X86_64
556 if (adev->flags & AMD_IS_APU) {
557 adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
558 adev->gmc.aper_size = adev->gmc.real_vram_size;
559 }
560#endif
561
562 /* In case the PCI BAR is larger than the actual amount of vram */
563 adev->gmc.visible_vram_size = adev->gmc.aper_size;
564 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
565 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
566
567 /* set the gart size */
568 if (amdgpu_gart_size == -1) {
569 switch (adev->asic_type) {
570 case CHIP_POLARIS11: /* all engines support GPUVM */
571 case CHIP_POLARIS10: /* all engines support GPUVM */
572 case CHIP_POLARIS12: /* all engines support GPUVM */
573 default:
574 adev->gmc.gart_size = 256ULL << 20;
575 break;
576 case CHIP_TONGA: /* UVD, VCE do not support GPUVM */
577 case CHIP_FIJI: /* UVD, VCE do not support GPUVM */
578 case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
579 case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */
580 adev->gmc.gart_size = 1024ULL << 20;
581 break;
582 }
583 } else {
584 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
585 }
586
587 gmc_v8_0_vram_gtt_location(adev, &adev->gmc);
588
589 return 0;
590}
591
592/*
593 * GART
594 * VMID 0 holds the physical GPU addresses used by the kernel.
595 * VMIDs 1-15 are used for userspace clients and are handled
596 * by the amdgpu vm/hsa code.
597 */
598
599/**
600 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
601 *
602 * @adev: amdgpu_device pointer
603 * @vmid: vm instance to flush
604 *
605 * Flush the TLB for the requested page table (VI).
606 */
607static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
608 uint32_t vmid)
609{
610 /* bits 0-15 are the VM contexts0-15 */
611 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
612}
613
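/*
 * gmc_v8_0_emit_flush_gpu_tlb - flush the TLB from within a ring
 *
 * Emits register writes on @ring that set the page table base address
 * for @vmid and then request a TLB invalidation for that VM context;
 * the ring-based counterpart of gmc_v8_0_flush_gpu_tlb() above.
 */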
614static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
615 unsigned vmid, uint64_t pd_addr)
616{
617 uint32_t reg;
618
619 if (vmid < 8)
620 reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
621 else
622 reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
623 amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
624
625 /* bits 0-15 are the VM contexts0-15 */
626 amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
627
628 return pd_addr;
629}
630
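/* Emit a ring write of the vmid->pasid mapping into the IH block's LUT */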
631static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
632 unsigned pasid)
633{
634 amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
635}
636
637/**
638 * gmc_v8_0_set_pte_pde - update the page tables using MMIO
639 *
640 * @adev: amdgpu_device pointer
641 * @cpu_pt_addr: cpu address of the page table
642 * @gpu_page_idx: entry in the page table to update
643 * @addr: dst addr to write into pte/pde
644 * @flags: access flags
645 *
646 * Update the page tables using the CPU.
647 */
648static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
649 uint32_t gpu_page_idx, uint64_t addr,
650 uint64_t flags)
651{
652 void __iomem *ptr = (void *)cpu_pt_addr;
653 uint64_t value;
654
655 /*
656 * PTE format on VI:
657 * 63:40 reserved
658 * 39:12 4k physical page base address
659 * 11:7 fragment
660 * 6 write
661 * 5 read
662 * 4 exe
663 * 3 reserved
664 * 2 snooped
665 * 1 system
666 * 0 valid
667 *
668 * PDE format on VI:
669 * 63:59 block fragment size
670 * 58:40 reserved
671 * 39:1 physical base address of PTE
672 * bits 5:1 must be 0.
673 * 0 valid
674 */
675 value = addr & 0x000000FFFFFFF000ULL;
676 value |= flags;
677 writeq(value, ptr + (gpu_page_idx * 8));
678
679 return 0;
680}
681
682static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
683 uint32_t flags)
684{
685 uint64_t pte_flag = 0;
686
687 if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
688 pte_flag |= AMDGPU_PTE_EXECUTABLE;
689 if (flags & AMDGPU_VM_PAGE_READABLE)
690 pte_flag |= AMDGPU_PTE_READABLE;
691 if (flags & AMDGPU_VM_PAGE_WRITEABLE)
692 pte_flag |= AMDGPU_PTE_WRITEABLE;
693 if (flags & AMDGPU_VM_PAGE_PRT)
694 pte_flag |= AMDGPU_PTE_PRT;
695
696 return pte_flag;
697}
698
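/* On VI the PDE needs no conversion; just assert that the address is
 * 4K aligned and fits in the 40-bit MC address space (bits 39:12).
 */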
699static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
700 uint64_t *addr, uint64_t *flags)
701{
702 BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
703}
704
705/**
706 * gmc_v8_0_set_fault_enable_default - update VM fault handling
707 *
708 * @adev: amdgpu_device pointer
709 * @value: true redirects VM faults to the default page
710 */
711static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
712 bool value)
713{
714 u32 tmp;
715
716 tmp = RREG32(mmVM_CONTEXT1_CNTL);
717 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
718 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
719 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
720 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
721 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
722 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
723 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
724 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
725 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
726 READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
727 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
728 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
729 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
730 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
731 WREG32(mmVM_CONTEXT1_CNTL, tmp);
732}
733
734/**
735 * gmc_v8_0_set_prt - set PRT VM fault
736 *
737 * @adev: amdgpu_device pointer
738 * @enable: enable/disable VM fault handling for PRT
739 */
740static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
741{
742 u32 tmp;
743
744 if (enable && !adev->gmc.prt_warning) {
745 dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
746 adev->gmc.prt_warning = true;
747 }
748
749 tmp = RREG32(mmVM_PRT_CNTL);
750 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
751 CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
752 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
753 CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
754 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
755 TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
756 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
757 TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
758 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
759 L2_CACHE_STORE_INVALID_ENTRIES, enable);
760 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
761 L1_TLB_STORE_INVALID_ENTRIES, enable);
762 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
763 MASK_PDE0_FAULT, enable);
764 WREG32(mmVM_PRT_CNTL, tmp);
765
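/* When enabling, point all four PRT apertures at the usable VA range,
 * excluding the reserved pages at either end of the address space;
 * when disabling, collapse them to an empty range (low > high).
 */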
766 if (enable) {
767 uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
768 uint32_t high = adev->vm_manager.max_pfn -
769 (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
770
771 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
772 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
773 WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
774 WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
775 WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
776 WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
777 WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
778 WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
779 } else {
780 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
781 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
782 WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
783 WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
784 WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
785 WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
786 WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
787 WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
788 }
789}
790
791/**
792 * gmc_v8_0_gart_enable - gart enable
793 *
794 * @adev: amdgpu_device pointer
795 *
796 * This sets up the TLBs, programs the page tables for VMID0,
797 * sets up the hw for VMIDs 1-15 which are allocated on
798 * demand, and sets up the global locations for the LDS, GDS,
799 * and GPUVM for FSA64 clients (VI).
800 * Returns 0 for success, errors for failure.
801 */
802static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
803{
804 int r, i;
805 u32 tmp, field;
806
807 if (adev->gart.robj == NULL) {
808 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
809 return -EINVAL;
810 }
811 r = amdgpu_gart_table_vram_pin(adev);
812 if (r)
813 return r;
814 /* Setup TLB control */
815 tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
816 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
817 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
818 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
819 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
820 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
821 WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
822 /* Setup L2 cache */
823 tmp = RREG32(mmVM_L2_CNTL);
824 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
825 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
826 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
827 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
828 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
829 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
830 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
831 WREG32(mmVM_L2_CNTL, tmp);
832 tmp = RREG32(mmVM_L2_CNTL2);
833 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
834 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
835 WREG32(mmVM_L2_CNTL2, tmp);
836
837 field = adev->vm_manager.fragment_size;
838 tmp = RREG32(mmVM_L2_CNTL3);
839 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
840 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
841 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
842 WREG32(mmVM_L2_CNTL3, tmp);
843 /* XXX: set to enable PTE/PDE in system memory */
844 tmp = RREG32(mmVM_L2_CNTL4);
845 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
846 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
847 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
848 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
849 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
850 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
851 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
852 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
853 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
854 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
855 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
856 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
857 WREG32(mmVM_L2_CNTL4, tmp);
858 /* setup context0 */
859 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
860 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
861 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
862 WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
863 (u32)(adev->dummy_page_addr >> 12));
864 WREG32(mmVM_CONTEXT0_CNTL2, 0);
865 tmp = RREG32(mmVM_CONTEXT0_CNTL);
866 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
867 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
868 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
869 WREG32(mmVM_CONTEXT0_CNTL, tmp);
870
871 WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
872 WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
873 WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);
874
875 /* empty context1-15 */
876 /* FIXME: start with 4GB; once a two-level page table is in use,
877 * switch to the full VM size space.
878 */
879 /* set vm size, must be a multiple of 4 */
880 WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
881 WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
882 for (i = 1; i < 16; i++) {
883 if (i < 8)
884 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
885 adev->gart.table_addr >> 12);
886 else
887 WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
888 adev->gart.table_addr >> 12);
889 }
890
891 /* enable context1-15 */
892 WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
893 (u32)(adev->dummy_page_addr >> 12));
894 WREG32(mmVM_CONTEXT1_CNTL2, 4);
895 tmp = RREG32(mmVM_CONTEXT1_CNTL);
896 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
897 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
898 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
899 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
900 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
901 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
902 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
903 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
904 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
905 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
906 adev->vm_manager.block_size - 9);
907 WREG32(mmVM_CONTEXT1_CNTL, tmp);
908 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
909 gmc_v8_0_set_fault_enable_default(adev, false);
910 else
911 gmc_v8_0_set_fault_enable_default(adev, true);
912
913 gmc_v8_0_flush_gpu_tlb(adev, 0);
914 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
915 (unsigned)(adev->gmc.gart_size >> 20),
916 (unsigned long long)adev->gart.table_addr);
917 adev->gart.ready = true;
918 return 0;
919}
920
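/**
 * gmc_v8_0_gart_init - allocate the GART page table
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the common GART structure and allocate the page table
 * in VRAM (one 8-byte PTE per GPU page).
 * Returns 0 for success, error for failure.
 */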
921static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
922{
923 int r;
924
925 if (adev->gart.robj) {
926 WARN(1, "VI PCIE GART already initialized\n");
927 return 0;
928 }
929 /* Initialize common gart structure */
930 r = amdgpu_gart_init(adev);
931 if (r)
932 return r;
933 adev->gart.table_size = adev->gart.num_gpu_pages * 8;
934 adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
935 return amdgpu_gart_table_vram_alloc(adev);
936}
937
938/**
939 * gmc_v8_0_gart_disable - gart disable
940 *
941 * @adev: amdgpu_device pointer
942 *
943 * This disables all VM page tables (VI).
944 */
945static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
946{
947 u32 tmp;
948
949 /* Disable all tables */
950 WREG32(mmVM_CONTEXT0_CNTL, 0);
951 WREG32(mmVM_CONTEXT1_CNTL, 0);
952 /* Setup TLB control */
953 tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
954 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
955 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
956 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
957 WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
958 /* Setup L2 cache */
959 tmp = RREG32(mmVM_L2_CNTL);
960 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
961 WREG32(mmVM_L2_CNTL, tmp);
962 WREG32(mmVM_L2_CNTL2, 0);
963 amdgpu_gart_table_vram_unpin(adev);
964}
965
966/**
967 * gmc_v8_0_gart_fini - vm fini callback
968 *
969 * @adev: amdgpu_device pointer
970 *
971 * Tears down the driver GART/VM setup (VI).
972 */
973static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
974{
975 amdgpu_gart_table_vram_free(adev);
976 amdgpu_gart_fini(adev);
977}
978
979/**
980 * gmc_v8_0_vm_decode_fault - print human readable fault info
981 *
982 * @adev: amdgpu_device pointer
983 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
984 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: process address space ID of the faulting process
985 *
986 * Print human readable fault information (VI).
987 */
988static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
989 u32 addr, u32 mc_client, unsigned pasid)
990{
991 u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
992 u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
993 PROTECTIONS);
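/* mc_client packs a four-character ASCII client tag, MSB first */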
994 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
995 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
996 u32 mc_id;
997
998 mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
999 MEMORY_CLIENT_ID);
1000
1001 dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
1002 protections, vmid, pasid, addr,
1003 REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1004 MEMORY_CLIENT_RW) ?
1005 "write" : "read", block, mc_client, mc_id);
1006}
1007
1008static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
1009{
1010 switch (mc_seq_vram_type) {
1011 case MC_SEQ_MISC0__MT__GDDR1:
1012 return AMDGPU_VRAM_TYPE_GDDR1;
1013 case MC_SEQ_MISC0__MT__DDR2:
1014 return AMDGPU_VRAM_TYPE_DDR2;
1015 case MC_SEQ_MISC0__MT__GDDR3:
1016 return AMDGPU_VRAM_TYPE_GDDR3;
1017 case MC_SEQ_MISC0__MT__GDDR4:
1018 return AMDGPU_VRAM_TYPE_GDDR4;
1019 case MC_SEQ_MISC0__MT__GDDR5:
1020 return AMDGPU_VRAM_TYPE_GDDR5;
1021 case MC_SEQ_MISC0__MT__HBM:
1022 return AMDGPU_VRAM_TYPE_HBM;
1023 case MC_SEQ_MISC0__MT__DDR3:
1024 return AMDGPU_VRAM_TYPE_DDR3;
1025 default:
1026 return AMDGPU_VRAM_TYPE_UNKNOWN;
1027 }
1028}
1029
1030static int gmc_v8_0_early_init(void *handle)
1031{
1032 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1033
1034 gmc_v8_0_set_gmc_funcs(adev);
1035 gmc_v8_0_set_irq_funcs(adev);
1036
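/* Two consecutive 4GB apertures starting at 0x2000000000000000 for
 * shared and private address translation; the values mirror those
 * used by other GFX8 parts (assumption).
 */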
1037 adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1038 adev->gmc.shared_aperture_end =
1039 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1040 adev->gmc.private_aperture_start =
1041 adev->gmc.shared_aperture_end + 1;
1042 adev->gmc.private_aperture_end =
1043 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1044
1045 return 0;
1046}
1047
1048static int gmc_v8_0_late_init(void *handle)
1049{
1050 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1051
1052 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
1053 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1054 else
1055 return 0;
1056}
1057
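/* Fiji exposes MC_SEQ_MISC0 at a different register offset */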
1058#define mmMC_SEQ_MISC0_FIJI 0xA71
1059
1060static int gmc_v8_0_sw_init(void *handle)
1061{
1062 int r;
1063 int dma_bits;
1064 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1065
1066 if (adev->flags & AMD_IS_APU) {
1067 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
1068 } else {
1069 u32 tmp;
1070
1071 if (adev->asic_type == CHIP_FIJI)
1072 tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
1073 else
1074 tmp = RREG32(mmMC_SEQ_MISC0);
1075 tmp &= MC_SEQ_MISC0__MT__MASK;
1076 adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
1077 }
1078
1079 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
1080 if (r)
1081 return r;
1082
1083 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
1084 if (r)
1085 return r;
1086
1087 /* Adjust VM size here.
1088 * Max GPUVM address space size on VI is 40 bits.
1089 */
1091 amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
1092
1093 /* Set the internal MC address mask
1094 * This is the max address of the GPU's
1095 * internal address space.
1096 */
1097 adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1098
1099 adev->gmc.stolen_size = 256 * 1024;
1100
1101 /* set DMA mask + need_dma32 flags.
1102 * PCIE - can handle 40-bits.
1103 * IGP - can handle 40-bits.
1104 * PCI - dma32 for legacy pci gart, 40 bits on newer asics.
1105 */
1106 adev->need_dma32 = false;
1107 dma_bits = adev->need_dma32 ? 32 : 40;
1108 r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
1109 if (r) {
1110 adev->need_dma32 = true;
1111 dma_bits = 32;
1112 pr_warn("amdgpu: No suitable DMA available\n");
1113 }
1114 r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
1115 if (r) {
1116 pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
1117 pr_warn("amdgpu: No coherent DMA available\n");
1118 }
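/* use swiotlb bounce buffers if system memory extends beyond the
 * GPU's DMA addressing range */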
1119 adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
1120
1121 r = gmc_v8_0_init_microcode(adev);
1122 if (r) {
1123 DRM_ERROR("Failed to load mc firmware!\n");
1124 return r;
1125 }
1126
1127 r = gmc_v8_0_mc_init(adev);
1128 if (r)
1129 return r;
1130
1131 /* Memory manager */
1132 r = amdgpu_bo_init(adev);
1133 if (r)
1134 return r;
1135
1136 r = gmc_v8_0_gart_init(adev);
1137 if (r)
1138 return r;
1139
1140 /*
1141 * number of VMs
1142 * VMID 0 is reserved for System
1143 * amdgpu graphics/compute will use VMIDs 1-7
1144 * amdkfd will use VMIDs 8-15
1145 */
1146 adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
1147 amdgpu_vm_manager_init(adev);
1148
1149 /* base offset of vram pages */
1150 if (adev->flags & AMD_IS_APU) {
1151 u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
1152
1153 tmp <<= 22;
1154 adev->vm_manager.vram_base_offset = tmp;
1155 } else {
1156 adev->vm_manager.vram_base_offset = 0;
1157 }
1158
1159 return 0;
1160}
1161
1162static int gmc_v8_0_sw_fini(void *handle)
1163{
1164 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1165
1166 amdgpu_gem_force_release(adev);
1167 amdgpu_vm_manager_fini(adev);
1168 gmc_v8_0_gart_fini(adev);
1169 amdgpu_bo_fini(adev);
1170 release_firmware(adev->gmc.fw);
1171 adev->gmc.fw = NULL;
1172
1173 return 0;
1174}
1175
1176static int gmc_v8_0_hw_init(void *handle)
1177{
1178 int r;
1179 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1180
1181 gmc_v8_0_init_golden_registers(adev);
1182
1183 gmc_v8_0_mc_program(adev);
1184
1185 if (adev->asic_type == CHIP_TONGA) {
1186 r = gmc_v8_0_tonga_mc_load_microcode(adev);
1187 if (r) {
1188 DRM_ERROR("Failed to load MC firmware!\n");
1189 return r;
1190 }
1191 } else if (adev->asic_type == CHIP_POLARIS11 ||
1192 adev->asic_type == CHIP_POLARIS10 ||
1193 adev->asic_type == CHIP_POLARIS12) {
1194 r = gmc_v8_0_polaris_mc_load_microcode(adev);
1195 if (r) {
1196 DRM_ERROR("Failed to load MC firmware!\n");
1197 return r;
1198 }
1199 }
1200
1201 r = gmc_v8_0_gart_enable(adev);
1202 if (r)
1203 return r;
1204
1205 return 0;
1206}
1207
1208static int gmc_v8_0_hw_fini(void *handle)
1209{
1210 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1211
1212 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1213 gmc_v8_0_gart_disable(adev);
1214
1215 return 0;
1216}
1217
1218static int gmc_v8_0_suspend(void *handle)
1219{
1220 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1221
1222 gmc_v8_0_hw_fini(adev);
1223
1224 return 0;
1225}
1226
1227static int gmc_v8_0_resume(void *handle)
1228{
1229 int r;
1230 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1231
1232 r = gmc_v8_0_hw_init(adev);
1233 if (r)
1234 return r;
1235
1236 amdgpu_vmid_reset_all(adev);
1237
1238 return 0;
1239}
1240
1241static bool gmc_v8_0_is_idle(void *handle)
1242{
1243 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1244 u32 tmp = RREG32(mmSRBM_STATUS);
1245
1246 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1247 SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
1248 return false;
1249
1250 return true;
1251}
1252
1253static int gmc_v8_0_wait_for_idle(void *handle)
1254{
1255 unsigned i;
1256 u32 tmp;
1257 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1258
1259 for (i = 0; i < adev->usec_timeout; i++) {
1260 /* read SRBM_STATUS */
1261 tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
1262 SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1263 SRBM_STATUS__MCC_BUSY_MASK |
1264 SRBM_STATUS__MCD_BUSY_MASK |
1265 SRBM_STATUS__VMC_BUSY_MASK |
1266 SRBM_STATUS__VMC1_BUSY_MASK);
1267 if (!tmp)
1268 return 0;
1269 udelay(1);
1270 }
1271 return -ETIMEDOUT;
1272}
1274
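/* Record which SRBM soft resets are needed (VMC, plus MC on dGPUs
 * only) based on the busy bits in SRBM_STATUS; the reset itself is
 * applied later in gmc_v8_0_soft_reset().
 */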
1275static bool gmc_v8_0_check_soft_reset(void *handle)
1276{
1277 u32 srbm_soft_reset = 0;
1278 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1279 u32 tmp = RREG32(mmSRBM_STATUS);
1280
1281 if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
1282 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1283 SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
1284
1285 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1286 SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1287 if (!(adev->flags & AMD_IS_APU))
1288 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1289 SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1290 }
1291 if (srbm_soft_reset) {
1292 adev->gmc.srbm_soft_reset = srbm_soft_reset;
1293 return true;
1294 } else {
1295 adev->gmc.srbm_soft_reset = 0;
1296 return false;
1297 }
1298}
1299
1300static int gmc_v8_0_pre_soft_reset(void *handle)
1301{
1302 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1303
1304 if (!adev->gmc.srbm_soft_reset)
1305 return 0;
1306
1307 gmc_v8_0_mc_stop(adev);
1308 if (gmc_v8_0_wait_for_idle(adev))
1309 dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
1311
1312 return 0;
1313}
1314
1315static int gmc_v8_0_soft_reset(void *handle)
1316{
1317 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1318 u32 srbm_soft_reset;
1319
1320 if (!adev->gmc.srbm_soft_reset)
1321 return 0;
1322 srbm_soft_reset = adev->gmc.srbm_soft_reset;
1323
1324 if (srbm_soft_reset) {
1325 u32 tmp;
1326
1327 tmp = RREG32(mmSRBM_SOFT_RESET);
1328 tmp |= srbm_soft_reset;
1329 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1330 WREG32(mmSRBM_SOFT_RESET, tmp);
1331 tmp = RREG32(mmSRBM_SOFT_RESET);
1332
1333 udelay(50);
1334
1335 tmp &= ~srbm_soft_reset;
1336 WREG32(mmSRBM_SOFT_RESET, tmp);
1337 tmp = RREG32(mmSRBM_SOFT_RESET);
1338
1339 /* Wait a little for things to settle down */
1340 udelay(50);
1341 }
1342
1343 return 0;
1344}
1345
1346static int gmc_v8_0_post_soft_reset(void *handle)
1347{
1348 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1349
1350 if (!adev->gmc.srbm_soft_reset)
1351 return 0;
1352
1353 gmc_v8_0_mc_resume(adev);
1354 return 0;
1355}
1356
1357static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
1358 struct amdgpu_irq_src *src,
1359 unsigned type,
1360 enum amdgpu_interrupt_state state)
1361{
1362 u32 tmp;
1363 u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1364 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1365 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1366 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1367 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1368 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1369 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1370
1371 switch (state) {
1372 case AMDGPU_IRQ_STATE_DISABLE:
1373 /* system context */
1374 tmp = RREG32(mmVM_CONTEXT0_CNTL);
1375 tmp &= ~bits;
1376 WREG32(mmVM_CONTEXT0_CNTL, tmp);
1377 /* VMs */
1378 tmp = RREG32(mmVM_CONTEXT1_CNTL);
1379 tmp &= ~bits;
1380 WREG32(mmVM_CONTEXT1_CNTL, tmp);
1381 break;
1382 case AMDGPU_IRQ_STATE_ENABLE:
1383 /* system context */
1384 tmp = RREG32(mmVM_CONTEXT0_CNTL);
1385 tmp |= bits;
1386 WREG32(mmVM_CONTEXT0_CNTL, tmp);
1387 /* VMs */
1388 tmp = RREG32(mmVM_CONTEXT1_CNTL);
1389 tmp |= bits;
1390 WREG32(mmVM_CONTEXT1_CNTL, tmp);
1391 break;
1392 default:
1393 break;
1394 }
1395
1396 return 0;
1397}
1398
1399static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1400 struct amdgpu_irq_src *source,
1401 struct amdgpu_iv_entry *entry)
1402{
1403 u32 addr, status, mc_client;
1404
1405 if (amdgpu_sriov_vf(adev)) {
1406 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1407 entry->src_id, entry->src_data[0]);
1408 dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
1409 return 0;
1410 }
1411
1412 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1413 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1414 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1415 /* reset addr and status */
1416 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1417
1418 if (!addr && !status)
1419 return 0;
1420
1421 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1422 gmc_v8_0_set_fault_enable_default(adev, false);
1423
1424 if (printk_ratelimit()) {
1425 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1426 entry->src_id, entry->src_data[0]);
1427 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
1428 addr);
1429 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1430 status);
1431 gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
1432 entry->pasid);
1433 }
1434
1435 return 0;
1436}
1437
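/* Toggle the ENABLE bit in each MC hub/client clock gating register */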
1438static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
1439 bool enable)
1440{
1441 uint32_t data;
1442
1443 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
1444 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1445 data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1446 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1447
1448 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1449 data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1450 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1451
1452 data = RREG32(mmMC_HUB_MISC_VM_CG);
1453 data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
1454 WREG32(mmMC_HUB_MISC_VM_CG, data);
1455
1456 data = RREG32(mmMC_XPB_CLK_GAT);
1457 data |= MC_XPB_CLK_GAT__ENABLE_MASK;
1458 WREG32(mmMC_XPB_CLK_GAT, data);
1459
1460 data = RREG32(mmATC_MISC_CG);
1461 data |= ATC_MISC_CG__ENABLE_MASK;
1462 WREG32(mmATC_MISC_CG, data);
1463
1464 data = RREG32(mmMC_CITF_MISC_WR_CG);
1465 data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
1466 WREG32(mmMC_CITF_MISC_WR_CG, data);
1467
1468 data = RREG32(mmMC_CITF_MISC_RD_CG);
1469 data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
1470 WREG32(mmMC_CITF_MISC_RD_CG, data);
1471
1472 data = RREG32(mmMC_CITF_MISC_VM_CG);
1473 data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
1474 WREG32(mmMC_CITF_MISC_VM_CG, data);
1475
1476 data = RREG32(mmVM_L2_CG);
1477 data |= VM_L2_CG__ENABLE_MASK;
1478 WREG32(mmVM_L2_CG, data);
1479 } else {
1480 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1481 data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1482 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1483
1484 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1485 data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1486 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1487
1488 data = RREG32(mmMC_HUB_MISC_VM_CG);
1489 data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
1490 WREG32(mmMC_HUB_MISC_VM_CG, data);
1491
1492 data = RREG32(mmMC_XPB_CLK_GAT);
1493 data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
1494 WREG32(mmMC_XPB_CLK_GAT, data);
1495
1496 data = RREG32(mmATC_MISC_CG);
1497 data &= ~ATC_MISC_CG__ENABLE_MASK;
1498 WREG32(mmATC_MISC_CG, data);
1499
1500 data = RREG32(mmMC_CITF_MISC_WR_CG);
1501 data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
1502 WREG32(mmMC_CITF_MISC_WR_CG, data);
1503
1504 data = RREG32(mmMC_CITF_MISC_RD_CG);
1505 data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
1506 WREG32(mmMC_CITF_MISC_RD_CG, data);
1507
1508 data = RREG32(mmMC_CITF_MISC_VM_CG);
1509 data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
1510 WREG32(mmMC_CITF_MISC_VM_CG, data);
1511
1512 data = RREG32(mmVM_L2_CG);
1513 data &= ~VM_L2_CG__ENABLE_MASK;
1514 WREG32(mmVM_L2_CG, data);
1515 }
1516}
1517
1518static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
1519 bool enable)
1520{
1521 uint32_t data;
1522
1523 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
1524 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1525 data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1526 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1527
1528 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1529 data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1530 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1531
1532 data = RREG32(mmMC_HUB_MISC_VM_CG);
1533 data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1534 WREG32(mmMC_HUB_MISC_VM_CG, data);
1535
1536 data = RREG32(mmMC_XPB_CLK_GAT);
1537 data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1538 WREG32(mmMC_XPB_CLK_GAT, data);
1539
1540 data = RREG32(mmATC_MISC_CG);
1541 data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1542 WREG32(mmATC_MISC_CG, data);
1543
1544 data = RREG32(mmMC_CITF_MISC_WR_CG);
1545 data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1546 WREG32(mmMC_CITF_MISC_WR_CG, data);
1547
1548 data = RREG32(mmMC_CITF_MISC_RD_CG);
1549 data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1550 WREG32(mmMC_CITF_MISC_RD_CG, data);
1551
1552 data = RREG32(mmMC_CITF_MISC_VM_CG);
1553 data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1554 WREG32(mmMC_CITF_MISC_VM_CG, data);
1555
1556 data = RREG32(mmVM_L2_CG);
1557 data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
1558 WREG32(mmVM_L2_CG, data);
1559 } else {
1560 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1561 data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1562 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1563
1564 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1565 data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1566 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1567
1568 data = RREG32(mmMC_HUB_MISC_VM_CG);
1569 data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1570 WREG32(mmMC_HUB_MISC_VM_CG, data);
1571
1572 data = RREG32(mmMC_XPB_CLK_GAT);
1573 data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1574 WREG32(mmMC_XPB_CLK_GAT, data);
1575
1576 data = RREG32(mmATC_MISC_CG);
1577 data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1578 WREG32(mmATC_MISC_CG, data);
1579
1580 data = RREG32(mmMC_CITF_MISC_WR_CG);
1581 data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1582 WREG32(mmMC_CITF_MISC_WR_CG, data);
1583
1584 data = RREG32(mmMC_CITF_MISC_RD_CG);
1585 data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1586 WREG32(mmMC_CITF_MISC_RD_CG, data);
1587
1588 data = RREG32(mmMC_CITF_MISC_VM_CG);
1589 data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1590 WREG32(mmMC_CITF_MISC_VM_CG, data);
1591
1592 data = RREG32(mmVM_L2_CG);
1593 data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
1594 WREG32(mmVM_L2_CG, data);
1595 }
1596}
1597
1598static int gmc_v8_0_set_clockgating_state(void *handle,
1599 enum amd_clockgating_state state)
1600{
1601 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1602
1603 if (amdgpu_sriov_vf(adev))
1604 return 0;
1605
1606 switch (adev->asic_type) {
1607 case CHIP_FIJI:
1608 fiji_update_mc_medium_grain_clock_gating(adev,
1609 state == AMD_CG_STATE_GATE);
1610 fiji_update_mc_light_sleep(adev,
1611 state == AMD_CG_STATE_GATE);
1612 break;
1613 default:
1614 break;
1615 }
1616 return 0;
1617}
1618
1619static int gmc_v8_0_set_powergating_state(void *handle,
1620 enum amd_powergating_state state)
1621{
1622 return 0;
1623}
1624
1625static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
1626{
1627 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1628 int data;
1629
1630 if (amdgpu_sriov_vf(adev))
1631 *flags = 0;
1632
1633 /* AMD_CG_SUPPORT_MC_MGCG */
1634 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1635 if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
1636 *flags |= AMD_CG_SUPPORT_MC_MGCG;
1637
1638 /* AMD_CG_SUPPORT_MC_LS */
1639 if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
1640 *flags |= AMD_CG_SUPPORT_MC_LS;
1641}
1642
1643static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
1644 .name = "gmc_v8_0",
1645 .early_init = gmc_v8_0_early_init,
1646 .late_init = gmc_v8_0_late_init,
1647 .sw_init = gmc_v8_0_sw_init,
1648 .sw_fini = gmc_v8_0_sw_fini,
1649 .hw_init = gmc_v8_0_hw_init,
1650 .hw_fini = gmc_v8_0_hw_fini,
1651 .suspend = gmc_v8_0_suspend,
1652 .resume = gmc_v8_0_resume,
1653 .is_idle = gmc_v8_0_is_idle,
1654 .wait_for_idle = gmc_v8_0_wait_for_idle,
1655 .check_soft_reset = gmc_v8_0_check_soft_reset,
1656 .pre_soft_reset = gmc_v8_0_pre_soft_reset,
1657 .soft_reset = gmc_v8_0_soft_reset,
1658 .post_soft_reset = gmc_v8_0_post_soft_reset,
1659 .set_clockgating_state = gmc_v8_0_set_clockgating_state,
1660 .set_powergating_state = gmc_v8_0_set_powergating_state,
1661 .get_clockgating_state = gmc_v8_0_get_clockgating_state,
1662};
1663
1664static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
1665 .flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
1666 .emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
1667 .emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
1668 .set_pte_pde = gmc_v8_0_set_pte_pde,
1669 .set_prt = gmc_v8_0_set_prt,
1670 .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
1671 .get_vm_pde = gmc_v8_0_get_vm_pde
1672};
1673
1674static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
1675 .set = gmc_v8_0_vm_fault_interrupt_state,
1676 .process = gmc_v8_0_process_interrupt,
1677};
1678
1679static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
1680{
1681 if (adev->gmc.gmc_funcs == NULL)
1682 adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
1683}
1684
1685static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
1686{
1687 adev->gmc.vm_fault.num_types = 1;
1688 adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
1689}
1690
1691const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
1692{
1693 .type = AMD_IP_BLOCK_TYPE_GMC,
1694 .major = 8,
1695 .minor = 0,
1696 .rev = 0,
1697 .funcs = &gmc_v8_0_ip_funcs,
1698};
1699
1700const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
1701{
1702 .type = AMD_IP_BLOCK_TYPE_GMC,
1703 .major = 8,
1704 .minor = 1,
1705 .rev = 0,
1706 .funcs = &gmc_v8_0_ip_funcs,
1707};
1708
1709const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
1710{
1711 .type = AMD_IP_BLOCK_TYPE_GMC,
1712 .major = 8,
1713 .minor = 5,
1714 .rev = 0,
1715 .funcs = &gmc_v8_0_ip_funcs,
1716};