/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

#include "amdgpu_atombios.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");

static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};

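/**
 * gmc_v8_0_init_golden_registers - apply per-ASIC golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Program the clock gating init values and the golden register
 * overrides for the detected ASIC (VI).
 */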
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_stoney_common,
							ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}

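/**
 * gmc_v8_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Block CPU framebuffer access and put the memory controller
 * into blackout mode so it can be reprogrammed safely (VI).
 */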
static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

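/**
 * gmc_v8_0_mc_resume - resume the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Take the memory controller out of blackout mode and
 * re-enable CPU framebuffer access (VI).
 */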
static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
		    ASICID_IS_P31(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris11_k";
		else
			chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris10_k";
		else
			chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
			chip_name = "polaris12_k";
		} else {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
			/* Polaris12 32bit ASIC needs a special MC firmware */
			if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
				chip_name = "polaris12_32";
			else
				chip_name = "polaris12";
		}
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_VEGAM:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Likewise, skip it on a VF, where the hypervisor loads
	 * the MC ucode for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

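/**
 * gmc_v8_0_polaris_mc_load_microcode - load polaris MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */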
static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Likewise, skip it on a VF, where the hypervisor loads
	 * the MC ucode for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}

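/**
 * gmc_v8_0_vram_gtt_location - place vram and gart in the GPU address space
 *
 * @adev: amdgpu_device pointer
 * @mc: amdgpu_gmc structure holding the MC layout
 *
 * Read the VRAM base from the hardware (except on a VF, where it is
 * always 0) and let the common helpers place vram and gart (VI).
 */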
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);

	if (amdgpu_sriov_vf(adev)) {
		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
		WREG32(mmMC_VM_FB_LOCATION, tmp);
		/* XXX double check these! */
		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	}

	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
			chansize = 64;
		} else {
			chansize = 32;
		}
		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* size in MB on vi */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_POLARIS10: /* all engines support GPUVM */
		case CHIP_POLARIS11: /* all engines support GPUVM */
		case CHIP_POLARIS12: /* all engines support GPUVM */
		case CHIP_VEGAM:     /* all engines support GPUVM */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/**
 * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: type of flush
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid;
	unsigned int tmp;

	if (amdgpu_in_reset(adev))
		return -EIO;

	for (vmid = 1; vmid < 16; vmid++) {
		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
		    (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
			RREG32(mmVM_INVALIDATE_RESPONSE);
			break;
		}
	}

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: type of flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	/* bits 0-15 are the VM contexts 0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

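/**
 * gmc_v8_0_emit_flush_gpu_tlb - flush the TLB from a ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vm instance to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base address for @vmid and flush the
 * TLB using ring register writes (VI).
 */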
static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts 0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

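/**
 * gmc_v8_0_emit_pasid_mapping - map a pasid to a vmid from a ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vm instance the pasid is mapped to
 * @pasid: pasid to map
 *
 * Write the vmid-to-pasid mapping into the IH lookup table (VI).
 */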
static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

/*
 * PTE format on VI:
 * 63:40 reserved
 * 39:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 reserved
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VI:
 * 63:59 block fragment size
 * 58:40 reserved
 * 39:1 physical base address of PTE
 * bits 5:1 must be 0.
 * 0 valid
 */

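/**
 * gmc_v8_0_get_vm_pde - get the PDE for a GART page table
 *
 * @adev: amdgpu_device pointer
 * @level: page table level
 * @addr: physical address of the page table, updated in place
 * @flags: PDE flags, updated in place
 *
 * No PDE adjustments are needed on VI; just sanity check that the
 * page table address is properly aligned and within range.
 */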
static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

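/**
 * gmc_v8_0_get_vm_pte - get the PTE flags for the given mapping
 *
 * @adev: amdgpu_device pointer
 * @mapping: mapping the flags are computed for
 * @flags: PTE flags, updated in place
 *
 * Carry over the executable flag from the mapping and clear the
 * software PRT flag, which has no PTE bit on VI (PRT is handled
 * via the VM_PRT apertures instead, see gmc_v8_0_set_prt()).
 */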
static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
	*flags &= ~AMDGPU_PTE_PRT;
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	int r, i;
	u32 tmp, field;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	adev->gart.ready = true;
	return 0;
}

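/**
 * gmc_v8_0_gart_init - initialize the gart structures
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the common gart structure and the page table in VRAM.
 * Returns 0 for success, error for failure.
 */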
static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VI PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: debug logging only - no functional use
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, pasid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

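/**
 * gmc_v8_0_convert_vram_type - convert the MC_SEQ_MISC0 memory type
 *
 * @mc_seq_vram_type: memory type field read from MC_SEQ_MISC0
 *
 * Translate the hardware memory type encoding into the
 * corresponding AMDGPU_VRAM_TYPE_* value.
 */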
static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gmc_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

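/**
 * gmc_v8_0_get_vbios_fb_size - get the framebuffer size reserved by the vbios
 *
 * @adev: amdgpu_device pointer
 *
 * If VGA mode is enabled, return the fixed VGA allocation;
 * otherwise compute the size from the active viewport (VI).
 */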
static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);
		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}

	return size;
}

#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->num_vmhubs = 1;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if ((adev->asic_type == CHIP_FIJI) ||
		    (adev->asic_type == CHIP_VEGAM))
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 64GB by default.
	 * Max GPUVM address space on VI is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		pr_warn("No suitable DMA available\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					  GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_tonga_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else if (adev->asic_type == CHIP_POLARIS11 ||
		   adev->asic_type == CHIP_POLARIS10 ||
		   adev->asic_type == CHIP_POLARIS12) {
		r = gmc_v8_0_polaris_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}
	if (srbm_soft_reset) {
		adev->gmc.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->gmc.srbm_soft_reset = 0;
		return false;
	}
}

static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev);
	if (gmc_v8_0_wait_for_idle(adev)) {
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
	}

	return 0;
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->gmc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->gmc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev);
	return 0;
}

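/**
 * gmc_v8_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source this state applies to
 * @type: interrupt type
 * @state: interrupt state to program
 *
 * Enable or disable the VM protection fault interrupt bits
 * for both VM contexts (VI).
 */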
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

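/**
 * gmc_v8_0_process_interrupt - handle a VM fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source that fired
 * @entry: IV ring entry for the fault
 *
 * Read and clear the fault registers, log the fault, and record
 * the fault info for amdkfd if a KFD vmid faulted (VI).
 */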
static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
			entry->src_id, entry->src_data[0], task_info.process_name,
			task_info.tgid, task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
	    && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
						VM_CONTEXT1_PROTECTION_FAULT_STATUS,
						PROTECTIONS);

		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}

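/**
 * fiji_update_mc_medium_grain_clock_gating - toggle MC medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable MGCG
 *
 * Set or clear the clock gating enable bit in each of the MC hub,
 * XPB, ATC, CITF and VM L2 clock gating registers (Fiji).
 */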
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

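/**
 * fiji_update_mc_light_sleep - toggle MC memory light sleep
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable memory light sleep
 *
 * Set or clear the MEM_LS enable bit in each of the MC hub,
 * XPB, ATC, CITF and VM L2 clock gating registers (Fiji).
 */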
static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pde = gmc_v8_0_get_vm_pde,
	.get_vm_pte = gmc_v8_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v8_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};
124{
125 mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
126 mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
127};
128
129static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
130{
131 switch (adev->asic_type) {
132 case CHIP_FIJI:
133 amdgpu_device_program_register_sequence(adev,
134 fiji_mgcg_cgcg_init,
135 ARRAY_SIZE(fiji_mgcg_cgcg_init));
136 amdgpu_device_program_register_sequence(adev,
137 golden_settings_fiji_a10,
138 ARRAY_SIZE(golden_settings_fiji_a10));
139 break;
140 case CHIP_TONGA:
141 amdgpu_device_program_register_sequence(adev,
142 tonga_mgcg_cgcg_init,
143 ARRAY_SIZE(tonga_mgcg_cgcg_init));
144 amdgpu_device_program_register_sequence(adev,
145 golden_settings_tonga_a11,
146 ARRAY_SIZE(golden_settings_tonga_a11));
147 break;
148 case CHIP_POLARIS11:
149 case CHIP_POLARIS12:
150 case CHIP_VEGAM:
151 amdgpu_device_program_register_sequence(adev,
152 golden_settings_polaris11_a11,
153 ARRAY_SIZE(golden_settings_polaris11_a11));
154 break;
155 case CHIP_POLARIS10:
156 amdgpu_device_program_register_sequence(adev,
157 golden_settings_polaris10_a11,
158 ARRAY_SIZE(golden_settings_polaris10_a11));
159 break;
160 case CHIP_CARRIZO:
161 amdgpu_device_program_register_sequence(adev,
162 cz_mgcg_cgcg_init,
163 ARRAY_SIZE(cz_mgcg_cgcg_init));
164 break;
165 case CHIP_STONEY:
166 amdgpu_device_program_register_sequence(adev,
167 stoney_mgcg_cgcg_init,
168 ARRAY_SIZE(stoney_mgcg_cgcg_init));
169 amdgpu_device_program_register_sequence(adev,
170 golden_settings_stoney_common,
171 ARRAY_SIZE(golden_settings_stoney_common));
172 break;
173 default:
174 break;
175 }
176}
177
178static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
179{
180 u32 blackout;
181
182 gmc_v8_0_wait_for_idle(adev);
183
184 blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
185 if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
186 /* Block CPU access */
187 WREG32(mmBIF_FB_EN, 0);
188 /* blackout the MC */
189 blackout = REG_SET_FIELD(blackout,
190 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
191 WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
192 }
193 /* wait for the MC to settle */
194 udelay(100);
195}
196
197static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
198{
199 u32 tmp;
200
201 /* unblackout the MC */
202 tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
203 tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
204 WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
205 /* allow CPU access */
206 tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
207 tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
208 WREG32(mmBIF_FB_EN, tmp);
209}
210
211/**
212 * gmc_v8_0_init_microcode - load ucode images from disk
213 *
214 * @adev: amdgpu_device pointer
215 *
216 * Use the firmware interface to load the ucode images into
217 * the driver (not loaded into hw).
218 * Returns 0 on success, error on failure.
219 */
220static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
221{
222 const char *chip_name;
223 char fw_name[30];
224 int err;
225
226 DRM_DEBUG("\n");
227
228 switch (adev->asic_type) {
229 case CHIP_TONGA:
230 chip_name = "tonga";
231 break;
232 case CHIP_POLARIS11:
233 if (((adev->pdev->device == 0x67ef) &&
234 ((adev->pdev->revision == 0xe0) ||
235 (adev->pdev->revision == 0xe5))) ||
236 ((adev->pdev->device == 0x67ff) &&
237 ((adev->pdev->revision == 0xcf) ||
238 (adev->pdev->revision == 0xef) ||
239 (adev->pdev->revision == 0xff))))
240 chip_name = "polaris11_k";
241 else if ((adev->pdev->device == 0x67ef) &&
242 (adev->pdev->revision == 0xe2))
243 chip_name = "polaris11_k";
244 else
245 chip_name = "polaris11";
246 break;
247 case CHIP_POLARIS10:
248 if ((adev->pdev->device == 0x67df) &&
249 ((adev->pdev->revision == 0xe1) ||
250 (adev->pdev->revision == 0xf7)))
251 chip_name = "polaris10_k";
252 else
253 chip_name = "polaris10";
254 break;
255 case CHIP_POLARIS12:
256 if (((adev->pdev->device == 0x6987) &&
257 ((adev->pdev->revision == 0xc0) ||
258 (adev->pdev->revision == 0xc3))) ||
259 ((adev->pdev->device == 0x6981) &&
260 ((adev->pdev->revision == 0x00) ||
261 (adev->pdev->revision == 0x01) ||
262 (adev->pdev->revision == 0x10))))
263 chip_name = "polaris12_k";
264 else
265 chip_name = "polaris12";
266 break;
267 case CHIP_FIJI:
268 case CHIP_CARRIZO:
269 case CHIP_STONEY:
270 case CHIP_VEGAM:
271 return 0;
272 default: BUG();
273 }
274
275 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
276 err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
277 if (err)
278 goto out;
279 err = amdgpu_ucode_validate(adev->gmc.fw);
280
281out:
282 if (err) {
283 pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
284 release_firmware(adev->gmc.fw);
285 adev->gmc.fw = NULL;
286 }
287 return err;
288}
289
290/**
291 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
292 *
293 * @adev: amdgpu_device pointer
294 *
295 * Load the GDDR MC ucode into the hw (VI).
296 * Returns 0 on success, error on failure.
297 */
298static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
299{
300 const struct mc_firmware_header_v1_0 *hdr;
301 const __le32 *fw_data = NULL;
302 const __le32 *io_mc_regs = NULL;
303 u32 running;
304 int i, ucode_size, regs_size;
305
306 /* Skip MC ucode loading on SR-IOV capable boards.
307 * vbios does this for us in asic_init in that case.
308 * Skip MC ucode loading on VF, because hypervisor will do that
309 * for this adaptor.
310 */
311 if (amdgpu_sriov_bios(adev))
312 return 0;
313
314 if (!adev->gmc.fw)
315 return -EINVAL;
316
317 hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
318 amdgpu_ucode_print_mc_hdr(&hdr->header);
319
320 adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
321 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
322 io_mc_regs = (const __le32 *)
323 (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
324 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
325 fw_data = (const __le32 *)
326 (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
327
328 running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
329
330 if (running == 0) {
331 /* reset the engine and set to writable */
332 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
333 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
334
335 /* load mc io regs */
336 for (i = 0; i < regs_size; i++) {
337 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
338 WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
339 }
340 /* load the MC ucode */
341 for (i = 0; i < ucode_size; i++)
342 WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
343
344 /* put the engine back into the active state */
345 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
346 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
347 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
348
349 /* wait for training to complete */
350 for (i = 0; i < adev->usec_timeout; i++) {
351 if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
352 MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
353 break;
354 udelay(1);
355 }
356 for (i = 0; i < adev->usec_timeout; i++) {
357 if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
358 MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
359 break;
360 udelay(1);
361 }
362 }
363
364 return 0;
365}
366
367static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
368{
369 const struct mc_firmware_header_v1_0 *hdr;
370 const __le32 *fw_data = NULL;
371 const __le32 *io_mc_regs = NULL;
372 u32 data;
373 int i, ucode_size, regs_size;
374
375 /* Skip MC ucode loading on SR-IOV capable boards.
376 * vbios does this for us in asic_init in that case.
377 * Skip MC ucode loading on VF, because hypervisor will do that
378 * for this adaptor.
379 */
380 if (amdgpu_sriov_bios(adev))
381 return 0;
382
383 if (!adev->gmc.fw)
384 return -EINVAL;
385
386 hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
387 amdgpu_ucode_print_mc_hdr(&hdr->header);
388
389 adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
390 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
391 io_mc_regs = (const __le32 *)
392 (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
393 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
394 fw_data = (const __le32 *)
395 (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
396
397 data = RREG32(mmMC_SEQ_MISC0);
398 data &= ~(0x40);
399 WREG32(mmMC_SEQ_MISC0, data);
400
401 /* load mc io regs */
402 for (i = 0; i < regs_size; i++) {
403 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
404 WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
405 }
406
407 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
408 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
409
410 /* load the MC ucode */
411 for (i = 0; i < ucode_size; i++)
412 WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
413
414 /* put the engine back into the active state */
415 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
416 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
417 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
418
419 /* wait for training to complete */
420 for (i = 0; i < adev->usec_timeout; i++) {
421 data = RREG32(mmMC_SEQ_MISC0);
422 if (data & 0x80)
423 break;
424 udelay(1);
425 }
426
427 return 0;
428}
429
430static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
431 struct amdgpu_gmc *mc)
432{
433 u64 base = 0;
434
435 if (!amdgpu_sriov_vf(adev))
436 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
437 base <<= 24;
438
439 amdgpu_gmc_vram_location(adev, mc, base);
440 amdgpu_gmc_gart_location(adev, mc);
441}
442
443/**
444 * gmc_v8_0_mc_program - program the GPU memory controller
445 *
446 * @adev: amdgpu_device pointer
447 *
448 * Set the location of vram, gart, and AGP in the GPU's
449 * physical address space (VI).
450 */
451static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
452{
453 u32 tmp;
454 int i, j;
455
456 /* Initialize HDP */
457 for (i = 0, j = 0; i < 32; i++, j += 0x6) {
458 WREG32((0xb05 + j), 0x00000000);
459 WREG32((0xb06 + j), 0x00000000);
460 WREG32((0xb07 + j), 0x00000000);
461 WREG32((0xb08 + j), 0x00000000);
462 WREG32((0xb09 + j), 0x00000000);
463 }
464 WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
465
466 if (gmc_v8_0_wait_for_idle((void *)adev)) {
467 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
468 }
469 if (adev->mode_info.num_crtc) {
470 /* Lockout access through VGA aperture*/
471 tmp = RREG32(mmVGA_HDP_CONTROL);
472 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
473 WREG32(mmVGA_HDP_CONTROL, tmp);
474
475 /* disable VGA render */
476 tmp = RREG32(mmVGA_RENDER_CONTROL);
477 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
478 WREG32(mmVGA_RENDER_CONTROL, tmp);
479 }
480 /* Update configuration */
481 WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
482 adev->gmc.vram_start >> 12);
483 WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
484 adev->gmc.vram_end >> 12);
485 WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
486 adev->vram_scratch.gpu_addr >> 12);
487
488 if (amdgpu_sriov_vf(adev)) {
489 tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
490 tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
491 WREG32(mmMC_VM_FB_LOCATION, tmp);
492 /* XXX double check these! */
493 WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
494 WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
495 WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
496 }
497
498 WREG32(mmMC_VM_AGP_BASE, 0);
499 WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
500 WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
501 if (gmc_v8_0_wait_for_idle((void *)adev)) {
502 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
503 }
504
505 WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
506
507 tmp = RREG32(mmHDP_MISC_CNTL);
508 tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
509 WREG32(mmHDP_MISC_CNTL, tmp);
510
511 tmp = RREG32(mmHDP_HOST_PATH_CNTL);
512 WREG32(mmHDP_HOST_PATH_CNTL, tmp);
513}
514
515/**
516 * gmc_v8_0_mc_init - initialize the memory controller driver params
517 *
518 * @adev: amdgpu_device pointer
519 *
520 * Look up the amount of vram, vram width, and decide how to place
521 * vram and gart within the GPU's physical address space (VI).
522 * Returns 0 for success.
523 */
524static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
525{
526 int r;
527
528 adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
529 if (!adev->gmc.vram_width) {
530 u32 tmp;
531 int chansize, numchan;
532
533 /* Get VRAM informations */
534 tmp = RREG32(mmMC_ARB_RAMCFG);
535 if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
536 chansize = 64;
537 } else {
538 chansize = 32;
539 }
540 tmp = RREG32(mmMC_SHARED_CHMAP);
541 switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
542 case 0:
543 default:
544 numchan = 1;
545 break;
546 case 1:
547 numchan = 2;
548 break;
549 case 2:
550 numchan = 4;
551 break;
552 case 3:
553 numchan = 8;
554 break;
555 case 4:
556 numchan = 3;
557 break;
558 case 5:
559 numchan = 6;
560 break;
561 case 6:
562 numchan = 10;
563 break;
564 case 7:
565 numchan = 12;
566 break;
567 case 8:
568 numchan = 16;
569 break;
570 }
571 adev->gmc.vram_width = numchan * chansize;
572 }
573 /* size in MB on si */
574 adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
575 adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
576
577 if (!(adev->flags & AMD_IS_APU)) {
578 r = amdgpu_device_resize_fb_bar(adev);
579 if (r)
580 return r;
581 }
582 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
583 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
584
585#ifdef CONFIG_X86_64
586 if (adev->flags & AMD_IS_APU) {
587 adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
588 adev->gmc.aper_size = adev->gmc.real_vram_size;
589 }
590#endif
591
592 /* In case the PCI BAR is larger than the actual amount of vram */
593 adev->gmc.visible_vram_size = adev->gmc.aper_size;
594 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
595 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
596
597 /* set the gart size */
598 if (amdgpu_gart_size == -1) {
599 switch (adev->asic_type) {
600 case CHIP_POLARIS10: /* all engines support GPUVM */
601 case CHIP_POLARIS11: /* all engines support GPUVM */
602 case CHIP_POLARIS12: /* all engines support GPUVM */
603 case CHIP_VEGAM: /* all engines support GPUVM */
604 default:
605 adev->gmc.gart_size = 256ULL << 20;
606 break;
607 case CHIP_TONGA: /* UVD, VCE do not support GPUVM */
608 case CHIP_FIJI: /* UVD, VCE do not support GPUVM */
609 case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
610 case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */
611 adev->gmc.gart_size = 1024ULL << 20;
612 break;
613 }
614 } else {
615 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
616 }
617
618 gmc_v8_0_vram_gtt_location(adev, &adev->gmc);
619
620 return 0;
621}
622
623/**
624 * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
625 *
626 * @adev: amdgpu_device pointer
627 * @pasid: pasid to be flush
628 *
629 * Flush the TLB for the requested pasid.
630 */
631static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
632 uint16_t pasid, uint32_t flush_type,
633 bool all_hub)
634{
635 int vmid;
636 unsigned int tmp;
637
638 if (adev->in_gpu_reset)
639 return -EIO;
640
641 for (vmid = 1; vmid < 16; vmid++) {
642
643 tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
644 if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
645 (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
646 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
647 RREG32(mmVM_INVALIDATE_RESPONSE);
648 break;
649 }
650 }
651
652 return 0;
653
654}
655
656/*
657 * GART
658 * VMID 0 is the physical GPU addresses as used by the kernel.
659 * VMIDs 1-15 are used for userspace clients and are handled
660 * by the amdgpu vm/hsa code.
661 */
662
663/**
664 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
665 *
666 * @adev: amdgpu_device pointer
667 * @vmid: vm instance to flush
668 *
669 * Flush the TLB for the requested page table (VI).
670 */
671static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
672 uint32_t vmhub, uint32_t flush_type)
673{
674 /* bits 0-15 are the VM contexts0-15 */
675 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
676}
677
678static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
679 unsigned vmid, uint64_t pd_addr)
680{
681 uint32_t reg;
682
683 if (vmid < 8)
684 reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
685 else
686 reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
687 amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
688
689 /* bits 0-15 are the VM contexts0-15 */
690 amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
691
692 return pd_addr;
693}
694
695static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
696 unsigned pasid)
697{
698 amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
699}
700
701/*
702 * PTE format on VI:
703 * 63:40 reserved
704 * 39:12 4k physical page base address
705 * 11:7 fragment
706 * 6 write
707 * 5 read
708 * 4 exe
709 * 3 reserved
710 * 2 snooped
711 * 1 system
712 * 0 valid
713 *
714 * PDE format on VI:
715 * 63:59 block fragment size
716 * 58:40 reserved
717 * 39:1 physical base address of PTE
718 * bits 5:1 must be 0.
719 * 0 valid
720 */
721
722static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
723 uint64_t *addr, uint64_t *flags)
724{
725 BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
726}
727
728static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
729 struct amdgpu_bo_va_mapping *mapping,
730 uint64_t *flags)
731{
732 *flags &= ~AMDGPU_PTE_EXECUTABLE;
733 *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
734 *flags &= ~AMDGPU_PTE_PRT;
735}
736
737/**
738 * gmc_v8_0_set_fault_enable_default - update VM fault handling
739 *
740 * @adev: amdgpu_device pointer
741 * @value: true redirects VM faults to the default page
742 */
743static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
744 bool value)
745{
746 u32 tmp;
747
748 tmp = RREG32(mmVM_CONTEXT1_CNTL);
749 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
750 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
751 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
752 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
753 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
754 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
755 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
756 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
757 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
758 READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
759 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
760 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
761 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
762 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
763 WREG32(mmVM_CONTEXT1_CNTL, tmp);
764}
765
766/**
767 * gmc_v8_0_set_prt - set PRT VM fault
768 *
769 * @adev: amdgpu_device pointer
770 * @enable: enable/disable VM fault handling for PRT
771*/
772static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
773{
774 u32 tmp;
775
776 if (enable && !adev->gmc.prt_warning) {
777 dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
778 adev->gmc.prt_warning = true;
779 }
780
781 tmp = RREG32(mmVM_PRT_CNTL);
782 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
783 CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
784 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
785 CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
786 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
787 TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
788 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
789 TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
790 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
791 L2_CACHE_STORE_INVALID_ENTRIES, enable);
792 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
793 L1_TLB_STORE_INVALID_ENTRIES, enable);
794 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
795 MASK_PDE0_FAULT, enable);
796 WREG32(mmVM_PRT_CNTL, tmp);
797
798 if (enable) {
799 uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
800 uint32_t high = adev->vm_manager.max_pfn -
801 (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
802
803 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
804 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
805 WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
806 WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
807 WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
808 WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
809 WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
810 WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
811 } else {
812 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
813 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
814 WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
815 WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
816 WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
817 WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
818 WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
819 WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
820 }
821}
822
823/**
824 * gmc_v8_0_gart_enable - gart enable
825 *
826 * @adev: amdgpu_device pointer
827 *
828 * This sets up the TLBs, programs the page tables for VMID0,
829 * sets up the hw for VMIDs 1-15 which are allocated on
830 * demand, and sets up the global locations for the LDS, GDS,
831 * and GPUVM for FSA64 clients (VI).
832 * Returns 0 for success, errors for failure.
833 */
834static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
835{
836 uint64_t table_addr;
837 int r, i;
838 u32 tmp, field;
839
840 if (adev->gart.bo == NULL) {
841 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
842 return -EINVAL;
843 }
844 r = amdgpu_gart_table_vram_pin(adev);
845 if (r)
846 return r;
847
848 table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
849
850 /* Setup TLB control */
851 tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
852 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
853 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
854 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
855 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
856 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
857 WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
858 /* Setup L2 cache */
859 tmp = RREG32(mmVM_L2_CNTL);
860 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
861 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
862 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
863 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
864 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
865 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
866 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
867 WREG32(mmVM_L2_CNTL, tmp);
868 tmp = RREG32(mmVM_L2_CNTL2);
869 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
870 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
871 WREG32(mmVM_L2_CNTL2, tmp);
872
873 field = adev->vm_manager.fragment_size;
874 tmp = RREG32(mmVM_L2_CNTL3);
875 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
876 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
877 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
878 WREG32(mmVM_L2_CNTL3, tmp);
879 /* XXX: set to enable PTE/PDE in system memory */
880 tmp = RREG32(mmVM_L2_CNTL4);
881 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
882 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
883 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
884 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
885 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
886 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
887 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
888 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
889 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
890 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
891 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
892 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
893 WREG32(mmVM_L2_CNTL4, tmp);
894 /* setup context0 */
895 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
896 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
897 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
898 WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
899 (u32)(adev->dummy_page_addr >> 12));
900 WREG32(mmVM_CONTEXT0_CNTL2, 0);
901 tmp = RREG32(mmVM_CONTEXT0_CNTL);
902 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
903 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
904 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
905 WREG32(mmVM_CONTEXT0_CNTL, tmp);
906
907 WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
908 WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
909 WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);
910
911 /* empty context1-15 */
912 /* FIXME start with 4G, once using 2 level pt switch to full
913 * vm size space
914 */
915 /* set vm size, must be a multiple of 4 */
916 WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
917 WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
918 for (i = 1; i < 16; i++) {
919 if (i < 8)
920 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
921 table_addr >> 12);
922 else
923 WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
924 table_addr >> 12);
925 }
926
927 /* enable context1-15 */
928 WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
929 (u32)(adev->dummy_page_addr >> 12));
930 WREG32(mmVM_CONTEXT1_CNTL2, 4);
931 tmp = RREG32(mmVM_CONTEXT1_CNTL);
932 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
933 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
934 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
935 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
936 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
937 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
938 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
939 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
940 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
941 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
942 adev->vm_manager.block_size - 9);
943 WREG32(mmVM_CONTEXT1_CNTL, tmp);
944 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
945 gmc_v8_0_set_fault_enable_default(adev, false);
946 else
947 gmc_v8_0_set_fault_enable_default(adev, true);
948
949 gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
950 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
951 (unsigned)(adev->gmc.gart_size >> 20),
952 (unsigned long long)table_addr);
953 adev->gart.ready = true;
954 return 0;
955}
956
957static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
958{
959 int r;
960
961 if (adev->gart.bo) {
962 WARN(1, "R600 PCIE GART already initialized\n");
963 return 0;
964 }
965 /* Initialize common gart structure */
966 r = amdgpu_gart_init(adev);
967 if (r)
968 return r;
969 adev->gart.table_size = adev->gart.num_gpu_pages * 8;
970 adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
971 return amdgpu_gart_table_vram_alloc(adev);
972}
973
974/**
975 * gmc_v8_0_gart_disable - gart disable
976 *
977 * @adev: amdgpu_device pointer
978 *
979 * This disables all VM page table (VI).
980 */
981static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
982{
983 u32 tmp;
984
985 /* Disable all tables */
986 WREG32(mmVM_CONTEXT0_CNTL, 0);
987 WREG32(mmVM_CONTEXT1_CNTL, 0);
988 /* Setup TLB control */
989 tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
990 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
991 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
992 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
993 WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
994 /* Setup L2 cache */
995 tmp = RREG32(mmVM_L2_CNTL);
996 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
997 WREG32(mmVM_L2_CNTL, tmp);
998 WREG32(mmVM_L2_CNTL2, 0);
999 amdgpu_gart_table_vram_unpin(adev);
1000}
1001
1002/**
1003 * gmc_v8_0_vm_decode_fault - print human readable fault info
1004 *
1005 * @adev: amdgpu_device pointer
1006 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
1007 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
1008 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
1009 *
1010 * Print human readable fault information (VI).
1011 */
1012static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
1013 u32 addr, u32 mc_client, unsigned pasid)
1014{
1015 u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
1016 u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1017 PROTECTIONS);
1018 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
1019 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
1020 u32 mc_id;
1021
1022 mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1023 MEMORY_CLIENT_ID);
1024
1025 dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
1026 protections, vmid, pasid, addr,
1027 REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1028 MEMORY_CLIENT_RW) ?
1029 "write" : "read", block, mc_client, mc_id);
1030}
1031
1032static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
1033{
1034 switch (mc_seq_vram_type) {
1035 case MC_SEQ_MISC0__MT__GDDR1:
1036 return AMDGPU_VRAM_TYPE_GDDR1;
1037 case MC_SEQ_MISC0__MT__DDR2:
1038 return AMDGPU_VRAM_TYPE_DDR2;
1039 case MC_SEQ_MISC0__MT__GDDR3:
1040 return AMDGPU_VRAM_TYPE_GDDR3;
1041 case MC_SEQ_MISC0__MT__GDDR4:
1042 return AMDGPU_VRAM_TYPE_GDDR4;
1043 case MC_SEQ_MISC0__MT__GDDR5:
1044 return AMDGPU_VRAM_TYPE_GDDR5;
1045 case MC_SEQ_MISC0__MT__HBM:
1046 return AMDGPU_VRAM_TYPE_HBM;
1047 case MC_SEQ_MISC0__MT__DDR3:
1048 return AMDGPU_VRAM_TYPE_DDR3;
1049 default:
1050 return AMDGPU_VRAM_TYPE_UNKNOWN;
1051 }
1052}
1053
1054static int gmc_v8_0_early_init(void *handle)
1055{
1056 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1057
1058 gmc_v8_0_set_gmc_funcs(adev);
1059 gmc_v8_0_set_irq_funcs(adev);
1060
1061 adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1062 adev->gmc.shared_aperture_end =
1063 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1064 adev->gmc.private_aperture_start =
1065 adev->gmc.shared_aperture_end + 1;
1066 adev->gmc.private_aperture_end =
1067 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1068
1069 return 0;
1070}
1071
1072static int gmc_v8_0_late_init(void *handle)
1073{
1074 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1075
1076 amdgpu_bo_late_init(adev);
1077
1078 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
1079 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1080 else
1081 return 0;
1082}
1083
1084static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
1085{
1086 u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
1087 unsigned size;
1088
1089 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
1090 size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
1091 } else {
1092 u32 viewport = RREG32(mmVIEWPORT_SIZE);
1093 size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
1094 REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
1095 4);
1096 }
1097 /* return 0 if the pre-OS buffer uses up most of vram */
1098 if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
1099 return 0;
1100 return size;
1101}
1102
1103#define mmMC_SEQ_MISC0_FIJI 0xA71
1104
1105static int gmc_v8_0_sw_init(void *handle)
1106{
1107 int r;
1108 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1109
1110 adev->num_vmhubs = 1;
1111
1112 if (adev->flags & AMD_IS_APU) {
1113 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
1114 } else {
1115 u32 tmp;
1116
1117 if ((adev->asic_type == CHIP_FIJI) ||
1118 (adev->asic_type == CHIP_VEGAM))
1119 tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
1120 else
1121 tmp = RREG32(mmMC_SEQ_MISC0);
1122 tmp &= MC_SEQ_MISC0__MT__MASK;
1123 adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
1124 }
1125
1126 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
1127 if (r)
1128 return r;
1129
1130 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
1131 if (r)
1132 return r;
1133
1134 /* Adjust VM size here.
1135 * Currently set to 4GB ((1 << 20) 4k pages).
1136 * Max GPUVM size for cayman and SI is 40 bits.
1137 */
1138 amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
1139
1140 /* Set the internal MC address mask
1141 * This is the max address of the GPU's
1142 * internal address space.
1143 */
1144 adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1145
1146 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
1147 if (r) {
1148 pr_warn("No suitable DMA available\n");
1149 return r;
1150 }
1151 adev->need_swiotlb = drm_need_swiotlb(40);
1152
1153 r = gmc_v8_0_init_microcode(adev);
1154 if (r) {
1155 DRM_ERROR("Failed to load mc firmware!\n");
1156 return r;
1157 }
1158
1159 r = gmc_v8_0_mc_init(adev);
1160 if (r)
1161 return r;
1162
1163 adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev);
1164
1165 /* Memory manager */
1166 r = amdgpu_bo_init(adev);
1167 if (r)
1168 return r;
1169
1170 r = gmc_v8_0_gart_init(adev);
1171 if (r)
1172 return r;
1173
1174 /*
1175 * number of VMs
1176 * VMID 0 is reserved for System
1177 * amdgpu graphics/compute will use VMIDs 1-7
1178 * amdkfd will use VMIDs 8-15
1179 */
1180 adev->vm_manager.first_kfd_vmid = 8;
1181 amdgpu_vm_manager_init(adev);
1182
1183 /* base offset of vram pages */
1184 if (adev->flags & AMD_IS_APU) {
1185 u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
1186
1187 tmp <<= 22;
1188 adev->vm_manager.vram_base_offset = tmp;
1189 } else {
1190 adev->vm_manager.vram_base_offset = 0;
1191 }
1192
1193 adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
1194 GFP_KERNEL);
1195 if (!adev->gmc.vm_fault_info)
1196 return -ENOMEM;
1197 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1198
1199 return 0;
1200}
1201
1202static int gmc_v8_0_sw_fini(void *handle)
1203{
1204 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1205
1206 amdgpu_gem_force_release(adev);
1207 amdgpu_vm_manager_fini(adev);
1208 kfree(adev->gmc.vm_fault_info);
1209 amdgpu_gart_table_vram_free(adev);
1210 amdgpu_bo_fini(adev);
1211 amdgpu_gart_fini(adev);
1212 release_firmware(adev->gmc.fw);
1213 adev->gmc.fw = NULL;
1214
1215 return 0;
1216}
1217
1218static int gmc_v8_0_hw_init(void *handle)
1219{
1220 int r;
1221 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1222
1223 gmc_v8_0_init_golden_registers(adev);
1224
1225 gmc_v8_0_mc_program(adev);
1226
1227 if (adev->asic_type == CHIP_TONGA) {
1228 r = gmc_v8_0_tonga_mc_load_microcode(adev);
1229 if (r) {
1230 DRM_ERROR("Failed to load MC firmware!\n");
1231 return r;
1232 }
1233 } else if (adev->asic_type == CHIP_POLARIS11 ||
1234 adev->asic_type == CHIP_POLARIS10 ||
1235 adev->asic_type == CHIP_POLARIS12) {
1236 r = gmc_v8_0_polaris_mc_load_microcode(adev);
1237 if (r) {
1238 DRM_ERROR("Failed to load MC firmware!\n");
1239 return r;
1240 }
1241 }
1242
1243 r = gmc_v8_0_gart_enable(adev);
1244 if (r)
1245 return r;
1246
1247 return r;
1248}
1249
1250static int gmc_v8_0_hw_fini(void *handle)
1251{
1252 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1253
1254 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1255 gmc_v8_0_gart_disable(adev);
1256
1257 return 0;
1258}
1259
1260static int gmc_v8_0_suspend(void *handle)
1261{
1262 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1263
1264 gmc_v8_0_hw_fini(adev);
1265
1266 return 0;
1267}
1268
1269static int gmc_v8_0_resume(void *handle)
1270{
1271 int r;
1272 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1273
1274 r = gmc_v8_0_hw_init(adev);
1275 if (r)
1276 return r;
1277
1278 amdgpu_vmid_reset_all(adev);
1279
1280 return 0;
1281}
1282
1283static bool gmc_v8_0_is_idle(void *handle)
1284{
1285 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1286 u32 tmp = RREG32(mmSRBM_STATUS);
1287
1288 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1289 SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
1290 return false;
1291
1292 return true;
1293}
1294
1295static int gmc_v8_0_wait_for_idle(void *handle)
1296{
1297 unsigned i;
1298 u32 tmp;
1299 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1300
1301 for (i = 0; i < adev->usec_timeout; i++) {
1302 /* read MC_STATUS */
1303 tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
1304 SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1305 SRBM_STATUS__MCC_BUSY_MASK |
1306 SRBM_STATUS__MCD_BUSY_MASK |
1307 SRBM_STATUS__VMC_BUSY_MASK |
1308 SRBM_STATUS__VMC1_BUSY_MASK);
1309 if (!tmp)
1310 return 0;
1311 udelay(1);
1312 }
1313 return -ETIMEDOUT;
1314
1315}
1316
1317static bool gmc_v8_0_check_soft_reset(void *handle)
1318{
1319 u32 srbm_soft_reset = 0;
1320 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1321 u32 tmp = RREG32(mmSRBM_STATUS);
1322
1323 if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
1324 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1325 SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
1326
1327 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1328 SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1329 if (!(adev->flags & AMD_IS_APU))
1330 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1331 SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1332 }
1333 if (srbm_soft_reset) {
1334 adev->gmc.srbm_soft_reset = srbm_soft_reset;
1335 return true;
1336 } else {
1337 adev->gmc.srbm_soft_reset = 0;
1338 return false;
1339 }
1340}
1341
1342static int gmc_v8_0_pre_soft_reset(void *handle)
1343{
1344 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1345
1346 if (!adev->gmc.srbm_soft_reset)
1347 return 0;
1348
1349 gmc_v8_0_mc_stop(adev);
1350 if (gmc_v8_0_wait_for_idle(adev)) {
1351 dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
1352 }
1353
1354 return 0;
1355}
1356
1357static int gmc_v8_0_soft_reset(void *handle)
1358{
1359 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1360 u32 srbm_soft_reset;
1361
1362 if (!adev->gmc.srbm_soft_reset)
1363 return 0;
1364 srbm_soft_reset = adev->gmc.srbm_soft_reset;
1365
1366 if (srbm_soft_reset) {
1367 u32 tmp;
1368
1369 tmp = RREG32(mmSRBM_SOFT_RESET);
1370 tmp |= srbm_soft_reset;
1371 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1372 WREG32(mmSRBM_SOFT_RESET, tmp);
1373 tmp = RREG32(mmSRBM_SOFT_RESET);
1374
1375 udelay(50);
1376
1377 tmp &= ~srbm_soft_reset;
1378 WREG32(mmSRBM_SOFT_RESET, tmp);
1379 tmp = RREG32(mmSRBM_SOFT_RESET);
1380
1381 /* Wait a little for things to settle down */
1382 udelay(50);
1383 }
1384
1385 return 0;
1386}
1387
1388static int gmc_v8_0_post_soft_reset(void *handle)
1389{
1390 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1391
1392 if (!adev->gmc.srbm_soft_reset)
1393 return 0;
1394
1395 gmc_v8_0_mc_resume(adev);
1396 return 0;
1397}
1398
1399static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
1400 struct amdgpu_irq_src *src,
1401 unsigned type,
1402 enum amdgpu_interrupt_state state)
1403{
1404 u32 tmp;
1405 u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1406 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1407 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1408 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1409 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1410 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1411 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1412
1413 switch (state) {
1414 case AMDGPU_IRQ_STATE_DISABLE:
1415 /* system context */
1416 tmp = RREG32(mmVM_CONTEXT0_CNTL);
1417 tmp &= ~bits;
1418 WREG32(mmVM_CONTEXT0_CNTL, tmp);
1419 /* VMs */
1420 tmp = RREG32(mmVM_CONTEXT1_CNTL);
1421 tmp &= ~bits;
1422 WREG32(mmVM_CONTEXT1_CNTL, tmp);
1423 break;
1424 case AMDGPU_IRQ_STATE_ENABLE:
1425 /* system context */
1426 tmp = RREG32(mmVM_CONTEXT0_CNTL);
1427 tmp |= bits;
1428 WREG32(mmVM_CONTEXT0_CNTL, tmp);
1429 /* VMs */
1430 tmp = RREG32(mmVM_CONTEXT1_CNTL);
1431 tmp |= bits;
1432 WREG32(mmVM_CONTEXT1_CNTL, tmp);
1433 break;
1434 default:
1435 break;
1436 }
1437
1438 return 0;
1439}
1440
1441static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1442 struct amdgpu_irq_src *source,
1443 struct amdgpu_iv_entry *entry)
1444{
1445 u32 addr, status, mc_client, vmid;
1446
1447 if (amdgpu_sriov_vf(adev)) {
1448 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1449 entry->src_id, entry->src_data[0]);
1450 dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
1451 return 0;
1452 }
1453
1454 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1455 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1456 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1457 /* reset addr and status */
1458 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1459
1460 if (!addr && !status)
1461 return 0;
1462
1463 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1464 gmc_v8_0_set_fault_enable_default(adev, false);
1465
1466 if (printk_ratelimit()) {
1467 struct amdgpu_task_info task_info;
1468
1469 memset(&task_info, 0, sizeof(struct amdgpu_task_info));
1470 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
1471
1472 dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
1473 entry->src_id, entry->src_data[0], task_info.process_name,
1474 task_info.tgid, task_info.task_name, task_info.pid);
1475 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
1476 addr);
1477 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1478 status);
1479 gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
1480 entry->pasid);
1481 }
1482
1483 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1484 VMID);
1485 if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
1486 && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
1487 struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
1488 u32 protections = REG_GET_FIELD(status,
1489 VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1490 PROTECTIONS);
1491
1492 info->vmid = vmid;
1493 info->mc_id = REG_GET_FIELD(status,
1494 VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1495 MEMORY_CLIENT_ID);
1496 info->status = status;
1497 info->page_addr = addr;
1498 info->prot_valid = protections & 0x7 ? true : false;
1499 info->prot_read = protections & 0x8 ? true : false;
1500 info->prot_write = protections & 0x10 ? true : false;
1501 info->prot_exec = protections & 0x20 ? true : false;
1502 mb();
1503 atomic_set(&adev->gmc.vm_fault_info_updated, 1);
1504 }
1505
1506 return 0;
1507}
1508
1509static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
1510 bool enable)
1511{
1512 uint32_t data;
1513
1514 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
1515 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1516 data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1517 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1518
1519 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1520 data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1521 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1522
1523 data = RREG32(mmMC_HUB_MISC_VM_CG);
1524 data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
1525 WREG32(mmMC_HUB_MISC_VM_CG, data);
1526
1527 data = RREG32(mmMC_XPB_CLK_GAT);
1528 data |= MC_XPB_CLK_GAT__ENABLE_MASK;
1529 WREG32(mmMC_XPB_CLK_GAT, data);
1530
1531 data = RREG32(mmATC_MISC_CG);
1532 data |= ATC_MISC_CG__ENABLE_MASK;
1533 WREG32(mmATC_MISC_CG, data);
1534
1535 data = RREG32(mmMC_CITF_MISC_WR_CG);
1536 data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
1537 WREG32(mmMC_CITF_MISC_WR_CG, data);
1538
1539 data = RREG32(mmMC_CITF_MISC_RD_CG);
1540 data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
1541 WREG32(mmMC_CITF_MISC_RD_CG, data);
1542
1543 data = RREG32(mmMC_CITF_MISC_VM_CG);
1544 data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
1545 WREG32(mmMC_CITF_MISC_VM_CG, data);
1546
1547 data = RREG32(mmVM_L2_CG);
1548 data |= VM_L2_CG__ENABLE_MASK;
1549 WREG32(mmVM_L2_CG, data);
1550 } else {
1551 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1552 data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1553 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1554
1555 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1556 data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1557 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1558
1559 data = RREG32(mmMC_HUB_MISC_VM_CG);
1560 data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
1561 WREG32(mmMC_HUB_MISC_VM_CG, data);
1562
1563 data = RREG32(mmMC_XPB_CLK_GAT);
1564 data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
1565 WREG32(mmMC_XPB_CLK_GAT, data);
1566
1567 data = RREG32(mmATC_MISC_CG);
1568 data &= ~ATC_MISC_CG__ENABLE_MASK;
1569 WREG32(mmATC_MISC_CG, data);
1570
1571 data = RREG32(mmMC_CITF_MISC_WR_CG);
1572 data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
1573 WREG32(mmMC_CITF_MISC_WR_CG, data);
1574
1575 data = RREG32(mmMC_CITF_MISC_RD_CG);
1576 data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
1577 WREG32(mmMC_CITF_MISC_RD_CG, data);
1578
1579 data = RREG32(mmMC_CITF_MISC_VM_CG);
1580 data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
1581 WREG32(mmMC_CITF_MISC_VM_CG, data);
1582
1583 data = RREG32(mmVM_L2_CG);
1584 data &= ~VM_L2_CG__ENABLE_MASK;
1585 WREG32(mmVM_L2_CG, data);
1586 }
1587}
1588
1589static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
1590 bool enable)
1591{
1592 uint32_t data;
1593
1594 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
1595 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1596 data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1597 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1598
1599 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1600 data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1601 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1602
1603 data = RREG32(mmMC_HUB_MISC_VM_CG);
1604 data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1605 WREG32(mmMC_HUB_MISC_VM_CG, data);
1606
1607 data = RREG32(mmMC_XPB_CLK_GAT);
1608 data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1609 WREG32(mmMC_XPB_CLK_GAT, data);
1610
1611 data = RREG32(mmATC_MISC_CG);
1612 data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1613 WREG32(mmATC_MISC_CG, data);
1614
1615 data = RREG32(mmMC_CITF_MISC_WR_CG);
1616 data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1617 WREG32(mmMC_CITF_MISC_WR_CG, data);
1618
1619 data = RREG32(mmMC_CITF_MISC_RD_CG);
1620 data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1621 WREG32(mmMC_CITF_MISC_RD_CG, data);
1622
1623 data = RREG32(mmMC_CITF_MISC_VM_CG);
1624 data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1625 WREG32(mmMC_CITF_MISC_VM_CG, data);
1626
1627 data = RREG32(mmVM_L2_CG);
1628 data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
1629 WREG32(mmVM_L2_CG, data);
1630 } else {
1631 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1632 data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1633 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1634
1635 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1636 data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1637 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1638
1639 data = RREG32(mmMC_HUB_MISC_VM_CG);
1640 data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1641 WREG32(mmMC_HUB_MISC_VM_CG, data);
1642
1643 data = RREG32(mmMC_XPB_CLK_GAT);
1644 data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1645 WREG32(mmMC_XPB_CLK_GAT, data);
1646
1647 data = RREG32(mmATC_MISC_CG);
1648 data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1649 WREG32(mmATC_MISC_CG, data);
1650
1651 data = RREG32(mmMC_CITF_MISC_WR_CG);
1652 data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1653 WREG32(mmMC_CITF_MISC_WR_CG, data);
1654
1655 data = RREG32(mmMC_CITF_MISC_RD_CG);
1656 data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1657 WREG32(mmMC_CITF_MISC_RD_CG, data);
1658
1659 data = RREG32(mmMC_CITF_MISC_VM_CG);
1660 data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1661 WREG32(mmMC_CITF_MISC_VM_CG, data);
1662
1663 data = RREG32(mmVM_L2_CG);
1664 data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
1665 WREG32(mmVM_L2_CG, data);
1666 }
1667}
1668
1669static int gmc_v8_0_set_clockgating_state(void *handle,
1670 enum amd_clockgating_state state)
1671{
1672 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1673
1674 if (amdgpu_sriov_vf(adev))
1675 return 0;
1676
1677 switch (adev->asic_type) {
1678 case CHIP_FIJI:
1679 fiji_update_mc_medium_grain_clock_gating(adev,
1680 state == AMD_CG_STATE_GATE);
1681 fiji_update_mc_light_sleep(adev,
1682 state == AMD_CG_STATE_GATE);
1683 break;
1684 default:
1685 break;
1686 }
1687 return 0;
1688}
1689
1690static int gmc_v8_0_set_powergating_state(void *handle,
1691 enum amd_powergating_state state)
1692{
1693 return 0;
1694}
1695
1696static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
1697{
1698 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1699 int data;
1700
1701 if (amdgpu_sriov_vf(adev))
1702 *flags = 0;
1703
1704 /* AMD_CG_SUPPORT_MC_MGCG */
1705 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1706 if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
1707 *flags |= AMD_CG_SUPPORT_MC_MGCG;
1708
1709 /* AMD_CG_SUPPORT_MC_LS */
1710 if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
1711 *flags |= AMD_CG_SUPPORT_MC_LS;
1712}
1713
1714static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
1715 .name = "gmc_v8_0",
1716 .early_init = gmc_v8_0_early_init,
1717 .late_init = gmc_v8_0_late_init,
1718 .sw_init = gmc_v8_0_sw_init,
1719 .sw_fini = gmc_v8_0_sw_fini,
1720 .hw_init = gmc_v8_0_hw_init,
1721 .hw_fini = gmc_v8_0_hw_fini,
1722 .suspend = gmc_v8_0_suspend,
1723 .resume = gmc_v8_0_resume,
1724 .is_idle = gmc_v8_0_is_idle,
1725 .wait_for_idle = gmc_v8_0_wait_for_idle,
1726 .check_soft_reset = gmc_v8_0_check_soft_reset,
1727 .pre_soft_reset = gmc_v8_0_pre_soft_reset,
1728 .soft_reset = gmc_v8_0_soft_reset,
1729 .post_soft_reset = gmc_v8_0_post_soft_reset,
1730 .set_clockgating_state = gmc_v8_0_set_clockgating_state,
1731 .set_powergating_state = gmc_v8_0_set_powergating_state,
1732 .get_clockgating_state = gmc_v8_0_get_clockgating_state,
1733};
1734
1735static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
1736 .flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
1737 .flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
1738 .emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
1739 .emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
1740 .set_prt = gmc_v8_0_set_prt,
1741 .get_vm_pde = gmc_v8_0_get_vm_pde,
1742 .get_vm_pte = gmc_v8_0_get_vm_pte
1743};
1744
1745static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
1746 .set = gmc_v8_0_vm_fault_interrupt_state,
1747 .process = gmc_v8_0_process_interrupt,
1748};
1749
1750static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
1751{
1752 adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
1753}
1754
1755static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
1756{
1757 adev->gmc.vm_fault.num_types = 1;
1758 adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
1759}
1760
1761const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
1762{
1763 .type = AMD_IP_BLOCK_TYPE_GMC,
1764 .major = 8,
1765 .minor = 0,
1766 .rev = 0,
1767 .funcs = &gmc_v8_0_ip_funcs,
1768};
1769
1770const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
1771{
1772 .type = AMD_IP_BLOCK_TYPE_GMC,
1773 .major = 8,
1774 .minor = 1,
1775 .rev = 0,
1776 .funcs = &gmc_v8_0_ip_funcs,
1777};
1778
1779const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
1780{
1781 .type = AMD_IP_BLOCK_TYPE_GMC,
1782 .major = 8,
1783 .minor = 5,
1784 .rev = 0,
1785 .funcs = &gmc_v8_0_ip_funcs,
1786};