/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

#include "amdgpu_atombios.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");

static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};

static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_stoney_common,
							ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}

static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
		    ASICID_IS_P31(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris11_k";
		else
			chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris10_k";
		else
			chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
			chip_name = "polaris12_k";
		} else {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
			/* Polaris12 32bit ASIC needs a special MC firmware */
			if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
				chip_name = "polaris12_32";
			else
				chip_name = "polaris12";
		}
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_VEGAM:
		return 0;
	default: BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;
	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because the hypervisor will do that
	 * for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data;
	int i, ucode_size, regs_size;
	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because the hypervisor will do that
	 * for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

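	/* Clear bit 6 of MC_SEQ_MISC0 before programming the ucode;
	 * presumably the load/train handshake, which is polled as the
	 * done flag (bit 7) in the wait loop below.
	 */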
	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}

static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

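	/* FB_BASE occupies the low 16 bits of MC_VM_FB_LOCATION and is
	 * expressed in 16 MB units, hence the shift by 24 below.
	 */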
	if (!amdgpu_sriov_vf(adev))
		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);

	if (amdgpu_sriov_vf(adev)) {
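		/* MC_VM_FB_LOCATION packs TOP in bits 31:16 and BASE in
		 * bits 15:0, both in 16 MB units.
		 */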
		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
		WREG32(mmMC_VM_FB_LOCATION, tmp);
		/* XXX double check these! */
		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	}

	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	int r;
	u32 tmp;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
			chansize = 64;
		} else {
			chansize = 32;
		}
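		/* NOOFCHAN encodes the number of DRAM channels; the bus
		 * width is channels * channel size.
		 */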
		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* size in MB on si */
	tmp = RREG32(mmCONFIG_MEMSIZE);
	/* some boards may have garbage in the upper 16 bits */
	if (tmp & 0xffff0000) {
		DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
		if (tmp & 0xffff)
			tmp &= 0xffff;
	}
	adev->gmc.mc_vram_size = tmp * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_POLARIS10: /* all engines support GPUVM */
		case CHIP_POLARIS11: /* all engines support GPUVM */
		case CHIP_POLARIS12: /* all engines support GPUVM */
		case CHIP_VEGAM: /* all engines support GPUVM */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_TONGA: /* UVD, VCE do not support GPUVM */
		case CHIP_FIJI: /* UVD, VCE do not support GPUVM */
		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
		case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/**
 * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: type of flush
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid;
	unsigned int tmp;

	if (amdgpu_in_reset(adev))
		return -EIO;

	for (vmid = 1; vmid < 16; vmid++) {
		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
		    (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
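			/* read back the response register, presumably to
			 * flush the posted invalidate request
			 */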
			RREG32(mmVM_INVALIDATE_RESPONSE);
			break;
		}
	}

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: type of flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
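	/* the page table base registers hold a page frame number,
	 * hence pd_addr >> 12
	 */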
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

/*
 * PTE format on VI:
 * 63:40 reserved
 * 39:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 reserved
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VI:
 * 63:59 block fragment size
 * 58:40 reserved
 * 39:1 physical base address of PTE
 * bits 5:1 must be 0.
 * 0 valid
 */

static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
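	/* the PDE must be 4K aligned and below the 40-bit MC address space */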
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
	*flags &= ~AMDGPU_PTE_PRT;
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	u32 tmp, field;
	int i;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
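	/* the hardware field is presumably encoded relative to the minimum
	 * block of 9 bits (one 512-entry page table), hence block_size - 9
	 */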
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	return 0;
}

static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: debug logging only - no functional use
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, pasid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gmc_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

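	/* 4 GB shared and 4 GB private apertures, presumably consumed by
	 * the KFD/HSA runtime for its address space carve-outs
	 */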
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);
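		/* active scanout size in bytes, assuming 4 bytes per pixel */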
		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}

	return size;
}

#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->num_vmhubs = 1;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if ((adev->asic_type == CHIP_FIJI) ||
		    (adev->asic_type == CHIP_VEGAM))
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to a minimum of 64GB ((1 << 24) 4k pages).
	 * Max GPUVM size for this ASIC generation is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		pr_warn("No suitable DMA available\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					  GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_tonga_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else if (adev->asic_type == CHIP_POLARIS11 ||
		   adev->asic_type == CHIP_POLARIS10 ||
		   adev->asic_type == CHIP_POLARIS12) {
		r = gmc_v8_0_polaris_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1)
		return amdgpu_gmc_vram_checking(adev);
	else
		return r;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}
	if (srbm_soft_reset) {
		adev->gmc.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->gmc.srbm_soft_reset = 0;
		return false;
	}
}

static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev);
	if (gmc_v8_0_wait_for_idle(adev)) {
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
	}

	return 0;
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->gmc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->gmc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev);
	return 0;
}

static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
			entry->src_id, entry->src_data[0], task_info.process_name,
			task_info.tgid, task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
						VM_CONTEXT1_PROTECTION_FAULT_STATUS,
						PROTECTIONS);

		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
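		/* PROTECTIONS bits: 0-2 fault type, 3 read, 4 write, 5 execute */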
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}

static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
							 state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
					   state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static void gmc_v8_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pde = gmc_v8_0_get_vm_pde,
	.get_vm_pte = gmc_v8_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v8_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};
117 case CHIP_FIJI:
118 amdgpu_program_register_sequence(adev,
119 fiji_mgcg_cgcg_init,
120 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
121 amdgpu_program_register_sequence(adev,
122 golden_settings_fiji_a10,
123 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
124 break;
125 case CHIP_TONGA:
126 amdgpu_program_register_sequence(adev,
127 tonga_mgcg_cgcg_init,
128 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
129 amdgpu_program_register_sequence(adev,
130 golden_settings_tonga_a11,
131 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
132 break;
133 case CHIP_POLARIS11:
134 case CHIP_POLARIS12:
135 amdgpu_program_register_sequence(adev,
136 golden_settings_polaris11_a11,
137 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
138 break;
139 case CHIP_POLARIS10:
140 amdgpu_program_register_sequence(adev,
141 golden_settings_polaris10_a11,
142 (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
143 break;
144 case CHIP_CARRIZO:
145 amdgpu_program_register_sequence(adev,
146 cz_mgcg_cgcg_init,
147 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
148 break;
149 case CHIP_STONEY:
150 amdgpu_program_register_sequence(adev,
151 stoney_mgcg_cgcg_init,
152 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
153 amdgpu_program_register_sequence(adev,
154 golden_settings_stoney_common,
155 (const u32)ARRAY_SIZE(golden_settings_stoney_common));
156 break;
157 default:
158 break;
159 }
160}
161
162static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
163 struct amdgpu_mode_mc_save *save)
164{
165 u32 blackout;
166
167 if (adev->mode_info.num_crtc)
168 amdgpu_display_stop_mc_access(adev, save);
169
170 gmc_v8_0_wait_for_idle(adev);
171
172 blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
173 if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
174 /* Block CPU access */
175 WREG32(mmBIF_FB_EN, 0);
176 /* blackout the MC */
177 blackout = REG_SET_FIELD(blackout,
178 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
179 WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
180 }
181 /* wait for the MC to settle */
182 udelay(100);
183}
184
185static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
186 struct amdgpu_mode_mc_save *save)
187{
188 u32 tmp;
189
190 /* unblackout the MC */
191 tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
192 tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
193 WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
194 /* allow CPU access */
195 tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
196 tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
197 WREG32(mmBIF_FB_EN, tmp);
198
199 if (adev->mode_info.num_crtc)
200 amdgpu_display_resume_mc_access(adev, save);
201}
202
203/**
204 * gmc_v8_0_init_microcode - load ucode images from disk
205 *
206 * @adev: amdgpu_device pointer
207 *
208 * Use the firmware interface to load the ucode images into
209 * the driver (not loaded into hw).
210 * Returns 0 on success, error on failure.
211 */
212static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
213{
214 const char *chip_name;
215 char fw_name[30];
216 int err;
217
218 DRM_DEBUG("\n");
219
220 switch (adev->asic_type) {
221 case CHIP_TONGA:
222 chip_name = "tonga";
223 break;
224 case CHIP_POLARIS11:
225 chip_name = "polaris11";
226 break;
227 case CHIP_POLARIS10:
228 chip_name = "polaris10";
229 break;
230 case CHIP_POLARIS12:
231 chip_name = "polaris12";
232 break;
233 case CHIP_FIJI:
234 case CHIP_CARRIZO:
235 case CHIP_STONEY:
236 return 0;
237 default: BUG();
238 }
239
240 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
241 err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
242 if (err)
243 goto out;
244 err = amdgpu_ucode_validate(adev->mc.fw);
245
246out:
247 if (err) {
248 printk(KERN_ERR
249 "mc: Failed to load firmware \"%s\"\n",
250 fw_name);
251 release_firmware(adev->mc.fw);
252 adev->mc.fw = NULL;
253 }
254 return err;
255}
256
257/**
258 * gmc_v8_0_mc_load_microcode - load MC ucode into the hw
259 *
260 * @adev: amdgpu_device pointer
261 *
262 * Load the GDDR MC ucode into the hw (CIK).
263 * Returns 0 on success, error on failure.
264 */
265static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
266{
267 const struct mc_firmware_header_v1_0 *hdr;
268 const __le32 *fw_data = NULL;
269 const __le32 *io_mc_regs = NULL;
270 u32 running;
271 int i, ucode_size, regs_size;
272
273 if (!adev->mc.fw)
274 return -EINVAL;
275
276 /* Skip MC ucode loading on SR-IOV capable boards.
277 * vbios does this for us in asic_init in that case.
278 * Skip MC ucode loading on VF, because hypervisor will do that
279 * for this adaptor.
280 */
281 if (amdgpu_sriov_bios(adev))
282 return 0;
283
284 hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
285 amdgpu_ucode_print_mc_hdr(&hdr->header);
286
287 adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
288 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
289 io_mc_regs = (const __le32 *)
290 (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
291 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
292 fw_data = (const __le32 *)
293 (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
294
295 running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
296
297 if (running == 0) {
298 /* reset the engine and set to writable */
299 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
300 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
301
302 /* load mc io regs */
303 for (i = 0; i < regs_size; i++) {
304 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
305 WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
306 }
307 /* load the MC ucode */
308 for (i = 0; i < ucode_size; i++)
309 WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
310
311 /* put the engine back into the active state */
312 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
313 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
314 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
315
316 /* wait for training to complete */
317 for (i = 0; i < adev->usec_timeout; i++) {
318 if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
319 MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
320 break;
321 udelay(1);
322 }
323 for (i = 0; i < adev->usec_timeout; i++) {
324 if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
325 MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
326 break;
327 udelay(1);
328 }
329 }
330
331 return 0;
332}
333
334static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
335 struct amdgpu_mc *mc)
336{
337 if (mc->mc_vram_size > 0xFFC0000000ULL) {
338 /* leave room for at least 1024M GTT */
339 dev_warn(adev->dev, "limiting VRAM\n");
340 mc->real_vram_size = 0xFFC0000000ULL;
341 mc->mc_vram_size = 0xFFC0000000ULL;
342 }
343 amdgpu_vram_location(adev, &adev->mc, 0);
344 adev->mc.gtt_base_align = 0;
345 amdgpu_gtt_location(adev, mc);
346}
347
348/**
349 * gmc_v8_0_mc_program - program the GPU memory controller
350 *
351 * @adev: amdgpu_device pointer
352 *
353 * Set the location of vram, gart, and AGP in the GPU's
354 * physical address space (CIK).
355 */
356static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
357{
358 struct amdgpu_mode_mc_save save;
359 u32 tmp;
360 int i, j;
361
362 /* Initialize HDP */
363 for (i = 0, j = 0; i < 32; i++, j += 0x6) {
364 WREG32((0xb05 + j), 0x00000000);
365 WREG32((0xb06 + j), 0x00000000);
366 WREG32((0xb07 + j), 0x00000000);
367 WREG32((0xb08 + j), 0x00000000);
368 WREG32((0xb09 + j), 0x00000000);
369 }
370 WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
371
372 if (adev->mode_info.num_crtc)
373 amdgpu_display_set_vga_render_state(adev, false);
374
375 gmc_v8_0_mc_stop(adev, &save);
376 if (gmc_v8_0_wait_for_idle((void *)adev)) {
377 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
378 }
379 /* Update configuration */
380 WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
381 adev->mc.vram_start >> 12);
382 WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
383 adev->mc.vram_end >> 12);
384 WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
385 adev->vram_scratch.gpu_addr >> 12);
386 tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
387 tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
388 WREG32(mmMC_VM_FB_LOCATION, tmp);
389 /* XXX double check these! */
390 WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
391 WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
392 WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
393 WREG32(mmMC_VM_AGP_BASE, 0);
394 WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
395 WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
396 if (gmc_v8_0_wait_for_idle((void *)adev)) {
397 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
398 }
399 gmc_v8_0_mc_resume(adev, &save);
400
401 WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
402
403 tmp = RREG32(mmHDP_MISC_CNTL);
404 tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
405 WREG32(mmHDP_MISC_CNTL, tmp);
406
407 tmp = RREG32(mmHDP_HOST_PATH_CNTL);
408 WREG32(mmHDP_HOST_PATH_CNTL, tmp);
409}
410
411/**
412 * gmc_v8_0_mc_init - initialize the memory controller driver params
413 *
414 * @adev: amdgpu_device pointer
415 *
416 * Look up the amount of vram, vram width, and decide how to place
417 * vram and gart within the GPU's physical address space (CIK).
418 * Returns 0 for success.
419 */
420static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
421{
422 u32 tmp;
423 int chansize, numchan;
424
425 /* Get VRAM informations */
426 tmp = RREG32(mmMC_ARB_RAMCFG);
427 if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
428 chansize = 64;
429 } else {
430 chansize = 32;
431 }
432 tmp = RREG32(mmMC_SHARED_CHMAP);
433 switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
434 case 0:
435 default:
436 numchan = 1;
437 break;
438 case 1:
439 numchan = 2;
440 break;
441 case 2:
442 numchan = 4;
443 break;
444 case 3:
445 numchan = 8;
446 break;
447 case 4:
448 numchan = 3;
449 break;
450 case 5:
451 numchan = 6;
452 break;
453 case 6:
454 numchan = 10;
455 break;
456 case 7:
457 numchan = 12;
458 break;
459 case 8:
460 numchan = 16;
461 break;
462 }
463 adev->mc.vram_width = numchan * chansize;
464 /* Could aper size report 0 ? */
465 adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
466 adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
467 /* size in MB on si */
468 adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
469 adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
470 adev->mc.visible_vram_size = adev->mc.aper_size;
471
472 /* In case the PCI BAR is larger than the actual amount of vram */
473 if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
474 adev->mc.visible_vram_size = adev->mc.real_vram_size;
475
476 /* unless the user had overridden it, set the gart
477 * size equal to the 1024 or vram, whichever is larger.
478 */
479 if (amdgpu_gart_size == -1)
480 adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
481 else
482 adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
483
484 gmc_v8_0_vram_gtt_location(adev, &adev->mc);
485
486 return 0;
487}
488
489/*
490 * GART
491 * VMID 0 is the physical GPU addresses as used by the kernel.
492 * VMIDs 1-15 are used for userspace clients and are handled
493 * by the amdgpu vm/hsa code.
494 */
495
496/**
497 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
498 *
499 * @adev: amdgpu_device pointer
500 * @vmid: vm instance to flush
501 *
502 * Flush the TLB for the requested page table (CIK).
503 */
504static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
505 uint32_t vmid)
506{
507 /* flush hdp cache */
508 WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
509
510 /* bits 0-15 are the VM contexts0-15 */
511 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
512}
513
514/**
515 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
516 *
517 * @adev: amdgpu_device pointer
518 * @cpu_pt_addr: cpu address of the page table
519 * @gpu_page_idx: entry in the page table to update
520 * @addr: dst addr to write into pte/pde
521 * @flags: access flags
522 *
523 * Update the page tables using the CPU.
524 */
525static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
526 void *cpu_pt_addr,
527 uint32_t gpu_page_idx,
528 uint64_t addr,
529 uint32_t flags)
530{
531 void __iomem *ptr = (void *)cpu_pt_addr;
532 uint64_t value;
533
534 /*
535 * PTE format on VI:
536 * 63:40 reserved
537 * 39:12 4k physical page base address
538 * 11:7 fragment
539 * 6 write
540 * 5 read
541 * 4 exe
542 * 3 reserved
543 * 2 snooped
544 * 1 system
545 * 0 valid
546 *
547 * PDE format on VI:
548 * 63:59 block fragment size
549 * 58:40 reserved
550 * 39:1 physical base address of PTE
551 * bits 5:1 must be 0.
552 * 0 valid
553 */
554 value = addr & 0x000000FFFFFFF000ULL;
555 value |= flags;
556 writeq(value, ptr + (gpu_page_idx * 8));
557
558 return 0;
559}
560
561/**
562 * gmc_v8_0_set_fault_enable_default - update VM fault handling
563 *
564 * @adev: amdgpu_device pointer
565 * @value: true redirects VM faults to the default page
566 */
567static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
568 bool value)
569{
570 u32 tmp;
571
572 tmp = RREG32(mmVM_CONTEXT1_CNTL);
573 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
574 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
575 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
576 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
577 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
578 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
579 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
580 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
581 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
582 READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
583 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
584 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
585 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
586 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
587 WREG32(mmVM_CONTEXT1_CNTL, tmp);
588}
589
590/**
591 * gmc_v8_0_gart_enable - gart enable
592 *
593 * @adev: amdgpu_device pointer
594 *
595 * This sets up the TLBs, programs the page tables for VMID0,
596 * sets up the hw for VMIDs 1-15 which are allocated on
597 * demand, and sets up the global locations for the LDS, GDS,
598 * and GPUVM for FSA64 clients (CIK).
599 * Returns 0 for success, errors for failure.
600 */
601static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
602{
603 int r, i;
604 u32 tmp;
605
606 if (adev->gart.robj == NULL) {
607 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
608 return -EINVAL;
609 }
610 r = amdgpu_gart_table_vram_pin(adev);
611 if (r)
612 return r;
613 /* Setup TLB control */
614 tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
615 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
616 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
617 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
618 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
619 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
620 WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
621 /* Setup L2 cache */
622 tmp = RREG32(mmVM_L2_CNTL);
623 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
624 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
625 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
626 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
627 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
628 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
629 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
630 WREG32(mmVM_L2_CNTL, tmp);
631 tmp = RREG32(mmVM_L2_CNTL2);
632 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
633 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
634 WREG32(mmVM_L2_CNTL2, tmp);
635 tmp = RREG32(mmVM_L2_CNTL3);
636 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
637 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
638 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
639 WREG32(mmVM_L2_CNTL3, tmp);
640 /* XXX: set to enable PTE/PDE in system memory */
641 tmp = RREG32(mmVM_L2_CNTL4);
642 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
643 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
644 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
645 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
646 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
647 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
648 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
649 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
650 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
651 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
652 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
653 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
654 WREG32(mmVM_L2_CNTL4, tmp);
655 /* setup context0 */
656 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
657 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
658 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
659 WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
660 (u32)(adev->dummy_page.addr >> 12));
661 WREG32(mmVM_CONTEXT0_CNTL2, 0);
662 tmp = RREG32(mmVM_CONTEXT0_CNTL);
663 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
664 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
665 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
666 WREG32(mmVM_CONTEXT0_CNTL, tmp);
667
668 WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
669 WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
670 WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);
671
672 /* empty context1-15 */
673 /* FIXME start with 4G, once using 2 level pt switch to full
674 * vm size space
675 */
676 /* set vm size, must be a multiple of 4 */
677 WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
678 WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
679 for (i = 1; i < 16; i++) {
680 if (i < 8)
681 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
682 adev->gart.table_addr >> 12);
683 else
684 WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
685 adev->gart.table_addr >> 12);
686 }
687
688 /* enable context1-15 */
689 WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
690 (u32)(adev->dummy_page.addr >> 12));
691 WREG32(mmVM_CONTEXT1_CNTL2, 4);
692 tmp = RREG32(mmVM_CONTEXT1_CNTL);
693 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
694 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
695 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
696 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
697 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
698 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
699 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
700 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
701 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
702 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
703 amdgpu_vm_block_size - 9);
704 WREG32(mmVM_CONTEXT1_CNTL, tmp);
705 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
706 gmc_v8_0_set_fault_enable_default(adev, false);
707 else
708 gmc_v8_0_set_fault_enable_default(adev, true);
709
710 gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
711 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
712 (unsigned)(adev->mc.gtt_size >> 20),
713 (unsigned long long)adev->gart.table_addr);
714 adev->gart.ready = true;
715 return 0;
716}
717
718static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
719{
720 int r;
721
722 if (adev->gart.robj) {
723 WARN(1, "R600 PCIE GART already initialized\n");
724 return 0;
725 }
726 /* Initialize common gart structure */
727 r = amdgpu_gart_init(adev);
728 if (r)
729 return r;
730 adev->gart.table_size = adev->gart.num_gpu_pages * 8;
731 return amdgpu_gart_table_vram_alloc(adev);
732}
733
734/**
735 * gmc_v8_0_gart_disable - gart disable
736 *
737 * @adev: amdgpu_device pointer
738 *
739 * This disables all VM page table (CIK).
740 */
741static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
742{
743 u32 tmp;
744
745 /* Disable all tables */
746 WREG32(mmVM_CONTEXT0_CNTL, 0);
747 WREG32(mmVM_CONTEXT1_CNTL, 0);
748 /* Setup TLB control */
749 tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
750 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
751 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
752 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
753 WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
754 /* Setup L2 cache */
755 tmp = RREG32(mmVM_L2_CNTL);
756 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
757 WREG32(mmVM_L2_CNTL, tmp);
758 WREG32(mmVM_L2_CNTL2, 0);
759 amdgpu_gart_table_vram_unpin(adev);
760}
761
762/**
763 * gmc_v8_0_gart_fini - vm fini callback
764 *
765 * @adev: amdgpu_device pointer
766 *
767 * Tears down the driver GART/VM setup (CIK).
768 */
769static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
770{
771 amdgpu_gart_table_vram_free(adev);
772 amdgpu_gart_fini(adev);
773}
774
775/*
776 * vm
777 * VMID 0 is the physical GPU addresses as used by the kernel.
778 * VMIDs 1-15 are used for userspace clients and are handled
779 * by the amdgpu vm/hsa code.
780 */
781/**
782 * gmc_v8_0_vm_init - cik vm init callback
783 *
784 * @adev: amdgpu_device pointer
785 *
786 * Inits cik specific vm parameters (number of VMs, base of vram for
787 * VMIDs 1-15) (CIK).
788 * Returns 0 for success.
789 */
790static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
791{
792 /*
793 * number of VMs
794 * VMID 0 is reserved for System
795 * amdgpu graphics/compute will use VMIDs 1-7
796 * amdkfd will use VMIDs 8-15
797 */
798 adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
799 amdgpu_vm_manager_init(adev);
800
801 /* base offset of vram pages */
802 if (adev->flags & AMD_IS_APU) {
803 u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
804 tmp <<= 22;
805 adev->vm_manager.vram_base_offset = tmp;
806 } else
807 adev->vm_manager.vram_base_offset = 0;
808
809 return 0;
810}
811
812/**
813 * gmc_v8_0_vm_fini - cik vm fini callback
814 *
815 * @adev: amdgpu_device pointer
816 *
817 * Tear down any asic specific VM setup (CIK).
818 */
819static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
820{
821}
822
823/**
824 * gmc_v8_0_vm_decode_fault - print human readable fault info
825 *
826 * @adev: amdgpu_device pointer
827 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
828 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
829 *
830 * Print human readable fault information (CIK).
831 */
832static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
833 u32 status, u32 addr, u32 mc_client)
834{
835 u32 mc_id;
836 u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
837 u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
838 PROTECTIONS);
839 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
840 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
841
842 mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
843 MEMORY_CLIENT_ID);
844
845 dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
846 protections, vmid, addr,
847 REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
848 MEMORY_CLIENT_RW) ?
849 "write" : "read", block, mc_client, mc_id);
850}
851
852static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
853{
854 switch (mc_seq_vram_type) {
855 case MC_SEQ_MISC0__MT__GDDR1:
856 return AMDGPU_VRAM_TYPE_GDDR1;
857 case MC_SEQ_MISC0__MT__DDR2:
858 return AMDGPU_VRAM_TYPE_DDR2;
859 case MC_SEQ_MISC0__MT__GDDR3:
860 return AMDGPU_VRAM_TYPE_GDDR3;
861 case MC_SEQ_MISC0__MT__GDDR4:
862 return AMDGPU_VRAM_TYPE_GDDR4;
863 case MC_SEQ_MISC0__MT__GDDR5:
864 return AMDGPU_VRAM_TYPE_GDDR5;
865 case MC_SEQ_MISC0__MT__HBM:
866 return AMDGPU_VRAM_TYPE_HBM;
867 case MC_SEQ_MISC0__MT__DDR3:
868 return AMDGPU_VRAM_TYPE_DDR3;
869 default:
870 return AMDGPU_VRAM_TYPE_UNKNOWN;
871 }
872}
873
874static int gmc_v8_0_early_init(void *handle)
875{
876 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
877
878 gmc_v8_0_set_gart_funcs(adev);
879 gmc_v8_0_set_irq_funcs(adev);
880
881 return 0;
882}
883
884static int gmc_v8_0_late_init(void *handle)
885{
886 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
887
888 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
889 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
890 else
891 return 0;
892}
893
894#define mmMC_SEQ_MISC0_FIJI 0xA71
895
896static int gmc_v8_0_sw_init(void *handle)
897{
898 int r;
899 int dma_bits;
900 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
901
902 if (adev->flags & AMD_IS_APU) {
903 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
904 } else {
905 u32 tmp;
906
907 if (adev->asic_type == CHIP_FIJI)
908 tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
909 else
910 tmp = RREG32(mmMC_SEQ_MISC0);
911 tmp &= MC_SEQ_MISC0__MT__MASK;
912 adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
913 }
914
915 r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
916 if (r)
917 return r;
918
919 r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
920 if (r)
921 return r;
922
923 /* Adjust VM size here.
924 * Currently set to 4GB ((1 << 20) 4k pages).
925 * Max GPUVM size for cayman and SI is 40 bits.
926 */
927 adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
928
929 /* Set the internal MC address mask
930 * This is the max address of the GPU's
931 * internal address space.
932 */
933 adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
934
935 /* set DMA mask + need_dma32 flags.
936 * PCIE - can handle 40-bits.
937 * IGP - can handle 40-bits
938 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
939 */
940 adev->need_dma32 = false;
941 dma_bits = adev->need_dma32 ? 32 : 40;
942 r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
943 if (r) {
944 adev->need_dma32 = true;
945 dma_bits = 32;
946 printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
947 }
948 r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
949 if (r) {
950 pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
951 printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
952 }
953
954 r = gmc_v8_0_init_microcode(adev);
955 if (r) {
956 DRM_ERROR("Failed to load mc firmware!\n");
957 return r;
958 }
959
960 r = gmc_v8_0_mc_init(adev);
961 if (r)
962 return r;
963
964 /* Memory manager */
965 r = amdgpu_bo_init(adev);
966 if (r)
967 return r;
968
969 r = gmc_v8_0_gart_init(adev);
970 if (r)
971 return r;
972
973 if (!adev->vm_manager.enabled) {
974 r = gmc_v8_0_vm_init(adev);
975 if (r) {
976 dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
977 return r;
978 }
979 adev->vm_manager.enabled = true;
980 }
981
982 return r;
983}
984
985static int gmc_v8_0_sw_fini(void *handle)
986{
987 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
988
989 if (adev->vm_manager.enabled) {
990 amdgpu_vm_manager_fini(adev);
991 gmc_v8_0_vm_fini(adev);
992 adev->vm_manager.enabled = false;
993 }
994 gmc_v8_0_gart_fini(adev);
995 amdgpu_gem_force_release(adev);
996 amdgpu_bo_fini(adev);
997
998 return 0;
999}
1000
1001static int gmc_v8_0_hw_init(void *handle)
1002{
1003 int r;
1004 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1005
1006 gmc_v8_0_init_golden_registers(adev);
1007
1008 gmc_v8_0_mc_program(adev);
1009
1010 if (adev->asic_type == CHIP_TONGA) {
1011 r = gmc_v8_0_mc_load_microcode(adev);
1012 if (r) {
1013 DRM_ERROR("Failed to load MC firmware!\n");
1014 return r;
1015 }
1016 }
1017
1018 r = gmc_v8_0_gart_enable(adev);
1019 if (r)
1020 return r;
1021
1022 return r;
1023}
1024
1025static int gmc_v8_0_hw_fini(void *handle)
1026{
1027 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1028
1029 amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
1030 gmc_v8_0_gart_disable(adev);
1031
1032 return 0;
1033}
1034
1035static int gmc_v8_0_suspend(void *handle)
1036{
1037 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1038
1039 if (adev->vm_manager.enabled) {
1040 gmc_v8_0_vm_fini(adev);
1041 adev->vm_manager.enabled = false;
1042 }
1043 gmc_v8_0_hw_fini(adev);
1044
1045 return 0;
1046}
1047
1048static int gmc_v8_0_resume(void *handle)
1049{
1050 int r;
1051 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1052
1053 r = gmc_v8_0_hw_init(adev);
1054 if (r)
1055 return r;
1056
1057 if (!adev->vm_manager.enabled) {
1058 r = gmc_v8_0_vm_init(adev);
1059 if (r) {
1060 dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
1061 return r;
1062 }
1063 adev->vm_manager.enabled = true;
1064 }
1065
1066 return r;
1067}
1068
1069static bool gmc_v8_0_is_idle(void *handle)
1070{
1071 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1072 u32 tmp = RREG32(mmSRBM_STATUS);
1073
1074 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1075 SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
1076 return false;
1077
1078 return true;
1079}
1080
1081static int gmc_v8_0_wait_for_idle(void *handle)
1082{
1083 unsigned i;
1084 u32 tmp;
1085 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1086
1087 for (i = 0; i < adev->usec_timeout; i++) {
1088 /* read MC_STATUS */
1089 tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
1090 SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1091 SRBM_STATUS__MCC_BUSY_MASK |
1092 SRBM_STATUS__MCD_BUSY_MASK |
1093 SRBM_STATUS__VMC_BUSY_MASK |
1094 SRBM_STATUS__VMC1_BUSY_MASK);
1095 if (!tmp)
1096 return 0;
1097 udelay(1);
1098 }
1099 return -ETIMEDOUT;
1100
1101}
1102
1103static bool gmc_v8_0_check_soft_reset(void *handle)
1104{
1105 u32 srbm_soft_reset = 0;
1106 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1107 u32 tmp = RREG32(mmSRBM_STATUS);
1108
1109 if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
1110 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1111 SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
1112
1113 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1114 SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1115 if (!(adev->flags & AMD_IS_APU))
1116 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1117 SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1118 }
1119 if (srbm_soft_reset) {
1120 adev->mc.srbm_soft_reset = srbm_soft_reset;
1121 return true;
1122 } else {
1123 adev->mc.srbm_soft_reset = 0;
1124 return false;
1125 }
1126}
1127
1128static int gmc_v8_0_pre_soft_reset(void *handle)
1129{
1130 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1131
1132 if (!adev->mc.srbm_soft_reset)
1133 return 0;
1134
1135 gmc_v8_0_mc_stop(adev, &adev->mc.save);
1136 if (gmc_v8_0_wait_for_idle(adev)) {
1137 dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
1138 }
1139
1140 return 0;
1141}
1142
1143static int gmc_v8_0_soft_reset(void *handle)
1144{
1145 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1146 u32 srbm_soft_reset;
1147
1148 if (!adev->mc.srbm_soft_reset)
1149 return 0;
1150 srbm_soft_reset = adev->mc.srbm_soft_reset;
1151
1152 if (srbm_soft_reset) {
1153 u32 tmp;
1154
1155 tmp = RREG32(mmSRBM_SOFT_RESET);
1156 tmp |= srbm_soft_reset;
1157 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1158 WREG32(mmSRBM_SOFT_RESET, tmp);
1159 tmp = RREG32(mmSRBM_SOFT_RESET);
1160
1161 udelay(50);
1162
1163 tmp &= ~srbm_soft_reset;
1164 WREG32(mmSRBM_SOFT_RESET, tmp);
1165 tmp = RREG32(mmSRBM_SOFT_RESET);
1166
1167 /* Wait a little for things to settle down */
1168 udelay(50);
1169 }
1170
1171 return 0;
1172}
1173
1174static int gmc_v8_0_post_soft_reset(void *handle)
1175{
1176 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1177
1178 if (!adev->mc.srbm_soft_reset)
1179 return 0;
1180
1181 gmc_v8_0_mc_resume(adev, &adev->mc.save);
1182 return 0;
1183}
1184
1185static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
1186 struct amdgpu_irq_src *src,
1187 unsigned type,
1188 enum amdgpu_interrupt_state state)
1189{
1190 u32 tmp;
1191 u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1192 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1193 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1194 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1195 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1196 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1197 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1198
1199 switch (state) {
1200 case AMDGPU_IRQ_STATE_DISABLE:
1201 /* system context */
1202 tmp = RREG32(mmVM_CONTEXT0_CNTL);
1203 tmp &= ~bits;
1204 WREG32(mmVM_CONTEXT0_CNTL, tmp);
1205 /* VMs */
1206 tmp = RREG32(mmVM_CONTEXT1_CNTL);
1207 tmp &= ~bits;
1208 WREG32(mmVM_CONTEXT1_CNTL, tmp);
1209 break;
1210 case AMDGPU_IRQ_STATE_ENABLE:
1211 /* system context */
1212 tmp = RREG32(mmVM_CONTEXT0_CNTL);
1213 tmp |= bits;
1214 WREG32(mmVM_CONTEXT0_CNTL, tmp);
1215 /* VMs */
1216 tmp = RREG32(mmVM_CONTEXT1_CNTL);
1217 tmp |= bits;
1218 WREG32(mmVM_CONTEXT1_CNTL, tmp);
1219 break;
1220 default:
1221 break;
1222 }
1223
1224 return 0;
1225}
1226
1227static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1228 struct amdgpu_irq_src *source,
1229 struct amdgpu_iv_entry *entry)
1230{
1231 u32 addr, status, mc_client;
1232
1233 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1234 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1235 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1236 /* reset addr and status */
1237 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1238
1239 if (!addr && !status)
1240 return 0;
1241
1242 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1243 gmc_v8_0_set_fault_enable_default(adev, false);
1244
1245 if (printk_ratelimit()) {
1246 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1247 entry->src_id, entry->src_data);
1248 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
1249 addr);
1250 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1251 status);
1252 gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
1253 }
1254
1255 return 0;
1256}
1257
1258static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
1259 bool enable)
1260{
1261 uint32_t data;
1262
1263 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
1264 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1265 data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1266 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1267
1268 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1269 data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1270 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1271
1272 data = RREG32(mmMC_HUB_MISC_VM_CG);
1273 data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
1274 WREG32(mmMC_HUB_MISC_VM_CG, data);
1275
1276 data = RREG32(mmMC_XPB_CLK_GAT);
1277 data |= MC_XPB_CLK_GAT__ENABLE_MASK;
1278 WREG32(mmMC_XPB_CLK_GAT, data);
1279
1280 data = RREG32(mmATC_MISC_CG);
1281 data |= ATC_MISC_CG__ENABLE_MASK;
1282 WREG32(mmATC_MISC_CG, data);
1283
1284 data = RREG32(mmMC_CITF_MISC_WR_CG);
1285 data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
1286 WREG32(mmMC_CITF_MISC_WR_CG, data);
1287
1288 data = RREG32(mmMC_CITF_MISC_RD_CG);
1289 data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
1290 WREG32(mmMC_CITF_MISC_RD_CG, data);
1291
1292 data = RREG32(mmMC_CITF_MISC_VM_CG);
1293 data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
1294 WREG32(mmMC_CITF_MISC_VM_CG, data);
1295
1296 data = RREG32(mmVM_L2_CG);
1297 data |= VM_L2_CG__ENABLE_MASK;
1298 WREG32(mmVM_L2_CG, data);
1299 } else {
1300 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1301 data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1302 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1303
1304 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1305 data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1306 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1307
1308 data = RREG32(mmMC_HUB_MISC_VM_CG);
1309 data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
1310 WREG32(mmMC_HUB_MISC_VM_CG, data);
1311
1312 data = RREG32(mmMC_XPB_CLK_GAT);
1313 data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
1314 WREG32(mmMC_XPB_CLK_GAT, data);
1315
1316 data = RREG32(mmATC_MISC_CG);
1317 data &= ~ATC_MISC_CG__ENABLE_MASK;
1318 WREG32(mmATC_MISC_CG, data);
1319
1320 data = RREG32(mmMC_CITF_MISC_WR_CG);
1321 data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
1322 WREG32(mmMC_CITF_MISC_WR_CG, data);
1323
1324 data = RREG32(mmMC_CITF_MISC_RD_CG);
1325 data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
1326 WREG32(mmMC_CITF_MISC_RD_CG, data);
1327
1328 data = RREG32(mmMC_CITF_MISC_VM_CG);
1329 data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
1330 WREG32(mmMC_CITF_MISC_VM_CG, data);
1331
1332 data = RREG32(mmVM_L2_CG);
1333 data &= ~VM_L2_CG__ENABLE_MASK;
1334 WREG32(mmVM_L2_CG, data);
1335 }
1336}
1337
1338static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
1339 bool enable)
1340{
1341 uint32_t data;
1342
1343 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
1344 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1345 data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1346 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1347
1348 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1349 data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1350 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1351
1352 data = RREG32(mmMC_HUB_MISC_VM_CG);
1353 data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1354 WREG32(mmMC_HUB_MISC_VM_CG, data);
1355
1356 data = RREG32(mmMC_XPB_CLK_GAT);
1357 data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1358 WREG32(mmMC_XPB_CLK_GAT, data);
1359
1360 data = RREG32(mmATC_MISC_CG);
1361 data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1362 WREG32(mmATC_MISC_CG, data);
1363
1364 data = RREG32(mmMC_CITF_MISC_WR_CG);
1365 data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1366 WREG32(mmMC_CITF_MISC_WR_CG, data);
1367
1368 data = RREG32(mmMC_CITF_MISC_RD_CG);
1369 data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1370 WREG32(mmMC_CITF_MISC_RD_CG, data);
1371
1372 data = RREG32(mmMC_CITF_MISC_VM_CG);
1373 data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1374 WREG32(mmMC_CITF_MISC_VM_CG, data);
1375
1376 data = RREG32(mmVM_L2_CG);
1377 data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
1378 WREG32(mmVM_L2_CG, data);
1379 } else {
1380 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1381 data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1382 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1383
1384 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1385 data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1386 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1387
1388 data = RREG32(mmMC_HUB_MISC_VM_CG);
1389 data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1390 WREG32(mmMC_HUB_MISC_VM_CG, data);
1391
1392 data = RREG32(mmMC_XPB_CLK_GAT);
1393 data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1394 WREG32(mmMC_XPB_CLK_GAT, data);
1395
1396 data = RREG32(mmATC_MISC_CG);
1397 data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1398 WREG32(mmATC_MISC_CG, data);
1399
1400 data = RREG32(mmMC_CITF_MISC_WR_CG);
1401 data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1402 WREG32(mmMC_CITF_MISC_WR_CG, data);
1403
1404 data = RREG32(mmMC_CITF_MISC_RD_CG);
1405 data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1406 WREG32(mmMC_CITF_MISC_RD_CG, data);
1407
1408 data = RREG32(mmMC_CITF_MISC_VM_CG);
1409 data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1410 WREG32(mmMC_CITF_MISC_VM_CG, data);
1411
1412 data = RREG32(mmVM_L2_CG);
1413 data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
1414 WREG32(mmVM_L2_CG, data);
1415 }
1416}
1417
1418static int gmc_v8_0_set_clockgating_state(void *handle,
1419 enum amd_clockgating_state state)
1420{
1421 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1422
1423 switch (adev->asic_type) {
1424 case CHIP_FIJI:
1425 fiji_update_mc_medium_grain_clock_gating(adev,
1426 state == AMD_CG_STATE_GATE ? true : false);
1427 fiji_update_mc_light_sleep(adev,
1428 state == AMD_CG_STATE_GATE ? true : false);
1429 break;
1430 default:
1431 break;
1432 }
1433 return 0;
1434}
1435
1436static int gmc_v8_0_set_powergating_state(void *handle,
1437 enum amd_powergating_state state)
1438{
1439 return 0;
1440}
1441
1442static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
1443 .name = "gmc_v8_0",
1444 .early_init = gmc_v8_0_early_init,
1445 .late_init = gmc_v8_0_late_init,
1446 .sw_init = gmc_v8_0_sw_init,
1447 .sw_fini = gmc_v8_0_sw_fini,
1448 .hw_init = gmc_v8_0_hw_init,
1449 .hw_fini = gmc_v8_0_hw_fini,
1450 .suspend = gmc_v8_0_suspend,
1451 .resume = gmc_v8_0_resume,
1452 .is_idle = gmc_v8_0_is_idle,
1453 .wait_for_idle = gmc_v8_0_wait_for_idle,
1454 .check_soft_reset = gmc_v8_0_check_soft_reset,
1455 .pre_soft_reset = gmc_v8_0_pre_soft_reset,
1456 .soft_reset = gmc_v8_0_soft_reset,
1457 .post_soft_reset = gmc_v8_0_post_soft_reset,
1458 .set_clockgating_state = gmc_v8_0_set_clockgating_state,
1459 .set_powergating_state = gmc_v8_0_set_powergating_state,
1460};
1461
1462static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
1463 .flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
1464 .set_pte_pde = gmc_v8_0_gart_set_pte_pde,
1465};
1466
1467static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
1468 .set = gmc_v8_0_vm_fault_interrupt_state,
1469 .process = gmc_v8_0_process_interrupt,
1470};
1471
1472static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
1473{
1474 if (adev->gart.gart_funcs == NULL)
1475 adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
1476}
1477
1478static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
1479{
1480 adev->mc.vm_fault.num_types = 1;
1481 adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
1482}
1483
1484const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
1485{
1486 .type = AMD_IP_BLOCK_TYPE_GMC,
1487 .major = 8,
1488 .minor = 0,
1489 .rev = 0,
1490 .funcs = &gmc_v8_0_ip_funcs,
1491};
1492
1493const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
1494{
1495 .type = AMD_IP_BLOCK_TYPE_GMC,
1496 .major = 8,
1497 .minor = 1,
1498 .rev = 0,
1499 .funcs = &gmc_v8_0_ip_funcs,
1500};
1501
1502const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
1503{
1504 .type = AMD_IP_BLOCK_TYPE_GMC,
1505 .major = 8,
1506 .minor = 5,
1507 .rev = 0,
1508 .funcs = &gmc_v8_0_ip_funcs,
1509};