/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");

static const u32 golden_settings_iceland_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

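/**
 * gmc_v7_0_init_golden_registers - apply golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Program the per-asic golden register values; currently only
 * Topaz (Iceland) has any to apply.
 */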
static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_iceland_a11,
						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

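/**
 * gmc_v7_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 * @save: mode info save struct for the display hardware
 *
 * Stop MC access from the displays, wait for the MC to idle, block
 * CPU framebuffer access and put the MC into blackout mode so it
 * can be reprogrammed safely.
 */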
static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
			     struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	gmc_v7_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

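/**
 * gmc_v7_0_mc_resume - restart the memory controller
 *
 * @adev: amdgpu_device pointer
 * @save: mode info save struct for the display hardware
 *
 * Take the MC out of blackout mode, re-enable CPU framebuffer
 * access and resume MC access from the displays.
 */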
static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
			       struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

/**
 * gmc_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		return 0;
	default:
		BUG();
	}

	if (adev->asic_type == CHIP_TOPAZ)
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);

	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "cik_mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

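/**
 * gmc_v7_0_vram_gtt_location - place VRAM and GTT in the address space
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller structure holding the sizes
 *
 * Clamp oversized VRAM to leave room for the GTT, then pick base
 * addresses for the VRAM and GTT apertures.
 */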
static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v7_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v7_0_mc_stop(adev, &save);
	if (gmc_v7_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v7_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	gmc_v7_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v7_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
		chansize = 64;
	else
		chansize = 32;
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on si */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* Unless the user has overridden it, set the GART size to
	 * 1024 MB or the VRAM size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v7_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	value = addr & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

/**
 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v7_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v7_0_set_fault_enable_default(adev, false);
	else
		gmc_v7_0_set_fault_enable_default(adev, true);

	if (adev->asic_type == CHIP_KAVERI) {
		tmp = RREG32(mmCHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(mmCHUB_CONTROL, tmp);
	}

	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

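/**
 * gmc_v7_0_gart_init - allocate the GART table
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the common gart structure and allocate the page
 * table in VRAM (one 8-byte PTE per GART page).
 */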
static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v7_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v7_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v7_0_vm_init - cik vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits cik specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v7_0_vm_fini - cik vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (CIK).
 */
static void gmc_v7_0_vm_fini(struct amdgpu_device *adev)
{
}

/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

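/* MC registers and their matching light sleep / clock gating enable masks */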
static const u32 mc_cg_registers[] = {
	mmMC_HUB_MISC_HUB_CG,
	mmMC_HUB_MISC_SIP_CG,
	mmMC_HUB_MISC_VM_CG,
	mmMC_XPB_CLK_GAT,
	mmATC_MISC_CG,
	mmMC_CITF_MISC_WR_CG,
	mmMC_CITF_MISC_RD_CG,
	mmMC_CITF_MISC_VM_CG,
	mmVM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};

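/* enable/disable MC memory light sleep based on the MC_LS cg flag */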
static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

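/* enable/disable MC medium grain clock gating based on the MC_MGCG cg flag */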
static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

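/* enable/disable the BIF PCIE memory light sleep states via PCIE_CNTL2 */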
static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

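/* enable/disable HDP medium grain clock gating (note: the control bit is a disable) */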
static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

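/* enable/disable HDP memory light sleep */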
static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

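/* map the MC_SEQ_MISC0 memory type field to an AMDGPU_VRAM_TYPE_* value */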
static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_set_gart_funcs(adev);
	gmc_v7_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
	else
		return 0;
}

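/* sw_init: determine the vram type, register the VM fault interrupt
 * sources, set the DMA mask, load the MC firmware and set up the
 * memory manager, GART and VM manager.
 */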
static int gmc_v7_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v7_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v7_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v7_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static int gmc_v7_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v7_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v7_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_init_golden_registers(adev);

	gmc_v7_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v7_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v7_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v7_0_gart_disable(adev);

	return 0;
}

static int gmc_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v7_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v7_0_hw_fini(adev);

	return 0;
}

static int gmc_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v7_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v7_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

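/* soft reset the VMC and/or MC blocks if the SRBM status shows them busy */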
static int gmc_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v7_0_mc_stop(adev, &save);
		if (gmc_v7_0_wait_for_idle((void *)adev))
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v7_0_mc_resume(adev, &save);
		udelay(50);
	}

	return 0;
}

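/* enable or disable the VM fault interrupt sources for contexts 0 and 1-15 */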
static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

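/* handle a VM protection fault interrupt: read and clear the fault
 * registers and print a rate-limited decode of the fault.
 */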
static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v7_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
	}

	return 0;
}

static int gmc_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	if (!(adev->flags & AMD_IS_APU)) {
		gmc_v7_0_enable_mc_mgcg(adev, gate);
		gmc_v7_0_enable_mc_ls(adev, gate);
	}
	gmc_v7_0_enable_bif_mgls(adev, gate);
	gmc_v7_0_enable_hdp_mgcg(adev, gate);
	gmc_v7_0_enable_hdp_ls(adev, gate);

	return 0;
}

static int gmc_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
	.name = "gmc_v7_0",
	.early_init = gmc_v7_0_early_init,
	.late_init = gmc_v7_0_late_init,
	.sw_init = gmc_v7_0_sw_init,
	.sw_fini = gmc_v7_0_sw_fini,
	.hw_init = gmc_v7_0_hw_init,
	.hw_fini = gmc_v7_0_hw_fini,
	.suspend = gmc_v7_0_suspend,
	.resume = gmc_v7_0_resume,
	.is_idle = gmc_v7_0_is_idle,
	.wait_for_idle = gmc_v7_0_wait_for_idle,
	.soft_reset = gmc_v7_0_soft_reset,
	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
	.set_powergating_state = gmc_v7_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
	.set = gmc_v7_0_vm_fault_interrupt_state,
	.process = gmc_v7_0_process_interrupt,
};

static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
}

static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 4,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include <linux/module.h>
26#include <linux/pci.h>
27
28#include <drm/drm_cache.h>
29#include "amdgpu.h"
30#include "cikd.h"
31#include "cik.h"
32#include "gmc_v7_0.h"
33#include "amdgpu_ucode.h"
34#include "amdgpu_amdkfd.h"
35#include "amdgpu_gem.h"
36
37#include "bif/bif_4_1_d.h"
38#include "bif/bif_4_1_sh_mask.h"
39
40#include "gmc/gmc_7_1_d.h"
41#include "gmc/gmc_7_1_sh_mask.h"
42
43#include "oss/oss_2_0_d.h"
44#include "oss/oss_2_0_sh_mask.h"
45
46#include "dce/dce_8_0_d.h"
47#include "dce/dce_8_0_sh_mask.h"
48
49#include "amdgpu_atombios.h"
50
51#include "ivsrcid/ivsrcid_vislands30.h"
52
53static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
54static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
55static int gmc_v7_0_wait_for_idle(void *handle);
56
57MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
58MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
59MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
60
61static const u32 golden_settings_iceland_a11[] = {
62 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
63 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
64 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
65 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
66};
67
68static const u32 iceland_mgcg_cgcg_init[] = {
69 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
70};
71
72static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
73{
74 switch (adev->asic_type) {
75 case CHIP_TOPAZ:
76 amdgpu_device_program_register_sequence(adev,
77 iceland_mgcg_cgcg_init,
78 ARRAY_SIZE(iceland_mgcg_cgcg_init));
79 amdgpu_device_program_register_sequence(adev,
80 golden_settings_iceland_a11,
81 ARRAY_SIZE(golden_settings_iceland_a11));
82 break;
83 default:
84 break;
85 }
86}
87
88static void gmc_v7_0_mc_stop(struct amdgpu_device *adev)
89{
90 u32 blackout;
91
92 gmc_v7_0_wait_for_idle((void *)adev);
93
94 blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
95 if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
96 /* Block CPU access */
97 WREG32(mmBIF_FB_EN, 0);
98 /* blackout the MC */
99 blackout = REG_SET_FIELD(blackout,
100 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
101 WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
102 }
103 /* wait for the MC to settle */
104 udelay(100);
105}
106
107static void gmc_v7_0_mc_resume(struct amdgpu_device *adev)
108{
109 u32 tmp;
110
111 /* unblackout the MC */
112 tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
113 tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
114 WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
115 /* allow CPU access */
116 tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
117 tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
118 WREG32(mmBIF_FB_EN, tmp);
119}
120
121/**
122 * gmc_v7_0_init_microcode - load ucode images from disk
123 *
124 * @adev: amdgpu_device pointer
125 *
126 * Use the firmware interface to load the ucode images into
127 * the driver (not loaded into hw).
128 * Returns 0 on success, error on failure.
129 */
130static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
131{
132 const char *chip_name;
133 char fw_name[30];
134 int err;
135
136 DRM_DEBUG("\n");
137
138 switch (adev->asic_type) {
139 case CHIP_BONAIRE:
140 chip_name = "bonaire";
141 break;
142 case CHIP_HAWAII:
143 chip_name = "hawaii";
144 break;
145 case CHIP_TOPAZ:
146 chip_name = "topaz";
147 break;
148 case CHIP_KAVERI:
149 case CHIP_KABINI:
150 case CHIP_MULLINS:
151 return 0;
152 default:
153 return -EINVAL;
154 }
155
156 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
157
158 err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
159 if (err) {
160 pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
161 amdgpu_ucode_release(&adev->gmc.fw);
162 }
163 return err;
164}
165
166/**
167 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
168 *
169 * @adev: amdgpu_device pointer
170 *
171 * Load the GDDR MC ucode into the hw (CIK).
172 * Returns 0 on success, error on failure.
173 */
174static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
175{
176 const struct mc_firmware_header_v1_0 *hdr;
177 const __le32 *fw_data = NULL;
178 const __le32 *io_mc_regs = NULL;
179 u32 running;
180 int i, ucode_size, regs_size;
181
182 if (!adev->gmc.fw)
183 return -EINVAL;
184
185 hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
186 amdgpu_ucode_print_mc_hdr(&hdr->header);
187
188 adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
189 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
190 io_mc_regs = (const __le32 *)
191 (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
192 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
193 fw_data = (const __le32 *)
194 (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
195
196 running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
197
198 if (running == 0) {
199 /* reset the engine and set to writable */
200 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
201 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
202
203 /* load mc io regs */
204 for (i = 0; i < regs_size; i++) {
205 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
206 WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
207 }
208 /* load the MC ucode */
209 for (i = 0; i < ucode_size; i++)
210 WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
211
212 /* put the engine back into the active state */
213 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
214 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
215 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
216
217 /* wait for training to complete */
218 for (i = 0; i < adev->usec_timeout; i++) {
219 if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
220 MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
221 break;
222 udelay(1);
223 }
224 for (i = 0; i < adev->usec_timeout; i++) {
225 if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
226 MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
227 break;
228 udelay(1);
229 }
230 }
231
232 return 0;
233}
234
235static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
236 struct amdgpu_gmc *mc)
237{
238 u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
239
240 base <<= 24;
241
242 amdgpu_gmc_set_agp_default(adev, mc);
243 amdgpu_gmc_vram_location(adev, mc, base);
244 amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
245}
246
247/**
248 * gmc_v7_0_mc_program - program the GPU memory controller
249 *
250 * @adev: amdgpu_device pointer
251 *
252 * Set the location of vram, gart, and AGP in the GPU's
253 * physical address space (CIK).
254 */
255static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
256{
257 u32 tmp;
258 int i, j;
259
260 /* Initialize HDP */
261 for (i = 0, j = 0; i < 32; i++, j += 0x6) {
262 WREG32((0xb05 + j), 0x00000000);
263 WREG32((0xb06 + j), 0x00000000);
264 WREG32((0xb07 + j), 0x00000000);
265 WREG32((0xb08 + j), 0x00000000);
266 WREG32((0xb09 + j), 0x00000000);
267 }
268 WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
269
270 if (gmc_v7_0_wait_for_idle((void *)adev))
271 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
272
273 if (adev->mode_info.num_crtc) {
274 /* Lockout access through VGA aperture*/
275 tmp = RREG32(mmVGA_HDP_CONTROL);
276 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
277 WREG32(mmVGA_HDP_CONTROL, tmp);
278
279 /* disable VGA render */
280 tmp = RREG32(mmVGA_RENDER_CONTROL);
281 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
282 WREG32(mmVGA_RENDER_CONTROL, tmp);
283 }
284 /* Update configuration */
285 WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
286 adev->gmc.vram_start >> 12);
287 WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
288 adev->gmc.vram_end >> 12);
289 WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
290 adev->mem_scratch.gpu_addr >> 12);
291 WREG32(mmMC_VM_AGP_BASE, 0);
292 WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
293 WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
294 if (gmc_v7_0_wait_for_idle((void *)adev))
295 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
296
297 WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
298
299 tmp = RREG32(mmHDP_MISC_CNTL);
300 tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
301 WREG32(mmHDP_MISC_CNTL, tmp);
302
303 tmp = RREG32(mmHDP_HOST_PATH_CNTL);
304 WREG32(mmHDP_HOST_PATH_CNTL, tmp);
305}
306
307/**
308 * gmc_v7_0_mc_init - initialize the memory controller driver params
309 *
310 * @adev: amdgpu_device pointer
311 *
312 * Look up the amount of vram, vram width, and decide how to place
313 * vram and gart within the GPU's physical address space (CIK).
314 * Returns 0 for success.
315 */
316static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
317{
318 int r;
319
320 adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
321 if (!adev->gmc.vram_width) {
322 u32 tmp;
323 int chansize, numchan;
324
325 /* Get VRAM informations */
326 tmp = RREG32(mmMC_ARB_RAMCFG);
327 if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
328 chansize = 64;
329 else
330 chansize = 32;
331
332 tmp = RREG32(mmMC_SHARED_CHMAP);
333 switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
334 case 0:
335 default:
336 numchan = 1;
337 break;
338 case 1:
339 numchan = 2;
340 break;
341 case 2:
342 numchan = 4;
343 break;
344 case 3:
345 numchan = 8;
346 break;
347 case 4:
348 numchan = 3;
349 break;
350 case 5:
351 numchan = 6;
352 break;
353 case 6:
354 numchan = 10;
355 break;
356 case 7:
357 numchan = 12;
358 break;
359 case 8:
360 numchan = 16;
361 break;
362 }
363 adev->gmc.vram_width = numchan * chansize;
364 }
365 /* size in MB on si */
366 adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
367 adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
368
369 if (!(adev->flags & AMD_IS_APU)) {
370 r = amdgpu_device_resize_fb_bar(adev);
371 if (r)
372 return r;
373 }
374 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
375 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
376
377#ifdef CONFIG_X86_64
378 if ((adev->flags & AMD_IS_APU) &&
379 adev->gmc.real_vram_size > adev->gmc.aper_size &&
380 !amdgpu_passthrough(adev)) {
381 adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
382 adev->gmc.aper_size = adev->gmc.real_vram_size;
383 }
384#endif
385
386 adev->gmc.visible_vram_size = adev->gmc.aper_size;
387
388 /* set the gart size */
389 if (amdgpu_gart_size == -1) {
390 switch (adev->asic_type) {
391 case CHIP_TOPAZ: /* no MM engines */
392 default:
393 adev->gmc.gart_size = 256ULL << 20;
394 break;
395#ifdef CONFIG_DRM_AMDGPU_CIK
396 case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */
397 case CHIP_HAWAII: /* UVD, VCE do not support GPUVM */
398 case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */
399 case CHIP_KABINI: /* UVD, VCE do not support GPUVM */
400 case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */
401 adev->gmc.gart_size = 1024ULL << 20;
402 break;
403#endif
404 }
405 } else {
406 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
407 }
408
409 adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
410 gmc_v7_0_vram_gtt_location(adev, &adev->gmc);
411
412 return 0;
413}
414
415/**
416 * gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid
417 *
418 * @adev: amdgpu_device pointer
419 * @pasid: pasid to be flush
420 * @flush_type: type of flush
421 * @all_hub: flush all hubs
422 * @inst: is used to select which instance of KIQ to use for the invalidation
423 *
424 * Flush the TLB for the requested pasid.
425 */
426static void gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
427 uint16_t pasid, uint32_t flush_type,
428 bool all_hub, uint32_t inst)
429{
430 u32 mask = 0x0;
431 int vmid;
432
433 for (vmid = 1; vmid < 16; vmid++) {
434 u32 tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
435
436 if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
437 (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid)
438 mask |= 1 << vmid;
439 }
440
441 WREG32(mmVM_INVALIDATE_REQUEST, mask);
442 RREG32(mmVM_INVALIDATE_RESPONSE);
443}
444
445/*
446 * GART
447 * VMID 0 is the physical GPU addresses as used by the kernel.
448 * VMIDs 1-15 are used for userspace clients and are handled
449 * by the amdgpu vm/hsa code.
450 */
451
452/**
453 * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback
454 *
455 * @adev: amdgpu_device pointer
456 * @vmid: vm instance to flush
457 * @vmhub: which hub to flush
458 * @flush_type: type of flush
459 * *
460 * Flush the TLB for the requested page table (CIK).
461 */
462static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
463 uint32_t vmhub, uint32_t flush_type)
464{
465 /* bits 0-15 are the VM contexts0-15 */
466 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
467}
468
469static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
470 unsigned int vmid, uint64_t pd_addr)
471{
472 uint32_t reg;
473
474 if (vmid < 8)
475 reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
476 else
477 reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
478 amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
479
480 /* bits 0-15 are the VM contexts0-15 */
481 amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
482
483 return pd_addr;
484}
485
486static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
487 unsigned int pasid)
488{
489 amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
490}
491
492static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
493 uint64_t *addr, uint64_t *flags)
494{
495 BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
496}
497
498static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
499 struct amdgpu_bo_va_mapping *mapping,
500 uint64_t *flags)
501{
502 *flags &= ~AMDGPU_PTE_EXECUTABLE;
503 *flags &= ~AMDGPU_PTE_PRT;
504}
505
506/**
507 * gmc_v7_0_set_fault_enable_default - update VM fault handling
508 *
509 * @adev: amdgpu_device pointer
510 * @value: true redirects VM faults to the default page
511 */
512static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
513 bool value)
514{
515 u32 tmp;
516
517 tmp = RREG32(mmVM_CONTEXT1_CNTL);
518 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
519 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
520 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
521 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
522 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
523 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
524 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
525 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
526 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
527 READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
528 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
529 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
530 WREG32(mmVM_CONTEXT1_CNTL, tmp);
531}
532
533/**
534 * gmc_v7_0_set_prt - set PRT VM fault
535 *
536 * @adev: amdgpu_device pointer
537 * @enable: enable/disable VM fault handling for PRT
538 */
539static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
540{
541 uint32_t tmp;
542
543 if (enable && !adev->gmc.prt_warning) {
544 dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
545 adev->gmc.prt_warning = true;
546 }
547
548 tmp = RREG32(mmVM_PRT_CNTL);
549 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
550 CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
551 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
552 CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
553 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
554 TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
555 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
556 TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
557 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
558 L2_CACHE_STORE_INVALID_ENTRIES, enable);
559 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
560 L1_TLB_STORE_INVALID_ENTRIES, enable);
561 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
562 MASK_PDE0_FAULT, enable);
563 WREG32(mmVM_PRT_CNTL, tmp);
564
565 if (enable) {
566 uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
567 uint32_t high = adev->vm_manager.max_pfn -
568 (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
569
570 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
571 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
572 WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
573 WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
574 WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
575 WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
576 WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
577 WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
578 } else {
579 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
580 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
581 WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
582 WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
583 WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
584 WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
585 WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
586 WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
587 }
588}

/**
 * gmc_v7_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	u32 tmp, field;
	int i;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

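	/*
	 * Registers 0x575-0x577 do not appear in the published register
	 * headers; their purpose is undocumented, they are simply cleared
	 * here.
	 */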
	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* FIXME: start with 4GB; once 2-level page tables are in use,
	 * switch to the full VM size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
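	/* VMIDs 1-7 use the VM_CONTEXT0..7 base-address registers, VMIDs
	 * 8-15 the VM_CONTEXT8..15 bank; all of them initially point at
	 * the VMID0 page table
	 */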
	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v7_0_set_fault_enable_default(adev, false);
	else
		gmc_v7_0_set_fault_enable_default(adev, true);

	if (adev->asic_type == CHIP_KAVERI) {
		tmp = RREG32(mmCHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(mmCHUB_CONTROL, tmp);
	}

	gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned int)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	return 0;
}

static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
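	/* each GART entry is a 64-bit PTE, i.e. 8 bytes per GPU page */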
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v7_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
}

/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: debug logging only - no functional use
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned int pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
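	/* mc_client packs a four-character client tag, most significant
	 * byte first; unpack it into a printable string
	 */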
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, pasid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

static const u32 mc_cg_registers[] = {
	mmMC_HUB_MISC_HUB_CG,
	mmMC_HUB_MISC_SIP_CG,
	mmMC_HUB_MISC_VM_CG,
	mmMC_XPB_CLK_GAT,
	mmATC_MISC_CG,
	mmMC_CITF_MISC_WR_CG,
	mmMC_CITF_MISC_RD_CG,
	mmMC_CITF_MISC_VM_CG,
	mmVM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};
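
/* the three arrays above are index-aligned: mc_cg_registers[i] is
 * controlled by mc_cg_ls_en[i] (memory light sleep) and mc_cg_en[i]
 * (medium grain clock gating)
 */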

static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

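	/* note the inverted sense: CLOCK_GATING_DIS=0 enables gating */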
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

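/* map the MT field of MC_SEQ_MISC0 (set up by the VBIOS) to an
 * AMDGPU_VRAM_TYPE_* value
 */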
static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_set_gmc_funcs(adev);
	gmc_v7_0_set_irq_funcs(adev);

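	/* 4GB shared and 4GB private apertures, back to back; these are
	 * the HSA aperture ranges consumed by the KFD (assumption: values
	 * chosen to match the other GMC generations)
	 */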
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}

static int gmc_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);

	return 0;
}

static unsigned int gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned int size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);

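		/* active scanout footprint at 32bpp (4 bytes per pixel) */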
		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}

	return size;
}

static int gmc_v7_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);

		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 64GB ((1 << 24) 4k pages).
	 * Max GPUVM size for CIK is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		pr_warn("No suitable DMA available\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v7_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v7_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

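		/* MC_VM_FB_OFFSET is in 4MB units, hence the shift by 22 */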
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					  GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}

static int gmc_v7_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_ucode_release(&adev->gmc.fw);

	return 0;
}

static int gmc_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_init_golden_registers(adev);

	gmc_v7_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v7_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v7_0_gart_enable(adev);
	if (r)
		return r;

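	/* on emulation, sanity-check VRAM accessibility before finishing */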
	if (amdgpu_emu_mode == 1)
		return amdgpu_gmc_vram_checking(adev);

	return 0;
}

static int gmc_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v7_0_gart_disable(adev);

	return 0;
}

static int gmc_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_hw_fini(adev);

	return 0;
}

static int gmc_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v7_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v7_0_wait_for_idle(void *handle)
{
	unsigned int i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* poll the MC busy bits in SRBM_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gmc_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v7_0_mc_stop(adev, &save);
		if (gmc_v7_0_wait_for_idle((void *)adev))
			dev_warn(adev->dev, "Wait for GMC idle timed out !\n");

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
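		/* read back to post the write */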
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v7_0_mc_resume(adev, &save);
		udelay(50);
	}

	return 0;
}

static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

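	/* the fault-interrupt enables sit at the same bit positions in
	 * VM_CONTEXT0_CNTL and VM_CONTEXT1_CNTL, so one mask serves both
	 */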
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	amdgpu_vm_update_fault_cache(adev, entry->pasid,
				     ((u64)addr) << AMDGPU_GPU_PAGE_SHIFT, status, AMDGPU_GFXHUB(0));

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v7_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
			addr);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
						VM_CONTEXT1_PROTECTION_FAULT_STATUS,
						PROTECTIONS);

		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
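		/* publish the fault info before setting the updated flag */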
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}

static int gmc_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	if (!(adev->flags & AMD_IS_APU)) {
		gmc_v7_0_enable_mc_mgcg(adev, gate);
		gmc_v7_0_enable_mc_ls(adev, gate);
	}
	gmc_v7_0_enable_bif_mgls(adev, gate);
	gmc_v7_0_enable_hdp_mgcg(adev, gate);
	gmc_v7_0_enable_hdp_ls(adev, gate);

	return 0;
}

static int gmc_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
	.name = "gmc_v7_0",
	.early_init = gmc_v7_0_early_init,
	.late_init = gmc_v7_0_late_init,
	.sw_init = gmc_v7_0_sw_init,
	.sw_fini = gmc_v7_0_sw_fini,
	.hw_init = gmc_v7_0_hw_init,
	.hw_fini = gmc_v7_0_hw_fini,
	.suspend = gmc_v7_0_suspend,
	.resume = gmc_v7_0_resume,
	.is_idle = gmc_v7_0_is_idle,
	.wait_for_idle = gmc_v7_0_wait_for_idle,
	.soft_reset = gmc_v7_0_soft_reset,
	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
	.set_powergating_state = gmc_v7_0_set_powergating_state,
};

static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v7_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
	.set_prt = gmc_v7_0_set_prt,
	.get_vm_pde = gmc_v7_0_get_vm_pde,
	.get_vm_pte = gmc_v7_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v7_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
	.set = gmc_v7_0_vm_fault_interrupt_state,
	.process = gmc_v7_0_process_interrupt,
};

static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
}

static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v7_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};

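/* GMC 7.4 (used by Hawaii) shares all of the code above; only the
 * reported IP version differs
 */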
const struct amdgpu_ip_block_version gmc_v7_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 4,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};