v6.9.4
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "gfxhub_v2_0.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "gc/gc_10_1_0_default.h"
#include "navi10_enum.h"

#include "soc15_common.h"

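/*
 * UTCL2 client names, indexed by the CID field of
 * GCVM_L2_PROTECTION_FAULT_STATUS; used when decoding faults below.
 */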
static const char * const gfxhub_client_ids[] = {
	"CB/DB",
	"Reserved",
	"GE1",
	"GE2",
	"CPF",
	"CPC",
	"CPG",
	"RLC",
	"TCP",
	"SQC (inst)",
	"SQC (data)",
	"SQG",
	"Reserved",
	"SDMA0",
	"SDMA1",
	"GCR",
	"SDMA2",
	"SDMA3",
};

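/*
 * Build the GCVM_INVALIDATE_ENG*_REQ value used to flush this hub's TLB
 * and page table caches for the given VMID and flush type.
 */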
static uint32_t gfxhub_v2_0_get_invalidate_req(unsigned int vmid,
					       uint32_t flush_type)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid*/
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

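/* Decode and print the fields of a GCVM_L2_PROTECTION_FAULT_STATUS value. */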
static void
gfxhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
					     uint32_t status)
{
	u32 cid = REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, CID);

	dev_err(adev->dev,
		"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
		cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
		cid);
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%lx\n",
		REG_GET_FIELD(status,
		GCVM_L2_PROTECTION_FAULT_STATUS, RW));
}

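/*
 * Return the base of the framebuffer aperture in the GPU's address space;
 * the FB_BASE field is stored in 16 MB units, hence the shift by 24.
 */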
static u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);

	base &= GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	return base;
}

static u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
{
	return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
}

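/* Program the page table base address registers for the given VMID's context. */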
static void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
				uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];

	WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    hub->ctx_addr_distance * vmid,
			    lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    hub->ctx_addr_distance * vmid,
			    upper_32_bits(page_table_base));
}

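/* Point VM context 0 at the GART page table and set its start/end addresses. */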
static void gfxhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	gfxhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));
}

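/*
 * Program the AGP and system apertures, the default page address, and the
 * protection fault default address. Skipped on SR-IOV VFs for the aperture
 * part, which the host programs.
 */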
static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;

	if (!amdgpu_sriov_vf(adev)) {
		/* Program the AGP BAR */
		WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
		WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
		WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

		/* Program the system aperture low logical page number. */
		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

		/* Set default page address. */
		value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			     (u32)(value >> 12));
		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			     (u32)(value >> 44));
	}

	/* Program "protection fault". */
	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	WREG32_FIELD15(GC, 0, GCVM_L2_PROTECTION_FAULT_CNTL2,
		       ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
}


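/* Set up the L1 TLB control register (TLB enable, system access mode, MTYPE). */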
static void gfxhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* UC, uncached */

	WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);
}

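/* Program the GCVM L2 cache control registers; skipped on SR-IOV VFs. */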
static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* These regs are not accessible for VF, PF will program these in SRIOV */
	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
			    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	/* XXX for emulation, Refer to closed source code.*/
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
			    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp);

	tmp = mmGCVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp);

	tmp = mmGCVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL4, tmp);

	tmp = mmGCVM_L2_CNTL5_DEFAULT;
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL5, tmp);
}

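/* Enable VM context 0, which maps the GART with a depth-0 page table. */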
static void gfxhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp);
}

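/*
 * Disable the CONTEXT1 identity aperture by programming its low address
 * above its high address so that no access ever matches it.
 */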
static void gfxhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
{
	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
		     0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
		     0);

	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);

}

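/* Program VM contexts 1-15, used for per-process GPUVM page tables. */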
static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
	int i;
	uint32_t tmp;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, i * hub->ctx_distance);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				PAGE_TABLE_BLOCK_SIZE,
				adev->vm_manager.block_size - 9);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !adev->gmc.noretry);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL,
				    i * hub->ctx_distance, tmp);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * hub->ctx_addr_distance,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * hub->ctx_addr_distance,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}

	hub->vm_cntx_cntl = tmp;
}

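/* Initialize every invalidation engine's address range to cover the full address space. */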
static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
	unsigned int i;

	for (i = 0 ; i < 18; ++i) {
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				    i * hub->eng_addr_distance, 0xffffffff);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				    i * hub->eng_addr_distance, 0x1f);
	}
}

static int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
	/* GART Enable. */
	gfxhub_v2_0_init_gart_aperture_regs(adev);
	gfxhub_v2_0_init_system_aperture_regs(adev);
	gfxhub_v2_0_init_tlb_regs(adev);
	gfxhub_v2_0_init_cache_regs(adev);

	gfxhub_v2_0_enable_system_domain(adev);
	gfxhub_v2_0_disable_identity_aperture(adev);
	gfxhub_v2_0_setup_vmid_config(adev);
	gfxhub_v2_0_program_invalidation(adev);

	return 0;
}

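/* Disable all VM contexts, the L1 TLB, and (on bare metal) the L2 cache. */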
static void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < 16; i++)
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL,
				    i * hub->ctx_distance, 0);

	/* Setup TLB control */
	tmp = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);

	if (!amdgpu_sriov_vf(adev)) {
		/* Setup L2 cache */
		WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
		WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
	}
}

/**
 * gfxhub_v2_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
					  bool value)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_RETRY_FAULT, 1);
	}
	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

static const struct amdgpu_vmhub_funcs gfxhub_v2_0_vmhub_funcs = {
	.print_l2_protection_fault_status = gfxhub_v2_0_print_l2_protection_fault_status,
	.get_invalidate_req = gfxhub_v2_0_get_invalidate_req,
};

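/* Fill in the vmhub register offsets, strides, and fault interrupt masks for this hub. */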
static void gfxhub_v2_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);

	hub->ctx_distance = mmGCVM_CONTEXT1_CNTL - mmGCVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = mmGCVM_INVALIDATE_ENG1_REQ -
		mmGCVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = mmGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;

	hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	/* TODO: This is only needed on some Navi 1x revisions */
	hub->sdma_invalidation_workaround = true;

	hub->vmhub_funcs = &gfxhub_v2_0_vmhub_funcs;
}

const struct amdgpu_gfxhub_funcs gfxhub_v2_0_funcs = {
	.get_fb_location = gfxhub_v2_0_get_fb_location,
	.get_mc_fb_offset = gfxhub_v2_0_get_mc_fb_offset,
	.setup_vm_pt_regs = gfxhub_v2_0_setup_vm_pt_regs,
	.gart_enable = gfxhub_v2_0_gart_enable,
	.gart_disable = gfxhub_v2_0_gart_disable,
	.set_fault_enable_default = gfxhub_v2_0_set_fault_enable_default,
	.init = gfxhub_v2_0_init,
};