/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/io-64-nonatomic-lo-hi.h>

#include "amdgpu.h"
#include "amdgpu_gmc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

#include <drm/drm_drv.h>

/**
 * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for pdb0 and map it for CPU access
 * Returns 0 for success, error for failure.
 */
int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
{
	int r;
	struct amdgpu_bo_param bp;
	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
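	/* Each PDE0 entry used as a PTE covers
	 * (2^vmid0_page_table_block_size) * 2MB of VRAM, hence the "+ 21"
	 * (21 == log2(2MB)).  npdes rounds the hive VRAM size up to whole
	 * PDE0 pages; the one extra 8-byte entry below covers the GART PTB.
	 */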
	uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
	uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;

	memset(&bp, 0, sizeof(bp));
	bp.size = PAGE_ALIGN((npdes + 1) * 8);
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
	if (unlikely(r != 0))
		goto bo_reserve_failure;

	r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r)
		goto bo_pin_failure;
	r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
	if (r)
		goto bo_kmap_failure;

	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
	return 0;

bo_kmap_failure:
	amdgpu_bo_unpin(adev->gmc.pdb0_bo);
bo_pin_failure:
	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
bo_reserve_failure:
	amdgpu_bo_unref(&adev->gmc.pdb0_bo);
	return r;
}

/**
 * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
 *
 * @bo: the BO to get the PDE for
 * @level: the level in the PD hierarchy
 * @addr: resulting addr
 * @flags: resulting flags
 *
 * Get the address and flags to be used for a PDE (Page Directory Entry).
 */
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
			       uint64_t *addr, uint64_t *flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	switch (bo->tbo.resource->mem_type) {
	case TTM_PL_TT:
		*addr = bo->tbo.ttm->dma_address[0];
		break;
	case TTM_PL_VRAM:
		*addr = amdgpu_bo_gpu_offset(bo);
		break;
	default:
		*addr = 0;
		break;
	}
	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);
	amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
}

/*
 * amdgpu_gmc_pd_addr - return the address of the root directory
 */
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t pd_addr;

	/* TODO: move that into ASIC specific code */
	if (adev->asic_type >= CHIP_VEGA10) {
		uint64_t flags = AMDGPU_PTE_VALID;

		amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
		pd_addr |= flags;
	} else {
		pd_addr = amdgpu_bo_gpu_offset(bo);
	}
	return pd_addr;
}

/**
 * amdgpu_gmc_set_pte_pde - update the page tables using CPU
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using CPU.
 */
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
			   uint32_t gpu_page_idx, uint64_t addr,
			   uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;
	int idx;

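	/* If the device is already unplugged there is nothing to write;
	 * report success so teardown paths are not treated as errors.
	 */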
	if (!drm_dev_enter(&adev->ddev, &idx))
		return 0;

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	drm_dev_exit(idx);

	return 0;
}

/**
 * amdgpu_gmc_agp_addr - return the address in the AGP address space
 *
 * @bo: TTM BO which needs the address, must be in GTT domain
 *
 * Tries to figure out how to access the BO through the AGP aperture. Returns
 * AMDGPU_BO_INVALID_OFFSET if that is not possible.
 */
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);

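	/* The AGP aperture is a linear window, so only single page,
	 * non-cached allocations which fit below agp_size can use it.
	 */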
	if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
		return AMDGPU_BO_INVALID_OFFSET;

	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
		return AMDGPU_BO_INVALID_OFFSET;

	return adev->gmc.agp_start + bo->ttm->dma_address[0];
}

/**
 * amdgpu_gmc_vram_location - try to find VRAM location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
			      u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;

	if (mc->xgmi.num_physical_nodes == 0) {
		mc->fb_start = mc->vram_start;
		mc->fb_end = mc->vram_end;
	}
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gmc_sysvm_location - place vram and gart in sysvm aperture
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function is only used when GART is used for FB translation. In
 * such a case, we use the sysvm aperture (vmid0 page tables) for both
 * vram and gart (aka system memory) access.
 *
 * GPUVM (and our organization of vmid0 page tables) require the sysvm
 * aperture to be placed at a location aligned to 8 times the native
 * page size. For example, if vm_context0_cntl.page_table_block_size
 * is 12, then the native page size is 8G (2M*2^12), so sysvm should
 * start at a 64G aligned address. For simplicity, we just put sysvm at
 * address 0, so vram starts at address 0 and gart is right after vram.
 */
void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	u64 hive_vram_start = 0;
	u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;

	mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
	mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
	mc->gart_start = hive_vram_end + 1;
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	mc->fb_start = hive_vram_start;
	mc->fb_end = hive_vram_end;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

/**
 * amdgpu_gmc_gart_location - try to find GART location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GART before or after VRAM.
 * If GART size is bigger than the space left, then we adjust GART size.
 * Thus this function will never fail.
 */
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t four_gb = 0x100000000ULL;
	u64 size_af, size_bf;
	/* To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START */
	u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);

	/* VCE doesn't like it when BOs cross a 4GB segment, so align
	 * the GART base on a 4GB boundary as well.
	 */
	size_bf = mc->fb_start;
	size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);

	if (mc->gart_size > max(size_bf, size_af)) {
		dev_warn(adev->dev, "limiting GART\n");
		mc->gart_size = max(size_bf, size_af);
	}

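	/* Pick the smaller of the two regions that still fits the GART,
	 * leaving the larger hole for the AGP aperture.
	 */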
	if ((size_bf >= mc->gart_size && size_bf < size_af) ||
	    (size_af < mc->gart_size))
		mc->gart_start = 0;
	else
		mc->gart_start = max_mc_address - mc->gart_size + 1;

	mc->gart_start &= ~(four_gb - 1);
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

/**
 * amdgpu_gmc_agp_location - try to find AGP location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to find a place for the AGP BAR in the MC address
 * space.
 *
 * AGP BAR will be assigned the largest available hole in the address space.
 * Should be called after VRAM and GART locations are setup.
 */
void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t sixteen_gb = 1ULL << 34;
	const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
	u64 size_af, size_bf;

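	/* Under SRIOV the AGP aperture is not used; leave it empty and
	 * marked invalid (start above end).
	 */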
	if (amdgpu_sriov_vf(adev)) {
		mc->agp_start = 0xffffffffffff;
		mc->agp_end = 0x0;
		mc->agp_size = 0;

		return;
	}

	if (mc->fb_start > mc->gart_start) {
		size_bf = (mc->fb_start & sixteen_gb_mask) -
			ALIGN(mc->gart_end + 1, sixteen_gb);
		size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
	} else {
		size_bf = mc->fb_start & sixteen_gb_mask;
		size_af = (mc->gart_start & sixteen_gb_mask) -
			ALIGN(mc->fb_end + 1, sixteen_gb);
	}

	if (size_bf > size_af) {
		mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
		mc->agp_size = size_bf;
	} else {
		mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
		mc->agp_size = size_af;
	}

	mc->agp_end = mc->agp_start + mc->agp_size - 1;
	dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
		 mc->agp_size >> 20, mc->agp_start, mc->agp_end);
}

/**
 * amdgpu_gmc_fault_key - get hash key from vm fault address and pasid
 *
 * @addr: 48 bit physical address, page aligned (36 significant bits)
 * @pasid: 16 bit process address space identifier
 */
static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid)
{
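	/* addr is page aligned, so its low 12 bits are zero; shifting it
	 * left by 4 leaves 16 zero bits which exactly hold the pasid.
	 */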
	return addr << 4 | pasid;
}

/**
 * amdgpu_gmc_filter_faults - filter VM faults
 *
 * @adev: amdgpu device structure
 * @addr: address of the VM fault
 * @pasid: PASID of the process causing the fault
 * @timestamp: timestamp of the fault
 *
 * Returns:
 * True if the fault was filtered and should not be processed further.
 * False if the fault is a new one and needs to be handled.
 */
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
			      uint16_t pasid, uint64_t timestamp)
{
	struct amdgpu_gmc *gmc = &adev->gmc;
	uint64_t stamp, key = amdgpu_gmc_fault_key(addr, pasid);
	struct amdgpu_gmc_fault *fault;
	uint32_t hash;

	/* If we don't have space left in the ring buffer return immediately */
	stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
		AMDGPU_GMC_FAULT_TIMEOUT;
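	/* The slot at last_fault is the oldest entry; if it is still
	 * inside the timeout window the whole ring is occupied.
	 */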
	if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
		return true;

	/* Try to find the fault in the hash */
	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	while (fault->timestamp >= stamp) {
		uint64_t tmp;

		if (atomic64_read(&fault->key) == key)
			return true;

		tmp = fault->timestamp;
		fault = &gmc->fault_ring[fault->next];

		/* Check if the entry was reused */
		if (fault->timestamp >= tmp)
			break;
	}

	/* Add the fault to the ring */
	fault = &gmc->fault_ring[gmc->last_fault];
	atomic64_set(&fault->key, key);
	fault->timestamp = timestamp;

	/* And update the hash */
	fault->next = gmc->fault_hash[hash].idx;
	gmc->fault_hash[hash].idx = gmc->last_fault++;
	return false;
}

/**
 * amdgpu_gmc_filter_faults_remove - remove address from VM faults filter
 *
 * @adev: amdgpu device structure
 * @addr: address of the VM fault
 * @pasid: PASID of the process causing the fault
 *
 * Remove the address from the fault filter so that a future vm fault on
 * this address is passed to the retry fault handler again to recover.
 */
void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
				     uint16_t pasid)
{
	struct amdgpu_gmc *gmc = &adev->gmc;
	uint64_t key = amdgpu_gmc_fault_key(addr, pasid);
	struct amdgpu_gmc_fault *fault;
	uint32_t hash;
	uint64_t tmp;

	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	do {
		if (atomic64_cmpxchg(&fault->key, key, 0) == key)
			break;

		tmp = fault->timestamp;
		fault = &gmc->fault_ring[fault->next];
	} while (fault->timestamp < tmp);
}

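/**
 * amdgpu_gmc_ras_late_init - late RAS init for GMC related blocks
 *
 * @adev: amdgpu device structure
 *
 * Run the late RAS init callbacks of the UMC, MMHUB, XGMI and HDP
 * blocks, where present.
 * Returns 0 for success, error for failure.
 */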
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->umc.ras_funcs &&
	    adev->umc.ras_funcs->ras_late_init) {
		r = adev->umc.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->mmhub.ras_funcs &&
	    adev->mmhub.ras_funcs->ras_late_init) {
		r = adev->mmhub.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (!adev->gmc.xgmi.connected_to_cpu)
		adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;

	if (adev->gmc.xgmi.ras_funcs &&
	    adev->gmc.xgmi.ras_funcs->ras_late_init) {
		r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->hdp.ras_funcs &&
	    adev->hdp.ras_funcs->ras_late_init) {
		r = adev->hdp.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	return 0;
}

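/**
 * amdgpu_gmc_ras_fini - RAS teardown for GMC related blocks
 *
 * @adev: amdgpu device structure
 *
 * Run the RAS fini callbacks of the UMC, MMHUB, XGMI and HDP blocks,
 * where present.
 */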
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
{
	if (adev->umc.ras_funcs &&
	    adev->umc.ras_funcs->ras_fini)
		adev->umc.ras_funcs->ras_fini(adev);

	if (adev->mmhub.ras_funcs &&
	    adev->mmhub.ras_funcs->ras_fini)
		adev->mmhub.ras_funcs->ras_fini(adev);

	if (adev->gmc.xgmi.ras_funcs &&
	    adev->gmc.xgmi.ras_funcs->ras_fini)
		adev->gmc.xgmi.ras_funcs->ras_fini(adev);

	if (adev->hdp.ras_funcs &&
	    adev->hdp.ras_funcs->ras_fini)
		adev->hdp.ras_funcs->ras_fini(adev);
}

/*
 * The latest engine allocation on gfx9/10 is:
 * Engine 2, 3: firmware
 * Engine 0, 1, 4~16: amdgpu ring,
 *                    subject to change when ring number changes
 * Engine 17: Gart flushes
 */
#define GFXHUB_FREE_VM_INV_ENGS_BITMAP	0x1FFF3
#define MMHUB_FREE_VM_INV_ENGS_BITMAP	0x1FFF3

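/**
 * amdgpu_gmc_allocate_vm_inv_eng - allocate VM invalidation engines
 *
 * @adev: amdgpu device structure
 *
 * Assign a free VM invalidation engine from the per hub bitmaps above
 * to each ring.
 * Returns 0 for success, -EINVAL if a hub has no free engine left.
 */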
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		 GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

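		/* The MES ring does not get a VM invalidation engine assigned */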
		if (ring == &adev->mes.ring)
			continue;

		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}

/**
 * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
 * @adev: amdgpu_device pointer
 *
 * Check and set whether the device @adev supports Trusted Memory
 * Zones (TMZ).
 */
void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		if (amdgpu_tmz == 0) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled\n");
		}
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		/* Don't enable it by default yet. */
		if (amdgpu_tmz < 1) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
		}
		break;
	default:
		adev->gmc.tmz_enabled = false;
		dev_warn(adev->dev,
			 "Trusted Memory Zone (TMZ) feature not supported\n");
		break;
	}
}

/**
 * amdgpu_gmc_noretry_set -- set per asic noretry defaults
 * @adev: amdgpu_device pointer
 *
 * Set a per asic default for the no-retry parameter.
 */
void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
{
	struct amdgpu_gmc *gmc = &adev->gmc;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		/*
		 * noretry = 0 will cause kfd page fault tests to fail
		 * for some ASICs, so set the default to 1 for these ASICs.
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 1;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	case CHIP_RAVEN:
	default:
		/* Raven currently has issues with noretry;
		 * regardless of what we decide for other
		 * asics, we should leave raven with
		 * noretry = 0 until we root cause the
		 * issues.
		 *
		 * default this to 0 for now, but we may want
		 * to change this in the future for certain
		 * GPUs as it can increase performance in
		 * certain cases.
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 0;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	}
}

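/**
 * amdgpu_gmc_set_vm_fault_masks - set the VM fault control bits
 *
 * @adev: amdgpu device structure
 * @hub_type: AMDGPU_GFXHUB_0 or AMDGPU_MMHUB_0
 * @enable: true to set, false to clear the fault bits
 *
 * Set or clear the vm_cntx_cntl_vm_fault bits in all 16 VM context
 * control registers of the given hub.
 */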
void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
				   bool enable)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, i;

	hub = &adev->vmhub[hub_type];
	for (i = 0; i < 16; i++) {
		reg = hub->vm_context0_cntl + hub->ctx_distance * i;

		tmp = (hub_type == AMDGPU_GFXHUB_0) ?
			RREG32_SOC15_IP(GC, reg) :
			RREG32_SOC15_IP(MMHUB, reg);

		if (enable)
			tmp |= hub->vm_cntx_cntl_vm_fault;
		else
			tmp &= ~hub->vm_cntx_cntl_vm_fault;

		(hub_type == AMDGPU_GFXHUB_0) ?
			WREG32_SOC15_IP(GC, reg, tmp) :
			WREG32_SOC15_IP(MMHUB, reg, tmp);
	}
}

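/**
 * amdgpu_gmc_get_vbios_allocations - get the vbios stolen memory reservations
 *
 * @adev: amdgpu device structure
 *
 * Determine how much of the VRAM already used by the vBIOS (pre-OS
 * framebuffer) must stay reserved and split it into the stolen VGA
 * and stolen extended regions.
 */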
void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
{
	unsigned size;

	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to the first 8M of VRAM on S3 resume.
	 * This overwrites GART, which by default gets placed in the
	 * first 8M, and causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation until this is solved.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->mman.keep_stolen_vga_memory = true;
		break;
	default:
		adev->mman.keep_stolen_vga_memory = false;
		break;
	}

	if (amdgpu_sriov_vf(adev) ||
	    !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
		size = 0;
	} else {
		size = amdgpu_gmc_get_vbios_fb_size(adev);

		if (adev->mman.keep_stolen_vga_memory)
			size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
	}

	/* set to 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		size = 0;

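	/* Anything beyond the fixed VGA allocation is tracked as the
	 * extended stolen region.
	 */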
	if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
		adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
		adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
	} else {
		adev->mman.stolen_vga_size = size;
		adev->mman.stolen_extended_size = 0;
	}
}

/**
 * amdgpu_gmc_init_pdb0 - initialize PDB0
 *
 * @adev: amdgpu_device pointer
 *
 * This function is only used when the GART page table is used
 * for FB address translation. In such a case, we construct
 * a 2-level system VM page table: PDB0->PTB, to cover both
 * VRAM of the hive and system memory.
 *
 * PDB0 is static, initialized once on driver initialization.
 * The first n entries of PDB0 are used as PTE by setting
 * the P bit to 1, pointing to VRAM. The n+1'th entry points
 * to a big PTB covering system memory.
 */
void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
{
	int i;
	uint64_t flags = adev->gart.gart_pte_flags; /* TODO: it is UC. explore NC/RW? */
	/* Each PDE0 (used as PTE) covers (2^vmid0_page_table_block_size)*2M */
	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
	u64 pde0_page_size = (1ULL << adev->gmc.vmid0_page_table_block_size) << 21;
	u64 vram_addr = adev->vm_manager.vram_base_offset -
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	u64 vram_end = vram_addr + vram_size;
	u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);

	flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	flags |= AMDGPU_PTE_WRITEABLE;
	flags |= AMDGPU_PTE_SNOOPED;
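	/* FRAG advertises the PDE0 page size: 4K * 2^(block_size + 9)
	 * equals 2M * 2^block_size, i.e. pde0_page_size above.
	 */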
	flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
	flags |= AMDGPU_PDE_PTE;

	/* The first n PDE0 entries are used as PTE,
	 * pointing to vram
	 */
	for (i = 0; vram_addr < vram_end; i++, vram_addr += pde0_page_size)
		amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);

	/* The n+1'th PDE0 entry points to a huge
	 * PTB which has more than 512 entries, each
	 * pointing to a 4K system page
	 */
	flags = AMDGPU_PTE_VALID;
	flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
	/* Requires gart_ptb_gpu_pa to be 4K aligned */
	amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
}

/**
 * amdgpu_gmc_vram_mc2pa - calculate vram buffer's physical address from MC
 * address
 *
 * @adev: amdgpu_device pointer
 * @mc_addr: MC address of buffer
 */
uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr)
{
	return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
}

/**
 * amdgpu_gmc_vram_pa - calculate vram buffer object's physical address from
 * GPU's view
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 */
uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
	return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
}

/**
 * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address
 * from CPU's view
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 */
uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
	return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
}

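/**
 * amdgpu_gmc_get_reserved_allocation - get per ASIC reserved VRAM regions
 *
 * @adev: amdgpu device structure
 *
 * Record the offset and size of the VRAM region that must be kept out
 * of driver use on the current ASIC, if any.
 */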
void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev)
{
	/* Some ASICs need to reserve a region of video memory to avoid access
	 * from the driver
	 */
	adev->mman.stolen_reserved_offset = 0;
	adev->mman.stolen_reserved_size = 0;

	switch (adev->asic_type) {
	case CHIP_YELLOW_CARP:
		if (amdgpu_discovery == 0) {
			adev->mman.stolen_reserved_offset = 0x1ffb0000;
			adev->mman.stolen_reserved_size = 64 * PAGE_SIZE;
		}
		break;
	default:
		break;
	}
}