/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS	2000

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"VEGA12",
	"RAVEN",
	"LAST",
};

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * amdgpu_device_is_px - Is the device a dGPU with HG/PX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with HG/PX power control,
 * otherwise returns false.
 */
bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}
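
/*
 * Registers that fall outside the directly mapped MMIO window are reached
 * indirectly through the MM_INDEX/MM_DATA pair used above: the byte offset
 * of the target register is written to MM_INDEX and the data then moves
 * through MM_DATA. The mmio_idx_lock serializes this two-step sequence so
 * that concurrent indirect accesses cannot interleave.
 */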

/*
 * MMIO register read with bytes helper function
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/*
 * MMIO register write with bytes helper function
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */
/**
 * amdgpu_mm_wreg8 - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the offset specified.
 */
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

/**
 * amdgpu_io_wreg - write to an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
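
/*
 * The 64-bit doorbell accessors above use atomic64_read()/atomic64_set()
 * so that the full qword is transferred to the doorbell aperture in a
 * single access; two separate 32-bit writes could let the hardware sample
 * a half-updated value.
 */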

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
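
/*
 * Illustrative example (the register name here is hypothetical, not taken
 * from this file): a golden register table is a flat array of
 * {offset, and_mask, or_mask} triples.
 *
 *	static const u32 golden_settings_example[] = {
 *		mmEXAMPLE_REG, 0x0000ffff, 0x00000001,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *						ARRAY_SIZE(golden_settings_example));
 *
 * An and_mask of 0xffffffff replaces the whole register with or_mask; any
 * other mask does a read-modify-write that clears the masked bits first.
 */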

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT + 1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
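
/*
 * Writeback slots are tracked as 256-bit (8 dword) units in the used
 * bitmap, while callers address individual dwords: amdgpu_device_wb_get()
 * therefore converts a slot index to a dword offset with "<< 3", and
 * amdgpu_device_wb_free() undoes the conversion with ">> 3" before
 * clearing the bit.
 */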

/**
 * amdgpu_device_vram_location - try to find VRAM location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as a parameter.
 */
void amdgpu_device_vram_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_device_gart_location - try to find GTT location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left, then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_device_gart_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc)
{
	u64 size_af, size_bf;

	size_af = adev->gmc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		/* VCE doesn't like it when BOs cross a 4GB segment, so align
		 * the GART base on a 4GB boundary as well.
		 */
		mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
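
/*
 * Illustrative example (the numbers are hypothetical): with VRAM placed at
 * offset 0, size_bf is 0, so the GART window lands in the space after VRAM,
 * with its base rounded up to the next 4GB boundary to satisfy the VCE
 * alignment rule above.
 */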

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Check if the root bus has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if a post is needed because a hw reset was performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* For FIJI: in the whole-GPU pass-through virtualization case,
		 * after a VM reboot some old SMC firmware still needs the
		 * driver to do a vPost, otherwise the GPU hangs. SMC firmware
		 * versions above 22.15 don't have this flaw, so we force
		 * vPost for SMC versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory: a page is 4KB, so we have 12 bits of offset, a minimum of
 * 9 bits in the page table, and the remaining bits in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater than or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater than or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !is_power_of_2(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}

	if (amdgpu_lockup_timeout == 0) {
		dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
		amdgpu_lockup_timeout = 10000;
	}

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asic before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Checks if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Checks if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block's version is equal to or greater than the
 * specified (major, minor) version, or 1 if it is smaller or the IP block
 * doesn't exist.
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			 ((ip_block->version->major == major) &&
			  (ip_block->version->minor >= minor))))
		return 0;

	return 1;
}
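
/*
 * Illustrative usage (the version numbers are hypothetical): the comparison
 * returns 0 when the asic's GFX IP is at least version 8.1, so a caller can
 * gate a newer programming sequence on it:
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       8, 1) == 0)
 *		... take the newer path ...
 */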

/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}
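
/*
 * Illustrative usage ("example_ip_block" is a placeholder name): asic setup
 * code such as a set_ip_blocks callback registers each IP in initialization
 * order:
 *
 *	r = amdgpu_device_ip_block_add(adev, &example_ip_block);
 *	if (r)
 *		return r;
 */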

/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run. This
 * is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	amdgpu_amdkfd_device_probe(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return -EAGAIN;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

/**
 * amdgpu_device_ip_init - run init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
 * are run. sw_init initializes the software state associated with each IP
 * and hw_init initializes the hardware associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;

		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	amdgpu_amdkfd_device_init(adev);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	return 0;
}

/**
 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
 *
 * @adev: amdgpu_device pointer
 *
 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
 * this function before a GPU reset. If the value is retained after a
 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
 */
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

/**
 * amdgpu_device_check_vram_lost - check if vram is valid
 *
 * @adev: amdgpu_device pointer
 *
 * Checks the reset magic value written to the gart pointer in VRAM.
 * The driver calls this after a GPU reset to see if the contents of
 * VRAM are lost or not.
 * Returns true if vram is lost, false if not.
 */
static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}

/**
 * amdgpu_device_ip_late_set_cg_state - late init for clockgating
 *
 * @adev: amdgpu_device pointer
 *
 * Late initialization pass enabling clockgating for hardware IPs.
 * The list of all the hardware IPs that make up the asic is walked and the
 * set_clockgating_state callbacks are run. This stage is run late
 * in the init process.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
{
	int i = 0, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

/**
 * amdgpu_device_ip_late_init - run late init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Late initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the late_init callbacks are run.
 * late_init covers any special initialization that an IP requires
 * after all of the IP blocks have been initialized or something that needs
 * to happen late in the init process.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
	}

	mod_delayed_work(system_wq, &adev->late_init_work,
			 msecs_to_jiffies(AMDGPU_RESUME_MS));

	amdgpu_device_fill_reset_magic(adev);

	return 0;
}

/**
 * amdgpu_device_ip_fini - run fini for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main teardown pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
 * are run. hw_fini tears down the hardware associated with each IP
 * and sw_fini tears down any software state associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_amdkfd_device_fini(adev);
	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_free_static_csa(adev);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
		}

		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	if (amdgpu_sriov_vf(adev))
		if (amdgpu_virt_release_full_gpu(adev, false))
			DRM_ERROR("failed to release exclusive mode on fini\n");

	return 0;
}

/**
 * amdgpu_device_ip_late_init_func_handler - work handler for clockgating
 *
 * @work: work_struct
 *
 * Work handler for amdgpu_device_ip_late_set_cg_state. We put the
 * clockgating setup into a worker thread to speed up driver init and
 * resume from suspend.
 */
static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, late_init_work.work);
	amdgpu_device_ip_late_set_cg_state(adev);
}
1834
1835/**
1836 * amdgpu_device_ip_suspend - run suspend for hardware IPs
1837 *
1838 * @adev: amdgpu_device pointer
1839 *
1840 * Main suspend function for hardware IPs. The list of all the hardware
1841 * IPs that make up the asic is walked, clockgating is disabled and the
1842 * suspend callbacks are run. suspend puts the hardware and software state
1843 * in each IP into a state suitable for suspend.
1844 * Returns 0 on success, negative error code on failure.
1845 */
1846int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
1847{
1848 int i, r;
1849
1850 if (amdgpu_sriov_vf(adev))
1851 amdgpu_virt_request_full_gpu(adev, false);
1852
1853 /* ungate SMC block first */
1854 r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1855 AMD_CG_STATE_UNGATE);
1856 if (r) {
1857 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1858 }
1859
1860 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1861 if (!adev->ip_blocks[i].status.valid)
1862 continue;
1863 /* ungate blocks so that suspend can properly shut them down */
1864 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
1865 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1866 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1867 AMD_CG_STATE_UNGATE);
1868 if (r) {
1869 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1870 adev->ip_blocks[i].version->funcs->name, r);
1871 }
1872 }
1874 r = adev->ip_blocks[i].version->funcs->suspend(adev);
1875 /* XXX handle errors */
1876 if (r) {
1877 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1878 adev->ip_blocks[i].version->funcs->name, r);
1879 }
1880 }
1881
1882 if (amdgpu_sriov_vf(adev))
1883 amdgpu_virt_release_full_gpu(adev, false);
1884
1885 return 0;
1886}
1887
1888static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
1889{
1890 int i, r;
1891
1892 static enum amd_ip_block_type ip_order[] = {
1893 AMD_IP_BLOCK_TYPE_GMC,
1894 AMD_IP_BLOCK_TYPE_COMMON,
1895 AMD_IP_BLOCK_TYPE_IH,
1896 };
1897
1898 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1899 int j;
1900 struct amdgpu_ip_block *block;
1901
1902 for (j = 0; j < adev->num_ip_blocks; j++) {
1903 block = &adev->ip_blocks[j];
1904
1905 if (block->version->type != ip_order[i] ||
1906 !block->status.valid)
1907 continue;
1908
1909 r = block->version->funcs->hw_init(adev);
1910 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
1911 if (r)
1912 return r;
1913 }
1914 }
1915
1916 return 0;
1917}
1918
1919static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
1920{
1921 int i, r;
1922
1923 static enum amd_ip_block_type ip_order[] = {
1924 AMD_IP_BLOCK_TYPE_SMC,
1925 AMD_IP_BLOCK_TYPE_PSP,
1926 AMD_IP_BLOCK_TYPE_DCE,
1927 AMD_IP_BLOCK_TYPE_GFX,
1928 AMD_IP_BLOCK_TYPE_SDMA,
1929 AMD_IP_BLOCK_TYPE_UVD,
1930 AMD_IP_BLOCK_TYPE_VCE
1931 };
1932
1933 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1934 int j;
1935 struct amdgpu_ip_block *block;
1936
1937 for (j = 0; j < adev->num_ip_blocks; j++) {
1938 block = &adev->ip_blocks[j];
1939
1940 if (block->version->type != ip_order[i] ||
1941 !block->status.valid)
1942 continue;
1943
1944 r = block->version->funcs->hw_init(adev);
1945 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
1946 if (r)
1947 return r;
1948 }
1949 }
1950
1951 return 0;
1952}
1953
1954/**
1955 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
1956 *
1957 * @adev: amdgpu_device pointer
1958 *
1959 * First resume function for hardware IPs. The list of all the hardware
1960 * IPs that make up the asic is walked and the resume callbacks are run for
1961 * COMMON, GMC, and IH. resume puts the hardware into a functional state
1962 * after a suspend and updates the software state as necessary. This
1963 * function is also used for restoring the GPU after a GPU reset.
1964 * Returns 0 on success, negative error code on failure.
1965 */
1966static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
1967{
1968 int i, r;
1969
1970 for (i = 0; i < adev->num_ip_blocks; i++) {
1971 if (!adev->ip_blocks[i].status.valid)
1972 continue;
1973 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1974 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1975 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
1976 r = adev->ip_blocks[i].version->funcs->resume(adev);
1977 if (r) {
1978 DRM_ERROR("resume of IP block <%s> failed %d\n",
1979 adev->ip_blocks[i].version->funcs->name, r);
1980 return r;
1981 }
1982 }
1983 }
1984
1985 return 0;
1986}
1987
1988/**
1989 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
1990 *
1991 * @adev: amdgpu_device pointer
1992 *
1993 * Second resume function for hardware IPs. The list of all the hardware
1994 * IPs that make up the asic is walked and the resume callbacks are run for
1995 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
1996 * functional state after a suspend and updates the software state as
1997 * necessary. This function is also used for restoring the GPU after a GPU
1998 * reset.
1999 * Returns 0 on success, negative error code on failure.
2000 */
2001static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2002{
2003 int i, r;
2004
2005 for (i = 0; i < adev->num_ip_blocks; i++) {
2006 if (!adev->ip_blocks[i].status.valid)
2007 continue;
2008 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2009 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2010 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
2011 continue;
2012 r = adev->ip_blocks[i].version->funcs->resume(adev);
2013 if (r) {
2014 DRM_ERROR("resume of IP block <%s> failed %d\n",
2015 adev->ip_blocks[i].version->funcs->name, r);
2016 return r;
2017 }
2018 }
2019
2020 return 0;
2021}
2022
2023/**
2024 * amdgpu_device_ip_resume - run resume for hardware IPs
2025 *
2026 * @adev: amdgpu_device pointer
2027 *
2028 * Main resume function for hardware IPs. The hardware IPs
2029 * are split into two resume functions because they are
2030 * also used in recovering from a GPU reset and some additional
2031 * steps need to be taken between them. In this case (S3/S4) they are
2032 * run sequentially.
2033 * Returns 0 on success, negative error code on failure.
2034 */
2035static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2036{
2037 int r;
2038
2039 r = amdgpu_device_ip_resume_phase1(adev);
2040 if (r)
2041 return r;
2042 r = amdgpu_device_ip_resume_phase2(adev);
2043
2044 return r;
2045}
2046
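/*
 * The same two phases are reused by the GPU reset path, which has to do
 * extra work in between; a condensed sketch of that ordering (see
 * amdgpu_device_reset() below):
 *
 *	r = amdgpu_device_ip_resume_phase1(adev);   // COMMON, GMC, IH
 *	// check for VRAM loss, recover the GTT manager, ...
 *	r = amdgpu_device_ip_resume_phase2(adev);   // everything else
 */
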
2047/**
2048 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2049 *
2050 * @adev: amdgpu_device pointer
2051 *
2052 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2053 */
2054static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2055{
2056 if (amdgpu_sriov_vf(adev)) {
2057 if (adev->is_atom_fw) {
2058 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2059 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2060 } else {
2061 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2062 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2063 }
2064
2065 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2066 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2067 }
2068}
2069
2070/**
2071 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2072 *
2073 * @asic_type: AMD asic type
2074 *
2075 * Check if there is DC (new modesetting infrastructure) support for an asic.
2076 * returns true if DC has support, false if not.
2077 */
2078bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2079{
2080 switch (asic_type) {
2081#if defined(CONFIG_DRM_AMD_DC)
2082 case CHIP_BONAIRE:
2083 case CHIP_HAWAII:
2084 case CHIP_KAVERI:
2085 case CHIP_KABINI:
2086 case CHIP_MULLINS:
2087 case CHIP_CARRIZO:
2088 case CHIP_STONEY:
2089 case CHIP_POLARIS11:
2090 case CHIP_POLARIS10:
2091 case CHIP_POLARIS12:
2092 case CHIP_TONGA:
2093 case CHIP_FIJI:
2094#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
2095 return amdgpu_dc != 0;
2096#endif
2097 case CHIP_VEGA10:
2098 case CHIP_VEGA12:
2099#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2100 case CHIP_RAVEN:
2101#endif
2102 return amdgpu_dc != 0;
2103#endif
2104 default:
2105 return false;
2106 }
2107}
2108
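/*
 * amdgpu_dc above is the value of the dc module parameter; a sketch of
 * forcing it from userspace, assuming the standard module option syntax:
 *
 *	# modprobe amdgpu dc=1	(force the DC display path on)
 *	# modprobe amdgpu dc=0	(force the legacy display path)
 */
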
2109/**
2110 * amdgpu_device_has_dc_support - check if dc is supported
2111 *
2112 * @adev: amdgpu_device pointer
2113 *
2114 * Returns true for supported, false for not supported
2115 */
2116bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2117{
2118 if (amdgpu_sriov_vf(adev))
2119 return false;
2120
2121 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2122}
2123
2124/**
2125 * amdgpu_device_init - initialize the driver
2126 *
2127 * @adev: amdgpu_device pointer
2128 * @ddev: drm dev pointer
2129 * @pdev: pci dev pointer
2130 * @flags: driver flags
2131 *
2132 * Initializes the driver info and hw (all asics).
2133 * Returns 0 for success or an error on failure.
2134 * Called at driver startup.
2135 */
2136int amdgpu_device_init(struct amdgpu_device *adev,
2137 struct drm_device *ddev,
2138 struct pci_dev *pdev,
2139 uint32_t flags)
2140{
2141 int r, i;
2142 bool runtime = false;
2143 u32 max_MBps;
2144
2145 adev->shutdown = false;
2146 adev->dev = &pdev->dev;
2147 adev->ddev = ddev;
2148 adev->pdev = pdev;
2149 adev->flags = flags;
2150 adev->asic_type = flags & AMD_ASIC_MASK;
2151 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2152 if (amdgpu_emu_mode == 1)
2153 adev->usec_timeout *= 2;
2154 adev->gmc.gart_size = 512 * 1024 * 1024;
2155 adev->accel_working = false;
2156 adev->num_rings = 0;
2157 adev->mman.buffer_funcs = NULL;
2158 adev->mman.buffer_funcs_ring = NULL;
2159 adev->vm_manager.vm_pte_funcs = NULL;
2160 adev->vm_manager.vm_pte_num_rings = 0;
2161 adev->gmc.gmc_funcs = NULL;
2162 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2163 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2164
2165 adev->smc_rreg = &amdgpu_invalid_rreg;
2166 adev->smc_wreg = &amdgpu_invalid_wreg;
2167 adev->pcie_rreg = &amdgpu_invalid_rreg;
2168 adev->pcie_wreg = &amdgpu_invalid_wreg;
2169 adev->pciep_rreg = &amdgpu_invalid_rreg;
2170 adev->pciep_wreg = &amdgpu_invalid_wreg;
2171 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2172 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2173 adev->didt_rreg = &amdgpu_invalid_rreg;
2174 adev->didt_wreg = &amdgpu_invalid_wreg;
2175 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2176 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
2177 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2178 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2179
2180 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2181 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2182 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
2183
2184 /* mutex initialization is all done here so we
2185 * can call these functions again without locking issues */
2186 atomic_set(&adev->irq.ih.lock, 0);
2187 mutex_init(&adev->firmware.mutex);
2188 mutex_init(&adev->pm.mutex);
2189 mutex_init(&adev->gfx.gpu_clock_mutex);
2190 mutex_init(&adev->srbm_mutex);
2191 mutex_init(&adev->gfx.pipe_reserve_mutex);
2192 mutex_init(&adev->grbm_idx_mutex);
2193 mutex_init(&adev->mn_lock);
2194 mutex_init(&adev->virt.vf_errors.lock);
2195 hash_init(adev->mn_hash);
2196 mutex_init(&adev->lock_reset);
2197
2198 amdgpu_device_check_arguments(adev);
2199
2200 spin_lock_init(&adev->mmio_idx_lock);
2201 spin_lock_init(&adev->smc_idx_lock);
2202 spin_lock_init(&adev->pcie_idx_lock);
2203 spin_lock_init(&adev->uvd_ctx_idx_lock);
2204 spin_lock_init(&adev->didt_idx_lock);
2205 spin_lock_init(&adev->gc_cac_idx_lock);
2206 spin_lock_init(&adev->se_cac_idx_lock);
2207 spin_lock_init(&adev->audio_endpt_idx_lock);
2208 spin_lock_init(&adev->mm_stats.lock);
2209
2210 INIT_LIST_HEAD(&adev->shadow_list);
2211 mutex_init(&adev->shadow_list_lock);
2212
2213 INIT_LIST_HEAD(&adev->ring_lru_list);
2214 spin_lock_init(&adev->ring_lru_list_lock);
2215
2216 INIT_DELAYED_WORK(&adev->late_init_work,
2217 amdgpu_device_ip_late_init_func_handler);
2218
2219 /* Registers mapping */
2220 /* TODO: block userspace mapping of io register */
2221 if (adev->asic_type >= CHIP_BONAIRE) {
2222 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2223 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2224 } else {
2225 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2226 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2227 }
2228
2229 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2230 if (adev->rmmio == NULL) {
2231 return -ENOMEM;
2232 }
2233 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2234 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2235
2236 /* doorbell bar mapping */
2237 amdgpu_device_doorbell_init(adev);
2238
2239 /* io port mapping */
2240 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2241 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2242 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2243 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2244 break;
2245 }
2246 }
2247 if (adev->rio_mem == NULL)
2248 DRM_INFO("PCI I/O BAR is not found.\n");
2249
2250 amdgpu_device_get_pcie_info(adev);
2251
2252 /* early init functions */
2253 r = amdgpu_device_ip_early_init(adev);
2254 if (r)
2255 return r;
2256
2257 /* if we have more than one VGA card, then disable the amdgpu VGA resources */
2258 /* this will fail for cards that aren't VGA class devices, just
2259 * ignore it */
2260 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
2261
2262 if (amdgpu_device_is_px(ddev))
2263 runtime = true;
2264 if (!pci_is_thunderbolt_attached(adev->pdev))
2265 vga_switcheroo_register_client(adev->pdev,
2266 &amdgpu_switcheroo_ops, runtime);
2267 if (runtime)
2268 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2269
2270 if (amdgpu_emu_mode == 1) {
2271 /* post the asic on emulation mode */
2272 emu_soc_asic_init(adev);
2273 goto fence_driver_init;
2274 }
2275
2276 /* Read BIOS */
2277 if (!amdgpu_get_bios(adev)) {
2278 r = -EINVAL;
2279 goto failed;
2280 }
2281
2282 r = amdgpu_atombios_init(adev);
2283 if (r) {
2284 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2285 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2286 goto failed;
2287 }
2288
2289 /* detect if we are with an SRIOV vbios */
2290 amdgpu_device_detect_sriov_bios(adev);
2291
2292 /* Post card if necessary */
2293 if (amdgpu_device_need_post(adev)) {
2294 if (!adev->bios) {
2295 dev_err(adev->dev, "no vBIOS found\n");
2296 r = -EINVAL;
2297 goto failed;
2298 }
2299 DRM_INFO("GPU posting now...\n");
2300 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2301 if (r) {
2302 dev_err(adev->dev, "gpu post error!\n");
2303 goto failed;
2304 }
2305 }
2306
2307 if (adev->is_atom_fw) {
2308 /* Initialize clocks */
2309 r = amdgpu_atomfirmware_get_clock_info(adev);
2310 if (r) {
2311 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
2312 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2313 goto failed;
2314 }
2315 } else {
2316 /* Initialize clocks */
2317 r = amdgpu_atombios_get_clock_info(adev);
2318 if (r) {
2319 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
2320 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2321 goto failed;
2322 }
2323 /* init i2c buses */
2324 if (!amdgpu_device_has_dc_support(adev))
2325 amdgpu_atombios_i2c_init(adev);
2326 }
2327
2328fence_driver_init:
2329 /* Fence driver */
2330 r = amdgpu_fence_driver_init(adev);
2331 if (r) {
2332 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
2333 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
2334 goto failed;
2335 }
2336
2337 /* init the mode config */
2338 drm_mode_config_init(adev->ddev);
2339
2340 r = amdgpu_device_ip_init(adev);
2341 if (r) {
2342 /* failed in exclusive mode due to timeout */
2343 if (amdgpu_sriov_vf(adev) &&
2344 !amdgpu_sriov_runtime(adev) &&
2345 amdgpu_virt_mmio_blocked(adev) &&
2346 !amdgpu_virt_wait_reset(adev)) {
2347 dev_err(adev->dev, "VF exclusive mode timeout\n");
2348 /* Don't send request since VF is inactive. */
2349 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2350 adev->virt.ops = NULL;
2351 r = -EAGAIN;
2352 goto failed;
2353 }
2354 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
2355 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
2356 goto failed;
2357 }
2358
2359 adev->accel_working = true;
2360
2361 amdgpu_vm_check_compute_bug(adev);
2362
2363 /* Initialize the buffer migration limit. */
2364 if (amdgpu_moverate >= 0)
2365 max_MBps = amdgpu_moverate;
2366 else
2367 max_MBps = 8; /* Allow 8 MB/s. */
2368 /* Get a log2 for easy divisions. */
2369 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2370
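	/* With 1 MB/s ~= 1 byte/us, keeping the rate as a log2 lets the CS
	 * throttling code convert between a time budget and a byte budget
	 * with shifts instead of divisions (sketch):
	 *
	 *	bytes = us << adev->mm_stats.log2_max_MBps;
	 *	us = bytes >> adev->mm_stats.log2_max_MBps;
	 */
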
2371 r = amdgpu_ib_pool_init(adev);
2372 if (r) {
2373 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2374 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2375 goto failed;
2376 }
2377
2378 r = amdgpu_ib_ring_tests(adev);
2379 if (r)
2380 DRM_ERROR("ib ring test failed (%d).\n", r);
2381
2382 if (amdgpu_sriov_vf(adev))
2383 amdgpu_virt_init_data_exchange(adev);
2384
2385 amdgpu_fbdev_init(adev);
2386
2387 r = amdgpu_pm_sysfs_init(adev);
2388 if (r)
2389 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2390
2391 r = amdgpu_debugfs_gem_init(adev);
2392 if (r)
2393 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
2394
2395 r = amdgpu_debugfs_regs_init(adev);
2396 if (r)
2397 DRM_ERROR("registering register debugfs failed (%d).\n", r);
2398
2399 r = amdgpu_debugfs_firmware_init(adev);
2400 if (r)
2401 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
2402
2403 r = amdgpu_debugfs_init(adev);
2404 if (r)
2405 DRM_ERROR("Creating debugfs files failed (%d).\n", r);
2406
2407 if (amdgpu_testing & 1) {
2408 if (adev->accel_working)
2409 amdgpu_test_moves(adev);
2410 else
2411 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2412 }
2413 if (amdgpu_benchmarking) {
2414 if (adev->accel_working)
2415 amdgpu_benchmark(adev, amdgpu_benchmarking);
2416 else
2417 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2418 }
2419
2420 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2421 * explicit gating rather than handling it automatically.
2422 */
2423 r = amdgpu_device_ip_late_init(adev);
2424 if (r) {
2425 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
2426 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
2427 goto failed;
2428 }
2429
2430 return 0;
2431
2432failed:
2433 amdgpu_vf_error_trans_all(adev);
2434 if (runtime)
2435 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2436
2437 return r;
2438}
2439
2440/**
2441 * amdgpu_device_fini - tear down the driver
2442 *
2443 * @adev: amdgpu_device pointer
2444 *
2445 * Tear down the driver info (all asics).
2446 * Called at driver shutdown.
2447 */
2448void amdgpu_device_fini(struct amdgpu_device *adev)
2449{
2450 int r;
2451
2452 DRM_INFO("amdgpu: finishing device.\n");
2453 adev->shutdown = true;
2454 /* disable all interrupts */
2455 amdgpu_irq_disable_all(adev);
2456 if (adev->mode_info.mode_config_initialized) {
2457 if (!amdgpu_device_has_dc_support(adev))
2458 drm_crtc_force_disable_all(adev->ddev);
2459 else
2460 drm_atomic_helper_shutdown(adev->ddev);
2461 }
2462 amdgpu_ib_pool_fini(adev);
2463 amdgpu_fence_driver_fini(adev);
2464 amdgpu_pm_sysfs_fini(adev);
2465 amdgpu_fbdev_fini(adev);
2466 r = amdgpu_device_ip_fini(adev);
2467 if (adev->firmware.gpu_info_fw) {
2468 release_firmware(adev->firmware.gpu_info_fw);
2469 adev->firmware.gpu_info_fw = NULL;
2470 }
2471 adev->accel_working = false;
2472 cancel_delayed_work_sync(&adev->late_init_work);
2473 /* free i2c buses */
2474 if (!amdgpu_device_has_dc_support(adev))
2475 amdgpu_i2c_fini(adev);
2476
2477 if (amdgpu_emu_mode != 1)
2478 amdgpu_atombios_fini(adev);
2479
2480 kfree(adev->bios);
2481 adev->bios = NULL;
2482 if (!pci_is_thunderbolt_attached(adev->pdev))
2483 vga_switcheroo_unregister_client(adev->pdev);
2484 if (adev->flags & AMD_IS_PX)
2485 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2486 vga_client_register(adev->pdev, NULL, NULL, NULL);
2487 if (adev->rio_mem)
2488 pci_iounmap(adev->pdev, adev->rio_mem);
2489 adev->rio_mem = NULL;
2490 iounmap(adev->rmmio);
2491 adev->rmmio = NULL;
2492 amdgpu_device_doorbell_fini(adev);
2493 amdgpu_debugfs_regs_cleanup(adev);
2494}
2495
2496
2497/*
2498 * Suspend & resume.
2499 */
2500/**
2501 * amdgpu_device_suspend - initiate device suspend
2502 *
2503 * @dev: drm dev pointer
2504 * @suspend: true to also put the device into a low power (D3hot) PCI state
2505 * @fbcon: suspend the fbdev console as well
2505 *
2506 * Puts the hw in the suspend state (all asics).
2507 * Returns 0 for success or an error on failure.
2508 * Called at driver suspend.
2509 */
2510int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
2511{
2512 struct amdgpu_device *adev;
2513 struct drm_crtc *crtc;
2514 struct drm_connector *connector;
2515 int r;
2516
2517 if (dev == NULL || dev->dev_private == NULL) {
2518 return -ENODEV;
2519 }
2520
2521 adev = dev->dev_private;
2522
2523 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2524 return 0;
2525
2526 drm_kms_helper_poll_disable(dev);
2527
2528 if (!amdgpu_device_has_dc_support(adev)) {
2529 /* turn off display hw */
2530 drm_modeset_lock_all(dev);
2531 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2532 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2533 }
2534 drm_modeset_unlock_all(dev);
2535 }
2536
2537 amdgpu_amdkfd_suspend(adev);
2538
2539 /* unpin the front buffers and cursors */
2540 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2541 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2542 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2543 struct amdgpu_bo *robj;
2544
2545 if (amdgpu_crtc->cursor_bo) {
2546 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2547 r = amdgpu_bo_reserve(aobj, true);
2548 if (r == 0) {
2549 amdgpu_bo_unpin(aobj);
2550 amdgpu_bo_unreserve(aobj);
2551 }
2552 }
2553
2554 if (rfb == NULL || rfb->obj == NULL) {
2555 continue;
2556 }
2557 robj = gem_to_amdgpu_bo(rfb->obj);
2558 /* don't unpin kernel fb objects */
2559 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
2560 r = amdgpu_bo_reserve(robj, true);
2561 if (r == 0) {
2562 amdgpu_bo_unpin(robj);
2563 amdgpu_bo_unreserve(robj);
2564 }
2565 }
2566 }
2567 /* evict vram memory */
2568 amdgpu_bo_evict_vram(adev);
2569
2570 amdgpu_fence_driver_suspend(adev);
2571
2572 r = amdgpu_device_ip_suspend(adev);
2573
2574 /* evict remaining vram memory
2575 * This second call to evict vram is to evict the gart page table
2576 * using the CPU.
2577 */
2578 amdgpu_bo_evict_vram(adev);
2579
2580 pci_save_state(dev->pdev);
2581 if (suspend) {
2582 /* Shut down the device */
2583 pci_disable_device(dev->pdev);
2584 pci_set_power_state(dev->pdev, PCI_D3hot);
2585 } else {
2586 r = amdgpu_asic_reset(adev);
2587 if (r)
2588 DRM_ERROR("amdgpu asic reset failed\n");
2589 }
2590
2591 if (fbcon) {
2592 console_lock();
2593 amdgpu_fbdev_set_suspend(adev, 1);
2594 console_unlock();
2595 }
2596 return 0;
2597}
2598
2599/**
2600 * amdgpu_device_resume - initiate device resume
2601 *
2602 * @pdev: drm dev pointer
2603 *
2604 * Bring the hw back to operating state (all asics).
2605 * Returns 0 for success or an error on failure.
2606 * Called at driver resume.
2607 */
2608int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2609{
2610 struct drm_connector *connector;
2611 struct amdgpu_device *adev = dev->dev_private;
2612 struct drm_crtc *crtc;
2613 int r = 0;
2614
2615 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2616 return 0;
2617
2618 if (fbcon)
2619 console_lock();
2620
2621 if (resume) {
2622 pci_set_power_state(dev->pdev, PCI_D0);
2623 pci_restore_state(dev->pdev);
2624 r = pci_enable_device(dev->pdev);
2625 if (r)
2626 goto unlock;
2627 }
2628
2629 /* post card */
2630 if (amdgpu_device_need_post(adev)) {
2631 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2632 if (r)
2633 DRM_ERROR("amdgpu asic init failed\n");
2634 }
2635
2636 r = amdgpu_device_ip_resume(adev);
2637 if (r) {
2638 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
2639 goto unlock;
2640 }
2641 amdgpu_fence_driver_resume(adev);
2642
2643 if (resume) {
2644 r = amdgpu_ib_ring_tests(adev);
2645 if (r)
2646 DRM_ERROR("ib ring test failed (%d).\n", r);
2647 }
2648
2649 r = amdgpu_device_ip_late_init(adev);
2650 if (r)
2651 goto unlock;
2652
2653 /* pin cursors */
2654 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2655 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2656
2657 if (amdgpu_crtc->cursor_bo) {
2658 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2659 r = amdgpu_bo_reserve(aobj, true);
2660 if (r == 0) {
2661 r = amdgpu_bo_pin(aobj,
2662 AMDGPU_GEM_DOMAIN_VRAM,
2663 &amdgpu_crtc->cursor_addr);
2664 if (r != 0)
2665 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2666 amdgpu_bo_unreserve(aobj);
2667 }
2668 }
2669 }
2670 r = amdgpu_amdkfd_resume(adev);
2671 if (r)
2672 return r;
2673
2674 /* blat the mode back in */
2675 if (fbcon) {
2676 if (!amdgpu_device_has_dc_support(adev)) {
2677 /* pre DCE11 */
2678 drm_helper_resume_force_mode(dev);
2679
2680 /* turn on display hw */
2681 drm_modeset_lock_all(dev);
2682 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2683 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2684 }
2685 drm_modeset_unlock_all(dev);
2686 }
2687 }
2688
2689 drm_kms_helper_poll_enable(dev);
2690
2691 /*
2692 * Most of the connector probing functions try to acquire runtime pm
2693 * refs to ensure that the GPU is powered on when connector polling is
2694 * performed. Since we're calling this from a runtime PM callback,
2695 * trying to acquire rpm refs will cause us to deadlock.
2696 *
2697 * Since we're guaranteed to be holding the rpm lock, it's safe to
2698 * temporarily disable the rpm helpers so this doesn't deadlock us.
2699 */
2700#ifdef CONFIG_PM
2701 dev->dev->power.disable_depth++;
2702#endif
2703 if (!amdgpu_device_has_dc_support(adev))
2704 drm_helper_hpd_irq_event(dev);
2705 else
2706 drm_kms_helper_hotplug_event(dev);
2707#ifdef CONFIG_PM
2708 dev->dev->power.disable_depth--;
2709#endif
2710
2711 if (fbcon)
2712 amdgpu_fbdev_set_suspend(adev, 0);
2713
2714unlock:
2715 if (fbcon)
2716 console_unlock();
2717
2718 return r;
2719}
2720
2721/**
2722 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
2723 *
2724 * @adev: amdgpu_device pointer
2725 *
2726 * The list of all the hardware IPs that make up the asic is walked and
2727 * the check_soft_reset callbacks are run. check_soft_reset determines
2728 * if the asic is still hung or not.
2729 * Returns true if any of the IPs are still in a hung state, false if not.
2730 */
2731static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
2732{
2733 int i;
2734 bool asic_hang = false;
2735
2736 if (amdgpu_sriov_vf(adev))
2737 return true;
2738
2739 for (i = 0; i < adev->num_ip_blocks; i++) {
2740 if (!adev->ip_blocks[i].status.valid)
2741 continue;
2742 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2743 adev->ip_blocks[i].status.hang =
2744 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2745 if (adev->ip_blocks[i].status.hang) {
2746 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
2747 asic_hang = true;
2748 }
2749 }
2750 return asic_hang;
2751}
2752
2753/**
2754 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
2755 *
2756 * @adev: amdgpu_device pointer
2757 *
2758 * The list of all the hardware IPs that make up the asic is walked and the
2759 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
2760 * handles any IP specific hardware or software state changes that are
2761 * necessary for a soft reset to succeed.
2762 * Returns 0 on success, negative error code on failure.
2763 */
2764static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
2765{
2766 int i, r = 0;
2767
2768 for (i = 0; i < adev->num_ip_blocks; i++) {
2769 if (!adev->ip_blocks[i].status.valid)
2770 continue;
2771 if (adev->ip_blocks[i].status.hang &&
2772 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2773 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
2774 if (r)
2775 return r;
2776 }
2777 }
2778
2779 return 0;
2780}
2781
2782/**
2783 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
2784 *
2785 * @adev: amdgpu_device pointer
2786 *
2787 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
2788 * reset is necessary to recover.
2789 * Returns true if a full asic reset is required, false if not.
2790 */
2791static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
2792{
2793 int i;
2794
2795 for (i = 0; i < adev->num_ip_blocks; i++) {
2796 if (!adev->ip_blocks[i].status.valid)
2797 continue;
2798 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2799 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2800 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2801 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2802 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2803 if (adev->ip_blocks[i].status.hang) {
2804 DRM_INFO("Some block need full reset!\n");
2805 return true;
2806 }
2807 }
2808 }
2809 return false;
2810}
2811
2812/**
2813 * amdgpu_device_ip_soft_reset - do a soft reset
2814 *
2815 * @adev: amdgpu_device pointer
2816 *
2817 * The list of all the hardware IPs that make up the asic is walked and the
2818 * soft_reset callbacks are run if the block is hung. soft_reset handles any
2819 * IP specific hardware or software state changes that are necessary to soft
2820 * reset the IP.
2821 * Returns 0 on success, negative error code on failure.
2822 */
2823static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
2824{
2825 int i, r = 0;
2826
2827 for (i = 0; i < adev->num_ip_blocks; i++) {
2828 if (!adev->ip_blocks[i].status.valid)
2829 continue;
2830 if (adev->ip_blocks[i].status.hang &&
2831 adev->ip_blocks[i].version->funcs->soft_reset) {
2832 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
2833 if (r)
2834 return r;
2835 }
2836 }
2837
2838 return 0;
2839}
2840
2841/**
2842 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
2843 *
2844 * @adev: amdgpu_device pointer
2845 *
2846 * The list of all the hardware IPs that make up the asic is walked and the
2847 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
2848 * handles any IP specific hardware or software state changes that are
2849 * necessary after the IP has been soft reset.
2850 * Returns 0 on success, negative error code on failure.
2851 */
2852static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
2853{
2854 int i, r = 0;
2855
2856 for (i = 0; i < adev->num_ip_blocks; i++) {
2857 if (!adev->ip_blocks[i].status.valid)
2858 continue;
2859 if (adev->ip_blocks[i].status.hang &&
2860 adev->ip_blocks[i].version->funcs->post_soft_reset)
2861 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
2862 if (r)
2863 return r;
2864 }
2865
2866 return 0;
2867}
2868
2869/**
2870 * amdgpu_device_recover_vram_from_shadow - restore shadowed VRAM buffers
2871 *
2872 * @adev: amdgpu_device pointer
2873 * @ring: amdgpu_ring for the engine handling the buffer operations
2874 * @bo: amdgpu_bo buffer whose shadow is being restored
2875 * @fence: dma_fence associated with the operation
2876 *
2877 * Restores the VRAM buffer contents from the shadow in GTT. Used to
2878 * restore things like GPUVM page tables after a GPU reset where
2879 * the contents of VRAM might be lost.
2880 * Returns 0 on success, negative error code on failure.
2881 */
2882static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
2883 struct amdgpu_ring *ring,
2884 struct amdgpu_bo *bo,
2885 struct dma_fence **fence)
2886{
2887 uint32_t domain;
2888 int r;
2889
2890 if (!bo->shadow)
2891 return 0;
2892
2893 r = amdgpu_bo_reserve(bo, true);
2894 if (r)
2895 return r;
2896 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2897 /* if bo has been evicted, then no need to recover */
2898 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2899 r = amdgpu_bo_validate(bo->shadow);
2900 if (r) {
2901 DRM_ERROR("bo validate failed!\n");
2902 goto err;
2903 }
2904
2905 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2906 NULL, fence, true);
2907 if (r) {
2908 DRM_ERROR("recover page table failed!\n");
2909 goto err;
2910 }
2911 }
2912err:
2913 amdgpu_bo_unreserve(bo);
2914 return r;
2915}
2916
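/*
 * Note: a shadow exists only for BOs created with the
 * AMDGPU_GEM_CREATE_SHADOW flag (VM page tables being the main user);
 * such BOs keep a GTT copy around precisely so the helper above has
 * something to restore from after a VRAM-losing reset.
 */
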
2917/**
2918 * amdgpu_device_handle_vram_lost - Handle the loss of VRAM contents
2919 *
2920 * @adev: amdgpu_device pointer
2921 *
2922 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
2923 * restore things like GPUVM page tables after a GPU reset where
2924 * the contents of VRAM might be lost.
2925 * Returns 0 on success, 1 on failure.
2926 */
2927static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
2928{
2929 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2930 struct amdgpu_bo *bo, *tmp;
2931 struct dma_fence *fence = NULL, *next = NULL;
2932 long r = 1;
2933 int i = 0;
2934 long tmo;
2935
2936 if (amdgpu_sriov_runtime(adev))
2937 tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
2938 else
2939 tmo = msecs_to_jiffies(100);
2940
2941 DRM_INFO("recover vram bo from shadow start\n");
2942 mutex_lock(&adev->shadow_list_lock);
2943 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2944 next = NULL;
2945 amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
2946 if (fence) {
2947 r = dma_fence_wait_timeout(fence, false, tmo);
2948 if (r == 0)
2949 pr_err("wait fence %p[%d] timeout\n", fence, i);
2950 else if (r < 0)
2951 pr_err("wait fence %p[%d] interrupted\n", fence, i);
2952 if (r < 1) {
2953 dma_fence_put(fence);
2954 fence = next;
2955 break;
2956 }
2957 i++;
2958 }
2959
2960 dma_fence_put(fence);
2961 fence = next;
2962 }
2963 mutex_unlock(&adev->shadow_list_lock);
2964
2965 if (fence) {
2966 r = dma_fence_wait_timeout(fence, false, tmo);
2967 if (r == 0)
2968 pr_err("wait fence %p[%d] timeout\n", fence, i);
2969 else if (r < 0)
2970 pr_err("wait fence %p[%d] interrupted\n", fence, i);
2971
2972 }
2973 dma_fence_put(fence);
2974
2975 if (r > 0)
2976 DRM_INFO("recover vram bo from shadow done\n");
2977 else
2978 DRM_ERROR("recover vram bo from shadow failed\n");
2979
2980 return (r > 0) ? 0 : 1;
2981}
2982
2983/**
2984 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
2985 *
2986 * @adev: amdgpu device pointer
2987 *
2988 * Attempt a soft reset, falling back to a full reset if needed, and
2989 * reinitialize the ASIC.
2990 * Returns 0 on success, negative error code on failure.
2990 */
2991static int amdgpu_device_reset(struct amdgpu_device *adev)
2992{
2993 bool need_full_reset, vram_lost = false;
2994 int r;
2995
2996 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
2997
2998 if (!need_full_reset) {
2999 amdgpu_device_ip_pre_soft_reset(adev);
3000 r = amdgpu_device_ip_soft_reset(adev);
3001 amdgpu_device_ip_post_soft_reset(adev);
3002 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
3003 DRM_INFO("soft reset failed, will fallback to full reset!\n");
3004 need_full_reset = true;
3005 }
3006 }
3007
3008 if (need_full_reset) {
3009 r = amdgpu_device_ip_suspend(adev);
3010
3011retry:
3012 r = amdgpu_asic_reset(adev);
3013 /* post card */
3014 amdgpu_atom_asic_init(adev->mode_info.atom_context);
3015
3016 if (!r) {
3017 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
3018 r = amdgpu_device_ip_resume_phase1(adev);
3019 if (r)
3020 goto out;
3021
3022 vram_lost = amdgpu_device_check_vram_lost(adev);
3023 if (vram_lost) {
3024 DRM_ERROR("VRAM is lost!\n");
3025 atomic_inc(&adev->vram_lost_counter);
3026 }
3027
3028 r = amdgpu_gtt_mgr_recover(
3029 &adev->mman.bdev.man[TTM_PL_TT]);
3030 if (r)
3031 goto out;
3032
3033 r = amdgpu_device_ip_resume_phase2(adev);
3034 if (r)
3035 goto out;
3036
3037 if (vram_lost)
3038 amdgpu_device_fill_reset_magic(adev);
3039 }
3040 }
3041
3042out:
3043 if (!r) {
3044 amdgpu_irq_gpu_reset_resume_helper(adev);
3045 r = amdgpu_ib_ring_tests(adev);
3046 if (r) {
3047 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
3048 r = amdgpu_device_ip_suspend(adev);
3049 need_full_reset = true;
3050 goto retry;
3051 }
3052 }
3053
3054 if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
3055 r = amdgpu_device_handle_vram_lost(adev);
3056
3057 return r;
3058}
3059
3060/**
3061 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
3062 *
3063 * @adev: amdgpu device pointer
3064 * @from_hypervisor: true if the reset was requested by the hypervisor
3065 *
3066 * Do a VF FLR and reinitialize the ASIC.
3067 * Returns 0 on success, negative error code on failure.
3067 */
3068static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3069 bool from_hypervisor)
3070{
3071 int r;
3072
3073 if (from_hypervisor)
3074 r = amdgpu_virt_request_full_gpu(adev, true);
3075 else
3076 r = amdgpu_virt_reset_gpu(adev);
3077 if (r)
3078 return r;
3079
3080 /* Resume IP prior to SMC */
3081 r = amdgpu_device_ip_reinit_early_sriov(adev);
3082 if (r)
3083 goto error;
3084
3085 /* we need to recover the GART prior to running SMC/CP/SDMA resume */
3086 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
3087
3088 /* now we are okay to resume SMC/CP/SDMA */
3089 r = amdgpu_device_ip_reinit_late_sriov(adev);
3090 amdgpu_virt_release_full_gpu(adev, true);
3091 if (r)
3092 goto error;
3093
3094 amdgpu_irq_gpu_reset_resume_helper(adev);
3095 r = amdgpu_ib_ring_tests(adev);
3096
3097 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3098 atomic_inc(&adev->vram_lost_counter);
3099 r = amdgpu_device_handle_vram_lost(adev);
3100 }
3101
3102error:
3103
3104 return r;
3105}
3106
3107/**
3108 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
3109 *
3110 * @adev: amdgpu device pointer
3111 * @job: the job that triggered the hang, or NULL
3112 * @force: forces reset regardless of amdgpu_gpu_recovery
3113 *
3114 * Attempt to reset the GPU if it has hung (all asics).
3115 * Returns 0 for success or an error on failure.
3116 */
3117int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
3118 struct amdgpu_job *job, bool force)
3119{
3120 struct drm_atomic_state *state = NULL;
3121 int i, r, resched;
3122
3123 if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
3124 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
3125 return 0;
3126 }
3127
3128 if (!force && (amdgpu_gpu_recovery == 0 ||
3129 (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
3130 DRM_INFO("GPU recovery disabled.\n");
3131 return 0;
3132 }
3133
3134 dev_info(adev->dev, "GPU reset begin!\n");
3135
3136 mutex_lock(&adev->lock_reset);
3137 atomic_inc(&adev->gpu_reset_counter);
3138 adev->in_gpu_reset = 1;
3139
3140 /* block TTM */
3141 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3142
3143 /* store modesetting */
3144 if (amdgpu_device_has_dc_support(adev))
3145 state = drm_atomic_helper_suspend(adev->ddev);
3146
3147 /* block all schedulers and reset given job's ring */
3148 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3149 struct amdgpu_ring *ring = adev->rings[i];
3150
3151 if (!ring || !ring->sched.thread)
3152 continue;
3153
3154 kthread_park(ring->sched.thread);
3155
3156 if (job && job->ring->idx != i)
3157 continue;
3158
3159 drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
3160
3161 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3162 amdgpu_fence_driver_force_completion(ring);
3163 }
3164
3165 if (amdgpu_sriov_vf(adev))
3166 r = amdgpu_device_reset_sriov(adev, job ? false : true);
3167 else
3168 r = amdgpu_device_reset(adev);
3169
3170 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3171 struct amdgpu_ring *ring = adev->rings[i];
3172
3173 if (!ring || !ring->sched.thread)
3174 continue;
3175
3176 /* only the scheduler of the given job's ring needs to be
3177 * recovered, or all rings when @job is NULL, and only after
3178 * the reset above has completed successfully
3179 */
3180 if ((!job || job->ring->idx == i) && !r)
3181 drm_sched_job_recovery(&ring->sched);
3182
3183 kthread_unpark(ring->sched.thread);
3184 }
3185
3186 if (amdgpu_device_has_dc_support(adev)) {
3187 if (drm_atomic_helper_resume(adev->ddev, state))
3188 dev_info(adev->dev, "drm resume failed:%d\n", r);
3189 } else {
3190 drm_helper_resume_force_mode(adev->ddev);
3191 }
3192
3193 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
3194
3195 if (r) {
3196 /* bad news, how to tell it to userspace ? */
3197 dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
3198 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
3199 } else {
3200 dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
3201 }
3202
3203 amdgpu_vf_error_trans_all(adev);
3204 adev->in_gpu_reset = 0;
3205 mutex_unlock(&adev->lock_reset);
3206 return r;
3207}
3208
3209/**
3210 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
3211 *
3212 * @adev: amdgpu_device pointer
3213 *
3214 * Fetches and stores in the driver the PCIE capabilities (gen speed
3215 * and lanes) of the slot the device is in. Handles APUs and
3216 * virtualized environments where PCIE config space may not be available.
3217 */
3218static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
3219{
3220 u32 mask;
3221 int ret;
3222
3223 if (amdgpu_pcie_gen_cap)
3224 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
3225
3226 if (amdgpu_pcie_lane_cap)
3227 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
3228
3229 /* covers APUs as well */
3230 if (pci_is_root_bus(adev->pdev->bus)) {
3231 if (adev->pm.pcie_gen_mask == 0)
3232 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3233 if (adev->pm.pcie_mlw_mask == 0)
3234 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3235 return;
3236 }
3237
3238 if (adev->pm.pcie_gen_mask == 0) {
3239 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3240 if (!ret) {
3241 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3242 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3243 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3244
3245 if (mask & DRM_PCIE_SPEED_25)
3246 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3247 if (mask & DRM_PCIE_SPEED_50)
3248 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3249 if (mask & DRM_PCIE_SPEED_80)
3250 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3251 } else {
3252 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3253 }
3254 }
3255 if (adev->pm.pcie_mlw_mask == 0) {
3256 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3257 if (!ret) {
3258 switch (mask) {
3259 case 32:
3260 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3261 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3262 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3263 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3264 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3265 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3266 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3267 break;
3268 case 16:
3269 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3270 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3271 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3272 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3273 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3274 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3275 break;
3276 case 12:
3277 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3278 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3279 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3280 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3281 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3282 break;
3283 case 8:
3284 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3285 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3286 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3287 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3288 break;
3289 case 4:
3290 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3291 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3292 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3293 break;
3294 case 2:
3295 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3296 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3297 break;
3298 case 1:
3299 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3300 break;
3301 default:
3302 break;
3303 }
3304 } else {
3305 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3306 }
3307 }
3308}
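
/*
 * Consumers test these cached masks instead of re-querying PCIe config
 * space on every decision, e.g. (sketch):
 *
 *	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 *		... program a gen3 link speed ...
 */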
3309
28#include <linux/power_supply.h>
29#include <linux/kthread.h>
30#include <linux/module.h>
31#include <linux/console.h>
32#include <linux/slab.h>
33
34#include <drm/drm_atomic_helper.h>
35#include <drm/drm_probe_helper.h>
36#include <drm/amdgpu_drm.h>
37#include <linux/vgaarb.h>
38#include <linux/vga_switcheroo.h>
39#include <linux/efi.h>
40#include "amdgpu.h"
41#include "amdgpu_trace.h"
42#include "amdgpu_i2c.h"
43#include "atom.h"
44#include "amdgpu_atombios.h"
45#include "amdgpu_atomfirmware.h"
46#include "amd_pcie.h"
47#ifdef CONFIG_DRM_AMDGPU_SI
48#include "si.h"
49#endif
50#ifdef CONFIG_DRM_AMDGPU_CIK
51#include "cik.h"
52#endif
53#include "vi.h"
54#include "soc15.h"
55#include "nv.h"
56#include "bif/bif_4_1_d.h"
57#include <linux/pci.h>
58#include <linux/firmware.h>
59#include "amdgpu_vf_error.h"
60
61#include "amdgpu_amdkfd.h"
62#include "amdgpu_pm.h"
63
64#include "amdgpu_xgmi.h"
65#include "amdgpu_ras.h"
66#include "amdgpu_pmu.h"
67#include "amdgpu_fru_eeprom.h"
68
69#include <linux/suspend.h>
70#include <drm/task_barrier.h>
71#include <linux/pm_runtime.h>
72
73MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
74MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
75MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
76MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
77MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
78MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
79MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
80MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
81MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
82MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
83
84#define AMDGPU_RESUME_MS 2000
85
86const char *amdgpu_asic_name[] = {
87 "TAHITI",
88 "PITCAIRN",
89 "VERDE",
90 "OLAND",
91 "HAINAN",
92 "BONAIRE",
93 "KAVERI",
94 "KABINI",
95 "HAWAII",
96 "MULLINS",
97 "TOPAZ",
98 "TONGA",
99 "FIJI",
100 "CARRIZO",
101 "STONEY",
102 "POLARIS10",
103 "POLARIS11",
104 "POLARIS12",
105 "VEGAM",
106 "VEGA10",
107 "VEGA12",
108 "VEGA20",
109 "RAVEN",
110 "ARCTURUS",
111 "RENOIR",
112 "NAVI10",
113 "NAVI14",
114 "NAVI12",
115 "SIENNA_CICHLID",
116 "NAVY_FLOUNDER",
117 "LAST",
118};
119
120/**
121 * DOC: pcie_replay_count
122 *
123 * The amdgpu driver provides a sysfs API for reporting the total number
124 * of PCIe replays (NAKs).
125 * The file pcie_replay_count is used for this and returns the total
126 * number of replays as the sum of the NAKs generated and NAKs received.
127 */
128
129static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
130 struct device_attribute *attr, char *buf)
131{
132 struct drm_device *ddev = dev_get_drvdata(dev);
133 struct amdgpu_device *adev = ddev->dev_private;
134 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
135
136 return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
137}
138
139static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
140 amdgpu_device_get_pcie_replay_count, NULL);
141
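/*
 * Usage sketch: the attribute hangs off the PCI device, so once it is
 * registered it can be read from userspace (with <bdf> standing in for
 * the device's bus address):
 *
 *	$ cat /sys/bus/pci/devices/<bdf>/pcie_replay_count
 */
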
142static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
143
144/**
145 * DOC: product_name
146 *
147 * The amdgpu driver provides a sysfs API for reporting the product name
148 * for the device.
149 * The file product_name is used for this and returns the product name
150 * as returned from the FRU.
151 * NOTE: This is only available for certain server cards
152 */
153
154static ssize_t amdgpu_device_get_product_name(struct device *dev,
155 struct device_attribute *attr, char *buf)
156{
157 struct drm_device *ddev = dev_get_drvdata(dev);
158 struct amdgpu_device *adev = ddev->dev_private;
159
160 return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
161}
162
163static DEVICE_ATTR(product_name, S_IRUGO,
164 amdgpu_device_get_product_name, NULL);
165
166/**
167 * DOC: product_number
168 *
169 * The amdgpu driver provides a sysfs API for reporting the part number
170 * for the device.
171 * The file product_number is used for this and returns the part number
172 * as returned from the FRU.
173 * NOTE: This is only available for certain server cards
174 */
175
176static ssize_t amdgpu_device_get_product_number(struct device *dev,
177 struct device_attribute *attr, char *buf)
178{
179 struct drm_device *ddev = dev_get_drvdata(dev);
180 struct amdgpu_device *adev = ddev->dev_private;
181
182 return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
183}
184
185static DEVICE_ATTR(product_number, S_IRUGO,
186 amdgpu_device_get_product_number, NULL);
187
188/**
189 * DOC: serial_number
190 *
191 * The amdgpu driver provides a sysfs API for reporting the serial number
192 * for the device.
193 * The file serial_number is used for this and returns the serial number
194 * as returned from the FRU.
195 * NOTE: This is only available for certain server cards
196 */
197
198static ssize_t amdgpu_device_get_serial_number(struct device *dev,
199 struct device_attribute *attr, char *buf)
200{
201 struct drm_device *ddev = dev_get_drvdata(dev);
202 struct amdgpu_device *adev = ddev->dev_private;
203
204 return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
205}
206
207static DEVICE_ATTR(serial_number, S_IRUGO,
208 amdgpu_device_get_serial_number, NULL);
209
210/**
211 * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
212 *
213 * @dev: drm_device pointer
214 *
215 * Returns true if the device is a dGPU with HG/PX power control,
216 * otherwise return false.
217 */
218bool amdgpu_device_supports_boco(struct drm_device *dev)
219{
220 struct amdgpu_device *adev = dev->dev_private;
221
222 if (adev->flags & AMD_IS_PX)
223 return true;
224 return false;
225}
226
227/**
228 * amdgpu_device_supports_baco - Does the device support BACO
229 *
230 * @dev: drm_device pointer
231 *
232 * Returns true if the device supports BACO,
233 * otherwise return false.
234 */
235bool amdgpu_device_supports_baco(struct drm_device *dev)
236{
237 struct amdgpu_device *adev = dev->dev_private;
238
239 return amdgpu_asic_supports_baco(adev);
240}
241
242/* VRAM access helper functions. */
243
244/**
245 * amdgpu_device_vram_access - read/write a buffer in vram
246 *
247 * @adev: amdgpu_device pointer
248 * @pos: offset of the buffer in vram
249 * @buf: virtual address of the buffer in system memory
250 * @size: read/write size; the buffer at @buf must hold at least @size bytes
251 * @write: true - write to vram, otherwise - read from vram
252 */
253void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
254 uint32_t *buf, size_t size, bool write)
255{
256 unsigned long flags;
257 uint32_t hi = ~0;
258 uint64_t last;
259
261#ifdef CONFIG_64BIT
262 last = min(pos + size, adev->gmc.visible_vram_size);
263 if (last > pos) {
264 void __iomem *addr = adev->mman.aper_base_kaddr + pos;
265 size_t count = last - pos;
266
267 if (write) {
268 memcpy_toio(addr, buf, count);
269 mb();
270 amdgpu_asic_flush_hdp(adev, NULL);
271 } else {
272 amdgpu_asic_invalidate_hdp(adev, NULL);
273 mb();
274 memcpy_fromio(buf, addr, count);
275 }
276
277 if (count == size)
278 return;
279
280 pos += count;
281 buf += count / 4;
282 size -= count;
283 }
284#endif
285
286 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
287 for (last = pos + size; pos < last; pos += 4) {
288 uint32_t tmp = pos >> 31;
289
290 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
291 if (tmp != hi) {
292 WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
293 hi = tmp;
294 }
295 if (write)
296 WREG32_NO_KIQ(mmMM_DATA, *buf++);
297 else
298 *buf++ = RREG32_NO_KIQ(mmMM_DATA);
299 }
300 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
301}
302
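/*
 * Usage sketch (hypothetical offset): read 16 bytes starting at VRAM
 * offset 0x1000 into a stack buffer:
 *
 *	uint32_t data[4];
 *
 *	amdgpu_device_vram_access(adev, 0x1000, data, sizeof(data), false);
 */
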
303/*
304 * MMIO register access helper functions.
305 */
306/**
307 * amdgpu_mm_rreg - read a memory mapped IO register
308 *
309 * @adev: amdgpu_device pointer
310 * @reg: dword aligned register offset
311 * @acc_flags: access flags which require special behavior
312 *
313 * Returns the 32 bit value from the offset specified.
314 */
315uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
316 uint32_t acc_flags)
317{
318 uint32_t ret;
319
320 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
321 return amdgpu_kiq_rreg(adev, reg);
322
323 if ((reg * 4) < adev->rmmio_size)
324 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
325 else {
326 unsigned long flags;
327
328 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
329 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
330 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
331 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
332 }
333 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
334 return ret;
335}
336
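/*
 * Most callers do not use amdgpu_mm_rreg()/amdgpu_mm_wreg() directly but
 * go through the RREG32()/WREG32() wrappers from amdgpu.h, e.g. (sketch
 * with a placeholder register and bit):
 *
 *	tmp = RREG32(mmSOME_REG);
 *	WREG32(mmSOME_REG, tmp | SOME_BIT);
 */
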
337/*
338 * Byte-sized MMIO register read helper
339 * @offset: byte offset from MMIO start
340 */
342
343/**
344 * amdgpu_mm_rreg8 - read a memory mapped IO register
345 *
346 * @adev: amdgpu_device pointer
347 * @offset: byte aligned register offset
348 *
349 * Returns the 8 bit value from the offset specified.
350 */
351uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
352{
353 if (offset < adev->rmmio_size)
354 return readb(adev->rmmio + offset);
355 BUG();
356}
356
357/*
358 * Byte-sized MMIO register write helper
359 * @offset: byte offset from MMIO start
360 * @value: the value to write to the register
361 */
363/**
364 * amdgpu_mm_wreg8 - write a memory mapped IO register
365 *
366 * @adev: amdgpu_device pointer
367 * @offset: byte aligned register offset
368 * @value: 8 bit value to write
369 *
370 * Writes the value specified to the offset specified.
371 */
372void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
373{
374 if (offset < adev->rmmio_size)
375 writeb(value, adev->rmmio + offset);
376 else
377 BUG();
378}
378
379static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
380{
381 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
382
383 if ((reg * 4) < adev->rmmio_size)
384 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
385 else {
386 unsigned long flags;
387
388 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
389 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
390 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
391 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
392 }
393}
394
395/**
396 * amdgpu_mm_wreg - write to a memory mapped IO register
397 *
398 * @adev: amdgpu_device pointer
399 * @reg: dword aligned register offset
400 * @v: 32 bit value to write to the register
401 * @acc_flags: access flags which require special behavior
402 *
403 * Writes the value specified to the offset specified.
404 */
405void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
406 uint32_t acc_flags)
407{
408 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
409 return amdgpu_kiq_wreg(adev, reg, v);
410
411 amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
412}
413
414/*
415 * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
416 *
417 * this function is invoked only for debugfs register access
418 */
419void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
420 uint32_t acc_flags)
421{
422 if (amdgpu_sriov_fullaccess(adev) &&
423 adev->gfx.rlc.funcs &&
424 adev->gfx.rlc.funcs->is_rlcg_access_range) {
425
426 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
427 return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
428 }
429
430 amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
431}
432
433/**
434 * amdgpu_io_rreg - read an IO register
435 *
436 * @adev: amdgpu_device pointer
437 * @reg: dword aligned register offset
438 *
439 * Returns the 32 bit value from the offset specified.
440 */
441u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
442{
443 if ((reg * 4) < adev->rio_mem_size)
444 return ioread32(adev->rio_mem + (reg * 4));
445 else {
446 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
447 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
448 }
449}
450
451/**
452 * amdgpu_io_wreg - write to an IO register
453 *
454 * @adev: amdgpu_device pointer
455 * @reg: dword aligned register offset
456 * @v: 32 bit value to write to the register
457 *
458 * Writes the value specified to the offset specified.
459 */
460void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
461{
462 if ((reg * 4) < adev->rio_mem_size)
463 iowrite32(v, adev->rio_mem + (reg * 4));
464 else {
465 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
466 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
467 }
468}
469
470/**
471 * amdgpu_mm_rdoorbell - read a doorbell dword
472 *
473 * @adev: amdgpu_device pointer
474 * @index: doorbell index
475 *
476 * Returns the value in the doorbell aperture at the
477 * requested doorbell index (CIK).
478 */
479u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
480{
481 if (index < adev->doorbell.num_doorbells) {
482 return readl(adev->doorbell.ptr + index);
483 } else {
484 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
485 return 0;
486 }
487}
488
489/**
490 * amdgpu_mm_wdoorbell - write a doorbell dword
491 *
492 * @adev: amdgpu_device pointer
493 * @index: doorbell index
494 * @v: value to write
495 *
496 * Writes @v to the doorbell aperture at the
497 * requested doorbell index (CIK).
498 */
499void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
500{
501 if (index < adev->doorbell.num_doorbells) {
502 writel(v, adev->doorbell.ptr + index);
503 } else {
504 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
505 }
506}
507
508/**
509 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
510 *
511 * @adev: amdgpu_device pointer
512 * @index: doorbell index
513 *
514 * Returns the value in the doorbell aperture at the
515 * requested doorbell index (VEGA10+).
516 */
517u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
518{
519 if (index < adev->doorbell.num_doorbells) {
520 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
521 } else {
522 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
523 return 0;
524 }
525}
526
527/**
528 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
529 *
530 * @adev: amdgpu_device pointer
531 * @index: doorbell index
532 * @v: value to write
533 *
534 * Writes @v to the doorbell aperture at the
535 * requested doorbell index (VEGA10+).
536 */
537void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
538{
539 if (index < adev->doorbell.num_doorbells) {
540 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
541 } else {
542 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
543 }
544}
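
/*
 * Illustrative sketch (the ring fields here are assumptions, not taken
 * from this file): a ring that owns a doorbell typically publishes its
 * new write pointer through these helpers instead of a plain MMIO write.
 *
 *	if (ring->use_doorbell)
 *		amdgpu_mm_wdoorbell64(adev, ring->doorbell_index, ring->wptr);
 *	else
 *		WREG32(hypothetical_wptr_reg, lower_32_bits(ring->wptr));
 */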
545
546/**
547 * amdgpu_invalid_rreg - dummy reg read function
548 *
549 * @adev: amdgpu device pointer
550 * @reg: offset of register
551 *
552 * Dummy register read function. Used for register blocks
553 * that certain asics don't have (all asics).
554 * Returns the value in the register.
555 */
556static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
557{
558 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
559 BUG();
560 return 0;
561}
562
563/**
564 * amdgpu_invalid_wreg - dummy reg write function
565 *
566 * @adev: amdgpu device pointer
567 * @reg: offset of register
568 * @v: value to write to the register
569 *
 * Dummy register write function. Used for register blocks
571 * that certain asics don't have (all asics).
572 */
573static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
574{
575 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
576 reg, v);
577 BUG();
578}
579
580/**
581 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
582 *
583 * @adev: amdgpu device pointer
584 * @reg: offset of register
585 *
586 * Dummy register read function. Used for register blocks
587 * that certain asics don't have (all asics).
588 * Returns the value in the register.
589 */
590static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
591{
592 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
593 BUG();
594 return 0;
595}
596
597/**
598 * amdgpu_invalid_wreg64 - dummy reg write function
599 *
600 * @adev: amdgpu device pointer
601 * @reg: offset of register
602 * @v: value to write to the register
603 *
 * Dummy register write function. Used for register blocks
605 * that certain asics don't have (all asics).
606 */
607static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
608{
609 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
610 reg, v);
611 BUG();
612}
613
614/**
615 * amdgpu_block_invalid_rreg - dummy reg read function
616 *
617 * @adev: amdgpu device pointer
618 * @block: offset of instance
619 * @reg: offset of register
620 *
621 * Dummy register read function. Used for register blocks
622 * that certain asics don't have (all asics).
623 * Returns the value in the register.
624 */
625static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
626 uint32_t block, uint32_t reg)
627{
628 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
629 reg, block);
630 BUG();
631 return 0;
632}
633
634/**
635 * amdgpu_block_invalid_wreg - dummy reg write function
636 *
637 * @adev: amdgpu device pointer
638 * @block: offset of instance
639 * @reg: offset of register
640 * @v: value to write to the register
641 *
 * Dummy register write function. Used for register blocks
643 * that certain asics don't have (all asics).
644 */
645static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
646 uint32_t block,
647 uint32_t reg, uint32_t v)
648{
649 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
650 reg, block, v);
651 BUG();
652}
653
654/**
655 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
656 *
657 * @adev: amdgpu device pointer
658 *
659 * Allocates a scratch page of VRAM for use by various things in the
660 * driver.
661 */
662static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
663{
664 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
665 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
666 &adev->vram_scratch.robj,
667 &adev->vram_scratch.gpu_addr,
668 (void **)&adev->vram_scratch.ptr);
669}
670
671/**
672 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
673 *
674 * @adev: amdgpu device pointer
675 *
676 * Frees the VRAM scratch page.
677 */
678static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
679{
680 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
681}
682
683/**
684 * amdgpu_device_program_register_sequence - program an array of registers.
685 *
686 * @adev: amdgpu_device pointer
687 * @registers: pointer to the register array
688 * @array_size: size of the register array
689 *
 * Programs an array of registers with the corresponding AND and OR masks.
691 * This is a helper for setting golden registers.
692 */
693void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
694 const u32 *registers,
695 const u32 array_size)
696{
697 u32 tmp, reg, and_mask, or_mask;
698 int i;
699
700 if (array_size % 3)
701 return;
702
	for (i = 0; i < array_size; i += 3) {
704 reg = registers[i + 0];
705 and_mask = registers[i + 1];
706 or_mask = registers[i + 2];
707
708 if (and_mask == 0xffffffff) {
709 tmp = or_mask;
710 } else {
711 tmp = RREG32(reg);
712 tmp &= ~and_mask;
713 if (adev->family >= AMDGPU_FAMILY_AI)
714 tmp |= (or_mask & and_mask);
715 else
716 tmp |= or_mask;
717 }
718 WREG32(reg, tmp);
719 }
720}
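
/*
 * A hedged example of the expected triplet layout; the register names and
 * values below are made up. Each entry is { reg, and_mask, or_mask }:
 * with and_mask == 0xffffffff the or_mask is written verbatim, otherwise
 * the and_mask bits are cleared first and the or_mask bits are set.
 *
 *	static const u32 golden_settings_example[] = {
 *		mmEXAMPLE_REG_A, 0x0000000f, 0x00000002,  // clear bits 3:0, set bit 1
 *		mmEXAMPLE_REG_B, 0xffffffff, 0x12345678,  // full overwrite
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *						ARRAY_SIZE(golden_settings_example));
 */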
721
722/**
723 * amdgpu_device_pci_config_reset - reset the GPU
724 *
725 * @adev: amdgpu_device pointer
726 *
727 * Resets the GPU using the pci config reset sequence.
728 * Only applicable to asics prior to vega10.
729 */
730void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
731{
732 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
733}
734
735/*
736 * GPU doorbell aperture helpers function.
737 */
738/**
739 * amdgpu_device_doorbell_init - Init doorbell driver information.
740 *
741 * @adev: amdgpu_device pointer
742 *
743 * Init doorbell driver information (CIK)
744 * Returns 0 on success, error on failure.
745 */
746static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
747{
749 /* No doorbell on SI hardware generation */
750 if (adev->asic_type < CHIP_BONAIRE) {
751 adev->doorbell.base = 0;
752 adev->doorbell.size = 0;
753 adev->doorbell.num_doorbells = 0;
754 adev->doorbell.ptr = NULL;
755 return 0;
756 }
757
758 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
759 return -EINVAL;
760
761 amdgpu_asic_init_doorbell_index(adev);
762
763 /* doorbell bar mapping */
764 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
765 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
766
767 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
768 adev->doorbell_index.max_assignment+1);
769 if (adev->doorbell.num_doorbells == 0)
770 return -EINVAL;
771
	/* For Vega, reserve and map two pages on the doorbell BAR since the
	 * SDMA paging queue doorbell uses the second page. The
	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
	 * doorbells are in the first page. So with the paging queue enabled,
	 * the max num_doorbells is extended by one page (0x400 in dwords).
	 */
778 if (adev->asic_type >= CHIP_VEGA10)
779 adev->doorbell.num_doorbells += 0x400;
780
781 adev->doorbell.ptr = ioremap(adev->doorbell.base,
782 adev->doorbell.num_doorbells *
783 sizeof(u32));
784 if (adev->doorbell.ptr == NULL)
785 return -ENOMEM;
786
787 return 0;
788}
789
790/**
791 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
792 *
793 * @adev: amdgpu_device pointer
794 *
795 * Tear down doorbell driver information (CIK)
796 */
797static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
798{
799 iounmap(adev->doorbell.ptr);
800 adev->doorbell.ptr = NULL;
801}
802
805/*
806 * amdgpu_device_wb_*()
807 * Writeback is the method by which the GPU updates special pages in memory
808 * with the status of certain GPU events (fences, ring pointers,etc.).
809 */
810
811/**
812 * amdgpu_device_wb_fini - Disable Writeback and free memory
813 *
814 * @adev: amdgpu_device pointer
815 *
816 * Disables Writeback and frees the Writeback memory (all asics).
817 * Used at driver shutdown.
818 */
819static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
820{
821 if (adev->wb.wb_obj) {
822 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
823 &adev->wb.gpu_addr,
824 (void **)&adev->wb.wb);
825 adev->wb.wb_obj = NULL;
826 }
827}
828
829/**
830 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
831 *
832 * @adev: amdgpu_device pointer
833 *
834 * Initializes writeback and allocates writeback memory (all asics).
835 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
837 */
838static int amdgpu_device_wb_init(struct amdgpu_device *adev)
839{
840 int r;
841
842 if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB slots of 256 bits */
844 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
845 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
846 &adev->wb.wb_obj, &adev->wb.gpu_addr,
847 (void **)&adev->wb.wb);
848 if (r) {
849 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
850 return r;
851 }
852
853 adev->wb.num_wb = AMDGPU_MAX_WB;
854 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
855
856 /* clear wb memory */
857 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
858 }
859
860 return 0;
861}
862
863/**
864 * amdgpu_device_wb_get - Allocate a wb entry
865 *
866 * @adev: amdgpu_device pointer
867 * @wb: wb index
868 *
869 * Allocate a wb slot for use by the driver (all asics).
870 * Returns 0 on success or -EINVAL on failure.
871 */
872int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
873{
874 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
875
876 if (offset < adev->wb.num_wb) {
877 __set_bit(offset, adev->wb.used);
878 *wb = offset << 3; /* convert to dw offset */
879 return 0;
880 } else {
881 return -EINVAL;
882 }
883}
884
885/**
886 * amdgpu_device_wb_free - Free a wb entry
887 *
888 * @adev: amdgpu_device pointer
889 * @wb: wb index
890 *
891 * Free a wb slot allocated for use by the driver (all asics)
892 */
893void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
894{
895 wb >>= 3;
896 if (wb < adev->wb.num_wb)
897 __clear_bit(wb, adev->wb.used);
898}
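
/*
 * Usage sketch (simplified; error handling and the consumer of the slot
 * are assumptions): amdgpu_device_wb_get() hands back a dword index into
 * the writeback page, from which both CPU and GPU addresses derive.
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + wb * 4;
 *		volatile u32 *cpu_ptr = &adev->wb.wb[wb];
 *
 *		// point a fence/rptr at gpu_addr, poll *cpu_ptr from the CPU
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */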
899
900/**
901 * amdgpu_device_resize_fb_bar - try to resize FB BAR
902 *
903 * @adev: amdgpu_device pointer
904 *
905 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
907 * driver loading by returning -ENODEV.
908 */
909int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
910{
911 u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
912 u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
913 struct pci_bus *root;
914 struct resource *res;
915 unsigned i;
916 u16 cmd;
917 int r;
918
919 /* Bypass for VF */
920 if (amdgpu_sriov_vf(adev))
921 return 0;
922
923 /* skip if the bios has already enabled large BAR */
924 if (adev->gmc.real_vram_size &&
925 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
926 return 0;
927
928 /* Check if the root BUS has 64bit memory resources */
929 root = adev->pdev->bus;
930 while (root->parent)
931 root = root->parent;
932
933 pci_bus_for_each_resource(root, res, i) {
934 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
935 res->start > 0x100000000ull)
936 break;
937 }
938
939 /* Trying to resize is pointless without a root hub window above 4GB */
940 if (!res)
941 return 0;
942
943 /* Disable memory decoding while we change the BAR addresses and size */
944 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
945 pci_write_config_word(adev->pdev, PCI_COMMAND,
946 cmd & ~PCI_COMMAND_MEMORY);
947
948 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
949 amdgpu_device_doorbell_fini(adev);
950 if (adev->asic_type >= CHIP_BONAIRE)
951 pci_release_resource(adev->pdev, 2);
952
953 pci_release_resource(adev->pdev, 0);
954
955 r = pci_resize_resource(adev->pdev, 0, rbar_size);
956 if (r == -ENOSPC)
957 DRM_INFO("Not enough PCI address space for a large BAR.");
958 else if (r && r != -ENOTSUPP)
959 DRM_ERROR("Problem resizing BAR0 (%d).", r);
960
961 pci_assign_unassigned_bus_resources(adev->pdev->bus);
962
963 /* When the doorbell or fb BAR isn't available we have no chance of
964 * using the device.
965 */
966 r = amdgpu_device_doorbell_init(adev);
967 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
968 return -ENODEV;
969
970 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
971
972 return 0;
973}
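
/*
 * Worked example for the rbar_size computation above, assuming the PCI
 * resizable BAR encoding of size = 2^(rbar_size + 20) bytes: for 8 GB of
 * VRAM, space_needed = 2^33, (space_needed >> 20) | 1 = 8193,
 * order_base_2(8193) = 14, so rbar_size = 13 and the requested BAR is
 * 2^(13 + 20) = 8 GB. The "| 1" nudges exact powers of two upward so the
 * rounded-up log2 never selects a BAR smaller than VRAM.
 */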
974
975/*
976 * GPU helpers function.
977 */
978/**
979 * amdgpu_device_need_post - check if the hw need post or not
980 *
981 * @adev: amdgpu_device pointer
982 *
 * Check at driver startup whether the asic needs to be posted (all asics),
 * or whether a post is needed because a hw reset was performed.
 * Returns true if post is needed, false if not.
986 */
987bool amdgpu_device_need_post(struct amdgpu_device *adev)
988{
989 uint32_t reg;
990
991 if (amdgpu_sriov_vf(adev))
992 return false;
993
994 if (amdgpu_passthrough(adev)) {
		/* for FIJI: In the whole-GPU pass-through virtualization case, after a
		 * VM reboot some old SMC firmware still needs the driver to do a vPost,
		 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
		 * this flaw, so we force a vPost for SMC versions below 22.15.
		 */
1000 if (adev->asic_type == CHIP_FIJI) {
1001 int err;
1002 uint32_t fw_ver;
1003 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
		/* force vPost if an error occurred */
1005 if (err)
1006 return true;
1007
1008 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1009 if (fw_ver < 0x00160e00)
1010 return true;
1011 }
1012 }
1013
1014 if (adev->has_hw_reset) {
1015 adev->has_hw_reset = false;
1016 return true;
1017 }
1018
1019 /* bios scratch used on CIK+ */
1020 if (adev->asic_type >= CHIP_BONAIRE)
1021 return amdgpu_atombios_scratch_need_asic_init(adev);
1022
1023 /* check MEM_SIZE for older asics */
1024 reg = amdgpu_asic_get_config_memsize(adev);
1025
1026 if ((reg != 0) && (reg != 0xffffffff))
1027 return false;
1028
1029 return true;
1030}
1031
1032/* if we get transitioned to only one device, take VGA back */
1033/**
1034 * amdgpu_device_vga_set_decode - enable/disable vga decode
1035 *
1036 * @cookie: amdgpu_device pointer
1037 * @state: enable/disable vga decode
1038 *
1039 * Enable/disable vga decode (all asics).
1040 * Returns VGA resource flags.
1041 */
1042static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
1043{
1044 struct amdgpu_device *adev = cookie;
1045 amdgpu_asic_set_vga_state(adev, state);
1046 if (state)
1047 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1048 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1049 else
1050 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1051}
1052
1053/**
1054 * amdgpu_device_check_block_size - validate the vm block size
1055 *
1056 * @adev: amdgpu_device pointer
1057 *
1058 * Validates the vm block size specified via module parameter.
1059 * The vm block size defines number of bits in page table versus page directory,
1060 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1061 * page table and the remaining bits are in the page directory.
1062 */
1063static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1064{
1065 /* defines number of bits in page table versus page directory,
1066 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1067 * page table and the remaining bits are in the page directory */
1068 if (amdgpu_vm_block_size == -1)
1069 return;
1070
1071 if (amdgpu_vm_block_size < 9) {
1072 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1073 amdgpu_vm_block_size);
1074 amdgpu_vm_block_size = -1;
1075 }
1076}
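
/*
 * Worked example: with the minimum block size of 9 bits, one page table
 * covers 2^9 entries * 4 KB per page = 2 MB of address space per page
 * directory entry (12 bits of page offset + 9 bits of page table index
 * = 21 bits).
 */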
1077
1078/**
1079 * amdgpu_device_check_vm_size - validate the vm size
1080 *
1081 * @adev: amdgpu_device pointer
1082 *
1083 * Validates the vm size in GB specified via module parameter.
1084 * The VM size is the size of the GPU virtual memory space in GB.
1085 */
1086static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1087{
1088 /* no need to check the default value */
1089 if (amdgpu_vm_size == -1)
1090 return;
1091
1092 if (amdgpu_vm_size < 1) {
1093 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1094 amdgpu_vm_size);
1095 amdgpu_vm_size = -1;
1096 }
1097}
1098
1099static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1100{
1101 struct sysinfo si;
1102 bool is_os_64 = (sizeof(void *) == 8);
1103 uint64_t total_memory;
1104 uint64_t dram_size_seven_GB = 0x1B8000000;
1105 uint64_t dram_size_three_GB = 0xB8000000;
1106
1107 if (amdgpu_smu_memory_pool_size == 0)
1108 return;
1109
1110 if (!is_os_64) {
1111 DRM_WARN("Not 64-bit OS, feature not supported\n");
1112 goto def_value;
1113 }
1114 si_meminfo(&si);
1115 total_memory = (uint64_t)si.totalram * si.mem_unit;
1116
1117 if ((amdgpu_smu_memory_pool_size == 1) ||
1118 (amdgpu_smu_memory_pool_size == 2)) {
1119 if (total_memory < dram_size_three_GB)
1120 goto def_value1;
1121 } else if ((amdgpu_smu_memory_pool_size == 4) ||
1122 (amdgpu_smu_memory_pool_size == 8)) {
1123 if (total_memory < dram_size_seven_GB)
1124 goto def_value1;
1125 } else {
1126 DRM_WARN("Smu memory pool size not supported\n");
1127 goto def_value;
1128 }
1129 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1130
1131 return;
1132
1133def_value1:
	DRM_WARN("Not enough system memory\n");
1135def_value:
1136 adev->pm.smu_prv_buffer_size = 0;
1137}
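
/*
 * The "<< 28" above converts the module parameter from units of 256 MB
 * into bytes, e.g. amdgpu_smu_memory_pool_size = 2 yields 2 << 28 =
 * 512 MB. That is why the 1/2 settings require roughly 3 GB of system
 * RAM and the 4/8 settings roughly 7 GB.
 */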
1138
1139/**
1140 * amdgpu_device_check_arguments - validate module params
1141 *
1142 * @adev: amdgpu_device pointer
1143 *
1144 * Validates certain module parameters and updates
1145 * the associated values used by the driver (all asics).
1146 */
1147static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1148{
1149 if (amdgpu_sched_jobs < 4) {
1150 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1151 amdgpu_sched_jobs);
1152 amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
1154 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1155 amdgpu_sched_jobs);
1156 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1157 }
1158
1159 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1160 /* gart size must be greater or equal to 32M */
1161 dev_warn(adev->dev, "gart size (%d) too small\n",
1162 amdgpu_gart_size);
1163 amdgpu_gart_size = -1;
1164 }
1165
1166 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1167 /* gtt size must be greater or equal to 32M */
1168 dev_warn(adev->dev, "gtt size (%d) too small\n",
1169 amdgpu_gtt_size);
1170 amdgpu_gtt_size = -1;
1171 }
1172
1173 /* valid range is between 4 and 9 inclusive */
1174 if (amdgpu_vm_fragment_size != -1 &&
1175 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1176 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1177 amdgpu_vm_fragment_size = -1;
1178 }
1179
1180 if (amdgpu_sched_hw_submission < 2) {
1181 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1182 amdgpu_sched_hw_submission);
1183 amdgpu_sched_hw_submission = 2;
1184 } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1185 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1186 amdgpu_sched_hw_submission);
1187 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1188 }
1189
1190 amdgpu_device_check_smu_prv_buffer_size(adev);
1191
1192 amdgpu_device_check_vm_size(adev);
1193
1194 amdgpu_device_check_block_size(adev);
1195
1196 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1197
1198 amdgpu_gmc_tmz_set(adev);
1199
1200 return 0;
1201}
1202
1203/**
1204 * amdgpu_switcheroo_set_state - set switcheroo state
1205 *
1206 * @pdev: pci dev pointer
1207 * @state: vga_switcheroo state
1208 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asic before or after it is powered up using ACPI methods.
1211 */
1212static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1213{
1214 struct drm_device *dev = pci_get_drvdata(pdev);
1215 int r;
1216
1217 if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
1218 return;
1219
1220 if (state == VGA_SWITCHEROO_ON) {
1221 pr_info("switched on\n");
1222 /* don't suspend or resume card normally */
1223 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1224
1225 pci_set_power_state(dev->pdev, PCI_D0);
1226 pci_restore_state(dev->pdev);
1227 r = pci_enable_device(dev->pdev);
1228 if (r)
1229 DRM_WARN("pci_enable_device failed (%d)\n", r);
1230 amdgpu_device_resume(dev, true);
1231
1232 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1233 drm_kms_helper_poll_enable(dev);
1234 } else {
1235 pr_info("switched off\n");
1236 drm_kms_helper_poll_disable(dev);
1237 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1238 amdgpu_device_suspend(dev, true);
1239 pci_save_state(dev->pdev);
1240 /* Shut down the device */
1241 pci_disable_device(dev->pdev);
1242 pci_set_power_state(dev->pdev, PCI_D3cold);
1243 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1244 }
1245}
1246
1247/**
1248 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1249 *
1250 * @pdev: pci dev pointer
1251 *
 * Callback for the switcheroo driver. Checks if the switcheroo
 * state can be changed.
1254 * Returns true if the state can be changed, false if not.
1255 */
1256static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1257{
1258 struct drm_device *dev = pci_get_drvdata(pdev);
1259
1260 /*
1261 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1262 * locking inversion with the driver load path. And the access here is
1263 * completely racy anyway. So don't bother with locking for now.
1264 */
1265 return atomic_read(&dev->open_count) == 0;
1266}
1267
1268static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1269 .set_gpu_state = amdgpu_switcheroo_set_state,
1270 .reprobe = NULL,
1271 .can_switch = amdgpu_switcheroo_can_switch,
1272};
1273
1274/**
1275 * amdgpu_device_ip_set_clockgating_state - set the CG state
1276 *
1277 * @dev: amdgpu_device pointer
1278 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1279 * @state: clockgating state (gate or ungate)
1280 *
1281 * Sets the requested clockgating state for all instances of
1282 * the hardware IP specified.
1283 * Returns the error code from the last instance.
1284 */
1285int amdgpu_device_ip_set_clockgating_state(void *dev,
1286 enum amd_ip_block_type block_type,
1287 enum amd_clockgating_state state)
1288{
1289 struct amdgpu_device *adev = dev;
1290 int i, r = 0;
1291
1292 for (i = 0; i < adev->num_ip_blocks; i++) {
1293 if (!adev->ip_blocks[i].status.valid)
1294 continue;
1295 if (adev->ip_blocks[i].version->type != block_type)
1296 continue;
1297 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1298 continue;
1299 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1300 (void *)adev, state);
1301 if (r)
1302 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1303 adev->ip_blocks[i].version->funcs->name, r);
1304 }
1305 return r;
1306}
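
/*
 * Usage sketch (illustrative): gate the clocks of every GFX IP instance,
 * e.g. from a power management handler:
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev,
 *						   AMD_IP_BLOCK_TYPE_GFX,
 *						   AMD_CG_STATE_GATE);
 *	if (r)
 *		DRM_ERROR("failed to gate GFX clocks (%d)\n", r);
 */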
1307
1308/**
1309 * amdgpu_device_ip_set_powergating_state - set the PG state
1310 *
1311 * @dev: amdgpu_device pointer
1312 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1313 * @state: powergating state (gate or ungate)
1314 *
1315 * Sets the requested powergating state for all instances of
1316 * the hardware IP specified.
1317 * Returns the error code from the last instance.
1318 */
1319int amdgpu_device_ip_set_powergating_state(void *dev,
1320 enum amd_ip_block_type block_type,
1321 enum amd_powergating_state state)
1322{
1323 struct amdgpu_device *adev = dev;
1324 int i, r = 0;
1325
1326 for (i = 0; i < adev->num_ip_blocks; i++) {
1327 if (!adev->ip_blocks[i].status.valid)
1328 continue;
1329 if (adev->ip_blocks[i].version->type != block_type)
1330 continue;
1331 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1332 continue;
1333 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1334 (void *)adev, state);
1335 if (r)
1336 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1337 adev->ip_blocks[i].version->funcs->name, r);
1338 }
1339 return r;
1340}
1341
1342/**
1343 * amdgpu_device_ip_get_clockgating_state - get the CG state
1344 *
1345 * @adev: amdgpu_device pointer
1346 * @flags: clockgating feature flags
1347 *
1348 * Walks the list of IPs on the device and updates the clockgating
1349 * flags for each IP.
1350 * Updates @flags with the feature flags for each hardware IP where
1351 * clockgating is enabled.
1352 */
1353void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1354 u32 *flags)
1355{
1356 int i;
1357
1358 for (i = 0; i < adev->num_ip_blocks; i++) {
1359 if (!adev->ip_blocks[i].status.valid)
1360 continue;
1361 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1362 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1363 }
1364}
1365
1366/**
1367 * amdgpu_device_ip_wait_for_idle - wait for idle
1368 *
1369 * @adev: amdgpu_device pointer
1370 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1371 *
 * Waits for the requested hardware IP to be idle.
1373 * Returns 0 for success or a negative error code on failure.
1374 */
1375int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1376 enum amd_ip_block_type block_type)
1377{
1378 int i, r;
1379
1380 for (i = 0; i < adev->num_ip_blocks; i++) {
1381 if (!adev->ip_blocks[i].status.valid)
1382 continue;
1383 if (adev->ip_blocks[i].version->type == block_type) {
1384 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1385 if (r)
1386 return r;
1387 break;
1388 }
1389 }
1390 return 0;
1392}
1393
1394/**
1395 * amdgpu_device_ip_is_idle - is the hardware IP idle
1396 *
1397 * @adev: amdgpu_device pointer
1398 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1399 *
1400 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
1402 */
1403bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1404 enum amd_ip_block_type block_type)
1405{
1406 int i;
1407
1408 for (i = 0; i < adev->num_ip_blocks; i++) {
1409 if (!adev->ip_blocks[i].status.valid)
1410 continue;
1411 if (adev->ip_blocks[i].version->type == block_type)
1412 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1413 }
1414 return true;
1416}
1417
1418/**
1419 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1420 *
1421 * @adev: amdgpu_device pointer
1422 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1423 *
1424 * Returns a pointer to the hardware IP block structure
1425 * if it exists for the asic, otherwise NULL.
1426 */
1427struct amdgpu_ip_block *
1428amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1429 enum amd_ip_block_type type)
1430{
1431 int i;
1432
1433 for (i = 0; i < adev->num_ip_blocks; i++)
1434 if (adev->ip_blocks[i].version->type == type)
1435 return &adev->ip_blocks[i];
1436
1437 return NULL;
1438}
1439
1440/**
1441 * amdgpu_device_ip_block_version_cmp
1442 *
1443 * @adev: amdgpu_device pointer
1444 * @type: enum amd_ip_block_type
1445 * @major: major version
1446 * @minor: minor version
1447 *
 * Returns 0 if the IP block version is equal to or greater than the
 * requested version, 1 if it is smaller or the ip_block doesn't exist.
1450 */
1451int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1452 enum amd_ip_block_type type,
1453 u32 major, u32 minor)
1454{
1455 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1456
1457 if (ip_block && ((ip_block->version->major > major) ||
1458 ((ip_block->version->major == major) &&
1459 (ip_block->version->minor >= minor))))
1460 return 0;
1461
1462 return 1;
1463}
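
/*
 * Usage sketch; note the inverted sense of the return value (0 means "at
 * least this version"). The version check below is hypothetical:
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						8, 1)) {
 *		// GFX IP is v8.1 or newer, safe to use the newer feature
 *	}
 */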
1464
1465/**
1466 * amdgpu_device_ip_block_add
1467 *
1468 * @adev: amdgpu_device pointer
1469 * @ip_block_version: pointer to the IP to add
1470 *
1471 * Adds the IP block driver information to the collection of IPs
1472 * on the asic.
1473 */
1474int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1475 const struct amdgpu_ip_block_version *ip_block_version)
1476{
1477 if (!ip_block_version)
1478 return -EINVAL;
1479
1480 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1481 ip_block_version->funcs->name);
1482
1483 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1484
1485 return 0;
1486}
1487
1488/**
1489 * amdgpu_device_enable_virtual_display - enable virtual display feature
1490 *
1491 * @adev: amdgpu_device pointer
1492 *
 * Enables the virtual display feature if the user has enabled it via
1494 * the module parameter virtual_display. This feature provides a virtual
1495 * display hardware on headless boards or in virtualized environments.
1496 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
1498 * virtual connectors, crtcs, etc.) specified.
1499 */
1500static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1501{
1502 adev->enable_virtual_display = false;
1503
1504 if (amdgpu_virtual_display) {
1505 struct drm_device *ddev = adev->ddev;
1506 const char *pci_address_name = pci_name(ddev->pdev);
1507 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1508
1509 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1510 pciaddstr_tmp = pciaddstr;
1511 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1512 pciaddname = strsep(&pciaddname_tmp, ",");
1513 if (!strcmp("all", pciaddname)
1514 || !strcmp(pci_address_name, pciaddname)) {
1515 long num_crtc;
1516 int res = -1;
1517
1518 adev->enable_virtual_display = true;
1519
1520 if (pciaddname_tmp)
1521 res = kstrtol(pciaddname_tmp, 10,
1522 &num_crtc);
1523
1524 if (!res) {
1525 if (num_crtc < 1)
1526 num_crtc = 1;
1527 if (num_crtc > 6)
1528 num_crtc = 6;
1529 adev->mode_info.num_crtc = num_crtc;
1530 } else {
1531 adev->mode_info.num_crtc = 1;
1532 }
1533 break;
1534 }
1535 }
1536
1537 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1538 amdgpu_virtual_display, pci_address_name,
1539 adev->enable_virtual_display, adev->mode_info.num_crtc);
1540
1541 kfree(pciaddstr);
1542 }
1543}
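
/*
 * Example configuration strings for the module parameter parsed above
 * (illustrative): one virtual CRTC on a specific device, or two virtual
 * CRTCs on every device in the system.
 *
 *	modprobe amdgpu virtual_display=0000:01:00.0,1
 *	modprobe amdgpu virtual_display=all,2
 */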
1544
1545/**
1546 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1547 *
1548 * @adev: amdgpu_device pointer
1549 *
1550 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
1552 * the asic.
1553 * Returns 0 on success, -EINVAL on failure.
1554 */
1555static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1556{
1557 const char *chip_name;
1558 char fw_name[40];
1559 int err;
1560 const struct gpu_info_firmware_header_v1_0 *hdr;
1561
1562 adev->firmware.gpu_info_fw = NULL;
1563
1564 if (adev->discovery_bin) {
1565 amdgpu_discovery_get_gfx_info(adev);
1566
1567 /*
1568 * FIXME: The bounding box is still needed by Navi12, so
 * temporarily read it from gpu_info firmware. Should be dropped
1570 * when DAL no longer needs it.
1571 */
1572 if (adev->asic_type != CHIP_NAVI12)
1573 return 0;
1574 }
1575
1576 switch (adev->asic_type) {
1577#ifdef CONFIG_DRM_AMDGPU_SI
1578 case CHIP_VERDE:
1579 case CHIP_TAHITI:
1580 case CHIP_PITCAIRN:
1581 case CHIP_OLAND:
1582 case CHIP_HAINAN:
1583#endif
1584#ifdef CONFIG_DRM_AMDGPU_CIK
1585 case CHIP_BONAIRE:
1586 case CHIP_HAWAII:
1587 case CHIP_KAVERI:
1588 case CHIP_KABINI:
1589 case CHIP_MULLINS:
1590#endif
1591 case CHIP_TOPAZ:
1592 case CHIP_TONGA:
1593 case CHIP_FIJI:
1594 case CHIP_POLARIS10:
1595 case CHIP_POLARIS11:
1596 case CHIP_POLARIS12:
1597 case CHIP_VEGAM:
1598 case CHIP_CARRIZO:
1599 case CHIP_STONEY:
1600 case CHIP_VEGA20:
1601 case CHIP_SIENNA_CICHLID:
1602 case CHIP_NAVY_FLOUNDER:
1603 default:
1604 return 0;
1605 case CHIP_VEGA10:
1606 chip_name = "vega10";
1607 break;
1608 case CHIP_VEGA12:
1609 chip_name = "vega12";
1610 break;
1611 case CHIP_RAVEN:
1612 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1613 chip_name = "raven2";
1614 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1615 chip_name = "picasso";
1616 else
1617 chip_name = "raven";
1618 break;
1619 case CHIP_ARCTURUS:
1620 chip_name = "arcturus";
1621 break;
1622 case CHIP_RENOIR:
1623 chip_name = "renoir";
1624 break;
1625 case CHIP_NAVI10:
1626 chip_name = "navi10";
1627 break;
1628 case CHIP_NAVI14:
1629 chip_name = "navi14";
1630 break;
1631 case CHIP_NAVI12:
1632 chip_name = "navi12";
1633 break;
1634 }
1635
1636 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1637 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1638 if (err) {
1639 dev_err(adev->dev,
1640 "Failed to load gpu_info firmware \"%s\"\n",
1641 fw_name);
1642 goto out;
1643 }
1644 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1645 if (err) {
1646 dev_err(adev->dev,
1647 "Failed to validate gpu_info firmware \"%s\"\n",
1648 fw_name);
1649 goto out;
1650 }
1651
1652 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1653 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1654
1655 switch (hdr->version_major) {
1656 case 1:
1657 {
1658 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1659 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1660 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1661
1662 /*
		 * Should be dropped when DAL no longer needs it.
1664 */
1665 if (adev->asic_type == CHIP_NAVI12)
1666 goto parse_soc_bounding_box;
1667
1668 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1669 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1670 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1671 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1672 adev->gfx.config.max_texture_channel_caches =
1673 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1674 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1675 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1676 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1677 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1678 adev->gfx.config.double_offchip_lds_buf =
1679 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1680 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1681 adev->gfx.cu_info.max_waves_per_simd =
1682 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1683 adev->gfx.cu_info.max_scratch_slots_per_cu =
1684 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1685 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1686 if (hdr->version_minor >= 1) {
1687 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1688 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1689 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1690 adev->gfx.config.num_sc_per_sh =
1691 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1692 adev->gfx.config.num_packer_per_sc =
1693 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1694 }
1695
1696parse_soc_bounding_box:
1697 /*
		 * soc bounding box info is not integrated in the discovery table,
1699 * we always need to parse it from gpu info firmware if needed.
1700 */
1701 if (hdr->version_minor == 2) {
1702 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1703 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1704 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1705 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1706 }
1707 break;
1708 }
1709 default:
1710 dev_err(adev->dev,
1711 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1712 err = -EINVAL;
1713 goto out;
1714 }
1715out:
1716 return err;
1717}
1718
1719/**
1720 * amdgpu_device_ip_early_init - run early init for hardware IPs
1721 *
1722 * @adev: amdgpu_device pointer
1723 *
1724 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run. This
1726 * is the first stage in initializing the asic.
1727 * Returns 0 on success, negative error code on failure.
1728 */
1729static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1730{
1731 int i, r;
1732
1733 amdgpu_device_enable_virtual_display(adev);
1734
1735 if (amdgpu_sriov_vf(adev)) {
1736 r = amdgpu_virt_request_full_gpu(adev, true);
1737 if (r)
1738 return r;
1739 }
1740
1741 switch (adev->asic_type) {
1742#ifdef CONFIG_DRM_AMDGPU_SI
1743 case CHIP_VERDE:
1744 case CHIP_TAHITI:
1745 case CHIP_PITCAIRN:
1746 case CHIP_OLAND:
1747 case CHIP_HAINAN:
1748 adev->family = AMDGPU_FAMILY_SI;
1749 r = si_set_ip_blocks(adev);
1750 if (r)
1751 return r;
1752 break;
1753#endif
1754#ifdef CONFIG_DRM_AMDGPU_CIK
1755 case CHIP_BONAIRE:
1756 case CHIP_HAWAII:
1757 case CHIP_KAVERI:
1758 case CHIP_KABINI:
1759 case CHIP_MULLINS:
1760 if (adev->flags & AMD_IS_APU)
1761 adev->family = AMDGPU_FAMILY_KV;
1762 else
1763 adev->family = AMDGPU_FAMILY_CI;
1764
1765 r = cik_set_ip_blocks(adev);
1766 if (r)
1767 return r;
1768 break;
1769#endif
1770 case CHIP_TOPAZ:
1771 case CHIP_TONGA:
1772 case CHIP_FIJI:
1773 case CHIP_POLARIS10:
1774 case CHIP_POLARIS11:
1775 case CHIP_POLARIS12:
1776 case CHIP_VEGAM:
1777 case CHIP_CARRIZO:
1778 case CHIP_STONEY:
1779 if (adev->flags & AMD_IS_APU)
1780 adev->family = AMDGPU_FAMILY_CZ;
1781 else
1782 adev->family = AMDGPU_FAMILY_VI;
1783
1784 r = vi_set_ip_blocks(adev);
1785 if (r)
1786 return r;
1787 break;
1788 case CHIP_VEGA10:
1789 case CHIP_VEGA12:
1790 case CHIP_VEGA20:
1791 case CHIP_RAVEN:
1792 case CHIP_ARCTURUS:
1793 case CHIP_RENOIR:
1794 if (adev->flags & AMD_IS_APU)
1795 adev->family = AMDGPU_FAMILY_RV;
1796 else
1797 adev->family = AMDGPU_FAMILY_AI;
1798
1799 r = soc15_set_ip_blocks(adev);
1800 if (r)
1801 return r;
1802 break;
1803 case CHIP_NAVI10:
1804 case CHIP_NAVI14:
1805 case CHIP_NAVI12:
1806 case CHIP_SIENNA_CICHLID:
1807 case CHIP_NAVY_FLOUNDER:
1808 adev->family = AMDGPU_FAMILY_NV;
1809
1810 r = nv_set_ip_blocks(adev);
1811 if (r)
1812 return r;
1813 break;
1814 default:
1815 /* FIXME: not supported yet */
1816 return -EINVAL;
1817 }
1818
1819 amdgpu_amdkfd_device_probe(adev);
1820
1821 adev->pm.pp_feature = amdgpu_pp_feature_mask;
1822 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
1823 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1824
1825 for (i = 0; i < adev->num_ip_blocks; i++) {
1826 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1827 DRM_ERROR("disabled ip block: %d <%s>\n",
1828 i, adev->ip_blocks[i].version->funcs->name);
1829 adev->ip_blocks[i].status.valid = false;
1830 } else {
1831 if (adev->ip_blocks[i].version->funcs->early_init) {
1832 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1833 if (r == -ENOENT) {
1834 adev->ip_blocks[i].status.valid = false;
1835 } else if (r) {
1836 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1837 adev->ip_blocks[i].version->funcs->name, r);
1838 return r;
1839 } else {
1840 adev->ip_blocks[i].status.valid = true;
1841 }
1842 } else {
1843 adev->ip_blocks[i].status.valid = true;
1844 }
1845 }
1846 /* get the vbios after the asic_funcs are set up */
1847 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
1848 r = amdgpu_device_parse_gpu_info_fw(adev);
1849 if (r)
1850 return r;
1851
1852 /* Read BIOS */
1853 if (!amdgpu_get_bios(adev))
1854 return -EINVAL;
1855
1856 r = amdgpu_atombios_init(adev);
1857 if (r) {
1858 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
1859 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
1860 return r;
1861 }
1862 }
1863 }
1864
1865 adev->cg_flags &= amdgpu_cg_mask;
1866 adev->pg_flags &= amdgpu_pg_mask;
1867
1868 return 0;
1869}
1870
1871static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
1872{
1873 int i, r;
1874
1875 for (i = 0; i < adev->num_ip_blocks; i++) {
1876 if (!adev->ip_blocks[i].status.sw)
1877 continue;
1878 if (adev->ip_blocks[i].status.hw)
1879 continue;
1880 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1881 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
1882 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
1883 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1884 if (r) {
1885 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1886 adev->ip_blocks[i].version->funcs->name, r);
1887 return r;
1888 }
1889 adev->ip_blocks[i].status.hw = true;
1890 }
1891 }
1892
1893 return 0;
1894}
1895
1896static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
1897{
1898 int i, r;
1899
1900 for (i = 0; i < adev->num_ip_blocks; i++) {
1901 if (!adev->ip_blocks[i].status.sw)
1902 continue;
1903 if (adev->ip_blocks[i].status.hw)
1904 continue;
1905 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1906 if (r) {
1907 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1908 adev->ip_blocks[i].version->funcs->name, r);
1909 return r;
1910 }
1911 adev->ip_blocks[i].status.hw = true;
1912 }
1913
1914 return 0;
1915}
1916
1917static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
1918{
1919 int r = 0;
1920 int i;
1921 uint32_t smu_version;
1922
1923 if (adev->asic_type >= CHIP_VEGA10) {
1924 for (i = 0; i < adev->num_ip_blocks; i++) {
1925 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
1926 continue;
1927
			/* no need to do the fw loading again if already done */
			if (adev->ip_blocks[i].status.hw)
1930 break;
1931
1932 if (adev->in_gpu_reset || adev->in_suspend) {
1933 r = adev->ip_blocks[i].version->funcs->resume(adev);
1934 if (r) {
1935 DRM_ERROR("resume of IP block <%s> failed %d\n",
1936 adev->ip_blocks[i].version->funcs->name, r);
1937 return r;
1938 }
1939 } else {
1940 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1941 if (r) {
1942 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1943 adev->ip_blocks[i].version->funcs->name, r);
1944 return r;
1945 }
1946 }
1947
1948 adev->ip_blocks[i].status.hw = true;
1949 break;
1950 }
1951 }
1952
1953 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
1954 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
1955
1956 return r;
1957}
1958
1959/**
1960 * amdgpu_device_ip_init - run init for hardware IPs
1961 *
1962 * @adev: amdgpu_device pointer
1963 *
1964 * Main initialization pass for hardware IPs. The list of all the hardware
1965 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
1966 * are run. sw_init initializes the software state associated with each IP
1967 * and hw_init initializes the hardware associated with each IP.
1968 * Returns 0 on success, negative error code on failure.
1969 */
1970static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1971{
1972 int i, r;
1973
1974 r = amdgpu_ras_init(adev);
1975 if (r)
1976 return r;
1977
1978 for (i = 0; i < adev->num_ip_blocks; i++) {
1979 if (!adev->ip_blocks[i].status.valid)
1980 continue;
1981 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1982 if (r) {
1983 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1984 adev->ip_blocks[i].version->funcs->name, r);
1985 goto init_failed;
1986 }
1987 adev->ip_blocks[i].status.sw = true;
1988
1989 /* need to do gmc hw init early so we can allocate gpu mem */
1990 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1991 r = amdgpu_device_vram_scratch_init(adev);
1992 if (r) {
1993 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1994 goto init_failed;
1995 }
1996 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1997 if (r) {
1998 DRM_ERROR("hw_init %d failed %d\n", i, r);
1999 goto init_failed;
2000 }
2001 r = amdgpu_device_wb_init(adev);
2002 if (r) {
2003 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2004 goto init_failed;
2005 }
2006 adev->ip_blocks[i].status.hw = true;
2007
2008 /* right after GMC hw init, we create CSA */
2009 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2010 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2011 AMDGPU_GEM_DOMAIN_VRAM,
2012 AMDGPU_CSA_SIZE);
2013 if (r) {
2014 DRM_ERROR("allocate CSA failed %d\n", r);
2015 goto init_failed;
2016 }
2017 }
2018 }
2019 }
2020
2021 if (amdgpu_sriov_vf(adev))
2022 amdgpu_virt_init_data_exchange(adev);
2023
2024 r = amdgpu_ib_pool_init(adev);
2025 if (r) {
2026 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2027 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2028 goto init_failed;
2029 }
2030
	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2032 if (r)
2033 goto init_failed;
2034
2035 r = amdgpu_device_ip_hw_init_phase1(adev);
2036 if (r)
2037 goto init_failed;
2038
2039 r = amdgpu_device_fw_loading(adev);
2040 if (r)
2041 goto init_failed;
2042
2043 r = amdgpu_device_ip_hw_init_phase2(adev);
2044 if (r)
2045 goto init_failed;
2046
2047 /*
	 * Retired pages will be loaded from eeprom and reserved here.
	 * This should be called after amdgpu_device_ip_hw_init_phase2 since
	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
	 * functional for I2C communication, which is only true at this point.
	 * recovery_init may fail, but it can free all resources allocated by
	 * itself and its failure should not stop the amdgpu init process.
	 *
	 * Note: theoretically, this should be called before all vram allocations
	 * to protect retired pages from being reused.
2057 */
2058 amdgpu_ras_recovery_init(adev);
2059
2060 if (adev->gmc.xgmi.num_physical_nodes > 1)
2061 amdgpu_xgmi_add_device(adev);
2062 amdgpu_amdkfd_device_init(adev);
2063
2064 amdgpu_fru_get_product_info(adev);
2065
2066init_failed:
2067 if (amdgpu_sriov_vf(adev))
2068 amdgpu_virt_release_full_gpu(adev, true);
2069
2070 return r;
2071}
2072
2073/**
2074 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2075 *
2076 * @adev: amdgpu_device pointer
2077 *
2078 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2079 * this function before a GPU reset. If the value is retained after a
 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2081 */
2082static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2083{
2084 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2085}
2086
2087/**
2088 * amdgpu_device_check_vram_lost - check if vram is valid
2089 *
2090 * @adev: amdgpu_device pointer
2091 *
2092 * Checks the reset magic value written to the gart pointer in VRAM.
2093 * The driver calls this after a GPU reset to see if the contents of
 * VRAM were lost or not.
2095 * returns true if vram is lost, false if not.
2096 */
2097static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2098{
2099 if (memcmp(adev->gart.ptr, adev->reset_magic,
2100 AMDGPU_RESET_MAGIC_NUM))
2101 return true;
2102
2103 if (!adev->in_gpu_reset)
2104 return false;
2105
2106 /*
2107 * For all ASICs with baco/mode1 reset, the VRAM is
2108 * always assumed to be lost.
2109 */
2110 switch (amdgpu_asic_reset_method(adev)) {
2111 case AMD_RESET_METHOD_BACO:
2112 case AMD_RESET_METHOD_MODE1:
2113 return true;
2114 default:
2115 return false;
2116 }
2117}
2118
2119/**
2120 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2121 *
2122 * @adev: amdgpu_device pointer
2123 * @state: clockgating state (gate or ungate)
2124 *
2125 * The list of all the hardware IPs that make up the asic is walked and the
2126 * set_clockgating_state callbacks are run.
 * During the late init pass, clockgating is enabled for the hardware IPs;
 * during fini or suspend, this pass disables clockgating instead.
2129 * Returns 0 on success, negative error code on failure.
2130 */
2131
2132static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2133 enum amd_clockgating_state state)
2134{
2135 int i, j, r;
2136
2137 if (amdgpu_emu_mode == 1)
2138 return 0;
2139
2140 for (j = 0; j < adev->num_ip_blocks; j++) {
2141 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2142 if (!adev->ip_blocks[i].status.late_initialized)
2143 continue;
2144 /* skip CG for VCE/UVD, it's handled specially */
2145 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2146 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2147 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2148 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2149 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2150 /* enable clockgating to save power */
2151 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2152 state);
2153 if (r) {
2154 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2155 adev->ip_blocks[i].version->funcs->name, r);
2156 return r;
2157 }
2158 }
2159 }
2160
2161 return 0;
2162}
2163
2164static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
2165{
2166 int i, j, r;
2167
2168 if (amdgpu_emu_mode == 1)
2169 return 0;
2170
2171 for (j = 0; j < adev->num_ip_blocks; j++) {
2172 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2173 if (!adev->ip_blocks[i].status.late_initialized)
2174 continue;
		/* skip PG for VCE/UVD, it's handled specially */
2176 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2177 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2178 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2179 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2180 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2181 /* enable powergating to save power */
2182 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2183 state);
2184 if (r) {
2185 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2186 adev->ip_blocks[i].version->funcs->name, r);
2187 return r;
2188 }
2189 }
2190 }
2191 return 0;
2192}
2193
2194static int amdgpu_device_enable_mgpu_fan_boost(void)
2195{
2196 struct amdgpu_gpu_instance *gpu_ins;
2197 struct amdgpu_device *adev;
2198 int i, ret = 0;
2199
2200 mutex_lock(&mgpu_info.mutex);
2201
2202 /*
2203 * MGPU fan boost feature should be enabled
2204 * only when there are two or more dGPUs in
2205 * the system
2206 */
2207 if (mgpu_info.num_dgpu < 2)
2208 goto out;
2209
2210 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2211 gpu_ins = &(mgpu_info.gpu_ins[i]);
2212 adev = gpu_ins->adev;
2213 if (!(adev->flags & AMD_IS_APU) &&
2214 !gpu_ins->mgpu_fan_enabled &&
2215 adev->powerplay.pp_funcs &&
2216 adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
2217 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2218 if (ret)
2219 break;
2220
2221 gpu_ins->mgpu_fan_enabled = 1;
2222 }
2223 }
2224
2225out:
2226 mutex_unlock(&mgpu_info.mutex);
2227
2228 return ret;
2229}
2230
2231/**
2232 * amdgpu_device_ip_late_init - run late init for hardware IPs
2233 *
2234 * @adev: amdgpu_device pointer
2235 *
2236 * Late initialization pass for hardware IPs. The list of all the hardware
2237 * IPs that make up the asic is walked and the late_init callbacks are run.
2238 * late_init covers any special initialization that an IP requires
 * after all of the IP blocks have been initialized or something that needs to happen
2240 * late in the init process.
2241 * Returns 0 on success, negative error code on failure.
2242 */
2243static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2244{
2245 struct amdgpu_gpu_instance *gpu_instance;
2246 int i = 0, r;
2247
2248 for (i = 0; i < adev->num_ip_blocks; i++) {
2249 if (!adev->ip_blocks[i].status.hw)
2250 continue;
2251 if (adev->ip_blocks[i].version->funcs->late_init) {
2252 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2253 if (r) {
2254 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2255 adev->ip_blocks[i].version->funcs->name, r);
2256 return r;
2257 }
2258 }
2259 adev->ip_blocks[i].status.late_initialized = true;
2260 }
2261
2262 amdgpu_ras_set_error_query_ready(adev, true);
2263
2264 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2265 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2266
2267 amdgpu_device_fill_reset_magic(adev);
2268
2269 r = amdgpu_device_enable_mgpu_fan_boost();
2270 if (r)
2271 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2272
2274 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2275 mutex_lock(&mgpu_info.mutex);
2276
2277 /*
2278 * Reset device p-state to low as this was booted with high.
2279 *
2280 * This should be performed only after all devices from the same
2281 * hive get initialized.
2282 *
		 * However, it's not known in advance how many devices are in the
		 * hive, as they are counted one by one during device initialization.
2285 *
2286 * So, we wait for all XGMI interlinked devices initialized.
2287 * This may bring some delays as those devices may come from
2288 * different hives. But that should be OK.
2289 */
2290 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2291 for (i = 0; i < mgpu_info.num_gpu; i++) {
2292 gpu_instance = &(mgpu_info.gpu_ins[i]);
2293 if (gpu_instance->adev->flags & AMD_IS_APU)
2294 continue;
2295
2296 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2297 AMDGPU_XGMI_PSTATE_MIN);
2298 if (r) {
2299 DRM_ERROR("pstate setting failed (%d).\n", r);
2300 break;
2301 }
2302 }
2303 }
2304
2305 mutex_unlock(&mgpu_info.mutex);
2306 }
2307
2308 return 0;
2309}
2310
2311/**
2312 * amdgpu_device_ip_fini - run fini for hardware IPs
2313 *
2314 * @adev: amdgpu_device pointer
2315 *
2316 * Main teardown pass for hardware IPs. The list of all the hardware
2317 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2318 * are run. hw_fini tears down the hardware associated with each IP
2319 * and sw_fini tears down any software state associated with each IP.
2320 * Returns 0 on success, negative error code on failure.
2321 */
2322static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2323{
2324 int i, r;
2325
2326 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2327 amdgpu_virt_release_ras_err_handler_data(adev);
2328
2329 amdgpu_ras_pre_fini(adev);
2330
2331 if (adev->gmc.xgmi.num_physical_nodes > 1)
2332 amdgpu_xgmi_remove_device(adev);
2333
2334 amdgpu_amdkfd_device_fini(adev);
2335
2336 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2337 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2338
2339 /* need to disable SMC first */
2340 for (i = 0; i < adev->num_ip_blocks; i++) {
2341 if (!adev->ip_blocks[i].status.hw)
2342 continue;
2343 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2344 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2345 /* XXX handle errors */
2346 if (r) {
2347 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2348 adev->ip_blocks[i].version->funcs->name, r);
2349 }
2350 adev->ip_blocks[i].status.hw = false;
2351 break;
2352 }
2353 }
2354
2355 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2356 if (!adev->ip_blocks[i].status.hw)
2357 continue;
2358
2359 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2360 /* XXX handle errors */
2361 if (r) {
2362 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2363 adev->ip_blocks[i].version->funcs->name, r);
2364 }
2365
2366 adev->ip_blocks[i].status.hw = false;
2367 }
2368
2370 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2371 if (!adev->ip_blocks[i].status.sw)
2372 continue;
2373
2374 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2375 amdgpu_ucode_free_bo(adev);
2376 amdgpu_free_static_csa(&adev->virt.csa_obj);
2377 amdgpu_device_wb_fini(adev);
2378 amdgpu_device_vram_scratch_fini(adev);
2379 amdgpu_ib_pool_fini(adev);
2380 }
2381
2382 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2383 /* XXX handle errors */
2384 if (r) {
2385 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2386 adev->ip_blocks[i].version->funcs->name, r);
2387 }
2388 adev->ip_blocks[i].status.sw = false;
2389 adev->ip_blocks[i].status.valid = false;
2390 }
2391
2392 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2393 if (!adev->ip_blocks[i].status.late_initialized)
2394 continue;
2395 if (adev->ip_blocks[i].version->funcs->late_fini)
2396 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2397 adev->ip_blocks[i].status.late_initialized = false;
2398 }
2399
2400 amdgpu_ras_fini(adev);
2401
2402 if (amdgpu_sriov_vf(adev))
2403 if (amdgpu_virt_release_full_gpu(adev, false))
2404 DRM_ERROR("failed to release exclusive mode on fini\n");
2405
2406 return 0;
2407}
2408
2409/**
2410 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2411 *
2412 * @work: work_struct.
2413 */
2414static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2415{
2416 struct amdgpu_device *adev =
2417 container_of(work, struct amdgpu_device, delayed_init_work.work);
2418 int r;
2419
2420 r = amdgpu_ib_ring_tests(adev);
2421 if (r)
2422 DRM_ERROR("ib ring test failed (%d).\n", r);
2423}
2424
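/*
 * amdgpu_device_delay_enable_gfx_off - work handler for enabling GFXOFF
 *
 * Actually enables GFXOFF, once nothing has requested that it stay
 * disabled (i.e. gfx_off_req_count has dropped to zero) for the whole
 * delay period.
 */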
2425static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2426{
2427 struct amdgpu_device *adev =
2428 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2429
2430 mutex_lock(&adev->gfx.gfx_off_mutex);
2431 if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2432 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2433 adev->gfx.gfx_off_state = true;
2434 }
2435 mutex_unlock(&adev->gfx.gfx_off_mutex);
2436}
2437
/**
 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
 *
 * @adev: amdgpu_device pointer
 *
 * Suspend phase 1 for hardware IPs. Clockgating and powergating are
 * disabled, then the list of all the hardware IPs that make up the asic
 * is walked and the suspend callbacks are run for the display (DCE) IPs
 * only; all other IPs are handled in phase 2. suspend puts the hardware
 * and software state in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
2449static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2450{
2451 int i, r;
2452
2453 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2454 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2455
2456 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2457 if (!adev->ip_blocks[i].status.valid)
2458 continue;
2459
2460 /* displays are handled separately */
2461 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2462 continue;
2463
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
2467 if (r) {
2468 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2469 adev->ip_blocks[i].version->funcs->name, r);
2470 return r;
2471 }
2472
2473 adev->ip_blocks[i].status.hw = false;
2474 }
2475
2476 return 0;
2477}
2478
/**
 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
 *
 * @adev: amdgpu_device pointer
 *
 * Suspend phase 2 for hardware IPs. The list of all the hardware IPs that
 * make up the asic is walked and the suspend callbacks are run for every
 * IP except the display (DCE) IPs, which were handled in phase 1. suspend
 * puts the hardware and software state in each IP into a state suitable
 * for suspend.
 * Returns 0 on success, negative error code on failure.
 */
2490static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2491{
2492 int i, r;
2493
2494 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2495 if (!adev->ip_blocks[i].status.valid)
2496 continue;
2497 /* displays are handled in phase1 */
2498 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2499 continue;
2500 /* PSP lost connection when err_event_athub occurs */
2501 if (amdgpu_ras_intr_triggered() &&
2502 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2503 adev->ip_blocks[i].status.hw = false;
2504 continue;
2505 }
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.hw = false;

		/* handle putting the SMC in the appropriate state */
		if (!amdgpu_sriov_vf(adev)) {
			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
				if (r) {
					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
						  adev->mp1_state, r);
					return r;
				}
			}
		}
2526 }
2527
2528 return 0;
2529}
2530
2531/**
2532 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2533 *
2534 * @adev: amdgpu_device pointer
2535 *
2536 * Main suspend function for hardware IPs. The list of all the hardware
2537 * IPs that make up the asic is walked, clockgating is disabled and the
2538 * suspend callbacks are run. suspend puts the hardware and software state
2539 * in each IP into a state suitable for suspend.
2540 * Returns 0 on success, negative error code on failure.
2541 */
2542int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2543{
2544 int r;
2545
2546 if (amdgpu_sriov_vf(adev))
2547 amdgpu_virt_request_full_gpu(adev, false);
2548
2549 r = amdgpu_device_ip_suspend_phase1(adev);
2550 if (r)
2551 return r;
2552 r = amdgpu_device_ip_suspend_phase2(adev);
2553
2554 if (amdgpu_sriov_vf(adev))
2555 amdgpu_virt_release_full_gpu(adev, false);
2556
2557 return r;
2558}
2559
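/*
 * amdgpu_device_ip_reinit_early_sriov - early hardware re-init after VF FLR
 *
 * Re-runs hw_init for the minimal set of IP blocks (GMC, COMMON, PSP, IH),
 * in the fixed order given by ip_order[], that must be back up on an
 * SR-IOV VF before firmware loading and the remaining blocks can resume.
 */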
2560static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2561{
2562 int i, r;
2563
2564 static enum amd_ip_block_type ip_order[] = {
2565 AMD_IP_BLOCK_TYPE_GMC,
2566 AMD_IP_BLOCK_TYPE_COMMON,
2567 AMD_IP_BLOCK_TYPE_PSP,
2568 AMD_IP_BLOCK_TYPE_IH,
2569 };
2570
2571 for (i = 0; i < adev->num_ip_blocks; i++)
2572 adev->ip_blocks[i].status.hw = false;
2573
2574 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2575 int j;
2576 struct amdgpu_ip_block *block;
2577
2578 for (j = 0; j < adev->num_ip_blocks; j++) {
2579 block = &adev->ip_blocks[j];
2580
2581 if (block->version->type != ip_order[i] ||
2582 !block->status.valid)
2583 continue;
2584
2585 r = block->version->funcs->hw_init(adev);
2586 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2587 if (r)
2588 return r;
2589 block->status.hw = true;
2590 }
2591 }
2592
2593 return 0;
2594}
2595
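/*
 * amdgpu_device_ip_reinit_late_sriov - late hardware re-init after VF FLR
 *
 * Brings the remaining IP blocks (SMC, display, GFX, SDMA and the
 * multimedia blocks) back up in the fixed order given by ip_order[] once
 * firmware has been reloaded; the SMC is resumed rather than re-inited.
 */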
2596static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2597{
2598 int i, r;
2599
2600 static enum amd_ip_block_type ip_order[] = {
2601 AMD_IP_BLOCK_TYPE_SMC,
2602 AMD_IP_BLOCK_TYPE_DCE,
2603 AMD_IP_BLOCK_TYPE_GFX,
2604 AMD_IP_BLOCK_TYPE_SDMA,
2605 AMD_IP_BLOCK_TYPE_UVD,
2606 AMD_IP_BLOCK_TYPE_VCE,
2607 AMD_IP_BLOCK_TYPE_VCN
2608 };
2609
2610 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2611 int j;
2612 struct amdgpu_ip_block *block;
2613
2614 for (j = 0; j < adev->num_ip_blocks; j++) {
2615 block = &adev->ip_blocks[j];
2616
2617 if (block->version->type != ip_order[i] ||
2618 !block->status.valid ||
2619 block->status.hw)
2620 continue;
2621
2622 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
2623 r = block->version->funcs->resume(adev);
2624 else
2625 r = block->version->funcs->hw_init(adev);
2626
2627 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2628 if (r)
2629 return r;
2630 block->status.hw = true;
2631 }
2632 }
2633
2634 return 0;
2635}
2636
2637/**
2638 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2639 *
2640 * @adev: amdgpu_device pointer
2641 *
2642 * First resume function for hardware IPs. The list of all the hardware
2643 * IPs that make up the asic is walked and the resume callbacks are run for
2644 * COMMON, GMC, and IH. resume puts the hardware into a functional state
2645 * after a suspend and updates the software state as necessary. This
2646 * function is also used for restoring the GPU after a GPU reset.
2647 * Returns 0 on success, negative error code on failure.
2648 */
2649static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2650{
2651 int i, r;
2652
2653 for (i = 0; i < adev->num_ip_blocks; i++) {
2654 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2655 continue;
2656 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2657 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2658 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2659
2660 r = adev->ip_blocks[i].version->funcs->resume(adev);
2661 if (r) {
2662 DRM_ERROR("resume of IP block <%s> failed %d\n",
2663 adev->ip_blocks[i].version->funcs->name, r);
2664 return r;
2665 }
2666 adev->ip_blocks[i].status.hw = true;
2667 }
2668 }
2669
2670 return 0;
2671}
2672
2673/**
2674 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2675 *
2676 * @adev: amdgpu_device pointer
2677 *
 * Second resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * all blocks except COMMON, GMC, IH, and PSP. resume puts the hardware into a
2681 * functional state after a suspend and updates the software state as
2682 * necessary. This function is also used for restoring the GPU after a GPU
2683 * reset.
2684 * Returns 0 on success, negative error code on failure.
2685 */
2686static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2687{
2688 int i, r;
2689
2690 for (i = 0; i < adev->num_ip_blocks; i++) {
2691 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2692 continue;
2693 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2694 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2695 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2696 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2697 continue;
2698 r = adev->ip_blocks[i].version->funcs->resume(adev);
2699 if (r) {
2700 DRM_ERROR("resume of IP block <%s> failed %d\n",
2701 adev->ip_blocks[i].version->funcs->name, r);
2702 return r;
2703 }
2704 adev->ip_blocks[i].status.hw = true;
2705 }
2706
2707 return 0;
2708}
2709
2710/**
2711 * amdgpu_device_ip_resume - run resume for hardware IPs
2712 *
2713 * @adev: amdgpu_device pointer
2714 *
 * Main resume function for hardware IPs. The hardware IPs
 * are split into two resume functions because they are
 * also used in recovering from a GPU reset, and some additional
 * steps need to be taken between them. In this case (S3/S4) they are
 * run sequentially.
2720 * Returns 0 on success, negative error code on failure.
2721 */
2722static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2723{
2724 int r;
2725
2726 r = amdgpu_device_ip_resume_phase1(adev);
2727 if (r)
2728 return r;
2729
2730 r = amdgpu_device_fw_loading(adev);
2731 if (r)
2732 return r;
2733
2734 r = amdgpu_device_ip_resume_phase2(adev);
2735
2736 return r;
2737}
2738
2739/**
2740 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2741 *
2742 * @adev: amdgpu_device pointer
2743 *
2744 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2745 */
2746static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2747{
2748 if (amdgpu_sriov_vf(adev)) {
2749 if (adev->is_atom_fw) {
2750 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2751 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2752 } else {
2753 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2754 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2755 }
2756
2757 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2758 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2759 }
2760}
2761
2762/**
2763 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2764 *
2765 * @asic_type: AMD asic type
2766 *
 * Check if there is DC (new modesetting infrastructure) support for an asic.
2768 * returns true if DC has support, false if not.
2769 */
2770bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2771{
2772 switch (asic_type) {
2773#if defined(CONFIG_DRM_AMD_DC)
2774 case CHIP_BONAIRE:
2775 case CHIP_KAVERI:
2776 case CHIP_KABINI:
2777 case CHIP_MULLINS:
2778 /*
2779 * We have systems in the wild with these ASICs that require
2780 * LVDS and VGA support which is not supported with DC.
2781 *
2782 * Fallback to the non-DC driver here by default so as not to
2783 * cause regressions.
2784 */
2785 return amdgpu_dc > 0;
2786 case CHIP_HAWAII:
2787 case CHIP_CARRIZO:
2788 case CHIP_STONEY:
2789 case CHIP_POLARIS10:
2790 case CHIP_POLARIS11:
2791 case CHIP_POLARIS12:
2792 case CHIP_VEGAM:
2793 case CHIP_TONGA:
2794 case CHIP_FIJI:
2795 case CHIP_VEGA10:
2796 case CHIP_VEGA12:
2797 case CHIP_VEGA20:
2798#if defined(CONFIG_DRM_AMD_DC_DCN)
2799 case CHIP_RAVEN:
2800 case CHIP_NAVI10:
2801 case CHIP_NAVI14:
2802 case CHIP_NAVI12:
2803 case CHIP_RENOIR:
2804#endif
2805#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
2806 case CHIP_SIENNA_CICHLID:
2807 case CHIP_NAVY_FLOUNDER:
2808#endif
2809 return amdgpu_dc != 0;
2810#endif
2811 default:
2812 if (amdgpu_dc > 0)
2813 DRM_INFO("Display Core has been requested via kernel parameter "
2814 "but isn't supported by ASIC, ignoring\n");
2815 return false;
2816 }
2817}
2818
2819/**
2820 * amdgpu_device_has_dc_support - check if dc is supported
2821 *
 * @adev: amdgpu_device pointer
2823 *
2824 * Returns true for supported, false for not supported
2825 */
2826bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2827{
2828 if (amdgpu_sriov_vf(adev))
2829 return false;
2830
2831 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2832}
2834
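/*
 * amdgpu_device_xgmi_reset_func - per-device ASIC reset work for an XGMI hive
 *
 * Queued once for every device in a hive so that the resets run in
 * parallel; the task barrier below keeps the BACO enter/exit (or full
 * ASIC reset) steps in lockstep across all hive members.
 */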
2835static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
2836{
2837 struct amdgpu_device *adev =
2838 container_of(__work, struct amdgpu_device, xgmi_reset_work);
2839 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
2840
2841 /* It's a bug to not have a hive within this function */
2842 if (WARN_ON(!hive))
2843 return;
2844
2845 /*
2846 * Use task barrier to synchronize all xgmi reset works across the
2847 * hive. task_barrier_enter and task_barrier_exit will block
2848 * until all the threads running the xgmi reset works reach
2849 * those points. task_barrier_full will do both blocks.
2850 */
2851 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
2852
2853 task_barrier_enter(&hive->tb);
2854 adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
2855
2856 if (adev->asic_reset_res)
2857 goto fail;
2858
2859 task_barrier_exit(&hive->tb);
2860 adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
2861
2862 if (adev->asic_reset_res)
2863 goto fail;
2864
2865 if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
2866 adev->mmhub.funcs->reset_ras_error_count(adev);
2867 } else {
2868
2869 task_barrier_full(&hive->tb);
2870 adev->asic_reset_res = amdgpu_asic_reset(adev);
2871 }
2872
2873fail:
2874 if (adev->asic_reset_res)
2875 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
2876 adev->asic_reset_res, adev->ddev->unique);
2877}
2878
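/*
 * amdgpu_device_get_job_timeout_settings - parse the amdgpu.lockup_timeout
 * module parameter into the per-engine job timeouts (in jiffies).
 */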
2879static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
2880{
2881 char *input = amdgpu_lockup_timeout;
2882 char *timeout_setting = NULL;
2883 int index = 0;
2884 long timeout;
2885 int ret = 0;
2886
	/*
	 * By default the timeout for non-compute jobs is 10000 ms and no
	 * timeout is enforced on compute jobs. In SR-IOV or passthrough
	 * mode, the default timeout for compute jobs is 60000 ms.
	 */
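	/*
	 * For example (hypothetical values), booting with
	 * amdgpu.lockup_timeout=10000,60000,10000,10000 would set the gfx,
	 * compute, sdma and video timeouts, in that order, per the switch
	 * statement below.
	 */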
2893 adev->gfx_timeout = msecs_to_jiffies(10000);
2894 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
2895 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
2896 adev->compute_timeout = msecs_to_jiffies(60000);
2897 else
2898 adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
2899
2900 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
2901 while ((timeout_setting = strsep(&input, ",")) &&
2902 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
2903 ret = kstrtol(timeout_setting, 0, &timeout);
2904 if (ret)
2905 return ret;
2906
2907 if (timeout == 0) {
2908 index++;
2909 continue;
2910 } else if (timeout < 0) {
2911 timeout = MAX_SCHEDULE_TIMEOUT;
2912 } else {
2913 timeout = msecs_to_jiffies(timeout);
2914 }
2915
2916 switch (index++) {
2917 case 0:
2918 adev->gfx_timeout = timeout;
2919 break;
2920 case 1:
2921 adev->compute_timeout = timeout;
2922 break;
2923 case 2:
2924 adev->sdma_timeout = timeout;
2925 break;
2926 case 3:
2927 adev->video_timeout = timeout;
2928 break;
2929 default:
2930 break;
2931 }
2932 }
		/*
		 * When only one value is specified, it applies to all
		 * non-compute jobs.
		 */
2937 if (index == 1) {
2938 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
2939 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
2940 adev->compute_timeout = adev->gfx_timeout;
2941 }
2942 }
2943
2944 return ret;
2945}
2946
2947static const struct attribute *amdgpu_dev_attributes[] = {
2948 &dev_attr_product_name.attr,
2949 &dev_attr_product_number.attr,
2950 &dev_attr_serial_number.attr,
2951 &dev_attr_pcie_replay_count.attr,
2952 NULL
2953};
2954
2955/**
2956 * amdgpu_device_init - initialize the driver
2957 *
2958 * @adev: amdgpu_device pointer
2959 * @ddev: drm dev pointer
2960 * @pdev: pci dev pointer
2961 * @flags: driver flags
2962 *
2963 * Initializes the driver info and hw (all asics).
2964 * Returns 0 for success or an error on failure.
2965 * Called at driver startup.
2966 */
2967int amdgpu_device_init(struct amdgpu_device *adev,
2968 struct drm_device *ddev,
2969 struct pci_dev *pdev,
2970 uint32_t flags)
2971{
2972 int r, i;
2973 bool boco = false;
2974 u32 max_MBps;
2975
2976 adev->shutdown = false;
2977 adev->dev = &pdev->dev;
2978 adev->ddev = ddev;
2979 adev->pdev = pdev;
2980 adev->flags = flags;
2981
2982 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
2983 adev->asic_type = amdgpu_force_asic_type;
2984 else
2985 adev->asic_type = flags & AMD_ASIC_MASK;
2986
2987 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2988 if (amdgpu_emu_mode == 1)
2989 adev->usec_timeout *= 10;
2990 adev->gmc.gart_size = 512 * 1024 * 1024;
2991 adev->accel_working = false;
2992 adev->num_rings = 0;
2993 adev->mman.buffer_funcs = NULL;
2994 adev->mman.buffer_funcs_ring = NULL;
2995 adev->vm_manager.vm_pte_funcs = NULL;
2996 adev->vm_manager.vm_pte_num_scheds = 0;
2997 adev->gmc.gmc_funcs = NULL;
2998 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2999 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3000
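	/* Point the indirect register accessors at stubs that flag the
	 * access as invalid; ASIC-specific setup installs the real
	 * callbacks during early init. */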
3001 adev->smc_rreg = &amdgpu_invalid_rreg;
3002 adev->smc_wreg = &amdgpu_invalid_wreg;
3003 adev->pcie_rreg = &amdgpu_invalid_rreg;
3004 adev->pcie_wreg = &amdgpu_invalid_wreg;
3005 adev->pciep_rreg = &amdgpu_invalid_rreg;
3006 adev->pciep_wreg = &amdgpu_invalid_wreg;
3007 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3008 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3009 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3010 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3011 adev->didt_rreg = &amdgpu_invalid_rreg;
3012 adev->didt_wreg = &amdgpu_invalid_wreg;
3013 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3014 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3015 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3016 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3017
3018 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3019 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3020 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3021
	/* mutex initialization is all done here so we
	 * can recall functions without having locking issues */
3024 atomic_set(&adev->irq.ih.lock, 0);
3025 mutex_init(&adev->firmware.mutex);
3026 mutex_init(&adev->pm.mutex);
3027 mutex_init(&adev->gfx.gpu_clock_mutex);
3028 mutex_init(&adev->srbm_mutex);
3029 mutex_init(&adev->gfx.pipe_reserve_mutex);
3030 mutex_init(&adev->gfx.gfx_off_mutex);
3031 mutex_init(&adev->grbm_idx_mutex);
3032 mutex_init(&adev->mn_lock);
3033 mutex_init(&adev->virt.vf_errors.lock);
3034 hash_init(adev->mn_hash);
3035 mutex_init(&adev->lock_reset);
3036 mutex_init(&adev->psp.mutex);
3037 mutex_init(&adev->notifier_lock);
3038
3039 r = amdgpu_device_check_arguments(adev);
3040 if (r)
3041 return r;
3042
3043 spin_lock_init(&adev->mmio_idx_lock);
3044 spin_lock_init(&adev->smc_idx_lock);
3045 spin_lock_init(&adev->pcie_idx_lock);
3046 spin_lock_init(&adev->uvd_ctx_idx_lock);
3047 spin_lock_init(&adev->didt_idx_lock);
3048 spin_lock_init(&adev->gc_cac_idx_lock);
3049 spin_lock_init(&adev->se_cac_idx_lock);
3050 spin_lock_init(&adev->audio_endpt_idx_lock);
3051 spin_lock_init(&adev->mm_stats.lock);
3052
3053 INIT_LIST_HEAD(&adev->shadow_list);
3054 mutex_init(&adev->shadow_list_lock);
3055
3056 INIT_DELAYED_WORK(&adev->delayed_init_work,
3057 amdgpu_device_delayed_init_work_handler);
3058 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3059 amdgpu_device_delay_enable_gfx_off);
3060
3061 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3062
3063 adev->gfx.gfx_off_req_count = 1;
3064 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3065
3066 atomic_set(&adev->throttling_logging_enabled, 1);
	/*
	 * If throttling continues, logging will be performed every minute
	 * to avoid log flooding. "-1" is subtracted since the thermal
	 * throttling interrupt comes every second. Thus, the total logging
	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
	 * for the throttling interrupt) = 60 seconds.
	 */
3074 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3075 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3076
3077 /* Registers mapping */
3078 /* TODO: block userspace mapping of io register */
3079 if (adev->asic_type >= CHIP_BONAIRE) {
3080 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3081 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3082 } else {
3083 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3084 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3085 }
3086
3087 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL)
		return -ENOMEM;
3091 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3092 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3093
3094 /* io port mapping */
3095 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3096 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
3097 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
3098 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
3099 break;
3100 }
3101 }
3102 if (adev->rio_mem == NULL)
3103 DRM_INFO("PCI I/O BAR is not found.\n");
3104
3105 /* enable PCIE atomic ops */
3106 r = pci_enable_atomic_ops_to_root(adev->pdev,
3107 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3108 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (r) {
		adev->have_atomics_support = false;
		DRM_INFO("PCIE atomic ops are not supported\n");
	} else {
		adev->have_atomics_support = true;
	}
3115
3116 amdgpu_device_get_pcie_info(adev);
3117
3118 if (amdgpu_mcbp)
3119 DRM_INFO("MCBP is enabled\n");
3120
3121 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3122 adev->enable_mes = true;
3123
3124 /* detect hw virtualization here */
3125 amdgpu_detect_virtualization(adev);
3126
3127 r = amdgpu_device_get_job_timeout_settings(adev);
3128 if (r) {
3129 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3130 return r;
3131 }
3132
3133 /* early init functions */
3134 r = amdgpu_device_ip_early_init(adev);
3135 if (r)
3136 return r;
3137
	/* doorbell bar mapping and doorbell index init */
3139 amdgpu_device_doorbell_init(adev);
3140
3141 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3142 /* this will fail for cards that aren't VGA class devices, just
3143 * ignore it */
3144 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3145
3146 if (amdgpu_device_supports_boco(ddev))
3147 boco = true;
3148 if (amdgpu_has_atpx() &&
3149 (amdgpu_is_atpx_hybrid() ||
3150 amdgpu_has_atpx_dgpu_power_cntl()) &&
3151 !pci_is_thunderbolt_attached(adev->pdev))
3152 vga_switcheroo_register_client(adev->pdev,
3153 &amdgpu_switcheroo_ops, boco);
3154 if (boco)
3155 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3156
3157 if (amdgpu_emu_mode == 1) {
3158 /* post the asic on emulation mode */
3159 emu_soc_asic_init(adev);
3160 goto fence_driver_init;
3161 }
3162
3163 /* detect if we are with an SRIOV vbios */
3164 amdgpu_device_detect_sriov_bios(adev);
3165
3166 /* check if we need to reset the asic
3167 * E.g., driver was not cleanly unloaded previously, etc.
3168 */
3169 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3170 r = amdgpu_asic_reset(adev);
3171 if (r) {
3172 dev_err(adev->dev, "asic reset on init failed\n");
3173 goto failed;
3174 }
3175 }
3176
3177 /* Post card if necessary */
3178 if (amdgpu_device_need_post(adev)) {
3179 if (!adev->bios) {
3180 dev_err(adev->dev, "no vBIOS found\n");
3181 r = -EINVAL;
3182 goto failed;
3183 }
3184 DRM_INFO("GPU posting now...\n");
3185 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
3186 if (r) {
3187 dev_err(adev->dev, "gpu post error!\n");
3188 goto failed;
3189 }
3190 }
3191
3192 if (adev->is_atom_fw) {
3193 /* Initialize clocks */
3194 r = amdgpu_atomfirmware_get_clock_info(adev);
3195 if (r) {
3196 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3197 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3198 goto failed;
3199 }
3200 } else {
3201 /* Initialize clocks */
3202 r = amdgpu_atombios_get_clock_info(adev);
3203 if (r) {
3204 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3205 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3206 goto failed;
3207 }
3208 /* init i2c buses */
3209 if (!amdgpu_device_has_dc_support(adev))
3210 amdgpu_atombios_i2c_init(adev);
3211 }
3212
3213fence_driver_init:
3214 /* Fence driver */
3215 r = amdgpu_fence_driver_init(adev);
3216 if (r) {
3217 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
3218 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3219 goto failed;
3220 }
3221
3222 /* init the mode config */
3223 drm_mode_config_init(adev->ddev);
3224
3225 r = amdgpu_device_ip_init(adev);
3226 if (r) {
3227 /* failed in exclusive mode due to timeout */
3228 if (amdgpu_sriov_vf(adev) &&
3229 !amdgpu_sriov_runtime(adev) &&
3230 amdgpu_virt_mmio_blocked(adev) &&
3231 !amdgpu_virt_wait_reset(adev)) {
3232 dev_err(adev->dev, "VF exclusive mode timeout\n");
3233 /* Don't send request since VF is inactive. */
3234 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3235 adev->virt.ops = NULL;
3236 r = -EAGAIN;
3237 goto failed;
3238 }
3239 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3240 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3241 goto failed;
3242 }
3243
3244 dev_info(adev->dev,
3245 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3246 adev->gfx.config.max_shader_engines,
3247 adev->gfx.config.max_sh_per_se,
3248 adev->gfx.config.max_cu_per_sh,
3249 adev->gfx.cu_info.number);
3250
3251 adev->accel_working = true;
3252
3253 amdgpu_vm_check_compute_bug(adev);
3254
3255 /* Initialize the buffer migration limit. */
3256 if (amdgpu_moverate >= 0)
3257 max_MBps = amdgpu_moverate;
3258 else
3259 max_MBps = 8; /* Allow 8 MB/s. */
3260 /* Get a log2 for easy divisions. */
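	/* e.g. the default 8 MB/s yields log2_max_MBps = 3, letting later
	 * byte-count math use shifts instead of divides. */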
3261 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3262
3263 amdgpu_fbdev_init(adev);
3264
3265 r = amdgpu_pm_sysfs_init(adev);
3266 if (r) {
3267 adev->pm_sysfs_en = false;
3268 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3269 } else
3270 adev->pm_sysfs_en = true;
3271
3272 r = amdgpu_ucode_sysfs_init(adev);
3273 if (r) {
3274 adev->ucode_sysfs_en = false;
3275 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3276 } else
3277 adev->ucode_sysfs_en = true;
3278
3279 if ((amdgpu_testing & 1)) {
3280 if (adev->accel_working)
3281 amdgpu_test_moves(adev);
3282 else
3283 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3284 }
3285 if (amdgpu_benchmarking) {
3286 if (adev->accel_working)
3287 amdgpu_benchmark(adev, amdgpu_benchmarking);
3288 else
3289 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3290 }
3291
	/*
	 * Register the gpu instance before amdgpu_device_enable_mgpu_fan_boost.
	 * Otherwise the mgpu fan boost feature will be skipped because the
	 * gpu instance count would come up short.
	 */
3297 amdgpu_register_gpu_instance(adev);
3298
3299 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3300 * explicit gating rather than handling it automatically.
3301 */
3302 r = amdgpu_device_ip_late_init(adev);
3303 if (r) {
3304 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3305 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3306 goto failed;
3307 }
3308
3309 /* must succeed. */
3310 amdgpu_ras_resume(adev);
3311
3312 queue_delayed_work(system_wq, &adev->delayed_init_work,
3313 msecs_to_jiffies(AMDGPU_RESUME_MS));
3314
3315 if (amdgpu_sriov_vf(adev))
3316 flush_delayed_work(&adev->delayed_init_work);
3317
3318 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3319 if (r) {
3320 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3321 return r;
3322 }
3323
	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
		r = amdgpu_pmu_init(adev);
		if (r)
			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}
3328
3329 return 0;
3330
3331failed:
3332 amdgpu_vf_error_trans_all(adev);
3333 if (boco)
3334 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3335
3336 return r;
3337}
3338
3339/**
3340 * amdgpu_device_fini - tear down the driver
3341 *
3342 * @adev: amdgpu_device pointer
3343 *
3344 * Tear down the driver info (all asics).
3345 * Called at driver shutdown.
3346 */
3347void amdgpu_device_fini(struct amdgpu_device *adev)
3348{
3349 int r;
3350
3351 DRM_INFO("amdgpu: finishing device.\n");
3352 flush_delayed_work(&adev->delayed_init_work);
3353 adev->shutdown = true;
3354
	/* make sure the IB tests have finished before entering exclusive mode
	 * to avoid preemption on the IB tests
	 */
3358 if (amdgpu_sriov_vf(adev))
3359 amdgpu_virt_request_full_gpu(adev, false);
3360
3361 /* disable all interrupts */
3362 amdgpu_irq_disable_all(adev);
3363 if (adev->mode_info.mode_config_initialized){
3364 if (!amdgpu_device_has_dc_support(adev))
3365 drm_helper_force_disable_all(adev->ddev);
3366 else
3367 drm_atomic_helper_shutdown(adev->ddev);
3368 }
3369 amdgpu_fence_driver_fini(adev);
3370 if (adev->pm_sysfs_en)
3371 amdgpu_pm_sysfs_fini(adev);
3372 amdgpu_fbdev_fini(adev);
3373 r = amdgpu_device_ip_fini(adev);
3374 release_firmware(adev->firmware.gpu_info_fw);
3375 adev->firmware.gpu_info_fw = NULL;
3376 adev->accel_working = false;
3377 /* free i2c buses */
3378 if (!amdgpu_device_has_dc_support(adev))
3379 amdgpu_i2c_fini(adev);
3380
3381 if (amdgpu_emu_mode != 1)
3382 amdgpu_atombios_fini(adev);
3383
3384 kfree(adev->bios);
3385 adev->bios = NULL;
3386 if (amdgpu_has_atpx() &&
3387 (amdgpu_is_atpx_hybrid() ||
3388 amdgpu_has_atpx_dgpu_power_cntl()) &&
3389 !pci_is_thunderbolt_attached(adev->pdev))
3390 vga_switcheroo_unregister_client(adev->pdev);
3391 if (amdgpu_device_supports_boco(adev->ddev))
3392 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3393 vga_client_register(adev->pdev, NULL, NULL, NULL);
3394 if (adev->rio_mem)
3395 pci_iounmap(adev->pdev, adev->rio_mem);
3396 adev->rio_mem = NULL;
3397 iounmap(adev->rmmio);
3398 adev->rmmio = NULL;
3399 amdgpu_device_doorbell_fini(adev);
3400
3401 if (adev->ucode_sysfs_en)
3402 amdgpu_ucode_sysfs_fini(adev);
3403
3404 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3405 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3406 amdgpu_pmu_fini(adev);
3407 if (adev->discovery_bin)
3408 amdgpu_discovery_fini(adev);
3409}
3410
3411
3412/*
3413 * Suspend & resume.
3414 */
3415/**
3416 * amdgpu_device_suspend - initiate device suspend
3417 *
3418 * @dev: drm dev pointer
 * @fbcon: notify the fbdev of suspend
3420 *
3421 * Puts the hw in the suspend state (all asics).
3422 * Returns 0 for success or an error on failure.
3423 * Called at driver suspend.
3424 */
3425int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3426{
3427 struct amdgpu_device *adev;
3428 struct drm_crtc *crtc;
3429 struct drm_connector *connector;
3430 struct drm_connector_list_iter iter;
3431 int r;
3432
	if (dev == NULL || dev->dev_private == NULL)
		return -ENODEV;
3436
3437 adev = dev->dev_private;
3438
3439 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3440 return 0;
3441
3442 adev->in_suspend = true;
3443 drm_kms_helper_poll_disable(dev);
3444
3445 if (fbcon)
3446 amdgpu_fbdev_set_suspend(adev, 1);
3447
3448 cancel_delayed_work_sync(&adev->delayed_init_work);
3449
3450 if (!amdgpu_device_has_dc_support(adev)) {
3451 /* turn off display hw */
3452 drm_modeset_lock_all(dev);
3453 drm_connector_list_iter_begin(dev, &iter);
3454 drm_for_each_connector_iter(connector, &iter)
3455 drm_helper_connector_dpms(connector,
3456 DRM_MODE_DPMS_OFF);
3457 drm_connector_list_iter_end(&iter);
3458 drm_modeset_unlock_all(dev);
3459 /* unpin the front buffers and cursors */
3460 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3461 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3462 struct drm_framebuffer *fb = crtc->primary->fb;
3463 struct amdgpu_bo *robj;
3464
3465 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3466 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3467 r = amdgpu_bo_reserve(aobj, true);
3468 if (r == 0) {
3469 amdgpu_bo_unpin(aobj);
3470 amdgpu_bo_unreserve(aobj);
3471 }
3472 }
3473
			if (fb == NULL || fb->obj[0] == NULL)
				continue;
3477 robj = gem_to_amdgpu_bo(fb->obj[0]);
3478 /* don't unpin kernel fb objects */
3479 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3480 r = amdgpu_bo_reserve(robj, true);
3481 if (r == 0) {
3482 amdgpu_bo_unpin(robj);
3483 amdgpu_bo_unreserve(robj);
3484 }
3485 }
3486 }
3487 }
3488
3489 amdgpu_ras_suspend(adev);
3490
3491 r = amdgpu_device_ip_suspend_phase1(adev);
3492
3493 amdgpu_amdkfd_suspend(adev, !fbcon);
3494
3495 /* evict vram memory */
3496 amdgpu_bo_evict_vram(adev);
3497
3498 amdgpu_fence_driver_suspend(adev);
3499
3500 r = amdgpu_device_ip_suspend_phase2(adev);
3501
3502 /* evict remaining vram memory
3503 * This second call to evict vram is to evict the gart page table
3504 * using the CPU.
3505 */
3506 amdgpu_bo_evict_vram(adev);
3507
3508 return 0;
3509}
3510
3511/**
3512 * amdgpu_device_resume - initiate device resume
3513 *
3514 * @dev: drm dev pointer
 * @fbcon: notify the fbdev of resume
3516 *
3517 * Bring the hw back to operating state (all asics).
3518 * Returns 0 for success or an error on failure.
3519 * Called at driver resume.
3520 */
3521int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3522{
3523 struct drm_connector *connector;
3524 struct drm_connector_list_iter iter;
3525 struct amdgpu_device *adev = dev->dev_private;
3526 struct drm_crtc *crtc;
3527 int r = 0;
3528
3529 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3530 return 0;
3531
3532 /* post card */
3533 if (amdgpu_device_need_post(adev)) {
3534 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
3535 if (r)
3536 DRM_ERROR("amdgpu asic init failed\n");
3537 }
3538
3539 r = amdgpu_device_ip_resume(adev);
3540 if (r) {
3541 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
3542 return r;
3543 }
3544 amdgpu_fence_driver_resume(adev);
3546
3547 r = amdgpu_device_ip_late_init(adev);
3548 if (r)
3549 return r;
3550
3551 queue_delayed_work(system_wq, &adev->delayed_init_work,
3552 msecs_to_jiffies(AMDGPU_RESUME_MS));
3553
3554 if (!amdgpu_device_has_dc_support(adev)) {
3555 /* pin cursors */
3556 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3557 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3558
3559 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3560 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3561 r = amdgpu_bo_reserve(aobj, true);
3562 if (r == 0) {
3563 r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3564 if (r != 0)
3565 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
3566 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3567 amdgpu_bo_unreserve(aobj);
3568 }
3569 }
3570 }
3571 }
3572 r = amdgpu_amdkfd_resume(adev, !fbcon);
3573 if (r)
3574 return r;
3575
3576 /* Make sure IB tests flushed */
3577 flush_delayed_work(&adev->delayed_init_work);
3578
3579 /* blat the mode back in */
3580 if (fbcon) {
3581 if (!amdgpu_device_has_dc_support(adev)) {
3582 /* pre DCE11 */
3583 drm_helper_resume_force_mode(dev);
3584
3585 /* turn on display hw */
3586 drm_modeset_lock_all(dev);
3587
3588 drm_connector_list_iter_begin(dev, &iter);
3589 drm_for_each_connector_iter(connector, &iter)
3590 drm_helper_connector_dpms(connector,
3591 DRM_MODE_DPMS_ON);
3592 drm_connector_list_iter_end(&iter);
3593
3594 drm_modeset_unlock_all(dev);
3595 }
3596 amdgpu_fbdev_set_suspend(adev, 0);
3597 }
3598
3599 drm_kms_helper_poll_enable(dev);
3600
3601 amdgpu_ras_resume(adev);
3602
3603 /*
3604 * Most of the connector probing functions try to acquire runtime pm
3605 * refs to ensure that the GPU is powered on when connector polling is
3606 * performed. Since we're calling this from a runtime PM callback,
3607 * trying to acquire rpm refs will cause us to deadlock.
3608 *
3609 * Since we're guaranteed to be holding the rpm lock, it's safe to
3610 * temporarily disable the rpm helpers so this doesn't deadlock us.
3611 */
3612#ifdef CONFIG_PM
3613 dev->dev->power.disable_depth++;
3614#endif
3615 if (!amdgpu_device_has_dc_support(adev))
3616 drm_helper_hpd_irq_event(dev);
3617 else
3618 drm_kms_helper_hotplug_event(dev);
3619#ifdef CONFIG_PM
3620 dev->dev->power.disable_depth--;
3621#endif
3622 adev->in_suspend = false;
3623
3624 return 0;
3625}
3626
3627/**
3628 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3629 *
3630 * @adev: amdgpu_device pointer
3631 *
3632 * The list of all the hardware IPs that make up the asic is walked and
3633 * the check_soft_reset callbacks are run. check_soft_reset determines
3634 * if the asic is still hung or not.
3635 * Returns true if any of the IPs are still in a hung state, false if not.
3636 */
3637static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3638{
3639 int i;
3640 bool asic_hang = false;
3641
3642 if (amdgpu_sriov_vf(adev))
3643 return true;
3644
3645 if (amdgpu_asic_need_full_reset(adev))
3646 return true;
3647
3648 for (i = 0; i < adev->num_ip_blocks; i++) {
3649 if (!adev->ip_blocks[i].status.valid)
3650 continue;
3651 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3652 adev->ip_blocks[i].status.hang =
3653 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3654 if (adev->ip_blocks[i].status.hang) {
3655 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3656 asic_hang = true;
3657 }
3658 }
3659 return asic_hang;
3660}
3661
3662/**
3663 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3664 *
3665 * @adev: amdgpu_device pointer
3666 *
3667 * The list of all the hardware IPs that make up the asic is walked and the
3668 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
3669 * handles any IP specific hardware or software state changes that are
3670 * necessary for a soft reset to succeed.
3671 * Returns 0 on success, negative error code on failure.
3672 */
3673static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3674{
3675 int i, r = 0;
3676
3677 for (i = 0; i < adev->num_ip_blocks; i++) {
3678 if (!adev->ip_blocks[i].status.valid)
3679 continue;
3680 if (adev->ip_blocks[i].status.hang &&
3681 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3682 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3683 if (r)
3684 return r;
3685 }
3686 }
3687
3688 return 0;
3689}
3690
3691/**
3692 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3693 *
3694 * @adev: amdgpu_device pointer
3695 *
3696 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
3697 * reset is necessary to recover.
3698 * Returns true if a full asic reset is required, false if not.
3699 */
3700static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3701{
3702 int i;
3703
3704 if (amdgpu_asic_need_full_reset(adev))
3705 return true;
3706
3707 for (i = 0; i < adev->num_ip_blocks; i++) {
3708 if (!adev->ip_blocks[i].status.valid)
3709 continue;
3710 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3711 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3712 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3713 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3714 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3715 if (adev->ip_blocks[i].status.hang) {
3716 DRM_INFO("Some block need full reset!\n");
3717 return true;
3718 }
3719 }
3720 }
3721 return false;
3722}
3723
3724/**
3725 * amdgpu_device_ip_soft_reset - do a soft reset
3726 *
3727 * @adev: amdgpu_device pointer
3728 *
3729 * The list of all the hardware IPs that make up the asic is walked and the
3730 * soft_reset callbacks are run if the block is hung. soft_reset handles any
3731 * IP specific hardware or software state changes that are necessary to soft
3732 * reset the IP.
3733 * Returns 0 on success, negative error code on failure.
3734 */
3735static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3736{
3737 int i, r = 0;
3738
3739 for (i = 0; i < adev->num_ip_blocks; i++) {
3740 if (!adev->ip_blocks[i].status.valid)
3741 continue;
3742 if (adev->ip_blocks[i].status.hang &&
3743 adev->ip_blocks[i].version->funcs->soft_reset) {
3744 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3745 if (r)
3746 return r;
3747 }
3748 }
3749
3750 return 0;
3751}
3752
3753/**
3754 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3755 *
3756 * @adev: amdgpu_device pointer
3757 *
3758 * The list of all the hardware IPs that make up the asic is walked and the
3759 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
3760 * handles any IP specific hardware or software state changes that are
3761 * necessary after the IP has been soft reset.
3762 * Returns 0 on success, negative error code on failure.
3763 */
3764static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
3765{
3766 int i, r = 0;
3767
3768 for (i = 0; i < adev->num_ip_blocks; i++) {
3769 if (!adev->ip_blocks[i].status.valid)
3770 continue;
3771 if (adev->ip_blocks[i].status.hang &&
3772 adev->ip_blocks[i].version->funcs->post_soft_reset)
3773 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
3774 if (r)
3775 return r;
3776 }
3777
3778 return 0;
3779}
3780
3781/**
3782 * amdgpu_device_recover_vram - Recover some VRAM contents
3783 *
3784 * @adev: amdgpu_device pointer
3785 *
3786 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
3787 * restore things like GPUVM page tables after a GPU reset where
3788 * the contents of VRAM might be lost.
3789 *
3790 * Returns:
3791 * 0 on success, negative error code on failure.
3792 */
3793static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
3794{
3795 struct dma_fence *fence = NULL, *next = NULL;
3796 struct amdgpu_bo *shadow;
3797 long r = 1, tmo;
3798
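	/* Under SR-IOV runtime mode the VF can be preempted by world
	 * switches, so allow a much longer wait per shadow-restore fence. */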
3799 if (amdgpu_sriov_runtime(adev))
3800 tmo = msecs_to_jiffies(8000);
3801 else
3802 tmo = msecs_to_jiffies(100);
3803
3804 DRM_INFO("recover vram bo from shadow start\n");
3805 mutex_lock(&adev->shadow_list_lock);
3806 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
3807
3808 /* No need to recover an evicted BO */
3809 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
3810 shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
3811 shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
3812 continue;
3813
3814 r = amdgpu_bo_restore_shadow(shadow, &next);
3815 if (r)
3816 break;
3817
3818 if (fence) {
3819 tmo = dma_fence_wait_timeout(fence, false, tmo);
3820 dma_fence_put(fence);
3821 fence = next;
3822 if (tmo == 0) {
3823 r = -ETIMEDOUT;
3824 break;
3825 } else if (tmo < 0) {
3826 r = tmo;
3827 break;
3828 }
3829 } else {
3830 fence = next;
3831 }
3832 }
3833 mutex_unlock(&adev->shadow_list_lock);
3834
3835 if (fence)
3836 tmo = dma_fence_wait_timeout(fence, false, tmo);
3837 dma_fence_put(fence);
3838
3839 if (r < 0 || tmo <= 0) {
3840 DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
3841 return -EIO;
3842 }
3843
3844 DRM_INFO("recover vram bo from shadow done\n");
3845 return 0;
3846}
3848
3849/**
3850 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
3851 *
 * @adev: amdgpu device pointer
 * @from_hypervisor: request from hypervisor
 *
 * Do a VF FLR and reinitialize the ASIC.
 * Returns 0 on success, negative error code on failure.
3857 */
3858static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3859 bool from_hypervisor)
3860{
3861 int r;
3862
3863 if (from_hypervisor)
3864 r = amdgpu_virt_request_full_gpu(adev, true);
3865 else
3866 r = amdgpu_virt_reset_gpu(adev);
3867 if (r)
3868 return r;
3869
3870 amdgpu_amdkfd_pre_reset(adev);
3871
3872 /* Resume IP prior to SMC */
3873 r = amdgpu_device_ip_reinit_early_sriov(adev);
3874 if (r)
3875 goto error;
3876
3877 amdgpu_virt_init_data_exchange(adev);
	/* we need to recover the gart prior to running SMC/CP/SDMA resume */
3879 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
3880
3881 r = amdgpu_device_fw_loading(adev);
3882 if (r)
3883 return r;
3884
3885 /* now we are okay to resume SMC/CP/SDMA */
3886 r = amdgpu_device_ip_reinit_late_sriov(adev);
3887 if (r)
3888 goto error;
3889
3890 amdgpu_irq_gpu_reset_resume_helper(adev);
3891 r = amdgpu_ib_ring_tests(adev);
3892 amdgpu_amdkfd_post_reset(adev);
3893
3894error:
3895 amdgpu_virt_release_full_gpu(adev, true);
3896 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3897 amdgpu_inc_vram_lost(adev);
3898 r = amdgpu_device_recover_vram(adev);
3899 }
3900
3901 return r;
3902}
3903
3904/**
3905 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
3906 *
3907 * @adev: amdgpu device pointer
3908 *
3909 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
3910 * a hung GPU.
3911 */
3912bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
3913{
3914 if (!amdgpu_device_ip_check_soft_reset(adev)) {
3915 DRM_INFO("Timeout, but no hardware hang detected.\n");
3916 return false;
3917 }
3918
3919 if (amdgpu_gpu_recovery == 0)
3920 goto disabled;
3921
3922 if (amdgpu_sriov_vf(adev))
3923 return true;
3924
3925 if (amdgpu_gpu_recovery == -1) {
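		/* "auto" mode: only recover on ASICs where reset is known to work */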
3926 switch (adev->asic_type) {
3927 case CHIP_BONAIRE:
3928 case CHIP_HAWAII:
3929 case CHIP_TOPAZ:
3930 case CHIP_TONGA:
3931 case CHIP_FIJI:
3932 case CHIP_POLARIS10:
3933 case CHIP_POLARIS11:
3934 case CHIP_POLARIS12:
3935 case CHIP_VEGAM:
3936 case CHIP_VEGA20:
3937 case CHIP_VEGA10:
3938 case CHIP_VEGA12:
3939 case CHIP_RAVEN:
3940 case CHIP_ARCTURUS:
3941 case CHIP_RENOIR:
3942 case CHIP_NAVI10:
3943 case CHIP_NAVI14:
3944 case CHIP_NAVI12:
3945 case CHIP_SIENNA_CICHLID:
3946 break;
3947 default:
3948 goto disabled;
3949 }
3950 }
3951
3952 return true;
3953
3954disabled:
3955 DRM_INFO("GPU recovery disabled.\n");
3956 return false;
3957}
3959
3960static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
3961 struct amdgpu_job *job,
3962 bool *need_full_reset_arg)
3963{
3964 int i, r = 0;
3965 bool need_full_reset = *need_full_reset_arg;
3966
3967 amdgpu_debugfs_wait_dump(adev);
3968
3969 /* block all schedulers and reset given job's ring */
3970 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3971 struct amdgpu_ring *ring = adev->rings[i];
3972
3973 if (!ring || !ring->sched.thread)
3974 continue;
3975
3976 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3977 amdgpu_fence_driver_force_completion(ring);
3978 }
3979
	if (job)
		drm_sched_increase_karma(&job->base);
3982
3983 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
3984 if (!amdgpu_sriov_vf(adev)) {
3985
3986 if (!need_full_reset)
3987 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
3988
3989 if (!need_full_reset) {
3990 amdgpu_device_ip_pre_soft_reset(adev);
3991 r = amdgpu_device_ip_soft_reset(adev);
3992 amdgpu_device_ip_post_soft_reset(adev);
3993 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
3994 DRM_INFO("soft reset failed, will fallback to full reset!\n");
3995 need_full_reset = true;
3996 }
3997 }
3998
3999 if (need_full_reset)
4000 r = amdgpu_device_ip_suspend(adev);
4001
4002 *need_full_reset_arg = need_full_reset;
4003 }
4004
4005 return r;
4006}
4007
4008static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
4009 struct list_head *device_list_handle,
4010 bool *need_full_reset_arg)
4011{
4012 struct amdgpu_device *tmp_adev = NULL;
4013 bool need_full_reset = *need_full_reset_arg, vram_lost = false;
4014 int r = 0;
4015
	/*
	 * ASIC reset has to be done on all XGMI hive nodes ASAP
	 * to allow proper link negotiation in FW (within 1 sec).
	 */
4020 if (need_full_reset) {
4021 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4022 /* For XGMI run all resets in parallel to speed up the process */
4023 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
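				/* queue_work() returns false if the work was already queued */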
4024 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4025 r = -EALREADY;
4026 } else
4027 r = amdgpu_asic_reset(tmp_adev);
4028
4029 if (r) {
4030 DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
4031 r, tmp_adev->ddev->unique);
4032 break;
4033 }
4034 }
4035
4036 /* For XGMI wait for all resets to complete before proceed */
4037 if (!r) {
4038 list_for_each_entry(tmp_adev, device_list_handle,
4039 gmc.xgmi.head) {
4040 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4041 flush_work(&tmp_adev->xgmi_reset_work);
4042 r = tmp_adev->asic_reset_res;
4043 if (r)
4044 break;
4045 }
4046 }
4047 }
4048 }
4049
4050 if (!r && amdgpu_ras_intr_triggered()) {
4051 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4052 if (tmp_adev->mmhub.funcs &&
4053 tmp_adev->mmhub.funcs->reset_ras_error_count)
4054 tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
4055 }
4056
4057 amdgpu_ras_intr_cleared();
4058 }
4059
4060 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4061 if (need_full_reset) {
4062 /* post card */
4063 if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
4064 DRM_WARN("asic atom init failed!");
4065
4066 if (!r) {
4067 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4068 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4069 if (r)
4070 goto out;
4071
4072 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4073 if (vram_lost) {
4074 DRM_INFO("VRAM is lost due to GPU reset!\n");
4075 amdgpu_inc_vram_lost(tmp_adev);
4076 }
4077
4078 r = amdgpu_gtt_mgr_recover(
4079 &tmp_adev->mman.bdev.man[TTM_PL_TT]);
4080 if (r)
4081 goto out;
4082
4083 r = amdgpu_device_fw_loading(tmp_adev);
4084 if (r)
4085 return r;
4086
4087 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4088 if (r)
4089 goto out;
4090
4091 if (vram_lost)
4092 amdgpu_device_fill_reset_magic(tmp_adev);
4093
				/*
				 * Add this ASIC as tracked, as the reset has already
				 * completed successfully.
				 */
4098 amdgpu_register_gpu_instance(tmp_adev);
4099
4100 r = amdgpu_device_ip_late_init(tmp_adev);
4101 if (r)
4102 goto out;
4103
4104 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4105
4106 /* must succeed. */
4107 amdgpu_ras_resume(tmp_adev);
4108
4109 /* Update PSP FW topology after reset */
4110 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4111 r = amdgpu_xgmi_update_topology(hive, tmp_adev);
4112 }
4113 }
4115
4116out:
4117 if (!r) {
4118 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4119 r = amdgpu_ib_ring_tests(tmp_adev);
4120 if (r) {
4121 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4122 r = amdgpu_device_ip_suspend(tmp_adev);
4123 need_full_reset = true;
4124 r = -EAGAIN;
4125 goto end;
4126 }
4127 }
4128
4129 if (!r)
4130 r = amdgpu_device_recover_vram(tmp_adev);
4131 else
4132 tmp_adev->asic_reset_res = r;
4133 }
4134
4135end:
4136 *need_full_reset_arg = need_full_reset;
4137 return r;
4138}
4139
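/*
 * amdgpu_device_lock_adev - serialize GPU resets on one device
 *
 * With trylock set, bail out instead of blocking if another reset
 * already holds the lock. On success, also pick the MP1 state matching
 * the reset method about to be used.
 */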
4140static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
4141{
4142 if (trylock) {
4143 if (!mutex_trylock(&adev->lock_reset))
4144 return false;
4145 } else
4146 mutex_lock(&adev->lock_reset);
4147
4148 atomic_inc(&adev->gpu_reset_counter);
4149 adev->in_gpu_reset = true;
4150 switch (amdgpu_asic_reset_method(adev)) {
4151 case AMD_RESET_METHOD_MODE1:
4152 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4153 break;
4154 case AMD_RESET_METHOD_MODE2:
4155 adev->mp1_state = PP_MP1_STATE_RESET;
4156 break;
4157 default:
4158 adev->mp1_state = PP_MP1_STATE_NONE;
4159 break;
4160 }
4161
4162 return true;
4163}
4164
4165static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4166{
4167 amdgpu_vf_error_trans_all(adev);
4168 adev->mp1_state = PP_MP1_STATE_NONE;
4169 adev->in_gpu_reset = false;
4170 mutex_unlock(&adev->lock_reset);
4171}
4172
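/*
 * Re-enable runtime PM of the audio function (PCI function 1 on the
 * GPU's bus) that amdgpu_device_suspend_display_audio() suspended
 * around the reset.
 */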
4173static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4174{
4175 struct pci_dev *p = NULL;
4176
4177 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4178 adev->pdev->bus->number, 1);
4179 if (p) {
4180 pm_runtime_enable(&(p->dev));
4181 pm_runtime_resume(&(p->dev));
4182 }
4183}
4184
4185static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4186{
4187 enum amd_reset_method reset_method;
4188 struct pci_dev *p = NULL;
4189 u64 expires;
4190
	/*
	 * For now, only BACO and mode1 reset are confirmed to
	 * suffer the audio issue if the audio device is not properly
	 * suspended first.
	 */
4195 reset_method = amdgpu_asic_reset_method(adev);
4196 if ((reset_method != AMD_RESET_METHOD_BACO) &&
4197 (reset_method != AMD_RESET_METHOD_MODE1))
4198 return -EINVAL;
4199
4200 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4201 adev->pdev->bus->number, 1);
4202 if (!p)
4203 return -ENODEV;
4204
4205 expires = pm_runtime_autosuspend_expiration(&(p->dev));
4206 if (!expires)
		/*
		 * If we cannot get the audio device autosuspend delay,
		 * a fixed 4S interval is used. Since 3S is the audio
		 * controller's default autosuspend delay setting, 4S is
		 * guaranteed to cover it.
		 */
4213 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4214
4215 while (!pm_runtime_status_suspended(&(p->dev))) {
4216 if (!pm_runtime_suspend(&(p->dev)))
4217 break;
4218
4219 if (expires < ktime_get_mono_fast_ns()) {
4220 dev_warn(adev->dev, "failed to suspend display audio\n");
4221 /* TODO: abort the succeeding gpu reset? */
4222 return -ETIMEDOUT;
4223 }
4224 }
4225
4226 pm_runtime_disable(&(p->dev));
4227
4228 return 0;
4229}
4230
4231/**
4232 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4233 *
4234 * @adev: amdgpu device pointer
4235 * @job: which job trigger hang
4236 *
4237 * Attempt to reset the GPU if it has hung (all asics).
4238 * Attempt to do soft-reset or full-reset and reinitialize Asic
4239 * Returns 0 for success or an error on failure.
4240 */
4241
4242int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4243 struct amdgpu_job *job)
4244{
	struct list_head device_list, *device_list_handle = NULL;
	bool need_full_reset = false;
	bool job_signaled = false;
	struct amdgpu_hive_info *hive = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	int i, r = 0;
	bool need_emergency_restart = false;
	bool audio_suspended = false;

	/*
	 * Special case: RAS triggered and full reset isn't supported
	 */
	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);

	/*
	 * Flush RAM to disk so that after reboot
	 * the user can read the log and see why the system rebooted.
	 */
	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
		DRM_WARN("Emergency reboot.");

		ksys_sync_helper();
		emergency_restart();
	}

	dev_info(adev->dev, "GPU %s begin!\n",
		need_emergency_restart ? "jobs stop":"reset");

	/*
	 * We use trylock here to avoid a chain of resets executing while
	 * this TO handler is running, whether triggered by jobs on
	 * different adevs in an XGMI hive or by jobs on different
	 * schedulers for the same device. We always reset all schedulers
	 * for a device and all devices in an XGMI hive, so that takes
	 * care of them too.
	 */
	hive = amdgpu_get_xgmi_hive(adev, true);
	if (hive && !mutex_trylock(&hive->reset_lock)) {
		DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
			  job ? job->base.id : -1, hive->hive_id);
		mutex_unlock(&hive->hive_lock);
		return 0;
	}

	/*
	 * Build the list of devices to reset.
	 * If we are in XGMI hive mode, reorder the hive's device list so
	 * that adev sits in the first position.
	 */
	INIT_LIST_HEAD(&device_list);
	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		if (!hive)
			return -ENODEV;
		if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
			list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
		device_list_handle = &hive->device_list;
	} else {
		list_add_tail(&adev->gmc.xgmi.head, &device_list);
		device_list_handle = &device_list;
	}

	/* block all schedulers and reset given job's ring */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		if (!amdgpu_device_lock_adev(tmp_adev, !hive)) {
			DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
				  job ? job->base.id : -1);
			if (hive)
				mutex_unlock(&hive->hive_lock);
			return 0;
		}

		/*
		 * Try to put the audio codec into suspend state
		 * before the gpu reset starts.
		 *
		 * Because the graphics device shares its power domain with
		 * the AZ (audio) power domain, we would otherwise change
		 * the audio hardware behind the audio driver's back, which
		 * triggers audio codec errors.
		 */
		if (!amdgpu_device_suspend_display_audio(tmp_adev))
			audio_suspended = true;

		amdgpu_ras_set_error_query_ready(tmp_adev, false);

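		/* make sure the deferred init work item is not running while we reset */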
		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);

		if (!amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_pre_reset(tmp_adev);

		/*
		 * Mark the ASICs to be reset as untracked first,
		 * and add them back after the reset completes.
		 */
		amdgpu_unregister_gpu_instance(tmp_adev);

		amdgpu_fbdev_set_suspend(tmp_adev, 1);

		/* disable ras on ALL IPs */
		if (!need_emergency_restart &&
		    amdgpu_device_ip_need_full_reset(tmp_adev))
			amdgpu_ras_suspend(tmp_adev);

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, job ? &job->base : NULL);

			if (need_emergency_restart)
				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
		}
	}

	if (need_emergency_restart)
		goto skip_sched_resume;

	/*
	 * Must check the guilty job's fence here, since after this point
	 * all old HW fences are force signaled.
	 *
	 * job->base holds a reference to the parent fence.
	 */
	if (job && job->base.s_fence->parent &&
	    dma_fence_is_signaled(job->base.s_fence->parent)) {
		job_signaled = true;
		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
		goto skip_hw_reset;
	}

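	/*
	 * amdgpu_do_asic_reset() returns -EAGAIN when its post-reset IB
	 * tests fail: need_full_reset is forced on in that case, so we
	 * loop back here and redo the pre-reset steps before attempting
	 * a full reset.
	 */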
retry:	/* Rest of adevs pre asic reset from XGMI hive. */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		r = amdgpu_device_pre_asic_reset(tmp_adev,
						 NULL,
						 &need_full_reset);
		/* TODO: should we stop here? */
		if (r) {
			DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
				  r, tmp_adev->ddev->unique);
			tmp_adev->asic_reset_res = r;
		}
	}

	/* Actual ASIC resets if needed. */
	/* TODO: implement XGMI hive reset logic for SRIOV */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_device_reset_sriov(adev, job ? false : true);
		if (r)
			adev->asic_reset_res = r;
	} else {
		r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
		if (r == -EAGAIN)
			goto retry;
	}

skip_hw_reset:

	/* Post ASIC reset for all devs. */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* No point in resubmitting jobs if we didn't HW reset */
			if (!tmp_adev->asic_reset_res && !job_signaled)
				drm_sched_resubmit_jobs(&ring->sched);

			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
		}

		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled)
			drm_helper_resume_force_mode(tmp_adev->ddev);

		tmp_adev->asic_reset_res = 0;

		if (r) {
			/* bad news, how to tell it to userspace ? */
			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n",
				 atomic_read(&tmp_adev->gpu_reset_counter));
			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
		} else {
			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
				 atomic_read(&tmp_adev->gpu_reset_counter));
		}
	}

skip_sched_resume:
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		/* unlock kfd: SRIOV would do it separately */
		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_post_reset(tmp_adev);
		if (audio_suspended)
			amdgpu_device_resume_display_audio(tmp_adev);
		amdgpu_device_unlock_adev(tmp_adev);
	}

	if (hive) {
		mutex_unlock(&hive->reset_lock);
		mutex_unlock(&hive->hive_lock);
	}

	if (r)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
	return r;
}

4454/**
4455 * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
4456 *
4457 * @adev: amdgpu_device pointer
4458 *
4459 * Fetchs and stores in the driver the PCIE capabilities (gen speed
4460 * and lanes) of the slot the device is in. Handles APUs and
4461 * virtualized environments where PCIE config space may not be available.
4462 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width;

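	/*
	 * Honor user overrides first: a nonzero amdgpu.pcie_gen_cap or
	 * amdgpu.pcie_lane_cap module parameter replaces whatever the
	 * hardware reports.
	 */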
	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	pcie_bandwidth_available(adev->pdev, NULL,
				 &platform_speed_cap, &platform_link_width);

	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		pdev = adev->pdev;
		speed_cap = pcie_get_speed_cap(pdev);
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}

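/**
 * amdgpu_device_baco_enter - enter BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * Puts the GPU into the BACO low-power state, disabling the RAS doorbell
 * interrupt first when RAS is supported. Returns 0 on success, or a
 * negative error code if the device does not support BACO or the DPM
 * call fails.
 *
 * The enter/exit helpers are used in pairs; for instance, amdgpu's
 * runtime-PM hooks (in amdgpu_drv.c) enter BACO on runtime suspend and
 * exit it on runtime resume, roughly:
 *
 *	r = amdgpu_device_baco_enter(drm_dev);	(runtime suspend)
 *	...
 *	r = amdgpu_device_baco_exit(drm_dev);	(runtime resume)
 */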
int amdgpu_device_baco_enter(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!amdgpu_device_supports_baco(adev->ddev))
		return -ENOTSUPP;

	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	return amdgpu_dpm_baco_enter(adev);
}

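/**
 * amdgpu_device_baco_exit - exit BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * Brings the GPU back out of the BACO low-power state and re-enables
 * the RAS doorbell interrupt when RAS is supported. Returns 0 on
 * success or a negative error code on failure.
 */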
int amdgpu_device_baco_exit(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (!amdgpu_device_supports_baco(adev->ddev))
		return -ENOTSUPP;

	ret = amdgpu_dpm_baco_exit(adev);
	if (ret)
		return ret;

	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}