1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/power_supply.h>
29#include <linux/kthread.h>
30#include <linux/module.h>
31#include <linux/console.h>
32#include <linux/slab.h>
33
34#include <drm/drm_atomic_helper.h>
35#include <drm/drm_probe_helper.h>
36#include <drm/amdgpu_drm.h>
37#include <linux/vgaarb.h>
38#include <linux/vga_switcheroo.h>
39#include <linux/efi.h>
40#include "amdgpu.h"
41#include "amdgpu_trace.h"
42#include "amdgpu_i2c.h"
43#include "atom.h"
44#include "amdgpu_atombios.h"
45#include "amdgpu_atomfirmware.h"
46#include "amd_pcie.h"
47#ifdef CONFIG_DRM_AMDGPU_SI
48#include "si.h"
49#endif
50#ifdef CONFIG_DRM_AMDGPU_CIK
51#include "cik.h"
52#endif
53#include "vi.h"
54#include "soc15.h"
55#include "nv.h"
56#include "bif/bif_4_1_d.h"
57#include <linux/pci.h>
58#include <linux/firmware.h>
59#include "amdgpu_vf_error.h"
60
61#include "amdgpu_amdkfd.h"
62#include "amdgpu_pm.h"
63
64#include "amdgpu_xgmi.h"
65#include "amdgpu_ras.h"
66#include "amdgpu_pmu.h"
67#include "amdgpu_fru_eeprom.h"
68
69#include <linux/suspend.h>
70#include <drm/task_barrier.h>
71#include <linux/pm_runtime.h>
72
73MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
74MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
75MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
76MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
77MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
78MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
79MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
80MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
81MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
82MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
83
84#define AMDGPU_RESUME_MS 2000
85
86const char *amdgpu_asic_name[] = {
87 "TAHITI",
88 "PITCAIRN",
89 "VERDE",
90 "OLAND",
91 "HAINAN",
92 "BONAIRE",
93 "KAVERI",
94 "KABINI",
95 "HAWAII",
96 "MULLINS",
97 "TOPAZ",
98 "TONGA",
99 "FIJI",
100 "CARRIZO",
101 "STONEY",
102 "POLARIS10",
103 "POLARIS11",
104 "POLARIS12",
105 "VEGAM",
106 "VEGA10",
107 "VEGA12",
108 "VEGA20",
109 "RAVEN",
110 "ARCTURUS",
111 "RENOIR",
112 "NAVI10",
113 "NAVI14",
114 "NAVI12",
115 "SIENNA_CICHLID",
116 "NAVY_FLOUNDER",
117 "LAST",
118};
119
120/**
121 * DOC: pcie_replay_count
122 *
123 * The amdgpu driver provides a sysfs API for reporting the total number
124 * of PCIe replays (NAKs).
125 * The file pcie_replay_count is used for this and returns the total
126 * number of replays as a sum of the NAKs generated and NAKs received.
127 */
128
129static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
130 struct device_attribute *attr, char *buf)
131{
132 struct drm_device *ddev = dev_get_drvdata(dev);
133 struct amdgpu_device *adev = ddev->dev_private;
134 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
135
136 return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
137}
138
139static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
140 amdgpu_device_get_pcie_replay_count, NULL);
141
142static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
143
144/**
145 * DOC: product_name
146 *
147 * The amdgpu driver provides a sysfs API for reporting the product name
148 * for the device.
149 * The file product_name is used for this and returns the product name
150 * as returned from the FRU.
151 * NOTE: This is only available for certain server cards
152 */
153
154static ssize_t amdgpu_device_get_product_name(struct device *dev,
155 struct device_attribute *attr, char *buf)
156{
157 struct drm_device *ddev = dev_get_drvdata(dev);
158 struct amdgpu_device *adev = ddev->dev_private;
159
160 return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
161}
162
163static DEVICE_ATTR(product_name, S_IRUGO,
164 amdgpu_device_get_product_name, NULL);
165
166/**
167 * DOC: product_number
168 *
169 * The amdgpu driver provides a sysfs API for reporting the part number
170 * for the device.
171 * The file product_number is used for this and returns the part number
172 * as returned from the FRU.
173 * NOTE: This is only available for certain server cards
174 */
175
176static ssize_t amdgpu_device_get_product_number(struct device *dev,
177 struct device_attribute *attr, char *buf)
178{
179 struct drm_device *ddev = dev_get_drvdata(dev);
180 struct amdgpu_device *adev = ddev->dev_private;
181
182 return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
183}
184
185static DEVICE_ATTR(product_number, S_IRUGO,
186 amdgpu_device_get_product_number, NULL);
187
188/**
189 * DOC: serial_number
190 *
191 * The amdgpu driver provides a sysfs API for reporting the serial number
192 * for the device.
193 * The file serial_number is used for this and returns the serial number
194 * as returned from the FRU.
195 * NOTE: This is only available for certain server cards
196 */
197
198static ssize_t amdgpu_device_get_serial_number(struct device *dev,
199 struct device_attribute *attr, char *buf)
200{
201 struct drm_device *ddev = dev_get_drvdata(dev);
202 struct amdgpu_device *adev = ddev->dev_private;
203
204 return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
205}
206
207static DEVICE_ATTR(serial_number, S_IRUGO,
208 amdgpu_device_get_serial_number, NULL);
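
/*
 * Usage sketch (illustrative only, not part of the driver): the sysfs
 * attributes above (pcie_replay_count, product_name, product_number and
 * serial_number) can be read from userspace through the device's sysfs
 * directory. The card index in the path is an assumption for the example:
 *
 *	FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
 *	unsigned long long replays = 0;
 *
 *	if (f) {
 *		if (fscanf(f, "%llu", &replays) != 1)
 *			replays = 0;
 *		fclose(f);
 *	}
 */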
209
210/**
211 * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
212 *
213 * @dev: drm_device pointer
214 *
215 * Returns true if the device is a dGPU with HG/PX power control,
216 * otherwise returns false.
217 */
218bool amdgpu_device_supports_boco(struct drm_device *dev)
219{
220 struct amdgpu_device *adev = dev->dev_private;
221
222 if (adev->flags & AMD_IS_PX)
223 return true;
224 return false;
225}
226
227/**
228 * amdgpu_device_supports_baco - Does the device support BACO
229 *
230 * @dev: drm_device pointer
231 *
232 * Returns true if the device supports BACO,
233 * otherwise returns false.
234 */
235bool amdgpu_device_supports_baco(struct drm_device *dev)
236{
237 struct amdgpu_device *adev = dev->dev_private;
238
239 return amdgpu_asic_supports_baco(adev);
240}
241
242/**
243 * VRAM access helper functions.
244 *
245 * amdgpu_device_vram_access - read/write a buffer in vram
246 *
247 * @adev: amdgpu_device pointer
248 * @pos: offset of the buffer in vram
249 * @buf: virtual address of the buffer in system memory
250 * @size: read/write size in bytes; the buffer at @buf must be at least @size bytes
251 * @write: true - write to vram, otherwise - read from vram
252 */
253void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
254 uint32_t *buf, size_t size, bool write)
255{
256 unsigned long flags;
257 uint32_t hi = ~0;
258 uint64_t last;
259
260
261#ifdef CONFIG_64BIT
262 last = min(pos + size, adev->gmc.visible_vram_size);
263 if (last > pos) {
264 void __iomem *addr = adev->mman.aper_base_kaddr + pos;
265 size_t count = last - pos;
266
267 if (write) {
268 memcpy_toio(addr, buf, count);
269 mb();
270 amdgpu_asic_flush_hdp(adev, NULL);
271 } else {
272 amdgpu_asic_invalidate_hdp(adev, NULL);
273 mb();
274 memcpy_fromio(buf, addr, count);
275 }
276
277 if (count == size)
278 return;
279
280 pos += count;
281 buf += count / 4;
282 size -= count;
283 }
284#endif
285
286 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
287 for (last = pos + size; pos < last; pos += 4) {
288 uint32_t tmp = pos >> 31;
289
290 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
291 if (tmp != hi) {
292 WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
293 hi = tmp;
294 }
295 if (write)
296 WREG32_NO_KIQ(mmMM_DATA, *buf++);
297 else
298 *buf++ = RREG32_NO_KIQ(mmMM_DATA);
299 }
300 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
301}
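
/*
 * Usage sketch (illustrative only): copy the first KiB of VRAM into a local
 * buffer. The offset and buffer size are arbitrary example values; adev is
 * assumed to be a fully initialized device.
 *
 *	uint32_t data[256];
 *
 *	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
 */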
302
303/*
304 * MMIO register access helper functions.
305 */
306/**
307 * amdgpu_mm_rreg - read a memory mapped IO register
308 *
309 * @adev: amdgpu_device pointer
310 * @reg: dword aligned register offset
311 * @acc_flags: access flags which require special behavior
312 *
313 * Returns the 32 bit value from the offset specified.
314 */
315uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
316 uint32_t acc_flags)
317{
318 uint32_t ret;
319
320 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
321 return amdgpu_kiq_rreg(adev, reg);
322
323 if ((reg * 4) < adev->rmmio_size)
324 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
325 else {
326 unsigned long flags;
327
328 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
329 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
330 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
331 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
332 }
333 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
334 return ret;
335}
336
337/*
338 * MMIO register byte read helper function
339 * @offset: byte offset from MMIO start
340 *
341 */
342
343/**
344 * amdgpu_mm_rreg8 - read a memory mapped IO register
345 *
346 * @adev: amdgpu_device pointer
347 * @offset: byte aligned register offset
348 *
349 * Returns the 8 bit value from the offset specified.
350 */
351uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
352 if (offset < adev->rmmio_size)
353 return (readb(adev->rmmio + offset));
354 BUG();
355}
356
357/*
358 * MMIO register byte write helper function
359 * @offset: byte offset from MMIO start
360 * @value: the value to be written to the register
361 *
362 */
363/**
364 * amdgpu_mm_wreg8 - write to a memory mapped IO register
365 *
366 * @adev: amdgpu_device pointer
367 * @offset: byte aligned register offset
368 * @value: 8 bit value to write
369 *
370 * Writes the value specified to the offset specified.
371 */
372void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
373 if (offset < adev->rmmio_size)
374 writeb(value, adev->rmmio + offset);
375 else
376 BUG();
377}
378
379static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
380{
381 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
382
383 if ((reg * 4) < adev->rmmio_size)
384 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
385 else {
386 unsigned long flags;
387
388 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
389 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
390 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
391 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
392 }
393}
394
395/**
396 * amdgpu_mm_wreg - write to a memory mapped IO register
397 *
398 * @adev: amdgpu_device pointer
399 * @reg: dword aligned register offset
400 * @v: 32 bit value to write to the register
401 * @acc_flags: access flags which require special behavior
402 *
403 * Writes the value specified to the offset specified.
404 */
405void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
406 uint32_t acc_flags)
407{
408 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
409 return amdgpu_kiq_wreg(adev, reg, v);
410
411 amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
412}
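
/*
 * Usage sketch (illustrative only): read-modify-write a register through
 * these helpers. reg_offset and some_bit_mask are placeholders; most callers
 * go through the RREG32/WREG32 style macros that wrap these functions.
 *
 *	uint32_t val = amdgpu_mm_rreg(adev, reg_offset, 0);
 *
 *	val |= some_bit_mask;
 *	amdgpu_mm_wreg(adev, reg_offset, val, 0);
 */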
413
414/*
415 * amdgpu_mm_wreg_mmio_rlc - write a register either via MMIO or via the RLC path if it is in range
416 *
417 * This function is invoked only for debugfs register access.
418 */
419void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
420 uint32_t acc_flags)
421{
422 if (amdgpu_sriov_fullaccess(adev) &&
423 adev->gfx.rlc.funcs &&
424 adev->gfx.rlc.funcs->is_rlcg_access_range) {
425
426 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
427 return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
428 }
429
430 amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
431}
432
433/**
434 * amdgpu_io_rreg - read an IO register
435 *
436 * @adev: amdgpu_device pointer
437 * @reg: dword aligned register offset
438 *
439 * Returns the 32 bit value from the offset specified.
440 */
441u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
442{
443 if ((reg * 4) < adev->rio_mem_size)
444 return ioread32(adev->rio_mem + (reg * 4));
445 else {
446 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
447 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
448 }
449}
450
451/**
452 * amdgpu_io_wreg - write to an IO register
453 *
454 * @adev: amdgpu_device pointer
455 * @reg: dword aligned register offset
456 * @v: 32 bit value to write to the register
457 *
458 * Writes the value specified to the offset specified.
459 */
460void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
461{
462 if ((reg * 4) < adev->rio_mem_size)
463 iowrite32(v, adev->rio_mem + (reg * 4));
464 else {
465 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
466 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
467 }
468}
469
470/**
471 * amdgpu_mm_rdoorbell - read a doorbell dword
472 *
473 * @adev: amdgpu_device pointer
474 * @index: doorbell index
475 *
476 * Returns the value in the doorbell aperture at the
477 * requested doorbell index (CIK).
478 */
479u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
480{
481 if (index < adev->doorbell.num_doorbells) {
482 return readl(adev->doorbell.ptr + index);
483 } else {
484 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
485 return 0;
486 }
487}
488
489/**
490 * amdgpu_mm_wdoorbell - write a doorbell dword
491 *
492 * @adev: amdgpu_device pointer
493 * @index: doorbell index
494 * @v: value to write
495 *
496 * Writes @v to the doorbell aperture at the
497 * requested doorbell index (CIK).
498 */
499void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
500{
501 if (index < adev->doorbell.num_doorbells) {
502 writel(v, adev->doorbell.ptr + index);
503 } else {
504 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
505 }
506}
507
508/**
509 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
510 *
511 * @adev: amdgpu_device pointer
512 * @index: doorbell index
513 *
514 * Returns the value in the doorbell aperture at the
515 * requested doorbell index (VEGA10+).
516 */
517u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
518{
519 if (index < adev->doorbell.num_doorbells) {
520 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
521 } else {
522 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
523 return 0;
524 }
525}
526
527/**
528 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
529 *
530 * @adev: amdgpu_device pointer
531 * @index: doorbell index
532 * @v: value to write
533 *
534 * Writes @v to the doorbell aperture at the
535 * requested doorbell index (VEGA10+).
536 */
537void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
538{
539 if (index < adev->doorbell.num_doorbells) {
540 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
541 } else {
542 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
543 }
544}
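
/*
 * Usage sketch (illustrative only): ring a doorbell by writing a ring's
 * write pointer to its doorbell slot. ring->doorbell_index and ring->wptr
 * are used here only as plausible example fields.
 *
 *	amdgpu_mm_wdoorbell64(adev, ring->doorbell_index, ring->wptr);
 */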
545
546/**
547 * amdgpu_invalid_rreg - dummy reg read function
548 *
549 * @adev: amdgpu device pointer
550 * @reg: offset of register
551 *
552 * Dummy register read function. Used for register blocks
553 * that certain asics don't have (all asics).
554 * Returns the value in the register.
555 */
556static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
557{
558 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
559 BUG();
560 return 0;
561}
562
563/**
564 * amdgpu_invalid_wreg - dummy reg write function
565 *
566 * @adev: amdgpu device pointer
567 * @reg: offset of register
568 * @v: value to write to the register
569 *
570 * Dummy register write function. Used for register blocks
571 * that certain asics don't have (all asics).
572 */
573static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
574{
575 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
576 reg, v);
577 BUG();
578}
579
580/**
581 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
582 *
583 * @adev: amdgpu device pointer
584 * @reg: offset of register
585 *
586 * Dummy register read function. Used for register blocks
587 * that certain asics don't have (all asics).
588 * Returns the value in the register.
589 */
590static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
591{
592 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
593 BUG();
594 return 0;
595}
596
597/**
598 * amdgpu_invalid_wreg64 - dummy reg write function
599 *
600 * @adev: amdgpu device pointer
601 * @reg: offset of register
602 * @v: value to write to the register
603 *
604 * Dummy register write function. Used for register blocks
605 * that certain asics don't have (all asics).
606 */
607static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
608{
609 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
610 reg, v);
611 BUG();
612}
613
614/**
615 * amdgpu_block_invalid_rreg - dummy reg read function
616 *
617 * @adev: amdgpu device pointer
618 * @block: offset of instance
619 * @reg: offset of register
620 *
621 * Dummy register read function. Used for register blocks
622 * that certain asics don't have (all asics).
623 * Returns the value in the register.
624 */
625static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
626 uint32_t block, uint32_t reg)
627{
628 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
629 reg, block);
630 BUG();
631 return 0;
632}
633
634/**
635 * amdgpu_block_invalid_wreg - dummy reg write function
636 *
637 * @adev: amdgpu device pointer
638 * @block: offset of instance
639 * @reg: offset of register
640 * @v: value to write to the register
641 *
642 * Dummy register write function. Used for register blocks
643 * that certain asics don't have (all asics).
644 */
645static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
646 uint32_t block,
647 uint32_t reg, uint32_t v)
648{
649 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
650 reg, block, v);
651 BUG();
652}
653
654/**
655 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
656 *
657 * @adev: amdgpu device pointer
658 *
659 * Allocates a scratch page of VRAM for use by various things in the
660 * driver.
661 */
662static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
663{
664 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
665 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
666 &adev->vram_scratch.robj,
667 &adev->vram_scratch.gpu_addr,
668 (void **)&adev->vram_scratch.ptr);
669}
670
671/**
672 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
673 *
674 * @adev: amdgpu device pointer
675 *
676 * Frees the VRAM scratch page.
677 */
678static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
679{
680 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
681}
682
683/**
684 * amdgpu_device_program_register_sequence - program an array of registers.
685 *
686 * @adev: amdgpu_device pointer
687 * @registers: pointer to the register array
688 * @array_size: size of the register array
689 *
690 * Programs an array of registers with AND and OR masks.
691 * This is a helper for setting golden registers.
692 */
693void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
694 const u32 *registers,
695 const u32 array_size)
696{
697 u32 tmp, reg, and_mask, or_mask;
698 int i;
699
700 if (array_size % 3)
701 return;
702
703	for (i = 0; i < array_size; i += 3) {
704 reg = registers[i + 0];
705 and_mask = registers[i + 1];
706 or_mask = registers[i + 2];
707
708 if (and_mask == 0xffffffff) {
709 tmp = or_mask;
710 } else {
711 tmp = RREG32(reg);
712 tmp &= ~and_mask;
713 if (adev->family >= AMDGPU_FAMILY_AI)
714 tmp |= (or_mask & and_mask);
715 else
716 tmp |= or_mask;
717 }
718 WREG32(reg, tmp);
719 }
720}
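
/*
 * Usage sketch (illustrative only): the register array is consumed as
 * {offset, AND mask, OR mask} triplets, which is how the per-ASIC "golden"
 * register lists are laid out. The values below are made up for the example:
 *
 *	static const u32 example_golden_settings[] = {
 *		0x1234, 0xffffffff, 0x00000001,
 *		0x5678, 0x0000ff00, 0x00002300,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *						ARRAY_SIZE(example_golden_settings));
 */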
721
722/**
723 * amdgpu_device_pci_config_reset - reset the GPU
724 *
725 * @adev: amdgpu_device pointer
726 *
727 * Resets the GPU using the pci config reset sequence.
728 * Only applicable to asics prior to vega10.
729 */
730void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
731{
732 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
733}
734
735/*
736 * GPU doorbell aperture helpers function.
737 */
738/**
739 * amdgpu_device_doorbell_init - Init doorbell driver information.
740 *
741 * @adev: amdgpu_device pointer
742 *
743 * Init doorbell driver information (CIK)
744 * Returns 0 on success, error on failure.
745 */
746static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
747{
748
749 /* No doorbell on SI hardware generation */
750 if (adev->asic_type < CHIP_BONAIRE) {
751 adev->doorbell.base = 0;
752 adev->doorbell.size = 0;
753 adev->doorbell.num_doorbells = 0;
754 adev->doorbell.ptr = NULL;
755 return 0;
756 }
757
758 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
759 return -EINVAL;
760
761 amdgpu_asic_init_doorbell_index(adev);
762
763 /* doorbell bar mapping */
764 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
765 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
766
767 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
768 adev->doorbell_index.max_assignment+1);
769 if (adev->doorbell.num_doorbells == 0)
770 return -EINVAL;
771
772	/* For Vega, reserve and map two pages on doorbell BAR since SDMA
773	 * paging queue doorbell uses the second page. The
774	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
775	 * doorbells are in the first page. So with paging queue enabled,
776	 * the max num_doorbells should be increased by 1 page (0x400 in dwords).
777 */
778 if (adev->asic_type >= CHIP_VEGA10)
779 adev->doorbell.num_doorbells += 0x400;
780
781 adev->doorbell.ptr = ioremap(adev->doorbell.base,
782 adev->doorbell.num_doorbells *
783 sizeof(u32));
784 if (adev->doorbell.ptr == NULL)
785 return -ENOMEM;
786
787 return 0;
788}
789
790/**
791 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
792 *
793 * @adev: amdgpu_device pointer
794 *
795 * Tear down doorbell driver information (CIK)
796 */
797static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
798{
799 iounmap(adev->doorbell.ptr);
800 adev->doorbell.ptr = NULL;
801}
802
803
804
805/*
806 * amdgpu_device_wb_*()
807 * Writeback is the method by which the GPU updates special pages in memory
808 * with the status of certain GPU events (fences, ring pointers, etc.).
809 */
810
811/**
812 * amdgpu_device_wb_fini - Disable Writeback and free memory
813 *
814 * @adev: amdgpu_device pointer
815 *
816 * Disables Writeback and frees the Writeback memory (all asics).
817 * Used at driver shutdown.
818 */
819static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
820{
821 if (adev->wb.wb_obj) {
822 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
823 &adev->wb.gpu_addr,
824 (void **)&adev->wb.wb);
825 adev->wb.wb_obj = NULL;
826 }
827}
828
829/**
830 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
831 *
832 * @adev: amdgpu_device pointer
833 *
834 * Initializes writeback and allocates writeback memory (all asics).
835 * Used at driver startup.
836 * Returns 0 on success or a negative error code on failure.
837 */
838static int amdgpu_device_wb_init(struct amdgpu_device *adev)
839{
840 int r;
841
842 if (adev->wb.wb_obj == NULL) {
843 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
844 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
845 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
846 &adev->wb.wb_obj, &adev->wb.gpu_addr,
847 (void **)&adev->wb.wb);
848 if (r) {
849 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
850 return r;
851 }
852
853 adev->wb.num_wb = AMDGPU_MAX_WB;
854 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
855
856 /* clear wb memory */
857 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
858 }
859
860 return 0;
861}
862
863/**
864 * amdgpu_device_wb_get - Allocate a wb entry
865 *
866 * @adev: amdgpu_device pointer
867 * @wb: wb index
868 *
869 * Allocate a wb slot for use by the driver (all asics).
870 * Returns 0 on success or -EINVAL on failure.
871 */
872int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
873{
874 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
875
876 if (offset < adev->wb.num_wb) {
877 __set_bit(offset, adev->wb.used);
878 *wb = offset << 3; /* convert to dw offset */
879 return 0;
880 } else {
881 return -EINVAL;
882 }
883}
884
885/**
886 * amdgpu_device_wb_free - Free a wb entry
887 *
888 * @adev: amdgpu_device pointer
889 * @wb: wb index
890 *
891 * Free a wb slot allocated for use by the driver (all asics)
892 */
893void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
894{
895 wb >>= 3;
896 if (wb < adev->wb.num_wb)
897 __clear_bit(wb, adev->wb.used);
898}
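
/*
 * Usage sketch (illustrative only): the writeback helpers hand out dword
 * offsets into the shared writeback buffer. A typical allocate/use/free
 * pattern:
 *
 *	u32 wb;
 *
 *	if (amdgpu_device_wb_get(adev, &wb) == 0) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *		u32 val;
 *
 *		...have the GPU write its status to gpu_addr...
 *		val = adev->wb.wb[wb];
 *
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */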
899
900/**
901 * amdgpu_device_resize_fb_bar - try to resize FB BAR
902 *
903 * @adev: amdgpu_device pointer
904 *
905 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
906 * to fail, but if any of the BARs is not accessible after the resize we abort
907 * driver loading by returning -ENODEV.
908 */
909int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
910{
911 u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
912 u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
913 struct pci_bus *root;
914 struct resource *res;
915 unsigned i;
916 u16 cmd;
917 int r;
918
919 /* Bypass for VF */
920 if (amdgpu_sriov_vf(adev))
921 return 0;
922
923 /* skip if the bios has already enabled large BAR */
924 if (adev->gmc.real_vram_size &&
925 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
926 return 0;
927
928 /* Check if the root BUS has 64bit memory resources */
929 root = adev->pdev->bus;
930 while (root->parent)
931 root = root->parent;
932
933 pci_bus_for_each_resource(root, res, i) {
934 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
935 res->start > 0x100000000ull)
936 break;
937 }
938
939 /* Trying to resize is pointless without a root hub window above 4GB */
940 if (!res)
941 return 0;
942
943 /* Disable memory decoding while we change the BAR addresses and size */
944 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
945 pci_write_config_word(adev->pdev, PCI_COMMAND,
946 cmd & ~PCI_COMMAND_MEMORY);
947
948 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
949 amdgpu_device_doorbell_fini(adev);
950 if (adev->asic_type >= CHIP_BONAIRE)
951 pci_release_resource(adev->pdev, 2);
952
953 pci_release_resource(adev->pdev, 0);
954
955 r = pci_resize_resource(adev->pdev, 0, rbar_size);
956 if (r == -ENOSPC)
957 DRM_INFO("Not enough PCI address space for a large BAR.");
958 else if (r && r != -ENOTSUPP)
959 DRM_ERROR("Problem resizing BAR0 (%d).", r);
960
961 pci_assign_unassigned_bus_resources(adev->pdev->bus);
962
963 /* When the doorbell or fb BAR isn't available we have no chance of
964 * using the device.
965 */
966 r = amdgpu_device_doorbell_init(adev);
967 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
968 return -ENODEV;
969
970 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
971
972 return 0;
973}
974
975/*
976 * GPU helpers function.
977 */
978/**
979 * amdgpu_device_need_post - check if the hw needs post or not
980 *
981 * @adev: amdgpu_device pointer
982 *
983 * Check if the asic has been initialized (all asics) at driver startup
984 * or if post is needed because a hw reset was performed.
985 * Returns true if post is needed or false if not.
986 */
987bool amdgpu_device_need_post(struct amdgpu_device *adev)
988{
989 uint32_t reg;
990
991 if (amdgpu_sriov_vf(adev))
992 return false;
993
994 if (amdgpu_passthrough(adev)) {
995	/* for FIJI: In the whole GPU pass-through virtualization case, after VM reboot
996	 * some old SMC firmware still needs the driver to do a vPost, otherwise the
997	 * GPU hangs. SMC firmware versions above 22.15 don't have this flaw, so we
998	 * force vPost for SMC versions below 22.15.
999 */
1000 if (adev->asic_type == CHIP_FIJI) {
1001 int err;
1002 uint32_t fw_ver;
1003 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1004	/* force vPost if error occurred */
1005 if (err)
1006 return true;
1007
1008 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1009 if (fw_ver < 0x00160e00)
1010 return true;
1011 }
1012 }
1013
1014 if (adev->has_hw_reset) {
1015 adev->has_hw_reset = false;
1016 return true;
1017 }
1018
1019 /* bios scratch used on CIK+ */
1020 if (adev->asic_type >= CHIP_BONAIRE)
1021 return amdgpu_atombios_scratch_need_asic_init(adev);
1022
1023 /* check MEM_SIZE for older asics */
1024 reg = amdgpu_asic_get_config_memsize(adev);
1025
1026 if ((reg != 0) && (reg != 0xffffffff))
1027 return false;
1028
1029 return true;
1030}
1031
1032/* if we get transitioned to only one device, take VGA back */
1033/**
1034 * amdgpu_device_vga_set_decode - enable/disable vga decode
1035 *
1036 * @cookie: amdgpu_device pointer
1037 * @state: enable/disable vga decode
1038 *
1039 * Enable/disable vga decode (all asics).
1040 * Returns VGA resource flags.
1041 */
1042static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
1043{
1044 struct amdgpu_device *adev = cookie;
1045 amdgpu_asic_set_vga_state(adev, state);
1046 if (state)
1047 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1048 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1049 else
1050 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1051}
1052
1053/**
1054 * amdgpu_device_check_block_size - validate the vm block size
1055 *
1056 * @adev: amdgpu_device pointer
1057 *
1058 * Validates the vm block size specified via module parameter.
1059 * The vm block size defines number of bits in page table versus page directory,
1060 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1061 * page table and the remaining bits are in the page directory.
1062 */
1063static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1064{
1065 /* defines number of bits in page table versus page directory,
1066 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1067 * page table and the remaining bits are in the page directory */
1068 if (amdgpu_vm_block_size == -1)
1069 return;
1070
1071 if (amdgpu_vm_block_size < 9) {
1072 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1073 amdgpu_vm_block_size);
1074 amdgpu_vm_block_size = -1;
1075 }
1076}
1077
1078/**
1079 * amdgpu_device_check_vm_size - validate the vm size
1080 *
1081 * @adev: amdgpu_device pointer
1082 *
1083 * Validates the vm size in GB specified via module parameter.
1084 * The VM size is the size of the GPU virtual memory space in GB.
1085 */
1086static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1087{
1088 /* no need to check the default value */
1089 if (amdgpu_vm_size == -1)
1090 return;
1091
1092 if (amdgpu_vm_size < 1) {
1093 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1094 amdgpu_vm_size);
1095 amdgpu_vm_size = -1;
1096 }
1097}
1098
1099static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1100{
1101 struct sysinfo si;
1102 bool is_os_64 = (sizeof(void *) == 8);
1103 uint64_t total_memory;
1104 uint64_t dram_size_seven_GB = 0x1B8000000;
1105 uint64_t dram_size_three_GB = 0xB8000000;
1106
1107 if (amdgpu_smu_memory_pool_size == 0)
1108 return;
1109
1110 if (!is_os_64) {
1111 DRM_WARN("Not 64-bit OS, feature not supported\n");
1112 goto def_value;
1113 }
1114 si_meminfo(&si);
1115 total_memory = (uint64_t)si.totalram * si.mem_unit;
1116
1117 if ((amdgpu_smu_memory_pool_size == 1) ||
1118 (amdgpu_smu_memory_pool_size == 2)) {
1119 if (total_memory < dram_size_three_GB)
1120 goto def_value1;
1121 } else if ((amdgpu_smu_memory_pool_size == 4) ||
1122 (amdgpu_smu_memory_pool_size == 8)) {
1123 if (total_memory < dram_size_seven_GB)
1124 goto def_value1;
1125 } else {
1126 DRM_WARN("Smu memory pool size not supported\n");
1127 goto def_value;
1128 }
1129 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1130
1131 return;
1132
1133def_value1:
1134	DRM_WARN("Not enough system memory\n");
1135def_value:
1136 adev->pm.smu_prv_buffer_size = 0;
1137}
1138
1139/**
1140 * amdgpu_device_check_arguments - validate module params
1141 *
1142 * @adev: amdgpu_device pointer
1143 *
1144 * Validates certain module parameters and updates
1145 * the associated values used by the driver (all asics).
1146 */
1147static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1148{
1149 if (amdgpu_sched_jobs < 4) {
1150 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1151 amdgpu_sched_jobs);
1152 amdgpu_sched_jobs = 4;
1153 } else if (!is_power_of_2(amdgpu_sched_jobs)){
1154 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1155 amdgpu_sched_jobs);
1156 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1157 }
1158
1159 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1160 /* gart size must be greater or equal to 32M */
1161 dev_warn(adev->dev, "gart size (%d) too small\n",
1162 amdgpu_gart_size);
1163 amdgpu_gart_size = -1;
1164 }
1165
1166 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1167 /* gtt size must be greater or equal to 32M */
1168 dev_warn(adev->dev, "gtt size (%d) too small\n",
1169 amdgpu_gtt_size);
1170 amdgpu_gtt_size = -1;
1171 }
1172
1173 /* valid range is between 4 and 9 inclusive */
1174 if (amdgpu_vm_fragment_size != -1 &&
1175 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1176 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1177 amdgpu_vm_fragment_size = -1;
1178 }
1179
1180 if (amdgpu_sched_hw_submission < 2) {
1181 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1182 amdgpu_sched_hw_submission);
1183 amdgpu_sched_hw_submission = 2;
1184 } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1185 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1186 amdgpu_sched_hw_submission);
1187 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1188 }
1189
1190 amdgpu_device_check_smu_prv_buffer_size(adev);
1191
1192 amdgpu_device_check_vm_size(adev);
1193
1194 amdgpu_device_check_block_size(adev);
1195
1196 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1197
1198 amdgpu_gmc_tmz_set(adev);
1199
1200 return 0;
1201}
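
/*
 * Usage sketch (illustrative only): the values validated above come from the
 * amdgpu module parameters, e.g. on the kernel command line (the numbers are
 * arbitrary examples, the parameter names are the ones exposed by this
 * driver):
 *
 *	amdgpu.vm_size=256 amdgpu.sched_jobs=64 amdgpu.sched_hw_submission=4
 */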
1202
1203/**
1204 * amdgpu_switcheroo_set_state - set switcheroo state
1205 *
1206 * @pdev: pci dev pointer
1207 * @state: vga_switcheroo state
1208 *
1209 * Callback for the switcheroo driver. Suspends or resumes the
1210 * asics before or after it is powered up using ACPI methods.
1211 */
1212static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1213{
1214 struct drm_device *dev = pci_get_drvdata(pdev);
1215 int r;
1216
1217 if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
1218 return;
1219
1220 if (state == VGA_SWITCHEROO_ON) {
1221 pr_info("switched on\n");
1222 /* don't suspend or resume card normally */
1223 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1224
1225 pci_set_power_state(dev->pdev, PCI_D0);
1226 pci_restore_state(dev->pdev);
1227 r = pci_enable_device(dev->pdev);
1228 if (r)
1229 DRM_WARN("pci_enable_device failed (%d)\n", r);
1230 amdgpu_device_resume(dev, true);
1231
1232 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1233 drm_kms_helper_poll_enable(dev);
1234 } else {
1235 pr_info("switched off\n");
1236 drm_kms_helper_poll_disable(dev);
1237 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1238 amdgpu_device_suspend(dev, true);
1239 pci_save_state(dev->pdev);
1240 /* Shut down the device */
1241 pci_disable_device(dev->pdev);
1242 pci_set_power_state(dev->pdev, PCI_D3cold);
1243 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1244 }
1245}
1246
1247/**
1248 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1249 *
1250 * @pdev: pci dev pointer
1251 *
1252 * Callback for the switcheroo driver. Check if the switcheroo
1253 * state can be changed.
1254 * Returns true if the state can be changed, false if not.
1255 */
1256static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1257{
1258 struct drm_device *dev = pci_get_drvdata(pdev);
1259
1260 /*
1261 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1262 * locking inversion with the driver load path. And the access here is
1263 * completely racy anyway. So don't bother with locking for now.
1264 */
1265 return atomic_read(&dev->open_count) == 0;
1266}
1267
1268static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1269 .set_gpu_state = amdgpu_switcheroo_set_state,
1270 .reprobe = NULL,
1271 .can_switch = amdgpu_switcheroo_can_switch,
1272};
1273
1274/**
1275 * amdgpu_device_ip_set_clockgating_state - set the CG state
1276 *
1277 * @dev: amdgpu_device pointer
1278 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1279 * @state: clockgating state (gate or ungate)
1280 *
1281 * Sets the requested clockgating state for all instances of
1282 * the hardware IP specified.
1283 * Returns the error code from the last instance.
1284 */
1285int amdgpu_device_ip_set_clockgating_state(void *dev,
1286 enum amd_ip_block_type block_type,
1287 enum amd_clockgating_state state)
1288{
1289 struct amdgpu_device *adev = dev;
1290 int i, r = 0;
1291
1292 for (i = 0; i < adev->num_ip_blocks; i++) {
1293 if (!adev->ip_blocks[i].status.valid)
1294 continue;
1295 if (adev->ip_blocks[i].version->type != block_type)
1296 continue;
1297 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1298 continue;
1299 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1300 (void *)adev, state);
1301 if (r)
1302 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1303 adev->ip_blocks[i].version->funcs->name, r);
1304 }
1305 return r;
1306}
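
/*
 * Usage sketch (illustrative only): gate clocks for all GFX IP instances.
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						   AMD_CG_STATE_GATE);
 */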
1307
1308/**
1309 * amdgpu_device_ip_set_powergating_state - set the PG state
1310 *
1311 * @dev: amdgpu_device pointer
1312 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1313 * @state: powergating state (gate or ungate)
1314 *
1315 * Sets the requested powergating state for all instances of
1316 * the hardware IP specified.
1317 * Returns the error code from the last instance.
1318 */
1319int amdgpu_device_ip_set_powergating_state(void *dev,
1320 enum amd_ip_block_type block_type,
1321 enum amd_powergating_state state)
1322{
1323 struct amdgpu_device *adev = dev;
1324 int i, r = 0;
1325
1326 for (i = 0; i < adev->num_ip_blocks; i++) {
1327 if (!adev->ip_blocks[i].status.valid)
1328 continue;
1329 if (adev->ip_blocks[i].version->type != block_type)
1330 continue;
1331 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1332 continue;
1333 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1334 (void *)adev, state);
1335 if (r)
1336 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1337 adev->ip_blocks[i].version->funcs->name, r);
1338 }
1339 return r;
1340}
1341
1342/**
1343 * amdgpu_device_ip_get_clockgating_state - get the CG state
1344 *
1345 * @adev: amdgpu_device pointer
1346 * @flags: clockgating feature flags
1347 *
1348 * Walks the list of IPs on the device and updates the clockgating
1349 * flags for each IP.
1350 * Updates @flags with the feature flags for each hardware IP where
1351 * clockgating is enabled.
1352 */
1353void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1354 u32 *flags)
1355{
1356 int i;
1357
1358 for (i = 0; i < adev->num_ip_blocks; i++) {
1359 if (!adev->ip_blocks[i].status.valid)
1360 continue;
1361 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1362 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1363 }
1364}
1365
1366/**
1367 * amdgpu_device_ip_wait_for_idle - wait for idle
1368 *
1369 * @adev: amdgpu_device pointer
1370 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1371 *
1372 * Waits for the requested hardware IP to be idle.
1373 * Returns 0 for success or a negative error code on failure.
1374 */
1375int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1376 enum amd_ip_block_type block_type)
1377{
1378 int i, r;
1379
1380 for (i = 0; i < adev->num_ip_blocks; i++) {
1381 if (!adev->ip_blocks[i].status.valid)
1382 continue;
1383 if (adev->ip_blocks[i].version->type == block_type) {
1384 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1385 if (r)
1386 return r;
1387 break;
1388 }
1389 }
1390 return 0;
1391
1392}
1393
1394/**
1395 * amdgpu_device_ip_is_idle - is the hardware IP idle
1396 *
1397 * @adev: amdgpu_device pointer
1398 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1399 *
1400 * Check if the hardware IP is idle or not.
1401 * Returns true if the IP is idle, false if not.
1402 */
1403bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1404 enum amd_ip_block_type block_type)
1405{
1406 int i;
1407
1408 for (i = 0; i < adev->num_ip_blocks; i++) {
1409 if (!adev->ip_blocks[i].status.valid)
1410 continue;
1411 if (adev->ip_blocks[i].version->type == block_type)
1412 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1413 }
1414 return true;
1415
1416}
1417
1418/**
1419 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1420 *
1421 * @adev: amdgpu_device pointer
1422 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1423 *
1424 * Returns a pointer to the hardware IP block structure
1425 * if it exists for the asic, otherwise NULL.
1426 */
1427struct amdgpu_ip_block *
1428amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1429 enum amd_ip_block_type type)
1430{
1431 int i;
1432
1433 for (i = 0; i < adev->num_ip_blocks; i++)
1434 if (adev->ip_blocks[i].version->type == type)
1435 return &adev->ip_blocks[i];
1436
1437 return NULL;
1438}
1439
1440/**
1441 * amdgpu_device_ip_block_version_cmp
1442 *
1443 * @adev: amdgpu_device pointer
1444 * @type: enum amd_ip_block_type
1445 * @major: major version
1446 * @minor: minor version
1447 *
1448 * Returns 0 if the IP block's version is equal to or greater than @major.@minor,
1449 * 1 if it is smaller or the ip_block doesn't exist.
1450 */
1451int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1452 enum amd_ip_block_type type,
1453 u32 major, u32 minor)
1454{
1455 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1456
1457 if (ip_block && ((ip_block->version->major > major) ||
1458 ((ip_block->version->major == major) &&
1459 (ip_block->version->minor >= minor))))
1460 return 0;
1461
1462 return 1;
1463}
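
/*
 * Usage sketch (illustrative only): check whether the GFX IP on this ASIC is
 * at least version 8.1 before relying on a feature introduced there (the
 * version numbers are arbitrary for the example).
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 1) == 0)
 *		...feature is available...
 */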
1464
1465/**
1466 * amdgpu_device_ip_block_add
1467 *
1468 * @adev: amdgpu_device pointer
1469 * @ip_block_version: pointer to the IP to add
1470 *
1471 * Adds the IP block driver information to the collection of IPs
1472 * on the asic.
1473 */
1474int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1475 const struct amdgpu_ip_block_version *ip_block_version)
1476{
1477 if (!ip_block_version)
1478 return -EINVAL;
1479
1480 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1481 ip_block_version->funcs->name);
1482
1483 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1484
1485 return 0;
1486}
1487
1488/**
1489 * amdgpu_device_enable_virtual_display - enable virtual display feature
1490 *
1491 * @adev: amdgpu_device pointer
1492 *
1493 * Enables the virtual display feature if the user has enabled it via
1494 * the module parameter virtual_display. This feature provides virtual
1495 * display hardware on headless boards or in virtualized environments.
1496 * This function parses and validates the configuration string specified by
1497 * the user and configures the virtual display configuration (number of
1498 * virtual connectors, crtcs, etc.) specified.
1499 */
1500static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1501{
1502 adev->enable_virtual_display = false;
1503
1504 if (amdgpu_virtual_display) {
1505 struct drm_device *ddev = adev->ddev;
1506 const char *pci_address_name = pci_name(ddev->pdev);
1507 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1508
1509 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1510 pciaddstr_tmp = pciaddstr;
1511 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1512 pciaddname = strsep(&pciaddname_tmp, ",");
1513 if (!strcmp("all", pciaddname)
1514 || !strcmp(pci_address_name, pciaddname)) {
1515 long num_crtc;
1516 int res = -1;
1517
1518 adev->enable_virtual_display = true;
1519
1520 if (pciaddname_tmp)
1521 res = kstrtol(pciaddname_tmp, 10,
1522 &num_crtc);
1523
1524 if (!res) {
1525 if (num_crtc < 1)
1526 num_crtc = 1;
1527 if (num_crtc > 6)
1528 num_crtc = 6;
1529 adev->mode_info.num_crtc = num_crtc;
1530 } else {
1531 adev->mode_info.num_crtc = 1;
1532 }
1533 break;
1534 }
1535 }
1536
1537 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1538 amdgpu_virtual_display, pci_address_name,
1539 adev->enable_virtual_display, adev->mode_info.num_crtc);
1540
1541 kfree(pciaddstr);
1542 }
1543}
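
/*
 * Usage sketch (illustrative only): the virtual_display string parsed above
 * is a ';'-separated list of "<pci address>,<number of crtcs>" entries, with
 * "all" matching every device. The PCI address below is an example only:
 *
 *	modprobe amdgpu virtual_display=0000:01:00.0,2
 *	modprobe amdgpu virtual_display=all,1
 */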
1544
1545/**
1546 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1547 *
1548 * @adev: amdgpu_device pointer
1549 *
1550 * Parses the asic configuration parameters specified in the gpu info
1551 * firmware and makes them available to the driver for use in configuring
1552 * the asic.
1553 * Returns 0 on success, -EINVAL on failure.
1554 */
1555static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1556{
1557 const char *chip_name;
1558 char fw_name[40];
1559 int err;
1560 const struct gpu_info_firmware_header_v1_0 *hdr;
1561
1562 adev->firmware.gpu_info_fw = NULL;
1563
1564 if (adev->discovery_bin) {
1565 amdgpu_discovery_get_gfx_info(adev);
1566
1567 /*
1568 * FIXME: The bounding box is still needed by Navi12, so
1569 * temporarily read it from gpu_info firmware. Should be dropped
1570 * when DAL no longer needs it.
1571 */
1572 if (adev->asic_type != CHIP_NAVI12)
1573 return 0;
1574 }
1575
1576 switch (adev->asic_type) {
1577#ifdef CONFIG_DRM_AMDGPU_SI
1578 case CHIP_VERDE:
1579 case CHIP_TAHITI:
1580 case CHIP_PITCAIRN:
1581 case CHIP_OLAND:
1582 case CHIP_HAINAN:
1583#endif
1584#ifdef CONFIG_DRM_AMDGPU_CIK
1585 case CHIP_BONAIRE:
1586 case CHIP_HAWAII:
1587 case CHIP_KAVERI:
1588 case CHIP_KABINI:
1589 case CHIP_MULLINS:
1590#endif
1591 case CHIP_TOPAZ:
1592 case CHIP_TONGA:
1593 case CHIP_FIJI:
1594 case CHIP_POLARIS10:
1595 case CHIP_POLARIS11:
1596 case CHIP_POLARIS12:
1597 case CHIP_VEGAM:
1598 case CHIP_CARRIZO:
1599 case CHIP_STONEY:
1600 case CHIP_VEGA20:
1601 case CHIP_SIENNA_CICHLID:
1602 case CHIP_NAVY_FLOUNDER:
1603 default:
1604 return 0;
1605 case CHIP_VEGA10:
1606 chip_name = "vega10";
1607 break;
1608 case CHIP_VEGA12:
1609 chip_name = "vega12";
1610 break;
1611 case CHIP_RAVEN:
1612 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1613 chip_name = "raven2";
1614 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1615 chip_name = "picasso";
1616 else
1617 chip_name = "raven";
1618 break;
1619 case CHIP_ARCTURUS:
1620 chip_name = "arcturus";
1621 break;
1622 case CHIP_RENOIR:
1623 chip_name = "renoir";
1624 break;
1625 case CHIP_NAVI10:
1626 chip_name = "navi10";
1627 break;
1628 case CHIP_NAVI14:
1629 chip_name = "navi14";
1630 break;
1631 case CHIP_NAVI12:
1632 chip_name = "navi12";
1633 break;
1634 }
1635
1636 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1637 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1638 if (err) {
1639 dev_err(adev->dev,
1640 "Failed to load gpu_info firmware \"%s\"\n",
1641 fw_name);
1642 goto out;
1643 }
1644 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1645 if (err) {
1646 dev_err(adev->dev,
1647 "Failed to validate gpu_info firmware \"%s\"\n",
1648 fw_name);
1649 goto out;
1650 }
1651
1652 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1653 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1654
1655 switch (hdr->version_major) {
1656 case 1:
1657 {
1658 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1659 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1660 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1661
1662 /*
1663 * Should be dropped when DAL no longer needs it.
1664 */
1665 if (adev->asic_type == CHIP_NAVI12)
1666 goto parse_soc_bounding_box;
1667
1668 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1669 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1670 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1671 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1672 adev->gfx.config.max_texture_channel_caches =
1673 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1674 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1675 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1676 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1677 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1678 adev->gfx.config.double_offchip_lds_buf =
1679 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1680 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1681 adev->gfx.cu_info.max_waves_per_simd =
1682 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1683 adev->gfx.cu_info.max_scratch_slots_per_cu =
1684 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1685 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1686 if (hdr->version_minor >= 1) {
1687 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1688 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1689 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1690 adev->gfx.config.num_sc_per_sh =
1691 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1692 adev->gfx.config.num_packer_per_sc =
1693 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1694 }
1695
1696parse_soc_bounding_box:
1697 /*
1698 * soc bounding box info is not integrated in the discovery table,
1699 * we always need to parse it from gpu info firmware if needed.
1700 */
1701 if (hdr->version_minor == 2) {
1702 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1703 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1704 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1705 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1706 }
1707 break;
1708 }
1709 default:
1710 dev_err(adev->dev,
1711 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1712 err = -EINVAL;
1713 goto out;
1714 }
1715out:
1716 return err;
1717}
1718
1719/**
1720 * amdgpu_device_ip_early_init - run early init for hardware IPs
1721 *
1722 * @adev: amdgpu_device pointer
1723 *
1724 * Early initialization pass for hardware IPs. The hardware IPs that make
1725 * up each asic are discovered and each IP's early_init callback is run. This
1726 * is the first stage in initializing the asic.
1727 * Returns 0 on success, negative error code on failure.
1728 */
1729static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1730{
1731 int i, r;
1732
1733 amdgpu_device_enable_virtual_display(adev);
1734
1735 if (amdgpu_sriov_vf(adev)) {
1736 r = amdgpu_virt_request_full_gpu(adev, true);
1737 if (r)
1738 return r;
1739 }
1740
1741 switch (adev->asic_type) {
1742#ifdef CONFIG_DRM_AMDGPU_SI
1743 case CHIP_VERDE:
1744 case CHIP_TAHITI:
1745 case CHIP_PITCAIRN:
1746 case CHIP_OLAND:
1747 case CHIP_HAINAN:
1748 adev->family = AMDGPU_FAMILY_SI;
1749 r = si_set_ip_blocks(adev);
1750 if (r)
1751 return r;
1752 break;
1753#endif
1754#ifdef CONFIG_DRM_AMDGPU_CIK
1755 case CHIP_BONAIRE:
1756 case CHIP_HAWAII:
1757 case CHIP_KAVERI:
1758 case CHIP_KABINI:
1759 case CHIP_MULLINS:
1760 if (adev->flags & AMD_IS_APU)
1761 adev->family = AMDGPU_FAMILY_KV;
1762 else
1763 adev->family = AMDGPU_FAMILY_CI;
1764
1765 r = cik_set_ip_blocks(adev);
1766 if (r)
1767 return r;
1768 break;
1769#endif
1770 case CHIP_TOPAZ:
1771 case CHIP_TONGA:
1772 case CHIP_FIJI:
1773 case CHIP_POLARIS10:
1774 case CHIP_POLARIS11:
1775 case CHIP_POLARIS12:
1776 case CHIP_VEGAM:
1777 case CHIP_CARRIZO:
1778 case CHIP_STONEY:
1779 if (adev->flags & AMD_IS_APU)
1780 adev->family = AMDGPU_FAMILY_CZ;
1781 else
1782 adev->family = AMDGPU_FAMILY_VI;
1783
1784 r = vi_set_ip_blocks(adev);
1785 if (r)
1786 return r;
1787 break;
1788 case CHIP_VEGA10:
1789 case CHIP_VEGA12:
1790 case CHIP_VEGA20:
1791 case CHIP_RAVEN:
1792 case CHIP_ARCTURUS:
1793 case CHIP_RENOIR:
1794 if (adev->flags & AMD_IS_APU)
1795 adev->family = AMDGPU_FAMILY_RV;
1796 else
1797 adev->family = AMDGPU_FAMILY_AI;
1798
1799 r = soc15_set_ip_blocks(adev);
1800 if (r)
1801 return r;
1802 break;
1803 case CHIP_NAVI10:
1804 case CHIP_NAVI14:
1805 case CHIP_NAVI12:
1806 case CHIP_SIENNA_CICHLID:
1807 case CHIP_NAVY_FLOUNDER:
1808 adev->family = AMDGPU_FAMILY_NV;
1809
1810 r = nv_set_ip_blocks(adev);
1811 if (r)
1812 return r;
1813 break;
1814 default:
1815 /* FIXME: not supported yet */
1816 return -EINVAL;
1817 }
1818
1819 amdgpu_amdkfd_device_probe(adev);
1820
1821 adev->pm.pp_feature = amdgpu_pp_feature_mask;
1822 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
1823 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1824
1825 for (i = 0; i < adev->num_ip_blocks; i++) {
1826 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1827 DRM_ERROR("disabled ip block: %d <%s>\n",
1828 i, adev->ip_blocks[i].version->funcs->name);
1829 adev->ip_blocks[i].status.valid = false;
1830 } else {
1831 if (adev->ip_blocks[i].version->funcs->early_init) {
1832 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1833 if (r == -ENOENT) {
1834 adev->ip_blocks[i].status.valid = false;
1835 } else if (r) {
1836 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1837 adev->ip_blocks[i].version->funcs->name, r);
1838 return r;
1839 } else {
1840 adev->ip_blocks[i].status.valid = true;
1841 }
1842 } else {
1843 adev->ip_blocks[i].status.valid = true;
1844 }
1845 }
1846 /* get the vbios after the asic_funcs are set up */
1847 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
1848 r = amdgpu_device_parse_gpu_info_fw(adev);
1849 if (r)
1850 return r;
1851
1852 /* Read BIOS */
1853 if (!amdgpu_get_bios(adev))
1854 return -EINVAL;
1855
1856 r = amdgpu_atombios_init(adev);
1857 if (r) {
1858 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
1859 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
1860 return r;
1861 }
1862 }
1863 }
1864
1865 adev->cg_flags &= amdgpu_cg_mask;
1866 adev->pg_flags &= amdgpu_pg_mask;
1867
1868 return 0;
1869}
1870
1871static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
1872{
1873 int i, r;
1874
1875 for (i = 0; i < adev->num_ip_blocks; i++) {
1876 if (!adev->ip_blocks[i].status.sw)
1877 continue;
1878 if (adev->ip_blocks[i].status.hw)
1879 continue;
1880 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1881 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
1882 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
1883 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1884 if (r) {
1885 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1886 adev->ip_blocks[i].version->funcs->name, r);
1887 return r;
1888 }
1889 adev->ip_blocks[i].status.hw = true;
1890 }
1891 }
1892
1893 return 0;
1894}
1895
1896static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
1897{
1898 int i, r;
1899
1900 for (i = 0; i < adev->num_ip_blocks; i++) {
1901 if (!adev->ip_blocks[i].status.sw)
1902 continue;
1903 if (adev->ip_blocks[i].status.hw)
1904 continue;
1905 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1906 if (r) {
1907 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1908 adev->ip_blocks[i].version->funcs->name, r);
1909 return r;
1910 }
1911 adev->ip_blocks[i].status.hw = true;
1912 }
1913
1914 return 0;
1915}
1916
1917static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
1918{
1919 int r = 0;
1920 int i;
1921 uint32_t smu_version;
1922
1923 if (adev->asic_type >= CHIP_VEGA10) {
1924 for (i = 0; i < adev->num_ip_blocks; i++) {
1925 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
1926 continue;
1927
1928			/* no need to do the fw loading again if already done */
1929			if (adev->ip_blocks[i].status.hw)
1930 break;
1931
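			/* on reset or resume the PSP block only needs its resume
			 * callback; a cold init goes through full hw_init instead
			 */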
1932 if (adev->in_gpu_reset || adev->in_suspend) {
1933 r = adev->ip_blocks[i].version->funcs->resume(adev);
1934 if (r) {
1935 DRM_ERROR("resume of IP block <%s> failed %d\n",
1936 adev->ip_blocks[i].version->funcs->name, r);
1937 return r;
1938 }
1939 } else {
1940 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1941 if (r) {
1942 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1943 adev->ip_blocks[i].version->funcs->name, r);
1944 return r;
1945 }
1946 }
1947
1948 adev->ip_blocks[i].status.hw = true;
1949 break;
1950 }
1951 }
1952
1953 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
1954 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
1955
1956 return r;
1957}
1958
1959/**
1960 * amdgpu_device_ip_init - run init for hardware IPs
1961 *
1962 * @adev: amdgpu_device pointer
1963 *
1964 * Main initialization pass for hardware IPs. The list of all the hardware
1965 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
1966 * are run. sw_init initializes the software state associated with each IP
1967 * and hw_init initializes the hardware associated with each IP.
1968 * Returns 0 on success, negative error code on failure.
1969 */
1970static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1971{
1972 int i, r;
1973
1974 r = amdgpu_ras_init(adev);
1975 if (r)
1976 return r;
1977
1978 for (i = 0; i < adev->num_ip_blocks; i++) {
1979 if (!adev->ip_blocks[i].status.valid)
1980 continue;
1981 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1982 if (r) {
1983 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1984 adev->ip_blocks[i].version->funcs->name, r);
1985 goto init_failed;
1986 }
1987 adev->ip_blocks[i].status.sw = true;
1988
1989 /* need to do gmc hw init early so we can allocate gpu mem */
1990 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1991 r = amdgpu_device_vram_scratch_init(adev);
1992 if (r) {
1993 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1994 goto init_failed;
1995 }
1996 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1997 if (r) {
1998 DRM_ERROR("hw_init %d failed %d\n", i, r);
1999 goto init_failed;
2000 }
2001 r = amdgpu_device_wb_init(adev);
2002 if (r) {
2003 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2004 goto init_failed;
2005 }
2006 adev->ip_blocks[i].status.hw = true;
2007
2008 /* right after GMC hw init, we create CSA */
2009 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2010 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2011 AMDGPU_GEM_DOMAIN_VRAM,
2012 AMDGPU_CSA_SIZE);
2013 if (r) {
2014 DRM_ERROR("allocate CSA failed %d\n", r);
2015 goto init_failed;
2016 }
2017 }
2018 }
2019 }
2020
2021 if (amdgpu_sriov_vf(adev))
2022 amdgpu_virt_init_data_exchange(adev);
2023
2024 r = amdgpu_ib_pool_init(adev);
2025 if (r) {
2026 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2027 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2028 goto init_failed;
2029 }
2030
2031	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2032 if (r)
2033 goto init_failed;
2034
2035 r = amdgpu_device_ip_hw_init_phase1(adev);
2036 if (r)
2037 goto init_failed;
2038
2039 r = amdgpu_device_fw_loading(adev);
2040 if (r)
2041 goto init_failed;
2042
2043 r = amdgpu_device_ip_hw_init_phase2(adev);
2044 if (r)
2045 goto init_failed;
2046
2047 /*
2048 * retired pages will be loaded from eeprom and reserved here,
2049 * it should be called after amdgpu_device_ip_hw_init_phase2 since
2050 * for some ASICs the RAS EEPROM code relies on SMU fully functioning
2051	 * for I2C communication, which is only true at this point.
2052 * recovery_init may fail, but it can free all resources allocated by
2053 * itself and its failure should not stop amdgpu init process.
2054 *
2055 * Note: theoretically, this should be called before all vram allocations
2056	 * to protect retired pages from being reused
2057 */
2058 amdgpu_ras_recovery_init(adev);
2059
2060 if (adev->gmc.xgmi.num_physical_nodes > 1)
2061 amdgpu_xgmi_add_device(adev);
2062 amdgpu_amdkfd_device_init(adev);
2063
2064 amdgpu_fru_get_product_info(adev);
2065
2066init_failed:
2067 if (amdgpu_sriov_vf(adev))
2068 amdgpu_virt_release_full_gpu(adev, true);
2069
2070 return r;
2071}
2072
2073/**
2074 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2075 *
2076 * @adev: amdgpu_device pointer
2077 *
2078 * Saves a copy of the data at the GART pointer in VRAM as a reset magic. The
2079 * driver calls this function before a GPU reset. If the value is retained after
2080 * a GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2081 */
2082static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2083{
2084 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2085}
2086
2087/**
2088 * amdgpu_device_check_vram_lost - check if vram is valid
2089 *
2090 * @adev: amdgpu_device pointer
2091 *
2092 * Checks the reset magic value written to the gart pointer in VRAM.
2093 * The driver calls this after a GPU reset to see if the contents of
2094 * VRAM have been lost or not.
2095 * Returns true if VRAM is lost, false if not.
2096 */
2097static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2098{
2099 if (memcmp(adev->gart.ptr, adev->reset_magic,
2100 AMDGPU_RESET_MAGIC_NUM))
2101 return true;
2102
2103 if (!adev->in_gpu_reset)
2104 return false;
2105
2106 /*
2107 * For all ASICs with baco/mode1 reset, the VRAM is
2108 * always assumed to be lost.
2109 */
2110 switch (amdgpu_asic_reset_method(adev)) {
2111 case AMD_RESET_METHOD_BACO:
2112 case AMD_RESET_METHOD_MODE1:
2113 return true;
2114 default:
2115 return false;
2116 }
2117}
2118
2119/**
2120 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2121 *
2122 * @adev: amdgpu_device pointer
2123 * @state: clockgating state (gate or ungate)
2124 *
2125 * The list of all the hardware IPs that make up the asic is walked and the
2126 * set_clockgating_state callbacks are run.
2127 * During the late init pass this enables clockgating for hardware IPs;
2128 * during fini or suspend it disables clockgating.
2129 * Returns 0 on success, negative error code on failure.
2130 */
2131
2132static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2133 enum amd_clockgating_state state)
2134{
2135 int i, j, r;
2136
2137 if (amdgpu_emu_mode == 1)
2138 return 0;
2139
2140 for (j = 0; j < adev->num_ip_blocks; j++) {
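		/* gate in IP block order, ungate in reverse order */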
2141 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2142 if (!adev->ip_blocks[i].status.late_initialized)
2143 continue;
2144		/* skip CG for UVD/VCE/VCN/JPEG, it's handled specially */
2145 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2146 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2147 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2148 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2149 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2150 /* enable clockgating to save power */
2151 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2152 state);
2153 if (r) {
2154 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2155 adev->ip_blocks[i].version->funcs->name, r);
2156 return r;
2157 }
2158 }
2159 }
2160
2161 return 0;
2162}
2163
2164static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
2165{
2166 int i, j, r;
2167
2168 if (amdgpu_emu_mode == 1)
2169 return 0;
2170
2171 for (j = 0; j < adev->num_ip_blocks; j++) {
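		/* gate in IP block order, ungate in reverse order */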
2172 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2173 if (!adev->ip_blocks[i].status.late_initialized)
2174 continue;
2175		/* skip PG for UVD/VCE/VCN/JPEG, it's handled specially */
2176 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2177 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2178 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2179 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2180 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2181 /* enable powergating to save power */
2182 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2183 state);
2184 if (r) {
2185 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2186 adev->ip_blocks[i].version->funcs->name, r);
2187 return r;
2188 }
2189 }
2190 }
2191 return 0;
2192}
2193
2194static int amdgpu_device_enable_mgpu_fan_boost(void)
2195{
2196 struct amdgpu_gpu_instance *gpu_ins;
2197 struct amdgpu_device *adev;
2198 int i, ret = 0;
2199
2200 mutex_lock(&mgpu_info.mutex);
2201
2202 /*
2203 * MGPU fan boost feature should be enabled
2204 * only when there are two or more dGPUs in
2205 * the system
2206 */
2207 if (mgpu_info.num_dgpu < 2)
2208 goto out;
2209
2210 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2211 gpu_ins = &(mgpu_info.gpu_ins[i]);
2212 adev = gpu_ins->adev;
2213 if (!(adev->flags & AMD_IS_APU) &&
2214 !gpu_ins->mgpu_fan_enabled &&
2215 adev->powerplay.pp_funcs &&
2216 adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
2217 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2218 if (ret)
2219 break;
2220
2221 gpu_ins->mgpu_fan_enabled = 1;
2222 }
2223 }
2224
2225out:
2226 mutex_unlock(&mgpu_info.mutex);
2227
2228 return ret;
2229}
2230
2231/**
2232 * amdgpu_device_ip_late_init - run late init for hardware IPs
2233 *
2234 * @adev: amdgpu_device pointer
2235 *
2236 * Late initialization pass for hardware IPs. The list of all the hardware
2237 * IPs that make up the asic is walked and the late_init callbacks are run.
2238 * late_init covers any special initialization that an IP requires
2239 * after all of the IPs have been initialized or something that needs to happen
2240 * late in the init process.
2241 * Returns 0 on success, negative error code on failure.
2242 */
2243static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2244{
2245 struct amdgpu_gpu_instance *gpu_instance;
2246 int i = 0, r;
2247
2248 for (i = 0; i < adev->num_ip_blocks; i++) {
2249 if (!adev->ip_blocks[i].status.hw)
2250 continue;
2251 if (adev->ip_blocks[i].version->funcs->late_init) {
2252 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2253 if (r) {
2254 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2255 adev->ip_blocks[i].version->funcs->name, r);
2256 return r;
2257 }
2258 }
2259 adev->ip_blocks[i].status.late_initialized = true;
2260 }
2261
2262 amdgpu_ras_set_error_query_ready(adev, true);
2263
2264 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2265 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2266
2267 amdgpu_device_fill_reset_magic(adev);
2268
2269 r = amdgpu_device_enable_mgpu_fan_boost();
2270 if (r)
2271 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2272
2273
2274 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2275 mutex_lock(&mgpu_info.mutex);
2276
2277 /*
2278 * Reset device p-state to low as this was booted with high.
2279 *
2280 * This should be performed only after all devices from the same
2281 * hive get initialized.
2282 *
2283		 * However, the number of devices in a hive is not known in
2284		 * advance; it is counted up one by one as each device initializes.
2285		 *
2286		 * So, we wait until all XGMI interlinked devices are initialized.
2287		 * This may bring some delay as those devices may come from
2288		 * different hives. But that should be OK.
2289 */
2290 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2291 for (i = 0; i < mgpu_info.num_gpu; i++) {
2292 gpu_instance = &(mgpu_info.gpu_ins[i]);
2293 if (gpu_instance->adev->flags & AMD_IS_APU)
2294 continue;
2295
2296 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2297 AMDGPU_XGMI_PSTATE_MIN);
2298 if (r) {
2299 DRM_ERROR("pstate setting failed (%d).\n", r);
2300 break;
2301 }
2302 }
2303 }
2304
2305 mutex_unlock(&mgpu_info.mutex);
2306 }
2307
2308 return 0;
2309}
2310
2311/**
2312 * amdgpu_device_ip_fini - run fini for hardware IPs
2313 *
2314 * @adev: amdgpu_device pointer
2315 *
2316 * Main teardown pass for hardware IPs. The list of all the hardware
2317 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2318 * are run. hw_fini tears down the hardware associated with each IP
2319 * and sw_fini tears down any software state associated with each IP.
2320 * Returns 0 on success, negative error code on failure.
2321 */
2322static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2323{
2324 int i, r;
2325
2326 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2327 amdgpu_virt_release_ras_err_handler_data(adev);
2328
2329 amdgpu_ras_pre_fini(adev);
2330
2331 if (adev->gmc.xgmi.num_physical_nodes > 1)
2332 amdgpu_xgmi_remove_device(adev);
2333
2334 amdgpu_amdkfd_device_fini(adev);
2335
2336 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2337 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2338
2339 /* need to disable SMC first */
2340 for (i = 0; i < adev->num_ip_blocks; i++) {
2341 if (!adev->ip_blocks[i].status.hw)
2342 continue;
2343 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2344 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2345 /* XXX handle errors */
2346 if (r) {
2347 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2348 adev->ip_blocks[i].version->funcs->name, r);
2349 }
2350 adev->ip_blocks[i].status.hw = false;
2351 break;
2352 }
2353 }
2354
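	/* tear down the remaining hardware blocks in reverse order of init */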
2355 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2356 if (!adev->ip_blocks[i].status.hw)
2357 continue;
2358
2359 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2360 /* XXX handle errors */
2361 if (r) {
2362 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2363 adev->ip_blocks[i].version->funcs->name, r);
2364 }
2365
2366 adev->ip_blocks[i].status.hw = false;
2367 }
2368
2369
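	/* then free software state, also in reverse order */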
2370 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2371 if (!adev->ip_blocks[i].status.sw)
2372 continue;
2373
2374 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2375 amdgpu_ucode_free_bo(adev);
2376 amdgpu_free_static_csa(&adev->virt.csa_obj);
2377 amdgpu_device_wb_fini(adev);
2378 amdgpu_device_vram_scratch_fini(adev);
2379 amdgpu_ib_pool_fini(adev);
2380 }
2381
2382 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2383 /* XXX handle errors */
2384 if (r) {
2385 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2386 adev->ip_blocks[i].version->funcs->name, r);
2387 }
2388 adev->ip_blocks[i].status.sw = false;
2389 adev->ip_blocks[i].status.valid = false;
2390 }
2391
2392 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2393 if (!adev->ip_blocks[i].status.late_initialized)
2394 continue;
2395 if (adev->ip_blocks[i].version->funcs->late_fini)
2396 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2397 adev->ip_blocks[i].status.late_initialized = false;
2398 }
2399
2400 amdgpu_ras_fini(adev);
2401
2402 if (amdgpu_sriov_vf(adev))
2403 if (amdgpu_virt_release_full_gpu(adev, false))
2404 DRM_ERROR("failed to release exclusive mode on fini\n");
2405
2406 return 0;
2407}
2408
2409/**
2410 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2411 *
2412 * @work: work_struct.
2413 */
2414static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2415{
2416 struct amdgpu_device *adev =
2417 container_of(work, struct amdgpu_device, delayed_init_work.work);
2418 int r;
2419
2420 r = amdgpu_ib_ring_tests(adev);
2421 if (r)
2422 DRM_ERROR("ib ring test failed (%d).\n", r);
2423}
2424
2425static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2426{
2427 struct amdgpu_device *adev =
2428 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2429
2430 mutex_lock(&adev->gfx.gfx_off_mutex);
2431 if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2432 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2433 adev->gfx.gfx_off_state = true;
2434 }
2435 mutex_unlock(&adev->gfx.gfx_off_mutex);
2436}
2437
2438/**
2439 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2440 *
2441 * @adev: amdgpu_device pointer
2442 *
2443 * Main suspend function for hardware IPs. The list of all the hardware
2444 * IPs that make up the asic is walked, clockgating is disabled and the
2445 * suspend callbacks are run. suspend puts the hardware and software state
2446 * in each IP into a state suitable for suspend.
2447 * Returns 0 on success, negative error code on failure.
2448 */
2449static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2450{
2451 int i, r;
2452
2453 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2454 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2455
2456 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2457 if (!adev->ip_blocks[i].status.valid)
2458 continue;
2459
2460 /* displays are handled separately */
2461 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2462 continue;
2463
2464 /* XXX handle errors */
2465 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2466 /* XXX handle errors */
2467 if (r) {
2468 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2469 adev->ip_blocks[i].version->funcs->name, r);
2470 return r;
2471 }
2472
2473 adev->ip_blocks[i].status.hw = false;
2474 }
2475
2476 return 0;
2477}
2478
2479/**
2480 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2481 *
2482 * @adev: amdgpu_device pointer
2483 *
2484 * Main suspend function for hardware IPs. The list of all the hardware
2485 * IPs that make up the asic is walked, clockgating is disabled and the
2486 * suspend callbacks are run. suspend puts the hardware and software state
2487 * in each IP into a state suitable for suspend.
2488 * Returns 0 on success, negative error code on failure.
2489 */
2490static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2491{
2492 int i, r;
2493
2494 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2495 if (!adev->ip_blocks[i].status.valid)
2496 continue;
2497 /* displays are handled in phase1 */
2498 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2499 continue;
2500 /* PSP lost connection when err_event_athub occurs */
2501 if (amdgpu_ras_intr_triggered() &&
2502 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2503 adev->ip_blocks[i].status.hw = false;
2504 continue;
2505 }
2506 /* XXX handle errors */
2507 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2508 /* XXX handle errors */
2509 if (r) {
2510 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2511 adev->ip_blocks[i].version->funcs->name, r);
2512 }
2513 adev->ip_blocks[i].status.hw = false;
2514 /* handle putting the SMC in the appropriate state */
2515		if (!amdgpu_sriov_vf(adev)) {
2516 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2517 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2518 if (r) {
2519 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2520 adev->mp1_state, r);
2521 return r;
2522 }
2523 }
2524 }
2526 }
2527
2528 return 0;
2529}
2530
2531/**
2532 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2533 *
2534 * @adev: amdgpu_device pointer
2535 *
2536 * Main suspend function for hardware IPs. The list of all the hardware
2537 * IPs that make up the asic is walked, clockgating is disabled and the
2538 * suspend callbacks are run. suspend puts the hardware and software state
2539 * in each IP into a state suitable for suspend.
2540 * Returns 0 on success, negative error code on failure.
2541 */
2542int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2543{
2544 int r;
2545
2546 if (amdgpu_sriov_vf(adev))
2547 amdgpu_virt_request_full_gpu(adev, false);
2548
2549 r = amdgpu_device_ip_suspend_phase1(adev);
2550 if (r)
2551 return r;
2552 r = amdgpu_device_ip_suspend_phase2(adev);
2553
2554 if (amdgpu_sriov_vf(adev))
2555 amdgpu_virt_release_full_gpu(adev, false);
2556
2557 return r;
2558}
2559
2560static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2561{
2562 int i, r;
2563
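	/* early re-init order used while recovering a VF after FLR */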
2564 static enum amd_ip_block_type ip_order[] = {
2565 AMD_IP_BLOCK_TYPE_GMC,
2566 AMD_IP_BLOCK_TYPE_COMMON,
2567 AMD_IP_BLOCK_TYPE_PSP,
2568 AMD_IP_BLOCK_TYPE_IH,
2569 };
2570
2571 for (i = 0; i < adev->num_ip_blocks; i++)
2572 adev->ip_blocks[i].status.hw = false;
2573
2574 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2575 int j;
2576 struct amdgpu_ip_block *block;
2577
2578 for (j = 0; j < adev->num_ip_blocks; j++) {
2579 block = &adev->ip_blocks[j];
2580
2581 if (block->version->type != ip_order[i] ||
2582 !block->status.valid)
2583 continue;
2584
2585 r = block->version->funcs->hw_init(adev);
2586 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2587 if (r)
2588 return r;
2589 block->status.hw = true;
2590 }
2591 }
2592
2593 return 0;
2594}
2595
2596static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2597{
2598 int i, r;
2599
2600 static enum amd_ip_block_type ip_order[] = {
2601 AMD_IP_BLOCK_TYPE_SMC,
2602 AMD_IP_BLOCK_TYPE_DCE,
2603 AMD_IP_BLOCK_TYPE_GFX,
2604 AMD_IP_BLOCK_TYPE_SDMA,
2605 AMD_IP_BLOCK_TYPE_UVD,
2606 AMD_IP_BLOCK_TYPE_VCE,
2607 AMD_IP_BLOCK_TYPE_VCN
2608 };
2609
2610 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2611 int j;
2612 struct amdgpu_ip_block *block;
2613
2614 for (j = 0; j < adev->num_ip_blocks; j++) {
2615 block = &adev->ip_blocks[j];
2616
2617 if (block->version->type != ip_order[i] ||
2618 !block->status.valid ||
2619 block->status.hw)
2620 continue;
2621
2622 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
2623 r = block->version->funcs->resume(adev);
2624 else
2625 r = block->version->funcs->hw_init(adev);
2626
2627 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2628 if (r)
2629 return r;
2630 block->status.hw = true;
2631 }
2632 }
2633
2634 return 0;
2635}
2636
2637/**
2638 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2639 *
2640 * @adev: amdgpu_device pointer
2641 *
2642 * First resume function for hardware IPs. The list of all the hardware
2643 * IPs that make up the asic is walked and the resume callbacks are run for
2644 * COMMON, GMC, and IH. resume puts the hardware into a functional state
2645 * after a suspend and updates the software state as necessary. This
2646 * function is also used for restoring the GPU after a GPU reset.
2647 * Returns 0 on success, negative error code on failure.
2648 */
2649static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2650{
2651 int i, r;
2652
2653 for (i = 0; i < adev->num_ip_blocks; i++) {
2654 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2655 continue;
2656 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2657 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2658 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2659
2660 r = adev->ip_blocks[i].version->funcs->resume(adev);
2661 if (r) {
2662 DRM_ERROR("resume of IP block <%s> failed %d\n",
2663 adev->ip_blocks[i].version->funcs->name, r);
2664 return r;
2665 }
2666 adev->ip_blocks[i].status.hw = true;
2667 }
2668 }
2669
2670 return 0;
2671}
2672
2673/**
2674 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2675 *
2676 * @adev: amdgpu_device pointer
2677 *
2678 * Second resume function for hardware IPs. The list of all the hardware
2679 * IPs that make up the asic is walked and the resume callbacks are run for
2680 * all blocks except COMMON, GMC, IH, and PSP. resume puts the hardware into a
2681 * functional state after a suspend and updates the software state as
2682 * necessary. This function is also used for restoring the GPU after a GPU
2683 * reset.
2684 * Returns 0 on success, negative error code on failure.
2685 */
2686static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2687{
2688 int i, r;
2689
2690 for (i = 0; i < adev->num_ip_blocks; i++) {
2691 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2692 continue;
2693 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2694 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2695 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2696 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2697 continue;
2698 r = adev->ip_blocks[i].version->funcs->resume(adev);
2699 if (r) {
2700 DRM_ERROR("resume of IP block <%s> failed %d\n",
2701 adev->ip_blocks[i].version->funcs->name, r);
2702 return r;
2703 }
2704 adev->ip_blocks[i].status.hw = true;
2705 }
2706
2707 return 0;
2708}
2709
2710/**
2711 * amdgpu_device_ip_resume - run resume for hardware IPs
2712 *
2713 * @adev: amdgpu_device pointer
2714 *
2715 * Main resume function for hardware IPs. The hardware IPs
2716 * are split into two resume functions because they are
2717 * also used in recovering from a GPU reset and some additional
2718 * steps need to be taken between them. In this case (S3/S4) they are
2719 * run sequentially.
2720 * Returns 0 on success, negative error code on failure.
2721 */
2722static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2723{
2724 int r;
2725
2726 r = amdgpu_device_ip_resume_phase1(adev);
2727 if (r)
2728 return r;
2729
2730 r = amdgpu_device_fw_loading(adev);
2731 if (r)
2732 return r;
2733
2734 r = amdgpu_device_ip_resume_phase2(adev);
2735
2736 return r;
2737}
2738
2739/**
2740 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2741 *
2742 * @adev: amdgpu_device pointer
2743 *
2744 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2745 */
2746static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2747{
2748 if (amdgpu_sriov_vf(adev)) {
2749 if (adev->is_atom_fw) {
2750 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2751 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2752 } else {
2753 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2754 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2755 }
2756
2757 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2758 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2759 }
2760}
2761
2762/**
2763 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2764 *
2765 * @asic_type: AMD asic type
2766 *
2767 * Check if there is DC (new modesetting infrastructure) support for an asic.
2768 * returns true if DC has support, false if not.
2769 */
2770bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2771{
2772 switch (asic_type) {
2773#if defined(CONFIG_DRM_AMD_DC)
2774 case CHIP_BONAIRE:
2775 case CHIP_KAVERI:
2776 case CHIP_KABINI:
2777 case CHIP_MULLINS:
2778 /*
2779 * We have systems in the wild with these ASICs that require
2780 * LVDS and VGA support which is not supported with DC.
2781 *
2782 * Fallback to the non-DC driver here by default so as not to
2783 * cause regressions.
2784 */
2785 return amdgpu_dc > 0;
2786 case CHIP_HAWAII:
2787 case CHIP_CARRIZO:
2788 case CHIP_STONEY:
2789 case CHIP_POLARIS10:
2790 case CHIP_POLARIS11:
2791 case CHIP_POLARIS12:
2792 case CHIP_VEGAM:
2793 case CHIP_TONGA:
2794 case CHIP_FIJI:
2795 case CHIP_VEGA10:
2796 case CHIP_VEGA12:
2797 case CHIP_VEGA20:
2798#if defined(CONFIG_DRM_AMD_DC_DCN)
2799 case CHIP_RAVEN:
2800 case CHIP_NAVI10:
2801 case CHIP_NAVI14:
2802 case CHIP_NAVI12:
2803 case CHIP_RENOIR:
2804#endif
2805#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
2806 case CHIP_SIENNA_CICHLID:
2807 case CHIP_NAVY_FLOUNDER:
2808#endif
2809 return amdgpu_dc != 0;
2810#endif
2811 default:
2812 if (amdgpu_dc > 0)
2813 DRM_INFO("Display Core has been requested via kernel parameter "
2814 "but isn't supported by ASIC, ignoring\n");
2815 return false;
2816 }
2817}
2818
2819/**
2820 * amdgpu_device_has_dc_support - check if dc is supported
2821 *
2822 * @adev: amdgpu_device pointer
2823 *
2824 * Returns true for supported, false for not supported
2825 */
2826bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2827{
2828 if (amdgpu_sriov_vf(adev))
2829 return false;
2830
2831 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2832}
2833
2834
2835static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
2836{
2837 struct amdgpu_device *adev =
2838 container_of(__work, struct amdgpu_device, xgmi_reset_work);
2839 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
2840
2841 /* It's a bug to not have a hive within this function */
2842 if (WARN_ON(!hive))
2843 return;
2844
2845 /*
2846 * Use task barrier to synchronize all xgmi reset works across the
2847 * hive. task_barrier_enter and task_barrier_exit will block
2848 * until all the threads running the xgmi reset works reach
2849 * those points. task_barrier_full will do both blocks.
2850 */
2851 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
2852
2853 task_barrier_enter(&hive->tb);
2854 adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
2855
2856 if (adev->asic_reset_res)
2857 goto fail;
2858
2859 task_barrier_exit(&hive->tb);
2860 adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
2861
2862 if (adev->asic_reset_res)
2863 goto fail;
2864
2865 if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
2866 adev->mmhub.funcs->reset_ras_error_count(adev);
2867 } else {
2868
2869 task_barrier_full(&hive->tb);
2870 adev->asic_reset_res = amdgpu_asic_reset(adev);
2871 }
2872
2873fail:
2874 if (adev->asic_reset_res)
2875 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
2876 adev->asic_reset_res, adev->ddev->unique);
2877}
2878
2879static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
2880{
2881 char *input = amdgpu_lockup_timeout;
2882 char *timeout_setting = NULL;
2883 int index = 0;
2884 long timeout;
2885 int ret = 0;
2886
2887 /*
2888	 * By default the timeout for non-compute jobs is 10000 ms
2889	 * and there is no timeout enforced on compute jobs.
2890	 * In SR-IOV or passthrough mode, the default timeout for
2891	 * compute jobs is 60000 ms.
2892 */
2893 adev->gfx_timeout = msecs_to_jiffies(10000);
2894 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
2895 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
2896 adev->compute_timeout = msecs_to_jiffies(60000);
2897 else
2898 adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
2899
2900 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
2901 while ((timeout_setting = strsep(&input, ",")) &&
2902 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
2903 ret = kstrtol(timeout_setting, 0, &timeout);
2904 if (ret)
2905 return ret;
2906
2907 if (timeout == 0) {
2908 index++;
2909 continue;
2910 } else if (timeout < 0) {
2911 timeout = MAX_SCHEDULE_TIMEOUT;
2912 } else {
2913 timeout = msecs_to_jiffies(timeout);
2914 }
2915
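			/* values are given in the order: gfx, compute, sdma, video */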
2916 switch (index++) {
2917 case 0:
2918 adev->gfx_timeout = timeout;
2919 break;
2920 case 1:
2921 adev->compute_timeout = timeout;
2922 break;
2923 case 2:
2924 adev->sdma_timeout = timeout;
2925 break;
2926 case 3:
2927 adev->video_timeout = timeout;
2928 break;
2929 default:
2930 break;
2931 }
2932 }
2933 /*
2934 * There is only one value specified and
2935 * it should apply to all non-compute jobs.
2936 */
2937 if (index == 1) {
2938 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
2939 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
2940 adev->compute_timeout = adev->gfx_timeout;
2941 }
2942 }
2943
2944 return ret;
2945}
2946
2947static const struct attribute *amdgpu_dev_attributes[] = {
2948 &dev_attr_product_name.attr,
2949 &dev_attr_product_number.attr,
2950 &dev_attr_serial_number.attr,
2951 &dev_attr_pcie_replay_count.attr,
2952 NULL
2953};
2954
2955/**
2956 * amdgpu_device_init - initialize the driver
2957 *
2958 * @adev: amdgpu_device pointer
2959 * @ddev: drm dev pointer
2960 * @pdev: pci dev pointer
2961 * @flags: driver flags
2962 *
2963 * Initializes the driver info and hw (all asics).
2964 * Returns 0 for success or an error on failure.
2965 * Called at driver startup.
2966 */
2967int amdgpu_device_init(struct amdgpu_device *adev,
2968 struct drm_device *ddev,
2969 struct pci_dev *pdev,
2970 uint32_t flags)
2971{
2972 int r, i;
2973 bool boco = false;
2974 u32 max_MBps;
2975
2976 adev->shutdown = false;
2977 adev->dev = &pdev->dev;
2978 adev->ddev = ddev;
2979 adev->pdev = pdev;
2980 adev->flags = flags;
2981
2982 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
2983 adev->asic_type = amdgpu_force_asic_type;
2984 else
2985 adev->asic_type = flags & AMD_ASIC_MASK;
2986
2987 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2988 if (amdgpu_emu_mode == 1)
2989 adev->usec_timeout *= 10;
2990 adev->gmc.gart_size = 512 * 1024 * 1024;
2991 adev->accel_working = false;
2992 adev->num_rings = 0;
2993 adev->mman.buffer_funcs = NULL;
2994 adev->mman.buffer_funcs_ring = NULL;
2995 adev->vm_manager.vm_pte_funcs = NULL;
2996 adev->vm_manager.vm_pte_num_scheds = 0;
2997 adev->gmc.gmc_funcs = NULL;
2998 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2999 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3000
3001 adev->smc_rreg = &amdgpu_invalid_rreg;
3002 adev->smc_wreg = &amdgpu_invalid_wreg;
3003 adev->pcie_rreg = &amdgpu_invalid_rreg;
3004 adev->pcie_wreg = &amdgpu_invalid_wreg;
3005 adev->pciep_rreg = &amdgpu_invalid_rreg;
3006 adev->pciep_wreg = &amdgpu_invalid_wreg;
3007 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3008 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3009 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3010 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3011 adev->didt_rreg = &amdgpu_invalid_rreg;
3012 adev->didt_wreg = &amdgpu_invalid_wreg;
3013 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3014 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3015 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3016 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3017
3018 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3019 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3020 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3021
3022	/* mutex initializations are all done here so we
3023	 * can recall functions without having locking issues */
3024 atomic_set(&adev->irq.ih.lock, 0);
3025 mutex_init(&adev->firmware.mutex);
3026 mutex_init(&adev->pm.mutex);
3027 mutex_init(&adev->gfx.gpu_clock_mutex);
3028 mutex_init(&adev->srbm_mutex);
3029 mutex_init(&adev->gfx.pipe_reserve_mutex);
3030 mutex_init(&adev->gfx.gfx_off_mutex);
3031 mutex_init(&adev->grbm_idx_mutex);
3032 mutex_init(&adev->mn_lock);
3033 mutex_init(&adev->virt.vf_errors.lock);
3034 hash_init(adev->mn_hash);
3035 mutex_init(&adev->lock_reset);
3036 mutex_init(&adev->psp.mutex);
3037 mutex_init(&adev->notifier_lock);
3038
3039 r = amdgpu_device_check_arguments(adev);
3040 if (r)
3041 return r;
3042
3043 spin_lock_init(&adev->mmio_idx_lock);
3044 spin_lock_init(&adev->smc_idx_lock);
3045 spin_lock_init(&adev->pcie_idx_lock);
3046 spin_lock_init(&adev->uvd_ctx_idx_lock);
3047 spin_lock_init(&adev->didt_idx_lock);
3048 spin_lock_init(&adev->gc_cac_idx_lock);
3049 spin_lock_init(&adev->se_cac_idx_lock);
3050 spin_lock_init(&adev->audio_endpt_idx_lock);
3051 spin_lock_init(&adev->mm_stats.lock);
3052
3053 INIT_LIST_HEAD(&adev->shadow_list);
3054 mutex_init(&adev->shadow_list_lock);
3055
3056 INIT_DELAYED_WORK(&adev->delayed_init_work,
3057 amdgpu_device_delayed_init_work_handler);
3058 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3059 amdgpu_device_delay_enable_gfx_off);
3060
3061 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3062
3063 adev->gfx.gfx_off_req_count = 1;
3064 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3065
3066 atomic_set(&adev->throttling_logging_enabled, 1);
3067 /*
3068 * If throttling continues, logging will be performed every minute
3069 * to avoid log flooding. "-1" is subtracted since the thermal
3070 * throttling interrupt comes every second. Thus, the total logging
3071	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3072 * for throttling interrupt) = 60 seconds.
3073 */
3074 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3075 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3076
3077 /* Registers mapping */
3078 /* TODO: block userspace mapping of io register */
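	/* the register BAR is 5 on BONAIRE and newer ASICs, 2 on older ones */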
3079 if (adev->asic_type >= CHIP_BONAIRE) {
3080 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3081 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3082 } else {
3083 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3084 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3085 }
3086
3087 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3088 if (adev->rmmio == NULL) {
3089 return -ENOMEM;
3090 }
3091 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3092 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3093
3094 /* io port mapping */
3095 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3096 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
3097 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
3098 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
3099 break;
3100 }
3101 }
3102 if (adev->rio_mem == NULL)
3103 DRM_INFO("PCI I/O BAR is not found.\n");
3104
3105 /* enable PCIE atomic ops */
3106 r = pci_enable_atomic_ops_to_root(adev->pdev,
3107 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3108 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3109 if (r) {
3110 adev->have_atomics_support = false;
3111 DRM_INFO("PCIE atomic ops is not supported\n");
3112 } else {
3113 adev->have_atomics_support = true;
3114 }
3115
3116 amdgpu_device_get_pcie_info(adev);
3117
3118 if (amdgpu_mcbp)
3119 DRM_INFO("MCBP is enabled\n");
3120
3121 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3122 adev->enable_mes = true;
3123
3124 /* detect hw virtualization here */
3125 amdgpu_detect_virtualization(adev);
3126
3127 r = amdgpu_device_get_job_timeout_settings(adev);
3128 if (r) {
3129 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3130 return r;
3131 }
3132
3133 /* early init functions */
3134 r = amdgpu_device_ip_early_init(adev);
3135 if (r)
3136 return r;
3137
3138 /* doorbell bar mapping and doorbell index init*/
3139 amdgpu_device_doorbell_init(adev);
3140
3141 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3142 /* this will fail for cards that aren't VGA class devices, just
3143 * ignore it */
3144 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3145
3146 if (amdgpu_device_supports_boco(ddev))
3147 boco = true;
3148 if (amdgpu_has_atpx() &&
3149 (amdgpu_is_atpx_hybrid() ||
3150 amdgpu_has_atpx_dgpu_power_cntl()) &&
3151 !pci_is_thunderbolt_attached(adev->pdev))
3152 vga_switcheroo_register_client(adev->pdev,
3153 &amdgpu_switcheroo_ops, boco);
3154 if (boco)
3155 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3156
3157 if (amdgpu_emu_mode == 1) {
3158 /* post the asic on emulation mode */
3159 emu_soc_asic_init(adev);
3160 goto fence_driver_init;
3161 }
3162
3163 /* detect if we are with an SRIOV vbios */
3164 amdgpu_device_detect_sriov_bios(adev);
3165
3166 /* check if we need to reset the asic
3167 * E.g., driver was not cleanly unloaded previously, etc.
3168 */
3169 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3170 r = amdgpu_asic_reset(adev);
3171 if (r) {
3172 dev_err(adev->dev, "asic reset on init failed\n");
3173 goto failed;
3174 }
3175 }
3176
3177 /* Post card if necessary */
3178 if (amdgpu_device_need_post(adev)) {
3179 if (!adev->bios) {
3180 dev_err(adev->dev, "no vBIOS found\n");
3181 r = -EINVAL;
3182 goto failed;
3183 }
3184 DRM_INFO("GPU posting now...\n");
3185 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
3186 if (r) {
3187 dev_err(adev->dev, "gpu post error!\n");
3188 goto failed;
3189 }
3190 }
3191
3192 if (adev->is_atom_fw) {
3193 /* Initialize clocks */
3194 r = amdgpu_atomfirmware_get_clock_info(adev);
3195 if (r) {
3196 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3197 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3198 goto failed;
3199 }
3200 } else {
3201 /* Initialize clocks */
3202 r = amdgpu_atombios_get_clock_info(adev);
3203 if (r) {
3204 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3205 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3206 goto failed;
3207 }
3208 /* init i2c buses */
3209 if (!amdgpu_device_has_dc_support(adev))
3210 amdgpu_atombios_i2c_init(adev);
3211 }
3212
3213fence_driver_init:
3214 /* Fence driver */
3215 r = amdgpu_fence_driver_init(adev);
3216 if (r) {
3217 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
3218 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3219 goto failed;
3220 }
3221
3222 /* init the mode config */
3223 drm_mode_config_init(adev->ddev);
3224
3225 r = amdgpu_device_ip_init(adev);
3226 if (r) {
3227 /* failed in exclusive mode due to timeout */
3228 if (amdgpu_sriov_vf(adev) &&
3229 !amdgpu_sriov_runtime(adev) &&
3230 amdgpu_virt_mmio_blocked(adev) &&
3231 !amdgpu_virt_wait_reset(adev)) {
3232 dev_err(adev->dev, "VF exclusive mode timeout\n");
3233 /* Don't send request since VF is inactive. */
3234 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3235 adev->virt.ops = NULL;
3236 r = -EAGAIN;
3237 goto failed;
3238 }
3239 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3240 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3241 goto failed;
3242 }
3243
3244 dev_info(adev->dev,
3245 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3246 adev->gfx.config.max_shader_engines,
3247 adev->gfx.config.max_sh_per_se,
3248 adev->gfx.config.max_cu_per_sh,
3249 adev->gfx.cu_info.number);
3250
3251 adev->accel_working = true;
3252
3253 amdgpu_vm_check_compute_bug(adev);
3254
3255 /* Initialize the buffer migration limit. */
3256 if (amdgpu_moverate >= 0)
3257 max_MBps = amdgpu_moverate;
3258 else
3259 max_MBps = 8; /* Allow 8 MB/s. */
3260 /* Get a log2 for easy divisions. */
3261 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3262
3263 amdgpu_fbdev_init(adev);
3264
3265 r = amdgpu_pm_sysfs_init(adev);
3266 if (r) {
3267 adev->pm_sysfs_en = false;
3268 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3269 } else
3270 adev->pm_sysfs_en = true;
3271
3272 r = amdgpu_ucode_sysfs_init(adev);
3273 if (r) {
3274 adev->ucode_sysfs_en = false;
3275 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3276 } else
3277 adev->ucode_sysfs_en = true;
3278
3279 if ((amdgpu_testing & 1)) {
3280 if (adev->accel_working)
3281 amdgpu_test_moves(adev);
3282 else
3283 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3284 }
3285 if (amdgpu_benchmarking) {
3286 if (adev->accel_working)
3287 amdgpu_benchmark(adev, amdgpu_benchmarking);
3288 else
3289 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3290 }
3291
3292 /*
3293 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3294	 * Otherwise the mgpu fan boost feature will be skipped because the
3295	 * gpu instance count would be too low.
3296 */
3297 amdgpu_register_gpu_instance(adev);
3298
3299 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3300 * explicit gating rather than handling it automatically.
3301 */
3302 r = amdgpu_device_ip_late_init(adev);
3303 if (r) {
3304 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3305 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3306 goto failed;
3307 }
3308
3309 /* must succeed. */
3310 amdgpu_ras_resume(adev);
3311
3312 queue_delayed_work(system_wq, &adev->delayed_init_work,
3313 msecs_to_jiffies(AMDGPU_RESUME_MS));
3314
3315 if (amdgpu_sriov_vf(adev))
3316 flush_delayed_work(&adev->delayed_init_work);
3317
3318 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3319 if (r) {
3320 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3321 return r;
3322 }
3323
3324 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3325 r = amdgpu_pmu_init(adev);
3326 if (r)
3327 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3328
3329 return 0;
3330
3331failed:
3332 amdgpu_vf_error_trans_all(adev);
3333 if (boco)
3334 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3335
3336 return r;
3337}
3338
3339/**
3340 * amdgpu_device_fini - tear down the driver
3341 *
3342 * @adev: amdgpu_device pointer
3343 *
3344 * Tear down the driver info (all asics).
3345 * Called at driver shutdown.
3346 */
3347void amdgpu_device_fini(struct amdgpu_device *adev)
3348{
3349 int r;
3350
3351 DRM_INFO("amdgpu: finishing device.\n");
3352 flush_delayed_work(&adev->delayed_init_work);
3353 adev->shutdown = true;
3354
3355 /* make sure IB test finished before entering exclusive mode
3356 * to avoid preemption on IB test
3357	 */
3358 if (amdgpu_sriov_vf(adev))
3359 amdgpu_virt_request_full_gpu(adev, false);
3360
3361 /* disable all interrupts */
3362 amdgpu_irq_disable_all(adev);
3363	if (adev->mode_info.mode_config_initialized) {
3364 if (!amdgpu_device_has_dc_support(adev))
3365 drm_helper_force_disable_all(adev->ddev);
3366 else
3367 drm_atomic_helper_shutdown(adev->ddev);
3368 }
3369 amdgpu_fence_driver_fini(adev);
3370 if (adev->pm_sysfs_en)
3371 amdgpu_pm_sysfs_fini(adev);
3372 amdgpu_fbdev_fini(adev);
3373 r = amdgpu_device_ip_fini(adev);
3374 release_firmware(adev->firmware.gpu_info_fw);
3375 adev->firmware.gpu_info_fw = NULL;
3376 adev->accel_working = false;
3377 /* free i2c buses */
3378 if (!amdgpu_device_has_dc_support(adev))
3379 amdgpu_i2c_fini(adev);
3380
3381 if (amdgpu_emu_mode != 1)
3382 amdgpu_atombios_fini(adev);
3383
3384 kfree(adev->bios);
3385 adev->bios = NULL;
3386 if (amdgpu_has_atpx() &&
3387 (amdgpu_is_atpx_hybrid() ||
3388 amdgpu_has_atpx_dgpu_power_cntl()) &&
3389 !pci_is_thunderbolt_attached(adev->pdev))
3390 vga_switcheroo_unregister_client(adev->pdev);
3391 if (amdgpu_device_supports_boco(adev->ddev))
3392 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3393 vga_client_register(adev->pdev, NULL, NULL, NULL);
3394 if (adev->rio_mem)
3395 pci_iounmap(adev->pdev, adev->rio_mem);
3396 adev->rio_mem = NULL;
3397 iounmap(adev->rmmio);
3398 adev->rmmio = NULL;
3399 amdgpu_device_doorbell_fini(adev);
3400
3401 if (adev->ucode_sysfs_en)
3402 amdgpu_ucode_sysfs_fini(adev);
3403
3404 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3405 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3406 amdgpu_pmu_fini(adev);
3407 if (adev->discovery_bin)
3408 amdgpu_discovery_fini(adev);
3409}
3410
3411
3412/*
3413 * Suspend & resume.
3414 */
3415/**
3416 * amdgpu_device_suspend - initiate device suspend
3417 *
3418 * @dev: drm dev pointer
3419 * @fbcon : notify the fbdev of suspend
3420 *
3421 * Puts the hw in the suspend state (all asics).
3422 * Returns 0 for success or an error on failure.
3423 * Called at driver suspend.
3424 */
3425int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3426{
3427 struct amdgpu_device *adev;
3428 struct drm_crtc *crtc;
3429 struct drm_connector *connector;
3430 struct drm_connector_list_iter iter;
3431 int r;
3432
3433 if (dev == NULL || dev->dev_private == NULL) {
3434 return -ENODEV;
3435 }
3436
3437 adev = dev->dev_private;
3438
3439 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3440 return 0;
3441
3442 adev->in_suspend = true;
3443 drm_kms_helper_poll_disable(dev);
3444
3445 if (fbcon)
3446 amdgpu_fbdev_set_suspend(adev, 1);
3447
3448 cancel_delayed_work_sync(&adev->delayed_init_work);
3449
3450 if (!amdgpu_device_has_dc_support(adev)) {
3451 /* turn off display hw */
3452 drm_modeset_lock_all(dev);
3453 drm_connector_list_iter_begin(dev, &iter);
3454 drm_for_each_connector_iter(connector, &iter)
3455 drm_helper_connector_dpms(connector,
3456 DRM_MODE_DPMS_OFF);
3457 drm_connector_list_iter_end(&iter);
3458 drm_modeset_unlock_all(dev);
3459 /* unpin the front buffers and cursors */
3460 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3461 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3462 struct drm_framebuffer *fb = crtc->primary->fb;
3463 struct amdgpu_bo *robj;
3464
3465 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3466 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3467 r = amdgpu_bo_reserve(aobj, true);
3468 if (r == 0) {
3469 amdgpu_bo_unpin(aobj);
3470 amdgpu_bo_unreserve(aobj);
3471 }
3472 }
3473
3474 if (fb == NULL || fb->obj[0] == NULL) {
3475 continue;
3476 }
3477 robj = gem_to_amdgpu_bo(fb->obj[0]);
3478 /* don't unpin kernel fb objects */
3479 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3480 r = amdgpu_bo_reserve(robj, true);
3481 if (r == 0) {
3482 amdgpu_bo_unpin(robj);
3483 amdgpu_bo_unreserve(robj);
3484 }
3485 }
3486 }
3487 }
3488
3489 amdgpu_ras_suspend(adev);
3490
3491 r = amdgpu_device_ip_suspend_phase1(adev);
3492
3493 amdgpu_amdkfd_suspend(adev, !fbcon);
3494
3495 /* evict vram memory */
3496 amdgpu_bo_evict_vram(adev);
3497
3498 amdgpu_fence_driver_suspend(adev);
3499
3500 r = amdgpu_device_ip_suspend_phase2(adev);
3501
3502 /* evict remaining vram memory
3503 * This second call to evict vram is to evict the gart page table
3504 * using the CPU.
3505 */
3506 amdgpu_bo_evict_vram(adev);
3507
3508 return 0;
3509}
3510
3511/**
3512 * amdgpu_device_resume - initiate device resume
3513 *
3514 * @dev: drm dev pointer
3515 * @fbcon : notify the fbdev of resume
3516 *
3517 * Bring the hw back to operating state (all asics).
3518 * Returns 0 for success or an error on failure.
3519 * Called at driver resume.
3520 */
3521int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3522{
3523 struct drm_connector *connector;
3524 struct drm_connector_list_iter iter;
3525 struct amdgpu_device *adev = dev->dev_private;
3526 struct drm_crtc *crtc;
3527 int r = 0;
3528
3529 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3530 return 0;
3531
3532 /* post card */
3533 if (amdgpu_device_need_post(adev)) {
3534 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
3535 if (r)
3536 DRM_ERROR("amdgpu asic init failed\n");
3537 }
3538
3539 r = amdgpu_device_ip_resume(adev);
3540 if (r) {
3541 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
3542 return r;
3543 }
3544 amdgpu_fence_driver_resume(adev);
3545
3546
3547 r = amdgpu_device_ip_late_init(adev);
3548 if (r)
3549 return r;
3550
3551 queue_delayed_work(system_wq, &adev->delayed_init_work,
3552 msecs_to_jiffies(AMDGPU_RESUME_MS));
3553
3554 if (!amdgpu_device_has_dc_support(adev)) {
3555 /* pin cursors */
3556 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3557 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3558
3559 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3560 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3561 r = amdgpu_bo_reserve(aobj, true);
3562 if (r == 0) {
3563 r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3564 if (r != 0)
3565 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
3566 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3567 amdgpu_bo_unreserve(aobj);
3568 }
3569 }
3570 }
3571 }
3572 r = amdgpu_amdkfd_resume(adev, !fbcon);
3573 if (r)
3574 return r;
3575
3576 /* Make sure IB tests flushed */
3577 flush_delayed_work(&adev->delayed_init_work);
3578
3579 /* blat the mode back in */
3580 if (fbcon) {
3581 if (!amdgpu_device_has_dc_support(adev)) {
3582 /* pre DCE11 */
3583 drm_helper_resume_force_mode(dev);
3584
3585 /* turn on display hw */
3586 drm_modeset_lock_all(dev);
3587
3588 drm_connector_list_iter_begin(dev, &iter);
3589 drm_for_each_connector_iter(connector, &iter)
3590 drm_helper_connector_dpms(connector,
3591 DRM_MODE_DPMS_ON);
3592 drm_connector_list_iter_end(&iter);
3593
3594 drm_modeset_unlock_all(dev);
3595 }
3596 amdgpu_fbdev_set_suspend(adev, 0);
3597 }
3598
3599 drm_kms_helper_poll_enable(dev);
3600
3601 amdgpu_ras_resume(adev);
3602
3603 /*
3604 * Most of the connector probing functions try to acquire runtime pm
3605 * refs to ensure that the GPU is powered on when connector polling is
3606 * performed. Since we're calling this from a runtime PM callback,
3607 * trying to acquire rpm refs will cause us to deadlock.
3608 *
3609 * Since we're guaranteed to be holding the rpm lock, it's safe to
3610 * temporarily disable the rpm helpers so this doesn't deadlock us.
3611 */
3612#ifdef CONFIG_PM
3613 dev->dev->power.disable_depth++;
3614#endif
3615 if (!amdgpu_device_has_dc_support(adev))
3616 drm_helper_hpd_irq_event(dev);
3617 else
3618 drm_kms_helper_hotplug_event(dev);
3619#ifdef CONFIG_PM
3620 dev->dev->power.disable_depth--;
3621#endif
3622 adev->in_suspend = false;
3623
3624 return 0;
3625}
3626
3627/**
3628 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3629 *
3630 * @adev: amdgpu_device pointer
3631 *
3632 * The list of all the hardware IPs that make up the asic is walked and
3633 * the check_soft_reset callbacks are run. check_soft_reset determines
3634 * if the asic is still hung or not.
3635 * Returns true if any of the IPs are still in a hung state, false if not.
3636 */
3637static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3638{
3639 int i;
3640 bool asic_hang = false;
3641
3642 if (amdgpu_sriov_vf(adev))
3643 return true;
3644
3645 if (amdgpu_asic_need_full_reset(adev))
3646 return true;
3647
3648 for (i = 0; i < adev->num_ip_blocks; i++) {
3649 if (!adev->ip_blocks[i].status.valid)
3650 continue;
3651 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3652 adev->ip_blocks[i].status.hang =
3653 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3654 if (adev->ip_blocks[i].status.hang) {
3655 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3656 asic_hang = true;
3657 }
3658 }
3659 return asic_hang;
3660}
3661
3662/**
3663 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3664 *
3665 * @adev: amdgpu_device pointer
3666 *
3667 * The list of all the hardware IPs that make up the asic is walked and the
3668 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
3669 * handles any IP specific hardware or software state changes that are
3670 * necessary for a soft reset to succeed.
3671 * Returns 0 on success, negative error code on failure.
3672 */
3673static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3674{
3675 int i, r = 0;
3676
3677 for (i = 0; i < adev->num_ip_blocks; i++) {
3678 if (!adev->ip_blocks[i].status.valid)
3679 continue;
3680 if (adev->ip_blocks[i].status.hang &&
3681 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3682 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3683 if (r)
3684 return r;
3685 }
3686 }
3687
3688 return 0;
3689}
3690
3691/**
3692 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3693 *
3694 * @adev: amdgpu_device pointer
3695 *
3696 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
3697 * reset is necessary to recover.
3698 * Returns true if a full asic reset is required, false if not.
3699 */
3700static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3701{
3702 int i;
3703
3704 if (amdgpu_asic_need_full_reset(adev))
3705 return true;
3706
3707 for (i = 0; i < adev->num_ip_blocks; i++) {
3708 if (!adev->ip_blocks[i].status.valid)
3709 continue;
3710 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3711 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3712 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3713 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3714 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3715 if (adev->ip_blocks[i].status.hang) {
3716 DRM_INFO("Some block need full reset!\n");
3717 return true;
3718 }
3719 }
3720 }
3721 return false;
3722}
3723
3724/**
3725 * amdgpu_device_ip_soft_reset - do a soft reset
3726 *
3727 * @adev: amdgpu_device pointer
3728 *
3729 * The list of all the hardware IPs that make up the asic is walked and the
3730 * soft_reset callbacks are run if the block is hung. soft_reset handles any
3731 * IP specific hardware or software state changes that are necessary to soft
3732 * reset the IP.
3733 * Returns 0 on success, negative error code on failure.
3734 */
3735static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3736{
3737 int i, r = 0;
3738
3739 for (i = 0; i < adev->num_ip_blocks; i++) {
3740 if (!adev->ip_blocks[i].status.valid)
3741 continue;
3742 if (adev->ip_blocks[i].status.hang &&
3743 adev->ip_blocks[i].version->funcs->soft_reset) {
3744 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3745 if (r)
3746 return r;
3747 }
3748 }
3749
3750 return 0;
3751}
3752
3753/**
3754 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3755 *
3756 * @adev: amdgpu_device pointer
3757 *
3758 * The list of all the hardware IPs that make up the asic is walked and the
3759 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
3760 * handles any IP specific hardware or software state changes that are
3761 * necessary after the IP has been soft reset.
3762 * Returns 0 on success, negative error code on failure.
3763 */
3764static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
3765{
3766 int i, r = 0;
3767
3768 for (i = 0; i < adev->num_ip_blocks; i++) {
3769 if (!adev->ip_blocks[i].status.valid)
3770 continue;
3771 if (adev->ip_blocks[i].status.hang &&
3772 adev->ip_blocks[i].version->funcs->post_soft_reset)
3773 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
3774 if (r)
3775 return r;
3776 }
3777
3778 return 0;
3779}
3780
3781/**
3782 * amdgpu_device_recover_vram - Recover some VRAM contents
3783 *
3784 * @adev: amdgpu_device pointer
3785 *
3786 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
3787 * restore things like GPUVM page tables after a GPU reset where
3788 * the contents of VRAM might be lost.
3789 *
3790 * Returns:
3791 * 0 on success, negative error code on failure.
3792 */
3793static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
3794{
3795 struct dma_fence *fence = NULL, *next = NULL;
3796 struct amdgpu_bo *shadow;
3797 long r = 1, tmo;
3798
3799 if (amdgpu_sriov_runtime(adev))
3800 tmo = msecs_to_jiffies(8000);
3801 else
3802 tmo = msecs_to_jiffies(100);
3803
3804 DRM_INFO("recover vram bo from shadow start\n");
3805 mutex_lock(&adev->shadow_list_lock);
3806 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
3807
3808 /* No need to recover an evicted BO */
3809 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
3810 shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
3811 shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
3812 continue;
3813
3814 r = amdgpu_bo_restore_shadow(shadow, &next);
3815 if (r)
3816 break;
3817
3818 if (fence) {
3819 tmo = dma_fence_wait_timeout(fence, false, tmo);
3820 dma_fence_put(fence);
3821 fence = next;
3822 if (tmo == 0) {
3823 r = -ETIMEDOUT;
3824 break;
3825 } else if (tmo < 0) {
3826 r = tmo;
3827 break;
3828 }
3829 } else {
3830 fence = next;
3831 }
3832 }
3833 mutex_unlock(&adev->shadow_list_lock);
3834
3835 if (fence)
3836 tmo = dma_fence_wait_timeout(fence, false, tmo);
3837 dma_fence_put(fence);
3838
3839 if (r < 0 || tmo <= 0) {
3840 DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
3841 return -EIO;
3842 }
3843
3844 DRM_INFO("recover vram bo from shadow done\n");
3845 return 0;
3846}
3847
3848
3849/**
3850 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
3851 *
3852 * @adev: amdgpu device pointer
3853 * @from_hypervisor: request from hypervisor
3854 *
 * Do a VF FLR and reinitialize the ASIC.
 * Returns 0 on success, negative error code on failure.
3857 */
3858static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3859 bool from_hypervisor)
3860{
3861 int r;
3862
3863 if (from_hypervisor)
3864 r = amdgpu_virt_request_full_gpu(adev, true);
3865 else
3866 r = amdgpu_virt_reset_gpu(adev);
3867 if (r)
3868 return r;
3869
3870 amdgpu_amdkfd_pre_reset(adev);
3871
3872 /* Resume IP prior to SMC */
3873 r = amdgpu_device_ip_reinit_early_sriov(adev);
3874 if (r)
3875 goto error;
3876
3877 amdgpu_virt_init_data_exchange(adev);
	/* we need to recover the GART prior to resuming SMC/CP/SDMA */
3879 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
3880
3881 r = amdgpu_device_fw_loading(adev);
3882 if (r)
3883 return r;
3884
3885 /* now we are okay to resume SMC/CP/SDMA */
3886 r = amdgpu_device_ip_reinit_late_sriov(adev);
3887 if (r)
3888 goto error;
3889
3890 amdgpu_irq_gpu_reset_resume_helper(adev);
3891 r = amdgpu_ib_ring_tests(adev);
3892 amdgpu_amdkfd_post_reset(adev);
3893
3894error:
3895 amdgpu_virt_release_full_gpu(adev, true);
3896 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3897 amdgpu_inc_vram_lost(adev);
3898 r = amdgpu_device_recover_vram(adev);
3899 }
3900
3901 return r;
3902}
3903
3904/**
3905 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
3906 *
3907 * @adev: amdgpu device pointer
3908 *
3909 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
3910 * a hung GPU.
3911 */
3912bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
3913{
3914 if (!amdgpu_device_ip_check_soft_reset(adev)) {
3915 DRM_INFO("Timeout, but no hardware hang detected.\n");
3916 return false;
3917 }
3918
3919 if (amdgpu_gpu_recovery == 0)
3920 goto disabled;
3921
3922 if (amdgpu_sriov_vf(adev))
3923 return true;
3924
3925 if (amdgpu_gpu_recovery == -1) {
3926 switch (adev->asic_type) {
3927 case CHIP_BONAIRE:
3928 case CHIP_HAWAII:
3929 case CHIP_TOPAZ:
3930 case CHIP_TONGA:
3931 case CHIP_FIJI:
3932 case CHIP_POLARIS10:
3933 case CHIP_POLARIS11:
3934 case CHIP_POLARIS12:
3935 case CHIP_VEGAM:
3936 case CHIP_VEGA20:
3937 case CHIP_VEGA10:
3938 case CHIP_VEGA12:
3939 case CHIP_RAVEN:
3940 case CHIP_ARCTURUS:
3941 case CHIP_RENOIR:
3942 case CHIP_NAVI10:
3943 case CHIP_NAVI14:
3944 case CHIP_NAVI12:
3945 case CHIP_SIENNA_CICHLID:
3946 break;
3947 default:
3948 goto disabled;
3949 }
3950 }
3951
3952 return true;
3953
3954disabled:
3955 DRM_INFO("GPU recovery disabled.\n");
3956 return false;
3957}
3958
3959
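/**
 * amdgpu_device_pre_asic_reset - prepare a device for ASIC reset
 *
 * @adev: amdgpu_device pointer
 * @job: job that triggered the timeout, if any
 * @need_full_reset_arg: in/out flag, set when a full ASIC reset is required
 *
 * Forces completion of the outstanding hardware fences, increases the karma
 * of the guilty job and, on bare metal, attempts a soft reset of the hung IP
 * blocks. If the soft reset fails, or a full reset is already required, the
 * IP blocks are suspended in preparation for a full ASIC reset.
 * Returns 0 on success, negative error code on failure.
 */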
3960static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
3961 struct amdgpu_job *job,
3962 bool *need_full_reset_arg)
3963{
3964 int i, r = 0;
3965 bool need_full_reset = *need_full_reset_arg;
3966
3967 amdgpu_debugfs_wait_dump(adev);
3968
3969 /* block all schedulers and reset given job's ring */
3970 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3971 struct amdgpu_ring *ring = adev->rings[i];
3972
3973 if (!ring || !ring->sched.thread)
3974 continue;
3975
3976 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3977 amdgpu_fence_driver_force_completion(ring);
3978 }
3979
	if (job)
3981 drm_sched_increase_karma(&job->base);
3982
3983 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
3984 if (!amdgpu_sriov_vf(adev)) {
3985
3986 if (!need_full_reset)
3987 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
3988
3989 if (!need_full_reset) {
3990 amdgpu_device_ip_pre_soft_reset(adev);
3991 r = amdgpu_device_ip_soft_reset(adev);
3992 amdgpu_device_ip_post_soft_reset(adev);
3993 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
				DRM_INFO("soft reset failed, will fall back to full reset!\n");
3995 need_full_reset = true;
3996 }
3997 }
3998
3999 if (need_full_reset)
4000 r = amdgpu_device_ip_suspend(adev);
4001
4002 *need_full_reset_arg = need_full_reset;
4003 }
4004
4005 return r;
4006}
4007
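/**
 * amdgpu_do_asic_reset - perform the actual ASIC reset and re-init
 *
 * @hive: XGMI hive the device belongs to, or NULL
 * @device_list_handle: list of devices to reset (all hive members for XGMI)
 * @need_full_reset_arg: in/out flag, set when a full ASIC reset is required
 *
 * Resets every device on the list (in parallel for XGMI hives), re-posts the
 * cards, resumes the IP blocks and recovers VRAM contents from the shadow
 * BOs, then runs the IB ring tests. If the ring tests fail, the IP blocks
 * are suspended again and -EAGAIN is returned so the caller can retry with
 * a full reset.
 * Returns 0 on success, negative error code on failure.
 */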
4008static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
4009 struct list_head *device_list_handle,
4010 bool *need_full_reset_arg)
4011{
4012 struct amdgpu_device *tmp_adev = NULL;
4013 bool need_full_reset = *need_full_reset_arg, vram_lost = false;
4014 int r = 0;
4015
4016 /*
	 * ASIC reset has to be done on all XGMI hive nodes ASAP
	 * to allow proper link negotiation in FW (within 1 sec)
4019 */
4020 if (need_full_reset) {
4021 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4022 /* For XGMI run all resets in parallel to speed up the process */
4023 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4024 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4025 r = -EALREADY;
4026 } else
4027 r = amdgpu_asic_reset(tmp_adev);
4028
4029 if (r) {
4030 DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
4031 r, tmp_adev->ddev->unique);
4032 break;
4033 }
4034 }
4035
		/* For XGMI, wait for all resets to complete before proceeding */
4037 if (!r) {
4038 list_for_each_entry(tmp_adev, device_list_handle,
4039 gmc.xgmi.head) {
4040 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4041 flush_work(&tmp_adev->xgmi_reset_work);
4042 r = tmp_adev->asic_reset_res;
4043 if (r)
4044 break;
4045 }
4046 }
4047 }
4048 }
4049
4050 if (!r && amdgpu_ras_intr_triggered()) {
4051 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4052 if (tmp_adev->mmhub.funcs &&
4053 tmp_adev->mmhub.funcs->reset_ras_error_count)
4054 tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
4055 }
4056
4057 amdgpu_ras_intr_cleared();
4058 }
4059
4060 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4061 if (need_full_reset) {
4062 /* post card */
4063 if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
4064 DRM_WARN("asic atom init failed!");
4065
4066 if (!r) {
4067 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4068 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4069 if (r)
4070 goto out;
4071
4072 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4073 if (vram_lost) {
4074 DRM_INFO("VRAM is lost due to GPU reset!\n");
4075 amdgpu_inc_vram_lost(tmp_adev);
4076 }
4077
4078 r = amdgpu_gtt_mgr_recover(
4079 &tmp_adev->mman.bdev.man[TTM_PL_TT]);
4080 if (r)
4081 goto out;
4082
4083 r = amdgpu_device_fw_loading(tmp_adev);
4084 if (r)
4085 return r;
4086
4087 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4088 if (r)
4089 goto out;
4090
4091 if (vram_lost)
4092 amdgpu_device_fill_reset_magic(tmp_adev);
4093
4094 /*
				 * Add this ASIC back as tracked since the reset
				 * completed successfully.
4097 */
4098 amdgpu_register_gpu_instance(tmp_adev);
4099
4100 r = amdgpu_device_ip_late_init(tmp_adev);
4101 if (r)
4102 goto out;
4103
4104 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4105
4106 /* must succeed. */
4107 amdgpu_ras_resume(tmp_adev);
4108
4109 /* Update PSP FW topology after reset */
4110 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4111 r = amdgpu_xgmi_update_topology(hive, tmp_adev);
4112 }
4113 }
4114
4115
4116out:
4117 if (!r) {
4118 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4119 r = amdgpu_ib_ring_tests(tmp_adev);
4120 if (r) {
4121 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4122 r = amdgpu_device_ip_suspend(tmp_adev);
4123 need_full_reset = true;
4124 r = -EAGAIN;
4125 goto end;
4126 }
4127 }
4128
4129 if (!r)
4130 r = amdgpu_device_recover_vram(tmp_adev);
4131 else
4132 tmp_adev->asic_reset_res = r;
4133 }
4134
4135end:
4136 *need_full_reset_arg = need_full_reset;
4137 return r;
4138}
4139
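/**
 * amdgpu_device_lock_adev - take the reset lock for a device
 *
 * @adev: amdgpu_device pointer
 * @trylock: only try to take the reset mutex instead of blocking on it
 *
 * Grabs adev->lock_reset, increments the GPU reset counter, marks the device
 * as being in reset and sets the MP1 state expected by the chosen reset
 * method.
 * Returns false if @trylock is set and the lock is already held, true otherwise.
 */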
4140static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
4141{
4142 if (trylock) {
4143 if (!mutex_trylock(&adev->lock_reset))
4144 return false;
4145 } else
4146 mutex_lock(&adev->lock_reset);
4147
4148 atomic_inc(&adev->gpu_reset_counter);
4149 adev->in_gpu_reset = true;
4150 switch (amdgpu_asic_reset_method(adev)) {
4151 case AMD_RESET_METHOD_MODE1:
4152 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4153 break;
4154 case AMD_RESET_METHOD_MODE2:
4155 adev->mp1_state = PP_MP1_STATE_RESET;
4156 break;
4157 default:
4158 adev->mp1_state = PP_MP1_STATE_NONE;
4159 break;
4160 }
4161
4162 return true;
4163}
4164
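/**
 * amdgpu_device_unlock_adev - release the reset lock for a device
 *
 * @adev: amdgpu_device pointer
 *
 * Flushes any pending VF errors, restores the MP1 state, clears the
 * in_gpu_reset flag and drops adev->lock_reset.
 */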
4165static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4166{
4167 amdgpu_vf_error_trans_all(adev);
4168 adev->mp1_state = PP_MP1_STATE_NONE;
4169 adev->in_gpu_reset = false;
4170 mutex_unlock(&adev->lock_reset);
4171}
4172
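/**
 * amdgpu_device_resume_display_audio - resume the GPU's HDA controller
 *
 * @adev: amdgpu_device pointer
 *
 * Re-enables runtime PM for the audio function (function 1) of the GPU and
 * resumes it after it was suspended for the duration of the reset.
 */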
4173static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4174{
4175 struct pci_dev *p = NULL;
4176
4177 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4178 adev->pdev->bus->number, 1);
4179 if (p) {
4180 pm_runtime_enable(&(p->dev));
4181 pm_runtime_resume(&(p->dev));
4182 }
4183}
4184
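/**
 * amdgpu_device_suspend_display_audio - suspend the GPU's HDA controller
 *
 * @adev: amdgpu_device pointer
 *
 * Runtime-suspends the audio function (function 1) of the GPU and disables
 * its runtime PM, so the audio hardware is not touched behind the audio
 * driver's back while a BACO or mode1 reset is in progress.
 * Returns 0 on success, negative error code on failure.
 */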
4185static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4186{
4187 enum amd_reset_method reset_method;
4188 struct pci_dev *p = NULL;
4189 u64 expires;
4190
4191 /*
	 * For now, only BACO and mode1 reset are confirmed to suffer
	 * from the audio issue if the audio device is not properly suspended.
4194 */
4195 reset_method = amdgpu_asic_reset_method(adev);
4196 if ((reset_method != AMD_RESET_METHOD_BACO) &&
4197 (reset_method != AMD_RESET_METHOD_MODE1))
4198 return -EINVAL;
4199
4200 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4201 adev->pdev->bus->number, 1);
4202 if (!p)
4203 return -ENODEV;
4204
4205 expires = pm_runtime_autosuspend_expiration(&(p->dev));
4206 if (!expires)
4207 /*
4208 * If we cannot get the audio device autosuspend delay,
4209 * a fixed 4S interval will be used. Considering 3S is
4210 * the audio controller default autosuspend delay setting.
4211 * 4S used here is guaranteed to cover that.
4212 */
4213 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4214
4215 while (!pm_runtime_status_suspended(&(p->dev))) {
4216 if (!pm_runtime_suspend(&(p->dev)))
4217 break;
4218
4219 if (expires < ktime_get_mono_fast_ns()) {
4220 dev_warn(adev->dev, "failed to suspend display audio\n");
4221 /* TODO: abort the succeeding gpu reset? */
4222 return -ETIMEDOUT;
4223 }
4224 }
4225
4226 pm_runtime_disable(&(p->dev));
4227
4228 return 0;
4229}
4230
4231/**
4232 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4233 *
4234 * @adev: amdgpu device pointer
 * @job: which job triggered the hang
4236 *
4237 * Attempt to reset the GPU if it has hung (all asics).
 * Attempt a soft reset or full reset and reinitialize the ASIC.
4239 * Returns 0 for success or an error on failure.
4240 */
4241
4242int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4243 struct amdgpu_job *job)
4244{
4245 struct list_head device_list, *device_list_handle = NULL;
4246 bool need_full_reset = false;
4247 bool job_signaled = false;
4248 struct amdgpu_hive_info *hive = NULL;
4249 struct amdgpu_device *tmp_adev = NULL;
4250 int i, r = 0;
4251 bool need_emergency_restart = false;
4252 bool audio_suspended = false;
4253
	/*
4255 * Special case: RAS triggered and full reset isn't supported
4256 */
4257 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4258
4259 /*
4260 * Flush RAM to disk so that after reboot
4261 * the user can read log and see why the system rebooted.
4262 */
4263 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4264 DRM_WARN("Emergency reboot.");
4265
4266 ksys_sync_helper();
4267 emergency_restart();
4268 }
4269
4270 dev_info(adev->dev, "GPU %s begin!\n",
4271 need_emergency_restart ? "jobs stop":"reset");
4272
4273 /*
	 * Here we trylock to avoid a chain of resets executing, triggered
	 * either by jobs on different adevs in an XGMI hive or by jobs on
	 * different schedulers for the same device, while this TO handler
	 * is running.
	 * We always reset all schedulers for a device and all devices in an
	 * XGMI hive, so that should take care of them too.
4279 */
4280 hive = amdgpu_get_xgmi_hive(adev, true);
4281 if (hive && !mutex_trylock(&hive->reset_lock)) {
4282 DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4283 job ? job->base.id : -1, hive->hive_id);
4284 mutex_unlock(&hive->hive_lock);
4285 return 0;
4286 }
4287
4288 /*
4289 * Build list of devices to reset.
4290 * In case we are in XGMI hive mode, resort the device list
4291 * to put adev in the 1st position.
4292 */
4293 INIT_LIST_HEAD(&device_list);
4294 if (adev->gmc.xgmi.num_physical_nodes > 1) {
4295 if (!hive)
4296 return -ENODEV;
4297 if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
4298 list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
4299 device_list_handle = &hive->device_list;
4300 } else {
4301 list_add_tail(&adev->gmc.xgmi.head, &device_list);
4302 device_list_handle = &device_list;
4303 }
4304
4305 /* block all schedulers and reset given job's ring */
4306 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4307 if (!amdgpu_device_lock_adev(tmp_adev, !hive)) {
4308 DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
4309 job ? job->base.id : -1);
4310 mutex_unlock(&hive->hive_lock);
4311 return 0;
4312 }
4313
4314 /*
		 * Try to put the audio codec into suspend state
		 * before the gpu reset starts.
		 *
		 * Because the power domain of the graphics device is
		 * shared with the AZ power domain, we may otherwise
		 * change the audio hardware from behind the audio
		 * driver's back and trigger audio codec errors.
4323 */
4324 if (!amdgpu_device_suspend_display_audio(tmp_adev))
4325 audio_suspended = true;
4326
4327 amdgpu_ras_set_error_query_ready(tmp_adev, false);
4328
4329 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
4330
4331 if (!amdgpu_sriov_vf(tmp_adev))
4332 amdgpu_amdkfd_pre_reset(tmp_adev);
4333
4334 /*
		 * Mark these ASICs to be reset as untracked first,
		 * and add them back after the reset completes.
4337 */
4338 amdgpu_unregister_gpu_instance(tmp_adev);
4339
4340 amdgpu_fbdev_set_suspend(tmp_adev, 1);
4341
4342 /* disable ras on ALL IPs */
4343 if (!need_emergency_restart &&
4344 amdgpu_device_ip_need_full_reset(tmp_adev))
4345 amdgpu_ras_suspend(tmp_adev);
4346
4347 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4348 struct amdgpu_ring *ring = tmp_adev->rings[i];
4349
4350 if (!ring || !ring->sched.thread)
4351 continue;
4352
4353 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
4354
4355 if (need_emergency_restart)
4356 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
4357 }
4358 }
4359
4360 if (need_emergency_restart)
4361 goto skip_sched_resume;
4362
4363 /*
4364 * Must check guilty signal here since after this point all old
4365 * HW fences are force signaled.
4366 *
4367 * job->base holds a reference to parent fence
4368 */
4369 if (job && job->base.s_fence->parent &&
4370 dma_fence_is_signaled(job->base.s_fence->parent)) {
4371 job_signaled = true;
4372 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
4373 goto skip_hw_reset;
4374 }
4375
4376retry: /* Rest of adevs pre asic reset from XGMI hive. */
4377 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4378 r = amdgpu_device_pre_asic_reset(tmp_adev,
4379 NULL,
4380 &need_full_reset);
		/* TODO: Should we stop? */
4382 if (r) {
4383 DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
4384 r, tmp_adev->ddev->unique);
4385 tmp_adev->asic_reset_res = r;
4386 }
4387 }
4388
4389 /* Actual ASIC resets if needed.*/
4390 /* TODO Implement XGMI hive reset logic for SRIOV */
4391 if (amdgpu_sriov_vf(adev)) {
4392 r = amdgpu_device_reset_sriov(adev, job ? false : true);
4393 if (r)
4394 adev->asic_reset_res = r;
4395 } else {
4396 r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
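		/*
		 * -EAGAIN means the IB ring tests failed after the reset;
		 * need_full_reset has been set, so retry with a full reset.
		 */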
4397 if (r && r == -EAGAIN)
4398 goto retry;
4399 }
4400
4401skip_hw_reset:
4402
	/* Post ASIC reset for all devs. */
4404 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4405
4406 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4407 struct amdgpu_ring *ring = tmp_adev->rings[i];
4408
4409 if (!ring || !ring->sched.thread)
4410 continue;
4411
			/* No point in resubmitting jobs if we didn't HW reset */
4413 if (!tmp_adev->asic_reset_res && !job_signaled)
4414 drm_sched_resubmit_jobs(&ring->sched);
4415
4416 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
4417 }
4418
4419 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
4420 drm_helper_resume_force_mode(tmp_adev->ddev);
4421 }
4422
4423 tmp_adev->asic_reset_res = 0;
4424
4425 if (r) {
4426 /* bad news, how to tell it to userspace ? */
4427 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
4428 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
4429 } else {
4430 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
4431 }
4432 }
4433
4434skip_sched_resume:
4435 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		/* unlock kfd: SR-IOV does it separately */
4437 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
4438 amdgpu_amdkfd_post_reset(tmp_adev);
4439 if (audio_suspended)
4440 amdgpu_device_resume_display_audio(tmp_adev);
4441 amdgpu_device_unlock_adev(tmp_adev);
4442 }
4443
4444 if (hive) {
4445 mutex_unlock(&hive->reset_lock);
4446 mutex_unlock(&hive->hive_lock);
4447 }
4448
4449 if (r)
4450 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
4451 return r;
4452}
4453
4454/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
4456 *
4457 * @adev: amdgpu_device pointer
4458 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
4460 * and lanes) of the slot the device is in. Handles APUs and
4461 * virtualized environments where PCIE config space may not be available.
4462 */
4463static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
4464{
4465 struct pci_dev *pdev;
4466 enum pci_bus_speed speed_cap, platform_speed_cap;
4467 enum pcie_link_width platform_link_width;
4468
4469 if (amdgpu_pcie_gen_cap)
4470 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
4471
4472 if (amdgpu_pcie_lane_cap)
4473 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
4474
4475 /* covers APUs as well */
4476 if (pci_is_root_bus(adev->pdev->bus)) {
4477 if (adev->pm.pcie_gen_mask == 0)
4478 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4479 if (adev->pm.pcie_mlw_mask == 0)
4480 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
4481 return;
4482 }
4483
4484 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
4485 return;
4486
4487 pcie_bandwidth_available(adev->pdev, NULL,
4488 &platform_speed_cap, &platform_link_width);
4489
4490 if (adev->pm.pcie_gen_mask == 0) {
4491 /* asic caps */
4492 pdev = adev->pdev;
4493 speed_cap = pcie_get_speed_cap(pdev);
4494 if (speed_cap == PCI_SPEED_UNKNOWN) {
4495 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4496 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4497 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4498 } else {
4499 if (speed_cap == PCIE_SPEED_16_0GT)
4500 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4501 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4502 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4503 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4504 else if (speed_cap == PCIE_SPEED_8_0GT)
4505 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4506 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4507 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4508 else if (speed_cap == PCIE_SPEED_5_0GT)
4509 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4510 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4511 else
4512 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4513 }
4514 /* platform caps */
4515 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
4516 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4517 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4518 } else {
4519 if (platform_speed_cap == PCIE_SPEED_16_0GT)
4520 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4521 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4522 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4523 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
4524 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
4525 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4526 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4527 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
4528 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
4529 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4530 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4531 else
4532 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4533
4534 }
4535 }
4536 if (adev->pm.pcie_mlw_mask == 0) {
4537 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
4538 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4539 } else {
4540 switch (platform_link_width) {
4541 case PCIE_LNK_X32:
4542 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4543 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4544 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4545 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4546 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4547 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4548 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4549 break;
4550 case PCIE_LNK_X16:
4551 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4552 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4553 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4554 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4555 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4556 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4557 break;
4558 case PCIE_LNK_X12:
4559 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4560 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4561 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4562 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4563 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4564 break;
4565 case PCIE_LNK_X8:
4566 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4567 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4568 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4569 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4570 break;
4571 case PCIE_LNK_X4:
4572 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4573 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4574 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4575 break;
4576 case PCIE_LNK_X2:
4577 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4578 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4579 break;
4580 case PCIE_LNK_X1:
4581 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
4582 break;
4583 default:
4584 break;
4585 }
4586 }
4587 }
4588}
4589
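/**
 * amdgpu_device_baco_enter - enter BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * Disables the RAS doorbell interrupt when RAS is supported and asks the
 * DPM code to put the device into the BACO state.
 * Returns 0 on success, negative error code on failure.
 */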
4590int amdgpu_device_baco_enter(struct drm_device *dev)
4591{
4592 struct amdgpu_device *adev = dev->dev_private;
4593 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4594
4595 if (!amdgpu_device_supports_baco(adev->ddev))
4596 return -ENOTSUPP;
4597
4598 if (ras && ras->supported)
4599 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
4600
4601 return amdgpu_dpm_baco_enter(adev);
4602}
4603
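/**
 * amdgpu_device_baco_exit - exit BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * Asks the DPM code to bring the device out of the BACO state and re-enables
 * the RAS doorbell interrupt when RAS is supported.
 * Returns 0 on success, negative error code on failure.
 */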
4604int amdgpu_device_baco_exit(struct drm_device *dev)
4605{
4606 struct amdgpu_device *adev = dev->dev_private;
4607 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4608 int ret = 0;
4609
4610 if (!amdgpu_device_supports_baco(adev->ddev))
4611 return -ENOTSUPP;
4612
4613 ret = amdgpu_dpm_baco_exit(adev);
4614 if (ret)
4615 return ret;
4616
4617 if (ras && ras->supported)
4618 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
4619
4620 return 0;
4621}
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/power_supply.h>
29#include <linux/kthread.h>
30#include <linux/module.h>
31#include <linux/console.h>
32#include <linux/slab.h>
33
34#include <drm/drm_atomic_helper.h>
35#include <drm/drm_probe_helper.h>
36#include <drm/amdgpu_drm.h>
37#include <linux/vgaarb.h>
38#include <linux/vga_switcheroo.h>
39#include <linux/efi.h>
40#include "amdgpu.h"
41#include "amdgpu_trace.h"
42#include "amdgpu_i2c.h"
43#include "atom.h"
44#include "amdgpu_atombios.h"
45#include "amdgpu_atomfirmware.h"
46#include "amd_pcie.h"
47#ifdef CONFIG_DRM_AMDGPU_SI
48#include "si.h"
49#endif
50#ifdef CONFIG_DRM_AMDGPU_CIK
51#include "cik.h"
52#endif
53#include "vi.h"
54#include "soc15.h"
55#include "nv.h"
56#include "bif/bif_4_1_d.h"
57#include <linux/pci.h>
58#include <linux/firmware.h>
59#include "amdgpu_vf_error.h"
60
61#include "amdgpu_amdkfd.h"
62#include "amdgpu_pm.h"
63
64#include "amdgpu_xgmi.h"
65#include "amdgpu_ras.h"
66#include "amdgpu_pmu.h"
67
68MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
69MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
70MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
71MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
72MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
73MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
74MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
75MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
76MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
77MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
78
79#define AMDGPU_RESUME_MS 2000
80
81static const char *amdgpu_asic_name[] = {
82 "TAHITI",
83 "PITCAIRN",
84 "VERDE",
85 "OLAND",
86 "HAINAN",
87 "BONAIRE",
88 "KAVERI",
89 "KABINI",
90 "HAWAII",
91 "MULLINS",
92 "TOPAZ",
93 "TONGA",
94 "FIJI",
95 "CARRIZO",
96 "STONEY",
97 "POLARIS10",
98 "POLARIS11",
99 "POLARIS12",
100 "VEGAM",
101 "VEGA10",
102 "VEGA12",
103 "VEGA20",
104 "RAVEN",
105 "ARCTURUS",
106 "RENOIR",
107 "NAVI10",
108 "NAVI14",
109 "NAVI12",
110 "LAST",
111};
112
113/**
114 * DOC: pcie_replay_count
115 *
116 * The amdgpu driver provides a sysfs API for reporting the total number
117 * of PCIe replays (NAKs)
118 * The file pcie_replay_count is used for this and returns the total
119 * number of replays as a sum of the NAKs generated and NAKs received
120 */
121
122static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
123 struct device_attribute *attr, char *buf)
124{
125 struct drm_device *ddev = dev_get_drvdata(dev);
126 struct amdgpu_device *adev = ddev->dev_private;
127 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
128
129 return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
130}
131
132static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
133 amdgpu_device_get_pcie_replay_count, NULL);
134
135static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
136
137/**
138 * amdgpu_device_is_px - Is the device is a dGPU with HG/PX power control
139 *
140 * @dev: drm_device pointer
141 *
142 * Returns true if the device is a dGPU with HG/PX power control,
143 * otherwise return false.
144 */
145bool amdgpu_device_is_px(struct drm_device *dev)
146{
147 struct amdgpu_device *adev = dev->dev_private;
148
149 if (adev->flags & AMD_IS_PX)
150 return true;
151 return false;
152}
153
154/*
155 * MMIO register access helper functions.
156 */
157/**
158 * amdgpu_mm_rreg - read a memory mapped IO register
159 *
160 * @adev: amdgpu_device pointer
161 * @reg: dword aligned register offset
162 * @acc_flags: access flags which require special behavior
163 *
164 * Returns the 32 bit value from the offset specified.
165 */
166uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
167 uint32_t acc_flags)
168{
169 uint32_t ret;
170
171 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
172 return amdgpu_virt_kiq_rreg(adev, reg);
173
174 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
175 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
176 else {
177 unsigned long flags;
178
179 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
180 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
181 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
182 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
183 }
184 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
185 return ret;
186}
187
188/*
189 * MMIO register read with bytes helper functions
190 * @offset:bytes offset from MMIO start
191 *
192*/
193
194/**
195 * amdgpu_mm_rreg8 - read a memory mapped IO register
196 *
197 * @adev: amdgpu_device pointer
198 * @offset: byte aligned register offset
199 *
200 * Returns the 8 bit value from the offset specified.
201 */
202uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
203 if (offset < adev->rmmio_size)
204 return (readb(adev->rmmio + offset));
205 BUG();
206}
207
208/*
209 * MMIO register write with bytes helper functions
210 * @offset:bytes offset from MMIO start
211 * @value: the value want to be written to the register
212 *
213*/
214/**
215 * amdgpu_mm_wreg8 - read a memory mapped IO register
216 *
217 * @adev: amdgpu_device pointer
218 * @offset: byte aligned register offset
219 * @value: 8 bit value to write
220 *
221 * Writes the value specified to the offset specified.
222 */
223void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
224 if (offset < adev->rmmio_size)
225 writeb(value, adev->rmmio + offset);
226 else
227 BUG();
228}
229
230/**
231 * amdgpu_mm_wreg - write to a memory mapped IO register
232 *
233 * @adev: amdgpu_device pointer
234 * @reg: dword aligned register offset
235 * @v: 32 bit value to write to the register
236 * @acc_flags: access flags which require special behavior
237 *
238 * Writes the value specified to the offset specified.
239 */
240void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
241 uint32_t acc_flags)
242{
243 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
244
245 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
246 adev->last_mm_index = v;
247 }
248
249 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
250 return amdgpu_virt_kiq_wreg(adev, reg, v);
251
252 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
253 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
254 else {
255 unsigned long flags;
256
257 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
258 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
259 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
260 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
261 }
262
263 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
264 udelay(500);
265 }
266}
267
268/**
269 * amdgpu_io_rreg - read an IO register
270 *
271 * @adev: amdgpu_device pointer
272 * @reg: dword aligned register offset
273 *
274 * Returns the 32 bit value from the offset specified.
275 */
276u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
277{
278 if ((reg * 4) < adev->rio_mem_size)
279 return ioread32(adev->rio_mem + (reg * 4));
280 else {
281 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
282 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
283 }
284}
285
286/**
287 * amdgpu_io_wreg - write to an IO register
288 *
289 * @adev: amdgpu_device pointer
290 * @reg: dword aligned register offset
291 * @v: 32 bit value to write to the register
292 *
293 * Writes the value specified to the offset specified.
294 */
295void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
296{
297 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
298 adev->last_mm_index = v;
299 }
300
301 if ((reg * 4) < adev->rio_mem_size)
302 iowrite32(v, adev->rio_mem + (reg * 4));
303 else {
304 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
305 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
306 }
307
308 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
309 udelay(500);
310 }
311}
312
313/**
314 * amdgpu_mm_rdoorbell - read a doorbell dword
315 *
316 * @adev: amdgpu_device pointer
317 * @index: doorbell index
318 *
319 * Returns the value in the doorbell aperture at the
320 * requested doorbell index (CIK).
321 */
322u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
323{
324 if (index < adev->doorbell.num_doorbells) {
325 return readl(adev->doorbell.ptr + index);
326 } else {
327 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
328 return 0;
329 }
330}
331
332/**
333 * amdgpu_mm_wdoorbell - write a doorbell dword
334 *
335 * @adev: amdgpu_device pointer
336 * @index: doorbell index
337 * @v: value to write
338 *
339 * Writes @v to the doorbell aperture at the
340 * requested doorbell index (CIK).
341 */
342void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
343{
344 if (index < adev->doorbell.num_doorbells) {
345 writel(v, adev->doorbell.ptr + index);
346 } else {
347 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
348 }
349}
350
351/**
352 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
353 *
354 * @adev: amdgpu_device pointer
355 * @index: doorbell index
356 *
357 * Returns the value in the doorbell aperture at the
358 * requested doorbell index (VEGA10+).
359 */
360u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
361{
362 if (index < adev->doorbell.num_doorbells) {
363 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
364 } else {
365 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
366 return 0;
367 }
368}
369
370/**
371 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
372 *
373 * @adev: amdgpu_device pointer
374 * @index: doorbell index
375 * @v: value to write
376 *
377 * Writes @v to the doorbell aperture at the
378 * requested doorbell index (VEGA10+).
379 */
380void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
381{
382 if (index < adev->doorbell.num_doorbells) {
383 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
384 } else {
385 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
386 }
387}
388
389/**
390 * amdgpu_invalid_rreg - dummy reg read function
391 *
392 * @adev: amdgpu device pointer
393 * @reg: offset of register
394 *
395 * Dummy register read function. Used for register blocks
396 * that certain asics don't have (all asics).
397 * Returns the value in the register.
398 */
399static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
400{
401 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
402 BUG();
403 return 0;
404}
405
406/**
407 * amdgpu_invalid_wreg - dummy reg write function
408 *
409 * @adev: amdgpu device pointer
410 * @reg: offset of register
411 * @v: value to write to the register
412 *
413 * Dummy register read function. Used for register blocks
414 * that certain asics don't have (all asics).
415 */
416static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
417{
418 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
419 reg, v);
420 BUG();
421}
422
423/**
424 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
425 *
426 * @adev: amdgpu device pointer
427 * @reg: offset of register
428 *
429 * Dummy register read function. Used for register blocks
430 * that certain asics don't have (all asics).
431 * Returns the value in the register.
432 */
433static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
434{
435 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
436 BUG();
437 return 0;
438}
439
440/**
441 * amdgpu_invalid_wreg64 - dummy reg write function
442 *
443 * @adev: amdgpu device pointer
444 * @reg: offset of register
445 * @v: value to write to the register
446 *
447 * Dummy register read function. Used for register blocks
448 * that certain asics don't have (all asics).
449 */
450static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
451{
452 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
453 reg, v);
454 BUG();
455}
456
457/**
458 * amdgpu_block_invalid_rreg - dummy reg read function
459 *
460 * @adev: amdgpu device pointer
461 * @block: offset of instance
462 * @reg: offset of register
463 *
464 * Dummy register read function. Used for register blocks
465 * that certain asics don't have (all asics).
466 * Returns the value in the register.
467 */
468static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
469 uint32_t block, uint32_t reg)
470{
471 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
472 reg, block);
473 BUG();
474 return 0;
475}
476
477/**
478 * amdgpu_block_invalid_wreg - dummy reg write function
479 *
480 * @adev: amdgpu device pointer
481 * @block: offset of instance
482 * @reg: offset of register
483 * @v: value to write to the register
484 *
485 * Dummy register read function. Used for register blocks
486 * that certain asics don't have (all asics).
487 */
488static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
489 uint32_t block,
490 uint32_t reg, uint32_t v)
491{
492 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
493 reg, block, v);
494 BUG();
495}
496
497/**
498 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
499 *
500 * @adev: amdgpu device pointer
501 *
502 * Allocates a scratch page of VRAM for use by various things in the
503 * driver.
504 */
505static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
506{
507 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
508 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
509 &adev->vram_scratch.robj,
510 &adev->vram_scratch.gpu_addr,
511 (void **)&adev->vram_scratch.ptr);
512}
513
514/**
515 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
516 *
517 * @adev: amdgpu device pointer
518 *
519 * Frees the VRAM scratch page.
520 */
521static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
522{
523 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
524}
525
526/**
527 * amdgpu_device_program_register_sequence - program an array of registers.
528 *
529 * @adev: amdgpu_device pointer
530 * @registers: pointer to the register array
531 * @array_size: size of the register array
532 *
533 * Programs an array or registers with and and or masks.
534 * This is a helper for setting golden registers.
535 */
536void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
537 const u32 *registers,
538 const u32 array_size)
539{
540 u32 tmp, reg, and_mask, or_mask;
541 int i;
542
543 if (array_size % 3)
544 return;
545
546 for (i = 0; i < array_size; i +=3) {
547 reg = registers[i + 0];
548 and_mask = registers[i + 1];
549 or_mask = registers[i + 2];
550
551 if (and_mask == 0xffffffff) {
552 tmp = or_mask;
553 } else {
554 tmp = RREG32(reg);
555 tmp &= ~and_mask;
556 if (adev->family >= AMDGPU_FAMILY_AI)
557 tmp |= (or_mask & and_mask);
558 else
559 tmp |= or_mask;
560 }
561 WREG32(reg, tmp);
562 }
563}
564
565/**
566 * amdgpu_device_pci_config_reset - reset the GPU
567 *
568 * @adev: amdgpu_device pointer
569 *
570 * Resets the GPU using the pci config reset sequence.
571 * Only applicable to asics prior to vega10.
572 */
573void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
574{
575 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
576}
577
578/*
579 * GPU doorbell aperture helpers function.
580 */
581/**
582 * amdgpu_device_doorbell_init - Init doorbell driver information.
583 *
584 * @adev: amdgpu_device pointer
585 *
586 * Init doorbell driver information (CIK)
587 * Returns 0 on success, error on failure.
588 */
589static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
590{
591
592 /* No doorbell on SI hardware generation */
593 if (adev->asic_type < CHIP_BONAIRE) {
594 adev->doorbell.base = 0;
595 adev->doorbell.size = 0;
596 adev->doorbell.num_doorbells = 0;
597 adev->doorbell.ptr = NULL;
598 return 0;
599 }
600
601 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
602 return -EINVAL;
603
604 amdgpu_asic_init_doorbell_index(adev);
605
606 /* doorbell bar mapping */
607 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
608 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
609
610 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
611 adev->doorbell_index.max_assignment+1);
612 if (adev->doorbell.num_doorbells == 0)
613 return -EINVAL;
614
615 /* For Vega, reserve and map two pages on doorbell BAR since SDMA
616 * paging queue doorbell use the second page. The
617 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
618 * doorbells are in the first page. So with paging queue enabled,
619 * the max num_doorbells should + 1 page (0x400 in dword)
620 */
621 if (adev->asic_type >= CHIP_VEGA10)
622 adev->doorbell.num_doorbells += 0x400;
623
624 adev->doorbell.ptr = ioremap(adev->doorbell.base,
625 adev->doorbell.num_doorbells *
626 sizeof(u32));
627 if (adev->doorbell.ptr == NULL)
628 return -ENOMEM;
629
630 return 0;
631}
632
633/**
634 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
635 *
636 * @adev: amdgpu_device pointer
637 *
638 * Tear down doorbell driver information (CIK)
639 */
640static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
641{
642 iounmap(adev->doorbell.ptr);
643 adev->doorbell.ptr = NULL;
644}
645
646
647
648/*
649 * amdgpu_device_wb_*()
650 * Writeback is the method by which the GPU updates special pages in memory
651 * with the status of certain GPU events (fences, ring pointers,etc.).
652 */
653
654/**
655 * amdgpu_device_wb_fini - Disable Writeback and free memory
656 *
657 * @adev: amdgpu_device pointer
658 *
659 * Disables Writeback and frees the Writeback memory (all asics).
660 * Used at driver shutdown.
661 */
662static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
663{
664 if (adev->wb.wb_obj) {
665 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
666 &adev->wb.gpu_addr,
667 (void **)&adev->wb.wb);
668 adev->wb.wb_obj = NULL;
669 }
670}
671
672/**
673 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
674 *
675 * @adev: amdgpu_device pointer
676 *
677 * Initializes writeback and allocates writeback memory (all asics).
678 * Used at driver startup.
679 * Returns 0 on success or an -error on failure.
680 */
681static int amdgpu_device_wb_init(struct amdgpu_device *adev)
682{
683 int r;
684
685 if (adev->wb.wb_obj == NULL) {
686 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
687 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
688 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
689 &adev->wb.wb_obj, &adev->wb.gpu_addr,
690 (void **)&adev->wb.wb);
691 if (r) {
692 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
693 return r;
694 }
695
696 adev->wb.num_wb = AMDGPU_MAX_WB;
697 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
698
699 /* clear wb memory */
700 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
701 }
702
703 return 0;
704}
705
706/**
707 * amdgpu_device_wb_get - Allocate a wb entry
708 *
709 * @adev: amdgpu_device pointer
710 * @wb: wb index
711 *
712 * Allocate a wb slot for use by the driver (all asics).
713 * Returns 0 on success or -EINVAL on failure.
714 */
715int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
716{
717 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
718
719 if (offset < adev->wb.num_wb) {
720 __set_bit(offset, adev->wb.used);
721 *wb = offset << 3; /* convert to dw offset */
722 return 0;
723 } else {
724 return -EINVAL;
725 }
726}
727
728/**
729 * amdgpu_device_wb_free - Free a wb entry
730 *
731 * @adev: amdgpu_device pointer
732 * @wb: wb index
733 *
734 * Free a wb slot allocated for use by the driver (all asics)
735 */
736void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
737{
738 wb >>= 3;
739 if (wb < adev->wb.num_wb)
740 __clear_bit(wb, adev->wb.used);
741}
742
743/**
744 * amdgpu_device_resize_fb_bar - try to resize FB BAR
745 *
746 * @adev: amdgpu_device pointer
747 *
748 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
749 * to fail, but if any of the BARs is not accessible after the size we abort
750 * driver loading by returning -ENODEV.
751 */
752int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
753{
754 u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
755 u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
756 struct pci_bus *root;
757 struct resource *res;
758 unsigned i;
759 u16 cmd;
760 int r;
761
762 /* Bypass for VF */
763 if (amdgpu_sriov_vf(adev))
764 return 0;
765
766 /* Check if the root BUS has 64bit memory resources */
767 root = adev->pdev->bus;
768 while (root->parent)
769 root = root->parent;
770
771 pci_bus_for_each_resource(root, res, i) {
772 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
773 res->start > 0x100000000ull)
774 break;
775 }
776
777 /* Trying to resize is pointless without a root hub window above 4GB */
778 if (!res)
779 return 0;
780
781 /* Disable memory decoding while we change the BAR addresses and size */
782 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
783 pci_write_config_word(adev->pdev, PCI_COMMAND,
784 cmd & ~PCI_COMMAND_MEMORY);
785
786 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
787 amdgpu_device_doorbell_fini(adev);
788 if (adev->asic_type >= CHIP_BONAIRE)
789 pci_release_resource(adev->pdev, 2);
790
791 pci_release_resource(adev->pdev, 0);
792
793 r = pci_resize_resource(adev->pdev, 0, rbar_size);
794 if (r == -ENOSPC)
795 DRM_INFO("Not enough PCI address space for a large BAR.");
796 else if (r && r != -ENOTSUPP)
797 DRM_ERROR("Problem resizing BAR0 (%d).", r);
798
799 pci_assign_unassigned_bus_resources(adev->pdev->bus);
800
801 /* When the doorbell or fb BAR isn't available we have no chance of
802 * using the device.
803 */
804 r = amdgpu_device_doorbell_init(adev);
805 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
806 return -ENODEV;
807
808 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
809
810 return 0;
811}
812
813/*
814 * GPU helpers function.
815 */
816/**
817 * amdgpu_device_need_post - check if the hw need post or not
818 *
819 * @adev: amdgpu_device pointer
820 *
821 * Check if the asic has been initialized (all asics) at driver startup
822 * or post is needed if hw reset is performed.
823 * Returns true if need or false if not.
824 */
825bool amdgpu_device_need_post(struct amdgpu_device *adev)
826{
827 uint32_t reg;
828
829 if (amdgpu_sriov_vf(adev))
830 return false;
831
832 if (amdgpu_passthrough(adev)) {
833 /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
834 * some old smc fw still need driver do vPost otherwise gpu hang, while
835 * those smc fw version above 22.15 doesn't have this flaw, so we force
836 * vpost executed for smc version below 22.15
837 */
838 if (adev->asic_type == CHIP_FIJI) {
839 int err;
840 uint32_t fw_ver;
841 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
842 /* force vPost if error occured */
843 if (err)
844 return true;
845
846 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
847 if (fw_ver < 0x00160e00)
848 return true;
849 }
850 }
851
852 if (adev->has_hw_reset) {
853 adev->has_hw_reset = false;
854 return true;
855 }
856
857 /* bios scratch used on CIK+ */
858 if (adev->asic_type >= CHIP_BONAIRE)
859 return amdgpu_atombios_scratch_need_asic_init(adev);
860
861 /* check MEM_SIZE for older asics */
862 reg = amdgpu_asic_get_config_memsize(adev);
863
864 if ((reg != 0) && (reg != 0xffffffff))
865 return false;
866
867 return true;
868}
869
870/* if we get transitioned to only one device, take VGA back */
871/**
872 * amdgpu_device_vga_set_decode - enable/disable vga decode
873 *
874 * @cookie: amdgpu_device pointer
875 * @state: enable/disable vga decode
876 *
877 * Enable/disable vga decode (all asics).
878 * Returns VGA resource flags.
879 */
880static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
881{
882 struct amdgpu_device *adev = cookie;
883 amdgpu_asic_set_vga_state(adev, state);
884 if (state)
885 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
886 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
887 else
888 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
889}
890
891/**
892 * amdgpu_device_check_block_size - validate the vm block size
893 *
894 * @adev: amdgpu_device pointer
895 *
896 * Validates the vm block size specified via module parameter.
897 * The vm block size defines number of bits in page table versus page directory,
898 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
899 * page table and the remaining bits are in the page directory.
900 */
901static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
902{
903 /* defines number of bits in page table versus page directory,
904 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
905 * page table and the remaining bits are in the page directory */
906 if (amdgpu_vm_block_size == -1)
907 return;
908
909 if (amdgpu_vm_block_size < 9) {
910 dev_warn(adev->dev, "VM page table size (%d) too small\n",
911 amdgpu_vm_block_size);
912 amdgpu_vm_block_size = -1;
913 }
914}
915
916/**
917 * amdgpu_device_check_vm_size - validate the vm size
918 *
919 * @adev: amdgpu_device pointer
920 *
921 * Validates the vm size in GB specified via module parameter.
922 * The VM size is the size of the GPU virtual memory space in GB.
923 */
924static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
925{
926 /* no need to check the default value */
927 if (amdgpu_vm_size == -1)
928 return;
929
930 if (amdgpu_vm_size < 1) {
931 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
932 amdgpu_vm_size);
933 amdgpu_vm_size = -1;
934 }
935}
936
937static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
938{
939 struct sysinfo si;
940 bool is_os_64 = (sizeof(void *) == 8) ? true : false;
941 uint64_t total_memory;
942 uint64_t dram_size_seven_GB = 0x1B8000000;
943 uint64_t dram_size_three_GB = 0xB8000000;
944
945 if (amdgpu_smu_memory_pool_size == 0)
946 return;
947
948 if (!is_os_64) {
949 DRM_WARN("Not 64-bit OS, feature not supported\n");
950 goto def_value;
951 }
952 si_meminfo(&si);
953 total_memory = (uint64_t)si.totalram * si.mem_unit;
954
955 if ((amdgpu_smu_memory_pool_size == 1) ||
956 (amdgpu_smu_memory_pool_size == 2)) {
957 if (total_memory < dram_size_three_GB)
958 goto def_value1;
959 } else if ((amdgpu_smu_memory_pool_size == 4) ||
960 (amdgpu_smu_memory_pool_size == 8)) {
961 if (total_memory < dram_size_seven_GB)
962 goto def_value1;
963 } else {
964 DRM_WARN("Smu memory pool size not supported\n");
965 goto def_value;
966 }
967 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
968
969 return;
970
971def_value1:
972 DRM_WARN("No enough system memory\n");
973def_value:
974 adev->pm.smu_prv_buffer_size = 0;
975}
976
977/**
978 * amdgpu_device_check_arguments - validate module params
979 *
980 * @adev: amdgpu_device pointer
981 *
982 * Validates certain module parameters and updates
983 * the associated values used by the driver (all asics).
984 */
985static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
986{
987 int ret = 0;
988
989 if (amdgpu_sched_jobs < 4) {
990 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
991 amdgpu_sched_jobs);
992 amdgpu_sched_jobs = 4;
993 } else if (!is_power_of_2(amdgpu_sched_jobs)){
994 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
995 amdgpu_sched_jobs);
996 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
997 }
998
999 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1000 /* gart size must be greater or equal to 32M */
1001 dev_warn(adev->dev, "gart size (%d) too small\n",
1002 amdgpu_gart_size);
1003 amdgpu_gart_size = -1;
1004 }
1005
1006 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1007 /* gtt size must be greater or equal to 32M */
1008 dev_warn(adev->dev, "gtt size (%d) too small\n",
1009 amdgpu_gtt_size);
1010 amdgpu_gtt_size = -1;
1011 }
1012
1013 /* valid range is between 4 and 9 inclusive */
1014 if (amdgpu_vm_fragment_size != -1 &&
1015 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1016 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1017 amdgpu_vm_fragment_size = -1;
1018 }
1019
1020 amdgpu_device_check_smu_prv_buffer_size(adev);
1021
1022 amdgpu_device_check_vm_size(adev);
1023
1024 amdgpu_device_check_block_size(adev);
1025
1026 ret = amdgpu_device_get_job_timeout_settings(adev);
1027 if (ret) {
1028 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
1029 return ret;
1030 }
1031
1032 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1033
1034 return ret;
1035}
1036
1037/**
1038 * amdgpu_switcheroo_set_state - set switcheroo state
1039 *
1040 * @pdev: pci dev pointer
1041 * @state: vga_switcheroo state
1042 *
1043 * Callback for the switcheroo driver. Suspends or resumes the
1044 * the asics before or after it is powered up using ACPI methods.
1045 */
1046static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1047{
1048 struct drm_device *dev = pci_get_drvdata(pdev);
1049
1050 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1051 return;
1052
1053 if (state == VGA_SWITCHEROO_ON) {
1054 pr_info("amdgpu: switched on\n");
1055 /* don't suspend or resume card normally */
1056 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1057
1058 amdgpu_device_resume(dev, true, true);
1059
1060 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1061 drm_kms_helper_poll_enable(dev);
1062 } else {
1063 pr_info("amdgpu: switched off\n");
1064 drm_kms_helper_poll_disable(dev);
1065 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1066 amdgpu_device_suspend(dev, true, true);
1067 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1068 }
1069}
1070
1071/**
1072 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1073 *
1074 * @pdev: pci dev pointer
1075 *
1076 * Callback for the switcheroo driver. Checks if the switcheroo
1077 * state can be changed.
1078 * Returns true if the state can be changed, false if not.
1079 */
1080static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1081{
1082 struct drm_device *dev = pci_get_drvdata(pdev);
1083
1084 /*
1085 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1086 * locking inversion with the driver load path. And the access here is
1087 * completely racy anyway. So don't bother with locking for now.
1088 */
1089 return dev->open_count == 0;
1090}
1091
1092static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1093 .set_gpu_state = amdgpu_switcheroo_set_state,
1094 .reprobe = NULL,
1095 .can_switch = amdgpu_switcheroo_can_switch,
1096};
1097
1098/**
1099 * amdgpu_device_ip_set_clockgating_state - set the CG state
1100 *
1101 * @dev: amdgpu_device pointer
1102 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1103 * @state: clockgating state (gate or ungate)
1104 *
1105 * Sets the requested clockgating state for all instances of
1106 * the hardware IP specified.
1107 * Returns the error code from the last instance.
1108 */
1109int amdgpu_device_ip_set_clockgating_state(void *dev,
1110 enum amd_ip_block_type block_type,
1111 enum amd_clockgating_state state)
1112{
1113 struct amdgpu_device *adev = dev;
1114 int i, r = 0;
1115
1116 for (i = 0; i < adev->num_ip_blocks; i++) {
1117 if (!adev->ip_blocks[i].status.valid)
1118 continue;
1119 if (adev->ip_blocks[i].version->type != block_type)
1120 continue;
1121 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1122 continue;
1123 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1124 (void *)adev, state);
1125 if (r)
1126 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1127 adev->ip_blocks[i].version->funcs->name, r);
1128 }
1129 return r;
1130}
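
/*
 * Illustrative usage sketch (not part of the original source): a caller that
 * needs GFX clocks ungated before touching clock registers could do:
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						   AMD_CG_STATE_UNGATE);
 *	if (r)
 *		DRM_ERROR("failed to ungate GFX clockgating (%d)\n", r);
 */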
1131
1132/**
1133 * amdgpu_device_ip_set_powergating_state - set the PG state
1134 *
1135 * @dev: amdgpu_device pointer
1136 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1137 * @state: powergating state (gate or ungate)
1138 *
1139 * Sets the requested powergating state for all instances of
1140 * the hardware IP specified.
1141 * Returns the error code from the last instance.
1142 */
1143int amdgpu_device_ip_set_powergating_state(void *dev,
1144 enum amd_ip_block_type block_type,
1145 enum amd_powergating_state state)
1146{
1147 struct amdgpu_device *adev = dev;
1148 int i, r = 0;
1149
1150 for (i = 0; i < adev->num_ip_blocks; i++) {
1151 if (!adev->ip_blocks[i].status.valid)
1152 continue;
1153 if (adev->ip_blocks[i].version->type != block_type)
1154 continue;
1155 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1156 continue;
1157 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1158 (void *)adev, state);
1159 if (r)
1160 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1161 adev->ip_blocks[i].version->funcs->name, r);
1162 }
1163 return r;
1164}
1165
1166/**
1167 * amdgpu_device_ip_get_clockgating_state - get the CG state
1168 *
1169 * @adev: amdgpu_device pointer
1170 * @flags: clockgating feature flags
1171 *
1172 * Walks the list of IPs on the device and updates the clockgating
1173 * flags for each IP.
1174 * Updates @flags with the feature flags for each hardware IP where
1175 * clockgating is enabled.
1176 */
1177void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1178 u32 *flags)
1179{
1180 int i;
1181
1182 for (i = 0; i < adev->num_ip_blocks; i++) {
1183 if (!adev->ip_blocks[i].status.valid)
1184 continue;
1185 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1186 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1187 }
1188}
1189
1190/**
1191 * amdgpu_device_ip_wait_for_idle - wait for idle
1192 *
1193 * @adev: amdgpu_device pointer
1194 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1195 *
1196 * Waits for the requested hardware IP to be idle.
1197 * Returns 0 for success or a negative error code on failure.
1198 */
1199int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1200 enum amd_ip_block_type block_type)
1201{
1202 int i, r;
1203
1204 for (i = 0; i < adev->num_ip_blocks; i++) {
1205 if (!adev->ip_blocks[i].status.valid)
1206 continue;
1207 if (adev->ip_blocks[i].version->type == block_type) {
1208 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1209 if (r)
1210 return r;
1211 break;
1212 }
1213 }
1214 return 0;
1215
1216}
1217
1218/**
1219 * amdgpu_device_ip_is_idle - is the hardware IP idle
1220 *
1221 * @adev: amdgpu_device pointer
1222 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1223 *
1224 * Check if the hardware IP is idle or not.
1225 * Returns true if the IP is idle, false if not.
1226 */
1227bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1228 enum amd_ip_block_type block_type)
1229{
1230 int i;
1231
1232 for (i = 0; i < adev->num_ip_blocks; i++) {
1233 if (!adev->ip_blocks[i].status.valid)
1234 continue;
1235 if (adev->ip_blocks[i].version->type == block_type)
1236 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1237 }
1238 return true;
1239
1240}
1241
1242/**
1243 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1244 *
1245 * @adev: amdgpu_device pointer
1246 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1247 *
1248 * Returns a pointer to the hardware IP block structure
1249 * if it exists for the asic, otherwise NULL.
1250 */
1251struct amdgpu_ip_block *
1252amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1253 enum amd_ip_block_type type)
1254{
1255 int i;
1256
1257 for (i = 0; i < adev->num_ip_blocks; i++)
1258 if (adev->ip_blocks[i].version->type == type)
1259 return &adev->ip_blocks[i];
1260
1261 return NULL;
1262}
1263
1264/**
1265 * amdgpu_device_ip_block_version_cmp
1266 *
1267 * @adev: amdgpu_device pointer
1268 * @type: enum amd_ip_block_type
1269 * @major: major version
1270 * @minor: minor version
1271 *
1272 * Returns 0 if the IP block's version is equal to or greater than the
1273 * requested version, 1 if it is lower or the ip_block doesn't exist.
1274 */
1275int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1276 enum amd_ip_block_type type,
1277 u32 major, u32 minor)
1278{
1279 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1280
1281 if (ip_block && ((ip_block->version->major > major) ||
1282 ((ip_block->version->major == major) &&
1283 (ip_block->version->minor >= minor))))
1284 return 0;
1285
1286 return 1;
1287}
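
/*
 * Illustrative usage sketch (not part of the original source): a return value
 * of 0 means the IP block is present at or above the requested version, e.g.
 * guarding a hypothetical feature flag:
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 1))
 *		use_new_feature = true;
 */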
1288
1289/**
1290 * amdgpu_device_ip_block_add
1291 *
1292 * @adev: amdgpu_device pointer
1293 * @ip_block_version: pointer to the IP to add
1294 *
1295 * Adds the IP block driver information to the collection of IPs
1296 * on the asic.
1297 */
1298int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1299 const struct amdgpu_ip_block_version *ip_block_version)
1300{
1301 if (!ip_block_version)
1302 return -EINVAL;
1303
1304 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1305 ip_block_version->funcs->name);
1306
1307 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1308
1309 return 0;
1310}
1311
1312/**
1313 * amdgpu_device_enable_virtual_display - enable virtual display feature
1314 *
1315 * @adev: amdgpu_device pointer
1316 *
1317 * Enables the virtual display feature if the user has enabled it via
1318 * the module parameter virtual_display. This feature provides a virtual
1319 * display hardware on headless boards or in virtualized environments.
1320 * This function parses and validates the configuration string specified by
1321 * the user and configures the virtual display configuration (number of
1322 * virtual connectors, crtcs, etc.) specified.
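 * For example, virtual_display=0000:04:00.0,2 enables two virtual CRTCs on
 * that device only, while virtual_display=all,1 enables one virtual CRTC on
 * every amdgpu device.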
1323 */
1324static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1325{
1326 adev->enable_virtual_display = false;
1327
1328 if (amdgpu_virtual_display) {
1329 struct drm_device *ddev = adev->ddev;
1330 const char *pci_address_name = pci_name(ddev->pdev);
1331 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1332
1333 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1334 pciaddstr_tmp = pciaddstr;
1335 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1336 pciaddname = strsep(&pciaddname_tmp, ",");
1337 if (!strcmp("all", pciaddname)
1338 || !strcmp(pci_address_name, pciaddname)) {
1339 long num_crtc;
1340 int res = -1;
1341
1342 adev->enable_virtual_display = true;
1343
1344 if (pciaddname_tmp)
1345 res = kstrtol(pciaddname_tmp, 10,
1346 &num_crtc);
1347
1348 if (!res) {
1349 if (num_crtc < 1)
1350 num_crtc = 1;
1351 if (num_crtc > 6)
1352 num_crtc = 6;
1353 adev->mode_info.num_crtc = num_crtc;
1354 } else {
1355 adev->mode_info.num_crtc = 1;
1356 }
1357 break;
1358 }
1359 }
1360
1361 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1362 amdgpu_virtual_display, pci_address_name,
1363 adev->enable_virtual_display, adev->mode_info.num_crtc);
1364
1365 kfree(pciaddstr);
1366 }
1367}
1368
1369/**
1370 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1371 *
1372 * @adev: amdgpu_device pointer
1373 *
1374 * Parses the asic configuration parameters specified in the gpu info
1375 * firmware and makes them available to the driver for use in configuring
1376 * the asic.
1377 * Returns 0 on success, -EINVAL on failure.
1378 */
1379static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1380{
1381 const char *chip_name;
1382 char fw_name[30];
1383 int err;
1384 const struct gpu_info_firmware_header_v1_0 *hdr;
1385
1386 adev->firmware.gpu_info_fw = NULL;
1387
1388 switch (adev->asic_type) {
1389 case CHIP_TOPAZ:
1390 case CHIP_TONGA:
1391 case CHIP_FIJI:
1392 case CHIP_POLARIS10:
1393 case CHIP_POLARIS11:
1394 case CHIP_POLARIS12:
1395 case CHIP_VEGAM:
1396 case CHIP_CARRIZO:
1397 case CHIP_STONEY:
1398#ifdef CONFIG_DRM_AMDGPU_SI
1399 case CHIP_VERDE:
1400 case CHIP_TAHITI:
1401 case CHIP_PITCAIRN:
1402 case CHIP_OLAND:
1403 case CHIP_HAINAN:
1404#endif
1405#ifdef CONFIG_DRM_AMDGPU_CIK
1406 case CHIP_BONAIRE:
1407 case CHIP_HAWAII:
1408 case CHIP_KAVERI:
1409 case CHIP_KABINI:
1410 case CHIP_MULLINS:
1411#endif
1412 case CHIP_VEGA20:
1413 default:
1414 return 0;
1415 case CHIP_VEGA10:
1416 chip_name = "vega10";
1417 break;
1418 case CHIP_VEGA12:
1419 chip_name = "vega12";
1420 break;
1421 case CHIP_RAVEN:
1422 if (adev->rev_id >= 8)
1423 chip_name = "raven2";
1424 else if (adev->pdev->device == 0x15d8)
1425 chip_name = "picasso";
1426 else
1427 chip_name = "raven";
1428 break;
1429 case CHIP_ARCTURUS:
1430 chip_name = "arcturus";
1431 break;
1432 case CHIP_RENOIR:
1433 chip_name = "renoir";
1434 break;
1435 case CHIP_NAVI10:
1436 chip_name = "navi10";
1437 break;
1438 case CHIP_NAVI14:
1439 chip_name = "navi14";
1440 break;
1441 case CHIP_NAVI12:
1442 chip_name = "navi12";
1443 break;
1444 }
1445
1446 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1447 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1448 if (err) {
1449 dev_err(adev->dev,
1450 "Failed to load gpu_info firmware \"%s\"\n",
1451 fw_name);
1452 goto out;
1453 }
1454 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1455 if (err) {
1456 dev_err(adev->dev,
1457 "Failed to validate gpu_info firmware \"%s\"\n",
1458 fw_name);
1459 goto out;
1460 }
1461
1462 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1463 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1464
1465 switch (hdr->version_major) {
1466 case 1:
1467 {
1468 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1469 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1470 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1471
1472 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1473 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1474 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1475 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1476 adev->gfx.config.max_texture_channel_caches =
1477 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1478 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1479 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1480 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1481 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1482 adev->gfx.config.double_offchip_lds_buf =
1483 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1484 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1485 adev->gfx.cu_info.max_waves_per_simd =
1486 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1487 adev->gfx.cu_info.max_scratch_slots_per_cu =
1488 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1489 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1490 if (hdr->version_minor >= 1) {
1491 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1492 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1493 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1494 adev->gfx.config.num_sc_per_sh =
1495 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1496 adev->gfx.config.num_packer_per_sc =
1497 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1498 }
1499#ifdef CONFIG_DRM_AMD_DC_DCN2_0
1500 if (hdr->version_minor == 2) {
1501 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1502 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1503 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1504 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1505 }
1506#endif
1507 break;
1508 }
1509 default:
1510 dev_err(adev->dev,
1511 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1512 err = -EINVAL;
1513 goto out;
1514 }
1515out:
1516 return err;
1517}
1518
1519/**
1520 * amdgpu_device_ip_early_init - run early init for hardware IPs
1521 *
1522 * @adev: amdgpu_device pointer
1523 *
1524 * Early initialization pass for hardware IPs. The hardware IPs that make
1525 * up each asic are discovered and each IP's early_init callback is run. This
1526 * is the first stage in initializing the asic.
1527 * Returns 0 on success, negative error code on failure.
1528 */
1529static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1530{
1531 int i, r;
1532
1533 amdgpu_device_enable_virtual_display(adev);
1534
1535 switch (adev->asic_type) {
1536 case CHIP_TOPAZ:
1537 case CHIP_TONGA:
1538 case CHIP_FIJI:
1539 case CHIP_POLARIS10:
1540 case CHIP_POLARIS11:
1541 case CHIP_POLARIS12:
1542 case CHIP_VEGAM:
1543 case CHIP_CARRIZO:
1544 case CHIP_STONEY:
1545 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1546 adev->family = AMDGPU_FAMILY_CZ;
1547 else
1548 adev->family = AMDGPU_FAMILY_VI;
1549
1550 r = vi_set_ip_blocks(adev);
1551 if (r)
1552 return r;
1553 break;
1554#ifdef CONFIG_DRM_AMDGPU_SI
1555 case CHIP_VERDE:
1556 case CHIP_TAHITI:
1557 case CHIP_PITCAIRN:
1558 case CHIP_OLAND:
1559 case CHIP_HAINAN:
1560 adev->family = AMDGPU_FAMILY_SI;
1561 r = si_set_ip_blocks(adev);
1562 if (r)
1563 return r;
1564 break;
1565#endif
1566#ifdef CONFIG_DRM_AMDGPU_CIK
1567 case CHIP_BONAIRE:
1568 case CHIP_HAWAII:
1569 case CHIP_KAVERI:
1570 case CHIP_KABINI:
1571 case CHIP_MULLINS:
1572 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1573 adev->family = AMDGPU_FAMILY_CI;
1574 else
1575 adev->family = AMDGPU_FAMILY_KV;
1576
1577 r = cik_set_ip_blocks(adev);
1578 if (r)
1579 return r;
1580 break;
1581#endif
1582 case CHIP_VEGA10:
1583 case CHIP_VEGA12:
1584 case CHIP_VEGA20:
1585 case CHIP_RAVEN:
1586 case CHIP_ARCTURUS:
1587 case CHIP_RENOIR:
1588 if (adev->asic_type == CHIP_RAVEN ||
1589 adev->asic_type == CHIP_RENOIR)
1590 adev->family = AMDGPU_FAMILY_RV;
1591 else
1592 adev->family = AMDGPU_FAMILY_AI;
1593
1594 r = soc15_set_ip_blocks(adev);
1595 if (r)
1596 return r;
1597 break;
1598 case CHIP_NAVI10:
1599 case CHIP_NAVI14:
1600 case CHIP_NAVI12:
1601 adev->family = AMDGPU_FAMILY_NV;
1602
1603 r = nv_set_ip_blocks(adev);
1604 if (r)
1605 return r;
1606 break;
1607 default:
1608 /* FIXME: not supported yet */
1609 return -EINVAL;
1610 }
1611
1612 r = amdgpu_device_parse_gpu_info_fw(adev);
1613 if (r)
1614 return r;
1615
1616 amdgpu_amdkfd_device_probe(adev);
1617
1618 if (amdgpu_sriov_vf(adev)) {
1619 r = amdgpu_virt_request_full_gpu(adev, true);
1620 if (r)
1621 return -EAGAIN;
1622 }
1623
1624 adev->pm.pp_feature = amdgpu_pp_feature_mask;
1625 if (amdgpu_sriov_vf(adev))
1626 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1627
1628 for (i = 0; i < adev->num_ip_blocks; i++) {
1629 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1630 DRM_ERROR("disabled ip block: %d <%s>\n",
1631 i, adev->ip_blocks[i].version->funcs->name);
1632 adev->ip_blocks[i].status.valid = false;
1633 } else {
1634 if (adev->ip_blocks[i].version->funcs->early_init) {
1635 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1636 if (r == -ENOENT) {
1637 adev->ip_blocks[i].status.valid = false;
1638 } else if (r) {
1639 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1640 adev->ip_blocks[i].version->funcs->name, r);
1641 return r;
1642 } else {
1643 adev->ip_blocks[i].status.valid = true;
1644 }
1645 } else {
1646 adev->ip_blocks[i].status.valid = true;
1647 }
1648 }
1649 /* get the vbios after the asic_funcs are set up */
1650 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
1651 /* Read BIOS */
1652 if (!amdgpu_get_bios(adev))
1653 return -EINVAL;
1654
1655 r = amdgpu_atombios_init(adev);
1656 if (r) {
1657 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
1658 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
1659 return r;
1660 }
1661 }
1662 }
1663
1664 adev->cg_flags &= amdgpu_cg_mask;
1665 adev->pg_flags &= amdgpu_pg_mask;
1666
1667 return 0;
1668}
1669
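/*
 * Hardware init is split in two phases: phase 1 brings up the COMMON and IH
 * blocks (plus PSP when running as an SR-IOV VF) so that firmware loading can
 * happen in between, and phase 2 then initializes the remaining blocks.
 */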
1670static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
1671{
1672 int i, r;
1673
1674 for (i = 0; i < adev->num_ip_blocks; i++) {
1675 if (!adev->ip_blocks[i].status.sw)
1676 continue;
1677 if (adev->ip_blocks[i].status.hw)
1678 continue;
1679 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1680 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
1681 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
1682 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1683 if (r) {
1684 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1685 adev->ip_blocks[i].version->funcs->name, r);
1686 return r;
1687 }
1688 adev->ip_blocks[i].status.hw = true;
1689 }
1690 }
1691
1692 return 0;
1693}
1694
1695static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
1696{
1697 int i, r;
1698
1699 for (i = 0; i < adev->num_ip_blocks; i++) {
1700 if (!adev->ip_blocks[i].status.sw)
1701 continue;
1702 if (adev->ip_blocks[i].status.hw)
1703 continue;
1704 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1705 if (r) {
1706 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1707 adev->ip_blocks[i].version->funcs->name, r);
1708 return r;
1709 }
1710 adev->ip_blocks[i].status.hw = true;
1711 }
1712
1713 return 0;
1714}
1715
1716static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
1717{
1718 int r = 0;
1719 int i;
1720 uint32_t smu_version;
1721
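	/* PSP-based firmware loading is only used on VEGA10 and newer ASICs */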
1722 if (adev->asic_type >= CHIP_VEGA10) {
1723 for (i = 0; i < adev->num_ip_blocks; i++) {
1724 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
1725 continue;
1726
1727			/* no need to do the fw loading again if already done */
1728			if (adev->ip_blocks[i].status.hw)
1729 break;
1730
1731 if (adev->in_gpu_reset || adev->in_suspend) {
1732 r = adev->ip_blocks[i].version->funcs->resume(adev);
1733 if (r) {
1734 DRM_ERROR("resume of IP block <%s> failed %d\n",
1735 adev->ip_blocks[i].version->funcs->name, r);
1736 return r;
1737 }
1738 } else {
1739 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1740 if (r) {
1741 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1742 adev->ip_blocks[i].version->funcs->name, r);
1743 return r;
1744 }
1745 }
1746
1747 adev->ip_blocks[i].status.hw = true;
1748 break;
1749 }
1750 }
1751
1752 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
1753
1754 return r;
1755}
1756
1757/**
1758 * amdgpu_device_ip_init - run init for hardware IPs
1759 *
1760 * @adev: amdgpu_device pointer
1761 *
1762 * Main initialization pass for hardware IPs. The list of all the hardware
1763 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
1764 * are run. sw_init initializes the software state associated with each IP
1765 * and hw_init initializes the hardware associated with each IP.
1766 * Returns 0 on success, negative error code on failure.
1767 */
1768static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1769{
1770 int i, r;
1771
1772 r = amdgpu_ras_init(adev);
1773 if (r)
1774 return r;
1775
1776 for (i = 0; i < adev->num_ip_blocks; i++) {
1777 if (!adev->ip_blocks[i].status.valid)
1778 continue;
1779 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1780 if (r) {
1781 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1782 adev->ip_blocks[i].version->funcs->name, r);
1783 goto init_failed;
1784 }
1785 adev->ip_blocks[i].status.sw = true;
1786
1787 /* need to do gmc hw init early so we can allocate gpu mem */
1788 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1789 r = amdgpu_device_vram_scratch_init(adev);
1790 if (r) {
1791 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1792 goto init_failed;
1793 }
1794 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1795 if (r) {
1796 DRM_ERROR("hw_init %d failed %d\n", i, r);
1797 goto init_failed;
1798 }
1799 r = amdgpu_device_wb_init(adev);
1800 if (r) {
1801 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
1802 goto init_failed;
1803 }
1804 adev->ip_blocks[i].status.hw = true;
1805
1806 /* right after GMC hw init, we create CSA */
1807 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
1808 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
1809 AMDGPU_GEM_DOMAIN_VRAM,
1810 AMDGPU_CSA_SIZE);
1811 if (r) {
1812 DRM_ERROR("allocate CSA failed %d\n", r);
1813 goto init_failed;
1814 }
1815 }
1816 }
1817 }
1818
1819 r = amdgpu_ib_pool_init(adev);
1820 if (r) {
1821 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
1822 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
1823 goto init_failed;
1824 }
1825
1826 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
1827 if (r)
1828 goto init_failed;
1829
1830 r = amdgpu_device_ip_hw_init_phase1(adev);
1831 if (r)
1832 goto init_failed;
1833
1834 r = amdgpu_device_fw_loading(adev);
1835 if (r)
1836 goto init_failed;
1837
1838 r = amdgpu_device_ip_hw_init_phase2(adev);
1839 if (r)
1840 goto init_failed;
1841
1842 if (adev->gmc.xgmi.num_physical_nodes > 1)
1843 amdgpu_xgmi_add_device(adev);
1844 amdgpu_amdkfd_device_init(adev);
1845
1846init_failed:
1847 if (amdgpu_sriov_vf(adev)) {
1848 if (!r)
1849 amdgpu_virt_init_data_exchange(adev);
1850 amdgpu_virt_release_full_gpu(adev, true);
1851 }
1852
1853 return r;
1854}
1855
1856/**
1857 * amdgpu_device_fill_reset_magic - reads reset magic from the gart pointer
1858 *
1859 * @adev: amdgpu_device pointer
1860 *
1861 * Saves a snapshot of the data at the gart pointer in VRAM. The driver calls
1862 * this function before a GPU reset. If that data is retained after a
1863 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
1864 */
1865static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
1866{
1867 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1868}
1869
1870/**
1871 * amdgpu_device_check_vram_lost - check if vram is valid
1872 *
1873 * @adev: amdgpu_device pointer
1874 *
1875 * Checks the reset magic value written to the gart pointer in VRAM.
1876 * The driver calls this after a GPU reset to see if the contents of
1877 * VRAM have been lost.
1878 * returns true if vram is lost, false if not.
1879 */
1880static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
1881{
1882 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1883 AMDGPU_RESET_MAGIC_NUM);
1884}
1885
1886/**
1887 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
1888 *
1889 * @adev: amdgpu_device pointer
1890 * @state: clockgating state to set (gate or ungate)
1891 *
1892 * The list of all the hardware IPs that make up the asic is walked and the
1893 * set_clockgating_state callbacks are run. During late init this enables
1894 * clockgating; during fini or suspend it disables clockgating.
1895 * Returns 0 on success, negative error code on failure.
1896 */
1897
1898static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
1899 enum amd_clockgating_state state)
1900{
1901 int i, j, r;
1902
1903 if (amdgpu_emu_mode == 1)
1904 return 0;
1905
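	/*
	 * Walk the IP list front to back when gating and back to front when
	 * ungating, mirroring the init/fini ordering used elsewhere.
	 */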
1906 for (j = 0; j < adev->num_ip_blocks; j++) {
1907 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
1908 if (!adev->ip_blocks[i].status.late_initialized)
1909 continue;
1910 /* skip CG for VCE/UVD, it's handled specially */
1911 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1912 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1913 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
1914 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1915 /* enable clockgating to save power */
1916 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1917 state);
1918 if (r) {
1919 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1920 adev->ip_blocks[i].version->funcs->name, r);
1921 return r;
1922 }
1923 }
1924 }
1925
1926 return 0;
1927}
1928
1929static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
1930{
1931 int i, j, r;
1932
1933 if (amdgpu_emu_mode == 1)
1934 return 0;
1935
1936 for (j = 0; j < adev->num_ip_blocks; j++) {
1937 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
1938 if (!adev->ip_blocks[i].status.late_initialized)
1939 continue;
1940		/* skip PG for VCE/UVD, it's handled specially */
1941 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1942 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1943 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
1944 adev->ip_blocks[i].version->funcs->set_powergating_state) {
1945 /* enable powergating to save power */
1946 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
1947 state);
1948 if (r) {
1949 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
1950 adev->ip_blocks[i].version->funcs->name, r);
1951 return r;
1952 }
1953 }
1954 }
1955 return 0;
1956}
1957
1958static int amdgpu_device_enable_mgpu_fan_boost(void)
1959{
1960 struct amdgpu_gpu_instance *gpu_ins;
1961 struct amdgpu_device *adev;
1962 int i, ret = 0;
1963
1964 mutex_lock(&mgpu_info.mutex);
1965
1966 /*
1967 * MGPU fan boost feature should be enabled
1968 * only when there are two or more dGPUs in
1969 * the system
1970 */
1971 if (mgpu_info.num_dgpu < 2)
1972 goto out;
1973
1974 for (i = 0; i < mgpu_info.num_dgpu; i++) {
1975 gpu_ins = &(mgpu_info.gpu_ins[i]);
1976 adev = gpu_ins->adev;
1977 if (!(adev->flags & AMD_IS_APU) &&
1978 !gpu_ins->mgpu_fan_enabled &&
1979 adev->powerplay.pp_funcs &&
1980 adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
1981 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
1982 if (ret)
1983 break;
1984
1985 gpu_ins->mgpu_fan_enabled = 1;
1986 }
1987 }
1988
1989out:
1990 mutex_unlock(&mgpu_info.mutex);
1991
1992 return ret;
1993}
1994
1995/**
1996 * amdgpu_device_ip_late_init - run late init for hardware IPs
1997 *
1998 * @adev: amdgpu_device pointer
1999 *
2000 * Late initialization pass for hardware IPs. The list of all the hardware
2001 * IPs that make up the asic is walked and the late_init callbacks are run.
2002 * late_init covers any special initialization that an IP requires
2003 * after all of them have been initialized or something that needs to happen
2004 * late in the init process.
2005 * Returns 0 on success, negative error code on failure.
2006 */
2007static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2008{
2009 int i = 0, r;
2010
2011 for (i = 0; i < adev->num_ip_blocks; i++) {
2012 if (!adev->ip_blocks[i].status.hw)
2013 continue;
2014 if (adev->ip_blocks[i].version->funcs->late_init) {
2015 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2016 if (r) {
2017 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2018 adev->ip_blocks[i].version->funcs->name, r);
2019 return r;
2020 }
2021 }
2022 adev->ip_blocks[i].status.late_initialized = true;
2023 }
2024
2025 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2026 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2027
2028 amdgpu_device_fill_reset_magic(adev);
2029
2030 r = amdgpu_device_enable_mgpu_fan_boost();
2031 if (r)
2032 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2033
2034 /* set to low pstate by default */
2035 amdgpu_xgmi_set_pstate(adev, 0);
2036
2037 return 0;
2038}
2039
2040/**
2041 * amdgpu_device_ip_fini - run fini for hardware IPs
2042 *
2043 * @adev: amdgpu_device pointer
2044 *
2045 * Main teardown pass for hardware IPs. The list of all the hardware
2046 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2047 * are run. hw_fini tears down the hardware associated with each IP
2048 * and sw_fini tears down any software state associated with each IP.
2049 * Returns 0 on success, negative error code on failure.
2050 */
2051static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2052{
2053 int i, r;
2054
2055 amdgpu_ras_pre_fini(adev);
2056
2057 if (adev->gmc.xgmi.num_physical_nodes > 1)
2058 amdgpu_xgmi_remove_device(adev);
2059
2060 amdgpu_amdkfd_device_fini(adev);
2061
2062 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2063 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2064
2065 /* need to disable SMC first */
2066 for (i = 0; i < adev->num_ip_blocks; i++) {
2067 if (!adev->ip_blocks[i].status.hw)
2068 continue;
2069 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2070 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2071 /* XXX handle errors */
2072 if (r) {
2073 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2074 adev->ip_blocks[i].version->funcs->name, r);
2075 }
2076 adev->ip_blocks[i].status.hw = false;
2077 break;
2078 }
2079 }
2080
2081 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2082 if (!adev->ip_blocks[i].status.hw)
2083 continue;
2084
2085 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2086 /* XXX handle errors */
2087 if (r) {
2088 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2089 adev->ip_blocks[i].version->funcs->name, r);
2090 }
2091
2092 adev->ip_blocks[i].status.hw = false;
2093 }
2094
2095
2096 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2097 if (!adev->ip_blocks[i].status.sw)
2098 continue;
2099
2100 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2101 amdgpu_ucode_free_bo(adev);
2102 amdgpu_free_static_csa(&adev->virt.csa_obj);
2103 amdgpu_device_wb_fini(adev);
2104 amdgpu_device_vram_scratch_fini(adev);
2105 amdgpu_ib_pool_fini(adev);
2106 }
2107
2108 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2109 /* XXX handle errors */
2110 if (r) {
2111 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2112 adev->ip_blocks[i].version->funcs->name, r);
2113 }
2114 adev->ip_blocks[i].status.sw = false;
2115 adev->ip_blocks[i].status.valid = false;
2116 }
2117
2118 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2119 if (!adev->ip_blocks[i].status.late_initialized)
2120 continue;
2121 if (adev->ip_blocks[i].version->funcs->late_fini)
2122 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2123 adev->ip_blocks[i].status.late_initialized = false;
2124 }
2125
2126 amdgpu_ras_fini(adev);
2127
2128 if (amdgpu_sriov_vf(adev))
2129 if (amdgpu_virt_release_full_gpu(adev, false))
2130 DRM_ERROR("failed to release exclusive mode on fini\n");
2131
2132 return 0;
2133}
2134
2135/**
2136 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2137 *
2138 * @work: work_struct.
2139 */
2140static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2141{
2142 struct amdgpu_device *adev =
2143 container_of(work, struct amdgpu_device, delayed_init_work.work);
2144 int r;
2145
2146 r = amdgpu_ib_ring_tests(adev);
2147 if (r)
2148 DRM_ERROR("ib ring test failed (%d).\n", r);
2149}
2150
2151static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2152{
2153 struct amdgpu_device *adev =
2154 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2155
2156 mutex_lock(&adev->gfx.gfx_off_mutex);
2157 if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2158 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2159 adev->gfx.gfx_off_state = true;
2160 }
2161 mutex_unlock(&adev->gfx.gfx_off_mutex);
2162}
2163
2164/**
2165 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2166 *
2167 * @adev: amdgpu_device pointer
2168 *
2169 * First suspend pass for hardware IPs. Clockgating and powergating are
2170 * disabled, then the suspend callbacks are run for the display (DCE) blocks.
2171 * suspend puts the hardware and software state in each IP into a state
2172 * suitable for suspend.
2173 * Returns 0 on success, negative error code on failure.
2174 */
2175static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2176{
2177 int i, r;
2178
2179 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2180 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2181
2182 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2183 if (!adev->ip_blocks[i].status.valid)
2184 continue;
2185 /* displays are handled separately */
2186 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
2188			r = adev->ip_blocks[i].version->funcs->suspend(adev);
2189			/* XXX handle errors */
2190 if (r) {
2191 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2192 adev->ip_blocks[i].version->funcs->name, r);
2193 return r;
2194 }
2195 adev->ip_blocks[i].status.hw = false;
2196 }
2197 }
2198
2199 return 0;
2200}
2201
2202/**
2203 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2204 *
2205 * @adev: amdgpu_device pointer
2206 *
2207 * Second suspend pass for hardware IPs. The list of all the hardware IPs
2208 * that make up the asic is walked and the suspend callbacks are run for all
2209 * blocks except the displays, which were handled in phase 1. suspend puts the
2210 * hardware and software state in each IP into a state suitable for suspend.
2211 * Returns 0 on success, negative error code on failure.
2212 */
2213static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2214{
2215 int i, r;
2216
2217 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2218 if (!adev->ip_blocks[i].status.valid)
2219 continue;
2220 /* displays are handled in phase1 */
2221 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2222 continue;
2224		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2225		/* XXX handle errors */
2226 if (r) {
2227 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2228 adev->ip_blocks[i].version->funcs->name, r);
2229 }
2230 adev->ip_blocks[i].status.hw = false;
2231 /* handle putting the SMC in the appropriate state */
2232 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2233 if (is_support_sw_smu(adev)) {
2234 /* todo */
2235 } else if (adev->powerplay.pp_funcs &&
2236 adev->powerplay.pp_funcs->set_mp1_state) {
2237 r = adev->powerplay.pp_funcs->set_mp1_state(
2238 adev->powerplay.pp_handle,
2239 adev->mp1_state);
2240 if (r) {
2241 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2242 adev->mp1_state, r);
2243 return r;
2244 }
2245 }
2246 }
2247
2248 adev->ip_blocks[i].status.hw = false;
2249 }
2250
2251 return 0;
2252}
2253
2254/**
2255 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2256 *
2257 * @adev: amdgpu_device pointer
2258 *
2259 * Main suspend function for hardware IPs. The list of all the hardware
2260 * IPs that make up the asic is walked, clockgating is disabled and the
2261 * suspend callbacks are run. suspend puts the hardware and software state
2262 * in each IP into a state suitable for suspend.
2263 * Returns 0 on success, negative error code on failure.
2264 */
2265int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2266{
2267 int r;
2268
2269 if (amdgpu_sriov_vf(adev))
2270 amdgpu_virt_request_full_gpu(adev, false);
2271
2272 r = amdgpu_device_ip_suspend_phase1(adev);
2273 if (r)
2274 return r;
2275 r = amdgpu_device_ip_suspend_phase2(adev);
2276
2277 if (amdgpu_sriov_vf(adev))
2278 amdgpu_virt_release_full_gpu(adev, false);
2279
2280 return r;
2281}
2282
2283static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2284{
2285 int i, r;
2286
2287 static enum amd_ip_block_type ip_order[] = {
2288 AMD_IP_BLOCK_TYPE_GMC,
2289 AMD_IP_BLOCK_TYPE_COMMON,
2290 AMD_IP_BLOCK_TYPE_PSP,
2291 AMD_IP_BLOCK_TYPE_IH,
2292 };
2293
2294 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2295 int j;
2296 struct amdgpu_ip_block *block;
2297
2298 for (j = 0; j < adev->num_ip_blocks; j++) {
2299 block = &adev->ip_blocks[j];
2300
2301 block->status.hw = false;
2302 if (block->version->type != ip_order[i] ||
2303 !block->status.valid)
2304 continue;
2305
2306 r = block->version->funcs->hw_init(adev);
2307 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2308 if (r)
2309 return r;
2310 block->status.hw = true;
2311 }
2312 }
2313
2314 return 0;
2315}
2316
2317static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2318{
2319 int i, r;
2320
2321 static enum amd_ip_block_type ip_order[] = {
2322 AMD_IP_BLOCK_TYPE_SMC,
2323 AMD_IP_BLOCK_TYPE_DCE,
2324 AMD_IP_BLOCK_TYPE_GFX,
2325 AMD_IP_BLOCK_TYPE_SDMA,
2326 AMD_IP_BLOCK_TYPE_UVD,
2327 AMD_IP_BLOCK_TYPE_VCE
2328 };
2329
2330 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2331 int j;
2332 struct amdgpu_ip_block *block;
2333
2334 for (j = 0; j < adev->num_ip_blocks; j++) {
2335 block = &adev->ip_blocks[j];
2336
2337 if (block->version->type != ip_order[i] ||
2338 !block->status.valid ||
2339 block->status.hw)
2340 continue;
2341
2342 r = block->version->funcs->hw_init(adev);
2343 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2344 if (r)
2345 return r;
2346 block->status.hw = true;
2347 }
2348 }
2349
2350 return 0;
2351}
2352
2353/**
2354 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2355 *
2356 * @adev: amdgpu_device pointer
2357 *
2358 * First resume function for hardware IPs. The list of all the hardware
2359 * IPs that make up the asic is walked and the resume callbacks are run for
2360 * COMMON, GMC, and IH. resume puts the hardware into a functional state
2361 * after a suspend and updates the software state as necessary. This
2362 * function is also used for restoring the GPU after a GPU reset.
2363 * Returns 0 on success, negative error code on failure.
2364 */
2365static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2366{
2367 int i, r;
2368
2369 for (i = 0; i < adev->num_ip_blocks; i++) {
2370 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2371 continue;
2372 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2373 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2374 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2375
2376 r = adev->ip_blocks[i].version->funcs->resume(adev);
2377 if (r) {
2378 DRM_ERROR("resume of IP block <%s> failed %d\n",
2379 adev->ip_blocks[i].version->funcs->name, r);
2380 return r;
2381 }
2382 adev->ip_blocks[i].status.hw = true;
2383 }
2384 }
2385
2386 return 0;
2387}
2388
2389/**
2390 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2391 *
2392 * @adev: amdgpu_device pointer
2393 *
2394 * Second resume function for hardware IPs. The list of all the hardware
2395 * IPs that make up the asic is walked and the resume callbacks are run for
2396 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
2397 * functional state after a suspend and updates the software state as
2398 * necessary. This function is also used for restoring the GPU after a GPU
2399 * reset.
2400 * Returns 0 on success, negative error code on failure.
2401 */
2402static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2403{
2404 int i, r;
2405
2406 for (i = 0; i < adev->num_ip_blocks; i++) {
2407 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2408 continue;
2409 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2410 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2411 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2412 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2413 continue;
2414 r = adev->ip_blocks[i].version->funcs->resume(adev);
2415 if (r) {
2416 DRM_ERROR("resume of IP block <%s> failed %d\n",
2417 adev->ip_blocks[i].version->funcs->name, r);
2418 return r;
2419 }
2420 adev->ip_blocks[i].status.hw = true;
2421 }
2422
2423 return 0;
2424}
2425
2426/**
2427 * amdgpu_device_ip_resume - run resume for hardware IPs
2428 *
2429 * @adev: amdgpu_device pointer
2430 *
2431 * Main resume function for hardware IPs. The hardware IPs
2432 * are split into two resume functions because they are also used in
2433 * recovering from a GPU reset and some additional steps need to be taken
2434 * between them. In this case (S3/S4) they are
2435 * run sequentially.
2436 * Returns 0 on success, negative error code on failure.
2437 */
2438static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2439{
2440 int r;
2441
2442 r = amdgpu_device_ip_resume_phase1(adev);
2443 if (r)
2444 return r;
2445
2446 r = amdgpu_device_fw_loading(adev);
2447 if (r)
2448 return r;
2449
2450 r = amdgpu_device_ip_resume_phase2(adev);
2451
2452 return r;
2453}
2454
2455/**
2456 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2457 *
2458 * @adev: amdgpu_device pointer
2459 *
2460 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2461 */
2462static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2463{
2464 if (amdgpu_sriov_vf(adev)) {
2465 if (adev->is_atom_fw) {
2466 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2467 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2468 } else {
2469 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2470 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2471 }
2472
2473 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2474 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2475 }
2476}
2477
2478/**
2479 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2480 *
2481 * @asic_type: AMD asic type
2482 *
2483 * Check if there is DC (new modesetting infrastructure) support for an asic.
2484 * returns true if DC has support, false if not.
2485 */
2486bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2487{
2488 switch (asic_type) {
2489#if defined(CONFIG_DRM_AMD_DC)
2490 case CHIP_BONAIRE:
2491 case CHIP_KAVERI:
2492 case CHIP_KABINI:
2493 case CHIP_MULLINS:
2494 /*
2495 * We have systems in the wild with these ASICs that require
2496 * LVDS and VGA support which is not supported with DC.
2497 *
2498 * Fallback to the non-DC driver here by default so as not to
2499 * cause regressions.
2500 */
2501 return amdgpu_dc > 0;
2502 case CHIP_HAWAII:
2503 case CHIP_CARRIZO:
2504 case CHIP_STONEY:
2505 case CHIP_POLARIS10:
2506 case CHIP_POLARIS11:
2507 case CHIP_POLARIS12:
2508 case CHIP_VEGAM:
2509 case CHIP_TONGA:
2510 case CHIP_FIJI:
2511 case CHIP_VEGA10:
2512 case CHIP_VEGA12:
2513 case CHIP_VEGA20:
2514#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2515 case CHIP_RAVEN:
2516#endif
2517#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2518 case CHIP_NAVI10:
2519 case CHIP_NAVI14:
2520 case CHIP_NAVI12:
2521#endif
2522#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2523 case CHIP_RENOIR:
2524#endif
2525 return amdgpu_dc != 0;
2526#endif
2527 default:
2528 return false;
2529 }
2530}
2531
2532/**
2533 * amdgpu_device_has_dc_support - check if dc is supported
2534 *
2535 * @adev: amdgpu_device pointer
2536 *
2537 * Returns true for supported, false for not supported
2538 */
2539bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2540{
2541 if (amdgpu_sriov_vf(adev))
2542 return false;
2543
2544 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2545}
2546
2547
2548static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
2549{
2550 struct amdgpu_device *adev =
2551 container_of(__work, struct amdgpu_device, xgmi_reset_work);
2552
2553 adev->asic_reset_res = amdgpu_asic_reset(adev);
2554 if (adev->asic_reset_res)
2555 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
2556 adev->asic_reset_res, adev->ddev->unique);
2557}
2558
2559
2560/**
2561 * amdgpu_device_init - initialize the driver
2562 *
2563 * @adev: amdgpu_device pointer
2564 * @ddev: drm dev pointer
2565 * @pdev: pci dev pointer
2566 * @flags: driver flags
2567 *
2568 * Initializes the driver info and hw (all asics).
2569 * Returns 0 for success or an error on failure.
2570 * Called at driver startup.
2571 */
2572int amdgpu_device_init(struct amdgpu_device *adev,
2573 struct drm_device *ddev,
2574 struct pci_dev *pdev,
2575 uint32_t flags)
2576{
2577 int r, i;
2578 bool runtime = false;
2579 u32 max_MBps;
2580
2581 adev->shutdown = false;
2582 adev->dev = &pdev->dev;
2583 adev->ddev = ddev;
2584 adev->pdev = pdev;
2585 adev->flags = flags;
2586 adev->asic_type = flags & AMD_ASIC_MASK;
2587 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2588 if (amdgpu_emu_mode == 1)
2589 adev->usec_timeout *= 2;
2590 adev->gmc.gart_size = 512 * 1024 * 1024;
2591 adev->accel_working = false;
2592 adev->num_rings = 0;
2593 adev->mman.buffer_funcs = NULL;
2594 adev->mman.buffer_funcs_ring = NULL;
2595 adev->vm_manager.vm_pte_funcs = NULL;
2596 adev->vm_manager.vm_pte_num_rqs = 0;
2597 adev->gmc.gmc_funcs = NULL;
2598 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2599 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2600
2601 adev->smc_rreg = &amdgpu_invalid_rreg;
2602 adev->smc_wreg = &amdgpu_invalid_wreg;
2603 adev->pcie_rreg = &amdgpu_invalid_rreg;
2604 adev->pcie_wreg = &amdgpu_invalid_wreg;
2605 adev->pciep_rreg = &amdgpu_invalid_rreg;
2606 adev->pciep_wreg = &amdgpu_invalid_wreg;
2607 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
2608 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
2609 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2610 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2611 adev->didt_rreg = &amdgpu_invalid_rreg;
2612 adev->didt_wreg = &amdgpu_invalid_wreg;
2613 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2614 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
2615 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2616 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2617
2618 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2619 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2620 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
2621
2622	/* mutex initialization is all done here so we
2623	 * can recall functions without having locking issues */
2624 atomic_set(&adev->irq.ih.lock, 0);
2625 mutex_init(&adev->firmware.mutex);
2626 mutex_init(&adev->pm.mutex);
2627 mutex_init(&adev->gfx.gpu_clock_mutex);
2628 mutex_init(&adev->srbm_mutex);
2629 mutex_init(&adev->gfx.pipe_reserve_mutex);
2630 mutex_init(&adev->gfx.gfx_off_mutex);
2631 mutex_init(&adev->grbm_idx_mutex);
2632 mutex_init(&adev->mn_lock);
2633 mutex_init(&adev->virt.vf_errors.lock);
2634 hash_init(adev->mn_hash);
2635 mutex_init(&adev->lock_reset);
2636 mutex_init(&adev->virt.dpm_mutex);
2637 mutex_init(&adev->psp.mutex);
2638
2639 r = amdgpu_device_check_arguments(adev);
2640 if (r)
2641 return r;
2642
2643 spin_lock_init(&adev->mmio_idx_lock);
2644 spin_lock_init(&adev->smc_idx_lock);
2645 spin_lock_init(&adev->pcie_idx_lock);
2646 spin_lock_init(&adev->uvd_ctx_idx_lock);
2647 spin_lock_init(&adev->didt_idx_lock);
2648 spin_lock_init(&adev->gc_cac_idx_lock);
2649 spin_lock_init(&adev->se_cac_idx_lock);
2650 spin_lock_init(&adev->audio_endpt_idx_lock);
2651 spin_lock_init(&adev->mm_stats.lock);
2652
2653 INIT_LIST_HEAD(&adev->shadow_list);
2654 mutex_init(&adev->shadow_list_lock);
2655
2656 INIT_LIST_HEAD(&adev->ring_lru_list);
2657 spin_lock_init(&adev->ring_lru_list_lock);
2658
2659 INIT_DELAYED_WORK(&adev->delayed_init_work,
2660 amdgpu_device_delayed_init_work_handler);
2661 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
2662 amdgpu_device_delay_enable_gfx_off);
2663
2664 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
2665
2666 adev->gfx.gfx_off_req_count = 1;
2667	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
2668
2669 /* Registers mapping */
2670 /* TODO: block userspace mapping of io register */
2671 if (adev->asic_type >= CHIP_BONAIRE) {
2672 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2673 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2674 } else {
2675 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2676 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2677 }
2678
2679 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2680 if (adev->rmmio == NULL) {
2681 return -ENOMEM;
2682 }
2683 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2684 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2685
2686 /* io port mapping */
2687 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2688 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2689 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2690 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2691 break;
2692 }
2693 }
2694 if (adev->rio_mem == NULL)
2695 DRM_INFO("PCI I/O BAR is not found.\n");
2696
2697 /* enable PCIE atomic ops */
2698 r = pci_enable_atomic_ops_to_root(adev->pdev,
2699 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
2700 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
2701 if (r) {
2702 adev->have_atomics_support = false;
2703		DRM_INFO("PCIe atomic ops are not supported\n");
2704 } else {
2705 adev->have_atomics_support = true;
2706 }
2707
2708 amdgpu_device_get_pcie_info(adev);
2709
2710 if (amdgpu_mcbp)
2711 DRM_INFO("MCBP is enabled\n");
2712
2713 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
2714 adev->enable_mes = true;
2715
2716 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
2717 r = amdgpu_discovery_init(adev);
2718 if (r) {
2719 dev_err(adev->dev, "amdgpu_discovery_init failed\n");
2720 return r;
2721 }
2722 }
2723
2724 /* early init functions */
2725 r = amdgpu_device_ip_early_init(adev);
2726 if (r)
2727 return r;
2728
2729 /* doorbell bar mapping and doorbell index init*/
2730 amdgpu_device_doorbell_init(adev);
2731
2732 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2733 /* this will fail for cards that aren't VGA class devices, just
2734 * ignore it */
2735 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
2736
2737 if (amdgpu_device_is_px(ddev))
2738 runtime = true;
2739 if (!pci_is_thunderbolt_attached(adev->pdev))
2740 vga_switcheroo_register_client(adev->pdev,
2741 &amdgpu_switcheroo_ops, runtime);
2742 if (runtime)
2743 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2744
2745 if (amdgpu_emu_mode == 1) {
2746 /* post the asic on emulation mode */
2747 emu_soc_asic_init(adev);
2748 goto fence_driver_init;
2749 }
2750
2751 /* detect if we are with an SRIOV vbios */
2752 amdgpu_device_detect_sriov_bios(adev);
2753
2754 /* check if we need to reset the asic
2755 * E.g., driver was not cleanly unloaded previously, etc.
2756 */
2757 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
2758 r = amdgpu_asic_reset(adev);
2759 if (r) {
2760 dev_err(adev->dev, "asic reset on init failed\n");
2761 goto failed;
2762 }
2763 }
2764
2765 /* Post card if necessary */
2766 if (amdgpu_device_need_post(adev)) {
2767 if (!adev->bios) {
2768 dev_err(adev->dev, "no vBIOS found\n");
2769 r = -EINVAL;
2770 goto failed;
2771 }
2772 DRM_INFO("GPU posting now...\n");
2773 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2774 if (r) {
2775 dev_err(adev->dev, "gpu post error!\n");
2776 goto failed;
2777 }
2778 }
2779
2780 if (adev->is_atom_fw) {
2781 /* Initialize clocks */
2782 r = amdgpu_atomfirmware_get_clock_info(adev);
2783 if (r) {
2784 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
2785 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2786 goto failed;
2787 }
2788 } else {
2789 /* Initialize clocks */
2790 r = amdgpu_atombios_get_clock_info(adev);
2791 if (r) {
2792 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
2793 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2794 goto failed;
2795 }
2796 /* init i2c buses */
2797 if (!amdgpu_device_has_dc_support(adev))
2798 amdgpu_atombios_i2c_init(adev);
2799 }
2800
2801fence_driver_init:
2802 /* Fence driver */
2803 r = amdgpu_fence_driver_init(adev);
2804 if (r) {
2805 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
2806 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
2807 goto failed;
2808 }
2809
2810 /* init the mode config */
2811 drm_mode_config_init(adev->ddev);
2812
2813 r = amdgpu_device_ip_init(adev);
2814 if (r) {
2815 /* failed in exclusive mode due to timeout */
2816 if (amdgpu_sriov_vf(adev) &&
2817 !amdgpu_sriov_runtime(adev) &&
2818 amdgpu_virt_mmio_blocked(adev) &&
2819 !amdgpu_virt_wait_reset(adev)) {
2820 dev_err(adev->dev, "VF exclusive mode timeout\n");
2821 /* Don't send request since VF is inactive. */
2822 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2823 adev->virt.ops = NULL;
2824 r = -EAGAIN;
2825 goto failed;
2826 }
2827 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
2828 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
2829 if (amdgpu_virt_request_full_gpu(adev, false))
2830 amdgpu_virt_release_full_gpu(adev, false);
2831 goto failed;
2832 }
2833
2834 adev->accel_working = true;
2835
2836 amdgpu_vm_check_compute_bug(adev);
2837
2838 /* Initialize the buffer migration limit. */
2839 if (amdgpu_moverate >= 0)
2840 max_MBps = amdgpu_moverate;
2841 else
2842 max_MBps = 8; /* Allow 8 MB/s. */
2843 /* Get a log2 for easy divisions. */
2844 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2845
2846 amdgpu_fbdev_init(adev);
2847
2848 if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
2849 amdgpu_pm_virt_sysfs_init(adev);
2850
2851 r = amdgpu_pm_sysfs_init(adev);
2852 if (r)
2853 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2854
2855 r = amdgpu_ucode_sysfs_init(adev);
2856 if (r)
2857 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
2858
2859 r = amdgpu_debugfs_gem_init(adev);
2860 if (r)
2861 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
2862
2863 r = amdgpu_debugfs_regs_init(adev);
2864 if (r)
2865 DRM_ERROR("registering register debugfs failed (%d).\n", r);
2866
2867 r = amdgpu_debugfs_firmware_init(adev);
2868 if (r)
2869 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
2870
2871 r = amdgpu_debugfs_init(adev);
2872 if (r)
2873 DRM_ERROR("Creating debugfs files failed (%d).\n", r);
2874
2875 if ((amdgpu_testing & 1)) {
2876 if (adev->accel_working)
2877 amdgpu_test_moves(adev);
2878 else
2879 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2880 }
2881 if (amdgpu_benchmarking) {
2882 if (adev->accel_working)
2883 amdgpu_benchmark(adev, amdgpu_benchmarking);
2884 else
2885 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2886 }
2887
2888 /*
2889 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
2890 * Otherwise the mgpu fan boost feature will be skipped because the
2891 * gpu instance count would be too low.
2892 */
2893 amdgpu_register_gpu_instance(adev);
2894
2895 /* enable clockgating, etc., after the ib tests since some blocks require
2896 * explicit gating rather than handling it automatically.
2897 */
2898 r = amdgpu_device_ip_late_init(adev);
2899 if (r) {
2900 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
2901 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
2902 goto failed;
2903 }
2904
2905 /* must succeed. */
2906 amdgpu_ras_resume(adev);
2907
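/* Kick off the delayed init work; among other things it runs the deferred
 * IB ring tests once AMDGPU_RESUME_MS has elapsed (see the flush in
 * amdgpu_device_resume() below).
 */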
2908 queue_delayed_work(system_wq, &adev->delayed_init_work,
2909 msecs_to_jiffies(AMDGPU_RESUME_MS));
2910
2911 r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
2912 if (r) {
2913 dev_err(adev->dev, "Could not create pcie_replay_count\n");
2914 return r;
2915 }
2916
2917 if (IS_ENABLED(CONFIG_PERF_EVENTS))
2918 r = amdgpu_pmu_init(adev);
2919 if (r)
2920 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
2921
2922 return 0;
2923
2924failed:
2925 amdgpu_vf_error_trans_all(adev);
2926 if (runtime)
2927 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2928
2929 return r;
2930}
2931
2932/**
2933 * amdgpu_device_fini - tear down the driver
2934 *
2935 * @adev: amdgpu_device pointer
2936 *
2937 * Tear down the driver info (all asics).
2938 * Called at driver shutdown.
2939 */
2940void amdgpu_device_fini(struct amdgpu_device *adev)
2941{
2942 int r;
2943
2944 DRM_INFO("amdgpu: finishing device.\n");
2945 adev->shutdown = true;
2946 /* disable all interrupts */
2947 amdgpu_irq_disable_all(adev);
2948 if (adev->mode_info.mode_config_initialized) {
2949 if (!amdgpu_device_has_dc_support(adev))
2950 drm_helper_force_disable_all(adev->ddev);
2951 else
2952 drm_atomic_helper_shutdown(adev->ddev);
2953 }
2954 amdgpu_fence_driver_fini(adev);
2955 amdgpu_pm_sysfs_fini(adev);
2956 amdgpu_fbdev_fini(adev);
2957 r = amdgpu_device_ip_fini(adev);
2958 if (adev->firmware.gpu_info_fw) {
2959 release_firmware(adev->firmware.gpu_info_fw);
2960 adev->firmware.gpu_info_fw = NULL;
2961 }
2962 adev->accel_working = false;
2963 cancel_delayed_work_sync(&adev->delayed_init_work);
2964 /* free i2c buses */
2965 if (!amdgpu_device_has_dc_support(adev))
2966 amdgpu_i2c_fini(adev);
2967
2968 if (amdgpu_emu_mode != 1)
2969 amdgpu_atombios_fini(adev);
2970
2971 kfree(adev->bios);
2972 adev->bios = NULL;
2973 if (!pci_is_thunderbolt_attached(adev->pdev))
2974 vga_switcheroo_unregister_client(adev->pdev);
2975 if (adev->flags & AMD_IS_PX)
2976 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2977 vga_client_register(adev->pdev, NULL, NULL, NULL);
2978 if (adev->rio_mem)
2979 pci_iounmap(adev->pdev, adev->rio_mem);
2980 adev->rio_mem = NULL;
2981 iounmap(adev->rmmio);
2982 adev->rmmio = NULL;
2983 amdgpu_device_doorbell_fini(adev);
2984 if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
2985 amdgpu_pm_virt_sysfs_fini(adev);
2986
2987 amdgpu_debugfs_regs_cleanup(adev);
2988 device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
2989 amdgpu_ucode_sysfs_fini(adev);
2990 if (IS_ENABLED(CONFIG_PERF_EVENTS))
2991 amdgpu_pmu_fini(adev);
2992 amdgpu_debugfs_preempt_cleanup(adev);
2993 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
2994 amdgpu_discovery_fini(adev);
2995}
2996
2997
2998/*
2999 * Suspend & resume.
3000 */
3001/**
3002 * amdgpu_device_suspend - initiate device suspend
3003 *
3004 * @dev: drm dev pointer
3005 * @suspend: suspend state
3006 * @fbcon: notify the fbdev of suspend
3007 *
3008 * Puts the hw in the suspend state (all asics).
3009 * Returns 0 for success or an error on failure.
3010 * Called at driver suspend.
3011 */
3012int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
3013{
3014 struct amdgpu_device *adev;
3015 struct drm_crtc *crtc;
3016 struct drm_connector *connector;
3017 int r;
3018
3019 if (dev == NULL || dev->dev_private == NULL) {
3020 return -ENODEV;
3021 }
3022
3023 adev = dev->dev_private;
3024
3025 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3026 return 0;
3027
3028 adev->in_suspend = true;
3029 drm_kms_helper_poll_disable(dev);
3030
3031 if (fbcon)
3032 amdgpu_fbdev_set_suspend(adev, 1);
3033
3034 cancel_delayed_work_sync(&adev->delayed_init_work);
3035
3036 if (!amdgpu_device_has_dc_support(adev)) {
3037 /* turn off display hw */
3038 drm_modeset_lock_all(dev);
3039 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3040 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
3041 }
3042 drm_modeset_unlock_all(dev);
3043 /* unpin the front buffers and cursors */
3044 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3045 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3046 struct drm_framebuffer *fb = crtc->primary->fb;
3047 struct amdgpu_bo *robj;
3048
3049 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3050 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3051 r = amdgpu_bo_reserve(aobj, true);
3052 if (r == 0) {
3053 amdgpu_bo_unpin(aobj);
3054 amdgpu_bo_unreserve(aobj);
3055 }
3056 }
3057
3058 if (fb == NULL || fb->obj[0] == NULL) {
3059 continue;
3060 }
3061 robj = gem_to_amdgpu_bo(fb->obj[0]);
3062 /* don't unpin kernel fb objects */
3063 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3064 r = amdgpu_bo_reserve(robj, true);
3065 if (r == 0) {
3066 amdgpu_bo_unpin(robj);
3067 amdgpu_bo_unreserve(robj);
3068 }
3069 }
3070 }
3071 }
3072
3073 amdgpu_amdkfd_suspend(adev);
3074
3075 amdgpu_ras_suspend(adev);
3076
3077 r = amdgpu_device_ip_suspend_phase1(adev);
3078
3079 /* evict vram memory */
3080 amdgpu_bo_evict_vram(adev);
3081
3082 amdgpu_fence_driver_suspend(adev);
3083
3084 r = amdgpu_device_ip_suspend_phase2(adev);
3085
3086 /* evict remaining vram memory
3087 * This second call to evict vram is to evict the gart page table
3088 * using the CPU.
3089 */
3090 amdgpu_bo_evict_vram(adev);
3091
3092 pci_save_state(dev->pdev);
3093 if (suspend) {
3094 /* Shut down the device */
3095 pci_disable_device(dev->pdev);
3096 pci_set_power_state(dev->pdev, PCI_D3hot);
3097 } else {
3098 r = amdgpu_asic_reset(adev);
3099 if (r)
3100 DRM_ERROR("amdgpu asic reset failed\n");
3101 }
3102
3103 return 0;
3104}
3105
3106/**
3107 * amdgpu_device_resume - initiate device resume
3108 *
3109 * @dev: drm dev pointer
3110 * @resume: resume state
3111 * @fbcon: notify the fbdev of resume
3112 *
3113 * Bring the hw back to operating state (all asics).
3114 * Returns 0 for success or an error on failure.
3115 * Called at driver resume.
3116 */
3117int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
3118{
3119 struct drm_connector *connector;
3120 struct amdgpu_device *adev = dev->dev_private;
3121 struct drm_crtc *crtc;
3122 int r = 0;
3123
3124 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3125 return 0;
3126
3127 if (resume) {
3128 pci_set_power_state(dev->pdev, PCI_D0);
3129 pci_restore_state(dev->pdev);
3130 r = pci_enable_device(dev->pdev);
3131 if (r)
3132 return r;
3133 }
3134
3135 /* post card */
3136 if (amdgpu_device_need_post(adev)) {
3137 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
3138 if (r)
3139 DRM_ERROR("amdgpu asic init failed\n");
3140 }
3141
3142 r = amdgpu_device_ip_resume(adev);
3143 if (r) {
3144 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
3145 return r;
3146 }
3147 amdgpu_fence_driver_resume(adev);
3148
3149
3150 r = amdgpu_device_ip_late_init(adev);
3151 if (r)
3152 return r;
3153
3154 queue_delayed_work(system_wq, &adev->delayed_init_work,
3155 msecs_to_jiffies(AMDGPU_RESUME_MS));
3156
3157 if (!amdgpu_device_has_dc_support(adev)) {
3158 /* pin cursors */
3159 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3160 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3161
3162 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3163 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3164 r = amdgpu_bo_reserve(aobj, true);
3165 if (r == 0) {
3166 r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3167 if (r != 0)
3168 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
3169 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3170 amdgpu_bo_unreserve(aobj);
3171 }
3172 }
3173 }
3174 }
3175 r = amdgpu_amdkfd_resume(adev);
3176 if (r)
3177 return r;
3178
3179 /* Make sure the deferred IB tests have completed */
3180 flush_delayed_work(&adev->delayed_init_work);
3181
3182 /* blat the mode back in */
3183 if (fbcon) {
3184 if (!amdgpu_device_has_dc_support(adev)) {
3185 /* pre DCE11 */
3186 drm_helper_resume_force_mode(dev);
3187
3188 /* turn on display hw */
3189 drm_modeset_lock_all(dev);
3190 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3191 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
3192 }
3193 drm_modeset_unlock_all(dev);
3194 }
3195 amdgpu_fbdev_set_suspend(adev, 0);
3196 }
3197
3198 drm_kms_helper_poll_enable(dev);
3199
3200 amdgpu_ras_resume(adev);
3201
3202 /*
3203 * Most of the connector probing functions try to acquire runtime pm
3204 * refs to ensure that the GPU is powered on when connector polling is
3205 * performed. Since we're calling this from a runtime PM callback,
3206 * trying to acquire rpm refs will cause us to deadlock.
3207 *
3208 * Since we're guaranteed to be holding the rpm lock, it's safe to
3209 * temporarily disable the rpm helpers so this doesn't deadlock us.
3210 */
3211#ifdef CONFIG_PM
3212 dev->dev->power.disable_depth++;
3213#endif
3214 if (!amdgpu_device_has_dc_support(adev))
3215 drm_helper_hpd_irq_event(dev);
3216 else
3217 drm_kms_helper_hotplug_event(dev);
3218#ifdef CONFIG_PM
3219 dev->dev->power.disable_depth--;
3220#endif
3221 adev->in_suspend = false;
3222
3223 return 0;
3224}
3225
3226/**
3227 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3228 *
3229 * @adev: amdgpu_device pointer
3230 *
3231 * The list of all the hardware IPs that make up the asic is walked and
3232 * the check_soft_reset callbacks are run. check_soft_reset determines
3233 * if the asic is still hung or not.
3234 * Returns true if any of the IPs are still in a hung state, false if not.
3235 */
3236static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3237{
3238 int i;
3239 bool asic_hang = false;
3240
3241 if (amdgpu_sriov_vf(adev))
3242 return true;
3243
3244 if (amdgpu_asic_need_full_reset(adev))
3245 return true;
3246
3247 for (i = 0; i < adev->num_ip_blocks; i++) {
3248 if (!adev->ip_blocks[i].status.valid)
3249 continue;
3250 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3251 adev->ip_blocks[i].status.hang =
3252 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3253 if (adev->ip_blocks[i].status.hang) {
3254 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3255 asic_hang = true;
3256 }
3257 }
3258 return asic_hang;
3259}
3260
3261/**
3262 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3263 *
3264 * @adev: amdgpu_device pointer
3265 *
3266 * The list of all the hardware IPs that make up the asic is walked and the
3267 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
3268 * handles any IP specific hardware or software state changes that are
3269 * necessary for a soft reset to succeed.
3270 * Returns 0 on success, negative error code on failure.
3271 */
3272static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3273{
3274 int i, r = 0;
3275
3276 for (i = 0; i < adev->num_ip_blocks; i++) {
3277 if (!adev->ip_blocks[i].status.valid)
3278 continue;
3279 if (adev->ip_blocks[i].status.hang &&
3280 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3281 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3282 if (r)
3283 return r;
3284 }
3285 }
3286
3287 return 0;
3288}
3289
3290/**
3291 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3292 *
3293 * @adev: amdgpu_device pointer
3294 *
3295 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
3296 * reset is necessary to recover.
3297 * Returns true if a full asic reset is required, false if not.
3298 */
3299static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3300{
3301 int i;
3302
3303 if (amdgpu_asic_need_full_reset(adev))
3304 return true;
3305
3306 for (i = 0; i < adev->num_ip_blocks; i++) {
3307 if (!adev->ip_blocks[i].status.valid)
3308 continue;
3309 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3310 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3311 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3312 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3313 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3314 if (adev->ip_blocks[i].status.hang) {
3315 DRM_INFO("Some block needs full reset!\n");
3316 return true;
3317 }
3318 }
3319 }
3320 return false;
3321}
3322
3323/**
3324 * amdgpu_device_ip_soft_reset - do a soft reset
3325 *
3326 * @adev: amdgpu_device pointer
3327 *
3328 * The list of all the hardware IPs that make up the asic is walked and the
3329 * soft_reset callbacks are run if the block is hung. soft_reset handles any
3330 * IP specific hardware or software state changes that are necessary to soft
3331 * reset the IP.
3332 * Returns 0 on success, negative error code on failure.
3333 */
3334static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3335{
3336 int i, r = 0;
3337
3338 for (i = 0; i < adev->num_ip_blocks; i++) {
3339 if (!adev->ip_blocks[i].status.valid)
3340 continue;
3341 if (adev->ip_blocks[i].status.hang &&
3342 adev->ip_blocks[i].version->funcs->soft_reset) {
3343 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3344 if (r)
3345 return r;
3346 }
3347 }
3348
3349 return 0;
3350}
3351
3352/**
3353 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3354 *
3355 * @adev: amdgpu_device pointer
3356 *
3357 * The list of all the hardware IPs that make up the asic is walked and the
3358 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
3359 * handles any IP specific hardware or software state changes that are
3360 * necessary after the IP has been soft reset.
3361 * Returns 0 on success, negative error code on failure.
3362 */
3363static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
3364{
3365 int i, r = 0;
3366
3367 for (i = 0; i < adev->num_ip_blocks; i++) {
3368 if (!adev->ip_blocks[i].status.valid)
3369 continue;
3370 if (adev->ip_blocks[i].status.hang &&
3371 adev->ip_blocks[i].version->funcs->post_soft_reset)
3372 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
3373 if (r)
3374 return r;
3375 }
3376
3377 return 0;
3378}
3379
3380/**
3381 * amdgpu_device_recover_vram - Recover some VRAM contents
3382 *
3383 * @adev: amdgpu_device pointer
3384 *
3385 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
3386 * restore things like GPUVM page tables after a GPU reset where
3387 * the contents of VRAM might be lost.
3388 *
3389 * Returns:
3390 * 0 on success, negative error code on failure.
3391 */
3392static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
3393{
3394 struct dma_fence *fence = NULL, *next = NULL;
3395 struct amdgpu_bo *shadow;
3396 long r = 1, tmo;
3397
3398 if (amdgpu_sriov_runtime(adev))
3399 tmo = msecs_to_jiffies(8000);
3400 else
3401 tmo = msecs_to_jiffies(100);
3402
3403 DRM_INFO("recover vram bo from shadow start\n");
3404 mutex_lock(&adev->shadow_list_lock);
3405 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
3406
3407 /* No need to recover an evicted BO */
3408 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
3409 shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
3410 shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
3411 continue;
3412
3413 r = amdgpu_bo_restore_shadow(shadow, &next);
3414 if (r)
3415 break;
3416
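/* Wait on the fence of the previously issued restore while the next one
 * is already in flight; the remaining timeout is carried forward across
 * iterations.
 */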
3417 if (fence) {
3418 tmo = dma_fence_wait_timeout(fence, false, tmo);
3419 dma_fence_put(fence);
3420 fence = next;
3421 if (tmo == 0) {
3422 r = -ETIMEDOUT;
3423 break;
3424 } else if (tmo < 0) {
3425 r = tmo;
3426 break;
3427 }
3428 } else {
3429 fence = next;
3430 }
3431 }
3432 mutex_unlock(&adev->shadow_list_lock);
3433
3434 if (fence)
3435 tmo = dma_fence_wait_timeout(fence, false, tmo);
3436 dma_fence_put(fence);
3437
3438 if (r < 0 || tmo <= 0) {
3439 DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
3440 return -EIO;
3441 }
3442
3443 DRM_INFO("recover vram bo from shadow done\n");
3444 return 0;
3445}
3446
3447
3448/**
3449 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
3450 *
3451 * @adev: amdgpu device pointer
3452 * @from_hypervisor: request from hypervisor
3453 *
3454 * Do VF FLR and reinitialize the ASIC.
3455 * Returns 0 on success, negative error code on failure.
3456 */
3457static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3458 bool from_hypervisor)
3459{
3460 int r;
3461
3462 if (from_hypervisor)
3463 r = amdgpu_virt_request_full_gpu(adev, true);
3464 else
3465 r = amdgpu_virt_reset_gpu(adev);
3466 if (r)
3467 return r;
3468
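/* Let the KFD side know a reset is starting so it can quiesce before the
 * hardware goes down.
 */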
3469 amdgpu_amdkfd_pre_reset(adev);
3470
3471 /* Resume IP prior to SMC */
3472 r = amdgpu_device_ip_reinit_early_sriov(adev);
3473 if (r)
3474 goto error;
3475
3476 /* we need to recover the GART prior to running SMC/CP/SDMA resume */
3477 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
3478
3479 r = amdgpu_device_fw_loading(adev);
3480 if (r)
3481 return r;
3482
3483 /* now we are okay to resume SMC/CP/SDMA */
3484 r = amdgpu_device_ip_reinit_late_sriov(adev);
3485 if (r)
3486 goto error;
3487
3488 amdgpu_irq_gpu_reset_resume_helper(adev);
3489 r = amdgpu_ib_ring_tests(adev);
3490 amdgpu_amdkfd_post_reset(adev);
3491
3492error:
3493 amdgpu_virt_init_data_exchange(adev);
3494 amdgpu_virt_release_full_gpu(adev, true);
3495 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3496 amdgpu_inc_vram_lost(adev);
3497 r = amdgpu_device_recover_vram(adev);
3498 }
3499
3500 return r;
3501}
3502
3503/**
3504 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
3505 *
3506 * @adev: amdgpu device pointer
3507 *
3508 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
3509 * a hung GPU.
3510 */
3511bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
3512{
3513 if (!amdgpu_device_ip_check_soft_reset(adev)) {
3514 DRM_INFO("Timeout, but no hardware hang detected.\n");
3515 return false;
3516 }
3517
3518 if (amdgpu_gpu_recovery == 0)
3519 goto disabled;
3520
3521 if (amdgpu_sriov_vf(adev))
3522 return true;
3523
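/* amdgpu_gpu_recovery == -1 (auto): recovery is only enabled for the
 * ASIC families listed below.
 */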
3524 if (amdgpu_gpu_recovery == -1) {
3525 switch (adev->asic_type) {
3526 case CHIP_BONAIRE:
3527 case CHIP_HAWAII:
3528 case CHIP_TOPAZ:
3529 case CHIP_TONGA:
3530 case CHIP_FIJI:
3531 case CHIP_POLARIS10:
3532 case CHIP_POLARIS11:
3533 case CHIP_POLARIS12:
3534 case CHIP_VEGAM:
3535 case CHIP_VEGA20:
3536 case CHIP_VEGA10:
3537 case CHIP_VEGA12:
3538 case CHIP_RAVEN:
3539 break;
3540 default:
3541 goto disabled;
3542 }
3543 }
3544
3545 return true;
3546
3547disabled:
3548 DRM_INFO("GPU recovery disabled.\n");
3549 return false;
3550}
3551
3552
3553static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
3554 struct amdgpu_job *job,
3555 bool *need_full_reset_arg)
3556{
3557 int i, r = 0;
3558 bool need_full_reset = *need_full_reset_arg;
3559
3560 /* block all schedulers and reset given job's ring */
3561 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3562 struct amdgpu_ring *ring = adev->rings[i];
3563
3564 if (!ring || !ring->sched.thread)
3565 continue;
3566
3567 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3568 amdgpu_fence_driver_force_completion(ring);
3569 }
3570
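/* Bump the offending job's karma so the scheduler can eventually drop jobs
 * that repeatedly cause hangs instead of resubmitting them.
 */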
3571 if (job)
3572 drm_sched_increase_karma(&job->base);
3573
3574 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
3575 if (!amdgpu_sriov_vf(adev)) {
3576
3577 if (!need_full_reset)
3578 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
3579
3580 if (!need_full_reset) {
3581 amdgpu_device_ip_pre_soft_reset(adev);
3582 r = amdgpu_device_ip_soft_reset(adev);
3583 amdgpu_device_ip_post_soft_reset(adev);
3584 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
3585 DRM_INFO("soft reset failed, will fallback to full reset!\n");
3586 need_full_reset = true;
3587 }
3588 }
3589
3590 if (need_full_reset)
3591 r = amdgpu_device_ip_suspend(adev);
3592
3593 *need_full_reset_arg = need_full_reset;
3594 }
3595
3596 return r;
3597}
3598
3599static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
3600 struct list_head *device_list_handle,
3601 bool *need_full_reset_arg)
3602{
3603 struct amdgpu_device *tmp_adev = NULL;
3604 bool need_full_reset = *need_full_reset_arg, vram_lost = false;
3605 int r = 0;
3606
3607 /*
3608 * ASIC reset has to be done on all XGMI hive nodes ASAP
3609 * to allow proper link negotiation in FW (within 1 sec)
3610 */
3611 if (need_full_reset) {
3612 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3613 /* For XGMI run all resets in parallel to speed up the process */
3614 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
3615 if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
3616 r = -EALREADY;
3617 } else
3618 r = amdgpu_asic_reset(tmp_adev);
3619
3620 if (r) {
3621 DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
3622 r, tmp_adev->ddev->unique);
3623 break;
3624 }
3625 }
3626
3627 /* For XGMI wait for all PSP resets to complete before proceeding */
3628 if (!r) {
3629 list_for_each_entry(tmp_adev, device_list_handle,
3630 gmc.xgmi.head) {
3631 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
3632 flush_work(&tmp_adev->xgmi_reset_work);
3633 r = tmp_adev->asic_reset_res;
3634 if (r)
3635 break;
3636 }
3637 }
3638
3639 list_for_each_entry(tmp_adev, device_list_handle,
3640 gmc.xgmi.head) {
3641 amdgpu_ras_reserve_bad_pages(tmp_adev);
3642 }
3643 }
3644 }
3645
3646
3647 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3648 if (need_full_reset) {
3649 /* post card */
3650 if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
3651 DRM_WARN("asic atom init failed!");
3652
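/* After a successful ASIC reset, bring the device back up in stages:
 * resume IP phase 1, recover the GTT, reload firmware, resume IP phase 2,
 * then run late init and resume RAS.
 */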
3653 if (!r) {
3654 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
3655 r = amdgpu_device_ip_resume_phase1(tmp_adev);
3656 if (r)
3657 goto out;
3658
3659 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
3660 if (vram_lost) {
3661 DRM_INFO("VRAM is lost due to GPU reset!\n");
3662 amdgpu_inc_vram_lost(tmp_adev);
3663 }
3664
3665 r = amdgpu_gtt_mgr_recover(
3666 &tmp_adev->mman.bdev.man[TTM_PL_TT]);
3667 if (r)
3668 goto out;
3669
3670 r = amdgpu_device_fw_loading(tmp_adev);
3671 if (r)
3672 return r;
3673
3674 r = amdgpu_device_ip_resume_phase2(tmp_adev);
3675 if (r)
3676 goto out;
3677
3678 if (vram_lost)
3679 amdgpu_device_fill_reset_magic(tmp_adev);
3680
3681 /*
3682 * Add this ASIC back as tracked since the reset
3683 * already completed successfully.
3684 */
3685 amdgpu_register_gpu_instance(tmp_adev);
3686
3687 r = amdgpu_device_ip_late_init(tmp_adev);
3688 if (r)
3689 goto out;
3690
3691 /* must succeed. */
3692 amdgpu_ras_resume(tmp_adev);
3693
3694 /* Update PSP FW topology after reset */
3695 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
3696 r = amdgpu_xgmi_update_topology(hive, tmp_adev);
3697 }
3698 }
3699
3700
3701out:
3702 if (!r) {
3703 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
3704 r = amdgpu_ib_ring_tests(tmp_adev);
3705 if (r) {
3706 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
3707 r = amdgpu_device_ip_suspend(tmp_adev);
3708 need_full_reset = true;
3709 r = -EAGAIN;
3710 goto end;
3711 }
3712 }
3713
3714 if (!r)
3715 r = amdgpu_device_recover_vram(tmp_adev);
3716 else
3717 tmp_adev->asic_reset_res = r;
3718 }
3719
3720end:
3721 *need_full_reset_arg = need_full_reset;
3722 return r;
3723}
3724
3725static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
3726{
3727 if (trylock) {
3728 if (!mutex_trylock(&adev->lock_reset))
3729 return false;
3730 } else
3731 mutex_lock(&adev->lock_reset);
3732
3733 atomic_inc(&adev->gpu_reset_counter);
3734 adev->in_gpu_reset = 1;
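/* Record the MP1 (SMU) state corresponding to the reset method that will
 * be used.
 */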
3735 switch (amdgpu_asic_reset_method(adev)) {
3736 case AMD_RESET_METHOD_MODE1:
3737 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
3738 break;
3739 case AMD_RESET_METHOD_MODE2:
3740 adev->mp1_state = PP_MP1_STATE_RESET;
3741 break;
3742 default:
3743 adev->mp1_state = PP_MP1_STATE_NONE;
3744 break;
3745 }
3746 /* Block kfd: SRIOV would do it separately */
3747 if (!amdgpu_sriov_vf(adev))
3748 amdgpu_amdkfd_pre_reset(adev);
3749
3750 return true;
3751}
3752
3753static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
3754{
3755 /* unlock kfd: SRIOV would do it separately */
3756 if (!amdgpu_sriov_vf(adev))
3757 amdgpu_amdkfd_post_reset(adev);
3758 amdgpu_vf_error_trans_all(adev);
3759 adev->mp1_state = PP_MP1_STATE_NONE;
3760 adev->in_gpu_reset = 0;
3761 mutex_unlock(&adev->lock_reset);
3762}
3763
3764
3765/**
3766 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
3767 *
3768 * @adev: amdgpu device pointer
3769 * @job: the job that triggered the hang, if any
3770 *
3771 * Attempt to reset the GPU if it has hung (all asics).
3772 * Attempt to do a soft reset or full reset and reinitialize the ASIC.
3773 * Returns 0 for success or an error on failure.
3774 */
3775
3776int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
3777 struct amdgpu_job *job)
3778{
3779 struct list_head device_list, *device_list_handle = NULL;
3780 bool need_full_reset, job_signaled;
3781 struct amdgpu_hive_info *hive = NULL;
3782 struct amdgpu_device *tmp_adev = NULL;
3783 int i, r = 0;
3784
3785 need_full_reset = job_signaled = false;
3786 INIT_LIST_HEAD(&device_list);
3787
3788 dev_info(adev->dev, "GPU reset begin!\n");
3789
3790 cancel_delayed_work_sync(&adev->delayed_init_work);
3791
3792 hive = amdgpu_get_xgmi_hive(adev, false);
3793
3794 /*
3795 * Here we trylock to avoid a chain of resets executing, triggered
3796 * either by jobs on different adevs in an XGMI hive or by jobs on
3797 * different schedulers for the same device, while this TO handler is running.
3798 * We always reset all schedulers for a device and all devices in an XGMI
3799 * hive, so that should take care of them too.
3800 */
3801
3802 if (hive && !mutex_trylock(&hive->reset_lock)) {
3803 DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
3804 job ? job->base.id : -1, hive->hive_id);
3805 return 0;
3806 }
3807
3808 /* Start with adev pre asic reset first for soft reset check. */
3809 if (!amdgpu_device_lock_adev(adev, !hive)) {
3810 DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
3811 job ? job->base.id : -1);
3812 return 0;
3813 }
3814
3815 /* Build list of devices to reset */
3816 if (adev->gmc.xgmi.num_physical_nodes > 1) {
3817 if (!hive) {
3818 amdgpu_device_unlock_adev(adev);
3819 return -ENODEV;
3820 }
3821
3822 /*
3823 * In XGMI hive mode the device reset is done for all the
3824 * nodes in the hive to retrain all XGMI links, hence the reset
3825 * sequence is executed in a loop over all nodes.
3826 */
3827 device_list_handle = &hive->device_list;
3828 } else {
3829 list_add_tail(&adev->gmc.xgmi.head, &device_list);
3830 device_list_handle = &device_list;
3831 }
3832
3833 /*
3834 * Mark these ASICs to be reset as untracked first,
3835 * and add them back after the reset completes.
3836 */
3837 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
3838 amdgpu_unregister_gpu_instance(tmp_adev);
3839
3840 /* block all schedulers and reset given job's ring */
3841 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3842 /* disable ras on ALL IPs */
3843 if (amdgpu_device_ip_need_full_reset(tmp_adev))
3844 amdgpu_ras_suspend(tmp_adev);
3845
3846 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3847 struct amdgpu_ring *ring = tmp_adev->rings[i];
3848
3849 if (!ring || !ring->sched.thread)
3850 continue;
3851
3852 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
3853 }
3854 }
3855
3856
3857 /*
3858 * Must check guilty signal here since after this point all old
3859 * HW fences are force signaled.
3860 *
3861 * job->base holds a reference to parent fence
3862 */
3863 if (job && job->base.s_fence->parent &&
3864 dma_fence_is_signaled(job->base.s_fence->parent))
3865 job_signaled = true;
3866
3867 if (!amdgpu_device_ip_need_full_reset(adev))
3868 device_list_handle = &device_list;
3869
3870 if (job_signaled) {
3871 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset\n");
3872 goto skip_hw_reset;
3873 }
3874
3875
3876 /* Guilty job will be freed after this*/
3877 r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
3878 if (r) {
3879 /* TODO: Should we stop? */
3880 DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
3881 r, adev->ddev->unique);
3882 adev->asic_reset_res = r;
3883 }
3884
3885retry: /* Rest of adevs pre asic reset from XGMI hive. */
3886 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3887
3888 if (tmp_adev == adev)
3889 continue;
3890
3891 amdgpu_device_lock_adev(tmp_adev, false);
3892 r = amdgpu_device_pre_asic_reset(tmp_adev,
3893 NULL,
3894 &need_full_reset);
3895 /* TODO: Should we stop? */
3896 if (r) {
3897 DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
3898 r, tmp_adev->ddev->unique);
3899 tmp_adev->asic_reset_res = r;
3900 }
3901 }
3902
3903 /* Actual ASIC resets if needed.*/
3904 /* TODO Implement XGMI hive reset logic for SRIOV */
3905 if (amdgpu_sriov_vf(adev)) {
3906 r = amdgpu_device_reset_sriov(adev, job ? false : true);
3907 if (r)
3908 adev->asic_reset_res = r;
3909 } else {
3910 r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
3911 if (r && r == -EAGAIN)
3912 goto retry;
3913 }
3914
3915skip_hw_reset:
3916
3917 /* Post ASIC reset for all devs. */
3918 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3919 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3920 struct amdgpu_ring *ring = tmp_adev->rings[i];
3921
3922 if (!ring || !ring->sched.thread)
3923 continue;
3924
3925 /* No point in resubmitting jobs if we didn't HW reset */
3926 if (!tmp_adev->asic_reset_res && !job_signaled)
3927 drm_sched_resubmit_jobs(&ring->sched);
3928
3929 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
3930 }
3931
3932 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
3933 drm_helper_resume_force_mode(tmp_adev->ddev);
3934 }
3935
3936 tmp_adev->asic_reset_res = 0;
3937
3938 if (r) {
3939 /* bad news, how to tell it to userspace ? */
3940 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
3941 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
3942 } else {
3943 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
3944 }
3945
3946 amdgpu_device_unlock_adev(tmp_adev);
3947 }
3948
3949 if (hive)
3950 mutex_unlock(&hive->reset_lock);
3951
3952 if (r)
3953 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
3954 return r;
3955}
3956
3957/**
3958 * amdgpu_device_get_pcie_info - fetch PCIE info about the PCIE slot
3959 *
3960 * @adev: amdgpu_device pointer
3961 *
3962 * Fetches and stores in the driver the PCIE capabilities (gen speed
3963 * and lanes) of the slot the device is in. Handles APUs and
3964 * virtualized environments where PCIE config space may not be available.
3965 */
3966static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
3967{
3968 struct pci_dev *pdev;
3969 enum pci_bus_speed speed_cap, platform_speed_cap;
3970 enum pcie_link_width platform_link_width;
3971
3972 if (amdgpu_pcie_gen_cap)
3973 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
3974
3975 if (amdgpu_pcie_lane_cap)
3976 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
3977
3978 /* covers APUs as well */
3979 if (pci_is_root_bus(adev->pdev->bus)) {
3980 if (adev->pm.pcie_gen_mask == 0)
3981 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3982 if (adev->pm.pcie_mlw_mask == 0)
3983 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3984 return;
3985 }
3986
3987 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
3988 return;
3989
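/* Ask the PCI core what link speed and width the upstream chain to the
 * root port can actually provide (the platform caps used below).
 */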
3990 pcie_bandwidth_available(adev->pdev, NULL,
3991 &platform_speed_cap, &platform_link_width);
3992
3993 if (adev->pm.pcie_gen_mask == 0) {
3994 /* asic caps */
3995 pdev = adev->pdev;
3996 speed_cap = pcie_get_speed_cap(pdev);
3997 if (speed_cap == PCI_SPEED_UNKNOWN) {
3998 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3999 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4000 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4001 } else {
4002 if (speed_cap == PCIE_SPEED_16_0GT)
4003 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4004 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4005 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4006 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4007 else if (speed_cap == PCIE_SPEED_8_0GT)
4008 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4009 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4010 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4011 else if (speed_cap == PCIE_SPEED_5_0GT)
4012 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4013 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4014 else
4015 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4016 }
4017 /* platform caps */
4018 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
4019 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4020 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4021 } else {
4022 if (platform_speed_cap == PCIE_SPEED_16_0GT)
4023 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4024 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4025 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4026 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
4027 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
4028 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4029 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4030 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
4031 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
4032 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4033 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4034 else
4035 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4036
4037 }
4038 }
4039 if (adev->pm.pcie_mlw_mask == 0) {
4040 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
4041 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4042 } else {
4043 switch (platform_link_width) {
4044 case PCIE_LNK_X32:
4045 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4046 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4047 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4048 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4049 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4050 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4051 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4052 break;
4053 case PCIE_LNK_X16:
4054 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4055 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4056 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4057 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4058 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4059 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4060 break;
4061 case PCIE_LNK_X12:
4062 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4063 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4064 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4065 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4066 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4067 break;
4068 case PCIE_LNK_X8:
4069 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4070 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4071 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4072 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4073 break;
4074 case PCIE_LNK_X4:
4075 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4076 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4077 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4078 break;
4079 case PCIE_LNK_X2:
4080 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4081 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4082 break;
4083 case PCIE_LNK_X1:
4084 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
4085 break;
4086 default:
4087 break;
4088 }
4089 }
4090 }
4091}
4092