/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS 2000

const char *amdgpu_asic_name[] = {
        "TAHITI",
        "PITCAIRN",
        "VERDE",
        "OLAND",
        "HAINAN",
        "BONAIRE",
        "KAVERI",
        "KABINI",
        "HAWAII",
        "MULLINS",
        "TOPAZ",
        "TONGA",
        "FIJI",
        "CARRIZO",
        "STONEY",
        "POLARIS10",
        "POLARIS11",
        "POLARIS12",
        "VEGAM",
        "VEGA10",
        "VEGA12",
        "VEGA20",
        "RAVEN",
        "ARCTURUS",
        "RENOIR",
        "NAVI10",
        "NAVI14",
        "NAVI12",
        "SIENNA_CICHLID",
        "NAVY_FLOUNDER",
        "LAST",
};
119
120/**
121 * DOC: pcie_replay_count
122 *
123 * The amdgpu driver provides a sysfs API for reporting the total number
124 * of PCIe replays (NAKs)
125 * The file pcie_replay_count is used for this and returns the total
126 * number of replays as a sum of the NAKs generated and NAKs received
127 */
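
/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * attribute is read like any other sysfs file, e.g. from the shell. The
 * card index below is an assumption and depends on the system:
 *
 *   cat /sys/class/drm/card0/device/pcie_replay_count
 */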

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

        return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
                amdgpu_device_get_pcie_replay_count, NULL);

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * DOC: product_name
 *
 * The amdgpu driver provides a sysfs API for reporting the product name
 * for the device.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_name(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
}

static DEVICE_ATTR(product_name, S_IRUGO,
                amdgpu_device_get_product_name, NULL);

/**
 * DOC: product_number
 *
 * The amdgpu driver provides a sysfs API for reporting the part number
 * for the device.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_number(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
}

static DEVICE_ATTR(product_number, S_IRUGO,
                amdgpu_device_get_product_number, NULL);

/**
 * DOC: serial_number
 *
 * The amdgpu driver provides a sysfs API for reporting the serial number
 * for the device.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_serial_number(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
}

static DEVICE_ATTR(serial_number, S_IRUGO,
                amdgpu_device_get_serial_number, NULL);

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with HG/PX power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
        struct amdgpu_device *adev = dev->dev_private;

        if (adev->flags & AMD_IS_PX)
                return true;
        return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise returns false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
        struct amdgpu_device *adev = dev->dev_private;

        return amdgpu_asic_supports_baco(adev);
}

/**
 * VRAM access helper functions.
 *
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               uint32_t *buf, size_t size, bool write)
{
        unsigned long flags;
        uint32_t hi = ~0;
        uint64_t last;

#ifdef CONFIG_64BIT
        last = min(pos + size, adev->gmc.visible_vram_size);
        if (last > pos) {
                void __iomem *addr = adev->mman.aper_base_kaddr + pos;
                size_t count = last - pos;

                if (write) {
                        memcpy_toio(addr, buf, count);
                        mb();
                        amdgpu_asic_flush_hdp(adev, NULL);
                } else {
                        amdgpu_asic_invalidate_hdp(adev, NULL);
                        mb();
                        memcpy_fromio(buf, addr, count);
                }

                if (count == size)
                        return;

                pos += count;
                buf += count / 4;
                size -= count;
        }
#endif

        spin_lock_irqsave(&adev->mmio_idx_lock, flags);
        for (last = pos + size; pos < last; pos += 4) {
                uint32_t tmp = pos >> 31;

                WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
                if (tmp != hi) {
                        WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
                        hi = tmp;
                }
                if (write)
                        WREG32_NO_KIQ(mmMM_DATA, *buf++);
                else
                        *buf++ = RREG32_NO_KIQ(mmMM_DATA);
        }
        spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
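
/*
 * Illustrative sketch (editor's example, not part of the driver): reading
 * 16 bytes from the start of VRAM into a stack buffer. The caller must
 * supply a buffer of at least @size bytes; offsets and sizes are in bytes
 * and should be dword aligned:
 *
 *   uint32_t data[4];
 *
 *   amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
 */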

/*
 * MMIO register access helper functions.
 */
/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
                        uint32_t acc_flags)
{
        uint32_t ret;

        if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_kiq_rreg(adev, reg);

        if ((reg * 4) < adev->rmmio_size)
                ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
        else {
                unsigned long flags;

                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
                ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        }
        trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
        return ret;
}
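
/*
 * Illustrative sketch (editor's example, not part of the driver): callers
 * normally go through the RREG32()/WREG32() convenience macros rather than
 * calling amdgpu_mm_rreg()/amdgpu_mm_wreg() directly; assuming the usual
 * local adev and a dword register offset reg:
 *
 *   uint32_t v = RREG32(reg);
 *   WREG32(reg, v | 0x1);
 */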

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
        if (offset < adev->rmmio_size)
                return readb(adev->rmmio + offset);
        BUG();
}

/**
 * amdgpu_mm_wreg8 - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
        if (offset < adev->rmmio_size)
                writeb(value, adev->rmmio + offset);
        else
                BUG();
}

static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev,
                                       uint32_t reg, uint32_t v,
                                       uint32_t acc_flags)
{
        trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

        if ((reg * 4) < adev->rmmio_size)
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        else {
                unsigned long flags;

                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
                writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        }
}

/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    uint32_t acc_flags)
{
        if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_kiq_wreg(adev, reg, v);

        amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
}

/*
 * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                             uint32_t acc_flags)
{
        if (amdgpu_sriov_fullaccess(adev) &&
            adev->gfx.rlc.funcs &&
            adev->gfx.rlc.funcs->is_rlcg_access_range) {

                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
                        return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
        }

        amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
}

/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the offset specified.
 */
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
        if ((reg * 4) < adev->rio_mem_size)
                return ioread32(adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                return ioread32(adev->rio_mem + (mmMM_DATA * 4));
        }
}

/**
 * amdgpu_io_wreg - write to an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        if ((reg * 4) < adev->rio_mem_size)
                iowrite32(v, adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
        }
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
        if (index < adev->doorbell.num_doorbells) {
                return readl(adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
        if (index < adev->doorbell.num_doorbells) {
                writel(v, adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
        if (index < adev->doorbell.num_doorbells) {
                return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
        if (index < adev->doorbell.num_doorbells) {
                atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}
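
/*
 * Illustrative sketch (editor's example, not part of the driver): ring code
 * typically stores its assigned doorbell index and kicks the hardware by
 * writing the new write pointer through these helpers; the ring variable
 * below is hypothetical:
 *
 *   amdgpu_mm_wdoorbell64(adev, ring->doorbell_index, ring->wptr);
 */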

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
        DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
                                          uint32_t block, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
                  reg, block);
        BUG();
        return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
                                      uint32_t block,
                                      uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
                  reg, block, v);
        BUG();
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
        return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->vram_scratch.robj,
                                       &adev->vram_scratch.gpu_addr,
                                       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                                             const u32 *registers,
                                             const u32 array_size)
{
        u32 tmp, reg, and_mask, or_mask;
        int i;

        if (array_size % 3)
                return;

        for (i = 0; i < array_size; i += 3) {
                reg = registers[i + 0];
                and_mask = registers[i + 1];
                or_mask = registers[i + 2];

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~and_mask;
                        if (adev->family >= AMDGPU_FAMILY_AI)
                                tmp |= (or_mask & and_mask);
                        else
                                tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
}
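
/*
 * Illustrative sketch (editor's example, not part of the driver): golden
 * register tables are flat arrays of {offset, and_mask, or_mask} triples;
 * the table name and the use of mmMM_INDEX as the offset are hypothetical:
 *
 *   static const u32 golden_settings_example[] = {
 *           mmMM_INDEX, 0xffffffff, 0x00000000,
 *   };
 *
 *   amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *                                           ARRAY_SIZE(golden_settings_example));
 *
 * With an and_mask of 0xffffffff the or_mask is written verbatim; otherwise
 * the masked bits are cleared first and the or_mask is OR'ed in.
 */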

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
        pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{

        /* No doorbell on SI hardware generation */
        if (adev->asic_type < CHIP_BONAIRE) {
                adev->doorbell.base = 0;
                adev->doorbell.size = 0;
                adev->doorbell.num_doorbells = 0;
                adev->doorbell.ptr = NULL;
                return 0;
        }

        if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
                return -EINVAL;

        amdgpu_asic_init_doorbell_index(adev);

        /* doorbell bar mapping */
        adev->doorbell.base = pci_resource_start(adev->pdev, 2);
        adev->doorbell.size = pci_resource_len(adev->pdev, 2);

        adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
                                             adev->doorbell_index.max_assignment + 1);
        if (adev->doorbell.num_doorbells == 0)
                return -EINVAL;

        /* For Vega, reserve and map two pages on the doorbell BAR since the
         * SDMA paging queue doorbell uses the second page. The
         * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
         * doorbells are in the first page, so with the paging queue enabled
         * num_doorbells is extended by one page (0x400 dwords).
         */
        if (adev->asic_type >= CHIP_VEGA10)
                adev->doorbell.num_doorbells += 0x400;

        adev->doorbell.ptr = ioremap(adev->doorbell.base,
                                     adev->doorbell.num_doorbells *
                                     sizeof(u32));
        if (adev->doorbell.ptr == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
        iounmap(adev->doorbell.ptr);
        adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
        if (adev->wb.wb_obj) {
                amdgpu_bo_free_kernel(&adev->wb.wb_obj,
                                      &adev->wb.gpu_addr,
                                      (void **)&adev->wb.wb);
                adev->wb.wb_obj = NULL;
        }
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->wb.wb_obj == NULL) {
                /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
                r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
                                            PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->wb.wb_obj, &adev->wb.gpu_addr,
                                            (void **)&adev->wb.wb);
                if (r) {
                        dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }

                adev->wb.num_wb = AMDGPU_MAX_WB;
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));

                /* clear wb memory */
                memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
        }

        return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
        unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

        if (offset < adev->wb.num_wb) {
                __set_bit(offset, adev->wb.used);
                *wb = offset << 3; /* convert to dw offset */
                return 0;
        } else {
                return -EINVAL;
        }
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
        wb >>= 3;
        if (wb < adev->wb.num_wb)
                __clear_bit(wb, adev->wb.used);
}
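
/*
 * Illustrative sketch (editor's example, not part of the driver): a
 * writeback slot is allocated, used as a dword offset into adev->wb.wb,
 * and returned when no longer needed:
 *
 *   u32 wb;
 *
 *   if (!amdgpu_device_wb_get(adev, &wb)) {
 *           uint32_t status = adev->wb.wb[wb];
 *           amdgpu_device_wb_free(adev, wb);
 *   }
 */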

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
        u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
        u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
        struct pci_bus *root;
        struct resource *res;
        unsigned i;
        u16 cmd;
        int r;

        /* Bypass for VF */
        if (amdgpu_sriov_vf(adev))
                return 0;

        /* skip if the bios has already enabled large BAR */
        if (adev->gmc.real_vram_size &&
            (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
                return 0;

        /* Check if the root BUS has 64bit memory resources */
        root = adev->pdev->bus;
        while (root->parent)
                root = root->parent;

        pci_bus_for_each_resource(root, res, i) {
                if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
                    res->start > 0x100000000ull)
                        break;
        }

        /* Trying to resize is pointless without a root hub window above 4GB */
        if (!res)
                return 0;

        /* Disable memory decoding while we change the BAR addresses and size */
        pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
        pci_write_config_word(adev->pdev, PCI_COMMAND,
                              cmd & ~PCI_COMMAND_MEMORY);

        /* Free the VRAM and doorbell BAR, we most likely need to move both. */
        amdgpu_device_doorbell_fini(adev);
        if (adev->asic_type >= CHIP_BONAIRE)
                pci_release_resource(adev->pdev, 2);

        pci_release_resource(adev->pdev, 0);

        r = pci_resize_resource(adev->pdev, 0, rbar_size);
        if (r == -ENOSPC)
                DRM_INFO("Not enough PCI address space for a large BAR.");
        else if (r && r != -ENOTSUPP)
                DRM_ERROR("Problem resizing BAR0 (%d).", r);

        pci_assign_unassigned_bus_resources(adev->pdev->bus);

        /* When the doorbell or fb BAR isn't available we have no chance of
         * using the device.
         */
        r = amdgpu_device_doorbell_init(adev);
        if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
                return -ENODEV;

        pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

        return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if post is needed or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
        uint32_t reg;

        if (amdgpu_sriov_vf(adev))
                return false;

        if (amdgpu_passthrough(adev)) {
                /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
                 * some old smc fw still needs the driver to do vPost, otherwise the gpu
                 * hangs. smc fw versions above 22.15 don't have this flaw, so we force
                 * vPost to be executed for smc versions below 22.15.
                 */
                if (adev->asic_type == CHIP_FIJI) {
                        int err;
                        uint32_t fw_ver;
                        err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
                        /* force vPost if an error occurred */
                        if (err)
                                return true;

                        fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
                        if (fw_ver < 0x00160e00)
                                return true;
                }
        }

        if (adev->has_hw_reset) {
                adev->has_hw_reset = false;
                return true;
        }

        /* bios scratch used on CIK+ */
        if (adev->asic_type >= CHIP_BONAIRE)
                return amdgpu_atombios_scratch_need_asic_init(adev);

        /* check MEM_SIZE for older asics */
        reg = amdgpu_asic_get_config_memsize(adev);

        if ((reg != 0) && (reg != 0xffffffff))
                return false;

        return true;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
        struct amdgpu_device *adev = cookie;
        amdgpu_asic_set_vga_state(adev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
        if (amdgpu_vm_block_size == -1)
                return;

        if (amdgpu_vm_block_size < 9) {
                dev_warn(adev->dev, "VM page table size (%d) too small\n",
                         amdgpu_vm_block_size);
                amdgpu_vm_block_size = -1;
        }
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
        /* no need to check the default value */
        if (amdgpu_vm_size == -1)
                return;

        if (amdgpu_vm_size < 1) {
                dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
                         amdgpu_vm_size);
                amdgpu_vm_size = -1;
        }
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
        struct sysinfo si;
        bool is_os_64 = (sizeof(void *) == 8);
        uint64_t total_memory;
        uint64_t dram_size_seven_GB = 0x1B8000000;
        uint64_t dram_size_three_GB = 0xB8000000;

        if (amdgpu_smu_memory_pool_size == 0)
                return;

        if (!is_os_64) {
                DRM_WARN("Not 64-bit OS, feature not supported\n");
                goto def_value;
        }
        si_meminfo(&si);
        total_memory = (uint64_t)si.totalram * si.mem_unit;

        if ((amdgpu_smu_memory_pool_size == 1) ||
            (amdgpu_smu_memory_pool_size == 2)) {
                if (total_memory < dram_size_three_GB)
                        goto def_value1;
        } else if ((amdgpu_smu_memory_pool_size == 4) ||
                   (amdgpu_smu_memory_pool_size == 8)) {
                if (total_memory < dram_size_seven_GB)
                        goto def_value1;
        } else {
                DRM_WARN("Smu memory pool size not supported\n");
                goto def_value;
        }
        adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

        return;

def_value1:
        DRM_WARN("Not enough system memory\n");
def_value:
        adev->pm.smu_prv_buffer_size = 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
        if (amdgpu_sched_jobs < 4) {
                dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = 4;
        } else if (!is_power_of_2(amdgpu_sched_jobs)) {
                dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
        }

        if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
                /* gart size must be greater or equal to 32M */
                dev_warn(adev->dev, "gart size (%d) too small\n",
                         amdgpu_gart_size);
                amdgpu_gart_size = -1;
        }

        if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
                /* gtt size must be greater or equal to 32M */
                dev_warn(adev->dev, "gtt size (%d) too small\n",
                         amdgpu_gtt_size);
                amdgpu_gtt_size = -1;
        }

        /* valid range is between 4 and 9 inclusive */
        if (amdgpu_vm_fragment_size != -1 &&
            (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
                dev_warn(adev->dev, "valid range is between 4 and 9\n");
                amdgpu_vm_fragment_size = -1;
        }

        if (amdgpu_sched_hw_submission < 2) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = 2;
        } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
        }

        amdgpu_device_check_smu_prv_buffer_size(adev);

        amdgpu_device_check_vm_size(adev);

        amdgpu_device_check_block_size(adev);

        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

        amdgpu_gmc_tmz_set(adev);

        return 0;
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        int r;

        if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
                return;

        if (state == VGA_SWITCHEROO_ON) {
                pr_info("switched on\n");
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

                pci_set_power_state(dev->pdev, PCI_D0);
                pci_restore_state(dev->pdev);
                r = pci_enable_device(dev->pdev);
                if (r)
                        DRM_WARN("pci_enable_device failed (%d)\n", r);
                amdgpu_device_resume(dev, true);

                dev->switch_power_state = DRM_SWITCH_POWER_ON;
                drm_kms_helper_poll_enable(dev);
        } else {
                pr_info("switched off\n");
                drm_kms_helper_poll_disable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                amdgpu_device_suspend(dev, true);
                pci_save_state(dev->pdev);
                /* Shut down the device */
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3cold);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        /*
         * FIXME: open_count is protected by drm_global_mutex but that would lead to
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
        return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
        .set_gpu_state = amdgpu_switcheroo_set_state,
        .reprobe = NULL,
        .can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
                                           enum amd_ip_block_type block_type,
                                           enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = dev;
        int i, r = 0;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type != block_type)
                        continue;
                if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
                        continue;
                r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
                        (void *)adev, state);
                if (r)
                        DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
        }
        return r;
}

/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
                                           enum amd_ip_block_type block_type,
                                           enum amd_powergating_state state)
{
        struct amdgpu_device *adev = dev;
        int i, r = 0;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type != block_type)
                        continue;
                if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
                        continue;
                r = adev->ip_blocks[i].version->funcs->set_powergating_state(
                        (void *)adev, state);
                if (r)
                        DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
        }
        return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
                                            u32 *flags)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
                        adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
        }
}

/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
                                   enum amd_ip_block_type block_type)
{
        int i, r;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type == block_type) {
                        r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
                        if (r)
                                return r;
                        break;
                }
        }
        return 0;
}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
                              enum amd_ip_block_type block_type)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type == block_type)
                        return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
        }
        return true;
}

/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
                              enum amd_ip_block_type type)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++)
                if (adev->ip_blocks[i].version->type == type)
                        return &adev->ip_blocks[i];

        return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
                                       enum amd_ip_block_type type,
                                       u32 major, u32 minor)
{
        struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

        if (ip_block && ((ip_block->version->major > major) ||
                         ((ip_block->version->major == major) &&
                          (ip_block->version->minor >= minor))))
                return 0;

        return 1;
}
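
/*
 * Illustrative sketch (editor's example, not part of the driver): a return
 * value of 0 means the IP block is present at the given version or newer,
 * e.g. to check for GFX 8.1 or later:
 *
 *   if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 1))
 *           DRM_INFO("GFX 8.1 or newer\n");
 */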

/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
                               const struct amdgpu_ip_block_version *ip_block_version)
{
        if (!ip_block_version)
                return -EINVAL;

        DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
                 ip_block_version->funcs->name);

        adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

        return 0;
}

/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
        adev->enable_virtual_display = false;

        if (amdgpu_virtual_display) {
                struct drm_device *ddev = adev->ddev;
                const char *pci_address_name = pci_name(ddev->pdev);
                char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

                pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
                pciaddstr_tmp = pciaddstr;
                while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
                        pciaddname = strsep(&pciaddname_tmp, ",");
                        if (!strcmp("all", pciaddname)
                            || !strcmp(pci_address_name, pciaddname)) {
                                long num_crtc;
                                int res = -1;

                                adev->enable_virtual_display = true;

                                if (pciaddname_tmp)
                                        res = kstrtol(pciaddname_tmp, 10,
                                                      &num_crtc);

                                if (!res) {
                                        if (num_crtc < 1)
                                                num_crtc = 1;
                                        if (num_crtc > 6)
                                                num_crtc = 6;
                                        adev->mode_info.num_crtc = num_crtc;
                                } else {
                                        adev->mode_info.num_crtc = 1;
                                }
                                break;
                        }
                }

                DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
                         amdgpu_virtual_display, pci_address_name,
                         adev->enable_virtual_display, adev->mode_info.num_crtc);

                kfree(pciaddstr);
        }
}

/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[40];
        int err;
        const struct gpu_info_firmware_header_v1_0 *hdr;

        adev->firmware.gpu_info_fw = NULL;

        if (adev->discovery_bin) {
                amdgpu_discovery_get_gfx_info(adev);

                /*
                 * FIXME: The bounding box is still needed by Navi12, so
                 * temporarily read it from gpu_info firmware. Should be dropped
                 * when DAL no longer needs it.
                 */
                if (adev->asic_type != CHIP_NAVI12)
                        return 0;
        }

        switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
        case CHIP_VERDE:
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_OLAND:
        case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
#endif
        case CHIP_TOPAZ:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_POLARIS10:
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_VEGA20:
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        default:
                return 0;
        case CHIP_VEGA10:
                chip_name = "vega10";
                break;
        case CHIP_VEGA12:
                chip_name = "vega12";
                break;
        case CHIP_RAVEN:
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        chip_name = "raven2";
                else if (adev->apu_flags & AMD_APU_IS_PICASSO)
                        chip_name = "picasso";
                else
                        chip_name = "raven";
                break;
        case CHIP_ARCTURUS:
                chip_name = "arcturus";
                break;
        case CHIP_RENOIR:
                chip_name = "renoir";
                break;
        case CHIP_NAVI10:
                chip_name = "navi10";
                break;
        case CHIP_NAVI14:
                chip_name = "navi14";
                break;
        case CHIP_NAVI12:
                chip_name = "navi12";
                break;
        }

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
        err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
        if (err) {
                dev_err(adev->dev,
                        "Failed to load gpu_info firmware \"%s\"\n",
                        fw_name);
                goto out;
        }
        err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
        if (err) {
                dev_err(adev->dev,
                        "Failed to validate gpu_info firmware \"%s\"\n",
                        fw_name);
                goto out;
        }

        hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
        amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

        switch (hdr->version_major) {
        case 1:
        {
                const struct gpu_info_firmware_v1_0 *gpu_info_fw =
                        (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));

                /*
                 * Should be dropped when DAL no longer needs it.
                 */
                if (adev->asic_type == CHIP_NAVI12)
                        goto parse_soc_bounding_box;

                adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
                adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
                adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
                adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
                adev->gfx.config.max_texture_channel_caches =
                        le32_to_cpu(gpu_info_fw->gc_num_tccs);
                adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
                adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
                adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
                adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
                adev->gfx.config.double_offchip_lds_buf =
                        le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
                adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
                adev->gfx.cu_info.max_waves_per_simd =
                        le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
                adev->gfx.cu_info.max_scratch_slots_per_cu =
                        le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
                adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
                if (hdr->version_minor >= 1) {
                        const struct gpu_info_firmware_v1_1 *gpu_info_fw =
                                (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
                                        le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        adev->gfx.config.num_sc_per_sh =
                                le32_to_cpu(gpu_info_fw->num_sc_per_sh);
                        adev->gfx.config.num_packer_per_sc =
                                le32_to_cpu(gpu_info_fw->num_packer_per_sc);
                }

parse_soc_bounding_box:
                /*
                 * soc bounding box info is not integrated in the discovery table,
                 * we always need to parse it from the gpu info firmware if needed.
                 */
                if (hdr->version_minor == 2) {
                        const struct gpu_info_firmware_v1_2 *gpu_info_fw =
                                (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
                                        le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
                }
                break;
        }
        default:
                dev_err(adev->dev,
                        "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
                err = -EINVAL;
                goto out;
        }
out:
        return err;
}

/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run. This
 * is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
        int i, r;

        amdgpu_device_enable_virtual_display(adev);

        if (amdgpu_sriov_vf(adev)) {
                r = amdgpu_virt_request_full_gpu(adev, true);
                if (r)
                        return r;
        }

        switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
        case CHIP_VERDE:
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_OLAND:
        case CHIP_HAINAN:
                adev->family = AMDGPU_FAMILY_SI;
                r = si_set_ip_blocks(adev);
                if (r)
                        return r;
                break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
                if (adev->flags & AMD_IS_APU)
                        adev->family = AMDGPU_FAMILY_KV;
                else
                        adev->family = AMDGPU_FAMILY_CI;

                r = cik_set_ip_blocks(adev);
                if (r)
                        return r;
                break;
#endif
        case CHIP_TOPAZ:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_POLARIS10:
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
                if (adev->flags & AMD_IS_APU)
                        adev->family = AMDGPU_FAMILY_CZ;
                else
                        adev->family = AMDGPU_FAMILY_VI;

                r = vi_set_ip_blocks(adev);
                if (r)
                        return r;
                break;
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_RAVEN:
        case CHIP_ARCTURUS:
        case CHIP_RENOIR:
                if (adev->flags & AMD_IS_APU)
                        adev->family = AMDGPU_FAMILY_RV;
                else
                        adev->family = AMDGPU_FAMILY_AI;

                r = soc15_set_ip_blocks(adev);
                if (r)
                        return r;
                break;
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
                adev->family = AMDGPU_FAMILY_NV;

                r = nv_set_ip_blocks(adev);
                if (r)
                        return r;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        amdgpu_amdkfd_device_probe(adev);

        adev->pm.pp_feature = amdgpu_pp_feature_mask;
        if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
                        DRM_ERROR("disabled ip block: %d <%s>\n",
                                  i, adev->ip_blocks[i].version->funcs->name);
                        adev->ip_blocks[i].status.valid = false;
                } else {
                        if (adev->ip_blocks[i].version->funcs->early_init) {
                                r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
                                if (r == -ENOENT) {
                                        adev->ip_blocks[i].status.valid = false;
                                } else if (r) {
                                        DRM_ERROR("early_init of IP block <%s> failed %d\n",
                                                  adev->ip_blocks[i].version->funcs->name, r);
                                        return r;
                                } else {
                                        adev->ip_blocks[i].status.valid = true;
                                }
                        } else {
                                adev->ip_blocks[i].status.valid = true;
                        }
                }
                /* get the vbios after the asic_funcs are set up */
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
                        r = amdgpu_device_parse_gpu_info_fw(adev);
                        if (r)
                                return r;

                        /* Read BIOS */
                        if (!amdgpu_get_bios(adev))
                                return -EINVAL;

                        r = amdgpu_atombios_init(adev);
                        if (r) {
                                dev_err(adev->dev, "amdgpu_atombios_init failed\n");
                                amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
                                return r;
                        }
                }
        }

        adev->cg_flags &= amdgpu_cg_mask;
        adev->pg_flags &= amdgpu_pg_mask;

        return 0;
}

static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.sw)
                        continue;
                if (adev->ip_blocks[i].status.hw)
                        continue;
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
                    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
                        r = adev->ip_blocks[i].version->funcs->hw_init(adev);
                        if (r) {
                                DRM_ERROR("hw_init of IP block <%s> failed %d\n",
                                          adev->ip_blocks[i].version->funcs->name, r);
                                return r;
                        }
                        adev->ip_blocks[i].status.hw = true;
                }
        }

        return 0;
}

static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.sw)
                        continue;
                if (adev->ip_blocks[i].status.hw)
                        continue;
                r = adev->ip_blocks[i].version->funcs->hw_init(adev);
                if (r) {
                        DRM_ERROR("hw_init of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
                        return r;
                }
                adev->ip_blocks[i].status.hw = true;
        }

        return 0;
}
1916
static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
{
	int r = 0;
	int i;
	uint32_t smu_version;

	if (adev->asic_type >= CHIP_VEGA10) {
		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
				continue;

			/* no need to do the fw loading again if already done */
			if (adev->ip_blocks[i].status.hw)
				break;

			if (adev->in_gpu_reset || adev->in_suspend) {
				r = adev->ip_blocks[i].version->funcs->resume(adev);
				if (r) {
					DRM_ERROR("resume of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			} else {
				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
				if (r) {
					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			}

			adev->ip_blocks[i].status.hw = true;
			break;
		}
	}

	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);

	return r;
}

/**
 * amdgpu_device_ip_init - run init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
 * are run. sw_init initializes the software state associated with each IP
 * and hw_init initializes the hardware associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	r = amdgpu_ras_init(adev);
	if (r)
		return r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			goto init_failed;
		}
		adev->ip_blocks[i].status.sw = true;

		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				goto init_failed;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				goto init_failed;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				goto init_failed;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
							       AMDGPU_GEM_DOMAIN_VRAM,
							       AMDGPU_CSA_SIZE);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					goto init_failed;
				}
			}
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_init_data_exchange(adev);

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto init_failed;
	}

	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase1(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase2(adev);
	if (r)
		goto init_failed;

	/*
	 * Retired pages will be loaded from the EEPROM and reserved here.
	 * This must be called after amdgpu_device_ip_hw_init_phase2 since,
	 * on some ASICs, the RAS EEPROM code relies on the SMU being fully
	 * functional for I2C communication, which is only true at this point.
	 * recovery_init may fail, but it can free all resources allocated by
	 * itself and its failure should not stop the amdgpu init process.
	 *
	 * Note: theoretically, this should be called before all VRAM
	 * allocations, to protect retired pages from being reused.
	 */
	amdgpu_ras_recovery_init(adev);

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_add_device(adev);
	amdgpu_amdkfd_device_init(adev);

	amdgpu_fru_get_product_info(adev);

init_failed:
	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	return r;
}

/**
 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
 *
 * @adev: amdgpu_device pointer
 *
 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
 * this function before a GPU reset. If the value is retained after a
 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
 */
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
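	/* snapshot the first AMDGPU_RESET_MAGIC_NUM bytes of the GART table */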
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

/**
 * amdgpu_device_check_vram_lost - check if vram is valid
 *
 * @adev: amdgpu_device pointer
 *
 * Checks the reset magic value written to the gart pointer in VRAM.
 * The driver calls this after a GPU reset to see whether the contents
 * of VRAM were lost or not.
 * Returns true if vram is lost, false if not.
 */
static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	if (memcmp(adev->gart.ptr, adev->reset_magic,
		   AMDGPU_RESET_MAGIC_NUM))
		return true;

	if (!adev->in_gpu_reset)
		return false;

	/*
	 * For all ASICs with baco/mode1 reset, the VRAM is
	 * always assumed to be lost.
	 */
	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_BACO:
	case AMD_RESET_METHOD_MODE1:
		return true;
	default:
		return false;
	}
}

/**
 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
 *
 * @adev: amdgpu_device pointer
 * @state: clockgating state (gate or ungate)
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * set_clockgating_state callbacks are run.
 * The late init pass enables clockgating for hardware IPs; the fini and
 * suspend passes disable it again.
 * Returns 0 on success, negative error code on failure.
 */

static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
				      enum amd_clockgating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

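	/* gate in normal IP order, ungate in reverse order */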
	for (j = 0; j < adev->num_ip_blocks; j++) {
		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip CG for UVD/VCE/VCN/JPEG, they are handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     state);
			if (r) {
				DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	/* gate in normal IP order, ungate in reverse order */
	for (j = 0; j < adev->num_ip_blocks; j++) {
		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip PG for UVD/VCE/VCN/JPEG, they are handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
			/* enable powergating to save power */
			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
										     state);
			if (r) {
				DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

static int amdgpu_device_enable_mgpu_fan_boost(void)
{
	struct amdgpu_gpu_instance *gpu_ins;
	struct amdgpu_device *adev;
	int i, ret = 0;

	mutex_lock(&mgpu_info.mutex);

	/*
	 * MGPU fan boost feature should be enabled
	 * only when there are two or more dGPUs in
	 * the system
	 */
	if (mgpu_info.num_dgpu < 2)
		goto out;

	for (i = 0; i < mgpu_info.num_dgpu; i++) {
		gpu_ins = &(mgpu_info.gpu_ins[i]);
		adev = gpu_ins->adev;
		if (!(adev->flags & AMD_IS_APU) &&
		    !gpu_ins->mgpu_fan_enabled &&
		    adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
			if (ret)
				break;

			gpu_ins->mgpu_fan_enabled = 1;
		}
	}

out:
	mutex_unlock(&mgpu_info.mutex);

	return ret;
}

/**
 * amdgpu_device_ip_late_init - run late init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Late initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the late_init callbacks are run.
 * late_init covers any special initialization that an IP requires
 * after all of the IPs have been initialized or something that needs to
 * happen late in the init process.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
		adev->ip_blocks[i].status.late_initialized = true;
	}

	amdgpu_ras_set_error_query_ready(adev, true);

	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);

	amdgpu_device_fill_reset_magic(adev);

	r = amdgpu_device_enable_mgpu_fan_boost();
	if (r)
		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		mutex_lock(&mgpu_info.mutex);

		/*
		 * Reset device p-state to low as this was booted with high.
		 *
		 * This should be performed only after all devices from the same
		 * hive get initialized.
		 *
		 * However, it's unknown in advance how many devices are in the
		 * hive, as they are counted one by one during device init.
		 *
		 * So, we wait until all XGMI interlinked devices are initialized.
		 * This may bring some delays as those devices may come from
		 * different hives. But that should be OK.
		 */
		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
			for (i = 0; i < mgpu_info.num_gpu; i++) {
				gpu_instance = &(mgpu_info.gpu_ins[i]);
				if (gpu_instance->adev->flags & AMD_IS_APU)
					continue;

				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
							   AMDGPU_XGMI_PSTATE_MIN);
				if (r) {
					DRM_ERROR("pstate setting failed (%d).\n", r);
					break;
				}
			}
		}

		mutex_unlock(&mgpu_info.mutex);
	}

	return 0;
}

/**
 * amdgpu_device_ip_fini - run fini for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main teardown pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
 * are run. hw_fini tears down the hardware associated with each IP
 * and sw_fini tears down any software state associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
		amdgpu_virt_release_ras_err_handler_data(adev);

	amdgpu_ras_pre_fini(adev);

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_remove_device(adev);

	amdgpu_amdkfd_device_fini(adev);

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_ucode_free_bo(adev);
			amdgpu_free_static_csa(&adev->virt.csa_obj);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
			amdgpu_ib_pool_fini(adev);
		}

		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	amdgpu_ras_fini(adev);

	if (amdgpu_sriov_vf(adev))
		if (amdgpu_virt_release_full_gpu(adev, false))
			DRM_ERROR("failed to release exclusive mode on fini\n");

	return 0;
}

/**
 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
 *
 * @work: work_struct.
 */
static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, delayed_init_work.work);
	int r;

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);
}

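/*
 * Delayed-work handler that actually enables GFXOFF once no holder of a
 * gfx_off request reference remains; the request count itself is managed
 * by amdgpu_gfx_off_ctrl().
 */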
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);

	mutex_lock(&adev->gfx.gfx_off_mutex);
	if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
		if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
			adev->gfx.gfx_off_state = true;
	}
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}

/**
 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run. suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Phase 1 suspends only the display (DCE) blocks; everything else is
 * handled in phase 2.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		/* displays are handled separately */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
			continue;

		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}

		adev->ip_blocks[i].status.hw = false;
	}

	return 0;
}

/**
 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run. suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Phase 2 suspends everything except the display (DCE) blocks, which were
 * already handled in phase 1.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* displays are handled in phase1 */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
			continue;
		/* PSP lost connection when err_event_athub occurs */
		if (amdgpu_ras_intr_triggered() &&
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			adev->ip_blocks[i].status.hw = false;
			continue;
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.hw = false;
		/* handle putting the SMC in the appropriate state */
		if (!amdgpu_sriov_vf(adev)) {
			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
				if (r) {
					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
						  adev->mp1_state, r);
					return r;
				}
			}
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_suspend - run suspend for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run. suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	r = amdgpu_device_ip_suspend_phase1(adev);
	if (r)
		return r;
	r = amdgpu_device_ip_suspend_phase2(adev);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return r;
}

static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
	int i, r;

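	/*
	 * After a VF function-level reset the basic blocks must come back
	 * in a fixed order: GMC first so GPU memory is usable again, then
	 * COMMON, PSP and IH.
	 */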
	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_PSP,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < adev->num_ip_blocks; i++)
		adev->ip_blocks[i].status.hw = false;

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
			if (r)
				return r;
			block->status.hw = true;
		}
	}

	return 0;
}

static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_UVD,
		AMD_IP_BLOCK_TYPE_VCE,
		AMD_IP_BLOCK_TYPE_VCN
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid ||
			    block->status.hw)
				continue;

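			/* the SMC is resumed rather than fully re-initialized here */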
			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
				r = block->version->funcs->resume(adev);
			else
				r = block->version->funcs->hw_init(adev);

			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
			if (r)
				return r;
			block->status.hw = true;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * First resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * COMMON, GMC, and IH. resume puts the hardware into a functional state
 * after a suspend and updates the software state as necessary. This
 * function is also used for restoring the GPU after a GPU reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {

			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Second resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
 * functional state after a suspend and updates the software state as
 * necessary. This function is also used for restoring the GPU after a GPU
 * reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main resume function for hardware IPs. The hardware IPs
 * are split into two resume functions because they are
 * also used in recovering from a GPU reset and some additional
 * steps need to be taken between them. In this case (S3/S4) they are
 * run sequentially.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_ip_resume_phase1(adev);
	if (r)
		return r;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		return r;

	r = amdgpu_device_ip_resume_phase2(adev);

	return r;
}

/**
 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Query the VBIOS data tables to determine if the board supports SR-IOV.
 */
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		if (adev->is_atom_fw) {
			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		} else {
			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		}

		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
	}
}

/**
 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
 *
 * @asic_type: AMD asic type
 *
 * Check if there is DC (new modesetting infrastructure) support for an asic.
 * Returns true if DC has support, false if not.
 */
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
	switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
	case CHIP_BONAIRE:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		/*
		 * We have systems in the wild with these ASICs that require
		 * LVDS and VGA support which is not supported with DC.
		 *
		 * Fallback to the non-DC driver here by default so as not to
		 * cause regressions.
		 */
		return amdgpu_dc > 0;
	case CHIP_HAWAII:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_RENOIR:
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		return amdgpu_dc != 0;
#endif
	default:
		if (amdgpu_dc > 0)
			DRM_INFO("Display Core has been requested via kernel parameter "
				 "but isn't supported by ASIC, ignoring\n");
		return false;
	}
}

/**
 * amdgpu_device_has_dc_support - check if dc is supported
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true for supported, false for not supported
 */
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	return amdgpu_device_asic_has_dc_support(adev->asic_type);
}

static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
{
	struct amdgpu_device *adev =
		container_of(__work, struct amdgpu_device, xgmi_reset_work);
	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);

	/* It's a bug to not have a hive within this function */
	if (WARN_ON(!hive))
		return;

	/*
	 * Use task barrier to synchronize all xgmi reset works across the
	 * hive. task_barrier_enter and task_barrier_exit will block
	 * until all the threads running the xgmi reset works reach
	 * those points. task_barrier_full will do both blocks.
	 */
	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {

		task_barrier_enter(&hive->tb);
		adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);

		if (adev->asic_reset_res)
			goto fail;

		task_barrier_exit(&hive->tb);
		adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);

		if (adev->asic_reset_res)
			goto fail;

		if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
			adev->mmhub.funcs->reset_ras_error_count(adev);
	} else {

		task_barrier_full(&hive->tb);
		adev->asic_reset_res = amdgpu_asic_reset(adev);
	}

fail:
	if (adev->asic_reset_res)
		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
			 adev->asic_reset_res, adev->ddev->unique);
}

static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
{
	char *input = amdgpu_lockup_timeout;
	char *timeout_setting = NULL;
	int index = 0;
	long timeout;
	int ret = 0;

	/*
	 * By default, the timeout for non-compute jobs is 10000 ms and
	 * there is no timeout enforced on compute jobs. In SR-IOV or
	 * passthrough mode, the timeout for compute jobs is 60000 ms by
	 * default.
	 */
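	/*
	 * Illustrative example (not from the original comments): booting
	 * with amdgpu.lockup_timeout=10000,60000,10000,10000 sets the gfx,
	 * compute, sdma and video timeouts in that order; a single value
	 * applies to all non-compute queues.
	 */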
	adev->gfx_timeout = msecs_to_jiffies(10000);
	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
		adev->compute_timeout = msecs_to_jiffies(60000);
	else
		adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;

	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
		while ((timeout_setting = strsep(&input, ",")) &&
		       strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
			ret = kstrtol(timeout_setting, 0, &timeout);
			if (ret)
				return ret;

			if (timeout == 0) {
				index++;
				continue;
			} else if (timeout < 0) {
				timeout = MAX_SCHEDULE_TIMEOUT;
			} else {
				timeout = msecs_to_jiffies(timeout);
			}

			switch (index++) {
			case 0:
				adev->gfx_timeout = timeout;
				break;
			case 1:
				adev->compute_timeout = timeout;
				break;
			case 2:
				adev->sdma_timeout = timeout;
				break;
			case 3:
				adev->video_timeout = timeout;
				break;
			default:
				break;
			}
		}
		/*
		 * There is only one value specified and
		 * it should apply to all non-compute jobs.
		 */
		if (index == 1) {
			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
				adev->compute_timeout = adev->gfx_timeout;
		}
	}

	return ret;
}

static const struct attribute *amdgpu_dev_attributes[] = {
	&dev_attr_product_name.attr,
	&dev_attr_product_number.attr,
	&dev_attr_serial_number.attr,
	&dev_attr_pcie_replay_count.attr,
	NULL
};

/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	bool boco = false;
	u32 max_MBps;

	adev->shutdown = false;
	adev->dev = &pdev->dev;
	adev->ddev = ddev;
	adev->pdev = pdev;
	adev->flags = flags;

	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
		adev->asic_type = amdgpu_force_asic_type;
	else
		adev->asic_type = flags & AMD_ASIC_MASK;

	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	if (amdgpu_emu_mode == 1)
		adev->usec_timeout *= 10;
	adev->gmc.gart_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_scheds = 0;
	adev->gmc.gmc_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->pciep_rreg = &amdgpu_invalid_rreg;
	adev->pciep_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization is done here so these functions can be
	 * called again later without locking issues */
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->firmware.mutex);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->gfx.pipe_reserve_mutex);
	mutex_init(&adev->gfx.gfx_off_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	mutex_init(&adev->virt.vf_errors.lock);
	hash_init(adev->mn_hash);
	mutex_init(&adev->lock_reset);
	mutex_init(&adev->psp.mutex);
	mutex_init(&adev->notifier_lock);

	r = amdgpu_device_check_arguments(adev);
	if (r)
		return r;

	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->se_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_DELAYED_WORK(&adev->delayed_init_work,
			  amdgpu_device_delayed_init_work_handler);
	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
			  amdgpu_device_delay_enable_gfx_off);

	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);

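	/* start with GFXOFF blocked; this initial request reference is
	 * released later by the PM code once GFXOFF can be safely enabled */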
	adev->gfx.gfx_off_req_count = 1;
	adev->pm.ac_power = power_supply_is_system_supplied() > 0;

	atomic_set(&adev->throttling_logging_enabled, 1);
	/*
	 * If throttling continues, logging will be performed every minute
	 * to avoid log flooding. "-1" is subtracted since the thermal
	 * throttling interrupt comes every second. Thus, the total logging
	 * interval is 59 seconds (ratelimited printk interval) + 1 second
	 * (waiting for the throttling interrupt) = 60 seconds.
	 */
	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL)
		return -ENOMEM;
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
			break;
		}
	}
	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR is not found.\n");

	/* enable PCIE atomic ops */
	r = pci_enable_atomic_ops_to_root(adev->pdev,
					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (r) {
		adev->have_atomics_support = false;
		DRM_INFO("PCIE atomic ops are not supported\n");
	} else {
		adev->have_atomics_support = true;
	}

	amdgpu_device_get_pcie_info(adev);

	if (amdgpu_mcbp)
		DRM_INFO("MCBP is enabled\n");

	if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
		adev->enable_mes = true;

	/* detect hw virtualization here */
	amdgpu_detect_virtualization(adev);

	r = amdgpu_device_get_job_timeout_settings(adev);
	if (r) {
		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
		return r;
	}

	/* early init functions */
	r = amdgpu_device_ip_early_init(adev);
	if (r)
		return r;

	/* doorbell bar mapping and doorbell index init */
	amdgpu_device_doorbell_init(adev);

	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);

	if (amdgpu_device_supports_boco(ddev))
		boco = true;
	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    !pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_register_client(adev->pdev,
					       &amdgpu_switcheroo_ops, boco);
	if (boco)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	if (amdgpu_emu_mode == 1) {
		/* post the asic on emulation mode */
		emu_soc_asic_init(adev);
		goto fence_driver_init;
	}

	/* detect if we are with an SRIOV vbios */
	amdgpu_device_detect_sriov_bios(adev);

	/* check if we need to reset the asic
	 *  E.g., driver was not cleanly unloaded previously, etc.
	 */
	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
		r = amdgpu_asic_reset(adev);
		if (r) {
			dev_err(adev->dev, "asic reset on init failed\n");
			goto failed;
		}
	}

	/* Post card if necessary */
	if (amdgpu_device_need_post(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			goto failed;
		}
	}

	if (adev->is_atom_fw) {
		/* Initialize clocks */
		r = amdgpu_atomfirmware_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
	} else {
		/* Initialize clocks */
		r = amdgpu_atombios_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
		/* init i2c buses */
		if (!amdgpu_device_has_dc_support(adev))
			amdgpu_atombios_i2c_init(adev);
	}

fence_driver_init:
	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev->ddev);

	r = amdgpu_device_ip_init(adev);
	if (r) {
		/* failed in exclusive mode due to timeout */
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    amdgpu_virt_mmio_blocked(adev) &&
		    !amdgpu_virt_wait_reset(adev)) {
			dev_err(adev->dev, "VF exclusive mode timeout\n");
			/* Don't send request since VF is inactive. */
			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
			adev->virt.ops = NULL;
			r = -EAGAIN;
			goto failed;
		}
		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
		goto failed;
	}

	dev_info(adev->dev,
		 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
		 adev->gfx.config.max_shader_engines,
		 adev->gfx.config.max_sh_per_se,
		 adev->gfx.config.max_cu_per_sh,
		 adev->gfx.cu_info.number);

	adev->accel_working = true;

	amdgpu_vm_check_compute_bug(adev);

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

	amdgpu_fbdev_init(adev);

	r = amdgpu_pm_sysfs_init(adev);
	if (r) {
		adev->pm_sysfs_en = false;
		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
	} else
		adev->pm_sysfs_en = true;

	r = amdgpu_ucode_sysfs_init(adev);
	if (r) {
		adev->ucode_sysfs_en = false;
		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
	} else
		adev->ucode_sysfs_en = true;

	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/*
	 * Register the gpu instance before amdgpu_device_enable_mgpu_fan_boost.
	 * Otherwise the mgpu fan boost feature would be skipped because the
	 * gpu instance count would still be too low.
	 */
	amdgpu_register_gpu_instance(adev);

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_device_ip_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
		goto failed;
	}

	/* must succeed. */
	amdgpu_ras_resume(adev);

	queue_delayed_work(system_wq, &adev->delayed_init_work,
			   msecs_to_jiffies(AMDGPU_RESUME_MS));

	if (amdgpu_sriov_vf(adev))
		flush_delayed_work(&adev->delayed_init_work);

	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
	if (r) {
		dev_err(adev->dev, "Could not create amdgpu device attr\n");
		return r;
	}

	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
		r = amdgpu_pmu_init(adev);
		if (r)
			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}

	return 0;

failed:
	amdgpu_vf_error_trans_all(adev);
	if (boco)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);

	return r;
}

/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	flush_delayed_work(&adev->delayed_init_work);
	adev->shutdown = true;

	/* make sure IB tests have finished before entering exclusive mode
	 * to avoid preemption on IB test
	 */
	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	/* disable all interrupts */
	amdgpu_irq_disable_all(adev);
	if (adev->mode_info.mode_config_initialized) {
		if (!amdgpu_device_has_dc_support(adev))
			drm_helper_force_disable_all(adev->ddev);
		else
			drm_atomic_helper_shutdown(adev->ddev);
	}
	amdgpu_fence_driver_fini(adev);
	if (adev->pm_sysfs_en)
		amdgpu_pm_sysfs_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_device_ip_fini(adev);
	release_firmware(adev->firmware.gpu_info_fw);
	adev->firmware.gpu_info_fw = NULL;
	adev->accel_working = false;
	/* free i2c buses */
	if (!amdgpu_device_has_dc_support(adev))
		amdgpu_i2c_fini(adev);

	if (amdgpu_emu_mode != 1)
		amdgpu_atombios_fini(adev);

	kfree(adev->bios);
	adev->bios = NULL;
	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    !pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_unregister_client(adev->pdev);
	if (amdgpu_device_supports_boco(adev->ddev))
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	amdgpu_device_doorbell_fini(adev);

	if (adev->ucode_sysfs_en)
		amdgpu_ucode_sysfs_fini(adev);

	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
	if (IS_ENABLED(CONFIG_PERF_EVENTS))
		amdgpu_pmu_fini(adev);
	if (adev->discovery_bin)
		amdgpu_discovery_fini(adev);
}

/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @fbcon : notify the fbdev of suspend
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int r;

	if (dev == NULL || dev->dev_private == NULL)
		return -ENODEV;

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	adev->in_suspend = true;
	drm_kms_helper_poll_disable(dev);

	if (fbcon)
		amdgpu_fbdev_set_suspend(adev, 1);

	cancel_delayed_work_sync(&adev->delayed_init_work);

	if (!amdgpu_device_has_dc_support(adev)) {
		/* turn off display hw */
		drm_modeset_lock_all(dev);
		drm_connector_list_iter_begin(dev, &iter);
		drm_for_each_connector_iter(connector, &iter)
			drm_helper_connector_dpms(connector,
						  DRM_MODE_DPMS_OFF);
		drm_connector_list_iter_end(&iter);
		drm_modeset_unlock_all(dev);
		/* unpin the front buffers and cursors */
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
			struct drm_framebuffer *fb = crtc->primary->fb;
			struct amdgpu_bo *robj;

			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
				r = amdgpu_bo_reserve(aobj, true);
				if (r == 0) {
					amdgpu_bo_unpin(aobj);
					amdgpu_bo_unreserve(aobj);
				}
			}

			if (fb == NULL || fb->obj[0] == NULL)
				continue;

			robj = gem_to_amdgpu_bo(fb->obj[0]);
			/* don't unpin kernel fb objects */
			if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
				r = amdgpu_bo_reserve(robj, true);
				if (r == 0) {
					amdgpu_bo_unpin(robj);
					amdgpu_bo_unreserve(robj);
				}
			}
		}
	}

	amdgpu_ras_suspend(adev);

	r = amdgpu_device_ip_suspend_phase1(adev);

	amdgpu_amdkfd_suspend(adev, !fbcon);

	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_device_ip_suspend_phase2(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	return 0;
}

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @fbcon : notify the fbdev of resume
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r = 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* post card */
	if (amdgpu_device_need_post(adev)) {
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r)
			DRM_ERROR("amdgpu asic init failed\n");
	}

	r = amdgpu_device_ip_resume(adev);
	if (r) {
		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
		return r;
	}
	amdgpu_fence_driver_resume(adev);

	r = amdgpu_device_ip_late_init(adev);
	if (r)
		return r;

	queue_delayed_work(system_wq, &adev->delayed_init_work,
			   msecs_to_jiffies(AMDGPU_RESUME_MS));

	if (!amdgpu_device_has_dc_support(adev)) {
		/* pin cursors */
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
				r = amdgpu_bo_reserve(aobj, true);
				if (r == 0) {
					r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
					if (r != 0)
						DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
					amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
					amdgpu_bo_unreserve(aobj);
				}
			}
		}
	}
	r = amdgpu_amdkfd_resume(adev, !fbcon);
	if (r)
		return r;

	/* Make sure IB tests flushed */
	flush_delayed_work(&adev->delayed_init_work);

	/* blat the mode back in */
	if (fbcon) {
		if (!amdgpu_device_has_dc_support(adev)) {
			/* pre DCE11 */
			drm_helper_resume_force_mode(dev);

			/* turn on display hw */
			drm_modeset_lock_all(dev);

			drm_connector_list_iter_begin(dev, &iter);
			drm_for_each_connector_iter(connector, &iter)
				drm_helper_connector_dpms(connector,
							  DRM_MODE_DPMS_ON);
			drm_connector_list_iter_end(&iter);

			drm_modeset_unlock_all(dev);
		}
		amdgpu_fbdev_set_suspend(adev, 0);
	}

	drm_kms_helper_poll_enable(dev);

	amdgpu_ras_resume(adev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	if (!amdgpu_device_has_dc_support(adev))
		drm_helper_hpd_irq_event(dev);
	else
		drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif
	adev->in_suspend = false;

	return 0;
}

/**
 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and
 * the check_soft_reset callbacks are run. check_soft_reset determines
 * if the asic is still hung or not.
 * Returns true if any of the IPs are still in a hung state, false if not.
 */
static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	if (amdgpu_sriov_vf(adev))
		return true;

	if (amdgpu_asic_need_full_reset(adev))
		return true;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}

/**
 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
 * handles any IP specific hardware or software state changes that are
 * necessary for a soft reset to succeed.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
 *
 * @adev: amdgpu_device pointer
 *
 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
 * reset is necessary to recover.
 * Returns true if a full asic reset is required, false if not.
 */
static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	if (amdgpu_asic_need_full_reset(adev))
		return true;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some blocks need a full reset!\n");
				return true;
			}
		}
	}
	return false;
}

/**
 * amdgpu_device_ip_soft_reset - do a soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * soft_reset callbacks are run if the block is hung. soft_reset handles any
 * IP specific hardware or software state changes that are necessary to soft
 * reset the IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

3753/**
3754 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3755 *
3756 * @adev: amdgpu_device pointer
3757 *
3758 * The list of all the hardware IPs that make up the asic is walked and the
 * post_soft_reset callbacks are run if the block was hung. post_soft_reset
3760 * handles any IP specific hardware or software state changes that are
3761 * necessary after the IP has been soft reset.
3762 * Returns 0 on success, negative error code on failure.
3763 */
3764static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
3765{
3766 int i, r = 0;
3767
3768 for (i = 0; i < adev->num_ip_blocks; i++) {
3769 if (!adev->ip_blocks[i].status.valid)
3770 continue;
3771 if (adev->ip_blocks[i].status.hang &&
3772 adev->ip_blocks[i].version->funcs->post_soft_reset)
3773 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
3774 if (r)
3775 return r;
3776 }
3777
3778 return 0;
3779}
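
/*
 * These helpers are meant to run as a sequence. The reset path below
 * (amdgpu_device_pre_asic_reset()) composes them roughly like this:
 *
 *	amdgpu_device_ip_pre_soft_reset(adev);
 *	r = amdgpu_device_ip_soft_reset(adev);
 *	amdgpu_device_ip_post_soft_reset(adev);
 *	if (r || amdgpu_device_ip_check_soft_reset(adev))
 *		need_full_reset = true;	/* still hung, do a full reset */
 */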
3780
3781/**
3782 * amdgpu_device_recover_vram - Recover some VRAM contents
3783 *
3784 * @adev: amdgpu_device pointer
3785 *
3786 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
3787 * restore things like GPUVM page tables after a GPU reset where
3788 * the contents of VRAM might be lost.
3789 *
3790 * Returns:
3791 * 0 on success, negative error code on failure.
3792 */
3793static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
3794{
3795 struct dma_fence *fence = NULL, *next = NULL;
3796 struct amdgpu_bo *shadow;
3797 long r = 1, tmo;
3798
3799 if (amdgpu_sriov_runtime(adev))
3800 tmo = msecs_to_jiffies(8000);
3801 else
3802 tmo = msecs_to_jiffies(100);
3803
3804 DRM_INFO("recover vram bo from shadow start\n");
3805 mutex_lock(&adev->shadow_list_lock);
3806 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
3807
3808 /* No need to recover an evicted BO */
3809 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
3810 shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
3811 shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
3812 continue;
3813
3814 r = amdgpu_bo_restore_shadow(shadow, &next);
3815 if (r)
3816 break;
3817
3818 if (fence) {
3819 tmo = dma_fence_wait_timeout(fence, false, tmo);
3820 dma_fence_put(fence);
3821 fence = next;
3822 if (tmo == 0) {
3823 r = -ETIMEDOUT;
3824 break;
3825 } else if (tmo < 0) {
3826 r = tmo;
3827 break;
3828 }
3829 } else {
3830 fence = next;
3831 }
3832 }
3833 mutex_unlock(&adev->shadow_list_lock);
3834
3835 if (fence)
3836 tmo = dma_fence_wait_timeout(fence, false, tmo);
3837 dma_fence_put(fence);
3838
3839 if (r < 0 || tmo <= 0) {
3840 DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
3841 return -EIO;
3842 }
3843
3844 DRM_INFO("recover vram bo from shadow done\n");
3845 return 0;
3846}
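
/*
 * Note the pipelining in the loop above: the restore for shadow N is
 * issued before waiting on the fence of shadow N-1, so the copy engine
 * stays busy while the CPU waits. The timeout budget (tmo) is the
 * remaining time returned by dma_fence_wait_timeout() and is carried
 * from one wait to the next, bounding the total recovery time rather
 * than the per-buffer time.
 */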
3847
3848
3849/**
3850 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
3851 *
3852 * @adev: amdgpu device pointer
3853 * @from_hypervisor: request from hypervisor
3854 *
 *
 * Do a VF FLR and reinitialize the ASIC.
 * Returns 0 on success, negative error code on failure.
3857 */
3858static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3859 bool from_hypervisor)
3860{
3861 int r;
3862
3863 if (from_hypervisor)
3864 r = amdgpu_virt_request_full_gpu(adev, true);
3865 else
3866 r = amdgpu_virt_reset_gpu(adev);
3867 if (r)
3868 return r;
3869
3870 amdgpu_amdkfd_pre_reset(adev);
3871
3872 /* Resume IP prior to SMC */
3873 r = amdgpu_device_ip_reinit_early_sriov(adev);
3874 if (r)
3875 goto error;
3876
3877 amdgpu_virt_init_data_exchange(adev);
	/* we need to recover the GART prior to resuming SMC/CP/SDMA */
3879 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
3880
3881 r = amdgpu_device_fw_loading(adev);
3882 if (r)
		goto error;
3884
3885 /* now we are okay to resume SMC/CP/SDMA */
3886 r = amdgpu_device_ip_reinit_late_sriov(adev);
3887 if (r)
3888 goto error;
3889
3890 amdgpu_irq_gpu_reset_resume_helper(adev);
3891 r = amdgpu_ib_ring_tests(adev);
3892 amdgpu_amdkfd_post_reset(adev);
3893
3894error:
3895 amdgpu_virt_release_full_gpu(adev, true);
3896 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3897 amdgpu_inc_vram_lost(adev);
3898 r = amdgpu_device_recover_vram(adev);
3899 }
3900
3901 return r;
3902}
3903
3904/**
3905 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
3906 *
3907 * @adev: amdgpu device pointer
3908 *
3909 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
3910 * a hung GPU.
3911 */
3912bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
3913{
3914 if (!amdgpu_device_ip_check_soft_reset(adev)) {
3915 DRM_INFO("Timeout, but no hardware hang detected.\n");
3916 return false;
3917 }
3918
3919 if (amdgpu_gpu_recovery == 0)
3920 goto disabled;
3921
3922 if (amdgpu_sriov_vf(adev))
3923 return true;
3924
3925 if (amdgpu_gpu_recovery == -1) {
3926 switch (adev->asic_type) {
3927 case CHIP_BONAIRE:
3928 case CHIP_HAWAII:
3929 case CHIP_TOPAZ:
3930 case CHIP_TONGA:
3931 case CHIP_FIJI:
3932 case CHIP_POLARIS10:
3933 case CHIP_POLARIS11:
3934 case CHIP_POLARIS12:
3935 case CHIP_VEGAM:
3936 case CHIP_VEGA20:
3937 case CHIP_VEGA10:
3938 case CHIP_VEGA12:
3939 case CHIP_RAVEN:
3940 case CHIP_ARCTURUS:
3941 case CHIP_RENOIR:
3942 case CHIP_NAVI10:
3943 case CHIP_NAVI14:
3944 case CHIP_NAVI12:
3945 case CHIP_SIENNA_CICHLID:
3946 break;
3947 default:
3948 goto disabled;
3949 }
3950 }
3951
3952 return true;
3953
3954disabled:
3955 DRM_INFO("GPU recovery disabled.\n");
3956 return false;
3957}
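
/*
 * Sketch of the typical caller (the job timeout handler): recovery is
 * only attempted when this check passes, otherwise the scheduler timeout
 * is simply pushed out.
 *
 *	if (amdgpu_device_should_recover_gpu(ring->adev))
 *		amdgpu_device_gpu_recover(ring->adev, job);
 *	else
 *		drm_sched_suspend_timeout(&ring->sched);
 *
 * amdgpu_gpu_recovery is a module parameter: 0 disables recovery, 1
 * enables it everywhere, and -1 (auto, the default) enables it only on
 * the ASICs listed above.
 */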
3958
3959
3960static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
3961 struct amdgpu_job *job,
3962 bool *need_full_reset_arg)
3963{
3964 int i, r = 0;
3965 bool need_full_reset = *need_full_reset_arg;
3966
3967 amdgpu_debugfs_wait_dump(adev);
3968
3969 /* block all schedulers and reset given job's ring */
3970 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3971 struct amdgpu_ring *ring = adev->rings[i];
3972
3973 if (!ring || !ring->sched.thread)
3974 continue;
3975
		/* after all hw jobs are reset, hw fences are meaningless, so force completion */
3977 amdgpu_fence_driver_force_completion(ring);
3978 }
3979
	if (job)
3981 drm_sched_increase_karma(&job->base);
3982
3983 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
3984 if (!amdgpu_sriov_vf(adev)) {
3985
3986 if (!need_full_reset)
3987 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
3988
3989 if (!need_full_reset) {
3990 amdgpu_device_ip_pre_soft_reset(adev);
3991 r = amdgpu_device_ip_soft_reset(adev);
3992 amdgpu_device_ip_post_soft_reset(adev);
3993 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
3994 DRM_INFO("soft reset failed, will fallback to full reset!\n");
3995 need_full_reset = true;
3996 }
3997 }
3998
3999 if (need_full_reset)
4000 r = amdgpu_device_ip_suspend(adev);
4001
4002 *need_full_reset_arg = need_full_reset;
4003 }
4004
4005 return r;
4006}
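
/*
 * drm_sched_increase_karma() above bumps the offending job's karma
 * counter; once karma exceeds the scheduler's hang limit, the owning
 * context is marked guilty and its subsequent submissions are rejected,
 * which is how a process that repeatedly hangs the GPU gets fenced off.
 */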
4007
4008static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
4009 struct list_head *device_list_handle,
4010 bool *need_full_reset_arg)
4011{
4012 struct amdgpu_device *tmp_adev = NULL;
4013 bool need_full_reset = *need_full_reset_arg, vram_lost = false;
4014 int r = 0;
4015
4016 /*
	 * ASIC reset has to be done on all XGMI hive nodes ASAP
	 * to allow proper link negotiation in FW (within 1 sec)
4019 */
4020 if (need_full_reset) {
4021 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4022 /* For XGMI run all resets in parallel to speed up the process */
4023 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4024 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4025 r = -EALREADY;
4026 } else
4027 r = amdgpu_asic_reset(tmp_adev);
4028
4029 if (r) {
4030 DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
4031 r, tmp_adev->ddev->unique);
4032 break;
4033 }
4034 }
4035
		/* For XGMI wait for all resets to complete before proceeding */
4037 if (!r) {
4038 list_for_each_entry(tmp_adev, device_list_handle,
4039 gmc.xgmi.head) {
4040 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4041 flush_work(&tmp_adev->xgmi_reset_work);
4042 r = tmp_adev->asic_reset_res;
4043 if (r)
4044 break;
4045 }
4046 }
4047 }
4048 }
4049
4050 if (!r && amdgpu_ras_intr_triggered()) {
4051 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4052 if (tmp_adev->mmhub.funcs &&
4053 tmp_adev->mmhub.funcs->reset_ras_error_count)
4054 tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
4055 }
4056
4057 amdgpu_ras_intr_cleared();
4058 }
4059
4060 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4061 if (need_full_reset) {
4062 /* post card */
4063 if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
4064 DRM_WARN("asic atom init failed!");
4065
4066 if (!r) {
4067 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4068 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4069 if (r)
4070 goto out;
4071
4072 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4073 if (vram_lost) {
4074 DRM_INFO("VRAM is lost due to GPU reset!\n");
4075 amdgpu_inc_vram_lost(tmp_adev);
4076 }
4077
4078 r = amdgpu_gtt_mgr_recover(
4079 &tmp_adev->mman.bdev.man[TTM_PL_TT]);
4080 if (r)
4081 goto out;
4082
4083 r = amdgpu_device_fw_loading(tmp_adev);
4084 if (r)
4085 return r;
4086
4087 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4088 if (r)
4089 goto out;
4090
4091 if (vram_lost)
4092 amdgpu_device_fill_reset_magic(tmp_adev);
4093
4094 /*
				 * Add this ASIC back to the tracked list since
				 * the reset completed successfully.
4097 */
4098 amdgpu_register_gpu_instance(tmp_adev);
4099
4100 r = amdgpu_device_ip_late_init(tmp_adev);
4101 if (r)
4102 goto out;
4103
4104 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4105
4106 /* must succeed. */
4107 amdgpu_ras_resume(tmp_adev);
4108
4109 /* Update PSP FW topology after reset */
4110 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4111 r = amdgpu_xgmi_update_topology(hive, tmp_adev);
4112 }
4113 }
4114
4115
4116out:
4117 if (!r) {
4118 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4119 r = amdgpu_ib_ring_tests(tmp_adev);
4120 if (r) {
4121 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4122 r = amdgpu_device_ip_suspend(tmp_adev);
4123 need_full_reset = true;
4124 r = -EAGAIN;
4125 goto end;
4126 }
4127 }
4128
4129 if (!r)
4130 r = amdgpu_device_recover_vram(tmp_adev);
4131 else
4132 tmp_adev->asic_reset_res = r;
4133 }
4134
4135end:
4136 *need_full_reset_arg = need_full_reset;
4137 return r;
4138}
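
/*
 * The xgmi_reset_work items queued above perform the per-node ASIC reset
 * on system_unbound_wq, so the resets of all hive members start within
 * the firmware's link-negotiation window instead of being serialized on
 * this thread.
 */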
4139
4140static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
4141{
4142 if (trylock) {
4143 if (!mutex_trylock(&adev->lock_reset))
4144 return false;
4145 } else
4146 mutex_lock(&adev->lock_reset);
4147
4148 atomic_inc(&adev->gpu_reset_counter);
4149 adev->in_gpu_reset = true;
4150 switch (amdgpu_asic_reset_method(adev)) {
4151 case AMD_RESET_METHOD_MODE1:
4152 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4153 break;
4154 case AMD_RESET_METHOD_MODE2:
4155 adev->mp1_state = PP_MP1_STATE_RESET;
4156 break;
4157 default:
4158 adev->mp1_state = PP_MP1_STATE_NONE;
4159 break;
4160 }
4161
4162 return true;
4163}
4164
4165static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4166{
4167 amdgpu_vf_error_trans_all(adev);
4168 adev->mp1_state = PP_MP1_STATE_NONE;
4169 adev->in_gpu_reset = false;
4170 mutex_unlock(&adev->lock_reset);
4171}
4172
4173static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4174{
4175 struct pci_dev *p = NULL;
4176
4177 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4178 adev->pdev->bus->number, 1);
4179 if (p) {
4180 pm_runtime_enable(&(p->dev));
4181 pm_runtime_resume(&(p->dev));
4182 }
4183}
4184
4185static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4186{
4187 enum amd_reset_method reset_method;
4188 struct pci_dev *p = NULL;
4189 u64 expires;
4190
4191 /*
	 * For now, only BACO and mode1 reset are confirmed to
	 * suffer from audio issues when the audio device is not
	 * properly suspended beforehand.
4194 */
4195 reset_method = amdgpu_asic_reset_method(adev);
4196 if ((reset_method != AMD_RESET_METHOD_BACO) &&
4197 (reset_method != AMD_RESET_METHOD_MODE1))
4198 return -EINVAL;
4199
4200 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4201 adev->pdev->bus->number, 1);
4202 if (!p)
4203 return -ENODEV;
4204
4205 expires = pm_runtime_autosuspend_expiration(&(p->dev));
4206 if (!expires)
4207 /*
		 * If we cannot get the audio device autosuspend delay,
		 * fall back to a fixed 4s timeout. The audio controller's
		 * default autosuspend delay is 3s, so 4s is guaranteed
		 * to cover it.
4212 */
4213 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4214
4215 while (!pm_runtime_status_suspended(&(p->dev))) {
4216 if (!pm_runtime_suspend(&(p->dev)))
4217 break;
4218
4219 if (expires < ktime_get_mono_fast_ns()) {
4220 dev_warn(adev->dev, "failed to suspend display audio\n");
4221 /* TODO: abort the succeeding gpu reset? */
4222 return -ETIMEDOUT;
4223 }
4224 }
4225
4226 pm_runtime_disable(&(p->dev));
4227
4228 return 0;
4229}
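
/*
 * The HDMI/DP audio controller is exposed as PCI function 1 of the same
 * slot as the GPU (e.g. 0000:03:00.1 next to 0000:03:00.0), which is why
 * the lookups here and in amdgpu_device_resume_display_audio() pass 1 as
 * the devfn argument.
 */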
4230
4231/**
4232 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4233 *
4234 * @adev: amdgpu device pointer
 * @job: which job triggered the hang
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempt to do soft-reset or full-reset and reinitialize the ASIC.
 * Returns 0 for success or an error on failure.
4240 */
4242int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4243 struct amdgpu_job *job)
4244{
4245 struct list_head device_list, *device_list_handle = NULL;
4246 bool need_full_reset = false;
4247 bool job_signaled = false;
4248 struct amdgpu_hive_info *hive = NULL;
4249 struct amdgpu_device *tmp_adev = NULL;
4250 int i, r = 0;
4251 bool need_emergency_restart = false;
4252 bool audio_suspended = false;
4253
	/*
4255 * Special case: RAS triggered and full reset isn't supported
4256 */
4257 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4258
4259 /*
4260 * Flush RAM to disk so that after reboot
4261 * the user can read log and see why the system rebooted.
4262 */
4263 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4264 DRM_WARN("Emergency reboot.");
4265
4266 ksys_sync_helper();
4267 emergency_restart();
4268 }
4269
4270 dev_info(adev->dev, "GPU %s begin!\n",
4271 need_emergency_restart ? "jobs stop":"reset");
4272
4273 /*
	 * Here we trylock to avoid a chain of resets executing from either
	 * jobs triggered on different adevs in an XGMI hive or jobs on
	 * different schedulers for the same device while this TO handler
	 * is running. We always reset all schedulers for a device and all
	 * devices in an XGMI hive, so that should take care of them too.
4279 */
4280 hive = amdgpu_get_xgmi_hive(adev, true);
4281 if (hive && !mutex_trylock(&hive->reset_lock)) {
4282 DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4283 job ? job->base.id : -1, hive->hive_id);
4284 mutex_unlock(&hive->hive_lock);
4285 return 0;
4286 }
4287
4288 /*
4289 * Build list of devices to reset.
	 * In case we are in XGMI hive mode, re-sort the device list
	 * to put adev in the first position.
4292 */
4293 INIT_LIST_HEAD(&device_list);
4294 if (adev->gmc.xgmi.num_physical_nodes > 1) {
4295 if (!hive)
4296 return -ENODEV;
4297 if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
4298 list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
4299 device_list_handle = &hive->device_list;
4300 } else {
4301 list_add_tail(&adev->gmc.xgmi.head, &device_list);
4302 device_list_handle = &device_list;
4303 }
4304
4305 /* block all schedulers and reset given job's ring */
4306 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4307 if (!amdgpu_device_lock_adev(tmp_adev, !hive)) {
4308 DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
4309 job ? job->base.id : -1);
			if (hive)
				mutex_unlock(&hive->hive_lock);
4311 return 0;
4312 }
4313
4314 /*
		 * Try to put the audio codec into suspend state
		 * before the gpu reset starts.
		 *
		 * The power domain of the graphics device is shared
		 * with the AZ power domain. Without this, we may
		 * change the audio hardware from behind the audio
		 * driver's back and trigger audio codec errors.
4323 */
4324 if (!amdgpu_device_suspend_display_audio(tmp_adev))
4325 audio_suspended = true;
4326
4327 amdgpu_ras_set_error_query_ready(tmp_adev, false);
4328
4329 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
4330
4331 if (!amdgpu_sriov_vf(tmp_adev))
4332 amdgpu_amdkfd_pre_reset(tmp_adev);
4333
4334 /*
		 * Mark the ASICs to be reset as untracked first,
		 * and add them back after the reset completes.
4337 */
4338 amdgpu_unregister_gpu_instance(tmp_adev);
4339
4340 amdgpu_fbdev_set_suspend(tmp_adev, 1);
4341
4342 /* disable ras on ALL IPs */
4343 if (!need_emergency_restart &&
4344 amdgpu_device_ip_need_full_reset(tmp_adev))
4345 amdgpu_ras_suspend(tmp_adev);
4346
4347 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4348 struct amdgpu_ring *ring = tmp_adev->rings[i];
4349
4350 if (!ring || !ring->sched.thread)
4351 continue;
4352
4353 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
4354
4355 if (need_emergency_restart)
4356 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
4357 }
4358 }
4359
4360 if (need_emergency_restart)
4361 goto skip_sched_resume;
4362
4363 /*
4364 * Must check guilty signal here since after this point all old
4365 * HW fences are force signaled.
4366 *
4367 * job->base holds a reference to parent fence
4368 */
4369 if (job && job->base.s_fence->parent &&
4370 dma_fence_is_signaled(job->base.s_fence->parent)) {
4371 job_signaled = true;
4372 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
4373 goto skip_hw_reset;
4374 }
4375
4376retry: /* Rest of adevs pre asic reset from XGMI hive. */
4377 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4378 r = amdgpu_device_pre_asic_reset(tmp_adev,
4379 NULL,
4380 &need_full_reset);
		/* TODO: should we stop? */
4382 if (r) {
4383 DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
4384 r, tmp_adev->ddev->unique);
4385 tmp_adev->asic_reset_res = r;
4386 }
4387 }
4388
	/* Actual ASIC resets if needed. */
	/* TODO: implement XGMI hive reset logic for SR-IOV */
4391 if (amdgpu_sriov_vf(adev)) {
4392 r = amdgpu_device_reset_sriov(adev, job ? false : true);
4393 if (r)
4394 adev->asic_reset_res = r;
4395 } else {
4396 r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
		if (r == -EAGAIN)
4398 goto retry;
4399 }
4400
4401skip_hw_reset:
4402
	/* Post ASIC reset for all devs. */
4404 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4405
4406 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4407 struct amdgpu_ring *ring = tmp_adev->rings[i];
4408
4409 if (!ring || !ring->sched.thread)
4410 continue;
4411
			/* No point in resubmitting jobs if we didn't HW reset */
4413 if (!tmp_adev->asic_reset_res && !job_signaled)
4414 drm_sched_resubmit_jobs(&ring->sched);
4415
4416 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
4417 }
4418
4419 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
4420 drm_helper_resume_force_mode(tmp_adev->ddev);
4421 }
4422
4423 tmp_adev->asic_reset_res = 0;
4424
4425 if (r) {
			/* bad news, how do we tell this to userspace? */
4427 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
4428 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
4429 } else {
4430 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
4431 }
4432 }
4433
4434skip_sched_resume:
4435 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		/* unlock kfd: SR-IOV does it separately */
4437 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
4438 amdgpu_amdkfd_post_reset(tmp_adev);
4439 if (audio_suspended)
4440 amdgpu_device_resume_display_audio(tmp_adev);
4441 amdgpu_device_unlock_adev(tmp_adev);
4442 }
4443
4444 if (hive) {
4445 mutex_unlock(&hive->reset_lock);
4446 mutex_unlock(&hive->hive_lock);
4447 }
4448
4449 if (r)
4450 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
4451 return r;
4452}
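
/*
 * Lock ordering in the recovery path above: hive->hive_lock is taken by
 * amdgpu_get_xgmi_hive(), then hive->reset_lock, then each device's
 * lock_reset via amdgpu_device_lock_adev(). Release happens in reverse
 * nesting order: the per-device locks first, then the hive locks.
 */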
4453
4454/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
4456 *
4457 * @adev: amdgpu_device pointer
4458 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
4460 * and lanes) of the slot the device is in. Handles APUs and
4461 * virtualized environments where PCIE config space may not be available.
4462 */
4463static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
4464{
4465 struct pci_dev *pdev;
4466 enum pci_bus_speed speed_cap, platform_speed_cap;
4467 enum pcie_link_width platform_link_width;
4468
4469 if (amdgpu_pcie_gen_cap)
4470 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
4471
4472 if (amdgpu_pcie_lane_cap)
4473 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
4474
4475 /* covers APUs as well */
4476 if (pci_is_root_bus(adev->pdev->bus)) {
4477 if (adev->pm.pcie_gen_mask == 0)
4478 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4479 if (adev->pm.pcie_mlw_mask == 0)
4480 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
4481 return;
4482 }
4483
4484 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
4485 return;
4486
4487 pcie_bandwidth_available(adev->pdev, NULL,
4488 &platform_speed_cap, &platform_link_width);
4489
4490 if (adev->pm.pcie_gen_mask == 0) {
4491 /* asic caps */
4492 pdev = adev->pdev;
4493 speed_cap = pcie_get_speed_cap(pdev);
4494 if (speed_cap == PCI_SPEED_UNKNOWN) {
4495 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4496 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4497 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4498 } else {
4499 if (speed_cap == PCIE_SPEED_16_0GT)
4500 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4501 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4502 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4503 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4504 else if (speed_cap == PCIE_SPEED_8_0GT)
4505 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4506 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4507 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4508 else if (speed_cap == PCIE_SPEED_5_0GT)
4509 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4510 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4511 else
4512 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4513 }
4514 /* platform caps */
4515 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
4516 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4517 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4518 } else {
4519 if (platform_speed_cap == PCIE_SPEED_16_0GT)
4520 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4521 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4522 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4523 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
4524 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
4525 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4526 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4527 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
4528 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
4529 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4530 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4531 else
4532 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4533
4534 }
4535 }
4536 if (adev->pm.pcie_mlw_mask == 0) {
4537 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
4538 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4539 } else {
4540 switch (platform_link_width) {
4541 case PCIE_LNK_X32:
4542 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4543 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4544 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4545 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4546 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4547 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4548 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4549 break;
4550 case PCIE_LNK_X16:
4551 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4552 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4553 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4554 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4555 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4556 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4557 break;
4558 case PCIE_LNK_X12:
4559 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4560 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4561 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4562 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4563 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4564 break;
4565 case PCIE_LNK_X8:
4566 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4567 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4568 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4569 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4570 break;
4571 case PCIE_LNK_X4:
4572 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4573 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4574 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4575 break;
4576 case PCIE_LNK_X2:
4577 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4578 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4579 break;
4580 case PCIE_LNK_X1:
4581 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
4582 break;
4583 default:
4584 break;
4585 }
4586 }
4587 }
4588}
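
/*
 * Both masks can be forced from userspace for testing via the
 * pcie_gen_cap and pcie_lane_cap module parameters checked at the top of
 * this function, e.g.:
 *
 *	modprobe amdgpu pcie_gen_cap=<mask> pcie_lane_cap=<mask>
 *
 * where the mask bit layout follows the CAIL_* defines in amd_pcie.h.
 */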
4589
4590int amdgpu_device_baco_enter(struct drm_device *dev)
4591{
4592 struct amdgpu_device *adev = dev->dev_private;
4593 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4594
4595 if (!amdgpu_device_supports_baco(adev->ddev))
4596 return -ENOTSUPP;
4597
4598 if (ras && ras->supported)
4599 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
4600
4601 return amdgpu_dpm_baco_enter(adev);
4602}
4603
4604int amdgpu_device_baco_exit(struct drm_device *dev)
4605{
4606 struct amdgpu_device *adev = dev->dev_private;
4607 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4608 int ret = 0;
4609
4610 if (!amdgpu_device_supports_baco(adev->ddev))
4611 return -ENOTSUPP;
4612
4613 ret = amdgpu_dpm_baco_exit(adev);
4614 if (ret)
4615 return ret;
4616
4617 if (ras && ras->supported)
4618 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
4619
4620 return 0;
4621}
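
/*
 * BACO (Bus Active, Chip Off) entry and exit are used as a pair, e.g. by
 * the runtime PM path. A minimal usage sketch:
 *
 *	r = amdgpu_device_baco_enter(drm_dev);
 *	if (r)
 *		return r;
 *	// ...the GPU core is powered off, bus access stays alive...
 *	r = amdgpu_device_baco_exit(drm_dev);
 */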
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/power_supply.h>
29#include <linux/kthread.h>
30#include <linux/module.h>
31#include <linux/console.h>
32#include <linux/slab.h>
33#include <linux/iommu.h>
34#include <linux/pci.h>
35#include <linux/devcoredump.h>
36#include <generated/utsrelease.h>
37#include <linux/pci-p2pdma.h>
38
39#include <drm/drm_aperture.h>
40#include <drm/drm_atomic_helper.h>
41#include <drm/drm_fb_helper.h>
42#include <drm/drm_probe_helper.h>
43#include <drm/amdgpu_drm.h>
44#include <linux/vgaarb.h>
45#include <linux/vga_switcheroo.h>
46#include <linux/efi.h>
47#include "amdgpu.h"
48#include "amdgpu_trace.h"
49#include "amdgpu_i2c.h"
50#include "atom.h"
51#include "amdgpu_atombios.h"
52#include "amdgpu_atomfirmware.h"
53#include "amd_pcie.h"
54#ifdef CONFIG_DRM_AMDGPU_SI
55#include "si.h"
56#endif
57#ifdef CONFIG_DRM_AMDGPU_CIK
58#include "cik.h"
59#endif
60#include "vi.h"
61#include "soc15.h"
62#include "nv.h"
63#include "bif/bif_4_1_d.h"
64#include <linux/firmware.h>
65#include "amdgpu_vf_error.h"
66
67#include "amdgpu_amdkfd.h"
68#include "amdgpu_pm.h"
69
70#include "amdgpu_xgmi.h"
71#include "amdgpu_ras.h"
72#include "amdgpu_pmu.h"
73#include "amdgpu_fru_eeprom.h"
74#include "amdgpu_reset.h"
75
76#include <linux/suspend.h>
77#include <drm/task_barrier.h>
78#include <linux/pm_runtime.h>
79
80#include <drm/drm_drv.h>
81
82MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
83MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
84MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
85MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
86MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
87MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
88MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
89
90#define AMDGPU_RESUME_MS 2000
91#define AMDGPU_MAX_RETRY_LIMIT 2
92#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
93
94static const struct drm_driver amdgpu_kms_driver;
95
96const char *amdgpu_asic_name[] = {
97 "TAHITI",
98 "PITCAIRN",
99 "VERDE",
100 "OLAND",
101 "HAINAN",
102 "BONAIRE",
103 "KAVERI",
104 "KABINI",
105 "HAWAII",
106 "MULLINS",
107 "TOPAZ",
108 "TONGA",
109 "FIJI",
110 "CARRIZO",
111 "STONEY",
112 "POLARIS10",
113 "POLARIS11",
114 "POLARIS12",
115 "VEGAM",
116 "VEGA10",
117 "VEGA12",
118 "VEGA20",
119 "RAVEN",
120 "ARCTURUS",
121 "RENOIR",
122 "ALDEBARAN",
123 "NAVI10",
124 "CYAN_SKILLFISH",
125 "NAVI14",
126 "NAVI12",
127 "SIENNA_CICHLID",
128 "NAVY_FLOUNDER",
129 "VANGOGH",
130 "DIMGREY_CAVEFISH",
131 "BEIGE_GOBY",
132 "YELLOW_CARP",
133 "IP DISCOVERY",
134 "LAST",
135};
136
137/**
138 * DOC: pcie_replay_count
139 *
140 * The amdgpu driver provides a sysfs API for reporting the total number
141 * of PCIe replays (NAKs)
142 * The file pcie_replay_count is used for this and returns the total
143 * number of replays as a sum of the NAKs generated and NAKs received
144 */
145
146static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
147 struct device_attribute *attr, char *buf)
148{
149 struct drm_device *ddev = dev_get_drvdata(dev);
150 struct amdgpu_device *adev = drm_to_adev(ddev);
151 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
152
153 return sysfs_emit(buf, "%llu\n", cnt);
154}
155
156static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
157 amdgpu_device_get_pcie_replay_count, NULL);
158
159static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
160
161/**
162 * DOC: product_name
163 *
164 * The amdgpu driver provides a sysfs API for reporting the product name
165 * for the device
166 * The file serial_number is used for this and returns the product name
167 * as returned from the FRU.
168 * NOTE: This is only available for certain server cards
169 */
170
171static ssize_t amdgpu_device_get_product_name(struct device *dev,
172 struct device_attribute *attr, char *buf)
173{
174 struct drm_device *ddev = dev_get_drvdata(dev);
175 struct amdgpu_device *adev = drm_to_adev(ddev);
176
177 return sysfs_emit(buf, "%s\n", adev->product_name);
178}
179
180static DEVICE_ATTR(product_name, S_IRUGO,
181 amdgpu_device_get_product_name, NULL);
182
183/**
184 * DOC: product_number
185 *
186 * The amdgpu driver provides a sysfs API for reporting the part number
187 * for the device
188 * The file serial_number is used for this and returns the part number
189 * as returned from the FRU.
190 * NOTE: This is only available for certain server cards
191 */
192
193static ssize_t amdgpu_device_get_product_number(struct device *dev,
194 struct device_attribute *attr, char *buf)
195{
196 struct drm_device *ddev = dev_get_drvdata(dev);
197 struct amdgpu_device *adev = drm_to_adev(ddev);
198
199 return sysfs_emit(buf, "%s\n", adev->product_number);
200}
201
202static DEVICE_ATTR(product_number, S_IRUGO,
203 amdgpu_device_get_product_number, NULL);
204
205/**
206 * DOC: serial_number
207 *
208 * The amdgpu driver provides a sysfs API for reporting the serial number
209 * for the device
210 * The file serial_number is used for this and returns the serial number
211 * as returned from the FRU.
212 * NOTE: This is only available for certain server cards
213 */
214
215static ssize_t amdgpu_device_get_serial_number(struct device *dev,
216 struct device_attribute *attr, char *buf)
217{
218 struct drm_device *ddev = dev_get_drvdata(dev);
219 struct amdgpu_device *adev = drm_to_adev(ddev);
220
221 return sysfs_emit(buf, "%s\n", adev->serial);
222}
223
224static DEVICE_ATTR(serial_number, S_IRUGO,
225 amdgpu_device_get_serial_number, NULL);
226
227/**
228 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
229 *
230 * @dev: drm_device pointer
231 *
232 * Returns true if the device is a dGPU with ATPX power control,
233 * otherwise return false.
234 */
235bool amdgpu_device_supports_px(struct drm_device *dev)
236{
237 struct amdgpu_device *adev = drm_to_adev(dev);
238
239 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
240 return true;
241 return false;
242}
243
244/**
245 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
246 *
247 * @dev: drm_device pointer
248 *
249 * Returns true if the device is a dGPU with ACPI power control,
250 * otherwise return false.
251 */
252bool amdgpu_device_supports_boco(struct drm_device *dev)
253{
254 struct amdgpu_device *adev = drm_to_adev(dev);
255
256 if (adev->has_pr3 ||
257 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
258 return true;
259 return false;
260}
261
262/**
263 * amdgpu_device_supports_baco - Does the device support BACO
264 *
265 * @dev: drm_device pointer
266 *
267 * Returns true if the device supporte BACO,
268 * otherwise return false.
269 */
270bool amdgpu_device_supports_baco(struct drm_device *dev)
271{
272 struct amdgpu_device *adev = drm_to_adev(dev);
273
274 return amdgpu_asic_supports_baco(adev);
275}
276
277/**
278 * amdgpu_device_supports_smart_shift - Is the device dGPU with
279 * smart shift support
280 *
281 * @dev: drm_device pointer
282 *
283 * Returns true if the device is a dGPU with Smart Shift support,
284 * otherwise returns false.
285 */
286bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
287{
288 return (amdgpu_device_supports_boco(dev) &&
289 amdgpu_acpi_is_power_shift_control_supported());
290}
291
292/*
293 * VRAM access helper functions
294 */
295
296/**
297 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
298 *
299 * @adev: amdgpu_device pointer
300 * @pos: offset of the buffer in vram
301 * @buf: virtual address of the buffer in system memory
302 * @size: read/write size, sizeof(@buf) must > @size
303 * @write: true - write to vram, otherwise - read from vram
304 */
305void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
306 void *buf, size_t size, bool write)
307{
308 unsigned long flags;
309 uint32_t hi = ~0, tmp = 0;
310 uint32_t *data = buf;
311 uint64_t last;
312 int idx;
313
314 if (!drm_dev_enter(adev_to_drm(adev), &idx))
315 return;
316
317 BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
318
319 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
320 for (last = pos + size; pos < last; pos += 4) {
321 tmp = pos >> 31;
322
323 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
324 if (tmp != hi) {
325 WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
326 hi = tmp;
327 }
328 if (write)
329 WREG32_NO_KIQ(mmMM_DATA, *data++);
330 else
331 *data++ = RREG32_NO_KIQ(mmMM_DATA);
332 }
333
334 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
335 drm_dev_exit(idx);
336}
337
338/**
339 * amdgpu_device_aper_access - access vram by vram aperature
340 *
341 * @adev: amdgpu_device pointer
342 * @pos: offset of the buffer in vram
343 * @buf: virtual address of the buffer in system memory
344 * @size: read/write size, sizeof(@buf) must > @size
345 * @write: true - write to vram, otherwise - read from vram
346 *
347 * The return value means how many bytes have been transferred.
348 */
349size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
350 void *buf, size_t size, bool write)
351{
352#ifdef CONFIG_64BIT
353 void __iomem *addr;
354 size_t count = 0;
355 uint64_t last;
356
357 if (!adev->mman.aper_base_kaddr)
358 return 0;
359
360 last = min(pos + size, adev->gmc.visible_vram_size);
361 if (last > pos) {
362 addr = adev->mman.aper_base_kaddr + pos;
363 count = last - pos;
364
365 if (write) {
366 memcpy_toio(addr, buf, count);
367 mb();
368 amdgpu_device_flush_hdp(adev, NULL);
369 } else {
370 amdgpu_device_invalidate_hdp(adev, NULL);
371 mb();
372 memcpy_fromio(buf, addr, count);
373 }
374
375 }
376
377 return count;
378#else
379 return 0;
380#endif
381}
382
383/**
384 * amdgpu_device_vram_access - read/write a buffer in vram
385 *
386 * @adev: amdgpu_device pointer
387 * @pos: offset of the buffer in vram
388 * @buf: virtual address of the buffer in system memory
389 * @size: read/write size, sizeof(@buf) must > @size
390 * @write: true - write to vram, otherwise - read from vram
391 */
392void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
393 void *buf, size_t size, bool write)
394{
395 size_t count;
396
397 /* try to using vram apreature to access vram first */
398 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
399 size -= count;
400 if (size) {
401 /* using MM to access rest vram */
402 pos += count;
403 buf += count;
404 amdgpu_device_mm_access(adev, pos, buf, size, write);
405 }
406}
407
408/*
409 * register access helper functions.
410 */
411
412/* Check if hw access should be skipped because of hotplug or device error */
413bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
414{
415 if (adev->no_hw_access)
416 return true;
417
418#ifdef CONFIG_LOCKDEP
419 /*
420 * This is a bit complicated to understand, so worth a comment. What we assert
421 * here is that the GPU reset is not running on another thread in parallel.
422 *
423 * For this we trylock the read side of the reset semaphore, if that succeeds
424 * we know that the reset is not running in paralell.
425 *
426 * If the trylock fails we assert that we are either already holding the read
427 * side of the lock or are the reset thread itself and hold the write side of
428 * the lock.
429 */
430 if (in_task()) {
431 if (down_read_trylock(&adev->reset_domain->sem))
432 up_read(&adev->reset_domain->sem);
433 else
434 lockdep_assert_held(&adev->reset_domain->sem);
435 }
436#endif
437 return false;
438}
439
440/**
441 * amdgpu_device_rreg - read a memory mapped IO or indirect register
442 *
443 * @adev: amdgpu_device pointer
444 * @reg: dword aligned register offset
445 * @acc_flags: access flags which require special behavior
446 *
447 * Returns the 32 bit value from the offset specified.
448 */
449uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
450 uint32_t reg, uint32_t acc_flags)
451{
452 uint32_t ret;
453
454 if (amdgpu_device_skip_hw_access(adev))
455 return 0;
456
457 if ((reg * 4) < adev->rmmio_size) {
458 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
459 amdgpu_sriov_runtime(adev) &&
460 down_read_trylock(&adev->reset_domain->sem)) {
461 ret = amdgpu_kiq_rreg(adev, reg);
462 up_read(&adev->reset_domain->sem);
463 } else {
464 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
465 }
466 } else {
467 ret = adev->pcie_rreg(adev, reg * 4);
468 }
469
470 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
471
472 return ret;
473}
474
475/*
476 * MMIO register read with bytes helper functions
477 * @offset:bytes offset from MMIO start
478 *
479*/
480
481/**
482 * amdgpu_mm_rreg8 - read a memory mapped IO register
483 *
484 * @adev: amdgpu_device pointer
485 * @offset: byte aligned register offset
486 *
487 * Returns the 8 bit value from the offset specified.
488 */
489uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
490{
491 if (amdgpu_device_skip_hw_access(adev))
492 return 0;
493
494 if (offset < adev->rmmio_size)
495 return (readb(adev->rmmio + offset));
496 BUG();
497}
498
499/*
500 * MMIO register write with bytes helper functions
501 * @offset:bytes offset from MMIO start
502 * @value: the value want to be written to the register
503 *
504*/
505/**
506 * amdgpu_mm_wreg8 - read a memory mapped IO register
507 *
508 * @adev: amdgpu_device pointer
509 * @offset: byte aligned register offset
510 * @value: 8 bit value to write
511 *
512 * Writes the value specified to the offset specified.
513 */
514void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
515{
516 if (amdgpu_device_skip_hw_access(adev))
517 return;
518
519 if (offset < adev->rmmio_size)
520 writeb(value, adev->rmmio + offset);
521 else
522 BUG();
523}
524
525/**
526 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
527 *
528 * @adev: amdgpu_device pointer
529 * @reg: dword aligned register offset
530 * @v: 32 bit value to write to the register
531 * @acc_flags: access flags which require special behavior
532 *
533 * Writes the value specified to the offset specified.
534 */
535void amdgpu_device_wreg(struct amdgpu_device *adev,
536 uint32_t reg, uint32_t v,
537 uint32_t acc_flags)
538{
539 if (amdgpu_device_skip_hw_access(adev))
540 return;
541
542 if ((reg * 4) < adev->rmmio_size) {
543 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
544 amdgpu_sriov_runtime(adev) &&
545 down_read_trylock(&adev->reset_domain->sem)) {
546 amdgpu_kiq_wreg(adev, reg, v);
547 up_read(&adev->reset_domain->sem);
548 } else {
549 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
550 }
551 } else {
552 adev->pcie_wreg(adev, reg * 4, v);
553 }
554
555 trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
556}
557
558/**
559 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
560 *
561 * @adev: amdgpu_device pointer
562 * @reg: mmio/rlc register
563 * @v: value to write
564 *
565 * this function is invoked only for the debugfs register access
566 */
567void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
568 uint32_t reg, uint32_t v)
569{
570 if (amdgpu_device_skip_hw_access(adev))
571 return;
572
573 if (amdgpu_sriov_fullaccess(adev) &&
574 adev->gfx.rlc.funcs &&
575 adev->gfx.rlc.funcs->is_rlcg_access_range) {
576 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
577 return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
578 } else if ((reg * 4) >= adev->rmmio_size) {
579 adev->pcie_wreg(adev, reg * 4, v);
580 } else {
581 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
582 }
583}
584
585/**
586 * amdgpu_mm_rdoorbell - read a doorbell dword
587 *
588 * @adev: amdgpu_device pointer
589 * @index: doorbell index
590 *
591 * Returns the value in the doorbell aperture at the
592 * requested doorbell index (CIK).
593 */
594u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
595{
596 if (amdgpu_device_skip_hw_access(adev))
597 return 0;
598
599 if (index < adev->doorbell.num_doorbells) {
600 return readl(adev->doorbell.ptr + index);
601 } else {
602 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
603 return 0;
604 }
605}
606
607/**
608 * amdgpu_mm_wdoorbell - write a doorbell dword
609 *
610 * @adev: amdgpu_device pointer
611 * @index: doorbell index
612 * @v: value to write
613 *
614 * Writes @v to the doorbell aperture at the
615 * requested doorbell index (CIK).
616 */
617void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
618{
619 if (amdgpu_device_skip_hw_access(adev))
620 return;
621
622 if (index < adev->doorbell.num_doorbells) {
623 writel(v, adev->doorbell.ptr + index);
624 } else {
625 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
626 }
627}
628
629/**
630 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
631 *
632 * @adev: amdgpu_device pointer
633 * @index: doorbell index
634 *
635 * Returns the value in the doorbell aperture at the
636 * requested doorbell index (VEGA10+).
637 */
638u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
639{
640 if (amdgpu_device_skip_hw_access(adev))
641 return 0;
642
643 if (index < adev->doorbell.num_doorbells) {
644 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
645 } else {
646 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
647 return 0;
648 }
649}
650
651/**
652 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
653 *
654 * @adev: amdgpu_device pointer
655 * @index: doorbell index
656 * @v: value to write
657 *
658 * Writes @v to the doorbell aperture at the
659 * requested doorbell index (VEGA10+).
660 */
661void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
662{
663 if (amdgpu_device_skip_hw_access(adev))
664 return;
665
666 if (index < adev->doorbell.num_doorbells) {
667 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
668 } else {
669 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
670 }
671}
672
673/**
674 * amdgpu_device_indirect_rreg - read an indirect register
675 *
676 * @adev: amdgpu_device pointer
677 * @pcie_index: mmio register offset
678 * @pcie_data: mmio register offset
679 * @reg_addr: indirect register address to read from
680 *
681 * Returns the value of indirect register @reg_addr
682 */
683u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
684 u32 pcie_index, u32 pcie_data,
685 u32 reg_addr)
686{
687 unsigned long flags;
688 u32 r;
689 void __iomem *pcie_index_offset;
690 void __iomem *pcie_data_offset;
691
692 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
693 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
694 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
695
696 writel(reg_addr, pcie_index_offset);
697 readl(pcie_index_offset);
698 r = readl(pcie_data_offset);
699 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
700
701 return r;
702}
703
704/**
705 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
706 *
707 * @adev: amdgpu_device pointer
708 * @pcie_index: mmio register offset
709 * @pcie_data: mmio register offset
710 * @reg_addr: indirect register address to read from
711 *
712 * Returns the value of indirect register @reg_addr
713 */
714u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
715 u32 pcie_index, u32 pcie_data,
716 u32 reg_addr)
717{
718 unsigned long flags;
719 u64 r;
720 void __iomem *pcie_index_offset;
721 void __iomem *pcie_data_offset;
722
723 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
724 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
725 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
726
727 /* read low 32 bits */
728 writel(reg_addr, pcie_index_offset);
729 readl(pcie_index_offset);
730 r = readl(pcie_data_offset);
731 /* read high 32 bits */
732 writel(reg_addr + 4, pcie_index_offset);
733 readl(pcie_index_offset);
734 r |= ((u64)readl(pcie_data_offset) << 32);
735 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
736
737 return r;
738}
739
740/**
741 * amdgpu_device_indirect_wreg - write an indirect register address
742 *
743 * @adev: amdgpu_device pointer
744 * @pcie_index: mmio register offset
745 * @pcie_data: mmio register offset
746 * @reg_addr: indirect register offset
747 * @reg_data: indirect register data
748 *
749 */
750void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
751 u32 pcie_index, u32 pcie_data,
752 u32 reg_addr, u32 reg_data)
753{
754 unsigned long flags;
755 void __iomem *pcie_index_offset;
756 void __iomem *pcie_data_offset;
757
758 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
759 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
760 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
761
762 writel(reg_addr, pcie_index_offset);
763 readl(pcie_index_offset);
764 writel(reg_data, pcie_data_offset);
765 readl(pcie_data_offset);
766 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
767}
768
769/**
770 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
771 *
772 * @adev: amdgpu_device pointer
773 * @pcie_index: mmio register offset
774 * @pcie_data: mmio register offset
775 * @reg_addr: indirect register offset
776 * @reg_data: indirect register data
777 *
778 */
779void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
780 u32 pcie_index, u32 pcie_data,
781 u32 reg_addr, u64 reg_data)
782{
783 unsigned long flags;
784 void __iomem *pcie_index_offset;
785 void __iomem *pcie_data_offset;
786
787 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
788 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
789 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
790
791 /* write low 32 bits */
792 writel(reg_addr, pcie_index_offset);
793 readl(pcie_index_offset);
794 writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
795 readl(pcie_data_offset);
796 /* write high 32 bits */
797 writel(reg_addr + 4, pcie_index_offset);
798 readl(pcie_index_offset);
799 writel((u32)(reg_data >> 32), pcie_data_offset);
800 readl(pcie_data_offset);
801 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
802}
803
804/**
805 * amdgpu_invalid_rreg - dummy reg read function
806 *
807 * @adev: amdgpu_device pointer
808 * @reg: offset of register
809 *
810 * Dummy register read function. Used for register blocks
811 * that certain asics don't have (all asics).
812 * Returns the value in the register.
813 */
814static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
815{
816 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
817 BUG();
818 return 0;
819}
820
821/**
822 * amdgpu_invalid_wreg - dummy reg write function
823 *
824 * @adev: amdgpu_device pointer
825 * @reg: offset of register
826 * @v: value to write to the register
827 *
828 * Dummy register read function. Used for register blocks
829 * that certain asics don't have (all asics).
830 */
831static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
832{
833 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
834 reg, v);
835 BUG();
836}
837
838/**
839 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
840 *
841 * @adev: amdgpu_device pointer
842 * @reg: offset of register
843 *
844 * Dummy register read function. Used for register blocks
845 * that certain asics don't have (all asics).
846 * Returns the value in the register.
847 */
848static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
849{
850 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
851 BUG();
852 return 0;
853}
854
855/**
856 * amdgpu_invalid_wreg64 - dummy reg write function
857 *
858 * @adev: amdgpu_device pointer
859 * @reg: offset of register
860 * @v: value to write to the register
861 *
862 * Dummy register read function. Used for register blocks
863 * that certain asics don't have (all asics).
864 */
865static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
866{
867 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
868 reg, v);
869 BUG();
870}
871
872/**
873 * amdgpu_block_invalid_rreg - dummy reg read function
874 *
875 * @adev: amdgpu_device pointer
876 * @block: offset of instance
877 * @reg: offset of register
878 *
879 * Dummy register read function. Used for register blocks
880 * that certain asics don't have (all asics).
881 * Returns the value in the register.
882 */
883static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
884 uint32_t block, uint32_t reg)
885{
886 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
887 reg, block);
888 BUG();
889 return 0;
890}
891
892/**
893 * amdgpu_block_invalid_wreg - dummy reg write function
894 *
895 * @adev: amdgpu_device pointer
896 * @block: offset of instance
897 * @reg: offset of register
898 * @v: value to write to the register
899 *
900 * Dummy register read function. Used for register blocks
901 * that certain asics don't have (all asics).
902 */
903static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
904 uint32_t block,
905 uint32_t reg, uint32_t v)
906{
907 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
908 reg, block, v);
909 BUG();
910}
911
912/**
913 * amdgpu_device_asic_init - Wrapper for atom asic_init
914 *
915 * @adev: amdgpu_device pointer
916 *
917 * Does any asic specific work and then calls atom asic init.
918 */
919static int amdgpu_device_asic_init(struct amdgpu_device *adev)
920{
921 amdgpu_asic_pre_asic_init(adev);
922
923 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
924 return amdgpu_atomfirmware_asic_init(adev, true);
925 else
926 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
927}
928
929/**
930 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
931 *
932 * @adev: amdgpu_device pointer
933 *
934 * Allocates a scratch page of VRAM for use by various things in the
935 * driver.
936 */
937static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
938{
939 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
940 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
941 &adev->vram_scratch.robj,
942 &adev->vram_scratch.gpu_addr,
943 (void **)&adev->vram_scratch.ptr);
944}
945
946/**
947 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
948 *
949 * @adev: amdgpu_device pointer
950 *
951 * Frees the VRAM scratch page.
952 */
953static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
954{
955 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
956}
957
958/**
959 * amdgpu_device_program_register_sequence - program an array of registers.
960 *
961 * @adev: amdgpu_device pointer
962 * @registers: pointer to the register array
963 * @array_size: size of the register array
964 *
965 * Programs an array or registers with and and or masks.
966 * This is a helper for setting golden registers.
967 */
968void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
969 const u32 *registers,
970 const u32 array_size)
971{
972 u32 tmp, reg, and_mask, or_mask;
973 int i;
974
975 if (array_size % 3)
976 return;
977
978 for (i = 0; i < array_size; i +=3) {
979 reg = registers[i + 0];
980 and_mask = registers[i + 1];
981 or_mask = registers[i + 2];
982
983 if (and_mask == 0xffffffff) {
984 tmp = or_mask;
985 } else {
986 tmp = RREG32(reg);
987 tmp &= ~and_mask;
988 if (adev->family >= AMDGPU_FAMILY_AI)
989 tmp |= (or_mask & and_mask);
990 else
991 tmp |= or_mask;
992 }
993 WREG32(reg, tmp);
994 }
995}
996
997/**
998 * amdgpu_device_pci_config_reset - reset the GPU
999 *
1000 * @adev: amdgpu_device pointer
1001 *
1002 * Resets the GPU using the pci config reset sequence.
1003 * Only applicable to asics prior to vega10.
1004 */
1005void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1006{
1007 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1008}
1009
1010/**
1011 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1012 *
1013 * @adev: amdgpu_device pointer
1014 *
1015 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1016 */
1017int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1018{
1019 return pci_reset_function(adev->pdev);
1020}
1021
1022/*
1023 * GPU doorbell aperture helpers function.
1024 */
1025/**
1026 * amdgpu_device_doorbell_init - Init doorbell driver information.
1027 *
1028 * @adev: amdgpu_device pointer
1029 *
1030 * Init doorbell driver information (CIK)
1031 * Returns 0 on success, error on failure.
1032 */
1033static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1034{
1035
1036 /* No doorbell on SI hardware generation */
1037 if (adev->asic_type < CHIP_BONAIRE) {
1038 adev->doorbell.base = 0;
1039 adev->doorbell.size = 0;
1040 adev->doorbell.num_doorbells = 0;
1041 adev->doorbell.ptr = NULL;
1042 return 0;
1043 }
1044
1045 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1046 return -EINVAL;
1047
1048 amdgpu_asic_init_doorbell_index(adev);
1049
1050 /* doorbell bar mapping */
1051 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1052 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1053
1054 if (adev->enable_mes) {
1055 adev->doorbell.num_doorbells =
1056 adev->doorbell.size / sizeof(u32);
1057 } else {
1058 adev->doorbell.num_doorbells =
1059 min_t(u32, adev->doorbell.size / sizeof(u32),
1060 adev->doorbell_index.max_assignment+1);
1061 if (adev->doorbell.num_doorbells == 0)
1062 return -EINVAL;
1063
		/* For Vega, reserve and map two pages of the doorbell BAR
		 * since the SDMA paging queue doorbell uses the second page.
		 * The AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all
		 * the doorbells are in the first page, so with the paging
		 * queue enabled, num_doorbells grows by one extra page
		 * (0x400 dwords).
		 */
1070 if (adev->asic_type >= CHIP_VEGA10)
1071 adev->doorbell.num_doorbells += 0x400;
1072 }
1073
1074 adev->doorbell.ptr = ioremap(adev->doorbell.base,
1075 adev->doorbell.num_doorbells *
1076 sizeof(u32));
1077 if (adev->doorbell.ptr == NULL)
1078 return -ENOMEM;
1079
1080 return 0;
1081}
1082
1083/**
1084 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1085 *
1086 * @adev: amdgpu_device pointer
1087 *
1088 * Tear down doorbell driver information (CIK)
1089 */
1090static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1091{
1092 iounmap(adev->doorbell.ptr);
1093 adev->doorbell.ptr = NULL;
1094}
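
/*
 * Usage sketch (illustrative only): once the BAR is mapped, ring code
 * writes doorbells through the CPU mapping at the assigned dword index,
 * e.g. via the WDOORBELL32() helper:
 *
 *	if (ring->use_doorbell)
 *		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 */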
1095
1098/*
1099 * amdgpu_device_wb_*()
1100 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
1102 */
1103
1104/**
1105 * amdgpu_device_wb_fini - Disable Writeback and free memory
1106 *
1107 * @adev: amdgpu_device pointer
1108 *
1109 * Disables Writeback and frees the Writeback memory (all asics).
1110 * Used at driver shutdown.
1111 */
1112static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1113{
1114 if (adev->wb.wb_obj) {
1115 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1116 &adev->wb.gpu_addr,
1117 (void **)&adev->wb.wb);
1118 adev->wb.wb_obj = NULL;
1119 }
1120}
1121
1122/**
1123 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1124 *
1125 * @adev: amdgpu_device pointer
1126 *
1127 * Initializes writeback and allocates writeback memory (all asics).
1128 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
1130 */
1131static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1132{
1133 int r;
1134
1135 if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256-bit slots */
1137 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1138 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1139 &adev->wb.wb_obj, &adev->wb.gpu_addr,
1140 (void **)&adev->wb.wb);
1141 if (r) {
1142 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1143 return r;
1144 }
1145
1146 adev->wb.num_wb = AMDGPU_MAX_WB;
1147 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1148
1149 /* clear wb memory */
1150 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1151 }
1152
1153 return 0;
1154}
1155
1156/**
1157 * amdgpu_device_wb_get - Allocate a wb entry
1158 *
1159 * @adev: amdgpu_device pointer
1160 * @wb: wb index
1161 *
1162 * Allocate a wb slot for use by the driver (all asics).
1163 * Returns 0 on success or -EINVAL on failure.
1164 */
1165int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1166{
1167 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1168
1169 if (offset < adev->wb.num_wb) {
1170 __set_bit(offset, adev->wb.used);
1171 *wb = offset << 3; /* convert to dw offset */
1172 return 0;
1173 } else {
1174 return -EINVAL;
1175 }
1176}
1177
1178/**
1179 * amdgpu_device_wb_free - Free a wb entry
1180 *
1181 * @adev: amdgpu_device pointer
1182 * @wb: wb index
1183 *
1184 * Free a wb slot allocated for use by the driver (all asics)
1185 */
1186void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1187{
1188 wb >>= 3;
1189 if (wb < adev->wb.num_wb)
1190 __clear_bit(wb, adev->wb.used);
1191}
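
/*
 * Usage sketch (illustrative only): callers pair the two helpers and
 * access the slot through the CPU mapping at the returned dword offset:
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		u32 val = adev->wb.wb[wb];
 *
 *		... use val ...
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */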
1192
1193/**
1194 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1195 *
1196 * @adev: amdgpu_device pointer
1197 *
 * Try to resize the FB BAR to make all VRAM CPU accessible. We try very hard
 * not to fail, but if any of the BARs is not accessible after the resize we
 * abort driver loading by returning -ENODEV.
1201 */
1202int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1203{
1204 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1205 struct pci_bus *root;
1206 struct resource *res;
	unsigned int i;
1208 u16 cmd;
1209 int r;
1210
1211 /* Bypass for VF */
1212 if (amdgpu_sriov_vf(adev))
1213 return 0;
1214
1215 /* skip if the bios has already enabled large BAR */
1216 if (adev->gmc.real_vram_size &&
1217 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1218 return 0;
1219
1220 /* Check if the root BUS has 64bit memory resources */
1221 root = adev->pdev->bus;
1222 while (root->parent)
1223 root = root->parent;
1224
1225 pci_bus_for_each_resource(root, res, i) {
1226 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1227 res->start > 0x100000000ull)
1228 break;
1229 }
1230
1231 /* Trying to resize is pointless without a root hub window above 4GB */
1232 if (!res)
1233 return 0;
1234
1235 /* Limit the BAR size to what is available */
1236 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1237 rbar_size);
1238
1239 /* Disable memory decoding while we change the BAR addresses and size */
1240 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1241 pci_write_config_word(adev->pdev, PCI_COMMAND,
1242 cmd & ~PCI_COMMAND_MEMORY);
1243
1244 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1245 amdgpu_device_doorbell_fini(adev);
1246 if (adev->asic_type >= CHIP_BONAIRE)
1247 pci_release_resource(adev->pdev, 2);
1248
1249 pci_release_resource(adev->pdev, 0);
1250
1251 r = pci_resize_resource(adev->pdev, 0, rbar_size);
1252 if (r == -ENOSPC)
1253 DRM_INFO("Not enough PCI address space for a large BAR.");
1254 else if (r && r != -ENOTSUPP)
1255 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1256
1257 pci_assign_unassigned_bus_resources(adev->pdev->bus);
1258
1259 /* When the doorbell or fb BAR isn't available we have no chance of
1260 * using the device.
1261 */
1262 r = amdgpu_device_doorbell_init(adev);
1263 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1264 return -ENODEV;
1265
1266 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1267
1268 return 0;
1269}
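
/*
 * Note (assuming the standard PCI resizable-BAR encoding): BAR sizes are
 * powers of two starting at 1MB, i.e. size n covers 2^(n + 20) bytes, so
 * e.g. 8GB of VRAM corresponds to an rbar_size of 13.
 */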
1270
1271/*
 * GPU helper functions.
1273 */
1274/**
 * amdgpu_device_need_post - check if the hw needs to be posted or not
1276 *
1277 * @adev: amdgpu_device pointer
1278 *
 * Check if the asic needs to be posted (all asics), e.g. at driver startup
 * or after a hw reset has been performed.
 * Returns true if post is needed, false if not.
1282 */
1283bool amdgpu_device_need_post(struct amdgpu_device *adev)
1284{
1285 uint32_t reg;
1286
1287 if (amdgpu_sriov_vf(adev))
1288 return false;
1289
1290 if (amdgpu_passthrough(adev)) {
		/* For FIJI: in the whole-GPU pass-through virtualization case,
		 * after a VM reboot some old SMC firmware still requires the
		 * driver to perform vPost, otherwise the GPU hangs. SMC
		 * firmware versions 22.15 and above don't have this flaw, so
		 * we force vPost only for SMC versions below 22.15.
		 */
1296 if (adev->asic_type == CHIP_FIJI) {
1297 int err;
1298 uint32_t fw_ver;
1299 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
1301 if (err)
1302 return true;
1303
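			/* the SMC firmware version lives at dword offset 69
			 * of the firmware image
			 */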
1304 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1305 if (fw_ver < 0x00160e00)
1306 return true;
1307 }
1308 }
1309
1310 /* Don't post if we need to reset whole hive on init */
1311 if (adev->gmc.xgmi.pending_reset)
1312 return false;
1313
1314 if (adev->has_hw_reset) {
1315 adev->has_hw_reset = false;
1316 return true;
1317 }
1318
1319 /* bios scratch used on CIK+ */
1320 if (adev->asic_type >= CHIP_BONAIRE)
1321 return amdgpu_atombios_scratch_need_asic_init(adev);
1322
1323 /* check MEM_SIZE for older asics */
1324 reg = amdgpu_asic_get_config_memsize(adev);
1325
1326 if ((reg != 0) && (reg != 0xffffffff))
1327 return false;
1328
1329 return true;
1330}
1331
1332/**
1333 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1334 *
1335 * @adev: amdgpu_device pointer
1336 *
 * Confirm whether the module parameter and the pcie bridge agree that ASPM
 * should be enabled for this device (amdgpu_aspm: -1 = auto, honor the
 * current PCIe configuration; 0 = disable; 1 = enable).
1339 *
1340 * Returns true if it should be used or false if not.
1341 */
1342bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1343{
1344 switch (amdgpu_aspm) {
1345 case -1:
1346 break;
1347 case 0:
1348 return false;
1349 case 1:
1350 return true;
1351 default:
1352 return false;
1353 }
1354 return pcie_aspm_enabled(adev->pdev);
1355}
1356
1357/* if we get transitioned to only one device, take VGA back */
1358/**
1359 * amdgpu_device_vga_set_decode - enable/disable vga decode
1360 *
1361 * @pdev: PCI device pointer
1362 * @state: enable/disable vga decode
1363 *
1364 * Enable/disable vga decode (all asics).
1365 * Returns VGA resource flags.
1366 */
1367static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1368 bool state)
1369{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

	amdgpu_asic_set_vga_state(adev, state);
1372 if (state)
1373 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1374 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1375 else
1376 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1377}
1378
1379/**
1380 * amdgpu_device_check_block_size - validate the vm block size
1381 *
1382 * @adev: amdgpu_device pointer
1383 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory. A page is 4KB, so we have 12 bits of offset; a minimum of
 * 9 bits go to the page table and the remaining bits to the page directory.
1388 */
1389static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1390{
1394 if (amdgpu_vm_block_size == -1)
1395 return;
1396
1397 if (amdgpu_vm_block_size < 9) {
1398 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1399 amdgpu_vm_block_size);
1400 amdgpu_vm_block_size = -1;
1401 }
1402}
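
/*
 * Example: with the minimum block size of 9, one page table covers
 * 2^9 4KB pages = 2MB of address space, and the VA bits above
 * bit (12 + 9) index the page directory.
 */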
1403
1404/**
1405 * amdgpu_device_check_vm_size - validate the vm size
1406 *
1407 * @adev: amdgpu_device pointer
1408 *
1409 * Validates the vm size in GB specified via module parameter.
1410 * The VM size is the size of the GPU virtual memory space in GB.
1411 */
1412static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1413{
1414 /* no need to check the default value */
1415 if (amdgpu_vm_size == -1)
1416 return;
1417
1418 if (amdgpu_vm_size < 1) {
1419 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1420 amdgpu_vm_size);
1421 amdgpu_vm_size = -1;
1422 }
1423}
1424
1425static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1426{
1427 struct sysinfo si;
1428 bool is_os_64 = (sizeof(void *) == 8);
1429 uint64_t total_memory;
1430 uint64_t dram_size_seven_GB = 0x1B8000000;
1431 uint64_t dram_size_three_GB = 0xB8000000;
1432
1433 if (amdgpu_smu_memory_pool_size == 0)
1434 return;
1435
1436 if (!is_os_64) {
1437 DRM_WARN("Not 64-bit OS, feature not supported\n");
1438 goto def_value;
1439 }
1440 si_meminfo(&si);
1441 total_memory = (uint64_t)si.totalram * si.mem_unit;
1442
1443 if ((amdgpu_smu_memory_pool_size == 1) ||
1444 (amdgpu_smu_memory_pool_size == 2)) {
1445 if (total_memory < dram_size_three_GB)
1446 goto def_value1;
1447 } else if ((amdgpu_smu_memory_pool_size == 4) ||
1448 (amdgpu_smu_memory_pool_size == 8)) {
1449 if (total_memory < dram_size_seven_GB)
1450 goto def_value1;
1451 } else {
1452 DRM_WARN("Smu memory pool size not supported\n");
1453 goto def_value;
1454 }
1455 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1456
1457 return;
1458
1459def_value1:
	DRM_WARN("Not enough system memory\n");
1461def_value:
1462 adev->pm.smu_prv_buffer_size = 0;
1463}
1464
1465static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1466{
1467 if (!(adev->flags & AMD_IS_APU) ||
1468 adev->asic_type < CHIP_RAVEN)
1469 return 0;
1470
1471 switch (adev->asic_type) {
1472 case CHIP_RAVEN:
1473 if (adev->pdev->device == 0x15dd)
1474 adev->apu_flags |= AMD_APU_IS_RAVEN;
1475 if (adev->pdev->device == 0x15d8)
1476 adev->apu_flags |= AMD_APU_IS_PICASSO;
1477 break;
1478 case CHIP_RENOIR:
1479 if ((adev->pdev->device == 0x1636) ||
1480 (adev->pdev->device == 0x164c))
1481 adev->apu_flags |= AMD_APU_IS_RENOIR;
1482 else
1483 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1484 break;
1485 case CHIP_VANGOGH:
1486 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1487 break;
1488 case CHIP_YELLOW_CARP:
1489 break;
1490 case CHIP_CYAN_SKILLFISH:
1491 if ((adev->pdev->device == 0x13FE) ||
1492 (adev->pdev->device == 0x143F))
1493 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1494 break;
1495 default:
1496 break;
1497 }
1498
1499 return 0;
1500}
1501
1502/**
1503 * amdgpu_device_check_arguments - validate module params
1504 *
1505 * @adev: amdgpu_device pointer
1506 *
1507 * Validates certain module parameters and updates
1508 * the associated values used by the driver (all asics).
1509 */
1510static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1511{
1512 if (amdgpu_sched_jobs < 4) {
1513 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1514 amdgpu_sched_jobs);
1515 amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
1517 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1518 amdgpu_sched_jobs);
1519 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1520 }
1521
1522 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater than or equal to 32M */
1524 dev_warn(adev->dev, "gart size (%d) too small\n",
1525 amdgpu_gart_size);
1526 amdgpu_gart_size = -1;
1527 }
1528
1529 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater than or equal to 32M */
1531 dev_warn(adev->dev, "gtt size (%d) too small\n",
1532 amdgpu_gtt_size);
1533 amdgpu_gtt_size = -1;
1534 }
1535
1536 /* valid range is between 4 and 9 inclusive */
1537 if (amdgpu_vm_fragment_size != -1 &&
1538 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1539 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1540 amdgpu_vm_fragment_size = -1;
1541 }
1542
1543 if (amdgpu_sched_hw_submission < 2) {
1544 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1545 amdgpu_sched_hw_submission);
1546 amdgpu_sched_hw_submission = 2;
1547 } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1548 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1549 amdgpu_sched_hw_submission);
1550 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1551 }
1552
1553 if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1554 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1555 amdgpu_reset_method = -1;
1556 }
1557
1558 amdgpu_device_check_smu_prv_buffer_size(adev);
1559
1560 amdgpu_device_check_vm_size(adev);
1561
1562 amdgpu_device_check_block_size(adev);
1563
1564 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1565
1566 return 0;
1567}
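
/*
 * Example: booting with "amdgpu.sched_jobs=5" is rounded up to the next
 * power of two, so the effective value is 8, while "amdgpu.sched_jobs=2"
 * is raised to the minimum of 4.
 */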
1568
1569/**
1570 * amdgpu_switcheroo_set_state - set switcheroo state
1571 *
1572 * @pdev: pci dev pointer
1573 * @state: vga_switcheroo state
1574 *
1575 * Callback for the switcheroo driver. Suspends or resumes
1576 * the asics before or after it is powered up using ACPI methods.
1577 */
1578static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1579 enum vga_switcheroo_state state)
1580{
1581 struct drm_device *dev = pci_get_drvdata(pdev);
1582 int r;
1583
1584 if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1585 return;
1586
1587 if (state == VGA_SWITCHEROO_ON) {
1588 pr_info("switched on\n");
1589 /* don't suspend or resume card normally */
1590 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1591
1592 pci_set_power_state(pdev, PCI_D0);
1593 amdgpu_device_load_pci_state(pdev);
1594 r = pci_enable_device(pdev);
1595 if (r)
1596 DRM_WARN("pci_enable_device failed (%d)\n", r);
1597 amdgpu_device_resume(dev, true);
1598
1599 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1600 } else {
1601 pr_info("switched off\n");
1602 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1603 amdgpu_device_suspend(dev, true);
1604 amdgpu_device_cache_pci_state(pdev);
1605 /* Shut down the device */
1606 pci_disable_device(pdev);
1607 pci_set_power_state(pdev, PCI_D3cold);
1608 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1609 }
1610}
1611
1612/**
1613 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1614 *
1615 * @pdev: pci dev pointer
1616 *
 * Callback for the switcheroo driver. Checks if the switcheroo
 * state can be changed.
1619 * Returns true if the state can be changed, false if not.
1620 */
1621static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1622{
1623 struct drm_device *dev = pci_get_drvdata(pdev);
1624
1625 /*
1626 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1627 * locking inversion with the driver load path. And the access here is
1628 * completely racy anyway. So don't bother with locking for now.
1629 */
1630 return atomic_read(&dev->open_count) == 0;
1631}
1632
1633static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1634 .set_gpu_state = amdgpu_switcheroo_set_state,
1635 .reprobe = NULL,
1636 .can_switch = amdgpu_switcheroo_can_switch,
1637};
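
/*
 * Registration sketch (illustrative only; the actual call site lives in
 * the device init path): the ops above are handed to vga_switcheroo,
 * roughly like:
 *
 *	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops,
 *				       amdgpu_device_supports_px(adev_to_drm(adev)));
 */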
1638
1639/**
1640 * amdgpu_device_ip_set_clockgating_state - set the CG state
1641 *
1642 * @dev: amdgpu_device pointer
1643 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1644 * @state: clockgating state (gate or ungate)
1645 *
1646 * Sets the requested clockgating state for all instances of
1647 * the hardware IP specified.
1648 * Returns the error code from the last instance.
1649 */
1650int amdgpu_device_ip_set_clockgating_state(void *dev,
1651 enum amd_ip_block_type block_type,
1652 enum amd_clockgating_state state)
1653{
1654 struct amdgpu_device *adev = dev;
1655 int i, r = 0;
1656
1657 for (i = 0; i < adev->num_ip_blocks; i++) {
1658 if (!adev->ip_blocks[i].status.valid)
1659 continue;
1660 if (adev->ip_blocks[i].version->type != block_type)
1661 continue;
1662 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1663 continue;
1664 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1665 (void *)adev, state);
1666 if (r)
1667 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1668 adev->ip_blocks[i].version->funcs->name, r);
1669 }
1670 return r;
1671}
1672
1673/**
1674 * amdgpu_device_ip_set_powergating_state - set the PG state
1675 *
1676 * @dev: amdgpu_device pointer
1677 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1678 * @state: powergating state (gate or ungate)
1679 *
1680 * Sets the requested powergating state for all instances of
1681 * the hardware IP specified.
1682 * Returns the error code from the last instance.
1683 */
1684int amdgpu_device_ip_set_powergating_state(void *dev,
1685 enum amd_ip_block_type block_type,
1686 enum amd_powergating_state state)
1687{
1688 struct amdgpu_device *adev = dev;
1689 int i, r = 0;
1690
1691 for (i = 0; i < adev->num_ip_blocks; i++) {
1692 if (!adev->ip_blocks[i].status.valid)
1693 continue;
1694 if (adev->ip_blocks[i].version->type != block_type)
1695 continue;
1696 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1697 continue;
1698 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1699 (void *)adev, state);
1700 if (r)
1701 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1702 adev->ip_blocks[i].version->funcs->name, r);
1703 }
1704 return r;
1705}
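
/*
 * Usage sketch (illustrative only): IP code (un)gates every instance of
 * a block type in one call, e.g. ungating UVD before submitting work:
 *
 *	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 *					       AMD_PG_STATE_UNGATE);
 */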
1706
1707/**
1708 * amdgpu_device_ip_get_clockgating_state - get the CG state
1709 *
1710 * @adev: amdgpu_device pointer
1711 * @flags: clockgating feature flags
1712 *
1713 * Walks the list of IPs on the device and updates the clockgating
1714 * flags for each IP.
1715 * Updates @flags with the feature flags for each hardware IP where
1716 * clockgating is enabled.
1717 */
1718void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1719 u64 *flags)
1720{
1721 int i;
1722
1723 for (i = 0; i < adev->num_ip_blocks; i++) {
1724 if (!adev->ip_blocks[i].status.valid)
1725 continue;
1726 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1727 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1728 }
1729}
1730
1731/**
1732 * amdgpu_device_ip_wait_for_idle - wait for idle
1733 *
1734 * @adev: amdgpu_device pointer
1735 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1736 *
 * Waits for the requested hardware IP to be idle.
1738 * Returns 0 for success or a negative error code on failure.
1739 */
1740int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1741 enum amd_ip_block_type block_type)
1742{
1743 int i, r;
1744
1745 for (i = 0; i < adev->num_ip_blocks; i++) {
1746 if (!adev->ip_blocks[i].status.valid)
1747 continue;
1748 if (adev->ip_blocks[i].version->type == block_type) {
1749 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1750 if (r)
1751 return r;
1752 break;
1753 }
1754 }
1755 return 0;
1757}
1758
1759/**
1760 * amdgpu_device_ip_is_idle - is the hardware IP idle
1761 *
1762 * @adev: amdgpu_device pointer
1763 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1764 *
1765 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
1767 */
1768bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1769 enum amd_ip_block_type block_type)
1770{
1771 int i;
1772
1773 for (i = 0; i < adev->num_ip_blocks; i++) {
1774 if (!adev->ip_blocks[i].status.valid)
1775 continue;
1776 if (adev->ip_blocks[i].version->type == block_type)
1777 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1778 }
1779 return true;
1781}
1782
1783/**
1784 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1785 *
1786 * @adev: amdgpu_device pointer
1787 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1788 *
1789 * Returns a pointer to the hardware IP block structure
1790 * if it exists for the asic, otherwise NULL.
1791 */
1792struct amdgpu_ip_block *
1793amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1794 enum amd_ip_block_type type)
1795{
1796 int i;
1797
1798 for (i = 0; i < adev->num_ip_blocks; i++)
1799 if (adev->ip_blocks[i].version->type == type)
1800 return &adev->ip_blocks[i];
1801
1802 return NULL;
1803}
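
/*
 * Usage sketch (illustrative only):
 *
 *	struct amdgpu_ip_block *ip =
 *		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
 *
 *	if (ip)
 *		DRM_INFO("GMC v%u.%u\n", ip->version->major,
 *			 ip->version->minor);
 */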
1804
1805/**
1806 * amdgpu_device_ip_block_version_cmp
1807 *
1808 * @adev: amdgpu_device pointer
1809 * @type: enum amd_ip_block_type
1810 * @major: major version
1811 * @minor: minor version
1812 *
 * Returns 0 if the IP block version is equal or greater,
 * 1 if it is smaller or the ip_block doesn't exist.
1815 */
1816int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1817 enum amd_ip_block_type type,
1818 u32 major, u32 minor)
1819{
1820 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1821
1822 if (ip_block && ((ip_block->version->major > major) ||
1823 ((ip_block->version->major == major) &&
1824 (ip_block->version->minor >= minor))))
1825 return 0;
1826
1827 return 1;
1828}
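
/*
 * Usage sketch (illustrative only): gate a code path on a minimum IP
 * version, e.g. require GFX 8.1 or newer:
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       8, 1) == 0) {
 *		... GFX 8.1+ only path ...
 *	}
 */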
1829
1830/**
1831 * amdgpu_device_ip_block_add
1832 *
1833 * @adev: amdgpu_device pointer
1834 * @ip_block_version: pointer to the IP to add
1835 *
1836 * Adds the IP block driver information to the collection of IPs
1837 * on the asic.
1838 */
1839int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1840 const struct amdgpu_ip_block_version *ip_block_version)
1841{
1842 if (!ip_block_version)
1843 return -EINVAL;
1844
1845 switch (ip_block_version->type) {
1846 case AMD_IP_BLOCK_TYPE_VCN:
1847 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1848 return 0;
1849 break;
1850 case AMD_IP_BLOCK_TYPE_JPEG:
1851 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1852 return 0;
1853 break;
1854 default:
1855 break;
1856 }
1857
1858 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1859 ip_block_version->funcs->name);
1860
1861 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1862
1863 return 0;
1864}
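
/*
 * Usage sketch (illustrative only): asic setup code registers its IP
 * blocks one by one, e.g. (gmc_v9_0_ip_block is defined by the GMC v9
 * code):
 *
 *	r = amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
 *	if (r)
 *		return r;
 */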
1865
1866/**
1867 * amdgpu_device_enable_virtual_display - enable virtual display feature
1868 *
1869 * @adev: amdgpu_device pointer
1870 *
 * Enables the virtual display feature if the user has enabled it via
1872 * the module parameter virtual_display. This feature provides a virtual
1873 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display settings (number of
 * virtual connectors, crtcs, etc.) specified.
1877 */
1878static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1879{
1880 adev->enable_virtual_display = false;
1881
1882 if (amdgpu_virtual_display) {
1883 const char *pci_address_name = pci_name(adev->pdev);
1884 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1885
1886 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1887 pciaddstr_tmp = pciaddstr;
1888 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1889 pciaddname = strsep(&pciaddname_tmp, ",");
1890 if (!strcmp("all", pciaddname)
1891 || !strcmp(pci_address_name, pciaddname)) {
1892 long num_crtc;
1893 int res = -1;
1894
1895 adev->enable_virtual_display = true;
1896
1897 if (pciaddname_tmp)
1898 res = kstrtol(pciaddname_tmp, 10,
1899 &num_crtc);
1900
1901 if (!res) {
1902 if (num_crtc < 1)
1903 num_crtc = 1;
1904 if (num_crtc > 6)
1905 num_crtc = 6;
1906 adev->mode_info.num_crtc = num_crtc;
1907 } else {
1908 adev->mode_info.num_crtc = 1;
1909 }
1910 break;
1911 }
1912 }
1913
1914 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1915 amdgpu_virtual_display, pci_address_name,
1916 adev->enable_virtual_display, adev->mode_info.num_crtc);
1917
1918 kfree(pciaddstr);
1919 }
1920}
1921
1922void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
1923{
1924 if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
1925 adev->mode_info.num_crtc = 1;
1926 adev->enable_virtual_display = true;
1927 DRM_INFO("virtual_display:%d, num_crtc:%d\n",
1928 adev->enable_virtual_display, adev->mode_info.num_crtc);
1929 }
1930}
1931
1932/**
1933 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1934 *
1935 * @adev: amdgpu_device pointer
1936 *
1937 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
1939 * the asic.
1940 * Returns 0 on success, -EINVAL on failure.
1941 */
1942static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1943{
1944 const char *chip_name;
1945 char fw_name[40];
1946 int err;
1947 const struct gpu_info_firmware_header_v1_0 *hdr;
1948
1949 adev->firmware.gpu_info_fw = NULL;
1950
1951 if (adev->mman.discovery_bin) {
1952 /*
1953 * FIXME: The bounding box is still needed by Navi12, so
1954 * temporarily read it from gpu_info firmware. Should be dropped
1955 * when DAL no longer needs it.
1956 */
1957 if (adev->asic_type != CHIP_NAVI12)
1958 return 0;
1959 }
1960
1961 switch (adev->asic_type) {
1962 default:
1963 return 0;
1964 case CHIP_VEGA10:
1965 chip_name = "vega10";
1966 break;
1967 case CHIP_VEGA12:
1968 chip_name = "vega12";
1969 break;
1970 case CHIP_RAVEN:
1971 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1972 chip_name = "raven2";
1973 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1974 chip_name = "picasso";
1975 else
1976 chip_name = "raven";
1977 break;
1978 case CHIP_ARCTURUS:
1979 chip_name = "arcturus";
1980 break;
1981 case CHIP_NAVI12:
1982 chip_name = "navi12";
1983 break;
1984 }
1985
1986 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1987 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1988 if (err) {
1989 dev_err(adev->dev,
1990 "Failed to load gpu_info firmware \"%s\"\n",
1991 fw_name);
1992 goto out;
1993 }
1994 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1995 if (err) {
1996 dev_err(adev->dev,
1997 "Failed to validate gpu_info firmware \"%s\"\n",
1998 fw_name);
1999 goto out;
2000 }
2001
2002 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2003 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2004
2005 switch (hdr->version_major) {
2006 case 1:
2007 {
2008 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2009 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2010 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2011
		/*
		 * Should be dropped when DAL no longer needs it.
		 */
2015 if (adev->asic_type == CHIP_NAVI12)
2016 goto parse_soc_bounding_box;
2017
2018 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2019 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2020 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2021 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2022 adev->gfx.config.max_texture_channel_caches =
2023 le32_to_cpu(gpu_info_fw->gc_num_tccs);
2024 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2025 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2026 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2027 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2028 adev->gfx.config.double_offchip_lds_buf =
2029 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2030 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2031 adev->gfx.cu_info.max_waves_per_simd =
2032 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2033 adev->gfx.cu_info.max_scratch_slots_per_cu =
2034 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2035 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2036 if (hdr->version_minor >= 1) {
2037 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2038 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2039 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2040 adev->gfx.config.num_sc_per_sh =
2041 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2042 adev->gfx.config.num_packer_per_sc =
2043 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2044 }
2045
2046parse_soc_bounding_box:
		/*
		 * The soc bounding box info is not integrated in the
		 * discovery table, so we always need to parse it from the
		 * gpu info firmware if needed.
		 */
2051 if (hdr->version_minor == 2) {
2052 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2053 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2054 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2055 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2056 }
2057 break;
2058 }
2059 default:
2060 dev_err(adev->dev,
2061 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2062 err = -EINVAL;
2063 goto out;
2064 }
2065out:
2066 return err;
2067}
2068
2069/**
2070 * amdgpu_device_ip_early_init - run early init for hardware IPs
2071 *
2072 * @adev: amdgpu_device pointer
2073 *
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered, and each IP's early_init callback is run.
 * This is the first stage in initializing the asic.
2077 * Returns 0 on success, negative error code on failure.
2078 */
2079static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2080{
2081 struct drm_device *dev = adev_to_drm(adev);
2082 struct pci_dev *parent;
2083 int i, r;
2084
2085 amdgpu_device_enable_virtual_display(adev);
2086
2087 if (amdgpu_sriov_vf(adev)) {
2088 r = amdgpu_virt_request_full_gpu(adev, true);
2089 if (r)
2090 return r;
2091 }
2092
2093 switch (adev->asic_type) {
2094#ifdef CONFIG_DRM_AMDGPU_SI
2095 case CHIP_VERDE:
2096 case CHIP_TAHITI:
2097 case CHIP_PITCAIRN:
2098 case CHIP_OLAND:
2099 case CHIP_HAINAN:
2100 adev->family = AMDGPU_FAMILY_SI;
2101 r = si_set_ip_blocks(adev);
2102 if (r)
2103 return r;
2104 break;
2105#endif
2106#ifdef CONFIG_DRM_AMDGPU_CIK
2107 case CHIP_BONAIRE:
2108 case CHIP_HAWAII:
2109 case CHIP_KAVERI:
2110 case CHIP_KABINI:
2111 case CHIP_MULLINS:
2112 if (adev->flags & AMD_IS_APU)
2113 adev->family = AMDGPU_FAMILY_KV;
2114 else
2115 adev->family = AMDGPU_FAMILY_CI;
2116
2117 r = cik_set_ip_blocks(adev);
2118 if (r)
2119 return r;
2120 break;
2121#endif
2122 case CHIP_TOPAZ:
2123 case CHIP_TONGA:
2124 case CHIP_FIJI:
2125 case CHIP_POLARIS10:
2126 case CHIP_POLARIS11:
2127 case CHIP_POLARIS12:
2128 case CHIP_VEGAM:
2129 case CHIP_CARRIZO:
2130 case CHIP_STONEY:
2131 if (adev->flags & AMD_IS_APU)
2132 adev->family = AMDGPU_FAMILY_CZ;
2133 else
2134 adev->family = AMDGPU_FAMILY_VI;
2135
2136 r = vi_set_ip_blocks(adev);
2137 if (r)
2138 return r;
2139 break;
2140 default:
2141 r = amdgpu_discovery_set_ip_blocks(adev);
2142 if (r)
2143 return r;
2144 break;
2145 }
2146
2147 if (amdgpu_has_atpx() &&
2148 (amdgpu_is_atpx_hybrid() ||
2149 amdgpu_has_atpx_dgpu_power_cntl()) &&
2150 ((adev->flags & AMD_IS_APU) == 0) &&
2151 !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2152 adev->flags |= AMD_IS_PX;
2153
2154 if (!(adev->flags & AMD_IS_APU)) {
2155 parent = pci_upstream_bridge(adev->pdev);
2156 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2157 }
2158
2159 amdgpu_amdkfd_device_probe(adev);
2160
2161 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2162 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2163 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2164 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2165 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2166
2167 for (i = 0; i < adev->num_ip_blocks; i++) {
2168 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2169 DRM_ERROR("disabled ip block: %d <%s>\n",
2170 i, adev->ip_blocks[i].version->funcs->name);
2171 adev->ip_blocks[i].status.valid = false;
2172 } else {
2173 if (adev->ip_blocks[i].version->funcs->early_init) {
2174 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2175 if (r == -ENOENT) {
2176 adev->ip_blocks[i].status.valid = false;
2177 } else if (r) {
2178 DRM_ERROR("early_init of IP block <%s> failed %d\n",
2179 adev->ip_blocks[i].version->funcs->name, r);
2180 return r;
2181 } else {
2182 adev->ip_blocks[i].status.valid = true;
2183 }
2184 } else {
2185 adev->ip_blocks[i].status.valid = true;
2186 }
2187 }
2188 /* get the vbios after the asic_funcs are set up */
2189 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2190 r = amdgpu_device_parse_gpu_info_fw(adev);
2191 if (r)
2192 return r;
2193
2194 /* Read BIOS */
2195 if (!amdgpu_get_bios(adev))
2196 return -EINVAL;
2197
2198 r = amdgpu_atombios_init(adev);
2199 if (r) {
2200 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2201 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2202 return r;
2203 }
2204
			/* get pf2vf msg info at the earliest possible time */
2206 if (amdgpu_sriov_vf(adev))
2207 amdgpu_virt_init_data_exchange(adev);
2209 }
2210 }
2211
2212 adev->cg_flags &= amdgpu_cg_mask;
2213 adev->pg_flags &= amdgpu_pg_mask;
2214
2215 return 0;
2216}
2217
2218static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2219{
2220 int i, r;
2221
2222 for (i = 0; i < adev->num_ip_blocks; i++) {
2223 if (!adev->ip_blocks[i].status.sw)
2224 continue;
2225 if (adev->ip_blocks[i].status.hw)
2226 continue;
2227 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2228 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2229 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2230 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2231 if (r) {
2232 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2233 adev->ip_blocks[i].version->funcs->name, r);
2234 return r;
2235 }
2236 adev->ip_blocks[i].status.hw = true;
2237 }
2238 }
2239
2240 return 0;
2241}
2242
2243static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2244{
2245 int i, r;
2246
2247 for (i = 0; i < adev->num_ip_blocks; i++) {
2248 if (!adev->ip_blocks[i].status.sw)
2249 continue;
2250 if (adev->ip_blocks[i].status.hw)
2251 continue;
2252 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2253 if (r) {
2254 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2255 adev->ip_blocks[i].version->funcs->name, r);
2256 return r;
2257 }
2258 adev->ip_blocks[i].status.hw = true;
2259 }
2260
2261 return 0;
2262}
2263
2264static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2265{
2266 int r = 0;
2267 int i;
2268 uint32_t smu_version;
2269
2270 if (adev->asic_type >= CHIP_VEGA10) {
2271 for (i = 0; i < adev->num_ip_blocks; i++) {
2272 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2273 continue;
2274
2275 if (!adev->ip_blocks[i].status.sw)
2276 continue;
2277
			/* no need to do the fw loading again if already done */
			if (adev->ip_blocks[i].status.hw)
2280 break;
2281
2282 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2283 r = adev->ip_blocks[i].version->funcs->resume(adev);
2284 if (r) {
2285 DRM_ERROR("resume of IP block <%s> failed %d\n",
2286 adev->ip_blocks[i].version->funcs->name, r);
2287 return r;
2288 }
2289 } else {
2290 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2291 if (r) {
2292 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2293 adev->ip_blocks[i].version->funcs->name, r);
2294 return r;
2295 }
2296 }
2297
2298 adev->ip_blocks[i].status.hw = true;
2299 break;
2300 }
2301 }
2302
2303 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2304 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2305
2306 return r;
2307}
2308
2309static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2310{
2311 long timeout;
2312 int r, i;
2313
2314 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2315 struct amdgpu_ring *ring = adev->rings[i];
2316
2317 /* No need to setup the GPU scheduler for rings that don't need it */
2318 if (!ring || ring->no_scheduler)
2319 continue;
2320
2321 switch (ring->funcs->type) {
2322 case AMDGPU_RING_TYPE_GFX:
2323 timeout = adev->gfx_timeout;
2324 break;
2325 case AMDGPU_RING_TYPE_COMPUTE:
2326 timeout = adev->compute_timeout;
2327 break;
2328 case AMDGPU_RING_TYPE_SDMA:
2329 timeout = adev->sdma_timeout;
2330 break;
2331 default:
2332 timeout = adev->video_timeout;
2333 break;
2334 }
2335
2336 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2337 ring->num_hw_submission, amdgpu_job_hang_limit,
2338 timeout, adev->reset_domain->wq,
2339 ring->sched_score, ring->name,
2340 adev->dev);
2341 if (r) {
2342 DRM_ERROR("Failed to create scheduler on ring %s.\n",
2343 ring->name);
2344 return r;
2345 }
2346 }
2347
2348 return 0;
2349}
2350
2352/**
2353 * amdgpu_device_ip_init - run init for hardware IPs
2354 *
2355 * @adev: amdgpu_device pointer
2356 *
2357 * Main initialization pass for hardware IPs. The list of all the hardware
2358 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2359 * are run. sw_init initializes the software state associated with each IP
2360 * and hw_init initializes the hardware associated with each IP.
2361 * Returns 0 on success, negative error code on failure.
2362 */
2363static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2364{
2365 int i, r;
2366
2367 r = amdgpu_ras_init(adev);
2368 if (r)
2369 return r;
2370
2371 for (i = 0; i < adev->num_ip_blocks; i++) {
2372 if (!adev->ip_blocks[i].status.valid)
2373 continue;
2374 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2375 if (r) {
2376 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2377 adev->ip_blocks[i].version->funcs->name, r);
2378 goto init_failed;
2379 }
2380 adev->ip_blocks[i].status.sw = true;
2381
2382 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2383 /* need to do common hw init early so everything is set up for gmc */
2384 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2385 if (r) {
2386 DRM_ERROR("hw_init %d failed %d\n", i, r);
2387 goto init_failed;
2388 }
2389 adev->ip_blocks[i].status.hw = true;
2390 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2391 /* need to do gmc hw init early so we can allocate gpu mem */
2392 /* Try to reserve bad pages early */
2393 if (amdgpu_sriov_vf(adev))
2394 amdgpu_virt_exchange_data(adev);
2395
2396 r = amdgpu_device_vram_scratch_init(adev);
2397 if (r) {
2398 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2399 goto init_failed;
2400 }
2401 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2402 if (r) {
2403 DRM_ERROR("hw_init %d failed %d\n", i, r);
2404 goto init_failed;
2405 }
2406 r = amdgpu_device_wb_init(adev);
2407 if (r) {
2408 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2409 goto init_failed;
2410 }
2411 adev->ip_blocks[i].status.hw = true;
2412
2413 /* right after GMC hw init, we create CSA */
2414 if (amdgpu_mcbp) {
2415 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2416 AMDGPU_GEM_DOMAIN_VRAM,
2417 AMDGPU_CSA_SIZE);
2418 if (r) {
2419 DRM_ERROR("allocate CSA failed %d\n", r);
2420 goto init_failed;
2421 }
2422 }
2423 }
2424 }
2425
2426 if (amdgpu_sriov_vf(adev))
2427 amdgpu_virt_init_data_exchange(adev);
2428
2429 r = amdgpu_ib_pool_init(adev);
2430 if (r) {
2431 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2432 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2433 goto init_failed;
2434 }
2435
	/* create the ucode bo once sw_init has completed */
	r = amdgpu_ucode_create_bo(adev);
2437 if (r)
2438 goto init_failed;
2439
2440 r = amdgpu_device_ip_hw_init_phase1(adev);
2441 if (r)
2442 goto init_failed;
2443
2444 r = amdgpu_device_fw_loading(adev);
2445 if (r)
2446 goto init_failed;
2447
2448 r = amdgpu_device_ip_hw_init_phase2(adev);
2449 if (r)
2450 goto init_failed;
2451
	/*
	 * Retired pages will be loaded from eeprom and reserved here.
	 * This should be called after amdgpu_device_ip_hw_init_phase2, since
	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
	 * functional for I2C communication, which is only true at this point.
	 *
	 * amdgpu_ras_recovery_init() may fail, but the caller only cares
	 * about failures caused by a bad gpu state, and stops the amdgpu
	 * init process accordingly. For other failures, it still releases
	 * all resources and prints an error message rather than returning a
	 * negative value to the caller.
	 *
	 * Note: theoretically, this should be called before all vram
	 * allocations to protect retired pages from being reused.
	 */
2467 r = amdgpu_ras_recovery_init(adev);
2468 if (r)
2469 goto init_failed;
2470
	/*
	 * In case of XGMI, grab an extra reference on the reset domain for
	 * this device.
	 */
2474 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2475 if (amdgpu_xgmi_add_device(adev) == 0) {
2476 if (!amdgpu_sriov_vf(adev)) {
2477 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2478
2479 if (WARN_ON(!hive)) {
2480 r = -ENOENT;
2481 goto init_failed;
2482 }
2483
2484 if (!hive->reset_domain ||
2485 !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2486 r = -ENOENT;
2487 amdgpu_put_xgmi_hive(hive);
2488 goto init_failed;
2489 }
2490
2491 /* Drop the early temporary reset domain we created for device */
2492 amdgpu_reset_put_reset_domain(adev->reset_domain);
2493 adev->reset_domain = hive->reset_domain;
2494 amdgpu_put_xgmi_hive(hive);
2495 }
2496 }
2497 }
2498
2499 r = amdgpu_device_init_schedulers(adev);
2500 if (r)
2501 goto init_failed;
2502
	/* Don't init kfd if the whole hive needs to be reset during init */
2504 if (!adev->gmc.xgmi.pending_reset)
2505 amdgpu_amdkfd_device_init(adev);
2506
2507 amdgpu_fru_get_product_info(adev);
2508
2509init_failed:
2510 if (amdgpu_sriov_vf(adev))
2511 amdgpu_virt_release_full_gpu(adev, true);
2512
2513 return r;
2514}
2515
2516/**
 * amdgpu_device_fill_reset_magic - cache the reset magic from the gart pointer
2518 *
2519 * @adev: amdgpu_device pointer
2520 *
 * Caches a reset magic value from the gart pointer in VRAM. The driver calls
 * this function before a GPU reset. If the same value is still present after
 * the reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2524 */
2525static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2526{
2527 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2528}
2529
2530/**
2531 * amdgpu_device_check_vram_lost - check if vram is valid
2532 *
2533 * @adev: amdgpu_device pointer
2534 *
 * Checks the reset magic value saved from the gart pointer in VRAM.
 * The driver calls this after a GPU reset to see if the contents of
 * VRAM have been lost or not.
 * Returns true if vram is lost, false if not.
2539 */
2540static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2541{
2542 if (memcmp(adev->gart.ptr, adev->reset_magic,
2543 AMDGPU_RESET_MAGIC_NUM))
2544 return true;
2545
2546 if (!amdgpu_in_reset(adev))
2547 return false;
2548
2549 /*
2550 * For all ASICs with baco/mode1 reset, the VRAM is
2551 * always assumed to be lost.
2552 */
2553 switch (amdgpu_asic_reset_method(adev)) {
2554 case AMD_RESET_METHOD_BACO:
2555 case AMD_RESET_METHOD_MODE1:
2556 return true;
2557 default:
2558 return false;
2559 }
2560}
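
/*
 * Usage sketch (illustrative only): the two helpers pair up across a
 * GPU reset:
 *
 *	amdgpu_device_fill_reset_magic(adev);
 *	... perform the GPU reset ...
 *	vram_lost = amdgpu_device_check_vram_lost(adev);
 */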
2561
2562/**
2563 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2564 *
2565 * @adev: amdgpu_device pointer
2566 * @state: clockgating state (gate or ungate)
2567 *
2568 * The list of all the hardware IPs that make up the asic is walked and the
2569 * set_clockgating_state callbacks are run.
 * During late init this enables clockgating for hardware IPs; during fini
 * or suspend it disables clockgating.
2572 * Returns 0 on success, negative error code on failure.
2573 */
2575int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2576 enum amd_clockgating_state state)
2577{
2578 int i, j, r;
2579
2580 if (amdgpu_emu_mode == 1)
2581 return 0;
2582
2583 for (j = 0; j < adev->num_ip_blocks; j++) {
2584 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2585 if (!adev->ip_blocks[i].status.late_initialized)
2586 continue;
2587 /* skip CG for GFX on S0ix */
2588 if (adev->in_s0ix &&
2589 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2590 continue;
2591 /* skip CG for VCE/UVD, it's handled specially */
2592 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2593 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2594 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2595 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2596 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2597 /* enable clockgating to save power */
2598 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2599 state);
2600 if (r) {
2601 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2602 adev->ip_blocks[i].version->funcs->name, r);
2603 return r;
2604 }
2605 }
2606 }
2607
2608 return 0;
2609}
2610
2611int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2612 enum amd_powergating_state state)
2613{
2614 int i, j, r;
2615
2616 if (amdgpu_emu_mode == 1)
2617 return 0;
2618
2619 for (j = 0; j < adev->num_ip_blocks; j++) {
2620 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2621 if (!adev->ip_blocks[i].status.late_initialized)
2622 continue;
2623 /* skip PG for GFX on S0ix */
2624 if (adev->in_s0ix &&
2625 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2626 continue;
		/* skip PG for VCE/UVD, it's handled specially */
2628 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2629 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2630 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2631 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2632 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2633 /* enable powergating to save power */
2634 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2635 state);
2636 if (r) {
2637 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2638 adev->ip_blocks[i].version->funcs->name, r);
2639 return r;
2640 }
2641 }
2642 }
2643 return 0;
2644}
2645
2646static int amdgpu_device_enable_mgpu_fan_boost(void)
2647{
2648 struct amdgpu_gpu_instance *gpu_ins;
2649 struct amdgpu_device *adev;
2650 int i, ret = 0;
2651
2652 mutex_lock(&mgpu_info.mutex);
2653
2654 /*
2655 * MGPU fan boost feature should be enabled
2656 * only when there are two or more dGPUs in
2657 * the system
2658 */
2659 if (mgpu_info.num_dgpu < 2)
2660 goto out;
2661
2662 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2663 gpu_ins = &(mgpu_info.gpu_ins[i]);
2664 adev = gpu_ins->adev;
2665 if (!(adev->flags & AMD_IS_APU) &&
2666 !gpu_ins->mgpu_fan_enabled) {
2667 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2668 if (ret)
2669 break;
2670
2671 gpu_ins->mgpu_fan_enabled = 1;
2672 }
2673 }
2674
2675out:
2676 mutex_unlock(&mgpu_info.mutex);
2677
2678 return ret;
2679}
2680
2681/**
2682 * amdgpu_device_ip_late_init - run late init for hardware IPs
2683 *
2684 * @adev: amdgpu_device pointer
2685 *
2686 * Late initialization pass for hardware IPs. The list of all the hardware
2687 * IPs that make up the asic is walked and the late_init callbacks are run.
2688 * late_init covers any special initialization that an IP requires
 * after all of the IPs have been initialized or something that needs to happen
2690 * late in the init process.
2691 * Returns 0 on success, negative error code on failure.
2692 */
2693static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2694{
2695 struct amdgpu_gpu_instance *gpu_instance;
2696 int i = 0, r;
2697
2698 for (i = 0; i < adev->num_ip_blocks; i++) {
2699 if (!adev->ip_blocks[i].status.hw)
2700 continue;
2701 if (adev->ip_blocks[i].version->funcs->late_init) {
2702 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2703 if (r) {
2704 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2705 adev->ip_blocks[i].version->funcs->name, r);
2706 return r;
2707 }
2708 }
2709 adev->ip_blocks[i].status.late_initialized = true;
2710 }
2711
2712 r = amdgpu_ras_late_init(adev);
2713 if (r) {
2714 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2715 return r;
2716 }
2717
2718 amdgpu_ras_set_error_query_ready(adev, true);
2719
2720 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2721 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2722
2723 amdgpu_device_fill_reset_magic(adev);
2724
2725 r = amdgpu_device_enable_mgpu_fan_boost();
2726 if (r)
2727 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2728
	/* For passthrough configurations on arcturus and aldebaran, enable
	 * special handling of SBR.
	 */
	if (amdgpu_passthrough(adev) &&
	    ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
	     adev->asic_type == CHIP_ALDEBARAN))
2732 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2733
2734 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2735 mutex_lock(&mgpu_info.mutex);
2736
		/*
		 * Reset the device p-state to low, as it was booted high.
		 *
		 * This should be performed only after all devices from the
		 * same hive have been initialized.
		 *
		 * However, the number of devices in a hive is not known in
		 * advance, since it is counted one by one as devices
		 * initialize.
		 *
		 * So we wait until all XGMI interlinked devices are
		 * initialized. This may introduce some delay, as those
		 * devices may come from different hives, but that should
		 * be OK.
		 */
2750 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2751 for (i = 0; i < mgpu_info.num_gpu; i++) {
2752 gpu_instance = &(mgpu_info.gpu_ins[i]);
2753 if (gpu_instance->adev->flags & AMD_IS_APU)
2754 continue;
2755
2756 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2757 AMDGPU_XGMI_PSTATE_MIN);
2758 if (r) {
2759 DRM_ERROR("pstate setting failed (%d).\n", r);
2760 break;
2761 }
2762 }
2763 }
2764
2765 mutex_unlock(&mgpu_info.mutex);
2766 }
2767
2768 return 0;
2769}
2770
2771/**
2772 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2773 *
2774 * @adev: amdgpu_device pointer
2775 *
 * For ASICs that need to disable the SMC first.
2777 */
2778static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2779{
2780 int i, r;
2781
2782 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2783 return;
2784
2785 for (i = 0; i < adev->num_ip_blocks; i++) {
2786 if (!adev->ip_blocks[i].status.hw)
2787 continue;
2788 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2789 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2790 /* XXX handle errors */
2791 if (r) {
2792 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2793 adev->ip_blocks[i].version->funcs->name, r);
2794 }
2795 adev->ip_blocks[i].status.hw = false;
2796 break;
2797 }
2798 }
2799}
2800
2801static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2802{
2803 int i, r;
2804
2805 for (i = 0; i < adev->num_ip_blocks; i++) {
2806 if (!adev->ip_blocks[i].version->funcs->early_fini)
2807 continue;
2808
2809 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2810 if (r) {
2811 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2812 adev->ip_blocks[i].version->funcs->name, r);
2813 }
2814 }
2815
2816 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2817 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2818
2819 amdgpu_amdkfd_suspend(adev, false);
2820
	/* Workaround for ASICs that need to disable the SMC first */
2822 amdgpu_device_smu_fini_early(adev);
2823
2824 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2825 if (!adev->ip_blocks[i].status.hw)
2826 continue;
2827
2828 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2829 /* XXX handle errors */
2830 if (r) {
2831 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2832 adev->ip_blocks[i].version->funcs->name, r);
2833 }
2834
2835 adev->ip_blocks[i].status.hw = false;
2836 }
2837
2838 if (amdgpu_sriov_vf(adev)) {
2839 if (amdgpu_virt_release_full_gpu(adev, false))
2840 DRM_ERROR("failed to release exclusive mode on fini\n");
2841 }
2842
2843 return 0;
2844}
2845
2846/**
2847 * amdgpu_device_ip_fini - run fini for hardware IPs
2848 *
2849 * @adev: amdgpu_device pointer
2850 *
2851 * Main teardown pass for hardware IPs. The list of all the hardware
2852 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2853 * are run. hw_fini tears down the hardware associated with each IP
2854 * and sw_fini tears down any software state associated with each IP.
2855 * Returns 0 on success, negative error code on failure.
2856 */
2857static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2858{
2859 int i, r;
2860
2861 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2862 amdgpu_virt_release_ras_err_handler_data(adev);
2863
2864 if (adev->gmc.xgmi.num_physical_nodes > 1)
2865 amdgpu_xgmi_remove_device(adev);
2866
2867 amdgpu_amdkfd_device_fini_sw(adev);
2868
2869 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2870 if (!adev->ip_blocks[i].status.sw)
2871 continue;
2872
2873 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2874 amdgpu_ucode_free_bo(adev);
2875 amdgpu_free_static_csa(&adev->virt.csa_obj);
2876 amdgpu_device_wb_fini(adev);
2877 amdgpu_device_vram_scratch_fini(adev);
2878 amdgpu_ib_pool_fini(adev);
2879 }
2880
2881 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2882 /* XXX handle errors */
2883 if (r) {
2884 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2885 adev->ip_blocks[i].version->funcs->name, r);
2886 }
2887 adev->ip_blocks[i].status.sw = false;
2888 adev->ip_blocks[i].status.valid = false;
2889 }
2890
2891 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2892 if (!adev->ip_blocks[i].status.late_initialized)
2893 continue;
2894 if (adev->ip_blocks[i].version->funcs->late_fini)
2895 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2896 adev->ip_blocks[i].status.late_initialized = false;
2897 }
2898
2899 amdgpu_ras_fini(adev);
2900
2901 return 0;
2902}
2903
2904/**
2905 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2906 *
2907 * @work: work_struct.
2908 */
2909static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2910{
2911 struct amdgpu_device *adev =
2912 container_of(work, struct amdgpu_device, delayed_init_work.work);
2913 int r;
2914
2915 r = amdgpu_ib_ring_tests(adev);
2916 if (r)
2917 DRM_ERROR("ib ring test failed (%d).\n", r);
2918}
2919
2920static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2921{
2922 struct amdgpu_device *adev =
2923 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2924
2925 WARN_ON_ONCE(adev->gfx.gfx_off_state);
2926 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2927
2928 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2929 adev->gfx.gfx_off_state = true;
2930}
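
/*
 * Illustrative usage (editor's sketch, not part of this file's control
 * flow): callers that need GFX powered up bracket their register access
 * with the refcounted helper instead of driving the SMU directly:
 *
 *	amdgpu_gfx_off_ctrl(adev, false);	bumps gfx_off_req_count, keeps GFX on
 *	... access GFX registers safely ...
 *	amdgpu_gfx_off_ctrl(adev, true);	drops the count, re-arms gfx_off_delay_work
 */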
2931
2932/**
2933 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2934 *
2935 * @adev: amdgpu_device pointer
2936 *
2937 * Main suspend function for hardware IPs (phase 1). The list of all the
2938 * hardware IPs that make up the asic is walked, clockgating is disabled
2939 * and the suspend callbacks are run for the display (DCE) blocks only;
2940 * the remaining blocks are suspended in phase 2.
2941 * Returns 0 on success, negative error code on failure.
2942 */
2943static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2944{
2945 int i, r;
2946
2947 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2948 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2949
2950 /*
2951 * Per the PMFW team's suggestion, the driver needs to disable the
2952 * gfxoff and df cstate features for gpu reset (e.g. Mode1Reset)
2953 * scenarios. Add the missing df cstate disablement here.
2954 */
2955 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2956 dev_warn(adev->dev, "Failed to disallow df cstate\n");
2957
2958 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2959 if (!adev->ip_blocks[i].status.valid)
2960 continue;
2961
2962 /* displays are handled separately */
2963 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2964 continue;
2965
2966 /* XXX handle errors */
2967 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2969 if (r) {
2970 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2971 adev->ip_blocks[i].version->funcs->name, r);
2972 return r;
2973 }
2974
2975 adev->ip_blocks[i].status.hw = false;
2976 }
2977
2978 return 0;
2979}
2980
2981/**
2982 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2983 *
2984 * @adev: amdgpu_device pointer
2985 *
2986 * Main suspend function for hardware IPs (phase 2). The list of all the
2987 * hardware IPs that make up the asic is walked and the suspend callbacks
2988 * are run for all blocks except the display (DCE) blocks handled in phase 1.
2989 * suspend puts each IP's hardware and software state into one suitable for suspend.
2990 * Returns 0 on success, negative error code on failure.
2991 */
2992static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2993{
2994 int i, r;
2995
2996 if (adev->in_s0ix)
2997 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2998
2999 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3000 if (!adev->ip_blocks[i].status.valid)
3001 continue;
3002 /* displays are handled in phase1 */
3003 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3004 continue;
3005 /* PSP lost connection when err_event_athub occurs */
3006 if (amdgpu_ras_intr_triggered() &&
3007 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3008 adev->ip_blocks[i].status.hw = false;
3009 continue;
3010 }
3011
3012 /* skip unnecessary suspend if the IP blocks have not been initialized yet */
3013 if (adev->gmc.xgmi.pending_reset &&
3014 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3015 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3016 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3017 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3018 adev->ip_blocks[i].status.hw = false;
3019 continue;
3020 }
3021
3022 /* skip suspend of gfx/mes and psp for S0ix
3023 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3024 * like at runtime. PSP is also part of the always on hardware
3025 * so no need to suspend it.
3026 */
3027 if (adev->in_s0ix &&
3028 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3029 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3030 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3031 continue;
3032
3033 /* XXX handle errors */
3034 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3036 if (r) {
3037 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3038 adev->ip_blocks[i].version->funcs->name, r);
3039 }
3040 adev->ip_blocks[i].status.hw = false;
3041 /* handle putting the SMC in the appropriate state */
3042 if (!amdgpu_sriov_vf(adev)) {
3043 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3044 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3045 if (r) {
3046 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3047 adev->mp1_state, r);
3048 return r;
3049 }
3050 }
3051 }
3052 }
3053
3054 return 0;
3055}
3056
3057/**
3058 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3059 *
3060 * @adev: amdgpu_device pointer
3061 *
3062 * Main suspend function for hardware IPs. The list of all the hardware
3063 * IPs that make up the asic is walked, clockgating is disabled and the
3064 * suspend callbacks are run. suspend puts the hardware and software state
3065 * in each IP into a state suitable for suspend.
3066 * Returns 0 on success, negative error code on failure.
3067 */
3068int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3069{
3070 int r;
3071
3072 if (amdgpu_sriov_vf(adev)) {
3073 amdgpu_virt_fini_data_exchange(adev);
3074 amdgpu_virt_request_full_gpu(adev, false);
3075 }
3076
3077 r = amdgpu_device_ip_suspend_phase1(adev);
3078 if (r)
3079 return r;
3080 r = amdgpu_device_ip_suspend_phase2(adev);
3081
3082 if (amdgpu_sriov_vf(adev))
3083 amdgpu_virt_release_full_gpu(adev, false);
3084
3085 return r;
3086}
3087
3088static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3089{
3090 int i, r;
3091
3092 static enum amd_ip_block_type ip_order[] = {
3093 AMD_IP_BLOCK_TYPE_COMMON,
3094 AMD_IP_BLOCK_TYPE_GMC,
3095 AMD_IP_BLOCK_TYPE_PSP,
3096 AMD_IP_BLOCK_TYPE_IH,
3097 };
3098
3099 for (i = 0; i < adev->num_ip_blocks; i++) {
3100 int j;
3101 struct amdgpu_ip_block *block;
3102
3103 block = &adev->ip_blocks[i];
3104 block->status.hw = false;
3105
3106 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3108 if (block->version->type != ip_order[j] ||
3109 !block->status.valid)
3110 continue;
3111
3112 r = block->version->funcs->hw_init(adev);
3113 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3114 if (r)
3115 return r;
3116 block->status.hw = true;
3117 }
3118 }
3119
3120 return 0;
3121}
3122
3123static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3124{
3125 int i, r;
3126
3127 static enum amd_ip_block_type ip_order[] = {
3128 AMD_IP_BLOCK_TYPE_SMC,
3129 AMD_IP_BLOCK_TYPE_DCE,
3130 AMD_IP_BLOCK_TYPE_GFX,
3131 AMD_IP_BLOCK_TYPE_SDMA,
3132 AMD_IP_BLOCK_TYPE_UVD,
3133 AMD_IP_BLOCK_TYPE_VCE,
3134 AMD_IP_BLOCK_TYPE_VCN
3135 };
3136
3137 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3138 int j;
3139 struct amdgpu_ip_block *block;
3140
3141 for (j = 0; j < adev->num_ip_blocks; j++) {
3142 block = &adev->ip_blocks[j];
3143
3144 if (block->version->type != ip_order[i] ||
3145 !block->status.valid ||
3146 block->status.hw)
3147 continue;
3148
3149 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3150 r = block->version->funcs->resume(adev);
3151 else
3152 r = block->version->funcs->hw_init(adev);
3153
3154 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3155 if (r)
3156 return r;
3157 block->status.hw = true;
3158 }
3159 }
3160
3161 return 0;
3162}
3163
3164/**
3165 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3166 *
3167 * @adev: amdgpu_device pointer
3168 *
3169 * First resume function for hardware IPs. The list of all the hardware
3170 * IPs that make up the asic is walked and the resume callbacks are run for
3171 * COMMON, GMC, and IH (plus PSP under SR-IOV). resume puts the hardware into a functional state
3172 * after a suspend and updates the software state as necessary. This
3173 * function is also used for restoring the GPU after a GPU reset.
3174 * Returns 0 on success, negative error code on failure.
3175 */
3176static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3177{
3178 int i, r;
3179
3180 for (i = 0; i < adev->num_ip_blocks; i++) {
3181 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3182 continue;
3183 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3184 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3185 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3186 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3187
3188 r = adev->ip_blocks[i].version->funcs->resume(adev);
3189 if (r) {
3190 DRM_ERROR("resume of IP block <%s> failed %d\n",
3191 adev->ip_blocks[i].version->funcs->name, r);
3192 return r;
3193 }
3194 adev->ip_blocks[i].status.hw = true;
3195 }
3196 }
3197
3198 return 0;
3199}
3200
3201/**
3202 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3203 *
3204 * @adev: amdgpu_device pointer
3205 *
3206 * Second resume function for hardware IPs. The list of all the hardware
3207 * IPs that make up the asic is walked and the resume callbacks are run for
3208 * all blocks except COMMON, GMC, IH, and PSP. resume puts the hardware into a
3209 * functional state after a suspend and updates the software state as
3210 * necessary. This function is also used for restoring the GPU after a GPU
3211 * reset.
3212 * Returns 0 on success, negative error code on failure.
3213 */
3214static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3215{
3216 int i, r;
3217
3218 for (i = 0; i < adev->num_ip_blocks; i++) {
3219 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3220 continue;
3221 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3222 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3223 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3224 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3225 continue;
3226 r = adev->ip_blocks[i].version->funcs->resume(adev);
3227 if (r) {
3228 DRM_ERROR("resume of IP block <%s> failed %d\n",
3229 adev->ip_blocks[i].version->funcs->name, r);
3230 return r;
3231 }
3232 adev->ip_blocks[i].status.hw = true;
3233
3234 if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3235 /* disable gfxoff for IP resume. The gfxoff will be re-enabled in
3236 * amdgpu_device_resume() after IP resume.
3237 */
3238 amdgpu_gfx_off_ctrl(adev, false);
3239 DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
3240 }
3242 }
3243
3244 return 0;
3245}
3246
3247/**
3248 * amdgpu_device_ip_resume - run resume for hardware IPs
3249 *
3250 * @adev: amdgpu_device pointer
3251 *
3252 * Main resume function for hardware IPs. The hardware IPs
3253 * are split into two resume functions because they are
3254 * also used in recovering from a GPU reset and some additional
3255 * steps need to be taken between them. In this case (S3/S4) they are
3256 * run sequentially.
3257 * Returns 0 on success, negative error code on failure.
3258 */
3259static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3260{
3261 int r;
3262
3263 r = amdgpu_amdkfd_resume_iommu(adev);
3264 if (r)
3265 return r;
3266
3267 r = amdgpu_device_ip_resume_phase1(adev);
3268 if (r)
3269 return r;
3270
3271 r = amdgpu_device_fw_loading(adev);
3272 if (r)
3273 return r;
3274
3275 r = amdgpu_device_ip_resume_phase2(adev);
3276
3277 return r;
3278}
3279
3280/**
3281 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3282 *
3283 * @adev: amdgpu_device pointer
3284 *
3285 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3286 */
3287static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3288{
3289 if (amdgpu_sriov_vf(adev)) {
3290 if (adev->is_atom_fw) {
3291 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3292 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3293 } else {
3294 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3295 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3296 }
3297
3298 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3299 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3300 }
3301}
3302
3303/**
3304 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3305 *
3306 * @asic_type: AMD asic type
3307 *
3308 * Check if there is DC (new modesetting infrastructure) support for an asic.
3309 * returns true if DC has support, false if not.
3310 */
3311bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3312{
3313 switch (asic_type) {
3314#ifdef CONFIG_DRM_AMDGPU_SI
3315 case CHIP_HAINAN:
3316#endif
3317 case CHIP_TOPAZ:
3318 /* chips with no display hardware */
3319 return false;
3320#if defined(CONFIG_DRM_AMD_DC)
3321 case CHIP_TAHITI:
3322 case CHIP_PITCAIRN:
3323 case CHIP_VERDE:
3324 case CHIP_OLAND:
3325 /*
3326 * We have systems in the wild with these ASICs that require
3327 * LVDS and VGA support which is not supported with DC.
3328 *
3329 * Fallback to the non-DC driver here by default so as not to
3330 * cause regressions.
3331 */
3332#if defined(CONFIG_DRM_AMD_DC_SI)
3333 return amdgpu_dc > 0;
3334#else
3335 return false;
3336#endif
3337 case CHIP_BONAIRE:
3338 case CHIP_KAVERI:
3339 case CHIP_KABINI:
3340 case CHIP_MULLINS:
3341 /*
3342 * We have systems in the wild with these ASICs that require
3343 * VGA support which is not supported with DC.
3344 *
3345 * Fallback to the non-DC driver here by default so as not to
3346 * cause regressions.
3347 */
3348 return amdgpu_dc > 0;
3349 default:
3350 return amdgpu_dc != 0;
3351#else
3352 default:
3353 if (amdgpu_dc > 0)
3354 DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3355 "but isn't supported by ASIC, ignoring\n");
3356 return false;
3357#endif
3358 }
3359}
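
/*
 * Illustrative usage (editor's note, not driver code): the amdgpu_dc value
 * tested above comes from the "dc" module parameter, so DC can be forced
 * from the kernel command line, e.g.:
 *
 *	amdgpu.dc=1	force DC on (including the legacy ASICs above)
 *	amdgpu.dc=0	force the legacy (non-DC) display path
 *	amdgpu.dc=-1	default, per-ASIC automatic selection
 */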
3360
3361/**
3362 * amdgpu_device_has_dc_support - check if dc is supported
3363 *
3364 * @adev: amdgpu_device pointer
3365 *
3366 * Returns true for supported, false for not supported
3367 */
3368bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3369{
3370 if (adev->enable_virtual_display ||
3371 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3372 return false;
3373
3374 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3375}
3376
3377static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3378{
3379 struct amdgpu_device *adev =
3380 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3381 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3382
3383 /* It's a bug to not have a hive within this function */
3384 if (WARN_ON(!hive))
3385 return;
3386
3387 /*
3388 * Use task barrier to synchronize all xgmi reset works across the
3389 * hive. task_barrier_enter and task_barrier_exit will block
3390 * until all the threads running the xgmi reset works reach
3391 * those points. task_barrier_full will do both blocks.
3392 */
3393 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3395 task_barrier_enter(&hive->tb);
3396 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3397
3398 if (adev->asic_reset_res)
3399 goto fail;
3400
3401 task_barrier_exit(&hive->tb);
3402 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3403
3404 if (adev->asic_reset_res)
3405 goto fail;
3406
3407 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3408 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3409 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3410 } else {
3412 task_barrier_full(&hive->tb);
3413 adev->asic_reset_res = amdgpu_asic_reset(adev);
3414 }
3415
3416fail:
3417 if (adev->asic_reset_res)
3418 DRM_WARN("ASIC reset failed with error %d for drm dev %s\n",
3419 adev->asic_reset_res, adev_to_drm(adev)->unique);
3420 amdgpu_put_xgmi_hive(hive);
3421}
3422
3423static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3424{
3425 char *input = amdgpu_lockup_timeout;
3426 char *timeout_setting = NULL;
3427 int index = 0;
3428 long timeout;
3429 int ret = 0;
3430
3431 /*
3432 * By default the timeout for non-compute jobs is 10000 ms
3433 * and 60000 ms for compute jobs.
3434 * In SR-IOV the default compute timeout is 60000 ms in
3435 * one-VF mode and 10000 ms otherwise.
3436 */
3437 adev->gfx_timeout = msecs_to_jiffies(10000);
3438 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3439 if (amdgpu_sriov_vf(adev))
3440 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3441 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3442 else
3443 adev->compute_timeout = msecs_to_jiffies(60000);
3444
3445 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3446 while ((timeout_setting = strsep(&input, ",")) &&
3447 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3448 ret = kstrtol(timeout_setting, 0, &timeout);
3449 if (ret)
3450 return ret;
3451
3452 if (timeout == 0) {
3453 index++;
3454 continue;
3455 } else if (timeout < 0) {
3456 timeout = MAX_SCHEDULE_TIMEOUT;
3457 dev_warn(adev->dev, "lockup timeout disabled\n");
3458 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3459 } else {
3460 timeout = msecs_to_jiffies(timeout);
3461 }
3462
3463 switch (index++) {
3464 case 0:
3465 adev->gfx_timeout = timeout;
3466 break;
3467 case 1:
3468 adev->compute_timeout = timeout;
3469 break;
3470 case 2:
3471 adev->sdma_timeout = timeout;
3472 break;
3473 case 3:
3474 adev->video_timeout = timeout;
3475 break;
3476 default:
3477 break;
3478 }
3479 }
3480 /*
3481 * There is only one value specified and
3482 * it should apply to all non-compute jobs.
3483 */
3484 if (index == 1) {
3485 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3486 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3487 adev->compute_timeout = adev->gfx_timeout;
3488 }
3489 }
3490
3491 return ret;
3492}
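
/*
 * Illustrative usage (editor's note, not driver code): the string parsed
 * above is the "lockup_timeout" module parameter, a comma-separated list of
 * per-engine timeouts in milliseconds in the order gfx, compute, sdma,
 * video; 0 keeps the default and a negative value disables the timeout:
 *
 *	amdgpu.lockup_timeout=10000,60000,10000,10000
 */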
3493
3494/**
3495 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3496 *
3497 * @adev: amdgpu_device pointer
3498 *
3499 * RAM is direct mapped to the GPU if the IOMMU is disabled or in passthrough mode
3500 */
3501static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3502{
3503 struct iommu_domain *domain;
3504
3505 domain = iommu_get_domain_for_dev(adev->dev);
3506 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3507 adev->ram_is_direct_mapped = true;
3508}
3509
3510static const struct attribute *amdgpu_dev_attributes[] = {
3511 &dev_attr_product_name.attr,
3512 &dev_attr_product_number.attr,
3513 &dev_attr_serial_number.attr,
3514 &dev_attr_pcie_replay_count.attr,
3515 NULL
3516};
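
/*
 * Illustrative usage (editor's note, not driver code): once registered on
 * the device kobject in amdgpu_device_init() below, these attributes appear
 * in the GPU's PCI sysfs directory (the PCI address is an example):
 *
 *	cat /sys/bus/pci/devices/0000:03:00.0/product_name
 */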
3517
3518/**
3519 * amdgpu_device_init - initialize the driver
3520 *
3521 * @adev: amdgpu_device pointer
3522 * @flags: driver flags
3523 *
3524 * Initializes the driver info and hw (all asics).
3525 * Returns 0 for success or an error on failure.
3526 * Called at driver startup.
3527 */
3528int amdgpu_device_init(struct amdgpu_device *adev,
3529 uint32_t flags)
3530{
3531 struct drm_device *ddev = adev_to_drm(adev);
3532 struct pci_dev *pdev = adev->pdev;
3533 int r, i;
3534 bool px = false;
3535 u32 max_MBps;
3536
3537 adev->shutdown = false;
3538 adev->flags = flags;
3539
3540 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3541 adev->asic_type = amdgpu_force_asic_type;
3542 else
3543 adev->asic_type = flags & AMD_ASIC_MASK;
3544
3545 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3546 if (amdgpu_emu_mode == 1)
3547 adev->usec_timeout *= 10;
3548 adev->gmc.gart_size = 512 * 1024 * 1024;
3549 adev->accel_working = false;
3550 adev->num_rings = 0;
3551 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3552 adev->mman.buffer_funcs = NULL;
3553 adev->mman.buffer_funcs_ring = NULL;
3554 adev->vm_manager.vm_pte_funcs = NULL;
3555 adev->vm_manager.vm_pte_num_scheds = 0;
3556 adev->gmc.gmc_funcs = NULL;
3557 adev->harvest_ip_mask = 0x0;
3558 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3559 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3560
3561 adev->smc_rreg = &amdgpu_invalid_rreg;
3562 adev->smc_wreg = &amdgpu_invalid_wreg;
3563 adev->pcie_rreg = &amdgpu_invalid_rreg;
3564 adev->pcie_wreg = &amdgpu_invalid_wreg;
3565 adev->pciep_rreg = &amdgpu_invalid_rreg;
3566 adev->pciep_wreg = &amdgpu_invalid_wreg;
3567 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3568 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3569 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3570 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3571 adev->didt_rreg = &amdgpu_invalid_rreg;
3572 adev->didt_wreg = &amdgpu_invalid_wreg;
3573 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3574 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3575 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3576 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3577
3578 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3579 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3580 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3581
3582 /* mutex initialization is all done here so we
3583 * can call these functions without locking issues */
3584 mutex_init(&adev->firmware.mutex);
3585 mutex_init(&adev->pm.mutex);
3586 mutex_init(&adev->gfx.gpu_clock_mutex);
3587 mutex_init(&adev->srbm_mutex);
3588 mutex_init(&adev->gfx.pipe_reserve_mutex);
3589 mutex_init(&adev->gfx.gfx_off_mutex);
3590 mutex_init(&adev->grbm_idx_mutex);
3591 mutex_init(&adev->mn_lock);
3592 mutex_init(&adev->virt.vf_errors.lock);
3593 hash_init(adev->mn_hash);
3594 mutex_init(&adev->psp.mutex);
3595 mutex_init(&adev->notifier_lock);
3596 mutex_init(&adev->pm.stable_pstate_ctx_lock);
3597 mutex_init(&adev->benchmark_mutex);
3598
3599 amdgpu_device_init_apu_flags(adev);
3600
3601 r = amdgpu_device_check_arguments(adev);
3602 if (r)
3603 return r;
3604
3605 spin_lock_init(&adev->mmio_idx_lock);
3606 spin_lock_init(&adev->smc_idx_lock);
3607 spin_lock_init(&adev->pcie_idx_lock);
3608 spin_lock_init(&adev->uvd_ctx_idx_lock);
3609 spin_lock_init(&adev->didt_idx_lock);
3610 spin_lock_init(&adev->gc_cac_idx_lock);
3611 spin_lock_init(&adev->se_cac_idx_lock);
3612 spin_lock_init(&adev->audio_endpt_idx_lock);
3613 spin_lock_init(&adev->mm_stats.lock);
3614
3615 INIT_LIST_HEAD(&adev->shadow_list);
3616 mutex_init(&adev->shadow_list_lock);
3617
3618 INIT_LIST_HEAD(&adev->reset_list);
3619
3620 INIT_LIST_HEAD(&adev->ras_list);
3621
3622 INIT_DELAYED_WORK(&adev->delayed_init_work,
3623 amdgpu_device_delayed_init_work_handler);
3624 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3625 amdgpu_device_delay_enable_gfx_off);
3626
3627 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3628
3629 adev->gfx.gfx_off_req_count = 1;
3630 adev->gfx.gfx_off_residency = 0;
3631 adev->gfx.gfx_off_entrycount = 0;
3632 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3633
3634 atomic_set(&adev->throttling_logging_enabled, 1);
3635 /*
3636 * If throttling continues, logging will be performed every minute
3637 * to avoid log flooding. "-1" is subtracted since the thermal
3638 * throttling interrupt comes every second. Thus, the total logging
3639 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3640 * for throttling interrupt) = 60 seconds.
3641 */
3642 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3643 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3644
3645 /* Registers mapping */
3646 /* TODO: block userspace mapping of io register */
3647 if (adev->asic_type >= CHIP_BONAIRE) {
3648 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3649 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3650 } else {
3651 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3652 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3653 }
3654
3655 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3656 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3657
3658 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3659 if (!adev->rmmio)
3660 return -ENOMEM;
3662 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3663 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3664
3665 amdgpu_device_get_pcie_info(adev);
3666
3667 if (amdgpu_mcbp)
3668 DRM_INFO("MCBP is enabled\n");
3669
3670 /*
3671 * The reset domain needs to be present early, before the XGMI hive is
3672 * discovered (if any) and initialized, since it provides the reset
3673 * semaphore and in_gpu_reset flag used early in init, before any RREG32.
3674 */
3675 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3676 if (!adev->reset_domain)
3677 return -ENOMEM;
3678
3679 /* detect hw virtualization here */
3680 amdgpu_detect_virtualization(adev);
3681
3682 r = amdgpu_device_get_job_timeout_settings(adev);
3683 if (r) {
3684 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3685 return r;
3686 }
3687
3688 /* early init functions */
3689 r = amdgpu_device_ip_early_init(adev);
3690 if (r)
3691 return r;
3692
3693 /* Get rid of things like offb */
3694 r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
3695 if (r)
3696 return r;
3697
3698 /* Enable TMZ based on IP_VERSION */
3699 amdgpu_gmc_tmz_set(adev);
3700
3701 amdgpu_gmc_noretry_set(adev);
3702 /* Need to get xgmi info early to decide the reset behavior */
3703 if (adev->gmc.xgmi.supported) {
3704 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3705 if (r)
3706 return r;
3707 }
3708
3709 /* enable PCIE atomic ops */
3710 if (amdgpu_sriov_vf(adev))
3711 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3712 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3713 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3714 else
3715 adev->have_atomics_support =
3716 !pci_enable_atomic_ops_to_root(adev->pdev,
3717 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3718 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3719 if (!adev->have_atomics_support)
3720 dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3721
3722 /* doorbell bar mapping and doorbell index init */
3723 amdgpu_device_doorbell_init(adev);
3724
3725 if (amdgpu_emu_mode == 1) {
3726 /* post the asic on emulation mode */
3727 emu_soc_asic_init(adev);
3728 goto fence_driver_init;
3729 }
3730
3731 amdgpu_reset_init(adev);
3732
3733 /* detect if we are with an SRIOV vbios */
3734 amdgpu_device_detect_sriov_bios(adev);
3735
3736 /* check if we need to reset the asic
3737 * E.g., driver was not cleanly unloaded previously, etc.
3738 */
3739 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3740 if (adev->gmc.xgmi.num_physical_nodes) {
3741 dev_info(adev->dev, "Pending hive reset.\n");
3742 adev->gmc.xgmi.pending_reset = true;
3743 /* Only need to init the necessary blocks for the SMU to handle the reset */
3744 for (i = 0; i < adev->num_ip_blocks; i++) {
3745 if (!adev->ip_blocks[i].status.valid)
3746 continue;
3747 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3748 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3749 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3750 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3751 DRM_DEBUG("IP %s disabled for hw_init.\n",
3752 adev->ip_blocks[i].version->funcs->name);
3753 adev->ip_blocks[i].status.hw = true;
3754 }
3755 }
3756 } else {
3757 r = amdgpu_asic_reset(adev);
3758 if (r) {
3759 dev_err(adev->dev, "asic reset on init failed\n");
3760 goto failed;
3761 }
3762 }
3763 }
3764
3765 pci_enable_pcie_error_reporting(adev->pdev);
3766
3767 /* Post card if necessary */
3768 if (amdgpu_device_need_post(adev)) {
3769 if (!adev->bios) {
3770 dev_err(adev->dev, "no vBIOS found\n");
3771 r = -EINVAL;
3772 goto failed;
3773 }
3774 DRM_INFO("GPU posting now...\n");
3775 r = amdgpu_device_asic_init(adev);
3776 if (r) {
3777 dev_err(adev->dev, "gpu post error!\n");
3778 goto failed;
3779 }
3780 }
3781
3782 if (adev->is_atom_fw) {
3783 /* Initialize clocks */
3784 r = amdgpu_atomfirmware_get_clock_info(adev);
3785 if (r) {
3786 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3787 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3788 goto failed;
3789 }
3790 } else {
3791 /* Initialize clocks */
3792 r = amdgpu_atombios_get_clock_info(adev);
3793 if (r) {
3794 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3795 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3796 goto failed;
3797 }
3798 /* init i2c buses */
3799 if (!amdgpu_device_has_dc_support(adev))
3800 amdgpu_atombios_i2c_init(adev);
3801 }
3802
3803fence_driver_init:
3804 /* Fence driver */
3805 r = amdgpu_fence_driver_sw_init(adev);
3806 if (r) {
3807 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3808 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3809 goto failed;
3810 }
3811
3812 /* init the mode config */
3813 drm_mode_config_init(adev_to_drm(adev));
3814
3815 r = amdgpu_device_ip_init(adev);
3816 if (r) {
3817 /* failed in exclusive mode due to timeout */
3818 if (amdgpu_sriov_vf(adev) &&
3819 !amdgpu_sriov_runtime(adev) &&
3820 amdgpu_virt_mmio_blocked(adev) &&
3821 !amdgpu_virt_wait_reset(adev)) {
3822 dev_err(adev->dev, "VF exclusive mode timeout\n");
3823 /* Don't send request since VF is inactive. */
3824 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3825 adev->virt.ops = NULL;
3826 r = -EAGAIN;
3827 goto release_ras_con;
3828 }
3829 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3830 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3831 goto release_ras_con;
3832 }
3833
3834 amdgpu_fence_driver_hw_init(adev);
3835
3836 dev_info(adev->dev,
3837 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3838 adev->gfx.config.max_shader_engines,
3839 adev->gfx.config.max_sh_per_se,
3840 adev->gfx.config.max_cu_per_sh,
3841 adev->gfx.cu_info.number);
3842
3843 adev->accel_working = true;
3844
3845 amdgpu_vm_check_compute_bug(adev);
3846
3847 /* Initialize the buffer migration limit. */
3848 if (amdgpu_moverate >= 0)
3849 max_MBps = amdgpu_moverate;
3850 else
3851 max_MBps = 8; /* Allow 8 MB/s. */
3852 /* Get a log2 for easy divisions. */
3853 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3854
3855 r = amdgpu_pm_sysfs_init(adev);
3856 if (r) {
3857 adev->pm_sysfs_en = false;
3858 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3859 } else
3860 adev->pm_sysfs_en = true;
3861
3862 r = amdgpu_ucode_sysfs_init(adev);
3863 if (r) {
3864 adev->ucode_sysfs_en = false;
3865 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3866 } else
3867 adev->ucode_sysfs_en = true;
3868
3869 r = amdgpu_psp_sysfs_init(adev);
3870 if (r) {
3871 adev->psp_sysfs_en = false;
3872 if (!amdgpu_sriov_vf(adev))
3873 DRM_ERROR("Creating psp sysfs failed\n");
3874 } else
3875 adev->psp_sysfs_en = true;
3876
3877 /*
3878 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3879 * Otherwise the mgpu fan boost feature will be skipped because the
3880 * gpu instance count would be too low.
3881 */
3882 amdgpu_register_gpu_instance(adev);
3883
3884 /* enable clockgating, etc., after ib tests since some blocks require
3885 * explicit gating rather than handling it automatically.
3886 */
3887 if (!adev->gmc.xgmi.pending_reset) {
3888 r = amdgpu_device_ip_late_init(adev);
3889 if (r) {
3890 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3891 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3892 goto release_ras_con;
3893 }
3894 /* must succeed. */
3895 amdgpu_ras_resume(adev);
3896 queue_delayed_work(system_wq, &adev->delayed_init_work,
3897 msecs_to_jiffies(AMDGPU_RESUME_MS));
3898 }
3899
3900 if (amdgpu_sriov_vf(adev))
3901 flush_delayed_work(&adev->delayed_init_work);
3902
3903 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3904 if (r)
3905 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3906
3907 if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3908 r = amdgpu_pmu_init(adev);
3909 if (r)
3910 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3911 }
3912 /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3913 if (amdgpu_device_cache_pci_state(adev->pdev))
3914 pci_restore_state(pdev);
3915
3916 /* If we have > 1 VGA cards, then disable the amdgpu VGA resources.
3917 * This will fail for cards that aren't VGA class devices, just
3918 * ignore it. */
3919 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3920 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3921
3922 if (amdgpu_device_supports_px(ddev)) {
3923 px = true;
3924 vga_switcheroo_register_client(adev->pdev,
3925 &amdgpu_switcheroo_ops, px);
3926 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3927 }
3928
3929 if (adev->gmc.xgmi.pending_reset)
3930 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3931 msecs_to_jiffies(AMDGPU_RESUME_MS));
3932
3933 amdgpu_device_check_iommu_direct_map(adev);
3934
3935 return 0;
3936
3937release_ras_con:
3938 amdgpu_release_ras_context(adev);
3939
3940failed:
3941 amdgpu_vf_error_trans_all(adev);
3942
3943 return r;
3944}
3945
3946static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3947{
3949 /* Clear all CPU mappings pointing to this device */
3950 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3951
3952 /* Unmap all mapped bars - Doorbell, registers and VRAM */
3953 amdgpu_device_doorbell_fini(adev);
3954
3955 iounmap(adev->rmmio);
3956 adev->rmmio = NULL;
3957 if (adev->mman.aper_base_kaddr)
3958 iounmap(adev->mman.aper_base_kaddr);
3959 adev->mman.aper_base_kaddr = NULL;
3960
3961 /* Memory manager related */
3962 if (!adev->gmc.xgmi.connected_to_cpu) {
3963 arch_phys_wc_del(adev->gmc.vram_mtrr);
3964 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3965 }
3966}
3967
3968/**
3969 * amdgpu_device_fini_hw - tear down the driver
3970 *
3971 * @adev: amdgpu_device pointer
3972 *
3973 * Tear down the driver info (all asics).
3974 * Called at driver shutdown.
3975 */
3976void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3977{
3978 dev_info(adev->dev, "amdgpu: finishing device.\n");
3979 flush_delayed_work(&adev->delayed_init_work);
3980 adev->shutdown = true;
3981
3982 /* make sure the IB tests have finished before entering exclusive mode
3983 * to avoid preemption on the IB tests
3984 */
3985 if (amdgpu_sriov_vf(adev)) {
3986 amdgpu_virt_request_full_gpu(adev, false);
3987 amdgpu_virt_fini_data_exchange(adev);
3988 }
3989
3990 /* disable all interrupts */
3991 amdgpu_irq_disable_all(adev);
3992 if (adev->mode_info.mode_config_initialized) {
3993 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3994 drm_helper_force_disable_all(adev_to_drm(adev));
3995 else
3996 drm_atomic_helper_shutdown(adev_to_drm(adev));
3997 }
3998 amdgpu_fence_driver_hw_fini(adev);
3999
4000 if (adev->mman.initialized) {
4001 flush_delayed_work(&adev->mman.bdev.wq);
4002 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
4003 }
4004
4005 if (adev->pm_sysfs_en)
4006 amdgpu_pm_sysfs_fini(adev);
4007 if (adev->ucode_sysfs_en)
4008 amdgpu_ucode_sysfs_fini(adev);
4009 if (adev->psp_sysfs_en)
4010 amdgpu_psp_sysfs_fini(adev);
4011 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4012
4013 /* RAS features must be disabled before hw fini */
4014 amdgpu_ras_pre_fini(adev);
4015
4016 amdgpu_device_ip_fini_early(adev);
4017
4018 amdgpu_irq_fini_hw(adev);
4019
4020 if (adev->mman.initialized)
4021 ttm_device_clear_dma_mappings(&adev->mman.bdev);
4022
4023 amdgpu_gart_dummy_page_fini(adev);
4024
4025 amdgpu_device_unmap_mmio(adev);
4027}
4028
4029void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4030{
4031 int idx;
4032
4033 amdgpu_fence_driver_sw_fini(adev);
4034 amdgpu_device_ip_fini(adev);
4035 release_firmware(adev->firmware.gpu_info_fw);
4036 adev->firmware.gpu_info_fw = NULL;
4037 adev->accel_working = false;
4038 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4039
4040 amdgpu_reset_fini(adev);
4041
4042 /* free i2c buses */
4043 if (!amdgpu_device_has_dc_support(adev))
4044 amdgpu_i2c_fini(adev);
4045
4046 if (amdgpu_emu_mode != 1)
4047 amdgpu_atombios_fini(adev);
4048
4049 kfree(adev->bios);
4050 adev->bios = NULL;
4051 if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4052 vga_switcheroo_unregister_client(adev->pdev);
4053 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4054 }
4055 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4056 vga_client_unregister(adev->pdev);
4057
4058 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4060 iounmap(adev->rmmio);
4061 adev->rmmio = NULL;
4062 amdgpu_device_doorbell_fini(adev);
4063 drm_dev_exit(idx);
4064 }
4065
4066 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4067 amdgpu_pmu_fini(adev);
4068 if (adev->mman.discovery_bin)
4069 amdgpu_discovery_fini(adev);
4070
4071 amdgpu_reset_put_reset_domain(adev->reset_domain);
4072 adev->reset_domain = NULL;
4073
4074 kfree(adev->pci_state);
4076}
4077
4078/**
4079 * amdgpu_device_evict_resources - evict device resources
4080 * @adev: amdgpu device object
4081 *
4082 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4083 * of the vram memory type. Mainly used for evicting device resources
4084 * at suspend time.
4085 *
4086 */
4087static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4088{
4089 int ret;
4090
4091 /* No need to evict vram on APUs for suspend to ram or s2idle */
4092 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4093 return 0;
4094
4095 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4096 if (ret)
4097 DRM_WARN("evicting device resources failed\n");
4098 return ret;
4099}
4100
4101/*
4102 * Suspend & resume.
4103 */
4104/**
4105 * amdgpu_device_suspend - initiate device suspend
4106 *
4107 * @dev: drm dev pointer
4108 * @fbcon: notify the fbdev of suspend
4109 *
4110 * Puts the hw in the suspend state (all asics).
4111 * Returns 0 for success or an error on failure.
4112 * Called at driver suspend.
4113 */
4114int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4115{
4116 struct amdgpu_device *adev = drm_to_adev(dev);
4117 int r = 0;
4118
4119 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4120 return 0;
4121
4122 adev->in_suspend = true;
4123
4124 /* Evict the majority of BOs before grabbing the full access */
4125 r = amdgpu_device_evict_resources(adev);
4126 if (r)
4127 return r;
4128
4129 if (amdgpu_sriov_vf(adev)) {
4130 amdgpu_virt_fini_data_exchange(adev);
4131 r = amdgpu_virt_request_full_gpu(adev, false);
4132 if (r)
4133 return r;
4134 }
4135
4136 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4137 DRM_WARN("smart shift update failed\n");
4138
4139 drm_kms_helper_poll_disable(dev);
4140
4141 if (fbcon)
4142 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4143
4144 cancel_delayed_work_sync(&adev->delayed_init_work);
4145
4146 amdgpu_ras_suspend(adev);
4147
4148 amdgpu_device_ip_suspend_phase1(adev);
4149
4150 if (!adev->in_s0ix)
4151 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4152
4153 r = amdgpu_device_evict_resources(adev);
4154 if (r)
4155 return r;
4156
4157 amdgpu_fence_driver_hw_fini(adev);
4158
4159 amdgpu_device_ip_suspend_phase2(adev);
4160
4161 if (amdgpu_sriov_vf(adev))
4162 amdgpu_virt_release_full_gpu(adev, false);
4163
4164 return 0;
4165}
4166
4167/**
4168 * amdgpu_device_resume - initiate device resume
4169 *
4170 * @dev: drm dev pointer
4171 * @fbcon: notify the fbdev of resume
4172 *
4173 * Bring the hw back to operating state (all asics).
4174 * Returns 0 for success or an error on failure.
4175 * Called at driver resume.
4176 */
4177int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4178{
4179 struct amdgpu_device *adev = drm_to_adev(dev);
4180 int r = 0;
4181
4182 if (amdgpu_sriov_vf(adev)) {
4183 r = amdgpu_virt_request_full_gpu(adev, true);
4184 if (r)
4185 return r;
4186 }
4187
4188 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4189 return 0;
4190
4191 if (adev->in_s0ix)
4192 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4193
4194 /* post card */
4195 if (amdgpu_device_need_post(adev)) {
4196 r = amdgpu_device_asic_init(adev);
4197 if (r)
4198 dev_err(adev->dev, "amdgpu asic init failed\n");
4199 }
4200
4201 r = amdgpu_device_ip_resume(adev);
4202
4203 if (r) {
4204 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4205 goto exit;
4206 }
4207 amdgpu_fence_driver_hw_init(adev);
4208
4209 r = amdgpu_device_ip_late_init(adev);
4210 if (r)
4211 goto exit;
4212
4213 queue_delayed_work(system_wq, &adev->delayed_init_work,
4214 msecs_to_jiffies(AMDGPU_RESUME_MS));
4215
4216 if (!adev->in_s0ix) {
4217 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4218 if (r)
4219 goto exit;
4220 }
4221
4222exit:
4223 if (amdgpu_sriov_vf(adev)) {
4224 amdgpu_virt_init_data_exchange(adev);
4225 amdgpu_virt_release_full_gpu(adev, true);
4226 }
4227
4228 if (r)
4229 return r;
4230
4231 /* Make sure the IB tests are flushed */
4232 flush_delayed_work(&adev->delayed_init_work);
4233
4234 if (adev->in_s0ix) {
4235 /* re-enable gfxoff after IP resume. This re-enables gfxoff after
4236 * it was disabled for IP resume in amdgpu_device_ip_resume_phase2().
4237 */
4238 amdgpu_gfx_off_ctrl(adev, true);
4239 DRM_DEBUG("will enable gfxoff for the mission mode\n");
4240 }
4241 if (fbcon)
4242 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4243
4244 drm_kms_helper_poll_enable(dev);
4245
4246 amdgpu_ras_resume(adev);
4247
4248 if (adev->mode_info.num_crtc) {
4249 /*
4250 * Most of the connector probing functions try to acquire runtime pm
4251 * refs to ensure that the GPU is powered on when connector polling is
4252 * performed. Since we're calling this from a runtime PM callback,
4253 * trying to acquire rpm refs will cause us to deadlock.
4254 *
4255 * Since we're guaranteed to be holding the rpm lock, it's safe to
4256 * temporarily disable the rpm helpers so this doesn't deadlock us.
4257 */
4258#ifdef CONFIG_PM
4259 dev->dev->power.disable_depth++;
4260#endif
4261 if (!adev->dc_enabled)
4262 drm_helper_hpd_irq_event(dev);
4263 else
4264 drm_kms_helper_hotplug_event(dev);
4265#ifdef CONFIG_PM
4266 dev->dev->power.disable_depth--;
4267#endif
4268 }
4269 adev->in_suspend = false;
4270
4271 if (adev->enable_mes)
4272 amdgpu_mes_self_test(adev);
4273
4274 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4275 DRM_WARN("smart shift update failed\n");
4276
4277 return 0;
4278}
4279
4280/**
4281 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4282 *
4283 * @adev: amdgpu_device pointer
4284 *
4285 * The list of all the hardware IPs that make up the asic is walked and
4286 * the check_soft_reset callbacks are run. check_soft_reset determines
4287 * if the asic is still hung or not.
4288 * Returns true if any of the IPs are still in a hung state, false if not.
4289 */
4290static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4291{
4292 int i;
4293 bool asic_hang = false;
4294
4295 if (amdgpu_sriov_vf(adev))
4296 return true;
4297
4298 if (amdgpu_asic_need_full_reset(adev))
4299 return true;
4300
4301 for (i = 0; i < adev->num_ip_blocks; i++) {
4302 if (!adev->ip_blocks[i].status.valid)
4303 continue;
4304 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4305 adev->ip_blocks[i].status.hang =
4306 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4307 if (adev->ip_blocks[i].status.hang) {
4308 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4309 asic_hang = true;
4310 }
4311 }
4312 return asic_hang;
4313}
4314
4315/**
4316 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4317 *
4318 * @adev: amdgpu_device pointer
4319 *
4320 * The list of all the hardware IPs that make up the asic is walked and the
4321 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4322 * handles any IP specific hardware or software state changes that are
4323 * necessary for a soft reset to succeed.
4324 * Returns 0 on success, negative error code on failure.
4325 */
4326static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4327{
4328 int i, r = 0;
4329
4330 for (i = 0; i < adev->num_ip_blocks; i++) {
4331 if (!adev->ip_blocks[i].status.valid)
4332 continue;
4333 if (adev->ip_blocks[i].status.hang &&
4334 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4335 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4336 if (r)
4337 return r;
4338 }
4339 }
4340
4341 return 0;
4342}
4343
4344/**
4345 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4346 *
4347 * @adev: amdgpu_device pointer
4348 *
4349 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4350 * reset is necessary to recover.
4351 * Returns true if a full asic reset is required, false if not.
4352 */
4353static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4354{
4355 int i;
4356
4357 if (amdgpu_asic_need_full_reset(adev))
4358 return true;
4359
4360 for (i = 0; i < adev->num_ip_blocks; i++) {
4361 if (!adev->ip_blocks[i].status.valid)
4362 continue;
4363 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4364 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4365 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4366 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4367 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4368 if (adev->ip_blocks[i].status.hang) {
4369 dev_info(adev->dev, "Some block need full reset!\n");
4370 return true;
4371 }
4372 }
4373 }
4374 return false;
4375}
4376
4377/**
4378 * amdgpu_device_ip_soft_reset - do a soft reset
4379 *
4380 * @adev: amdgpu_device pointer
4381 *
4382 * The list of all the hardware IPs that make up the asic is walked and the
4383 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4384 * IP specific hardware or software state changes that are necessary to soft
4385 * reset the IP.
4386 * Returns 0 on success, negative error code on failure.
4387 */
4388static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4389{
4390 int i, r = 0;
4391
4392 for (i = 0; i < adev->num_ip_blocks; i++) {
4393 if (!adev->ip_blocks[i].status.valid)
4394 continue;
4395 if (adev->ip_blocks[i].status.hang &&
4396 adev->ip_blocks[i].version->funcs->soft_reset) {
4397 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4398 if (r)
4399 return r;
4400 }
4401 }
4402
4403 return 0;
4404}
4405
4406/**
4407 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4408 *
4409 * @adev: amdgpu_device pointer
4410 *
4411 * The list of all the hardware IPs that make up the asic is walked and the
4412 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4413 * handles any IP specific hardware or software state changes that are
4414 * necessary after the IP has been soft reset.
4415 * Returns 0 on success, negative error code on failure.
4416 */
4417static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4418{
4419 int i, r = 0;
4420
4421 for (i = 0; i < adev->num_ip_blocks; i++) {
4422 if (!adev->ip_blocks[i].status.valid)
4423 continue;
4424 if (adev->ip_blocks[i].status.hang &&
4425 adev->ip_blocks[i].version->funcs->post_soft_reset)
4426 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4427 if (r)
4428 return r;
4429 }
4430
4431 return 0;
4432}
4433
4434/**
4435 * amdgpu_device_recover_vram - Recover some VRAM contents
4436 *
4437 * @adev: amdgpu_device pointer
4438 *
4439 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4440 * restore things like GPUVM page tables after a GPU reset where
4441 * the contents of VRAM might be lost.
4442 *
4443 * Returns:
4444 * 0 on success, negative error code on failure.
4445 */
4446static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4447{
4448 struct dma_fence *fence = NULL, *next = NULL;
4449 struct amdgpu_bo *shadow;
4450 struct amdgpu_bo_vm *vmbo;
4451 long r = 1, tmo;
4452
4453 if (amdgpu_sriov_runtime(adev))
4454 tmo = msecs_to_jiffies(8000);
4455 else
4456 tmo = msecs_to_jiffies(100);
4457
4458 dev_info(adev->dev, "recover vram bo from shadow start\n");
4459 mutex_lock(&adev->shadow_list_lock);
4460 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4461 shadow = &vmbo->bo;
4462 /* No need to recover an evicted BO */
4463 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4464 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4465 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4466 continue;
4467
4468 r = amdgpu_bo_restore_shadow(shadow, &next);
4469 if (r)
4470 break;
4471
4472 if (fence) {
4473 tmo = dma_fence_wait_timeout(fence, false, tmo);
4474 dma_fence_put(fence);
4475 fence = next;
4476 if (tmo == 0) {
4477 r = -ETIMEDOUT;
4478 break;
4479 } else if (tmo < 0) {
4480 r = tmo;
4481 break;
4482 }
4483 } else {
4484 fence = next;
4485 }
4486 }
4487 mutex_unlock(&adev->shadow_list_lock);
4488
4489 if (fence)
4490 tmo = dma_fence_wait_timeout(fence, false, tmo);
4491 dma_fence_put(fence);
4492
4493 if (r < 0 || tmo <= 0) {
4494 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4495 return -EIO;
4496 }
4497
4498 dev_info(adev->dev, "recover vram bo from shadow done\n");
4499 return 0;
4500}
4501
4502
4503/**
4504 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4505 *
4506 * @adev: amdgpu_device pointer
4507 * @from_hypervisor: request from hypervisor
4508 *
4509 * Do a VF FLR and reinitialize the ASIC.
4510 * Returns 0 on success, negative error code on failure.
4511 */
4512static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4513 bool from_hypervisor)
4514{
4515 int r;
4516 struct amdgpu_hive_info *hive = NULL;
4517 int retry_limit = 0;
4518
4519retry:
4520 amdgpu_amdkfd_pre_reset(adev);
4521
4522 if (from_hypervisor)
4523 r = amdgpu_virt_request_full_gpu(adev, true);
4524 else
4525 r = amdgpu_virt_reset_gpu(adev);
4526 if (r)
4527 return r;
4528
4529 /* Resume IP prior to SMC */
4530 r = amdgpu_device_ip_reinit_early_sriov(adev);
4531 if (r)
4532 goto error;
4533
4534 amdgpu_virt_init_data_exchange(adev);
4535
4536 r = amdgpu_device_fw_loading(adev);
4537 if (r)
4538 return r;
4539
4540 /* now we are okay to resume SMC/CP/SDMA */
4541 r = amdgpu_device_ip_reinit_late_sriov(adev);
4542 if (r)
4543 goto error;
4544
4545 hive = amdgpu_get_xgmi_hive(adev);
4546 /* Update PSP FW topology after reset */
4547 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4548 r = amdgpu_xgmi_update_topology(hive, adev);
4549
4550 if (hive)
4551 amdgpu_put_xgmi_hive(hive);
4552
4553 if (!r) {
4554 amdgpu_irq_gpu_reset_resume_helper(adev);
4555 r = amdgpu_ib_ring_tests(adev);
4556
4557 amdgpu_amdkfd_post_reset(adev);
4558 }
4559
4560error:
4561 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4562 amdgpu_inc_vram_lost(adev);
4563 r = amdgpu_device_recover_vram(adev);
4564 }
4565 amdgpu_virt_release_full_gpu(adev, true);
4566
4567 if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4568 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4569 retry_limit++;
4570 goto retry;
4571 } else
4572 DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4573 }
4574
4575 return r;
4576}
4577
4578/**
4579 * amdgpu_device_has_job_running - check if there is any job in the pending list
4580 *
4581 * @adev: amdgpu_device pointer
4582 *
4583 * check if there is any job in the pending list
4584 */
4585bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4586{
4587 int i;
4588 struct drm_sched_job *job;
4589
4590 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4591 struct amdgpu_ring *ring = adev->rings[i];
4592
4593 if (!ring || !ring->sched.thread)
4594 continue;
4595
4596 spin_lock(&ring->sched.job_list_lock);
4597 job = list_first_entry_or_null(&ring->sched.pending_list,
4598 struct drm_sched_job, list);
4599 spin_unlock(&ring->sched.job_list_lock);
4600 if (job)
4601 return true;
4602 }
4603 return false;
4604}
4605
4606/**
4607 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4608 *
4609 * @adev: amdgpu_device pointer
4610 *
4611 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4612 * a hung GPU.
4613 */
4614bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4615{
4617 if (amdgpu_gpu_recovery == 0)
4618 goto disabled;
4619
4620 /* Skip soft reset check in fatal error mode */
4621 if (!amdgpu_ras_is_poison_mode_supported(adev))
4622 return true;
4623
4624 if (!amdgpu_device_ip_check_soft_reset(adev)) {
4625 dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
4626 return false;
4627 }
4628
4629 if (amdgpu_sriov_vf(adev))
4630 return true;
4631
4632 if (amdgpu_gpu_recovery == -1) {
4633 switch (adev->asic_type) {
4634#ifdef CONFIG_DRM_AMDGPU_SI
4635 case CHIP_VERDE:
4636 case CHIP_TAHITI:
4637 case CHIP_PITCAIRN:
4638 case CHIP_OLAND:
4639 case CHIP_HAINAN:
4640#endif
4641#ifdef CONFIG_DRM_AMDGPU_CIK
4642 case CHIP_KAVERI:
4643 case CHIP_KABINI:
4644 case CHIP_MULLINS:
4645#endif
4646 case CHIP_CARRIZO:
4647 case CHIP_STONEY:
4648 case CHIP_CYAN_SKILLFISH:
4649 goto disabled;
4650 default:
4651 break;
4652 }
4653 }
4654
4655 return true;
4656
4657disabled:
4658 dev_info(adev->dev, "GPU recovery disabled.\n");
4659 return false;
4660}
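
/*
 * Illustrative usage (editor's note, not driver code): the amdgpu_gpu_recovery
 * value checked above is the "gpu_recovery" module parameter:
 *
 *	amdgpu.gpu_recovery=0	disable GPU recovery
 *	amdgpu.gpu_recovery=1	enable GPU recovery
 *	amdgpu.gpu_recovery=-1	default: auto, i.e. disabled on the ASICs
 *				listed above and always attempted under SR-IOV
 */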
4661
4662int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4663{
4664 u32 i;
4665 int ret = 0;
4666
4667 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4668
4669 dev_info(adev->dev, "GPU mode1 reset\n");
4670
4671 /* disable BM */
4672 pci_clear_master(adev->pdev);
4673
4674 amdgpu_device_cache_pci_state(adev->pdev);
4675
4676 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4677 dev_info(adev->dev, "GPU smu mode1 reset\n");
4678 ret = amdgpu_dpm_mode1_reset(adev);
4679 } else {
4680 dev_info(adev->dev, "GPU psp mode1 reset\n");
4681 ret = psp_gpu_reset(adev);
4682 }
4683
4684 if (ret)
4685 dev_err(adev->dev, "GPU mode1 reset failed\n");
4686
4687 amdgpu_device_load_pci_state(adev->pdev);
4688
4689 /* wait for asic to come out of reset */
4690 for (i = 0; i < adev->usec_timeout; i++) {
4691 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4692
4693 if (memsize != 0xffffffff)
4694 break;
4695 udelay(1);
4696 }
4697
4698 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4699 return ret;
4700}
4701
4702int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4703 struct amdgpu_reset_context *reset_context)
4704{
4705 int i, r = 0;
4706 struct amdgpu_job *job = NULL;
4707 bool need_full_reset =
4708 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4709
4710 if (reset_context->reset_req_dev == adev)
4711 job = reset_context->job;
4712
4713 if (amdgpu_sriov_vf(adev)) {
4714 /* stop the data exchange thread */
4715 amdgpu_virt_fini_data_exchange(adev);
4716 }
4717
4718 amdgpu_fence_driver_isr_toggle(adev, true);
4719
4720 /* block all schedulers and reset given job's ring */
4721 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4722 struct amdgpu_ring *ring = adev->rings[i];
4723
4724 if (!ring || !ring->sched.thread)
4725 continue;
4726
4727 /* clear job fences from the fence driver to avoid force_completion
4728 * on them; NULL and vm flush fences are left in the fence driver */
4729 amdgpu_fence_driver_clear_job_fences(ring);
4730
4731 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4732 amdgpu_fence_driver_force_completion(ring);
4733 }
4734
4735 amdgpu_fence_driver_isr_toggle(adev, false);
4736
4737 if (job && job->vm)
4738 drm_sched_increase_karma(&job->base);
4739
4740 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4741 /* If reset handler not implemented, continue; otherwise return */
4742 if (r == -ENOSYS)
4743 r = 0;
4744 else
4745 return r;
4746
4747 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4748 if (!amdgpu_sriov_vf(adev)) {
4750 if (!need_full_reset)
4751 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4752
4753 if (!need_full_reset && amdgpu_gpu_recovery) {
4754 amdgpu_device_ip_pre_soft_reset(adev);
4755 r = amdgpu_device_ip_soft_reset(adev);
4756 amdgpu_device_ip_post_soft_reset(adev);
4757 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4758 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4759 need_full_reset = true;
4760 }
4761 }
4762
4763 if (need_full_reset) {
4764 r = amdgpu_device_ip_suspend(adev);
4765 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4766 } else {
4767 clear_bit(AMDGPU_NEED_FULL_RESET,
4768 &reset_context->flags);
4769 }
4770 }

	return r;
}
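
/*
 * Illustrative sketch: a minimal amdgpu_reset_context setup before
 * calling amdgpu_device_pre_asic_reset(), modeled on the callers in
 * this file (see amdgpu_pci_slot_reset() below). The flag and method
 * names are real; the wrapper function itself is hypothetical.
 */
#if 0
static int amdgpu_example_request_full_reset(struct amdgpu_device *adev)
{
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));
	reset_context.method = AMD_RESET_METHOD_NONE;	/* let the driver pick */
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	return amdgpu_device_pre_asic_reset(adev, &reset_context);
}
#endif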

static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
{
	int i;

	lockdep_assert_held(&adev->reset_domain->sem);

	for (i = 0; i < adev->num_regs; i++) {
		adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
		trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
					     adev->reset_dump_reg_value[i]);
	}

	return 0;
}
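
/*
 * Illustrative sketch: populating the register list consumed by
 * amdgpu_reset_reg_dumps() above. In the driver this is driven through
 * debugfs; the helper below is hypothetical and assumes the list and
 * value arrays are already allocated and that the caller holds the
 * reset domain semaphore, as the lockdep assertion above requires.
 */
#if 0
static void amdgpu_example_set_reset_dump_regs(struct amdgpu_device *adev,
					       const u32 *offsets, int num)
{
	int i;

	lockdep_assert_held(&adev->reset_domain->sem);

	for (i = 0; i < num; i++)
		adev->reset_dump_reg_list[i] = offsets[i];
	adev->num_regs = num;
}
#endif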

#ifdef CONFIG_DEV_COREDUMP
static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
				       size_t count, void *data, size_t datalen)
{
	struct drm_printer p;
	struct amdgpu_device *adev = data;
	struct drm_print_iterator iter;
	int i;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
	if (adev->reset_task_info.pid)
		drm_printf(&p, "process_name: %s PID: %d\n",
			   adev->reset_task_info.process_name,
			   adev->reset_task_info.pid);

	if (adev->reset_vram_lost)
		drm_printf(&p, "VRAM is lost due to GPU reset!\n");
	if (adev->num_regs) {
		drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");

		for (i = 0; i < adev->num_regs; i++)
			drm_printf(&p, "0x%08x: 0x%08x\n",
				   adev->reset_dump_reg_list[i],
				   adev->reset_dump_reg_value[i]);
	}

	return count - iter.remain;
}

static void amdgpu_devcoredump_free(void *data)
{
}

static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);

	ktime_get_ts64(&adev->reset_time);
	dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
		      amdgpu_devcoredump_read, amdgpu_devcoredump_free);
}
#endif
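
/*
 * Illustrative sketch (userspace, not kernel code): reading the device
 * coredump produced above. devcoredump instances appear under
 * /sys/class/devcoredump/devcd<N>/data; the instance number below is
 * hypothetical, so enumerate the directory in practice.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/devcoredump/devcd1/data", "r");
	char buf[4096];
	size_t n;

	if (!f)
		return 1;
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}
#endif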

int amdgpu_do_asic_reset(struct list_head *device_list_handle,
			 struct amdgpu_reset_context *reset_context)
{
	struct amdgpu_device *tmp_adev = NULL;
	bool need_full_reset, skip_hw_reset, vram_lost = false;
	int r = 0;
	bool gpu_reset_for_dev_remove = false;

	/* Try reset handler method first */
	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
				    reset_list);
	amdgpu_reset_reg_dumps(tmp_adev);

	reset_context->reset_device_list = device_list_handle;
	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
	/* If reset handler not implemented, continue; otherwise return */
	if (r == -ENOSYS)
		r = 0;
	else
		return r;

	/* Reset handler not implemented, use the default method */
	need_full_reset =
		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);

	gpu_reset_for_dev_remove =
		test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);

	/*
	 * ASIC reset has to be done on all XGMI hive nodes ASAP
	 * to allow proper link negotiation in FW (within 1 sec).
	 */
	if (!skip_hw_reset && need_full_reset) {
		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
			/* For XGMI run all resets in parallel to speed up the process */
			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
				tmp_adev->gmc.xgmi.pending_reset = false;
				if (!queue_work(system_unbound_wq,
						&tmp_adev->xgmi_reset_work))
					r = -EALREADY;
			} else {
				r = amdgpu_asic_reset(tmp_adev);
			}

			if (r) {
				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
					r, adev_to_drm(tmp_adev)->unique);
				break;
			}
		}
		/* For XGMI wait for all resets to complete before proceeding */
		if (!r) {
			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
					flush_work(&tmp_adev->xgmi_reset_work);
					r = tmp_adev->asic_reset_res;
					if (r)
						break;
				}
			}
		}
	}

	if (!r && amdgpu_ras_intr_triggered()) {
		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
			if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
			    tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
				tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
		}

		amdgpu_ras_intr_cleared();
	}

	/* Since mode1 reset affects the base IP blocks, the phase1 IP
	 * blocks need to be resumed. Otherwise there will be a BIOS
	 * signature error and the PSP bootloader won't be able to load
	 * the kdb on the next amdgpu install.
	 */
	if (gpu_reset_for_dev_remove) {
		list_for_each_entry(tmp_adev, device_list_handle, reset_list)
			amdgpu_device_ip_resume_phase1(tmp_adev);

		goto end;
	}

	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		if (need_full_reset) {
			/* post card */
			r = amdgpu_device_asic_init(tmp_adev);
			if (r) {
				dev_warn(tmp_adev->dev, "asic atom init failed!");
			} else {
				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
				r = amdgpu_amdkfd_resume_iommu(tmp_adev);
				if (r)
					goto out;

				r = amdgpu_device_ip_resume_phase1(tmp_adev);
				if (r)
					goto out;

				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
#ifdef CONFIG_DEV_COREDUMP
				tmp_adev->reset_vram_lost = vram_lost;
				memset(&tmp_adev->reset_task_info, 0,
				       sizeof(tmp_adev->reset_task_info));
				if (reset_context->job && reset_context->job->vm)
					tmp_adev->reset_task_info =
						reset_context->job->vm->task_info;
				amdgpu_reset_capture_coredumpm(tmp_adev);
#endif
				if (vram_lost) {
					DRM_INFO("VRAM is lost due to GPU reset!\n");
					amdgpu_inc_vram_lost(tmp_adev);
				}

				r = amdgpu_device_fw_loading(tmp_adev);
				if (r)
					return r;

				r = amdgpu_device_ip_resume_phase2(tmp_adev);
				if (r)
					goto out;

				if (vram_lost)
					amdgpu_device_fill_reset_magic(tmp_adev);

				/*
				 * Re-register this ASIC as tracked now
				 * that the reset completed successfully.
				 */
				amdgpu_register_gpu_instance(tmp_adev);

				if (!reset_context->hive &&
				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					amdgpu_xgmi_add_device(tmp_adev);

				r = amdgpu_device_ip_late_init(tmp_adev);
				if (r)
					goto out;

				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);

				/*
				 * The GPU enters a bad state once the
				 * number of faulty pages flagged by ECC
				 * reaches the threshold, after which RAS
				 * recovery would be scheduled. So check
				 * here and break recovery if the bad page
				 * threshold is indeed exceeded, reminding
				 * the user to retire this GPU or set a
				 * bigger bad_page_threshold the next time
				 * the driver is probed.
				 */
				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
					/* must succeed. */
					amdgpu_ras_resume(tmp_adev);
				} else {
					r = -EINVAL;
					goto out;
				}

				/* Update PSP FW topology after reset */
				if (reset_context->hive &&
				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					r = amdgpu_xgmi_update_topology(
						reset_context->hive, tmp_adev);
			}
		}

out:
		if (!r) {
			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
			r = amdgpu_ib_ring_tests(tmp_adev);
			if (r) {
				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
				need_full_reset = true;
				r = -EAGAIN;
				goto end;
			}
		}

		if (!r)
			r = amdgpu_device_recover_vram(tmp_adev);
		else
			tmp_adev->asic_reset_res = r;
	}

end:
	if (need_full_reset)
		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
	else
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
	return r;
}
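
/*
 * Illustrative sketch: the fan-out/fan-in idiom used above for XGMI
 * hives, reduced to its essentials. Work is queued for every node first
 * and only then flushed, so all ASIC resets run in parallel inside the
 * firmware's link-negotiation window. The types and names below are
 * hypothetical.
 */
#if 0
struct example_node {
	struct list_head list;
	struct work_struct reset_work;
	int reset_res;
};

static int example_reset_all_nodes(struct list_head *nodes)
{
	struct example_node *n;
	int r = 0;

	list_for_each_entry(n, nodes, list)		/* fan out */
		queue_work(system_unbound_wq, &n->reset_work);

	list_for_each_entry(n, nodes, list) {		/* fan in */
		flush_work(&n->reset_work);
		if (n->reset_res)
			r = n->reset_res;
	}
	return r;
}
#endif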

static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
{
	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_MODE1:
		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
		break;
	case AMD_RESET_METHOD_MODE2:
		adev->mp1_state = PP_MP1_STATE_RESET;
		break;
	default:
		adev->mp1_state = PP_MP1_STATE_NONE;
		break;
	}
}

static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
{
	amdgpu_vf_error_trans_all(adev);
	adev->mp1_state = PP_MP1_STATE_NONE;
}

static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (p) {
		pm_runtime_enable(&(p->dev));
		pm_runtime_resume(&(p->dev));
	}

	pci_dev_put(p);
}

static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
{
	enum amd_reset_method reset_method;
	struct pci_dev *p = NULL;
	u64 expires;

	/*
	 * For now, only BACO and mode1 reset are confirmed
	 * to suffer from the audio issue if not properly suspended.
	 */
	reset_method = amdgpu_asic_reset_method(adev);
	if ((reset_method != AMD_RESET_METHOD_BACO) &&
	    (reset_method != AMD_RESET_METHOD_MODE1))
		return -EINVAL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return -ENODEV;

	expires = pm_runtime_autosuspend_expiration(&(p->dev));
	if (!expires)
		/*
		 * If we cannot get the audio device's autosuspend delay,
		 * fall back to a fixed 4s interval. The audio controller's
		 * default autosuspend delay is 3s, so 4s is guaranteed
		 * to cover it.
		 */
		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;

	while (!pm_runtime_status_suspended(&(p->dev))) {
		if (!pm_runtime_suspend(&(p->dev)))
			break;

		if (expires < ktime_get_mono_fast_ns()) {
			dev_warn(adev->dev, "failed to suspend display audio\n");
			pci_dev_put(p);
			/* TODO: abort the succeeding gpu reset? */
			return -ETIMEDOUT;
		}
	}

	pm_runtime_disable(&(p->dev));

	pci_dev_put(p);
	return 0;
}
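
/*
 * Illustrative sketch: how the two audio helpers above pair up around a
 * reset, mirroring amdgpu_device_gpu_recover() below. The HDA controller
 * is function 1 of the GPU's PCI device, which is why both helpers pass
 * 1 to pci_get_domain_bus_and_slot().
 */
#if 0
	bool audio_suspended = false;

	if (!amdgpu_device_suspend_display_audio(adev))
		audio_suspended = true;

	/* ... perform the GPU reset ... */

	if (audio_suspended)
		amdgpu_device_resume_display_audio(adev);
#endif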

static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

#if defined(CONFIG_DEBUG_FS)
	if (!amdgpu_sriov_vf(adev))
		cancel_work(&adev->reset_work);
#endif

	if (adev->kfd.dev)
		cancel_work(&adev->kfd.reset_work);

	if (amdgpu_sriov_vf(adev))
		cancel_work(&adev->virt.flr_work);

	if (con && adev->ras_enabled)
		cancel_work(&con->recovery_work);
}

/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu_device pointer
 * @job: which job triggered the hang
 * @reset_context: amdgpu reset context pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempt to do soft-reset or full-reset and reinitialize the ASIC.
 * Returns 0 for success or an error on failure.
 */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job,
			      struct amdgpu_reset_context *reset_context)
{
	struct list_head device_list, *device_list_handle = NULL;
	bool job_signaled = false;
	struct amdgpu_hive_info *hive = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	int i, r = 0;
	bool need_emergency_restart = false;
	bool audio_suspended = false;
	bool gpu_reset_for_dev_remove = false;

	gpu_reset_for_dev_remove =
		test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);

	/*
	 * Special case: RAS triggered and full reset isn't supported
	 */
	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);

	/*
	 * Flush RAM to disk so that after reboot
	 * the user can read the logs and see why the system rebooted.
	 */
	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
		DRM_WARN("Emergency reboot.");

		ksys_sync_helper();
		emergency_restart();
	}

	dev_info(adev->dev, "GPU %s begin!\n",
		 need_emergency_restart ? "jobs stop" : "reset");

	if (!amdgpu_sriov_vf(adev))
		hive = amdgpu_get_xgmi_hive(adev);
	if (hive)
		mutex_lock(&hive->hive_lock);

	reset_context->job = job;
	reset_context->hive = hive;
	/*
	 * Build list of devices to reset.
	 * In case we are in XGMI hive mode, resort the device list
	 * to put adev in the 1st position.
	 */
	INIT_LIST_HEAD(&device_list);
	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			list_add_tail(&tmp_adev->reset_list, &device_list);
			if (gpu_reset_for_dev_remove && adev->shutdown)
				tmp_adev->shutdown = true;
		}
		if (!list_is_first(&adev->reset_list, &device_list))
			list_rotate_to_front(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	} else {
		list_add_tail(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	}

	/* We need to lock reset domain only once both for XGMI and single device */
	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
				    reset_list);
	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);

	/* block all schedulers and reset given job's ring */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {

		amdgpu_device_set_mp1_state(tmp_adev);

		/*
		 * Try to put the audio codec into suspend state
		 * before gpu reset started.
		 *
		 * Since the power domain of the graphics device is
		 * shared with the AZ power domain, we may otherwise
		 * change the audio hardware behind the audio driver's
		 * back and trigger audio codec errors.
		 */
		if (!amdgpu_device_suspend_display_audio(tmp_adev))
			audio_suspended = true;

		amdgpu_ras_set_error_query_ready(tmp_adev, false);

		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);

		if (!amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_pre_reset(tmp_adev);

		/*
		 * Mark these ASICs to be reset as untracked first,
		 * and add them back after the reset completes.
		 */
		amdgpu_unregister_gpu_instance(tmp_adev);

		drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);

		/* disable ras on ALL IPs */
		if (!need_emergency_restart &&
		    amdgpu_device_ip_need_full_reset(tmp_adev))
			amdgpu_ras_suspend(tmp_adev);

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, job ? &job->base : NULL);

			if (need_emergency_restart)
				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
		}
		atomic_inc(&tmp_adev->gpu_reset_counter);
	}

	if (need_emergency_restart)
		goto skip_sched_resume;

	/*
	 * Must check guilty signal here since after this point all old
	 * HW fences are force signaled.
	 *
	 * job->base holds a reference to parent fence
	 */
	if (job && dma_fence_is_signaled(&job->hw_fence)) {
		job_signaled = true;
		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
		goto skip_hw_reset;
	}

retry:	/* Rest of adevs pre asic reset from XGMI hive. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		if (gpu_reset_for_dev_remove) {
			/* Workaround for ASICs that need to disable SMC first */
			amdgpu_device_smu_fini_early(tmp_adev);
		}
		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
		/* TODO: Should we stop? */
		if (r) {
			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
				r, adev_to_drm(tmp_adev)->unique);
			tmp_adev->asic_reset_res = r;
		}

		/*
		 * Drop all pending non-scheduler resets. Scheduler resets
		 * were already dropped during drm_sched_stop.
		 */
		amdgpu_device_stop_pending_resets(tmp_adev);
	}

	/* Actual ASIC resets if needed. */
	/* Host driver will handle XGMI hive reset for SRIOV */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_device_reset_sriov(adev, job ? false : true);
		if (r)
			adev->asic_reset_res = r;

		/* Aldebaran supports ras in SRIOV, so need resume ras during reset */
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
			amdgpu_ras_resume(adev);
	} else {
		r = amdgpu_do_asic_reset(device_list_handle, reset_context);
		if (r && r == -EAGAIN)
			goto retry;

		if (!r && gpu_reset_for_dev_remove)
			goto recover_end;
	}

skip_hw_reset:

	/* Post ASIC reset for all devs. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_start(&ring->sched, true);
		}

		if (tmp_adev->enable_mes &&
		    tmp_adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
			amdgpu_mes_self_test(tmp_adev);

		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));

		if (tmp_adev->asic_reset_res)
			r = tmp_adev->asic_reset_res;

		tmp_adev->asic_reset_res = 0;

		if (r) {
			/* bad news, how to tell it to userspace ? */
			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n",
				 atomic_read(&tmp_adev->gpu_reset_counter));
			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
		} else {
			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
				 atomic_read(&tmp_adev->gpu_reset_counter));
			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
				DRM_WARN("smart shift update failed\n");
		}
	}

skip_sched_resume:
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		/* unlock kfd: SRIOV would do it separately */
		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_post_reset(tmp_adev);

		/* kfd_post_reset will do nothing if the kfd device is not
		 * initialized, so bring up kfd here if it wasn't
		 * initialized before
		 */
		if (!tmp_adev->kfd.init_complete)
			amdgpu_amdkfd_device_init(tmp_adev);

		if (audio_suspended)
			amdgpu_device_resume_display_audio(tmp_adev);

		amdgpu_device_unset_mp1_state(tmp_adev);

		amdgpu_ras_set_error_query_ready(tmp_adev, true);
	}

recover_end:
	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
				    reset_list);
	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);

	if (hive) {
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}

	if (r)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);

	atomic_set(&adev->reset_domain->reset_res, r);
	return r;
}
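
/*
 * Illustrative sketch: the usual caller of the recovery entry point
 * above is the scheduler's job-timeout path (amdgpu_job.c). This
 * reduced, hypothetical version shows only the shape of that call.
 */
#if 0
static void example_job_timedout(struct amdgpu_device *adev,
				 struct amdgpu_job *job)
{
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));
	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	amdgpu_device_gpu_recover(adev, job, &reset_context);
}
#endif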

/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	pcie_bandwidth_available(adev->pdev, NULL,
				 &platform_speed_cap, &platform_link_width);

	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		pdev = adev->pdev;
		speed_cap = pcie_get_speed_cap(pdev);
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			if (speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}

/**
 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
 *
 * @adev: amdgpu_device pointer
 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
 *
 * Return true if @peer_adev can access (DMA) @adev through the PCIe
 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
 * @peer_adev.
 */
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
				      struct amdgpu_device *peer_adev)
{
#ifdef CONFIG_HSA_AMD_P2P
	uint64_t address_mask = peer_adev->dev->dma_mask ?
		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
	resource_size_t aper_limit =
		adev->gmc.aper_base + adev->gmc.aper_size - 1;
	bool p2p_access =
		!adev->gmc.xgmi.connected_to_cpu &&
		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);

	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
		!(adev->gmc.aper_base & address_mask ||
		  aper_limit & address_mask));
#else
	return false;
#endif
}
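
/*
 * Worked example for the BAR check above: a peer limited to 32-bit DMA
 * (dma_mask = 0xffffffff, so address_mask = ~0xffffffff) cannot reach a
 * VRAM aperture based at 0x800000000, because aper_base & address_mask
 * is non-zero and the function returns false. A 44-bit-capable peer
 * (address_mask = ~0xfffffffffff) passes for the same aperture, provided
 * all of VRAM is CPU-visible ("large BAR"), i.e. real_vram_size equals
 * visible_vram_size.
 */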

int amdgpu_device_baco_enter(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	return amdgpu_dpm_baco_enter(adev);
}

int amdgpu_device_baco_exit(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	ret = amdgpu_dpm_baco_exit(adev);
	if (ret)
		return ret;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	if (amdgpu_passthrough(adev) &&
	    adev->nbio.funcs->clear_doorbell_interrupt)
		adev->nbio.funcs->clear_doorbell_interrupt(adev);

	return 0;
}
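
/*
 * Illustrative sketch: the BACO helpers above are used as a pair around
 * an idle interval, e.g. from the driver's runtime-PM hooks in
 * amdgpu_drv.c. This hypothetical wrapper shows only the pairing.
 */
#if 0
static int example_baco_idle_cycle(struct drm_device *dev)
{
	int r;

	r = amdgpu_device_baco_enter(dev);
	if (r)
		return r;

	/* ... the device sits in BACO while idle ... */

	return amdgpu_device_baco_exit(dev);
}
#endif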

/**
 * amdgpu_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
 */
pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		DRM_WARN("No support for XGMI hive yet...");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	adev->pci_channel_state = state;

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	/* Fatal error, prepare for slot reset */
	case pci_channel_io_frozen:
		/*
		 * Locking adev->reset_domain->sem will prevent any external access
		 * to GPU during PCI error recovery
		 */
		amdgpu_device_lock_reset_domain(adev->reset_domain);
		amdgpu_device_set_mp1_state(adev);

		/*
		 * Block any work scheduling as we do for regular GPU reset
		 * for the duration of the recovery
		 */
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, NULL);
		}
		atomic_inc(&adev->gpu_reset_counter);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 */
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{
	DRM_INFO("PCI error: mmio enabled callback!!\n");

	/* TODO - dump whatever for debugging purposes */

	/* This is called only if amdgpu_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, no need to reset slot.
	 */

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r, i;
	struct amdgpu_reset_context reset_context;
	u32 memsize;
	struct list_head device_list;

	DRM_INFO("PCI error: slot reset callback!!\n");

	memset(&reset_context, 0, sizeof(reset_context));

	INIT_LIST_HEAD(&device_list);
	list_add_tail(&adev->reset_list, &device_list);

	/* wait for asic to come out of reset */
	msleep(500);

	/* Restore PCI confspace */
	amdgpu_device_load_pci_state(pdev);

	/* confirm ASIC came out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		memsize = amdgpu_asic_get_config_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
	if (memsize == 0xffffffff) {
		r = -ETIME;
		goto out;
	}

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);

	adev->no_hw_access = true;
	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
	adev->no_hw_access = false;
	if (r)
		goto out;

	r = amdgpu_do_asic_reset(&device_list, &reset_context);

out:
	if (!r) {
		if (amdgpu_device_cache_pci_state(adev->pdev))
			pci_restore_state(adev->pdev);

		DRM_INFO("PCIe error recovery succeeded\n");
	} else {
		DRM_ERROR("PCIe error recovery failed, err:%d", r);
		amdgpu_device_unset_mp1_state(adev);
		amdgpu_device_unlock_reset_domain(adev->reset_domain);
	}

	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

/**
 * amdgpu_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that it's OK to resume
 * normal operation.
 */
void amdgpu_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: resume callback!!\n");

	/* Only continue execution for the case of pci_channel_io_frozen */
	if (adev->pci_channel_state != pci_channel_io_frozen)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		drm_sched_start(&ring->sched, true);
	}

	amdgpu_device_unset_mp1_state(adev);
	amdgpu_device_unlock_reset_domain(adev->reset_domain);
}

bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pci_save_state(pdev);
	if (!r) {
		kfree(adev->pci_state);

		adev->pci_state = pci_store_saved_state(pdev);

		if (!adev->pci_state) {
			DRM_ERROR("Failed to store PCI saved state");
			return false;
		}
	} else {
		DRM_WARN("Failed to save PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (!adev->pci_state)
		return false;

	r = pci_load_saved_state(pdev, adev->pci_state);

	if (!r) {
		pci_restore_state(pdev);
	} else {
		DRM_WARN("Failed to load PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	if (ring && ring->funcs->emit_hdp_flush)
		amdgpu_ring_emit_hdp_flush(ring);
	else
		amdgpu_asic_flush_hdp(adev, ring);
}

void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	amdgpu_asic_invalidate_hdp(adev, ring);
}

int amdgpu_in_reset(struct amdgpu_device *adev)
{
	return atomic_read(&adev->reset_domain->in_gpu_reset);
}

/**
 * amdgpu_device_halt() - bring hardware to some kind of halt state
 *
 * @adev: amdgpu_device pointer
 *
 * Bring hardware to some kind of halt state so that no one can touch it
 * any more. It will help to maintain error context when an error occurs.
 * Compared to a simple hang, the system will keep stable at least for SSH
 * access. Then it should be trivial to inspect the hardware state and
 * see what's going on. Implemented as follows:
 *
 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs,
 *    etc.), clears all CPU mappings to the device, and disallows remappings
 *    through page faults
 * 2. amdgpu_irq_disable_all() disables all interrupts
 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
 * 4. set adev->no_hw_access to avoid potential crashes after step 5
 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
 *    flush any in-flight DMA operations
 */
void amdgpu_device_halt(struct amdgpu_device *adev)
{
	struct pci_dev *pdev = adev->pdev;
	struct drm_device *ddev = adev_to_drm(adev);

	drm_dev_unplug(ddev);

	amdgpu_irq_disable_all(adev);

	amdgpu_fence_driver_hw_fini(adev);

	adev->no_hw_access = true;

	amdgpu_device_unmap_mmio(adev);

	pci_disable_device(pdev);
	pci_wait_for_pending_transaction(pdev);
}

u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
				 u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
				  u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
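
/*
 * Illustrative sketch: the two accessors above implement the classic
 * index/data pair (write the register index, read it back to post the
 * write, then access the data window). A read-modify-write built on top
 * of them looks like the hypothetical helper below. Note it is not
 * atomic: each accessor takes pcie_idx_lock separately.
 */
#if 0
static void example_pcie_port_rmw(struct amdgpu_device *adev,
				  u32 reg, u32 clr, u32 set)
{
	u32 v = amdgpu_device_pcie_port_rreg(adev, reg);

	v = (v & ~clr) | set;
	amdgpu_device_pcie_port_wreg(adev, reg, v);
}
#endif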

/**
 * amdgpu_device_switch_gang - switch to a new gang
 * @adev: amdgpu_device pointer
 * @gang: the gang to switch to
 *
 * Try to switch to a new gang.
 * Returns: NULL if we switched to the new gang or a reference to the current
 * gang leader.
 */
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
					    struct dma_fence *gang)
{
	struct dma_fence *old = NULL;

	do {
		dma_fence_put(old);
		rcu_read_lock();
		old = dma_fence_get_rcu_safe(&adev->gang_submit);
		rcu_read_unlock();

		if (old == gang)
			break;

		if (!dma_fence_is_signaled(old))
			return old;

	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
			 old, gang) != old);

	dma_fence_put(old);
	return NULL;
}
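
/*
 * Illustrative sketch: a submitter using amdgpu_device_switch_gang().
 * A non-NULL return is the still-running previous gang leader; the
 * caller must wait for it (or schedule behind it) before the new gang
 * may touch the hardware. The wrapper below is hypothetical.
 */
#if 0
static long example_submit_gang(struct amdgpu_device *adev,
				struct dma_fence *gang_leader)
{
	struct dma_fence *old = amdgpu_device_switch_gang(adev, gang_leader);

	if (old) {
		long r = dma_fence_wait(old, true);

		dma_fence_put(old);
		if (r)
			return r;
		/* retry the switch once the old gang has finished */
	}
	return 0;
}
#endif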

bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
	case CHIP_TOPAZ:
		/* chips with no display hardware */
		return false;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* chips with display hardware */
		return true;
	default:
		/* IP discovery */
		if (!adev->ip_versions[DCE_HWIP][0] ||
		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
			return false;
		return true;
	}
}