   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
  28#include <linux/power_supply.h>
  29#include <linux/kthread.h>
  30#include <linux/module.h>
  31#include <linux/console.h>
  32#include <linux/slab.h>
  33
  34#include <drm/drm_atomic_helper.h>
  35#include <drm/drm_probe_helper.h>
  36#include <drm/amdgpu_drm.h>
  37#include <linux/vgaarb.h>
  38#include <linux/vga_switcheroo.h>
  39#include <linux/efi.h>
  40#include "amdgpu.h"
  41#include "amdgpu_trace.h"
  42#include "amdgpu_i2c.h"
  43#include "atom.h"
  44#include "amdgpu_atombios.h"
  45#include "amdgpu_atomfirmware.h"
  46#include "amd_pcie.h"
  47#ifdef CONFIG_DRM_AMDGPU_SI
  48#include "si.h"
  49#endif
  50#ifdef CONFIG_DRM_AMDGPU_CIK
  51#include "cik.h"
  52#endif
  53#include "vi.h"
  54#include "soc15.h"
  55#include "nv.h"
  56#include "bif/bif_4_1_d.h"
  57#include <linux/pci.h>
  58#include <linux/firmware.h>
  59#include "amdgpu_vf_error.h"
  60
  61#include "amdgpu_amdkfd.h"
  62#include "amdgpu_pm.h"
  63
  64#include "amdgpu_xgmi.h"
  65#include "amdgpu_ras.h"
  66#include "amdgpu_pmu.h"
  67#include "amdgpu_fru_eeprom.h"
  68
  69#include <linux/suspend.h>
  70#include <drm/task_barrier.h>
  71#include <linux/pm_runtime.h>
  72
  73MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
  74MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
  75MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
  76MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
  77MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
  78MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
  79MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
  80MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
  81MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
  82MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
  83
  84#define AMDGPU_RESUME_MS		2000
  85
  86const char *amdgpu_asic_name[] = {
  87	"TAHITI",
  88	"PITCAIRN",
  89	"VERDE",
  90	"OLAND",
  91	"HAINAN",
  92	"BONAIRE",
  93	"KAVERI",
  94	"KABINI",
  95	"HAWAII",
  96	"MULLINS",
  97	"TOPAZ",
  98	"TONGA",
  99	"FIJI",
 100	"CARRIZO",
 101	"STONEY",
 102	"POLARIS10",
 103	"POLARIS11",
 104	"POLARIS12",
 105	"VEGAM",
 106	"VEGA10",
 107	"VEGA12",
 108	"VEGA20",
 109	"RAVEN",
 110	"ARCTURUS",
 111	"RENOIR",
 112	"NAVI10",
 113	"NAVI14",
 114	"NAVI12",
 115	"SIENNA_CICHLID",
 116	"NAVY_FLOUNDER",
 117	"LAST",
 118};
 119
 120/**
 121 * DOC: pcie_replay_count
 122 *
 123 * The amdgpu driver provides a sysfs API for reporting the total number
 124 * of PCIe replays (NAKs).
 125 * The file pcie_replay_count is used for this and returns the total
 126 * number of replays as a sum of the NAKs generated and NAKs received.
 127 */
 128
 129static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
 130		struct device_attribute *attr, char *buf)
 131{
 132	struct drm_device *ddev = dev_get_drvdata(dev);
 133	struct amdgpu_device *adev = ddev->dev_private;
 134	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
 135
 136	return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
 137}
 138
 139static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
 140		amdgpu_device_get_pcie_replay_count, NULL);
 141
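/*
 * A minimal userspace sketch (illustrative only, not part of the driver;
 * the sysfs path assumes card0 is the amdgpu device) that reads the
 * attribute defined above:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char buf[32];
 *		FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
 *
 *		if (f && fgets(buf, sizeof(buf), f))
 *			printf("pcie replays: %s", buf);
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */
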
 142static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 143
 144/**
 145 * DOC: product_name
 146 *
 147 * The amdgpu driver provides a sysfs API for reporting the product name
 148 * for the device.
 149 * The file product_name is used for this and returns the product name
 150 * as returned from the FRU.
 151 * NOTE: This is only available for certain server cards
 152 */
 153
 154static ssize_t amdgpu_device_get_product_name(struct device *dev,
 155		struct device_attribute *attr, char *buf)
 156{
 157	struct drm_device *ddev = dev_get_drvdata(dev);
 158	struct amdgpu_device *adev = ddev->dev_private;
 159
 160	return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
 161}
 162
 163static DEVICE_ATTR(product_name, S_IRUGO,
 164		amdgpu_device_get_product_name, NULL);
 165
 166/**
 167 * DOC: product_number
 168 *
 169 * The amdgpu driver provides a sysfs API for reporting the part number
 170 * for the device.
 171 * The file product_number is used for this and returns the part number
 172 * as returned from the FRU.
 173 * NOTE: This is only available for certain server cards
 174 */
 175
 176static ssize_t amdgpu_device_get_product_number(struct device *dev,
 177		struct device_attribute *attr, char *buf)
 178{
 179	struct drm_device *ddev = dev_get_drvdata(dev);
 180	struct amdgpu_device *adev = ddev->dev_private;
 181
 182	return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
 183}
 184
 185static DEVICE_ATTR(product_number, S_IRUGO,
 186		amdgpu_device_get_product_number, NULL);
 187
 188/**
 189 * DOC: serial_number
 190 *
 191 * The amdgpu driver provides a sysfs API for reporting the serial number
 192 * for the device.
 193 * The file serial_number is used for this and returns the serial number
 194 * as returned from the FRU.
 195 * NOTE: This is only available for certain server cards
 196 */
 197
 198static ssize_t amdgpu_device_get_serial_number(struct device *dev,
 199		struct device_attribute *attr, char *buf)
 200{
 201	struct drm_device *ddev = dev_get_drvdata(dev);
 202	struct amdgpu_device *adev = ddev->dev_private;
 203
 204	return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
 205}
 206
 207static DEVICE_ATTR(serial_number, S_IRUGO,
 208		amdgpu_device_get_serial_number, NULL);
 209
 210/**
 211 * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
 212 *
 213 * @dev: drm_device pointer
 214 *
 215 * Returns true if the device is a dGPU with HG/PX power control,
 216 * otherwise returns false.
 217 */
 218bool amdgpu_device_supports_boco(struct drm_device *dev)
 219{
 220	struct amdgpu_device *adev = dev->dev_private;
 221
 222	if (adev->flags & AMD_IS_PX)
 223		return true;
 224	return false;
 225}
 226
 227/**
 228 * amdgpu_device_supports_baco - Does the device support BACO
 229 *
 230 * @dev: drm_device pointer
 231 *
 232 * Returns true if the device supports BACO,
 233 * otherwise returns false.
 234 */
 235bool amdgpu_device_supports_baco(struct drm_device *dev)
 236{
 237	struct amdgpu_device *adev = dev->dev_private;
 238
 239	return amdgpu_asic_supports_baco(adev);
 240}
 241
 242/**
 243 * VRAM access helper functions.
 244 *
 245 * amdgpu_device_vram_access - read/write a buffer in vram
 246 *
 247 * @adev: amdgpu_device pointer
 248 * @pos: offset of the buffer in vram
 249 * @buf: virtual address of the buffer in system memory
 250 * @size: read/write size, the buffer at @buf must be at least @size bytes
 251 * @write: true - write to vram, otherwise - read from vram
 252 */
 253void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 254			       uint32_t *buf, size_t size, bool write)
 255{
 256	unsigned long flags;
 257	uint32_t hi = ~0;
 258	uint64_t last;
 259
 260
 261#ifdef CONFIG_64BIT
 262	last = min(pos + size, adev->gmc.visible_vram_size);
 263	if (last > pos) {
 264		void __iomem *addr = adev->mman.aper_base_kaddr + pos;
 265		size_t count = last - pos;
 266
 267		if (write) {
 268			memcpy_toio(addr, buf, count);
 269			mb();
 270			amdgpu_asic_flush_hdp(adev, NULL);
 271		} else {
 272			amdgpu_asic_invalidate_hdp(adev, NULL);
 273			mb();
 274			memcpy_fromio(buf, addr, count);
 275		}
 276
 277		if (count == size)
 278			return;
 279
 280		pos += count;
 281		buf += count / 4;
 282		size -= count;
 283	}
 284#endif
 285
 286	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 287	for (last = pos + size; pos < last; pos += 4) {
 288		uint32_t tmp = pos >> 31;
 289
 290		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
 291		if (tmp != hi) {
 292			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
 293			hi = tmp;
 294		}
 295		if (write)
 296			WREG32_NO_KIQ(mmMM_DATA, *buf++);
 297		else
 298			*buf++ = RREG32_NO_KIQ(mmMM_DATA);
 299	}
 300	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 301}
 302
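/*
 * Illustrative use of the helper above: read the first 16 bytes of VRAM
 * into a local buffer (a sketch only, adev is assumed to be in scope):
 *
 *	uint32_t data[4];
 *
 *	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
 */
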
 303/*
 304 * MMIO register access helper functions.
 305 */
 306/**
 307 * amdgpu_mm_rreg - read a memory mapped IO register
 308 *
 309 * @adev: amdgpu_device pointer
 310 * @reg: dword aligned register offset
 311 * @acc_flags: access flags which require special behavior
 312 *
 313 * Returns the 32 bit value from the offset specified.
 314 */
 315uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 316			uint32_t acc_flags)
 317{
 318	uint32_t ret;
 319
 320	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
 321		return amdgpu_kiq_rreg(adev, reg);
 322
 323	if ((reg * 4) < adev->rmmio_size)
 324		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 325	else {
 326		unsigned long flags;
 327
 328		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 329		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
 330		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 331		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 332	}
 333	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
 334	return ret;
 335}
 336
 337/*
 338 * MMIO register byte read helper function
 339 * @offset: byte offset from MMIO start
 340 *
 341 */
 342
 343/**
 344 * amdgpu_mm_rreg8 - read a memory mapped IO register
 345 *
 346 * @adev: amdgpu_device pointer
 347 * @offset: byte aligned register offset
 348 *
 349 * Returns the 8 bit value from the offset specified.
 350 */
 351uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
 352	if (offset < adev->rmmio_size)
 353		return (readb(adev->rmmio + offset));
 354	BUG();
 355}
 356
 357/*
 358 * MMIO register byte write helper function
 359 * @offset: byte offset from MMIO start
 360 * @value: the value to be written to the register
 361 *
 362 */
 363/**
 364 * amdgpu_mm_wreg8 - write a memory mapped IO register
 365 *
 366 * @adev: amdgpu_device pointer
 367 * @offset: byte aligned register offset
 368 * @value: 8 bit value to write
 369 *
 370 * Writes the value specified to the offset specified.
 371 */
 372void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
 373	if (offset < adev->rmmio_size)
 374		writeb(value, adev->rmmio + offset);
 375	else
 376		BUG();
 377}
 378
 379static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
 380{
 381	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 382
 383	if ((reg * 4) < adev->rmmio_size)
 384		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 385	else {
 386		unsigned long flags;
 387
 388		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 389		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
 390		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 391		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 392	}
 393}
 394
 395/**
 396 * amdgpu_mm_wreg - write to a memory mapped IO register
 397 *
 398 * @adev: amdgpu_device pointer
 399 * @reg: dword aligned register offset
 400 * @v: 32 bit value to write to the register
 401 * @acc_flags: access flags which require special behavior
 402 *
 403 * Writes the value specified to the offset specified.
 404 */
 405void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 406		    uint32_t acc_flags)
 407{
 408	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
 409		return amdgpu_kiq_wreg(adev, reg, v);
 410
 411	amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
 412}
 413
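/*
 * Illustrative read-modify-write through these helpers; in practice the
 * RREG32()/WREG32() macros wrap them, and reg/mask stand in for a real
 * register offset and bitmask:
 *
 *	uint32_t v = amdgpu_mm_rreg(adev, reg, 0);
 *
 *	v |= mask;
 *	amdgpu_mm_wreg(adev, reg, v, 0);
 */
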
 414/*
 415 * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
 416 *
 417 * this function is invoked only for debugfs register access
 418 */
 419void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 420		    uint32_t acc_flags)
 421{
 422	if (amdgpu_sriov_fullaccess(adev) &&
 423		adev->gfx.rlc.funcs &&
 424		adev->gfx.rlc.funcs->is_rlcg_access_range) {
 425
 426		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
 427			return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
 428	}
 429
 430	amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
 431}
 432
 433/**
 434 * amdgpu_io_rreg - read an IO register
 435 *
 436 * @adev: amdgpu_device pointer
 437 * @reg: dword aligned register offset
 438 *
 439 * Returns the 32 bit value from the offset specified.
 440 */
 441u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
 442{
 443	if ((reg * 4) < adev->rio_mem_size)
 444		return ioread32(adev->rio_mem + (reg * 4));
 445	else {
 446		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
 447		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
 448	}
 449}
 450
 451/**
 452 * amdgpu_io_wreg - write to an IO register
 453 *
 454 * @adev: amdgpu_device pointer
 455 * @reg: dword aligned register offset
 456 * @v: 32 bit value to write to the register
 457 *
 458 * Writes the value specified to the offset specified.
 459 */
 460void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 461{
 462	if ((reg * 4) < adev->rio_mem_size)
 463		iowrite32(v, adev->rio_mem + (reg * 4));
 464	else {
 465		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
 466		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
 467	}
 468}
 469
 470/**
 471 * amdgpu_mm_rdoorbell - read a doorbell dword
 472 *
 473 * @adev: amdgpu_device pointer
 474 * @index: doorbell index
 475 *
 476 * Returns the value in the doorbell aperture at the
 477 * requested doorbell index (CIK).
 478 */
 479u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
 480{
 481	if (index < adev->doorbell.num_doorbells) {
 482		return readl(adev->doorbell.ptr + index);
 483	} else {
 484		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 485		return 0;
 486	}
 487}
 488
 489/**
 490 * amdgpu_mm_wdoorbell - write a doorbell dword
 491 *
 492 * @adev: amdgpu_device pointer
 493 * @index: doorbell index
 494 * @v: value to write
 495 *
 496 * Writes @v to the doorbell aperture at the
 497 * requested doorbell index (CIK).
 498 */
 499void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
 500{
 501	if (index < adev->doorbell.num_doorbells) {
 502		writel(v, adev->doorbell.ptr + index);
 503	} else {
 504		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 505	}
 506}
 507
 508/**
 509 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 510 *
 511 * @adev: amdgpu_device pointer
 512 * @index: doorbell index
 513 *
 514 * Returns the value in the doorbell aperture at the
 515 * requested doorbell index (VEGA10+).
 516 */
 517u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
 518{
 519	if (index < adev->doorbell.num_doorbells) {
 520		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
 521	} else {
 522		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 523		return 0;
 524	}
 525}
 526
 527/**
 528 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 529 *
 530 * @adev: amdgpu_device pointer
 531 * @index: doorbell index
 532 * @v: value to write
 533 *
 534 * Writes @v to the doorbell aperture at the
 535 * requested doorbell index (VEGA10+).
 536 */
 537void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
 538{
 539	if (index < adev->doorbell.num_doorbells) {
 540		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
 541	} else {
 542		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 543	}
 544}
 545
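/*
 * Illustrative 64-bit doorbell update, roughly what a ring does when it
 * advances its write pointer (ring is a stand-in for a real
 * struct amdgpu_ring pointer; real code goes through the WDOORBELL64()
 * macro):
 *
 *	amdgpu_mm_wdoorbell64(adev, ring->doorbell_index, ring->wptr);
 */
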
 546/**
 547 * amdgpu_invalid_rreg - dummy reg read function
 548 *
 549 * @adev: amdgpu device pointer
 550 * @reg: offset of register
 551 *
 552 * Dummy register read function.  Used for register blocks
 553 * that certain asics don't have (all asics).
 554 * Returns the value in the register.
 555 */
 556static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
 557{
 558	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
 559	BUG();
 560	return 0;
 561}
 562
 563/**
 564 * amdgpu_invalid_wreg - dummy reg write function
 565 *
 566 * @adev: amdgpu device pointer
 567 * @reg: offset of register
 568 * @v: value to write to the register
 569 *
 570 * Dummy register write function.  Used for register blocks
 571 * that certain asics don't have (all asics).
 572 */
 573static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 574{
 575	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
 576		  reg, v);
 577	BUG();
 578}
 579
 580/**
 581 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 582 *
 583 * @adev: amdgpu device pointer
 584 * @reg: offset of register
 585 *
 586 * Dummy register read function.  Used for register blocks
 587 * that certain asics don't have (all asics).
 588 * Returns the value in the register.
 589 */
 590static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
 591{
 592	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
 593	BUG();
 594	return 0;
 595}
 596
 597/**
 598 * amdgpu_invalid_wreg64 - dummy reg write function
 599 *
 600 * @adev: amdgpu device pointer
 601 * @reg: offset of register
 602 * @v: value to write to the register
 603 *
 604 * Dummy register write function.  Used for register blocks
 605 * that certain asics don't have (all asics).
 606 */
 607static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
 608{
 609	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
 610		  reg, v);
 611	BUG();
 612}
 613
 614/**
 615 * amdgpu_block_invalid_rreg - dummy reg read function
 616 *
 617 * @adev: amdgpu device pointer
 618 * @block: offset of instance
 619 * @reg: offset of register
 620 *
 621 * Dummy register read function.  Used for register blocks
 622 * that certain asics don't have (all asics).
 623 * Returns the value in the register.
 624 */
 625static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
 626					  uint32_t block, uint32_t reg)
 627{
 628	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
 629		  reg, block);
 630	BUG();
 631	return 0;
 632}
 633
 634/**
 635 * amdgpu_block_invalid_wreg - dummy reg write function
 636 *
 637 * @adev: amdgpu device pointer
 638 * @block: offset of instance
 639 * @reg: offset of register
 640 * @v: value to write to the register
 641 *
 642 * Dummy register write function.  Used for register blocks
 643 * that certain asics don't have (all asics).
 644 */
 645static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
 646				      uint32_t block,
 647				      uint32_t reg, uint32_t v)
 648{
 649	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
 650		  reg, block, v);
 651	BUG();
 652}
 653
 654/**
 655 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 656 *
 657 * @adev: amdgpu device pointer
 658 *
 659 * Allocates a scratch page of VRAM for use by various things in the
 660 * driver.
 661 */
 662static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
 663{
 664	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
 665				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
 666				       &adev->vram_scratch.robj,
 667				       &adev->vram_scratch.gpu_addr,
 668				       (void **)&adev->vram_scratch.ptr);
 669}
 670
 671/**
 672 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 673 *
 674 * @adev: amdgpu device pointer
 675 *
 676 * Frees the VRAM scratch page.
 677 */
 678static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
 679{
 680	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
 681}
 682
 683/**
 684 * amdgpu_device_program_register_sequence - program an array of registers.
 685 *
 686 * @adev: amdgpu_device pointer
 687 * @registers: pointer to the register array
 688 * @array_size: size of the register array
 689 *
 690 * Programs an array of registers with AND and OR masks.
 691 * This is a helper for setting golden registers.
 692 */
 693void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
 694					     const u32 *registers,
 695					     const u32 array_size)
 696{
 697	u32 tmp, reg, and_mask, or_mask;
 698	int i;
 699
 700	if (array_size % 3)
 701		return;
 702
 703	for (i = 0; i < array_size; i += 3) {
 704		reg = registers[i + 0];
 705		and_mask = registers[i + 1];
 706		or_mask = registers[i + 2];
 707
 708		if (and_mask == 0xffffffff) {
 709			tmp = or_mask;
 710		} else {
 711			tmp = RREG32(reg);
 712			tmp &= ~and_mask;
 713			if (adev->family >= AMDGPU_FAMILY_AI)
 714				tmp |= (or_mask & and_mask);
 715			else
 716				tmp |= or_mask;
 717		}
 718		WREG32(reg, tmp);
 719	}
 720}
 721
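/*
 * Illustrative golden-register array for the helper above; entries are
 * consumed as {offset, and_mask, or_mask} triples, and mmREG_A stands in
 * for a real register define:
 *
 *	static const u32 golden_settings[] = {
 *		mmREG_A, 0xffffffff, 0x00000001,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings,
 *						ARRAY_SIZE(golden_settings));
 */
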
 722/**
 723 * amdgpu_device_pci_config_reset - reset the GPU
 724 *
 725 * @adev: amdgpu_device pointer
 726 *
 727 * Resets the GPU using the pci config reset sequence.
 728 * Only applicable to asics prior to vega10.
 729 */
 730void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
 731{
 732	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
 733}
 734
 735/*
 736 * GPU doorbell aperture helpers function.
 737 */
 738/**
 739 * amdgpu_device_doorbell_init - Init doorbell driver information.
 740 *
 741 * @adev: amdgpu_device pointer
 742 *
 743 * Init doorbell driver information (CIK)
 744 * Returns 0 on success, error on failure.
 745 */
 746static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
 747{
 748
 749	/* No doorbell on SI hardware generation */
 750	if (adev->asic_type < CHIP_BONAIRE) {
 751		adev->doorbell.base = 0;
 752		adev->doorbell.size = 0;
 753		adev->doorbell.num_doorbells = 0;
 754		adev->doorbell.ptr = NULL;
 755		return 0;
 756	}
 757
 758	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
 759		return -EINVAL;
 760
 761	amdgpu_asic_init_doorbell_index(adev);
 762
 763	/* doorbell bar mapping */
 764	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
 765	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
 766
 767	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
 768					     adev->doorbell_index.max_assignment+1);
 769	if (adev->doorbell.num_doorbells == 0)
 770		return -EINVAL;
 771
 772	/* For Vega, reserve and map two pages on the doorbell BAR since the
 773	 * SDMA paging queue doorbell uses the second page. The
 774	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
 775	 * doorbells are in the first page, so with the paging queue enabled
 776	 * num_doorbells is extended by one page (0x400 dwords).
 777	 */
 778	if (adev->asic_type >= CHIP_VEGA10)
 779		adev->doorbell.num_doorbells += 0x400;
 780
 781	adev->doorbell.ptr = ioremap(adev->doorbell.base,
 782				     adev->doorbell.num_doorbells *
 783				     sizeof(u32));
 784	if (adev->doorbell.ptr == NULL)
 785		return -ENOMEM;
 786
 787	return 0;
 788}
 789
 790/**
 791 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 792 *
 793 * @adev: amdgpu_device pointer
 794 *
 795 * Tear down doorbell driver information (CIK)
 796 */
 797static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
 798{
 799	iounmap(adev->doorbell.ptr);
 800	adev->doorbell.ptr = NULL;
 801}
 802
 803
 804
 805/*
 806 * amdgpu_device_wb_*()
 807 * Writeback is the method by which the GPU updates special pages in memory
 808 * with the status of certain GPU events (fences, ring pointers, etc.).
 809 */
 810
 811/**
 812 * amdgpu_device_wb_fini - Disable Writeback and free memory
 813 *
 814 * @adev: amdgpu_device pointer
 815 *
 816 * Disables Writeback and frees the Writeback memory (all asics).
 817 * Used at driver shutdown.
 818 */
 819static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
 820{
 821	if (adev->wb.wb_obj) {
 822		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
 823				      &adev->wb.gpu_addr,
 824				      (void **)&adev->wb.wb);
 825		adev->wb.wb_obj = NULL;
 826	}
 827}
 828
 829/**
 830 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 831 *
 832 * @adev: amdgpu_device pointer
 833 *
 834 * Initializes writeback and allocates writeback memory (all asics).
 835 * Used at driver startup.
 836 * Returns 0 on success or a negative error code on failure.
 837 */
 838static int amdgpu_device_wb_init(struct amdgpu_device *adev)
 839{
 840	int r;
 841
 842	if (adev->wb.wb_obj == NULL) {
 843		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
 844		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
 845					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
 846					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
 847					    (void **)&adev->wb.wb);
 848		if (r) {
 849			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
 850			return r;
 851		}
 852
 853		adev->wb.num_wb = AMDGPU_MAX_WB;
 854		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
 855
 856		/* clear wb memory */
 857		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
 858	}
 859
 860	return 0;
 861}
 862
 863/**
 864 * amdgpu_device_wb_get - Allocate a wb entry
 865 *
 866 * @adev: amdgpu_device pointer
 867 * @wb: wb index
 868 *
 869 * Allocate a wb slot for use by the driver (all asics).
 870 * Returns 0 on success or -EINVAL on failure.
 871 */
 872int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
 873{
 874	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
 875
 876	if (offset < adev->wb.num_wb) {
 877		__set_bit(offset, adev->wb.used);
 878		*wb = offset << 3; /* convert to dw offset */
 879		return 0;
 880	} else {
 881		return -EINVAL;
 882	}
 883}
 884
 885/**
 886 * amdgpu_device_wb_free - Free a wb entry
 887 *
 888 * @adev: amdgpu_device pointer
 889 * @wb: wb index
 890 *
 891 * Free a wb slot allocated for use by the driver (all asics)
 892 */
 893void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
 894{
 895	wb >>= 3;
 896	if (wb < adev->wb.num_wb)
 897		__clear_bit(wb, adev->wb.used);
 898}
 899
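/*
 * Typical allocate/use/free pattern for a writeback slot (a sketch; the
 * GPU-side write is elided):
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *
 *		// have the GPU write status to gpu_addr, then read it
 *		// back from adev->wb.wb[wb]
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */
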
 900/**
 901 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 902 *
 903 * @adev: amdgpu_device pointer
 904 *
 905 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 906 * to fail, but if any of the BARs is not accessible after the resize we abort
 907 * driver loading by returning -ENODEV.
 908 */
 909int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 910{
 911	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
 912	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
 913	struct pci_bus *root;
 914	struct resource *res;
 915	unsigned i;
 916	u16 cmd;
 917	int r;
 918
 919	/* Bypass for VF */
 920	if (amdgpu_sriov_vf(adev))
 921		return 0;
 922
 923	/* skip if the bios has already enabled large BAR */
 924	if (adev->gmc.real_vram_size &&
 925	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
 926		return 0;
 927
 928	/* Check if the root BUS has 64bit memory resources */
 929	root = adev->pdev->bus;
 930	while (root->parent)
 931		root = root->parent;
 932
 933	pci_bus_for_each_resource(root, res, i) {
 934		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
 935		    res->start > 0x100000000ull)
 936			break;
 937	}
 938
 939	/* Trying to resize is pointless without a root hub window above 4GB */
 940	if (!res)
 941		return 0;
 942
 943	/* Disable memory decoding while we change the BAR addresses and size */
 944	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
 945	pci_write_config_word(adev->pdev, PCI_COMMAND,
 946			      cmd & ~PCI_COMMAND_MEMORY);
 947
 948	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
 949	amdgpu_device_doorbell_fini(adev);
 950	if (adev->asic_type >= CHIP_BONAIRE)
 951		pci_release_resource(adev->pdev, 2);
 952
 953	pci_release_resource(adev->pdev, 0);
 954
 955	r = pci_resize_resource(adev->pdev, 0, rbar_size);
 956	if (r == -ENOSPC)
 957		DRM_INFO("Not enough PCI address space for a large BAR.");
 958	else if (r && r != -ENOTSUPP)
 959		DRM_ERROR("Problem resizing BAR0 (%d).", r);
 960
 961	pci_assign_unassigned_bus_resources(adev->pdev->bus);
 962
 963	/* When the doorbell or fb BAR isn't available we have no chance of
 964	 * using the device.
 965	 */
 966	r = amdgpu_device_doorbell_init(adev);
 967	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
 968		return -ENODEV;
 969
 970	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
 971
 972	return 0;
 973}
 974
 975/*
 976 * GPU helpers function.
 977 */
 978/**
 979 * amdgpu_device_need_post - check if the hw needs post or not
 980 *
 981 * @adev: amdgpu_device pointer
 982 *
 983 * Check if the asic has been initialized (all asics) at driver startup
 984 * or if post is needed after a hw reset is performed.
 985 * Returns true if post is needed, false if not.
 986 */
 987bool amdgpu_device_need_post(struct amdgpu_device *adev)
 988{
 989	uint32_t reg;
 990
 991	if (amdgpu_sriov_vf(adev))
 992		return false;
 993
 994	if (amdgpu_passthrough(adev)) {
 995		/* for FIJI: in the whole GPU pass-through virtualization case, after a VM
 996		 * reboot some old SMC firmware still needs the driver to do a vPost or the
 997		 * GPU hangs. SMC firmware versions above 22.15 don't have this flaw, so we
 998		 * force vPost for SMC versions below 22.15.
 999		 */
1000		if (adev->asic_type == CHIP_FIJI) {
1001			int err;
1002			uint32_t fw_ver;
1003			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1004			/* force vPost if an error occurred */
1005			if (err)
1006				return true;
1007
1008			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1009			if (fw_ver < 0x00160e00)
1010				return true;
1011		}
1012	}
1013
1014	if (adev->has_hw_reset) {
1015		adev->has_hw_reset = false;
1016		return true;
1017	}
1018
1019	/* bios scratch used on CIK+ */
1020	if (adev->asic_type >= CHIP_BONAIRE)
1021		return amdgpu_atombios_scratch_need_asic_init(adev);
1022
1023	/* check MEM_SIZE for older asics */
1024	reg = amdgpu_asic_get_config_memsize(adev);
1025
1026	if ((reg != 0) && (reg != 0xffffffff))
1027		return false;
1028
1029	return true;
1030}
1031
1032/* if we get transitioned to only one device, take VGA back */
1033/**
1034 * amdgpu_device_vga_set_decode - enable/disable vga decode
1035 *
1036 * @cookie: amdgpu_device pointer
1037 * @state: enable/disable vga decode
1038 *
1039 * Enable/disable vga decode (all asics).
1040 * Returns VGA resource flags.
1041 */
1042static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
1043{
1044	struct amdgpu_device *adev = cookie;
1045	amdgpu_asic_set_vga_state(adev, state);
1046	if (state)
1047		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1048		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1049	else
1050		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1051}
1052
1053/**
1054 * amdgpu_device_check_block_size - validate the vm block size
1055 *
1056 * @adev: amdgpu_device pointer
1057 *
1058 * Validates the vm block size specified via module parameter.
1059 * The vm block size defines number of bits in page table versus page directory,
1060 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1061 * page table and the remaining bits are in the page directory.
1062 */
1063static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1064{
1065	/* defines number of bits in page table versus page directory,
1066	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1067	 * page table and the remaining bits are in the page directory */
1068	if (amdgpu_vm_block_size == -1)
1069		return;
1070
1071	if (amdgpu_vm_block_size < 9) {
1072		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1073			 amdgpu_vm_block_size);
1074		amdgpu_vm_block_size = -1;
1075	}
1076}
1077
1078/**
1079 * amdgpu_device_check_vm_size - validate the vm size
1080 *
1081 * @adev: amdgpu_device pointer
1082 *
1083 * Validates the vm size in GB specified via module parameter.
1084 * The VM size is the size of the GPU virtual memory space in GB.
1085 */
1086static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1087{
1088	/* no need to check the default value */
1089	if (amdgpu_vm_size == -1)
1090		return;
1091
1092	if (amdgpu_vm_size < 1) {
1093		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1094			 amdgpu_vm_size);
1095		amdgpu_vm_size = -1;
1096	}
1097}
1098
1099static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1100{
1101	struct sysinfo si;
1102	bool is_os_64 = (sizeof(void *) == 8);
1103	uint64_t total_memory;
1104	uint64_t dram_size_seven_GB = 0x1B8000000;
1105	uint64_t dram_size_three_GB = 0xB8000000;
1106
1107	if (amdgpu_smu_memory_pool_size == 0)
1108		return;
1109
1110	if (!is_os_64) {
1111		DRM_WARN("Not 64-bit OS, feature not supported\n");
1112		goto def_value;
1113	}
1114	si_meminfo(&si);
1115	total_memory = (uint64_t)si.totalram * si.mem_unit;
1116
1117	if ((amdgpu_smu_memory_pool_size == 1) ||
1118		(amdgpu_smu_memory_pool_size == 2)) {
1119		if (total_memory < dram_size_three_GB)
1120			goto def_value1;
1121	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1122		(amdgpu_smu_memory_pool_size == 8)) {
1123		if (total_memory < dram_size_seven_GB)
1124			goto def_value1;
1125	} else {
1126		DRM_WARN("Smu memory pool size not supported\n");
1127		goto def_value;
1128	}
1129	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1130
1131	return;
1132
1133def_value1:
1134	DRM_WARN("Not enough system memory\n");
1135def_value:
1136	adev->pm.smu_prv_buffer_size = 0;
1137}
1138
1139/**
1140 * amdgpu_device_check_arguments - validate module params
1141 *
1142 * @adev: amdgpu_device pointer
1143 *
1144 * Validates certain module parameters and updates
1145 * the associated values used by the driver (all asics).
1146 */
1147static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1148{
1149	if (amdgpu_sched_jobs < 4) {
1150		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1151			 amdgpu_sched_jobs);
1152		amdgpu_sched_jobs = 4;
1153	} else if (!is_power_of_2(amdgpu_sched_jobs)){
1154		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1155			 amdgpu_sched_jobs);
1156		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1157	}
1158
1159	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1160		/* gart size must be greater or equal to 32M */
1161		dev_warn(adev->dev, "gart size (%d) too small\n",
1162			 amdgpu_gart_size);
1163		amdgpu_gart_size = -1;
1164	}
1165
1166	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1167		/* gtt size must be greater or equal to 32M */
1168		dev_warn(adev->dev, "gtt size (%d) too small\n",
1169				 amdgpu_gtt_size);
1170		amdgpu_gtt_size = -1;
1171	}
1172
1173	/* valid range is between 4 and 9 inclusive */
1174	if (amdgpu_vm_fragment_size != -1 &&
1175	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1176		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1177		amdgpu_vm_fragment_size = -1;
1178	}
1179
1180	if (amdgpu_sched_hw_submission < 2) {
1181		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1182			 amdgpu_sched_hw_submission);
1183		amdgpu_sched_hw_submission = 2;
1184	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1185		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1186			 amdgpu_sched_hw_submission);
1187		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1188	}
1189
1190	amdgpu_device_check_smu_prv_buffer_size(adev);
1191
1192	amdgpu_device_check_vm_size(adev);
1193
1194	amdgpu_device_check_block_size(adev);
1195
1196	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1197
1198	amdgpu_gmc_tmz_set(adev);
1199
1200	return 0;
1201}
1202
1203/**
1204 * amdgpu_switcheroo_set_state - set switcheroo state
1205 *
1206 * @pdev: pci dev pointer
1207 * @state: vga_switcheroo state
1208 *
1209 * Callback for the switcheroo driver.  Suspends or resumes the
1210 * asics before or after it is powered up using ACPI methods.
1211 */
1212static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1213{
1214	struct drm_device *dev = pci_get_drvdata(pdev);
1215	int r;
1216
1217	if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
1218		return;
1219
1220	if (state == VGA_SWITCHEROO_ON) {
1221		pr_info("switched on\n");
1222		/* don't suspend or resume card normally */
1223		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1224
1225		pci_set_power_state(dev->pdev, PCI_D0);
1226		pci_restore_state(dev->pdev);
1227		r = pci_enable_device(dev->pdev);
1228		if (r)
1229			DRM_WARN("pci_enable_device failed (%d)\n", r);
1230		amdgpu_device_resume(dev, true);
1231
1232		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1233		drm_kms_helper_poll_enable(dev);
1234	} else {
1235		pr_info("switched off\n");
1236		drm_kms_helper_poll_disable(dev);
1237		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1238		amdgpu_device_suspend(dev, true);
1239		pci_save_state(dev->pdev);
1240		/* Shut down the device */
1241		pci_disable_device(dev->pdev);
1242		pci_set_power_state(dev->pdev, PCI_D3cold);
1243		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1244	}
1245}
1246
1247/**
1248 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1249 *
1250 * @pdev: pci dev pointer
1251 *
1252 * Callback for the switcheroo driver.  Checks if the switcheroo
1253 * state can be changed.
1254 * Returns true if the state can be changed, false if not.
1255 */
1256static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1257{
1258	struct drm_device *dev = pci_get_drvdata(pdev);
1259
1260	/*
1261	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1262	* locking inversion with the driver load path. And the access here is
1263	* completely racy anyway. So don't bother with locking for now.
1264	*/
1265	return atomic_read(&dev->open_count) == 0;
1266}
1267
1268static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1269	.set_gpu_state = amdgpu_switcheroo_set_state,
1270	.reprobe = NULL,
1271	.can_switch = amdgpu_switcheroo_can_switch,
1272};
1273
1274/**
1275 * amdgpu_device_ip_set_clockgating_state - set the CG state
1276 *
1277 * @dev: amdgpu_device pointer
1278 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1279 * @state: clockgating state (gate or ungate)
1280 *
1281 * Sets the requested clockgating state for all instances of
1282 * the hardware IP specified.
1283 * Returns the error code from the last instance.
1284 */
1285int amdgpu_device_ip_set_clockgating_state(void *dev,
1286					   enum amd_ip_block_type block_type,
1287					   enum amd_clockgating_state state)
1288{
1289	struct amdgpu_device *adev = dev;
1290	int i, r = 0;
1291
1292	for (i = 0; i < adev->num_ip_blocks; i++) {
1293		if (!adev->ip_blocks[i].status.valid)
1294			continue;
1295		if (adev->ip_blocks[i].version->type != block_type)
1296			continue;
1297		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1298			continue;
1299		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1300			(void *)adev, state);
1301		if (r)
1302			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1303				  adev->ip_blocks[i].version->funcs->name, r);
1304	}
1305	return r;
1306}
1307
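/*
 * Illustrative call: request clock gating for all instances of one IP
 * type, e.g. GFX:
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_CG_STATE_GATE);
 */
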
1308/**
1309 * amdgpu_device_ip_set_powergating_state - set the PG state
1310 *
1311 * @dev: amdgpu_device pointer
1312 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1313 * @state: powergating state (gate or ungate)
1314 *
1315 * Sets the requested powergating state for all instances of
1316 * the hardware IP specified.
1317 * Returns the error code from the last instance.
1318 */
1319int amdgpu_device_ip_set_powergating_state(void *dev,
1320					   enum amd_ip_block_type block_type,
1321					   enum amd_powergating_state state)
1322{
1323	struct amdgpu_device *adev = dev;
1324	int i, r = 0;
1325
1326	for (i = 0; i < adev->num_ip_blocks; i++) {
1327		if (!adev->ip_blocks[i].status.valid)
1328			continue;
1329		if (adev->ip_blocks[i].version->type != block_type)
1330			continue;
1331		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1332			continue;
1333		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1334			(void *)adev, state);
1335		if (r)
1336			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1337				  adev->ip_blocks[i].version->funcs->name, r);
1338	}
1339	return r;
1340}
1341
1342/**
1343 * amdgpu_device_ip_get_clockgating_state - get the CG state
1344 *
1345 * @adev: amdgpu_device pointer
1346 * @flags: clockgating feature flags
1347 *
1348 * Walks the list of IPs on the device and updates the clockgating
1349 * flags for each IP.
1350 * Updates @flags with the feature flags for each hardware IP where
1351 * clockgating is enabled.
1352 */
1353void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1354					    u32 *flags)
1355{
1356	int i;
1357
1358	for (i = 0; i < adev->num_ip_blocks; i++) {
1359		if (!adev->ip_blocks[i].status.valid)
1360			continue;
1361		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1362			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1363	}
1364}
1365
1366/**
1367 * amdgpu_device_ip_wait_for_idle - wait for idle
1368 *
1369 * @adev: amdgpu_device pointer
1370 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1371 *
1372 * Waits for the requested hardware IP to be idle.
1373 * Returns 0 for success or a negative error code on failure.
1374 */
1375int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1376				   enum amd_ip_block_type block_type)
1377{
1378	int i, r;
1379
1380	for (i = 0; i < adev->num_ip_blocks; i++) {
1381		if (!adev->ip_blocks[i].status.valid)
1382			continue;
1383		if (adev->ip_blocks[i].version->type == block_type) {
1384			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1385			if (r)
1386				return r;
1387			break;
1388		}
1389	}
1390	return 0;
1391
1392}
1393
1394/**
1395 * amdgpu_device_ip_is_idle - is the hardware IP idle
1396 *
1397 * @adev: amdgpu_device pointer
1398 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1399 *
1400 * Check if the hardware IP is idle or not.
1401 * Returns true if the IP is idle, false if not.
1402 */
1403bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1404			      enum amd_ip_block_type block_type)
1405{
1406	int i;
1407
1408	for (i = 0; i < adev->num_ip_blocks; i++) {
1409		if (!adev->ip_blocks[i].status.valid)
1410			continue;
1411		if (adev->ip_blocks[i].version->type == block_type)
1412			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1413	}
1414	return true;
1415
1416}
1417
1418/**
1419 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1420 *
1421 * @adev: amdgpu_device pointer
1422 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1423 *
1424 * Returns a pointer to the hardware IP block structure
1425 * if it exists for the asic, otherwise NULL.
1426 */
1427struct amdgpu_ip_block *
1428amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1429			      enum amd_ip_block_type type)
1430{
1431	int i;
1432
1433	for (i = 0; i < adev->num_ip_blocks; i++)
1434		if (adev->ip_blocks[i].version->type == type)
1435			return &adev->ip_blocks[i];
1436
1437	return NULL;
1438}
1439
1440/**
1441 * amdgpu_device_ip_block_version_cmp
1442 *
1443 * @adev: amdgpu_device pointer
1444 * @type: enum amd_ip_block_type
1445 * @major: major version
1446 * @minor: minor version
1447 *
1448 * return 0 if equal or greater
1449 * return 1 if smaller or the ip_block doesn't exist
1450 */
1451int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1452				       enum amd_ip_block_type type,
1453				       u32 major, u32 minor)
1454{
1455	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1456
1457	if (ip_block && ((ip_block->version->major > major) ||
1458			((ip_block->version->major == major) &&
1459			(ip_block->version->minor >= minor))))
1460		return 0;
1461
1462	return 1;
1463}
1464
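/*
 * Illustrative check with the helper above: take a code path only when
 * the GFX IP block is at least version 9.1:
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						9, 1)) {
 *		// GFX 9.1 or newer
 *	}
 */
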
1465/**
1466 * amdgpu_device_ip_block_add
1467 *
1468 * @adev: amdgpu_device pointer
1469 * @ip_block_version: pointer to the IP to add
1470 *
1471 * Adds the IP block driver information to the collection of IPs
1472 * on the asic.
1473 */
1474int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1475			       const struct amdgpu_ip_block_version *ip_block_version)
1476{
1477	if (!ip_block_version)
1478		return -EINVAL;
1479
1480	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1481		  ip_block_version->funcs->name);
1482
1483	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1484
1485	return 0;
1486}
1487
1488/**
1489 * amdgpu_device_enable_virtual_display - enable virtual display feature
1490 *
1491 * @adev: amdgpu_device pointer
1492 *
1493 * Enables the virtual display feature if the user has enabled it via
1494 * the module parameter virtual_display.  This feature provides a virtual
1495 * display hardware on headless boards or in virtualized environments.
1496 * This function parses and validates the configuration string specified by
1497 * the user and configures the virtual display configuration (number of
1498 * virtual connectors, crtcs, etc.) specified.
1499 */
1500static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1501{
1502	adev->enable_virtual_display = false;
1503
1504	if (amdgpu_virtual_display) {
1505		struct drm_device *ddev = adev->ddev;
1506		const char *pci_address_name = pci_name(ddev->pdev);
1507		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1508
1509		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1510		pciaddstr_tmp = pciaddstr;
1511		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1512			pciaddname = strsep(&pciaddname_tmp, ",");
1513			if (!strcmp("all", pciaddname)
1514			    || !strcmp(pci_address_name, pciaddname)) {
1515				long num_crtc;
1516				int res = -1;
1517
1518				adev->enable_virtual_display = true;
1519
1520				if (pciaddname_tmp)
1521					res = kstrtol(pciaddname_tmp, 10,
1522						      &num_crtc);
1523
1524				if (!res) {
1525					if (num_crtc < 1)
1526						num_crtc = 1;
1527					if (num_crtc > 6)
1528						num_crtc = 6;
1529					adev->mode_info.num_crtc = num_crtc;
1530				} else {
1531					adev->mode_info.num_crtc = 1;
1532				}
1533				break;
1534			}
1535		}
1536
1537		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1538			 amdgpu_virtual_display, pci_address_name,
1539			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1540
1541		kfree(pciaddstr);
1542	}
1543}
1544
1545/**
1546 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1547 *
1548 * @adev: amdgpu_device pointer
1549 *
1550 * Parses the asic configuration parameters specified in the gpu info
1551 * firmware and makes them available to the driver for use in configuring
1552 * the asic.
1553 * Returns 0 on success, -EINVAL on failure.
1554 */
1555static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1556{
1557	const char *chip_name;
1558	char fw_name[40];
1559	int err;
1560	const struct gpu_info_firmware_header_v1_0 *hdr;
1561
1562	adev->firmware.gpu_info_fw = NULL;
1563
1564	if (adev->discovery_bin) {
1565		amdgpu_discovery_get_gfx_info(adev);
1566
1567		/*
1568		 * FIXME: The bounding box is still needed by Navi12, so
1569 * temporarily read it from gpu_info firmware. Should be dropped
1570		 * when DAL no longer needs it.
1571		 */
1572		if (adev->asic_type != CHIP_NAVI12)
1573			return 0;
1574	}
1575
1576	switch (adev->asic_type) {
1577#ifdef CONFIG_DRM_AMDGPU_SI
1578	case CHIP_VERDE:
1579	case CHIP_TAHITI:
1580	case CHIP_PITCAIRN:
1581	case CHIP_OLAND:
1582	case CHIP_HAINAN:
1583#endif
1584#ifdef CONFIG_DRM_AMDGPU_CIK
1585	case CHIP_BONAIRE:
1586	case CHIP_HAWAII:
1587	case CHIP_KAVERI:
1588	case CHIP_KABINI:
1589	case CHIP_MULLINS:
1590#endif
1591	case CHIP_TOPAZ:
1592	case CHIP_TONGA:
1593	case CHIP_FIJI:
1594	case CHIP_POLARIS10:
1595	case CHIP_POLARIS11:
1596	case CHIP_POLARIS12:
1597	case CHIP_VEGAM:
1598	case CHIP_CARRIZO:
1599	case CHIP_STONEY:
1600	case CHIP_VEGA20:
1601	case CHIP_SIENNA_CICHLID:
1602	case CHIP_NAVY_FLOUNDER:
1603	default:
1604		return 0;
1605	case CHIP_VEGA10:
1606		chip_name = "vega10";
1607		break;
1608	case CHIP_VEGA12:
1609		chip_name = "vega12";
1610		break;
1611	case CHIP_RAVEN:
1612		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1613			chip_name = "raven2";
1614		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1615			chip_name = "picasso";
1616		else
1617			chip_name = "raven";
1618		break;
1619	case CHIP_ARCTURUS:
1620		chip_name = "arcturus";
1621		break;
1622	case CHIP_RENOIR:
1623		chip_name = "renoir";
1624		break;
1625	case CHIP_NAVI10:
1626		chip_name = "navi10";
1627		break;
1628	case CHIP_NAVI14:
1629		chip_name = "navi14";
1630		break;
1631	case CHIP_NAVI12:
1632		chip_name = "navi12";
1633		break;
1634	}
1635
1636	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1637	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1638	if (err) {
1639		dev_err(adev->dev,
1640			"Failed to load gpu_info firmware \"%s\"\n",
1641			fw_name);
1642		goto out;
1643	}
1644	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1645	if (err) {
1646		dev_err(adev->dev,
1647			"Failed to validate gpu_info firmware \"%s\"\n",
1648			fw_name);
1649		goto out;
1650	}
1651
1652	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1653	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1654
1655	switch (hdr->version_major) {
1656	case 1:
1657	{
1658		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1659			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1660								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1661
1662		/*
1663		 * Should be dropped when DAL no longer needs it.
1664		 */
1665		if (adev->asic_type == CHIP_NAVI12)
1666			goto parse_soc_bounding_box;
1667
1668		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1669		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1670		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1671		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1672		adev->gfx.config.max_texture_channel_caches =
1673			le32_to_cpu(gpu_info_fw->gc_num_tccs);
1674		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1675		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1676		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1677		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1678		adev->gfx.config.double_offchip_lds_buf =
1679			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1680		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1681		adev->gfx.cu_info.max_waves_per_simd =
1682			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1683		adev->gfx.cu_info.max_scratch_slots_per_cu =
1684			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1685		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1686		if (hdr->version_minor >= 1) {
1687			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1688				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1689									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1690			adev->gfx.config.num_sc_per_sh =
1691				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1692			adev->gfx.config.num_packer_per_sc =
1693				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1694		}
1695
1696parse_soc_bounding_box:
1697		/*
1698		 * soc bounding box info is not integrated in the discovery table,
1699		 * so we always need to parse it from gpu info firmware if needed.
1700		 */
1701		if (hdr->version_minor == 2) {
1702			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1703				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1704									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1705			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1706		}
1707		break;
1708	}
1709	default:
1710		dev_err(adev->dev,
1711			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1712		err = -EINVAL;
1713		goto out;
1714	}
1715out:
1716	return err;
1717}
1718
1719/**
1720 * amdgpu_device_ip_early_init - run early init for hardware IPs
1721 *
1722 * @adev: amdgpu_device pointer
1723 *
1724 * Early initialization pass for hardware IPs.  The hardware IPs that make
1725 * up each asic are discovered and each IP's early_init callback is run.  This
1726 * is the first stage in initializing the asic.
1727 * Returns 0 on success, negative error code on failure.
1728 */
1729static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1730{
1731	int i, r;
1732
1733	amdgpu_device_enable_virtual_display(adev);
1734
1735	if (amdgpu_sriov_vf(adev)) {
1736		r = amdgpu_virt_request_full_gpu(adev, true);
1737		if (r)
1738			return r;
1739	}
1740
1741	switch (adev->asic_type) {
1742#ifdef CONFIG_DRM_AMDGPU_SI
1743	case CHIP_VERDE:
1744	case CHIP_TAHITI:
1745	case CHIP_PITCAIRN:
1746	case CHIP_OLAND:
1747	case CHIP_HAINAN:
1748		adev->family = AMDGPU_FAMILY_SI;
1749		r = si_set_ip_blocks(adev);
1750		if (r)
1751			return r;
1752		break;
1753#endif
1754#ifdef CONFIG_DRM_AMDGPU_CIK
1755	case CHIP_BONAIRE:
1756	case CHIP_HAWAII:
1757	case CHIP_KAVERI:
1758	case CHIP_KABINI:
1759	case CHIP_MULLINS:
1760		if (adev->flags & AMD_IS_APU)
1761			adev->family = AMDGPU_FAMILY_KV;
1762		else
1763			adev->family = AMDGPU_FAMILY_CI;
1764
1765		r = cik_set_ip_blocks(adev);
1766		if (r)
1767			return r;
1768		break;
1769#endif
1770	case CHIP_TOPAZ:
1771	case CHIP_TONGA:
1772	case CHIP_FIJI:
1773	case CHIP_POLARIS10:
1774	case CHIP_POLARIS11:
1775	case CHIP_POLARIS12:
1776	case CHIP_VEGAM:
1777	case CHIP_CARRIZO:
1778	case CHIP_STONEY:
1779		if (adev->flags & AMD_IS_APU)
1780			adev->family = AMDGPU_FAMILY_CZ;
1781		else
1782			adev->family = AMDGPU_FAMILY_VI;
1783
1784		r = vi_set_ip_blocks(adev);
1785		if (r)
1786			return r;
1787		break;
1788	case CHIP_VEGA10:
1789	case CHIP_VEGA12:
1790	case CHIP_VEGA20:
1791	case CHIP_RAVEN:
1792	case CHIP_ARCTURUS:
1793	case CHIP_RENOIR:
1794		if (adev->flags & AMD_IS_APU)
1795			adev->family = AMDGPU_FAMILY_RV;
1796		else
1797			adev->family = AMDGPU_FAMILY_AI;
1798
1799		r = soc15_set_ip_blocks(adev);
1800		if (r)
1801			return r;
1802		break;
1803	case  CHIP_NAVI10:
1804	case  CHIP_NAVI14:
1805	case  CHIP_NAVI12:
1806	case  CHIP_SIENNA_CICHLID:
1807	case  CHIP_NAVY_FLOUNDER:
1808		adev->family = AMDGPU_FAMILY_NV;
1809
1810		r = nv_set_ip_blocks(adev);
1811		if (r)
1812			return r;
1813		break;
1814	default:
1815		/* FIXME: not supported yet */
1816		return -EINVAL;
1817	}
1818
1819	amdgpu_amdkfd_device_probe(adev);
1820
1821	adev->pm.pp_feature = amdgpu_pp_feature_mask;
1822	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
1823		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1824
1825	for (i = 0; i < adev->num_ip_blocks; i++) {
1826		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1827			DRM_ERROR("disabled ip block: %d <%s>\n",
1828				  i, adev->ip_blocks[i].version->funcs->name);
1829			adev->ip_blocks[i].status.valid = false;
1830		} else {
1831			if (adev->ip_blocks[i].version->funcs->early_init) {
1832				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1833				if (r == -ENOENT) {
1834					adev->ip_blocks[i].status.valid = false;
1835				} else if (r) {
1836					DRM_ERROR("early_init of IP block <%s> failed %d\n",
1837						  adev->ip_blocks[i].version->funcs->name, r);
1838					return r;
1839				} else {
1840					adev->ip_blocks[i].status.valid = true;
1841				}
1842			} else {
1843				adev->ip_blocks[i].status.valid = true;
1844			}
1845		}
1846		/* get the vbios after the asic_funcs are set up */
1847		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
1848			r = amdgpu_device_parse_gpu_info_fw(adev);
1849			if (r)
1850				return r;
1851
1852			/* Read BIOS */
1853			if (!amdgpu_get_bios(adev))
1854				return -EINVAL;
1855
1856			r = amdgpu_atombios_init(adev);
1857			if (r) {
1858				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
1859				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
1860				return r;
1861			}
1862		}
1863	}
1864
1865	adev->cg_flags &= amdgpu_cg_mask;
1866	adev->pg_flags &= amdgpu_pg_mask;
1867
1868	return 0;
1869}
1870
1871static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
1872{
1873	int i, r;
1874
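	/*
	 * Phase 1 brings up only the blocks everything else depends on:
	 * COMMON, IH, and (under SR-IOV) PSP.  The remaining blocks wait
	 * until firmware has been loaded.
	 */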
1875	for (i = 0; i < adev->num_ip_blocks; i++) {
1876		if (!adev->ip_blocks[i].status.sw)
1877			continue;
1878		if (adev->ip_blocks[i].status.hw)
1879			continue;
1880		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1881		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
1882		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
1883			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1884			if (r) {
1885				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1886					  adev->ip_blocks[i].version->funcs->name, r);
1887				return r;
1888			}
1889			adev->ip_blocks[i].status.hw = true;
1890		}
1891	}
1892
1893	return 0;
1894}
1895
1896static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
1897{
1898	int i, r;
1899
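	/*
	 * Phase 2 brings up every remaining block that phase 1 and the
	 * firmware-loading step have not already marked as initialized.
	 */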
1900	for (i = 0; i < adev->num_ip_blocks; i++) {
1901		if (!adev->ip_blocks[i].status.sw)
1902			continue;
1903		if (adev->ip_blocks[i].status.hw)
1904			continue;
1905		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1906		if (r) {
1907			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1908				  adev->ip_blocks[i].version->funcs->name, r);
1909			return r;
1910		}
1911		adev->ip_blocks[i].status.hw = true;
1912	}
1913
1914	return 0;
1915}
1916
1917static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
1918{
1919	int r = 0;
1920	int i;
1921	uint32_t smu_version;
1922
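	/*
	 * On ASICs with a PSP block (VEGA10 and later) firmware loading goes
	 * through PSP: hw_init it on a cold start, or resume it when coming
	 * back from reset or suspend.
	 */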
1923	if (adev->asic_type >= CHIP_VEGA10) {
1924		for (i = 0; i < adev->num_ip_blocks; i++) {
1925			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
1926				continue;
1927
1928			/* no need to do the fw loading again if already done */
1929			if (adev->ip_blocks[i].status.hw)
1930				break;
1931
1932			if (adev->in_gpu_reset || adev->in_suspend) {
1933				r = adev->ip_blocks[i].version->funcs->resume(adev);
1934				if (r) {
1935					DRM_ERROR("resume of IP block <%s> failed %d\n",
1936							  adev->ip_blocks[i].version->funcs->name, r);
1937					return r;
1938				}
1939			} else {
1940				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1941				if (r) {
1942					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1943							  adev->ip_blocks[i].version->funcs->name, r);
1944					return r;
1945				}
1946			}
1947
1948			adev->ip_blocks[i].status.hw = true;
1949			break;
1950		}
1951	}
1952
1953	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
1954		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
1955
1956	return r;
1957}
1958
1959/**
1960 * amdgpu_device_ip_init - run init for hardware IPs
1961 *
1962 * @adev: amdgpu_device pointer
1963 *
1964 * Main initialization pass for hardware IPs.  The list of all the hardware
1965 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
1966 * are run.  sw_init initializes the software state associated with each IP
1967 * and hw_init initializes the hardware associated with each IP.
1968 * Returns 0 on success, negative error code on failure.
1969 */
1970static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1971{
1972	int i, r;
1973
1974	r = amdgpu_ras_init(adev);
1975	if (r)
1976		return r;
1977
1978	for (i = 0; i < adev->num_ip_blocks; i++) {
1979		if (!adev->ip_blocks[i].status.valid)
1980			continue;
1981		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1982		if (r) {
1983			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1984				  adev->ip_blocks[i].version->funcs->name, r);
1985			goto init_failed;
1986		}
1987		adev->ip_blocks[i].status.sw = true;
1988
1989		/* need to do gmc hw init early so we can allocate gpu mem */
1990		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1991			r = amdgpu_device_vram_scratch_init(adev);
1992			if (r) {
1993				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1994				goto init_failed;
1995			}
1996			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1997			if (r) {
1998				DRM_ERROR("hw_init %d failed %d\n", i, r);
1999				goto init_failed;
2000			}
2001			r = amdgpu_device_wb_init(adev);
2002			if (r) {
2003				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2004				goto init_failed;
2005			}
2006			adev->ip_blocks[i].status.hw = true;
2007
2008			/* right after GMC hw init, we create CSA */
2009			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2010				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2011								AMDGPU_GEM_DOMAIN_VRAM,
2012								AMDGPU_CSA_SIZE);
2013				if (r) {
2014					DRM_ERROR("allocate CSA failed %d\n", r);
2015					goto init_failed;
2016				}
2017			}
2018		}
2019	}
2020
2021	if (amdgpu_sriov_vf(adev))
2022		amdgpu_virt_init_data_exchange(adev);
2023
2024	r = amdgpu_ib_pool_init(adev);
2025	if (r) {
2026		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2027		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2028		goto init_failed;
2029	}
2030
2031	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2032	if (r)
2033		goto init_failed;
2034
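	/*
	 * Hardware is brought up in three steps: init the blocks everything
	 * depends on (phase 1), load firmware through PSP/SMU, then init the
	 * remaining blocks (phase 2).
	 */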
2035	r = amdgpu_device_ip_hw_init_phase1(adev);
2036	if (r)
2037		goto init_failed;
2038
2039	r = amdgpu_device_fw_loading(adev);
2040	if (r)
2041		goto init_failed;
2042
2043	r = amdgpu_device_ip_hw_init_phase2(adev);
2044	if (r)
2045		goto init_failed;
2046
2047	/*
2048	 * Retired pages will be loaded from eeprom and reserved here;
2049	 * this should be called after amdgpu_device_ip_hw_init_phase2 since
2050	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2051	 * functional for I2C communication, which is only true at this point.
2052	 * recovery_init may fail, but it can free all resources allocated by
2053	 * itself and its failure should not stop the amdgpu init process.
2054	 *
2055	 * Note: theoretically, this should be called before all vram allocations
2056	 * to protect retired pages from abuse.
2057	 */
2058	amdgpu_ras_recovery_init(adev);
2059
2060	if (adev->gmc.xgmi.num_physical_nodes > 1)
2061		amdgpu_xgmi_add_device(adev);
2062	amdgpu_amdkfd_device_init(adev);
2063
2064	amdgpu_fru_get_product_info(adev);
2065
2066init_failed:
2067	if (amdgpu_sriov_vf(adev))
2068		amdgpu_virt_release_full_gpu(adev, true);
2069
2070	return r;
2071}
2072
2073/**
2074 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2075 *
2076 * @adev: amdgpu_device pointer
2077 *
2078 * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2079 * this function before a GPU reset.  If the value is retained after a
2080 * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2081 */
2082static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2083{
2084	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2085}
2086
2087/**
2088 * amdgpu_device_check_vram_lost - check if vram is valid
2089 *
2090 * @adev: amdgpu_device pointer
2091 *
2092 * Checks the reset magic value written to the gart pointer in VRAM.
2093 * The driver calls this after a GPU reset to see if the contents of
2094 * VRAM are lost or not.
2095 * Returns true if VRAM is lost, false if not.
2096 */
2097static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2098{
2099	if (memcmp(adev->gart.ptr, adev->reset_magic,
2100			AMDGPU_RESET_MAGIC_NUM))
2101		return true;
2102
2103	if (!adev->in_gpu_reset)
2104		return false;
2105
2106	/*
2107	 * For all ASICs with baco/mode1 reset, the VRAM is
2108	 * always assumed to be lost.
2109	 */
2110	switch (amdgpu_asic_reset_method(adev)) {
2111	case AMD_RESET_METHOD_BACO:
2112	case AMD_RESET_METHOD_MODE1:
2113		return true;
2114	default:
2115		return false;
2116	}
2117}
2118
2119/**
2120 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2121 *
2122 * @adev: amdgpu_device pointer
2123 * @state: clockgating state (gate or ungate)
2124 *
2125 * The list of all the hardware IPs that make up the asic is walked and the
2126 * set_clockgating_state callbacks are run.
2127 * During late init this is used to enable clockgating for hardware IPs;
2128 * during fini or suspend it is used to disable clockgating.
2129 * Returns 0 on success, negative error code on failure.
2130 */
2131
2132static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2133						enum amd_clockgating_state state)
2134{
2135	int i, j, r;
2136
2137	if (amdgpu_emu_mode == 1)
2138		return 0;
2139
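	/* Enable gating in IP block init order; disable it in reverse order. */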
2140	for (j = 0; j < adev->num_ip_blocks; j++) {
2141		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2142		if (!adev->ip_blocks[i].status.late_initialized)
2143			continue;
2144		/* skip CG for VCE/UVD, it's handled specially */
2145		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2146		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2147		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2148		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2149		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2150			/* enable clockgating to save power */
2151			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2152										     state);
2153			if (r) {
2154				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2155					  adev->ip_blocks[i].version->funcs->name, r);
2156				return r;
2157			}
2158		}
2159	}
2160
2161	return 0;
2162}
2163
2164static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
2165{
2166	int i, j, r;
2167
2168	if (amdgpu_emu_mode == 1)
2169		return 0;
2170
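	/* Enable powergating in IP block init order; disable it in reverse order. */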
2171	for (j = 0; j < adev->num_ip_blocks; j++) {
2172		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2173		if (!adev->ip_blocks[i].status.late_initialized)
2174			continue;
2175		/* skip PG for VCE/UVD, it's handled specially */
2176		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2177		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2178		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2179		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2180		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2181			/* enable powergating to save power */
2182			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2183											state);
2184			if (r) {
2185				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2186					  adev->ip_blocks[i].version->funcs->name, r);
2187				return r;
2188			}
2189		}
2190	}
2191	return 0;
2192}
2193
2194static int amdgpu_device_enable_mgpu_fan_boost(void)
2195{
2196	struct amdgpu_gpu_instance *gpu_ins;
2197	struct amdgpu_device *adev;
2198	int i, ret = 0;
2199
2200	mutex_lock(&mgpu_info.mutex);
2201
2202	/*
2203	 * MGPU fan boost feature should be enabled
2204	 * only when there are two or more dGPUs in
2205	 * the system
2206	 */
2207	if (mgpu_info.num_dgpu < 2)
2208		goto out;
2209
2210	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2211		gpu_ins = &(mgpu_info.gpu_ins[i]);
2212		adev = gpu_ins->adev;
2213		if (!(adev->flags & AMD_IS_APU) &&
2214		    !gpu_ins->mgpu_fan_enabled &&
2215		    adev->powerplay.pp_funcs &&
2216		    adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
2217			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2218			if (ret)
2219				break;
2220
2221			gpu_ins->mgpu_fan_enabled = 1;
2222		}
2223	}
2224
2225out:
2226	mutex_unlock(&mgpu_info.mutex);
2227
2228	return ret;
2229}
2230
2231/**
2232 * amdgpu_device_ip_late_init - run late init for hardware IPs
2233 *
2234 * @adev: amdgpu_device pointer
2235 *
2236 * Late initialization pass for hardware IPs.  The list of all the hardware
2237 * IPs that make up the asic is walked and the late_init callbacks are run.
2238 * late_init covers any special initialization that an IP requires
2239 * after all of the IPs have been initialized or something that needs to happen
2240 * late in the init process.
2241 * Returns 0 on success, negative error code on failure.
2242 */
2243static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2244{
2245	struct amdgpu_gpu_instance *gpu_instance;
2246	int i = 0, r;
2247
2248	for (i = 0; i < adev->num_ip_blocks; i++) {
2249		if (!adev->ip_blocks[i].status.hw)
2250			continue;
2251		if (adev->ip_blocks[i].version->funcs->late_init) {
2252			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2253			if (r) {
2254				DRM_ERROR("late_init of IP block <%s> failed %d\n",
2255					  adev->ip_blocks[i].version->funcs->name, r);
2256				return r;
2257			}
2258		}
2259		adev->ip_blocks[i].status.late_initialized = true;
2260	}
2261
2262	amdgpu_ras_set_error_query_ready(adev, true);
2263
2264	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2265	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2266
2267	amdgpu_device_fill_reset_magic(adev);
2268
2269	r = amdgpu_device_enable_mgpu_fan_boost();
2270	if (r)
2271		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2272
2273
2274	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2275		mutex_lock(&mgpu_info.mutex);
2276
2277		/*
2278		 * Reset device p-state to low as this was booted with high.
2279		 *
2280		 * This should be performed only after all devices from the same
2281		 * hive get initialized.
2282		 *
2283		 * However, the number of devices in the hive is not known in
2284		 * advance, as it is counted one by one during device initialization.
2285		 *
2286		 * So, we wait for all XGMI interlinked devices initialized.
2287		 * This may bring some delays as those devices may come from
2288		 * different hives. But that should be OK.
2289		 */
2290		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2291			for (i = 0; i < mgpu_info.num_gpu; i++) {
2292				gpu_instance = &(mgpu_info.gpu_ins[i]);
2293				if (gpu_instance->adev->flags & AMD_IS_APU)
2294					continue;
2295
2296				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2297						AMDGPU_XGMI_PSTATE_MIN);
2298				if (r) {
2299					DRM_ERROR("pstate setting failed (%d).\n", r);
2300					break;
2301				}
2302			}
2303		}
2304
2305		mutex_unlock(&mgpu_info.mutex);
2306	}
2307
2308	return 0;
2309}
2310
2311/**
2312 * amdgpu_device_ip_fini - run fini for hardware IPs
2313 *
2314 * @adev: amdgpu_device pointer
2315 *
2316 * Main teardown pass for hardware IPs.  The list of all the hardware
2317 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2318 * are run.  hw_fini tears down the hardware associated with each IP
2319 * and sw_fini tears down any software state associated with each IP.
2320 * Returns 0 on success, negative error code on failure.
2321 */
2322static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2323{
2324	int i, r;
2325
2326	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2327		amdgpu_virt_release_ras_err_handler_data(adev);
2328
2329	amdgpu_ras_pre_fini(adev);
2330
2331	if (adev->gmc.xgmi.num_physical_nodes > 1)
2332		amdgpu_xgmi_remove_device(adev);
2333
2334	amdgpu_amdkfd_device_fini(adev);
2335
2336	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2337	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2338
2339	/* need to disable SMC first */
2340	for (i = 0; i < adev->num_ip_blocks; i++) {
2341		if (!adev->ip_blocks[i].status.hw)
2342			continue;
2343		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2344			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2345			/* XXX handle errors */
2346			if (r) {
2347				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2348					  adev->ip_blocks[i].version->funcs->name, r);
2349			}
2350			adev->ip_blocks[i].status.hw = false;
2351			break;
2352		}
2353	}
2354
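	/* Tear down the remaining hardware in reverse init order. */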
2355	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2356		if (!adev->ip_blocks[i].status.hw)
2357			continue;
2358
2359		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2360		/* XXX handle errors */
2361		if (r) {
2362			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2363				  adev->ip_blocks[i].version->funcs->name, r);
2364		}
2365
2366		adev->ip_blocks[i].status.hw = false;
2367	}
2368
2369
2370	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2371		if (!adev->ip_blocks[i].status.sw)
2372			continue;
2373
2374		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2375			amdgpu_ucode_free_bo(adev);
2376			amdgpu_free_static_csa(&adev->virt.csa_obj);
2377			amdgpu_device_wb_fini(adev);
2378			amdgpu_device_vram_scratch_fini(adev);
2379			amdgpu_ib_pool_fini(adev);
2380		}
2381
2382		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2383		/* XXX handle errors */
2384		if (r) {
2385			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2386				  adev->ip_blocks[i].version->funcs->name, r);
2387		}
2388		adev->ip_blocks[i].status.sw = false;
2389		adev->ip_blocks[i].status.valid = false;
2390	}
2391
2392	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2393		if (!adev->ip_blocks[i].status.late_initialized)
2394			continue;
2395		if (adev->ip_blocks[i].version->funcs->late_fini)
2396			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2397		adev->ip_blocks[i].status.late_initialized = false;
2398	}
2399
2400	amdgpu_ras_fini(adev);
2401
2402	if (amdgpu_sriov_vf(adev))
2403		if (amdgpu_virt_release_full_gpu(adev, false))
2404			DRM_ERROR("failed to release exclusive mode on fini\n");
2405
2406	return 0;
2407}
2408
2409/**
2410 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2411 *
2412 * @work: work_struct.
2413 */
2414static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2415{
2416	struct amdgpu_device *adev =
2417		container_of(work, struct amdgpu_device, delayed_init_work.work);
2418	int r;
2419
2420	r = amdgpu_ib_ring_tests(adev);
2421	if (r)
2422		DRM_ERROR("ib ring test failed (%d).\n", r);
2423}
2424
2425static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2426{
2427	struct amdgpu_device *adev =
2428		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2429
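	/* Only enter GFXOFF if no caller still holds a disable request. */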
2430	mutex_lock(&adev->gfx.gfx_off_mutex);
2431	if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2432		if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2433			adev->gfx.gfx_off_state = true;
2434	}
2435	mutex_unlock(&adev->gfx.gfx_off_mutex);
2436}
2437
2438/**
2439 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2440 *
2441 * @adev: amdgpu_device pointer
2442 *
2443 * Main suspend function for hardware IPs.  The list of all the hardware
2444 * IPs that make up the asic is walked, clockgating is disabled and the
2445 * suspend callbacks are run.  suspend puts the hardware and software state
2446 * in each IP into a state suitable for suspend.
2447 * Returns 0 on success, negative error code on failure.
2448 */
2449static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2450{
2451	int i, r;
2452
2453	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2454	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2455
2456	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2457		if (!adev->ip_blocks[i].status.valid)
2458			continue;
2459
2460		/* displays are handled separately */
2461		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2462			continue;
2463
2465		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2466		/* XXX handle errors */
2467		if (r) {
2468			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2469				  adev->ip_blocks[i].version->funcs->name, r);
2470			return r;
2471		}
2472
2473		adev->ip_blocks[i].status.hw = false;
2474	}
2475
2476	return 0;
2477}
2478
2479/**
2480 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2481 *
2482 * @adev: amdgpu_device pointer
2483 *
2484 * Main suspend function for hardware IPs.  The list of all the hardware
2485 * IPs that make up the asic is walked, clockgating is disabled and the
2486 * suspend callbacks are run.  suspend puts the hardware and software state
2487 * in each IP into a state suitable for suspend.
2488 * Returns 0 on success, negative error code on failure.
2489 */
2490static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2491{
2492	int i, r;
2493
2494	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2495		if (!adev->ip_blocks[i].status.valid)
2496			continue;
2497		/* displays are handled in phase1 */
2498		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2499			continue;
2500		/* PSP lost connection when err_event_athub occurs */
2501		if (amdgpu_ras_intr_triggered() &&
2502		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2503			adev->ip_blocks[i].status.hw = false;
2504			continue;
2505		}
2507		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2508		/* XXX handle errors */
2509		if (r) {
2510			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2511				  adev->ip_blocks[i].version->funcs->name, r);
2512		}
2513		adev->ip_blocks[i].status.hw = false;
2514		/* handle putting the SMC in the appropriate state */
2515		if (!amdgpu_sriov_vf(adev)) {
2516			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2517				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2518				if (r) {
2519					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2520							adev->mp1_state, r);
2521					return r;
2522				}
2523			}
2524		}
2526	}
2527
2528	return 0;
2529}
2530
2531/**
2532 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2533 *
2534 * @adev: amdgpu_device pointer
2535 *
2536 * Main suspend function for hardware IPs.  The list of all the hardware
2537 * IPs that make up the asic is walked, clockgating is disabled and the
2538 * suspend callbacks are run.  suspend puts the hardware and software state
2539 * in each IP into a state suitable for suspend.
2540 * Returns 0 on success, negative error code on failure.
2541 */
2542int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2543{
2544	int r;
2545
2546	if (amdgpu_sriov_vf(adev))
2547		amdgpu_virt_request_full_gpu(adev, false);
2548
2549	r = amdgpu_device_ip_suspend_phase1(adev);
2550	if (r)
2551		return r;
2552	r = amdgpu_device_ip_suspend_phase2(adev);
2553
2554	if (amdgpu_sriov_vf(adev))
2555		amdgpu_virt_release_full_gpu(adev, false);
2556
2557	return r;
2558}
2559
2560static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2561{
2562	int i, r;
2563
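	/*
	 * After a VF reset only the base blocks are re-initialized here, in
	 * explicit dependency order; the remaining blocks are handled by the
	 * separate late SR-IOV re-init pass.
	 */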
2564	static enum amd_ip_block_type ip_order[] = {
2565		AMD_IP_BLOCK_TYPE_GMC,
2566		AMD_IP_BLOCK_TYPE_COMMON,
2567		AMD_IP_BLOCK_TYPE_PSP,
2568		AMD_IP_BLOCK_TYPE_IH,
2569	};
2570
2571	for (i = 0; i < adev->num_ip_blocks; i++)
2572		adev->ip_blocks[i].status.hw = false;
2573
2574	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2575		int j;
2576		struct amdgpu_ip_block *block;
2577
2578		for (j = 0; j < adev->num_ip_blocks; j++) {
2579			block = &adev->ip_blocks[j];
2580
2581			if (block->version->type != ip_order[i] ||
2582				!block->status.valid)
2583				continue;
2584
2585			r = block->version->funcs->hw_init(adev);
2586			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2587			if (r)
2588				return r;
2589			block->status.hw = true;
2590		}
2591	}
2592
2593	return 0;
2594}
2595
2596static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2597{
2598	int i, r;
2599
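	/*
	 * Re-init the remaining blocks in a fixed order.  The SMC block is
	 * resumed rather than fully re-initialized.
	 */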
2600	static enum amd_ip_block_type ip_order[] = {
2601		AMD_IP_BLOCK_TYPE_SMC,
2602		AMD_IP_BLOCK_TYPE_DCE,
2603		AMD_IP_BLOCK_TYPE_GFX,
2604		AMD_IP_BLOCK_TYPE_SDMA,
2605		AMD_IP_BLOCK_TYPE_UVD,
2606		AMD_IP_BLOCK_TYPE_VCE,
2607		AMD_IP_BLOCK_TYPE_VCN
2608	};
2609
2610	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2611		int j;
2612		struct amdgpu_ip_block *block;
2613
2614		for (j = 0; j < adev->num_ip_blocks; j++) {
2615			block = &adev->ip_blocks[j];
2616
2617			if (block->version->type != ip_order[i] ||
2618				!block->status.valid ||
2619				block->status.hw)
2620				continue;
2621
2622			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
2623				r = block->version->funcs->resume(adev);
2624			else
2625				r = block->version->funcs->hw_init(adev);
2626
2627			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2628			if (r)
2629				return r;
2630			block->status.hw = true;
2631		}
2632	}
2633
2634	return 0;
2635}
2636
2637/**
2638 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2639 *
2640 * @adev: amdgpu_device pointer
2641 *
2642 * First resume function for hardware IPs.  The list of all the hardware
2643 * IPs that make up the asic is walked and the resume callbacks are run for
2644 * COMMON, GMC, and IH.  resume puts the hardware into a functional state
2645 * after a suspend and updates the software state as necessary.  This
2646 * function is also used for restoring the GPU after a GPU reset.
2647 * Returns 0 on success, negative error code on failure.
2648 */
2649static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2650{
2651	int i, r;
2652
2653	for (i = 0; i < adev->num_ip_blocks; i++) {
2654		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2655			continue;
2656		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2657		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2658		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2659
2660			r = adev->ip_blocks[i].version->funcs->resume(adev);
2661			if (r) {
2662				DRM_ERROR("resume of IP block <%s> failed %d\n",
2663					  adev->ip_blocks[i].version->funcs->name, r);
2664				return r;
2665			}
2666			adev->ip_blocks[i].status.hw = true;
2667		}
2668	}
2669
2670	return 0;
2671}
2672
2673/**
2674 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2675 *
2676 * @adev: amdgpu_device pointer
2677 *
2678 * Second resume function for hardware IPs.  The list of all the hardware
2679 * IPs that make up the asic is walked and the resume callbacks are run for
2680 * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
2681 * functional state after a suspend and updates the software state as
2682 * necessary.  This function is also used for restoring the GPU after a GPU
2683 * reset.
2684 * Returns 0 on success, negative error code on failure.
2685 */
2686static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2687{
2688	int i, r;
2689
2690	for (i = 0; i < adev->num_ip_blocks; i++) {
2691		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2692			continue;
2693		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2694		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2695		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2696		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2697			continue;
2698		r = adev->ip_blocks[i].version->funcs->resume(adev);
2699		if (r) {
2700			DRM_ERROR("resume of IP block <%s> failed %d\n",
2701				  adev->ip_blocks[i].version->funcs->name, r);
2702			return r;
2703		}
2704		adev->ip_blocks[i].status.hw = true;
2705	}
2706
2707	return 0;
2708}
2709
2710/**
2711 * amdgpu_device_ip_resume - run resume for hardware IPs
2712 *
2713 * @adev: amdgpu_device pointer
2714 *
2715 * Main resume function for hardware IPs.  The hardware IPs
2716 * are split into two resume functions because they are
2717 * also used in recovering from a GPU reset and some additional
2718 * steps need to be taken between them.  In this case (S3/S4) they are
2719 * run sequentially.
2720 * Returns 0 on success, negative error code on failure.
2721 */
2722static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2723{
2724	int r;
2725
2726	r = amdgpu_device_ip_resume_phase1(adev);
2727	if (r)
2728		return r;
2729
2730	r = amdgpu_device_fw_loading(adev);
2731	if (r)
2732		return r;
2733
2734	r = amdgpu_device_ip_resume_phase2(adev);
2735
2736	return r;
2737}
2738
2739/**
2740 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2741 *
2742 * @adev: amdgpu_device pointer
2743 *
2744 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2745 */
2746static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2747{
2748	if (amdgpu_sriov_vf(adev)) {
2749		if (adev->is_atom_fw) {
2750			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2751				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2752		} else {
2753			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2754				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2755		}
2756
2757		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2758			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2759	}
2760}
2761
2762/**
2763 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2764 *
2765 * @asic_type: AMD asic type
2766 *
2767 * Check if there is DC (new modesetting infrastructure) support for an asic.
2768 * Returns true if DC has support, false if not.
2769 */
2770bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2771{
2772	switch (asic_type) {
2773#if defined(CONFIG_DRM_AMD_DC)
2774	case CHIP_BONAIRE:
2775	case CHIP_KAVERI:
2776	case CHIP_KABINI:
2777	case CHIP_MULLINS:
2778		/*
2779		 * We have systems in the wild with these ASICs that require
2780		 * LVDS and VGA support which is not supported with DC.
2781		 *
2782		 * Fallback to the non-DC driver here by default so as not to
2783		 * cause regressions.
2784		 */
2785		return amdgpu_dc > 0;
2786	case CHIP_HAWAII:
2787	case CHIP_CARRIZO:
2788	case CHIP_STONEY:
2789	case CHIP_POLARIS10:
2790	case CHIP_POLARIS11:
2791	case CHIP_POLARIS12:
2792	case CHIP_VEGAM:
2793	case CHIP_TONGA:
2794	case CHIP_FIJI:
2795	case CHIP_VEGA10:
2796	case CHIP_VEGA12:
2797	case CHIP_VEGA20:
2798#if defined(CONFIG_DRM_AMD_DC_DCN)
2799	case CHIP_RAVEN:
2800	case CHIP_NAVI10:
2801	case CHIP_NAVI14:
2802	case CHIP_NAVI12:
2803	case CHIP_RENOIR:
2804#endif
2805#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
2806	case CHIP_SIENNA_CICHLID:
2807	case CHIP_NAVY_FLOUNDER:
2808#endif
2809		return amdgpu_dc != 0;
2810#endif
2811	default:
2812		if (amdgpu_dc > 0)
2813			DRM_INFO("Display Core has been requested via kernel parameter "
2814					 "but isn't supported by ASIC, ignoring\n");
2815		return false;
2816	}
2817}
2818
2819/**
2820 * amdgpu_device_has_dc_support - check if dc is supported
2821 *
2822 * @adev: amdgpu_device pointer
2823 *
2824 * Returns true for supported, false for not supported
2825 */
2826bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2827{
2828	if (amdgpu_sriov_vf(adev))
2829		return false;
2830
2831	return amdgpu_device_asic_has_dc_support(adev->asic_type);
2832}
2833
2834
2835static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
2836{
2837	struct amdgpu_device *adev =
2838		container_of(__work, struct amdgpu_device, xgmi_reset_work);
2839	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
2840
2841	/* It's a bug to not have a hive within this function */
2842	if (WARN_ON(!hive))
2843		return;
2844
2845	/*
2846	 * Use task barrier to synchronize all xgmi reset works across the
2847	 * hive. task_barrier_enter and task_barrier_exit will block
2848	 * until all the threads running the xgmi reset works reach
2849	 * those points. task_barrier_full will do both blocks.
2850	 */
2851	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
2852
2853		task_barrier_enter(&hive->tb);
2854		adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
2855
2856		if (adev->asic_reset_res)
2857			goto fail;
2858
2859		task_barrier_exit(&hive->tb);
2860		adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
2861
2862		if (adev->asic_reset_res)
2863			goto fail;
2864
2865		if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
2866			adev->mmhub.funcs->reset_ras_error_count(adev);
2867	} else {
2868
2869		task_barrier_full(&hive->tb);
2870		adev->asic_reset_res = amdgpu_asic_reset(adev);
2871	}
2872
2873fail:
2874	if (adev->asic_reset_res)
2875		DRM_WARN("ASIC reset failed with error %d for drm dev %s",
2876			 adev->asic_reset_res, adev->ddev->unique);
2877}
2878
2879static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
2880{
2881	char *input = amdgpu_lockup_timeout;
2882	char *timeout_setting = NULL;
2883	int index = 0;
2884	long timeout;
2885	int ret = 0;
2886
2887	/*
2888	 * By default the timeout for non-compute jobs is 10000 ms
2889	 * and there is no timeout enforced on compute jobs.
2890	 * In SR-IOV or passthrough mode, the timeout for compute
2891	 * jobs is 60000 ms by default.
2892	 */
2893	adev->gfx_timeout = msecs_to_jiffies(10000);
2894	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
2895	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
2896		adev->compute_timeout = msecs_to_jiffies(60000);
2897	else
2898		adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
2899
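	/*
	 * amdgpu.lockup_timeout is an ordered, comma-separated list:
	 * gfx, compute, sdma, video.  0 keeps the default above, a negative
	 * value disables the timeout (MAX_SCHEDULE_TIMEOUT).
	 */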
2900	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
2901		while ((timeout_setting = strsep(&input, ",")) &&
2902				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
2903			ret = kstrtol(timeout_setting, 0, &timeout);
2904			if (ret)
2905				return ret;
2906
2907			if (timeout == 0) {
2908				index++;
2909				continue;
2910			} else if (timeout < 0) {
2911				timeout = MAX_SCHEDULE_TIMEOUT;
2912			} else {
2913				timeout = msecs_to_jiffies(timeout);
2914			}
2915
2916			switch (index++) {
2917			case 0:
2918				adev->gfx_timeout = timeout;
2919				break;
2920			case 1:
2921				adev->compute_timeout = timeout;
2922				break;
2923			case 2:
2924				adev->sdma_timeout = timeout;
2925				break;
2926			case 3:
2927				adev->video_timeout = timeout;
2928				break;
2929			default:
2930				break;
2931			}
2932		}
2933		/*
2934		 * There is only one value specified and
2935		 * it should apply to all non-compute jobs.
2936		 */
2937		if (index == 1) {
2938			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
2939			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
2940				adev->compute_timeout = adev->gfx_timeout;
2941		}
2942	}
2943
2944	return ret;
2945}
2946
2947static const struct attribute *amdgpu_dev_attributes[] = {
2948	&dev_attr_product_name.attr,
2949	&dev_attr_product_number.attr,
2950	&dev_attr_serial_number.attr,
2951	&dev_attr_pcie_replay_count.attr,
2952	NULL
2953};
2954
2955/**
2956 * amdgpu_device_init - initialize the driver
2957 *
2958 * @adev: amdgpu_device pointer
2959 * @ddev: drm dev pointer
2960 * @pdev: pci dev pointer
2961 * @flags: driver flags
2962 *
2963 * Initializes the driver info and hw (all asics).
2964 * Returns 0 for success or an error on failure.
2965 * Called at driver startup.
2966 */
2967int amdgpu_device_init(struct amdgpu_device *adev,
2968		       struct drm_device *ddev,
2969		       struct pci_dev *pdev,
2970		       uint32_t flags)
2971{
2972	int r, i;
2973	bool boco = false;
2974	u32 max_MBps;
2975
2976	adev->shutdown = false;
2977	adev->dev = &pdev->dev;
2978	adev->ddev = ddev;
2979	adev->pdev = pdev;
2980	adev->flags = flags;
2981
2982	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
2983		adev->asic_type = amdgpu_force_asic_type;
2984	else
2985		adev->asic_type = flags & AMD_ASIC_MASK;
2986
2987	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2988	if (amdgpu_emu_mode == 1)
2989		adev->usec_timeout *= 10;
2990	adev->gmc.gart_size = 512 * 1024 * 1024;
2991	adev->accel_working = false;
2992	adev->num_rings = 0;
2993	adev->mman.buffer_funcs = NULL;
2994	adev->mman.buffer_funcs_ring = NULL;
2995	adev->vm_manager.vm_pte_funcs = NULL;
2996	adev->vm_manager.vm_pte_num_scheds = 0;
2997	adev->gmc.gmc_funcs = NULL;
2998	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2999	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3000
3001	adev->smc_rreg = &amdgpu_invalid_rreg;
3002	adev->smc_wreg = &amdgpu_invalid_wreg;
3003	adev->pcie_rreg = &amdgpu_invalid_rreg;
3004	adev->pcie_wreg = &amdgpu_invalid_wreg;
3005	adev->pciep_rreg = &amdgpu_invalid_rreg;
3006	adev->pciep_wreg = &amdgpu_invalid_wreg;
3007	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3008	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3009	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3010	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3011	adev->didt_rreg = &amdgpu_invalid_rreg;
3012	adev->didt_wreg = &amdgpu_invalid_wreg;
3013	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3014	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3015	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3016	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3017
3018	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3019		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3020		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3021
3022	/* mutex initializations are all done here so we
3023	 * can recall functions without locking issues */
3024	atomic_set(&adev->irq.ih.lock, 0);
3025	mutex_init(&adev->firmware.mutex);
3026	mutex_init(&adev->pm.mutex);
3027	mutex_init(&adev->gfx.gpu_clock_mutex);
3028	mutex_init(&adev->srbm_mutex);
3029	mutex_init(&adev->gfx.pipe_reserve_mutex);
3030	mutex_init(&adev->gfx.gfx_off_mutex);
3031	mutex_init(&adev->grbm_idx_mutex);
3032	mutex_init(&adev->mn_lock);
3033	mutex_init(&adev->virt.vf_errors.lock);
3034	hash_init(adev->mn_hash);
3035	mutex_init(&adev->lock_reset);
3036	mutex_init(&adev->psp.mutex);
3037	mutex_init(&adev->notifier_lock);
3038
3039	r = amdgpu_device_check_arguments(adev);
3040	if (r)
3041		return r;
3042
3043	spin_lock_init(&adev->mmio_idx_lock);
3044	spin_lock_init(&adev->smc_idx_lock);
3045	spin_lock_init(&adev->pcie_idx_lock);
3046	spin_lock_init(&adev->uvd_ctx_idx_lock);
3047	spin_lock_init(&adev->didt_idx_lock);
3048	spin_lock_init(&adev->gc_cac_idx_lock);
3049	spin_lock_init(&adev->se_cac_idx_lock);
3050	spin_lock_init(&adev->audio_endpt_idx_lock);
3051	spin_lock_init(&adev->mm_stats.lock);
3052
3053	INIT_LIST_HEAD(&adev->shadow_list);
3054	mutex_init(&adev->shadow_list_lock);
3055
3056	INIT_DELAYED_WORK(&adev->delayed_init_work,
3057			  amdgpu_device_delayed_init_work_handler);
3058	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3059			  amdgpu_device_delay_enable_gfx_off);
3060
3061	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3062
3063	adev->gfx.gfx_off_req_count = 1;
3064	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3065
3066	atomic_set(&adev->throttling_logging_enabled, 1);
3067	/*
3068	 * If throttling continues, logging will be performed every minute
3069	 * to avoid log flooding. "-1" is subtracted since the thermal
3070	 * throttling interrupt comes every second. Thus, the total logging
3071	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3072	 * for throttling interrupt) = 60 seconds.
3073	 */
3074	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3075	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3076
3077	/* Registers mapping */
3078	/* TODO: block userspace mapping of io register */
3079	if (adev->asic_type >= CHIP_BONAIRE) {
3080		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3081		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3082	} else {
3083		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3084		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3085	}
3086
3087	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3088	if (adev->rmmio == NULL) {
3089		return -ENOMEM;
3090	}
3091	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3092	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3093
3094	/* io port mapping */
3095	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3096		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
3097			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
3098			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
3099			break;
3100		}
3101	}
3102	if (adev->rio_mem == NULL)
3103		DRM_INFO("PCI I/O BAR is not found.\n");
3104
3105	/* enable PCIE atomic ops */
3106	r = pci_enable_atomic_ops_to_root(adev->pdev,
3107					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3108					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3109	if (r) {
3110		adev->have_atomics_support = false;
3111		DRM_INFO("PCIE atomic ops are not supported\n");
3112	} else {
3113		adev->have_atomics_support = true;
3114	}
3115
3116	amdgpu_device_get_pcie_info(adev);
3117
3118	if (amdgpu_mcbp)
3119		DRM_INFO("MCBP is enabled\n");
3120
3121	if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3122		adev->enable_mes = true;
3123
3124	/* detect hw virtualization here */
3125	amdgpu_detect_virtualization(adev);
3126
3127	r = amdgpu_device_get_job_timeout_settings(adev);
3128	if (r) {
3129		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3130		return r;
3131	}
3132
3133	/* early init functions */
3134	r = amdgpu_device_ip_early_init(adev);
3135	if (r)
3136		return r;
3137
3138	/* doorbell bar mapping and doorbell index init */
3139	amdgpu_device_doorbell_init(adev);
3140
3141	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3142	/* this will fail for cards that aren't VGA class devices, just
3143	 * ignore it */
3144	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3145
3146	if (amdgpu_device_supports_boco(ddev))
3147		boco = true;
3148	if (amdgpu_has_atpx() &&
3149	    (amdgpu_is_atpx_hybrid() ||
3150	     amdgpu_has_atpx_dgpu_power_cntl()) &&
3151	    !pci_is_thunderbolt_attached(adev->pdev))
3152		vga_switcheroo_register_client(adev->pdev,
3153					       &amdgpu_switcheroo_ops, boco);
3154	if (boco)
3155		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3156
3157	if (amdgpu_emu_mode == 1) {
3158		/* post the asic on emulation mode */
3159		emu_soc_asic_init(adev);
3160		goto fence_driver_init;
3161	}
3162
3163	/* detect if we are running with an SR-IOV vBIOS */
3164	amdgpu_device_detect_sriov_bios(adev);
3165
3166	/* check if we need to reset the asic
3167	 *  E.g., driver was not cleanly unloaded previously, etc.
3168	 */
3169	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3170		r = amdgpu_asic_reset(adev);
3171		if (r) {
3172			dev_err(adev->dev, "asic reset on init failed\n");
3173			goto failed;
3174		}
3175	}
3176
3177	/* Post card if necessary */
3178	if (amdgpu_device_need_post(adev)) {
3179		if (!adev->bios) {
3180			dev_err(adev->dev, "no vBIOS found\n");
3181			r = -EINVAL;
3182			goto failed;
3183		}
3184		DRM_INFO("GPU posting now...\n");
3185		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
3186		if (r) {
3187			dev_err(adev->dev, "gpu post error!\n");
3188			goto failed;
3189		}
3190	}
3191
3192	if (adev->is_atom_fw) {
3193		/* Initialize clocks */
3194		r = amdgpu_atomfirmware_get_clock_info(adev);
3195		if (r) {
3196			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3197			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3198			goto failed;
3199		}
3200	} else {
3201		/* Initialize clocks */
3202		r = amdgpu_atombios_get_clock_info(adev);
3203		if (r) {
3204			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3205			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3206			goto failed;
3207		}
3208		/* init i2c buses */
3209		if (!amdgpu_device_has_dc_support(adev))
3210			amdgpu_atombios_i2c_init(adev);
3211	}
3212
3213fence_driver_init:
3214	/* Fence driver */
3215	r = amdgpu_fence_driver_init(adev);
3216	if (r) {
3217		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
3218		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3219		goto failed;
3220	}
3221
3222	/* init the mode config */
3223	drm_mode_config_init(adev->ddev);
3224
3225	r = amdgpu_device_ip_init(adev);
3226	if (r) {
3227		/* failed in exclusive mode due to timeout */
3228		if (amdgpu_sriov_vf(adev) &&
3229		    !amdgpu_sriov_runtime(adev) &&
3230		    amdgpu_virt_mmio_blocked(adev) &&
3231		    !amdgpu_virt_wait_reset(adev)) {
3232			dev_err(adev->dev, "VF exclusive mode timeout\n");
3233			/* Don't send request since VF is inactive. */
3234			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3235			adev->virt.ops = NULL;
3236			r = -EAGAIN;
3237			goto failed;
3238		}
3239		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3240		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3241		goto failed;
3242	}
3243
3244	dev_info(adev->dev,
3245		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3246			adev->gfx.config.max_shader_engines,
3247			adev->gfx.config.max_sh_per_se,
3248			adev->gfx.config.max_cu_per_sh,
3249			adev->gfx.cu_info.number);
3250
3251	adev->accel_working = true;
3252
3253	amdgpu_vm_check_compute_bug(adev);
3254
3255	/* Initialize the buffer migration limit. */
3256	if (amdgpu_moverate >= 0)
3257		max_MBps = amdgpu_moverate;
3258	else
3259		max_MBps = 8; /* Allow 8 MB/s. */
3260	/* Get a log2 for easy divisions. */
3261	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3262
3263	amdgpu_fbdev_init(adev);
3264
3265	r = amdgpu_pm_sysfs_init(adev);
3266	if (r) {
3267		adev->pm_sysfs_en = false;
3268		DRM_ERROR("registering pm sysfs failed (%d).\n", r);
3269	} else
3270		adev->pm_sysfs_en = true;
3271
3272	r = amdgpu_ucode_sysfs_init(adev);
3273	if (r) {
3274		adev->ucode_sysfs_en = false;
3275		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3276	} else
3277		adev->ucode_sysfs_en = true;
3278
3279	if ((amdgpu_testing & 1)) {
3280		if (adev->accel_working)
3281			amdgpu_test_moves(adev);
3282		else
3283			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3284	}
3285	if (amdgpu_benchmarking) {
3286		if (adev->accel_working)
3287			amdgpu_benchmark(adev, amdgpu_benchmarking);
3288		else
3289			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3290	}
3291
3292	/*
3293	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3294	 * Otherwise the mgpu fan boost feature will be skipped because the
3295	 * gpu instance count will be too low.
3296	 */
3297	amdgpu_register_gpu_instance(adev);
3298
3299	/* enable clockgating, etc. after ib tests, etc. since some blocks require
3300	 * explicit gating rather than handling it automatically.
3301	 */
3302	r = amdgpu_device_ip_late_init(adev);
3303	if (r) {
3304		dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3305		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3306		goto failed;
3307	}
3308
3309	/* must succeed. */
3310	amdgpu_ras_resume(adev);
3311
3312	queue_delayed_work(system_wq, &adev->delayed_init_work,
3313			   msecs_to_jiffies(AMDGPU_RESUME_MS));
3314
3315	if (amdgpu_sriov_vf(adev))
3316		flush_delayed_work(&adev->delayed_init_work);
3317
3318	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3319	if (r) {
3320		dev_err(adev->dev, "Could not create amdgpu device attr\n");
3321		return r;
3322	}
3323
3324	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3325		r = amdgpu_pmu_init(adev);
3326		if (r)
3327			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}
3328
3329	return 0;
3330
3331failed:
3332	amdgpu_vf_error_trans_all(adev);
3333	if (boco)
3334		vga_switcheroo_fini_domain_pm_ops(adev->dev);
3335
3336	return r;
3337}
3338
3339/**
3340 * amdgpu_device_fini - tear down the driver
3341 *
3342 * @adev: amdgpu_device pointer
3343 *
3344 * Tear down the driver info (all asics).
3345 * Called at driver shutdown.
3346 */
3347void amdgpu_device_fini(struct amdgpu_device *adev)
3348{
3349	int r;
3350
3351	DRM_INFO("amdgpu: finishing device.\n");
3352	flush_delayed_work(&adev->delayed_init_work);
3353	adev->shutdown = true;
3354
3355	/* make sure IB tests have finished before entering exclusive mode
3356	 * to avoid preemption on IB test
3357	 */
3358	if (amdgpu_sriov_vf(adev))
3359		amdgpu_virt_request_full_gpu(adev, false);
3360
3361	/* disable all interrupts */
3362	amdgpu_irq_disable_all(adev);
3363	if (adev->mode_info.mode_config_initialized) {
3364		if (!amdgpu_device_has_dc_support(adev))
3365			drm_helper_force_disable_all(adev->ddev);
3366		else
3367			drm_atomic_helper_shutdown(adev->ddev);
3368	}
3369	amdgpu_fence_driver_fini(adev);
3370	if (adev->pm_sysfs_en)
3371		amdgpu_pm_sysfs_fini(adev);
3372	amdgpu_fbdev_fini(adev);
3373	r = amdgpu_device_ip_fini(adev);
3374	release_firmware(adev->firmware.gpu_info_fw);
3375	adev->firmware.gpu_info_fw = NULL;
3376	adev->accel_working = false;
3377	/* free i2c buses */
3378	if (!amdgpu_device_has_dc_support(adev))
3379		amdgpu_i2c_fini(adev);
3380
3381	if (amdgpu_emu_mode != 1)
3382		amdgpu_atombios_fini(adev);
3383
3384	kfree(adev->bios);
3385	adev->bios = NULL;
3386	if (amdgpu_has_atpx() &&
3387	    (amdgpu_is_atpx_hybrid() ||
3388	     amdgpu_has_atpx_dgpu_power_cntl()) &&
3389	    !pci_is_thunderbolt_attached(adev->pdev))
3390		vga_switcheroo_unregister_client(adev->pdev);
3391	if (amdgpu_device_supports_boco(adev->ddev))
3392		vga_switcheroo_fini_domain_pm_ops(adev->dev);
3393	vga_client_register(adev->pdev, NULL, NULL, NULL);
3394	if (adev->rio_mem)
3395		pci_iounmap(adev->pdev, adev->rio_mem);
3396	adev->rio_mem = NULL;
3397	iounmap(adev->rmmio);
3398	adev->rmmio = NULL;
3399	amdgpu_device_doorbell_fini(adev);
3400
3401	if (adev->ucode_sysfs_en)
3402		amdgpu_ucode_sysfs_fini(adev);
3403
3404	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3405	if (IS_ENABLED(CONFIG_PERF_EVENTS))
3406		amdgpu_pmu_fini(adev);
3407	if (adev->discovery_bin)
3408		amdgpu_discovery_fini(adev);
3409}
3410
3411
3412/*
3413 * Suspend & resume.
3414 */
3415/**
3416 * amdgpu_device_suspend - initiate device suspend
3417 *
3418 * @dev: drm dev pointer
3419 * @fbcon: notify the fbdev of suspend
3420 *
3421 * Puts the hw in the suspend state (all asics).
3422 * Returns 0 for success or an error on failure.
3423 * Called at driver suspend.
3424 */
3425int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3426{
3427	struct amdgpu_device *adev;
3428	struct drm_crtc *crtc;
3429	struct drm_connector *connector;
3430	struct drm_connector_list_iter iter;
3431	int r;
3432
3433	if (dev == NULL || dev->dev_private == NULL) {
3434		return -ENODEV;
3435	}
3436
3437	adev = dev->dev_private;
3438
3439	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3440		return 0;
3441
3442	adev->in_suspend = true;
3443	drm_kms_helper_poll_disable(dev);
3444
3445	if (fbcon)
3446		amdgpu_fbdev_set_suspend(adev, 1);
3447
3448	cancel_delayed_work_sync(&adev->delayed_init_work);
3449
3450	if (!amdgpu_device_has_dc_support(adev)) {
3451		/* turn off display hw */
3452		drm_modeset_lock_all(dev);
3453		drm_connector_list_iter_begin(dev, &iter);
3454		drm_for_each_connector_iter(connector, &iter)
3455			drm_helper_connector_dpms(connector,
3456						  DRM_MODE_DPMS_OFF);
3457		drm_connector_list_iter_end(&iter);
3458		drm_modeset_unlock_all(dev);
3459		/* unpin the front buffers and cursors */
3460		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3461			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3462			struct drm_framebuffer *fb = crtc->primary->fb;
3463			struct amdgpu_bo *robj;
3464
3465			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3466				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3467				r = amdgpu_bo_reserve(aobj, true);
3468				if (r == 0) {
3469					amdgpu_bo_unpin(aobj);
3470					amdgpu_bo_unreserve(aobj);
3471				}
3472			}
3473
3474			if (fb == NULL || fb->obj[0] == NULL) {
3475				continue;
3476			}
3477			robj = gem_to_amdgpu_bo(fb->obj[0]);
3478			/* don't unpin kernel fb objects */
3479			if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3480				r = amdgpu_bo_reserve(robj, true);
3481				if (r == 0) {
3482					amdgpu_bo_unpin(robj);
3483					amdgpu_bo_unreserve(robj);
3484				}
3485			}
3486		}
3487	}
3488
3489	amdgpu_ras_suspend(adev);
3490
3491	r = amdgpu_device_ip_suspend_phase1(adev);
3492
3493	amdgpu_amdkfd_suspend(adev, !fbcon);
3494
3495	/* evict vram memory */
3496	amdgpu_bo_evict_vram(adev);
3497
3498	amdgpu_fence_driver_suspend(adev);
3499
3500	r = amdgpu_device_ip_suspend_phase2(adev);
3501
3502	/* evict remaining vram memory
3503	 * This second call to evict vram is to evict the gart page table
3504	 * using the CPU.
3505	 */
3506	amdgpu_bo_evict_vram(adev);
3507
3508	return 0;
3509}
3510
3511/**
3512 * amdgpu_device_resume - initiate device resume
3513 *
3514 * @dev: drm dev pointer
3515 * @fbcon: notify the fbdev of resume
3516 *
3517 * Bring the hw back to operating state (all asics).
3518 * Returns 0 for success or an error on failure.
3519 * Called at driver resume.
3520 */
3521int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3522{
3523	struct drm_connector *connector;
3524	struct drm_connector_list_iter iter;
3525	struct amdgpu_device *adev = dev->dev_private;
3526	struct drm_crtc *crtc;
3527	int r = 0;
3528
3529	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3530		return 0;
3531
3532	/* post card */
3533	if (amdgpu_device_need_post(adev)) {
3534		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
3535		if (r)
3536			DRM_ERROR("amdgpu asic init failed\n");
3537	}
3538
3539	r = amdgpu_device_ip_resume(adev);
3540	if (r) {
3541		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
3542		return r;
3543	}
3544	amdgpu_fence_driver_resume(adev);
3545
3546
3547	r = amdgpu_device_ip_late_init(adev);
3548	if (r)
3549		return r;
3550
3551	queue_delayed_work(system_wq, &adev->delayed_init_work,
3552			   msecs_to_jiffies(AMDGPU_RESUME_MS));
3553
3554	if (!amdgpu_device_has_dc_support(adev)) {
3555		/* pin cursors */
3556		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3557			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3558
3559			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3560				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3561				r = amdgpu_bo_reserve(aobj, true);
3562				if (r == 0) {
3563					r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3564					if (r != 0)
3565						DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
3566					amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3567					amdgpu_bo_unreserve(aobj);
3568				}
3569			}
3570		}
3571	}
3572	r = amdgpu_amdkfd_resume(adev, !fbcon);
3573	if (r)
3574		return r;
3575
3576	/* Make sure IB tests flushed */
3577	flush_delayed_work(&adev->delayed_init_work);
3578
3579	/* blat the mode back in */
3580	if (fbcon) {
3581		if (!amdgpu_device_has_dc_support(adev)) {
3582			/* pre DCE11 */
3583			drm_helper_resume_force_mode(dev);
3584
3585			/* turn on display hw */
3586			drm_modeset_lock_all(dev);
3587
3588			drm_connector_list_iter_begin(dev, &iter);
3589			drm_for_each_connector_iter(connector, &iter)
3590				drm_helper_connector_dpms(connector,
3591							  DRM_MODE_DPMS_ON);
3592			drm_connector_list_iter_end(&iter);
3593
3594			drm_modeset_unlock_all(dev);
3595		}
3596		amdgpu_fbdev_set_suspend(adev, 0);
3597	}
3598
3599	drm_kms_helper_poll_enable(dev);
3600
3601	amdgpu_ras_resume(adev);
3602
3603	/*
3604	 * Most of the connector probing functions try to acquire runtime pm
3605	 * refs to ensure that the GPU is powered on when connector polling is
3606	 * performed. Since we're calling this from a runtime PM callback,
3607	 * trying to acquire rpm refs will cause us to deadlock.
3608	 *
3609	 * Since we're guaranteed to be holding the rpm lock, it's safe to
3610	 * temporarily disable the rpm helpers so this doesn't deadlock us.
3611	 */
3612#ifdef CONFIG_PM
3613	dev->dev->power.disable_depth++;
3614#endif
3615	if (!amdgpu_device_has_dc_support(adev))
3616		drm_helper_hpd_irq_event(dev);
3617	else
3618		drm_kms_helper_hotplug_event(dev);
3619#ifdef CONFIG_PM
3620	dev->dev->power.disable_depth--;
3621#endif
3622	adev->in_suspend = false;
3623
3624	return 0;
3625}
3626
3627/**
3628 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3629 *
3630 * @adev: amdgpu_device pointer
3631 *
3632 * The list of all the hardware IPs that make up the asic is walked and
3633 * the check_soft_reset callbacks are run.  check_soft_reset determines
3634 * if the asic is still hung or not.
3635 * Returns true if any of the IPs are still in a hung state, false if not.
3636 */
3637static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3638{
3639	int i;
3640	bool asic_hang = false;
3641
3642	if (amdgpu_sriov_vf(adev))
3643		return true;
3644
3645	if (amdgpu_asic_need_full_reset(adev))
3646		return true;
3647
3648	for (i = 0; i < adev->num_ip_blocks; i++) {
3649		if (!adev->ip_blocks[i].status.valid)
3650			continue;
3651		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3652			adev->ip_blocks[i].status.hang =
3653				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3654		if (adev->ip_blocks[i].status.hang) {
3655			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3656			asic_hang = true;
3657		}
3658	}
3659	return asic_hang;
3660}
3661
3662/**
3663 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3664 *
3665 * @adev: amdgpu_device pointer
3666 *
3667 * The list of all the hardware IPs that make up the asic is walked and the
3668 * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
3669 * handles any IP specific hardware or software state changes that are
3670 * necessary for a soft reset to succeed.
3671 * Returns 0 on success, negative error code on failure.
3672 */
3673static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3674{
3675	int i, r = 0;
3676
3677	for (i = 0; i < adev->num_ip_blocks; i++) {
3678		if (!adev->ip_blocks[i].status.valid)
3679			continue;
3680		if (adev->ip_blocks[i].status.hang &&
3681		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3682			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3683			if (r)
3684				return r;
3685		}
3686	}
3687
3688	return 0;
3689}
3690
3691/**
3692 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3693 *
3694 * @adev: amdgpu_device pointer
3695 *
3696 * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
3697 * reset is necessary to recover.
3698 * Returns true if a full asic reset is required, false if not.
3699 */
3700static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3701{
3702	int i;
3703
3704	if (amdgpu_asic_need_full_reset(adev))
3705		return true;
3706
3707	for (i = 0; i < adev->num_ip_blocks; i++) {
3708		if (!adev->ip_blocks[i].status.valid)
3709			continue;
3710		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3711		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3712		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3713		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3714		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3715			if (adev->ip_blocks[i].status.hang) {
3716				DRM_INFO("Some block needs full reset!\n");
3717				return true;
3718			}
3719		}
3720	}
3721	return false;
3722}
3723
3724/**
3725 * amdgpu_device_ip_soft_reset - do a soft reset
3726 *
3727 * @adev: amdgpu_device pointer
3728 *
3729 * The list of all the hardware IPs that make up the asic is walked and the
3730 * soft_reset callbacks are run if the block is hung.  soft_reset handles any
3731 * IP specific hardware or software state changes that are necessary to soft
3732 * reset the IP.
3733 * Returns 0 on success, negative error code on failure.
3734 */
3735static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3736{
3737	int i, r = 0;
3738
3739	for (i = 0; i < adev->num_ip_blocks; i++) {
3740		if (!adev->ip_blocks[i].status.valid)
3741			continue;
3742		if (adev->ip_blocks[i].status.hang &&
3743		    adev->ip_blocks[i].version->funcs->soft_reset) {
3744			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3745			if (r)
3746				return r;
3747		}
3748	}
3749
3750	return 0;
3751}
3752
3753/**
3754 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3755 *
3756 * @adev: amdgpu_device pointer
3757 *
3758 * The list of all the hardware IPs that make up the asic is walked and the
3759 * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
3760 * handles any IP specific hardware or software state changes that are
3761 * necessary after the IP has been soft reset.
3762 * Returns 0 on success, negative error code on failure.
3763 */
3764static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
3765{
3766	int i, r = 0;
3767
3768	for (i = 0; i < adev->num_ip_blocks; i++) {
3769		if (!adev->ip_blocks[i].status.valid)
3770			continue;
3771		if (adev->ip_blocks[i].status.hang &&
3772		    adev->ip_blocks[i].version->funcs->post_soft_reset)
3773			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
3774		if (r)
3775			return r;
3776	}
3777
3778	return 0;
3779}
3780
3781/**
3782 * amdgpu_device_recover_vram - Recover some VRAM contents
3783 *
3784 * @adev: amdgpu_device pointer
3785 *
3786 * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
3787 * restore things like GPUVM page tables after a GPU reset where
3788 * the contents of VRAM might be lost.
3789 *
3790 * Returns:
3791 * 0 on success, negative error code on failure.
3792 */
3793static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
3794{
3795	struct dma_fence *fence = NULL, *next = NULL;
3796	struct amdgpu_bo *shadow;
3797	long r = 1, tmo;
3798
3799	if (amdgpu_sriov_runtime(adev))
3800		tmo = msecs_to_jiffies(8000);
3801	else
3802		tmo = msecs_to_jiffies(100);
3803
3804	DRM_INFO("recover vram bo from shadow start\n");
3805	mutex_lock(&adev->shadow_list_lock);
3806	list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
3807
3808		/* No need to recover an evicted BO */
3809		if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
3810		    shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
3811		    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
3812			continue;
3813
3814		r = amdgpu_bo_restore_shadow(shadow, &next);
3815		if (r)
3816			break;
3817
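		/* Pipeline the restores: wait for the previous copy to finish
		 * while the one just issued runs in the background. */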
3818		if (fence) {
3819			tmo = dma_fence_wait_timeout(fence, false, tmo);
3820			dma_fence_put(fence);
3821			fence = next;
3822			if (tmo == 0) {
3823				r = -ETIMEDOUT;
3824				break;
3825			} else if (tmo < 0) {
3826				r = tmo;
3827				break;
3828			}
3829		} else {
3830			fence = next;
3831		}
3832	}
3833	mutex_unlock(&adev->shadow_list_lock);
3834
3835	if (fence)
3836		tmo = dma_fence_wait_timeout(fence, false, tmo);
3837	dma_fence_put(fence);
3838
3839	if (r < 0 || tmo <= 0) {
3840		DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
3841		return -EIO;
3842	}
3843
3844	DRM_INFO("recover vram bo from shadow done\n");
3845	return 0;
3846}
3847
3848
3849/**
3850 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
3851 *
3852 * @adev: amdgpu device pointer
3853 * @from_hypervisor: request from hypervisor
3854 *
3855 * Do a VF FLR and reinitialize the ASIC.
3856 * Returns 0 on success, an error code otherwise.
3857 */
3858static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3859				     bool from_hypervisor)
3860{
3861	int r;
3862
3863	if (from_hypervisor)
3864		r = amdgpu_virt_request_full_gpu(adev, true);
3865	else
3866		r = amdgpu_virt_reset_gpu(adev);
3867	if (r)
3868		return r;
3869
3870	amdgpu_amdkfd_pre_reset(adev);
3871
3872	/* Resume IP prior to SMC */
3873	r = amdgpu_device_ip_reinit_early_sriov(adev);
3874	if (r)
3875		goto error;
3876
3877	amdgpu_virt_init_data_exchange(adev);
3878	/* we need to recover the GART prior to running SMC/CP/SDMA resume */
3879	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
3880
3881	r = amdgpu_device_fw_loading(adev);
3882	if (r)
3883		return r;
3884
3885	/* now we are okay to resume SMC/CP/SDMA */
3886	r = amdgpu_device_ip_reinit_late_sriov(adev);
3887	if (r)
3888		goto error;
3889
3890	amdgpu_irq_gpu_reset_resume_helper(adev);
3891	r = amdgpu_ib_ring_tests(adev);
3892	amdgpu_amdkfd_post_reset(adev);
3893
3894error:
3895	amdgpu_virt_release_full_gpu(adev, true);
3896	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3897		amdgpu_inc_vram_lost(adev);
3898		r = amdgpu_device_recover_vram(adev);
3899	}
3900
3901	return r;
3902}
3903
3904/**
3905 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
3906 *
3907 * @adev: amdgpu device pointer
3908 *
3909 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
3910 * a hung GPU.
3911 */
3912bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
3913{
3914	if (!amdgpu_device_ip_check_soft_reset(adev)) {
3915		DRM_INFO("Timeout, but no hardware hang detected.\n");
3916		return false;
3917	}
3918
3919	if (amdgpu_gpu_recovery == 0)
3920		goto disabled;
3921
3922	if (amdgpu_sriov_vf(adev))
3923		return true;
3924
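	/*
	 * amdgpu_gpu_recovery == -1 means auto: only enable recovery on the
	 * ASIC families below, where it is known to work.
	 */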
3925	if (amdgpu_gpu_recovery == -1) {
3926		switch (adev->asic_type) {
3927		case CHIP_BONAIRE:
3928		case CHIP_HAWAII:
3929		case CHIP_TOPAZ:
3930		case CHIP_TONGA:
3931		case CHIP_FIJI:
3932		case CHIP_POLARIS10:
3933		case CHIP_POLARIS11:
3934		case CHIP_POLARIS12:
3935		case CHIP_VEGAM:
3936		case CHIP_VEGA20:
3937		case CHIP_VEGA10:
3938		case CHIP_VEGA12:
3939		case CHIP_RAVEN:
3940		case CHIP_ARCTURUS:
3941		case CHIP_RENOIR:
3942		case CHIP_NAVI10:
3943		case CHIP_NAVI14:
3944		case CHIP_NAVI12:
3945		case CHIP_SIENNA_CICHLID:
3946			break;
3947		default:
3948			goto disabled;
3949		}
3950	}
3951
3952	return true;
3953
3954disabled:
3955	DRM_INFO("GPU recovery disabled.\n");
3956	return false;
3957}
3958
3959
3960static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
3961					struct amdgpu_job *job,
3962					bool *need_full_reset_arg)
3963{
3964	int i, r = 0;
3965	bool need_full_reset = *need_full_reset_arg;
3966
3967	amdgpu_debugfs_wait_dump(adev);
3968
3969	/* block all schedulers and reset given job's ring */
3970	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3971		struct amdgpu_ring *ring = adev->rings[i];
3972
3973		if (!ring || !ring->sched.thread)
3974			continue;
3975
3976		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3977		amdgpu_fence_driver_force_completion(ring);
3978	}
3979
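	/* Bump the karma of the guilty job so the scheduler can mark its
	 * context guilty and skip it on resubmission. */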
3980	if (job)
3981		drm_sched_increase_karma(&job->base);
3982
3983	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
3984	if (!amdgpu_sriov_vf(adev)) {
3985
3986		if (!need_full_reset)
3987			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
3988
3989		if (!need_full_reset) {
3990			amdgpu_device_ip_pre_soft_reset(adev);
3991			r = amdgpu_device_ip_soft_reset(adev);
3992			amdgpu_device_ip_post_soft_reset(adev);
3993			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
3994				DRM_INFO("soft reset failed, will fallback to full reset!\n");
3995				need_full_reset = true;
3996			}
3997		}
3998
3999		if (need_full_reset)
4000			r = amdgpu_device_ip_suspend(adev);
4001
4002		*need_full_reset_arg = need_full_reset;
4003	}
4004
4005	return r;
4006}
4007
4008static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
4009			       struct list_head *device_list_handle,
4010			       bool *need_full_reset_arg)
4011{
4012	struct amdgpu_device *tmp_adev = NULL;
4013	bool need_full_reset = *need_full_reset_arg, vram_lost = false;
4014	int r = 0;
4015
4016	/*
4017	 * ASIC reset has to be done on all XGMI hive nodes ASAP
4018	 * to allow proper link negotiation in FW (within 1 sec)
4019	 */
4020	if (need_full_reset) {
4021		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4022			/* For XGMI run all resets in parallel to speed up the process */
4023			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4024				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4025					r = -EALREADY;
4026			} else
4027				r = amdgpu_asic_reset(tmp_adev);
4028
4029			if (r) {
4030				DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
4031					 r, tmp_adev->ddev->unique);
4032				break;
4033			}
4034		}
4035
4036		/* For XGMI wait for all resets to complete before proceed */
4037		if (!r) {
4038			list_for_each_entry(tmp_adev, device_list_handle,
4039					    gmc.xgmi.head) {
4040				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4041					flush_work(&tmp_adev->xgmi_reset_work);
4042					r = tmp_adev->asic_reset_res;
4043					if (r)
4044						break;
4045				}
4046			}
4047		}
4048	}
4049
4050	if (!r && amdgpu_ras_intr_triggered()) {
4051		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4052			if (tmp_adev->mmhub.funcs &&
4053			    tmp_adev->mmhub.funcs->reset_ras_error_count)
4054				tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
4055		}
4056
4057		amdgpu_ras_intr_cleared();
4058	}
4059
4060	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4061		if (need_full_reset) {
4062			/* post card */
4063			if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
4064				DRM_WARN("asic atom init failed!");
4065
4066			if (!r) {
4067				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4068				r = amdgpu_device_ip_resume_phase1(tmp_adev);
4069				if (r)
4070					goto out;
4071
4072				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4073				if (vram_lost) {
4074					DRM_INFO("VRAM is lost due to GPU reset!\n");
4075					amdgpu_inc_vram_lost(tmp_adev);
4076				}
4077
4078				r = amdgpu_gtt_mgr_recover(
4079					&tmp_adev->mman.bdev.man[TTM_PL_TT]);
4080				if (r)
4081					goto out;
4082
4083				r = amdgpu_device_fw_loading(tmp_adev);
4084				if (r)
4085					return r;
4086
4087				r = amdgpu_device_ip_resume_phase2(tmp_adev);
4088				if (r)
4089					goto out;
4090
4091				if (vram_lost)
4092					amdgpu_device_fill_reset_magic(tmp_adev);
4093
4094				/*
4095				 * Add this ASIC back as tracked since the reset
4096				 * already completed successfully.
4097				 */
4098				amdgpu_register_gpu_instance(tmp_adev);
4099
4100				r = amdgpu_device_ip_late_init(tmp_adev);
4101				if (r)
4102					goto out;
4103
4104				amdgpu_fbdev_set_suspend(tmp_adev, 0);
4105
4106				/* must succeed. */
4107				amdgpu_ras_resume(tmp_adev);
4108
4109				/* Update PSP FW topology after reset */
4110				if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4111					r = amdgpu_xgmi_update_topology(hive, tmp_adev);
4112			}
4113		}
4114
4115
4116out:
4117		if (!r) {
4118			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4119			r = amdgpu_ib_ring_tests(tmp_adev);
4120			if (r) {
4121				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
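				/* IB tests failed: suspend the IPs again and
				 * return -EAGAIN so the caller retries with a
				 * full reset. */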
4122				r = amdgpu_device_ip_suspend(tmp_adev);
4123				need_full_reset = true;
4124				r = -EAGAIN;
4125				goto end;
4126			}
4127		}
4128
4129		if (!r)
4130			r = amdgpu_device_recover_vram(tmp_adev);
4131		else
4132			tmp_adev->asic_reset_res = r;
4133	}
4134
4135end:
4136	*need_full_reset_arg = need_full_reset;
4137	return r;
4138}
4139
4140static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
4141{
4142	if (trylock) {
4143		if (!mutex_trylock(&adev->lock_reset))
4144			return false;
4145	} else
4146		mutex_lock(&adev->lock_reset);
4147
4148	atomic_inc(&adev->gpu_reset_counter);
4149	adev->in_gpu_reset = true;
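	/* Record the MP1 (SMU) state matching the chosen reset method so the
	 * power code can fence SMU messaging accordingly. */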
4150	switch (amdgpu_asic_reset_method(adev)) {
4151	case AMD_RESET_METHOD_MODE1:
4152		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4153		break;
4154	case AMD_RESET_METHOD_MODE2:
4155		adev->mp1_state = PP_MP1_STATE_RESET;
4156		break;
4157	default:
4158		adev->mp1_state = PP_MP1_STATE_NONE;
4159		break;
4160	}
4161
4162	return true;
4163}
4164
4165static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4166{
4167	amdgpu_vf_error_trans_all(adev);
4168	adev->mp1_state = PP_MP1_STATE_NONE;
4169	adev->in_gpu_reset = false;
4170	mutex_unlock(&adev->lock_reset);
4171}
4172
4173static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4174{
4175	struct pci_dev *p = NULL;
4176
4177	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4178			adev->pdev->bus->number, 1);
4179	if (p) {
4180		pm_runtime_enable(&(p->dev));
4181		pm_runtime_resume(&(p->dev));
4182	}
4183}
4184
4185static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4186{
4187	enum amd_reset_method reset_method;
4188	struct pci_dev *p = NULL;
4189	u64 expires;
4190
4191	/*
4192	 * For now, only BACO and mode1 reset are confirmed
4193	 * to suffer from the audio issue when not properly suspended.
4194	 */
4195	reset_method = amdgpu_asic_reset_method(adev);
4196	if ((reset_method != AMD_RESET_METHOD_BACO) &&
4197	     (reset_method != AMD_RESET_METHOD_MODE1))
4198		return -EINVAL;
4199
4200	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4201			adev->pdev->bus->number, 1);
4202	if (!p)
4203		return -ENODEV;
4204
4205	expires = pm_runtime_autosuspend_expiration(&(p->dev));
4206	if (!expires)
4207		/*
4208		 * If we cannot get the audio device autosuspend delay,
4209		 * a fixed 4S interval will be used. Since 3S is the audio
4210		 * controller's default autosuspend delay, the 4S used here
4211		 * is guaranteed to cover it.
4212		 */
4213		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4214
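	/* Poll until the audio function actually reaches the suspended state
	 * or the deadline above expires. */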
4215	while (!pm_runtime_status_suspended(&(p->dev))) {
4216		if (!pm_runtime_suspend(&(p->dev)))
4217			break;
4218
4219		if (expires < ktime_get_mono_fast_ns()) {
4220			dev_warn(adev->dev, "failed to suspend display audio\n");
4221			/* TODO: abort the succeeding gpu reset? */
4222			return -ETIMEDOUT;
4223		}
4224	}
4225
4226	pm_runtime_disable(&(p->dev));
4227
4228	return 0;
4229}
4230
4231/**
4232 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4233 *
4234 * @adev: amdgpu device pointer
4235 * @job: which job triggered the hang
4236 *
4237 * Attempt to reset the GPU if it has hung (all asics).
4238 * Attempt to do a soft reset or full reset and reinitialize the ASIC.
4239 * Returns 0 for success or an error on failure.
4240 */
4241
4242int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4243			      struct amdgpu_job *job)
4244{
4245	struct list_head device_list, *device_list_handle = NULL;
4246	bool need_full_reset = false;
4247	bool job_signaled = false;
4248	struct amdgpu_hive_info *hive = NULL;
4249	struct amdgpu_device *tmp_adev = NULL;
4250	int i, r = 0;
4251	bool need_emergency_restart = false;
4252	bool audio_suspended = false;
4253
4254	/**
4255	 * Special case: RAS triggered and full reset isn't supported
4256	 */
4257	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4258
4259	/*
4260	 * Flush RAM to disk so that after reboot
4261	 * the user can read the log and see why the system rebooted.
4262	 */
4263	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4264		DRM_WARN("Emergency reboot.");
4265
4266		ksys_sync_helper();
4267		emergency_restart();
4268	}
4269
4270	dev_info(adev->dev, "GPU %s begin!\n",
4271		need_emergency_restart ? "jobs stop":"reset");
4272
4273	/*
4274	 * Here we trylock to avoid a chain of resets executing from
4275	 * either jobs triggered on different adevs in an XGMI hive or jobs on
4276	 * different schedulers for the same device while this TO handler runs.
4277	 * We always reset all schedulers for a device and all devices in an
4278	 * XGMI hive, so that should take care of them too.
4279	 */
4280	hive = amdgpu_get_xgmi_hive(adev, true);
4281	if (hive && !mutex_trylock(&hive->reset_lock)) {
4282		DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4283			  job ? job->base.id : -1, hive->hive_id);
4284		mutex_unlock(&hive->hive_lock);
4285		return 0;
4286	}
4287
4288	/*
4289	 * Build list of devices to reset.
4290	 * In case we are in XGMI hive mode, resort the device list
4291	 * to put adev in the 1st position.
4292	 */
4293	INIT_LIST_HEAD(&device_list);
4294	if (adev->gmc.xgmi.num_physical_nodes > 1) {
4295		if (!hive)
4296			return -ENODEV;
4297		if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
4298			list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
4299		device_list_handle = &hive->device_list;
4300	} else {
4301		list_add_tail(&adev->gmc.xgmi.head, &device_list);
4302		device_list_handle = &device_list;
4303	}
4304
4305	/* block all schedulers and reset given job's ring */
4306	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4307		if (!amdgpu_device_lock_adev(tmp_adev, !hive)) {
4308			DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
4309				  job ? job->base.id : -1);
4310			mutex_unlock(&hive->hive_lock);
4311			return 0;
4312		}
4313
4314		/*
4315		 * Try to put the audio codec into suspend state
4316		 * before the gpu reset starts.
4317		 *
4318		 * Because the power domain of the graphics device
4319		 * is shared with the AZ power domain, without this
4320		 * we may change the audio hardware from behind
4321		 * the audio driver's back. That will trigger
4322		 * some audio codec errors.
4323		 */
4324		if (!amdgpu_device_suspend_display_audio(tmp_adev))
4325			audio_suspended = true;
4326
4327		amdgpu_ras_set_error_query_ready(tmp_adev, false);
4328
4329		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
4330
4331		if (!amdgpu_sriov_vf(tmp_adev))
4332			amdgpu_amdkfd_pre_reset(tmp_adev);
4333
4334		/*
4335		 * Mark these ASICs to be reset as untracked first,
4336		 * and add them back after the reset completes.
4337		 */
4338		amdgpu_unregister_gpu_instance(tmp_adev);
4339
4340		amdgpu_fbdev_set_suspend(tmp_adev, 1);
4341
4342		/* disable ras on ALL IPs */
4343		if (!need_emergency_restart &&
4344		      amdgpu_device_ip_need_full_reset(tmp_adev))
4345			amdgpu_ras_suspend(tmp_adev);
4346
4347		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4348			struct amdgpu_ring *ring = tmp_adev->rings[i];
4349
4350			if (!ring || !ring->sched.thread)
4351				continue;
4352
4353			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
4354
4355			if (need_emergency_restart)
4356				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
4357		}
4358	}
4359
4360	if (need_emergency_restart)
4361		goto skip_sched_resume;
4362
4363	/*
4364	 * Must check guilty signal here since after this point all old
4365	 * HW fences are force signaled.
4366	 *
4367	 * job->base holds a reference to parent fence
4368	 */
4369	if (job && job->base.s_fence->parent &&
4370	    dma_fence_is_signaled(job->base.s_fence->parent)) {
4371		job_signaled = true;
4372		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
4373		goto skip_hw_reset;
4374	}
4375
4376retry:	/* Rest of adevs pre asic reset from XGMI hive. */
4377	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4378		r = amdgpu_device_pre_asic_reset(tmp_adev,
4379						 NULL,
4380						 &need_full_reset);
4381		/* TODO: Should we stop? */
4382		if (r) {
4383			DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
4384				  r, tmp_adev->ddev->unique);
4385			tmp_adev->asic_reset_res = r;
4386		}
4387	}
4388
4389	/* Actual ASIC resets if needed. */
4390	/* TODO Implement XGMI hive reset logic for SRIOV */
4391	if (amdgpu_sriov_vf(adev)) {
4392		r = amdgpu_device_reset_sriov(adev, job ? false : true);
4393		if (r)
4394			adev->asic_reset_res = r;
4395	} else {
4396		r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
4397		if (r == -EAGAIN)
4398			goto retry;
4399	}
4400
4401skip_hw_reset:
4402
4403	/* Post ASIC reset for all devs. */
4404	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4405
4406		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4407			struct amdgpu_ring *ring = tmp_adev->rings[i];
4408
4409			if (!ring || !ring->sched.thread)
4410				continue;
4411
4412			/* No point in resubmitting jobs if we didn't do a HW reset */
4413			if (!tmp_adev->asic_reset_res && !job_signaled)
4414				drm_sched_resubmit_jobs(&ring->sched);
4415
4416			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
4417		}
4418
4419		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
4420			drm_helper_resume_force_mode(tmp_adev->ddev);
4421		}
4422
4423		tmp_adev->asic_reset_res = 0;
4424
4425		if (r) {
4426			/* bad news, how to tell it to userspace ? */
4427			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
4428			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
4429		} else {
4430			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
4431		}
4432	}
4433
4434skip_sched_resume:
4435	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4436		/* unlock kfd: SRIOV would do it separately */
4437		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
4438			amdgpu_amdkfd_post_reset(tmp_adev);
4439		if (audio_suspended)
4440			amdgpu_device_resume_display_audio(tmp_adev);
4441		amdgpu_device_unlock_adev(tmp_adev);
4442	}
4443
4444	if (hive) {
4445		mutex_unlock(&hive->reset_lock);
4446		mutex_unlock(&hive->hive_lock);
4447	}
4448
4449	if (r)
4450		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
4451	return r;
4452}
4453
4454/**
4455 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
4456 *
4457 * @adev: amdgpu_device pointer
4458 *
4459 * Fetches and stores in the driver the PCIE capabilities (gen speed
4460 * and lanes) of the slot the device is in. Handles APUs and
4461 * virtualized environments where PCIE config space may not be available.
4462 */
4463static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
4464{
4465	struct pci_dev *pdev;
4466	enum pci_bus_speed speed_cap, platform_speed_cap;
4467	enum pcie_link_width platform_link_width;
4468
4469	if (amdgpu_pcie_gen_cap)
4470		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
4471
4472	if (amdgpu_pcie_lane_cap)
4473		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
4474
4475	/* covers APUs as well */
4476	if (pci_is_root_bus(adev->pdev->bus)) {
4477		if (adev->pm.pcie_gen_mask == 0)
4478			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4479		if (adev->pm.pcie_mlw_mask == 0)
4480			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
4481		return;
4482	}
4483
4484	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
4485		return;
4486
4487	pcie_bandwidth_available(adev->pdev, NULL,
4488				 &platform_speed_cap, &platform_link_width);
4489
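	/* Build the masks from both what the ASIC supports and what the
	 * platform (slot) supports; each supported gen/width sets one bit. */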
4490	if (adev->pm.pcie_gen_mask == 0) {
4491		/* asic caps */
4492		pdev = adev->pdev;
4493		speed_cap = pcie_get_speed_cap(pdev);
4494		if (speed_cap == PCI_SPEED_UNKNOWN) {
4495			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4496						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4497						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4498		} else {
4499			if (speed_cap == PCIE_SPEED_16_0GT)
4500				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4501							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4502							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4503							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4504			else if (speed_cap == PCIE_SPEED_8_0GT)
4505				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4506							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4507							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4508			else if (speed_cap == PCIE_SPEED_5_0GT)
4509				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4510							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4511			else
4512				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4513		}
4514		/* platform caps */
4515		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
4516			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4517						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4518		} else {
4519			if (platform_speed_cap == PCIE_SPEED_16_0GT)
4520				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4521							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4522							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4523							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
4524			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
4525				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4526							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4527							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
4528			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
4529				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4530							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4531			else
4532				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4533
4534		}
4535	}
4536	if (adev->pm.pcie_mlw_mask == 0) {
4537		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
4538			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4539		} else {
4540			switch (platform_link_width) {
4541			case PCIE_LNK_X32:
4542				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4543							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4544							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4545							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4546							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4547							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4548							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4549				break;
4550			case PCIE_LNK_X16:
4551				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4552							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4553							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4554							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4555							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4556							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4557				break;
4558			case PCIE_LNK_X12:
4559				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4560							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4561							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4562							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4563							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4564				break;
4565			case PCIE_LNK_X8:
4566				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4567							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4568							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4569							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4570				break;
4571			case PCIE_LNK_X4:
4572				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4573							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4574							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4575				break;
4576			case PCIE_LNK_X2:
4577				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4578							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4579				break;
4580			case PCIE_LNK_X1:
4581				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
4582				break;
4583			default:
4584				break;
4585			}
4586		}
4587	}
4588}
4589
4590int amdgpu_device_baco_enter(struct drm_device *dev)
4591{
4592	struct amdgpu_device *adev = dev->dev_private;
4593	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4594
4595	if (!amdgpu_device_supports_baco(adev->ddev))
4596		return -ENOTSUPP;
4597
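	/* With RAS enabled, quiesce doorbell interrupts while in BACO; they
	 * are re-enabled in amdgpu_device_baco_exit() below. */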
4598	if (ras && ras->supported)
4599		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
4600
4601	return amdgpu_dpm_baco_enter(adev);
4602}
4603
4604int amdgpu_device_baco_exit(struct drm_device *dev)
4605{
4606	struct amdgpu_device *adev = dev->dev_private;
4607	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4608	int ret = 0;
4609
4610	if (!amdgpu_device_supports_baco(adev->ddev))
4611		return -ENOTSUPP;
4612
4613	ret = amdgpu_dpm_baco_exit(adev);
4614	if (ret)
4615		return ret;
4616
4617	if (ras && ras->supported)
4618		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
4619
4620	return 0;
4621}
v5.14.15
  28#include <linux/power_supply.h>
  29#include <linux/kthread.h>
  30#include <linux/module.h>
  31#include <linux/console.h>
  32#include <linux/slab.h>
  33
  34#include <drm/drm_atomic_helper.h>
  35#include <drm/drm_probe_helper.h>
  36#include <drm/amdgpu_drm.h>
  37#include <linux/vgaarb.h>
  38#include <linux/vga_switcheroo.h>
  39#include <linux/efi.h>
  40#include "amdgpu.h"
  41#include "amdgpu_trace.h"
  42#include "amdgpu_i2c.h"
  43#include "atom.h"
  44#include "amdgpu_atombios.h"
  45#include "amdgpu_atomfirmware.h"
  46#include "amd_pcie.h"
  47#ifdef CONFIG_DRM_AMDGPU_SI
  48#include "si.h"
  49#endif
  50#ifdef CONFIG_DRM_AMDGPU_CIK
  51#include "cik.h"
  52#endif
  53#include "vi.h"
  54#include "soc15.h"
  55#include "nv.h"
  56#include "bif/bif_4_1_d.h"
  57#include <linux/pci.h>
  58#include <linux/firmware.h>
  59#include "amdgpu_vf_error.h"
  60
  61#include "amdgpu_amdkfd.h"
  62#include "amdgpu_pm.h"
  63
  64#include "amdgpu_xgmi.h"
  65#include "amdgpu_ras.h"
  66#include "amdgpu_pmu.h"
  67#include "amdgpu_fru_eeprom.h"
  68#include "amdgpu_reset.h"
  69
  70#include <linux/suspend.h>
  71#include <drm/task_barrier.h>
  72#include <linux/pm_runtime.h>
  73
  74#include <drm/drm_drv.h>
  75
  76MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
  77MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
  78MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
  79MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
  80MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
  81MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
  82MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
  83MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
  84MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
  85MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
  86MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
  87MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
  88
  89#define AMDGPU_RESUME_MS		2000
  90
  91const char *amdgpu_asic_name[] = {
  92	"TAHITI",
  93	"PITCAIRN",
  94	"VERDE",
  95	"OLAND",
  96	"HAINAN",
  97	"BONAIRE",
  98	"KAVERI",
  99	"KABINI",
 100	"HAWAII",
 101	"MULLINS",
 102	"TOPAZ",
 103	"TONGA",
 104	"FIJI",
 105	"CARRIZO",
 106	"STONEY",
 107	"POLARIS10",
 108	"POLARIS11",
 109	"POLARIS12",
 110	"VEGAM",
 111	"VEGA10",
 112	"VEGA12",
 113	"VEGA20",
 114	"RAVEN",
 115	"ARCTURUS",
 116	"RENOIR",
 117	"ALDEBARAN",
 118	"NAVI10",
 119	"NAVI14",
 120	"NAVI12",
 121	"SIENNA_CICHLID",
 122	"NAVY_FLOUNDER",
 123	"VANGOGH",
 124	"DIMGREY_CAVEFISH",
 125	"BEIGE_GOBY",
 126	"YELLOW_CARP",
 127	"LAST",
 128};
 129
 130/**
 131 * DOC: pcie_replay_count
 132 *
 133 * The amdgpu driver provides a sysfs API for reporting the total number
  134 * of PCIe replays (NAKs).
  135 * The file pcie_replay_count is used for this and returns the total
  136 * number of replays as a sum of the NAKs generated and NAKs received.
 137 */
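/*
 * Example (illustrative only; the card index depends on the system):
 *
 *	$ cat /sys/class/drm/card0/device/pcie_replay_count
 */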
 138
 139static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
 140		struct device_attribute *attr, char *buf)
 141{
 142	struct drm_device *ddev = dev_get_drvdata(dev);
 143	struct amdgpu_device *adev = drm_to_adev(ddev);
 144	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
 145
 146	return sysfs_emit(buf, "%llu\n", cnt);
 147}
 148
 149static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
 150		amdgpu_device_get_pcie_replay_count, NULL);
 151
 152static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 153
 154/**
 155 * DOC: product_name
 156 *
 157 * The amdgpu driver provides a sysfs API for reporting the product name
 158 * for the device
  159 * The file product_name is used for this and returns the product name
 160 * as returned from the FRU.
 161 * NOTE: This is only available for certain server cards
 162 */
 163
 164static ssize_t amdgpu_device_get_product_name(struct device *dev,
 165		struct device_attribute *attr, char *buf)
 166{
 167	struct drm_device *ddev = dev_get_drvdata(dev);
 168	struct amdgpu_device *adev = drm_to_adev(ddev);
 169
 170	return sysfs_emit(buf, "%s\n", adev->product_name);
 171}
 172
 173static DEVICE_ATTR(product_name, S_IRUGO,
 174		amdgpu_device_get_product_name, NULL);
 175
 176/**
 177 * DOC: product_number
 178 *
 179 * The amdgpu driver provides a sysfs API for reporting the part number
 180 * for the device
  181 * The file product_number is used for this and returns the part number
 182 * as returned from the FRU.
 183 * NOTE: This is only available for certain server cards
 184 */
 185
 186static ssize_t amdgpu_device_get_product_number(struct device *dev,
 187		struct device_attribute *attr, char *buf)
 188{
 189	struct drm_device *ddev = dev_get_drvdata(dev);
 190	struct amdgpu_device *adev = drm_to_adev(ddev);
 191
 192	return sysfs_emit(buf, "%s\n", adev->product_number);
 193}
 194
 195static DEVICE_ATTR(product_number, S_IRUGO,
 196		amdgpu_device_get_product_number, NULL);
 197
 198/**
 199 * DOC: serial_number
 200 *
 201 * The amdgpu driver provides a sysfs API for reporting the serial number
 202 * for the device
 203 * The file serial_number is used for this and returns the serial number
 204 * as returned from the FRU.
 205 * NOTE: This is only available for certain server cards
 206 */
 207
 208static ssize_t amdgpu_device_get_serial_number(struct device *dev,
 209		struct device_attribute *attr, char *buf)
 210{
 211	struct drm_device *ddev = dev_get_drvdata(dev);
 212	struct amdgpu_device *adev = drm_to_adev(ddev);
 213
 214	return sysfs_emit(buf, "%s\n", adev->serial);
 215}
 216
 217static DEVICE_ATTR(serial_number, S_IRUGO,
 218		amdgpu_device_get_serial_number, NULL);
 219
 220/**
 221 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 222 *
 223 * @dev: drm_device pointer
 224 *
 225 * Returns true if the device is a dGPU with ATPX power control,
 226 * otherwise return false.
 227 */
 228bool amdgpu_device_supports_px(struct drm_device *dev)
 229{
 230	struct amdgpu_device *adev = drm_to_adev(dev);
 231
 232	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
 233		return true;
 234	return false;
 235}
 236
 237/**
 238 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 239 *
 240 * @dev: drm_device pointer
 241 *
 242 * Returns true if the device is a dGPU with ACPI power control,
 243 * otherwise return false.
 244 */
 245bool amdgpu_device_supports_boco(struct drm_device *dev)
 246{
 247	struct amdgpu_device *adev = drm_to_adev(dev);
 248
 249	if (adev->has_pr3 ||
 250	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
 251		return true;
 252	return false;
 253}
 254
 255/**
 256 * amdgpu_device_supports_baco - Does the device support BACO
 257 *
 258 * @dev: drm_device pointer
 259 *
  260 * Returns true if the device supports BACO,
 261 * otherwise return false.
 262 */
 263bool amdgpu_device_supports_baco(struct drm_device *dev)
 264{
 265	struct amdgpu_device *adev = drm_to_adev(dev);
 266
 267	return amdgpu_asic_supports_baco(adev);
 268}
 269
 270/**
 271 * amdgpu_device_supports_smart_shift - Is the device dGPU with
 272 * smart shift support
 273 *
 274 * @dev: drm_device pointer
 275 *
 276 * Returns true if the device is a dGPU with Smart Shift support,
 277 * otherwise returns false.
 278 */
 279bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
 280{
 281	return (amdgpu_device_supports_boco(dev) &&
 282		amdgpu_acpi_is_power_shift_control_supported());
 283}
 284
 285/*
 286 * VRAM access helper functions
 287 */
 288
 289/**
 290 * amdgpu_device_vram_access - read/write a buffer in vram
 291 *
 292 * @adev: amdgpu_device pointer
 293 * @pos: offset of the buffer in vram
 294 * @buf: virtual address of the buffer in system memory
  295 * @size: read/write size, sizeof(@buf) must be > @size
 296 * @write: true - write to vram, otherwise - read from vram
 297 */
 298void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 299			       uint32_t *buf, size_t size, bool write)
 300{
 301	unsigned long flags;
 302	uint32_t hi = ~0;
 303	uint64_t last;
 304	int idx;
 305
 306	if (!drm_dev_enter(&adev->ddev, &idx))
 307		return;
 308
 309#ifdef CONFIG_64BIT
 310	last = min(pos + size, adev->gmc.visible_vram_size);
 311	if (last > pos) {
 312		void __iomem *addr = adev->mman.aper_base_kaddr + pos;
 313		size_t count = last - pos;
 314
 315		if (write) {
 316			memcpy_toio(addr, buf, count);
 317			mb();
 318			amdgpu_device_flush_hdp(adev, NULL);
 319		} else {
 320			amdgpu_device_invalidate_hdp(adev, NULL);
 321			mb();
 322			memcpy_fromio(buf, addr, count);
 323		}
 324
 325		if (count == size)
 326			goto exit;
 327
 328		pos += count;
 329		buf += count / 4;
 330		size -= count;
 331	}
 332#endif
 333
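	/* Fall back to the MM_INDEX/MM_DATA window for anything outside the
	 * CPU-visible aperture, one dword at a time. */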
 334	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 335	for (last = pos + size; pos < last; pos += 4) {
 336		uint32_t tmp = pos >> 31;
 337
 338		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
 339		if (tmp != hi) {
 340			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
 341			hi = tmp;
 342		}
 343		if (write)
 344			WREG32_NO_KIQ(mmMM_DATA, *buf++);
 345		else
 346			*buf++ = RREG32_NO_KIQ(mmMM_DATA);
 347	}
 348	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 349
 350#ifdef CONFIG_64BIT
 351exit:
 352#endif
 353	drm_dev_exit(idx);
 354}
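
/*
 * Example use (illustrative only): read the first 16 bytes of VRAM into a
 * local buffer.
 *
 *	uint32_t buf[8];
 *
 *	amdgpu_device_vram_access(adev, 0, buf, 16, false);
 */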
 355
 356/*
 357 * register access helper functions.
 358 */
 359
 360/* Check if hw access should be skipped because of hotplug or device error */
 361bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
 362{
 363	if (adev->no_hw_access)
 364		return true;
 365
 366#ifdef CONFIG_LOCKDEP
 367	/*
 368	 * This is a bit complicated to understand, so worth a comment. What we assert
 369	 * here is that the GPU reset is not running on another thread in parallel.
 370	 *
 371	 * For this we trylock the read side of the reset semaphore, if that succeeds
  372 * we know that the reset is not running in parallel.
 373	 *
 374	 * If the trylock fails we assert that we are either already holding the read
 375	 * side of the lock or are the reset thread itself and hold the write side of
 376	 * the lock.
 377	 */
 378	if (in_task()) {
 379		if (down_read_trylock(&adev->reset_sem))
 380			up_read(&adev->reset_sem);
 381		else
 382			lockdep_assert_held(&adev->reset_sem);
 383	}
 384#endif
 385	return false;
 386}
 387
 388/**
 389 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 390 *
 391 * @adev: amdgpu_device pointer
 392 * @reg: dword aligned register offset
 393 * @acc_flags: access flags which require special behavior
 394 *
 395 * Returns the 32 bit value from the offset specified.
 396 */
 397uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
 398			    uint32_t reg, uint32_t acc_flags)
 399{
 400	uint32_t ret;
 401
 402	if (amdgpu_device_skip_hw_access(adev))
 403		return 0;
 404
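	/* Under SR-IOV at runtime, route MMIO access through the KIQ ring
	 * unless the caller opted out or a GPU reset holds the semaphore. */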
 405	if ((reg * 4) < adev->rmmio_size) {
 406		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 407		    amdgpu_sriov_runtime(adev) &&
 408		    down_read_trylock(&adev->reset_sem)) {
 409			ret = amdgpu_kiq_rreg(adev, reg);
 410			up_read(&adev->reset_sem);
 411		} else {
 412			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 413		}
 414	} else {
 415		ret = adev->pcie_rreg(adev, reg * 4);
 416	}
 417
 418	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
 419
 420	return ret;
 421}
 422
 423/*
 424 * MMIO register read with bytes helper functions
  425 * @offset: byte offset from MMIO start
  426 *
  427 */
 428
 429/**
 430 * amdgpu_mm_rreg8 - read a memory mapped IO register
 431 *
 432 * @adev: amdgpu_device pointer
 433 * @offset: byte aligned register offset
 434 *
 435 * Returns the 8 bit value from the offset specified.
 436 */
 437uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
 438{
 439	if (amdgpu_device_skip_hw_access(adev))
 440		return 0;
 441
 442	if (offset < adev->rmmio_size)
 443		return (readb(adev->rmmio + offset));
 444	BUG();
 445}
 446
 447/*
 448 * MMIO register write with bytes helper functions
  449 * @offset: byte offset from MMIO start
  450 * @value: the value to be written to the register
  451 *
  452 */
 453/**
  454 * amdgpu_mm_wreg8 - write a memory mapped IO register
 455 *
 456 * @adev: amdgpu_device pointer
 457 * @offset: byte aligned register offset
 458 * @value: 8 bit value to write
 459 *
 460 * Writes the value specified to the offset specified.
 461 */
 462void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
 463{
 464	if (amdgpu_device_skip_hw_access(adev))
 465		return;
 466
 467	if (offset < adev->rmmio_size)
 468		writeb(value, adev->rmmio + offset);
 469	else
 470		BUG();
 471}
 472
 473/**
 474 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 475 *
 476 * @adev: amdgpu_device pointer
 477 * @reg: dword aligned register offset
 478 * @v: 32 bit value to write to the register
 479 * @acc_flags: access flags which require special behavior
 480 *
 481 * Writes the value specified to the offset specified.
 482 */
 483void amdgpu_device_wreg(struct amdgpu_device *adev,
 484			uint32_t reg, uint32_t v,
 485			uint32_t acc_flags)
 486{
 487	if (amdgpu_device_skip_hw_access(adev))
 488		return;
 489
 490	if ((reg * 4) < adev->rmmio_size) {
 491		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 492		    amdgpu_sriov_runtime(adev) &&
 493		    down_read_trylock(&adev->reset_sem)) {
 494			amdgpu_kiq_wreg(adev, reg, v);
 495			up_read(&adev->reset_sem);
 496		} else {
 497			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 498		}
 499	} else {
 500		adev->pcie_wreg(adev, reg * 4, v);
 501	}
 502
 503	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
 504}
 505
 506/*
  507 * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
  508 *
  509 * this function is invoked only for debugfs register access
  510 */
 511void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
 512			     uint32_t reg, uint32_t v)
 513{
 514	if (amdgpu_device_skip_hw_access(adev))
 515		return;
 516
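	/* Registers inside the RLC access range must be written through the
	 * RLCG interface when running as an SR-IOV function with full access. */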
 517	if (amdgpu_sriov_fullaccess(adev) &&
 518	    adev->gfx.rlc.funcs &&
 519	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
 520		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
 521			return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0, 0);
 522	} else {
 523		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 524	}
 525}
 526
 527/**
 528 * amdgpu_mm_rdoorbell - read a doorbell dword
 529 *
 530 * @adev: amdgpu_device pointer
 531 * @index: doorbell index
 532 *
 533 * Returns the value in the doorbell aperture at the
 534 * requested doorbell index (CIK).
 535 */
 536u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
 537{
 538	if (amdgpu_device_skip_hw_access(adev))
 539		return 0;
 540
 541	if (index < adev->doorbell.num_doorbells) {
 542		return readl(adev->doorbell.ptr + index);
 543	} else {
 544		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 545		return 0;
 546	}
 547}
 548
 549/**
 550 * amdgpu_mm_wdoorbell - write a doorbell dword
 551 *
 552 * @adev: amdgpu_device pointer
 553 * @index: doorbell index
 554 * @v: value to write
 555 *
 556 * Writes @v to the doorbell aperture at the
 557 * requested doorbell index (CIK).
 558 */
 559void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
 560{
 561	if (amdgpu_device_skip_hw_access(adev))
 562		return;
 563
 564	if (index < adev->doorbell.num_doorbells) {
 565		writel(v, adev->doorbell.ptr + index);
 566	} else {
 567		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 568	}
 569}
 570
 571/**
 572 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 573 *
 574 * @adev: amdgpu_device pointer
 575 * @index: doorbell index
 576 *
 577 * Returns the value in the doorbell aperture at the
 578 * requested doorbell index (VEGA10+).
 579 */
 580u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
 581{
 582	if (amdgpu_device_skip_hw_access(adev))
 583		return 0;
 584
 585	if (index < adev->doorbell.num_doorbells) {
 586		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
 587	} else {
 588		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 589		return 0;
 590	}
 591}
 592
 593/**
 594 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 595 *
 596 * @adev: amdgpu_device pointer
 597 * @index: doorbell index
 598 * @v: value to write
 599 *
 600 * Writes @v to the doorbell aperture at the
 601 * requested doorbell index (VEGA10+).
 602 */
 603void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
 604{
 605	if (amdgpu_device_skip_hw_access(adev))
 606		return;
 607
 608	if (index < adev->doorbell.num_doorbells) {
 609		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
 610	} else {
 611		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 612	}
 613}
 614
 615/**
 616 * amdgpu_device_indirect_rreg - read an indirect register
 617 *
 618 * @adev: amdgpu_device pointer
 619 * @pcie_index: mmio register offset
 620 * @pcie_data: mmio register offset
 621 * @reg_addr: indirect register address to read from
 622 *
 623 * Returns the value of indirect register @reg_addr
 624 */
 625u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
 626				u32 pcie_index, u32 pcie_data,
 627				u32 reg_addr)
 628{
 629	unsigned long flags;
 630	u32 r;
 631	void __iomem *pcie_index_offset;
 632	void __iomem *pcie_data_offset;
 633
 634	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 635	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 636	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 637
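	/* Classic index/data access: post the address, read the index back to
	 * flush the posted write, then read the data register. */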
 638	writel(reg_addr, pcie_index_offset);
 639	readl(pcie_index_offset);
 640	r = readl(pcie_data_offset);
 641	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 642
 643	return r;
 644}
 645
 646/**
 647 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
 648 *
 649 * @adev: amdgpu_device pointer
 650 * @pcie_index: mmio register offset
 651 * @pcie_data: mmio register offset
 652 * @reg_addr: indirect register address to read from
 653 *
 654 * Returns the value of indirect register @reg_addr
 655 */
 656u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
 657				  u32 pcie_index, u32 pcie_data,
 658				  u32 reg_addr)
 659{
 660	unsigned long flags;
 661	u64 r;
 662	void __iomem *pcie_index_offset;
 663	void __iomem *pcie_data_offset;
 664
 665	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 666	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 667	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 668
 669	/* read low 32 bits */
 670	writel(reg_addr, pcie_index_offset);
 671	readl(pcie_index_offset);
 672	r = readl(pcie_data_offset);
 673	/* read high 32 bits */
 674	writel(reg_addr + 4, pcie_index_offset);
 675	readl(pcie_index_offset);
 676	r |= ((u64)readl(pcie_data_offset) << 32);
 677	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 678
 679	return r;
 680}
 681
 682/**
 683 * amdgpu_device_indirect_wreg - write an indirect register address
 684 *
 685 * @adev: amdgpu_device pointer
 686 * @pcie_index: mmio register offset
 687 * @pcie_data: mmio register offset
 688 * @reg_addr: indirect register offset
 689 * @reg_data: indirect register data
 690 *
 691 */
 692void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
 693				 u32 pcie_index, u32 pcie_data,
 694				 u32 reg_addr, u32 reg_data)
 695{
 696	unsigned long flags;
 697	void __iomem *pcie_index_offset;
 698	void __iomem *pcie_data_offset;
 699
 700	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 701	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 702	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 703
 704	writel(reg_addr, pcie_index_offset);
 705	readl(pcie_index_offset);
 706	writel(reg_data, pcie_data_offset);
 707	readl(pcie_data_offset);
 708	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 709}
 710
 711/**
 712 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
 713 *
 714 * @adev: amdgpu_device pointer
 715 * @pcie_index: mmio register offset
 716 * @pcie_data: mmio register offset
 717 * @reg_addr: indirect register offset
 718 * @reg_data: indirect register data
 719 *
 720 */
 721void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
 722				   u32 pcie_index, u32 pcie_data,
 723				   u32 reg_addr, u64 reg_data)
 724{
 725	unsigned long flags;
 726	void __iomem *pcie_index_offset;
 727	void __iomem *pcie_data_offset;
 728
 729	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 730	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 731	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 732
 733	/* write low 32 bits */
 734	writel(reg_addr, pcie_index_offset);
 735	readl(pcie_index_offset);
 736	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
 737	readl(pcie_data_offset);
 738	/* write high 32 bits */
 739	writel(reg_addr + 4, pcie_index_offset);
 740	readl(pcie_index_offset);
 741	writel((u32)(reg_data >> 32), pcie_data_offset);
 742	readl(pcie_data_offset);
 743	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 744}
 745
 746/**
 747 * amdgpu_invalid_rreg - dummy reg read function
 748 *
 749 * @adev: amdgpu_device pointer
 750 * @reg: offset of register
 751 *
 752 * Dummy register read function.  Used for register blocks
 753 * that certain asics don't have (all asics).
 754 * Returns the value in the register.
 755 */
 756static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
 757{
 758	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
 759	BUG();
 760	return 0;
 761}
 762
 763/**
 764 * amdgpu_invalid_wreg - dummy reg write function
 765 *
 766 * @adev: amdgpu_device pointer
 767 * @reg: offset of register
 768 * @v: value to write to the register
 769 *
 770 * Dummy register write function.  Used for register blocks
 771 * that certain asics don't have (all asics).
 772 */
 773static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 774{
 775	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
 776		  reg, v);
 777	BUG();
 778}
 779
 780/**
 781 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 782 *
 783 * @adev: amdgpu_device pointer
 784 * @reg: offset of register
 785 *
 786 * Dummy register read function.  Used for register blocks
 787 * that certain asics don't have (all asics).
 788 * Returns the value in the register.
 789 */
 790static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
 791{
 792	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
 793	BUG();
 794	return 0;
 795}
 796
 797/**
 798 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 799 *
 800 * @adev: amdgpu_device pointer
 801 * @reg: offset of register
 802 * @v: value to write to the register
 803 *
 804 * Dummy register write function.  Used for register blocks
 805 * that certain asics don't have (all asics).
 806 */
 807static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
 808{
 809	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
 810		  reg, v);
 811	BUG();
 812}
 813
 814/**
 815 * amdgpu_block_invalid_rreg - dummy reg read function
 816 *
 817 * @adev: amdgpu_device pointer
 818 * @block: offset of instance
 819 * @reg: offset of register
 820 *
 821 * Dummy register read function.  Used for register blocks
 822 * that certain asics don't have (all asics).
 823 * Returns the value in the register.
 824 */
 825static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
 826					  uint32_t block, uint32_t reg)
 827{
 828	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
 829		  reg, block);
 830	BUG();
 831	return 0;
 832}
 833
 834/**
 835 * amdgpu_block_invalid_wreg - dummy reg write function
 836 *
 837 * @adev: amdgpu_device pointer
 838 * @block: offset of instance
 839 * @reg: offset of register
 840 * @v: value to write to the register
 841 *
 842 * Dummy register write function.  Used for register blocks
 843 * that certain asics don't have (all asics).
 844 */
 845static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
 846				      uint32_t block,
 847				      uint32_t reg, uint32_t v)
 848{
 849	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
 850		  reg, block, v);
 851	BUG();
 852}
 853
 854/**
 855 * amdgpu_device_asic_init - Wrapper for atom asic_init
 856 *
 857 * @adev: amdgpu_device pointer
 858 *
 859 * Does any asic specific work and then calls atom asic init.
 860 */
 861static int amdgpu_device_asic_init(struct amdgpu_device *adev)
 862{
 863	amdgpu_asic_pre_asic_init(adev);
 864
 865	return amdgpu_atom_asic_init(adev->mode_info.atom_context);
 866}
 867
 868/**
 869 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 870 *
 871 * @adev: amdgpu_device pointer
 872 *
 873 * Allocates a scratch page of VRAM for use by various things in the
 874 * driver.
 875 */
 876static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
 877{
 878	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
 879				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
 880				       &adev->vram_scratch.robj,
 881				       &adev->vram_scratch.gpu_addr,
 882				       (void **)&adev->vram_scratch.ptr);
 883}
 884
 885/**
 886 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 887 *
 888 * @adev: amdgpu_device pointer
 889 *
 890 * Frees the VRAM scratch page.
 891 */
 892static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
 893{
 894	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
 895}
 896
 897/**
 898 * amdgpu_device_program_register_sequence - program an array of registers.
 899 *
 900 * @adev: amdgpu_device pointer
 901 * @registers: pointer to the register array
 902 * @array_size: size of the register array
 903 *
 904 * Programs an array of registers with AND and OR masks.
 905 * This is a helper for setting golden registers.
 906 */
 907void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
 908					     const u32 *registers,
 909					     const u32 array_size)
 910{
 911	u32 tmp, reg, and_mask, or_mask;
 912	int i;
 913
 914	if (array_size % 3)
 915		return;
 916
 917	for (i = 0; i < array_size; i += 3) {
 918		reg = registers[i + 0];
 919		and_mask = registers[i + 1];
 920		or_mask = registers[i + 2];
 921
 922		if (and_mask == 0xffffffff) {
 923			tmp = or_mask;
 924		} else {
 925			tmp = RREG32(reg);
 926			tmp &= ~and_mask;
 927			if (adev->family >= AMDGPU_FAMILY_AI)
 928				tmp |= (or_mask & and_mask);
 929			else
 930				tmp |= or_mask;
 931		}
 932		WREG32(reg, tmp);
 933	}
 934}
 935
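/*
 * Editor's illustrative sketch, not part of the driver: golden register
 * arrays passed to amdgpu_device_program_register_sequence() are flat
 * triplets of { offset, AND mask, OR mask }.  The offsets and values
 * below are invented purely for illustration.
 */
static const u32 example_golden_settings[] __maybe_unused = {
	/* offset      AND mask     OR mask */
	0x00002a00, 0xffffffff, 0x00000100,	/* whole register replaced */
	0x00002a04, 0x0000000f, 0x00000002,	/* clear low nibble, OR in 0x2 */
};

/*
 * A caller would then program the sequence with:
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *						ARRAY_SIZE(example_golden_settings));
 */
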
 936/**
 937 * amdgpu_device_pci_config_reset - reset the GPU
 938 *
 939 * @adev: amdgpu_device pointer
 940 *
 941 * Resets the GPU using the pci config reset sequence.
 942 * Only applicable to asics prior to vega10.
 943 */
 944void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
 945{
 946	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
 947}
 948
 949/**
 950 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 951 *
 952 * @adev: amdgpu_device pointer
 953 *
 954 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 955 */
 956int amdgpu_device_pci_reset(struct amdgpu_device *adev)
 957{
 958	return pci_reset_function(adev->pdev);
 959}
 960
 961/*
 962 * GPU doorbell aperture helper functions.
 963 */
 964/**
 965 * amdgpu_device_doorbell_init - Init doorbell driver information.
 966 *
 967 * @adev: amdgpu_device pointer
 968 *
 969 * Init doorbell driver information (CIK)
 970 * Returns 0 on success, error on failure.
 971 */
 972static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
 973{
 974
 975	/* No doorbell on SI hardware generation */
 976	if (adev->asic_type < CHIP_BONAIRE) {
 977		adev->doorbell.base = 0;
 978		adev->doorbell.size = 0;
 979		adev->doorbell.num_doorbells = 0;
 980		adev->doorbell.ptr = NULL;
 981		return 0;
 982	}
 983
 984	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
 985		return -EINVAL;
 986
 987	amdgpu_asic_init_doorbell_index(adev);
 988
 989	/* doorbell bar mapping */
 990	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
 991	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
 992
 993	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
 994					     adev->doorbell_index.max_assignment+1);
 995	if (adev->doorbell.num_doorbells == 0)
 996		return -EINVAL;
 997
 998	/* For Vega, reserve and map two pages on the doorbell BAR since the
 999	 * SDMA paging queue doorbell uses the second page.  The
1000	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1001	 * doorbells are in the first page, so with the paging queue enabled
1002	 * the max num_doorbells grows by one extra page (0x400 in dwords).
1003	 */
1004	if (adev->asic_type >= CHIP_VEGA10)
1005		adev->doorbell.num_doorbells += 0x400;
1006
1007	adev->doorbell.ptr = ioremap(adev->doorbell.base,
1008				     adev->doorbell.num_doorbells *
1009				     sizeof(u32));
1010	if (adev->doorbell.ptr == NULL)
1011		return -ENOMEM;
1012
1013	return 0;
1014}
1015
1016/**
1017 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1018 *
1019 * @adev: amdgpu_device pointer
1020 *
1021 * Tear down doorbell driver information (CIK)
1022 */
1023static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1024{
1025	iounmap(adev->doorbell.ptr);
1026	adev->doorbell.ptr = NULL;
1027}
1028
1029
1030
1031/*
1032 * amdgpu_device_wb_*()
1033 * Writeback is the method by which the GPU updates special pages in memory
1034 * with the status of certain GPU events (fences, ring pointers, etc.).
1035 */
1036
1037/**
1038 * amdgpu_device_wb_fini - Disable Writeback and free memory
1039 *
1040 * @adev: amdgpu_device pointer
1041 *
1042 * Disables Writeback and frees the Writeback memory (all asics).
1043 * Used at driver shutdown.
1044 */
1045static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1046{
1047	if (adev->wb.wb_obj) {
1048		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1049				      &adev->wb.gpu_addr,
1050				      (void **)&adev->wb.wb);
1051		adev->wb.wb_obj = NULL;
1052	}
1053}
1054
1055/**
1056 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
1057 *
1058 * @adev: amdgpu_device pointer
1059 *
1060 * Initializes writeback and allocates writeback memory (all asics).
1061 * Used at driver startup.
1062 * Returns 0 on success or a negative error code on failure.
1063 */
1064static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1065{
1066	int r;
1067
1068	if (adev->wb.wb_obj == NULL) {
1069		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1070		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1071					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1072					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1073					    (void **)&adev->wb.wb);
1074		if (r) {
1075			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1076			return r;
1077		}
1078
1079		adev->wb.num_wb = AMDGPU_MAX_WB;
1080		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1081
1082		/* clear wb memory */
1083		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1084	}
1085
1086	return 0;
1087}
1088
1089/**
1090 * amdgpu_device_wb_get - Allocate a wb entry
1091 *
1092 * @adev: amdgpu_device pointer
1093 * @wb: wb index
1094 *
1095 * Allocate a wb slot for use by the driver (all asics).
1096 * Returns 0 on success or -EINVAL on failure.
1097 */
1098int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1099{
1100	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1101
1102	if (offset < adev->wb.num_wb) {
1103		__set_bit(offset, adev->wb.used);
1104		*wb = offset << 3; /* convert to dw offset */
1105		return 0;
1106	} else {
1107		return -EINVAL;
1108	}
1109}
1110
1111/**
1112 * amdgpu_device_wb_free - Free a wb entry
1113 *
1114 * @adev: amdgpu_device pointer
1115 * @wb: wb index
1116 *
1117 * Free a wb slot allocated for use by the driver (all asics)
1118 */
1119void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1120{
1121	wb >>= 3;
1122	if (wb < adev->wb.num_wb)
1123		__clear_bit(wb, adev->wb.used);
1124}
1125
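/*
 * Editor's illustrative sketch, not part of the driver: allocate a
 * writeback slot, read what the GPU wrote there, and release it again.
 * Each slot is 256 bits (8 dwords) wide, which is why
 * amdgpu_device_wb_get() returns an index already shifted to a dword
 * offset.  The function itself is hypothetical.
 */
static int __maybe_unused amdgpu_example_wb_usage(struct amdgpu_device *adev)
{
	u32 wb;
	int r;

	r = amdgpu_device_wb_get(adev, &wb);
	if (r)
		return r;

	/* the GPU writes event status here; the CPU simply reads the array */
	dev_info(adev->dev, "wb slot %u holds 0x%08x\n", wb, adev->wb.wb[wb]);

	amdgpu_device_wb_free(adev, wb);
	return 0;
}
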
1126/**
1127 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1128 *
1129 * @adev: amdgpu_device pointer
1130 *
1131 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1132 * to fail, but if any of the BARs is not accessible after the resize we abort
1133 * driver loading by returning -ENODEV.
1134 */
1135int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1136{
1137	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1138	struct pci_bus *root;
1139	struct resource *res;
1140	unsigned i;
1141	u16 cmd;
1142	int r;
1143
1144	/* Bypass for VF */
1145	if (amdgpu_sriov_vf(adev))
1146		return 0;
1147
1148	/* skip if the bios has already enabled large BAR */
1149	if (adev->gmc.real_vram_size &&
1150	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1151		return 0;
1152
1153	/* Check if the root BUS has 64bit memory resources */
1154	root = adev->pdev->bus;
1155	while (root->parent)
1156		root = root->parent;
1157
1158	pci_bus_for_each_resource(root, res, i) {
1159		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1160		    res->start > 0x100000000ull)
1161			break;
1162	}
1163
1164	/* Trying to resize is pointless without a root hub window above 4GB */
1165	if (!res)
1166		return 0;
1167
1168	/* Limit the BAR size to what is available */
1169	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1170			rbar_size);
1171
1172	/* Disable memory decoding while we change the BAR addresses and size */
1173	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1174	pci_write_config_word(adev->pdev, PCI_COMMAND,
1175			      cmd & ~PCI_COMMAND_MEMORY);
1176
1177	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1178	amdgpu_device_doorbell_fini(adev);
1179	if (adev->asic_type >= CHIP_BONAIRE)
1180		pci_release_resource(adev->pdev, 2);
1181
1182	pci_release_resource(adev->pdev, 0);
1183
1184	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1185	if (r == -ENOSPC)
1186		DRM_INFO("Not enough PCI address space for a large BAR.");
1187	else if (r && r != -ENOTSUPP)
1188		DRM_ERROR("Problem resizing BAR0 (%d).", r);
1189
1190	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1191
1192	/* When the doorbell or fb BAR isn't available we have no chance of
1193	 * using the device.
1194	 */
1195	r = amdgpu_device_doorbell_init(adev);
1196	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1197		return -ENODEV;
1198
1199	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1200
1201	return 0;
1202}
1203
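/*
 * Editor's note, worked example not in the original source: the
 * resizable BAR capability encodes sizes as log2(bytes) - 20, so
 * pci_rebar_bytes_to_size() maps 256MB of VRAM to 8 and 8GB to 13.
 * rbar_size is then clamped to the largest size the device actually
 * reports via pci_rebar_get_possible_sizes().
 */
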
1204/*
1205 * GPU helper functions.
1206 */
1207/**
1208 * amdgpu_device_need_post - check if the hw need post or not
1209 *
1210 * @adev: amdgpu_device pointer
1211 *
1212 * Check if the asic has been initialized (all asics) at driver startup
1213 * or if a post is needed because a hw reset was performed.
1214 * Returns true if a post is needed or false if not.
1215 */
1216bool amdgpu_device_need_post(struct amdgpu_device *adev)
1217{
1218	uint32_t reg;
1219
1220	if (amdgpu_sriov_vf(adev))
1221		return false;
1222
1223	if (amdgpu_passthrough(adev)) {
1224		/* for FIJI: in the whole-GPU pass-through virtualization case, after a VM
1225		 * reboot some old SMC firmware still needs the driver to do a vPost or the
1226		 * GPU hangs.  SMC firmware versions above 22.15 don't have this flaw, so
1227		 * force a vPost for SMC versions below 22.15.
1228		 */
1229		if (adev->asic_type == CHIP_FIJI) {
1230			int err;
1231			uint32_t fw_ver;
1232			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1233			/* force vPost if an error occurred */
1234			if (err)
1235				return true;
1236
1237			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1238			if (fw_ver < 0x00160e00)
1239				return true;
1240		}
1241	}
1242
1243	/* Don't post if we need to reset the whole hive on init */
1244	if (adev->gmc.xgmi.pending_reset)
1245		return false;
1246
1247	if (adev->has_hw_reset) {
1248		adev->has_hw_reset = false;
1249		return true;
1250	}
1251
1252	/* bios scratch used on CIK+ */
1253	if (adev->asic_type >= CHIP_BONAIRE)
1254		return amdgpu_atombios_scratch_need_asic_init(adev);
1255
1256	/* check MEM_SIZE for older asics */
1257	reg = amdgpu_asic_get_config_memsize(adev);
1258
1259	if ((reg != 0) && (reg != 0xffffffff))
1260		return false;
1261
1262	return true;
1263}
1264
1265/* if we get transitioned to only one device, take VGA back */
1266/**
1267 * amdgpu_device_vga_set_decode - enable/disable vga decode
1268 *
1269 * @cookie: amdgpu_device pointer
1270 * @state: enable/disable vga decode
1271 *
1272 * Enable/disable vga decode (all asics).
1273 * Returns VGA resource flags.
1274 */
1275static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
1276{
1277	struct amdgpu_device *adev = cookie;
1278	amdgpu_asic_set_vga_state(adev, state);
1279	if (state)
1280		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1281		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1282	else
1283		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1284}
1285
1286/**
1287 * amdgpu_device_check_block_size - validate the vm block size
1288 *
1289 * @adev: amdgpu_device pointer
1290 *
1291 * Validates the vm block size specified via module parameter.
1292 * The vm block size defines the number of bits in the page table versus the
1293 * page directory; a page is 4KB so we have a 12-bit offset, at least 9 bits
1294 * in the page table, and the remaining bits in the page directory.
1295 */
1296static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1297{
1298	/* defines the number of bits in page table versus page directory,
1299	 * a page is 4KB so we have a 12-bit offset, at least 9 bits in the
1300	 * page table and the remaining bits in the page directory */
1301	if (amdgpu_vm_block_size == -1)
1302		return;
1303
1304	if (amdgpu_vm_block_size < 9) {
1305		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1306			 amdgpu_vm_block_size);
1307		amdgpu_vm_block_size = -1;
1308	}
1309}
1310
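/*
 * Editor's note, worked example not in the original source: with the
 * minimum block size of 9, one page table covers 9 + 12 = 21 bits of
 * address space (2MB) using 512 entries; all higher VA bits select
 * page directory entries.
 */
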
1311/**
1312 * amdgpu_device_check_vm_size - validate the vm size
1313 *
1314 * @adev: amdgpu_device pointer
1315 *
1316 * Validates the vm size in GB specified via module parameter.
1317 * The VM size is the size of the GPU virtual memory space in GB.
1318 */
1319static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1320{
1321	/* no need to check the default value */
1322	if (amdgpu_vm_size == -1)
1323		return;
1324
1325	if (amdgpu_vm_size < 1) {
1326		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1327			 amdgpu_vm_size);
1328		amdgpu_vm_size = -1;
1329	}
1330}
1331
1332static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1333{
1334	struct sysinfo si;
1335	bool is_os_64 = (sizeof(void *) == 8);
1336	uint64_t total_memory;
1337	uint64_t dram_size_seven_GB = 0x1B8000000;
1338	uint64_t dram_size_three_GB = 0xB8000000;
1339
1340	if (amdgpu_smu_memory_pool_size == 0)
1341		return;
1342
1343	if (!is_os_64) {
1344		DRM_WARN("Not 64-bit OS, feature not supported\n");
1345		goto def_value;
1346	}
1347	si_meminfo(&si);
1348	total_memory = (uint64_t)si.totalram * si.mem_unit;
1349
1350	if ((amdgpu_smu_memory_pool_size == 1) ||
1351		(amdgpu_smu_memory_pool_size == 2)) {
1352		if (total_memory < dram_size_three_GB)
1353			goto def_value1;
1354	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1355		(amdgpu_smu_memory_pool_size == 8)) {
1356		if (total_memory < dram_size_seven_GB)
1357			goto def_value1;
1358	} else {
1359		DRM_WARN("Smu memory pool size not supported\n");
1360		goto def_value;
1361	}
1362	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1363
1364	return;
1365
1366def_value1:
1367	DRM_WARN("Not enough system memory\n");
1368def_value:
1369	adev->pm.smu_prv_buffer_size = 0;
1370}
1371
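/*
 * Editor's note, worked example not in the original source: the shift
 * by 28 above sizes the pool in units of 256MB (1 << 28 bytes), so
 * amdgpu_smu_memory_pool_size == 4 reserves a 1GB pool, which is why
 * roughly 7GB of system DRAM is required for the larger settings.
 */
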
1372static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1373{
1374	if (!(adev->flags & AMD_IS_APU) ||
1375	    adev->asic_type < CHIP_RAVEN)
1376		return 0;
1377
1378	switch (adev->asic_type) {
1379	case CHIP_RAVEN:
1380		if (adev->pdev->device == 0x15dd)
1381			adev->apu_flags |= AMD_APU_IS_RAVEN;
1382		if (adev->pdev->device == 0x15d8)
1383			adev->apu_flags |= AMD_APU_IS_PICASSO;
1384		break;
1385	case CHIP_RENOIR:
1386		if ((adev->pdev->device == 0x1636) ||
1387		    (adev->pdev->device == 0x164c))
1388			adev->apu_flags |= AMD_APU_IS_RENOIR;
1389		else
1390			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1391		break;
1392	case CHIP_VANGOGH:
1393		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1394		break;
1395	case CHIP_YELLOW_CARP:
1396		break;
1397	default:
1398		return -EINVAL;
1399	}
1400
1401	return 0;
1402}
1403
1404/**
1405 * amdgpu_device_check_arguments - validate module params
1406 *
1407 * @adev: amdgpu_device pointer
1408 *
1409 * Validates certain module parameters and updates
1410 * the associated values used by the driver (all asics).
1411 */
1412static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1413{
1414	if (amdgpu_sched_jobs < 4) {
1415		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1416			 amdgpu_sched_jobs);
1417		amdgpu_sched_jobs = 4;
1418	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
1419		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1420			 amdgpu_sched_jobs);
1421		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1422	}
1423
1424	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1425		/* gart size must be greater than or equal to 32M */
1426		dev_warn(adev->dev, "gart size (%d) too small\n",
1427			 amdgpu_gart_size);
1428		amdgpu_gart_size = -1;
1429	}
1430
1431	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1432		/* gtt size must be greater than or equal to 32M */
1433		dev_warn(adev->dev, "gtt size (%d) too small\n",
1434				 amdgpu_gtt_size);
1435		amdgpu_gtt_size = -1;
1436	}
1437
1438	/* valid range is between 4 and 9 inclusive */
1439	if (amdgpu_vm_fragment_size != -1 &&
1440	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1441		dev_warn(adev->dev, "valid range for vm fragment size is between 4 and 9\n");
1442		amdgpu_vm_fragment_size = -1;
1443	}
1444
1445	if (amdgpu_sched_hw_submission < 2) {
1446		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1447			 amdgpu_sched_hw_submission);
1448		amdgpu_sched_hw_submission = 2;
1449	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1450		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1451			 amdgpu_sched_hw_submission);
1452		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1453	}
1454
1455	amdgpu_device_check_smu_prv_buffer_size(adev);
1456
1457	amdgpu_device_check_vm_size(adev);
1458
1459	amdgpu_device_check_block_size(adev);
1460
1461	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1462
1463	amdgpu_gmc_tmz_set(adev);
1464
1465	amdgpu_gmc_noretry_set(adev);
1466
1467	return 0;
1468}
1469
1470/**
1471 * amdgpu_switcheroo_set_state - set switcheroo state
1472 *
1473 * @pdev: pci dev pointer
1474 * @state: vga_switcheroo state
1475 *
1476 * Callback for the switcheroo driver.  Suspends or resumes
1477 * the asic before or after it is powered up using ACPI methods.
1478 */
1479static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1480					enum vga_switcheroo_state state)
1481{
1482	struct drm_device *dev = pci_get_drvdata(pdev);
1483	int r;
1484
1485	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1486		return;
1487
1488	if (state == VGA_SWITCHEROO_ON) {
1489		pr_info("switched on\n");
1490		/* don't suspend or resume card normally */
1491		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1492
1493		pci_set_power_state(pdev, PCI_D0);
1494		amdgpu_device_load_pci_state(pdev);
1495		r = pci_enable_device(pdev);
1496		if (r)
1497			DRM_WARN("pci_enable_device failed (%d)\n", r);
1498		amdgpu_device_resume(dev, true);
1499
1500		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1501	} else {
1502		pr_info("switched off\n");
1503		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1504		amdgpu_device_suspend(dev, true);
1505		amdgpu_device_cache_pci_state(pdev);
1506		/* Shut down the device */
1507		pci_disable_device(pdev);
1508		pci_set_power_state(pdev, PCI_D3cold);
1509		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1510	}
1511}
1512
1513/**
1514 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1515 *
1516 * @pdev: pci dev pointer
1517 *
1518 * Callback for the switcheroo driver.  Checks if the switcheroo
1519 * state can be changed.
1520 * Returns true if the state can be changed, false if not.
1521 */
1522static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1523{
1524	struct drm_device *dev = pci_get_drvdata(pdev);
1525
1526	/*
1527	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1528	 * locking inversion with the driver load path. And the access here is
1529	 * completely racy anyway. So don't bother with locking for now.
1530	 */
1531	return atomic_read(&dev->open_count) == 0;
1532}
1533
1534static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1535	.set_gpu_state = amdgpu_switcheroo_set_state,
1536	.reprobe = NULL,
1537	.can_switch = amdgpu_switcheroo_can_switch,
1538};
1539
1540/**
1541 * amdgpu_device_ip_set_clockgating_state - set the CG state
1542 *
1543 * @dev: amdgpu_device pointer
1544 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1545 * @state: clockgating state (gate or ungate)
1546 *
1547 * Sets the requested clockgating state for all instances of
1548 * the hardware IP specified.
1549 * Returns the error code from the last instance.
1550 */
1551int amdgpu_device_ip_set_clockgating_state(void *dev,
1552					   enum amd_ip_block_type block_type,
1553					   enum amd_clockgating_state state)
1554{
1555	struct amdgpu_device *adev = dev;
1556	int i, r = 0;
1557
1558	for (i = 0; i < adev->num_ip_blocks; i++) {
1559		if (!adev->ip_blocks[i].status.valid)
1560			continue;
1561		if (adev->ip_blocks[i].version->type != block_type)
1562			continue;
1563		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1564			continue;
1565		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1566			(void *)adev, state);
1567		if (r)
1568			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1569				  adev->ip_blocks[i].version->funcs->name, r);
1570	}
1571	return r;
1572}
1573
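/*
 * Editor's illustrative sketch, not part of the driver: a hypothetical
 * caller gating clocks on every VCE instance through the helper above.
 */
static int __maybe_unused amdgpu_example_gate_vce_clocks(struct amdgpu_device *adev)
{
	/* gate clocks on all VCE instances to save power */
	return amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						      AMD_CG_STATE_GATE);
}
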
1574/**
1575 * amdgpu_device_ip_set_powergating_state - set the PG state
1576 *
1577 * @dev: amdgpu_device pointer
1578 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1579 * @state: powergating state (gate or ungate)
1580 *
1581 * Sets the requested powergating state for all instances of
1582 * the hardware IP specified.
1583 * Returns the error code from the last instance.
1584 */
1585int amdgpu_device_ip_set_powergating_state(void *dev,
1586					   enum amd_ip_block_type block_type,
1587					   enum amd_powergating_state state)
1588{
1589	struct amdgpu_device *adev = dev;
1590	int i, r = 0;
1591
1592	for (i = 0; i < adev->num_ip_blocks; i++) {
1593		if (!adev->ip_blocks[i].status.valid)
1594			continue;
1595		if (adev->ip_blocks[i].version->type != block_type)
1596			continue;
1597		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1598			continue;
1599		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1600			(void *)adev, state);
1601		if (r)
1602			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1603				  adev->ip_blocks[i].version->funcs->name, r);
1604	}
1605	return r;
1606}
1607
1608/**
1609 * amdgpu_device_ip_get_clockgating_state - get the CG state
1610 *
1611 * @adev: amdgpu_device pointer
1612 * @flags: clockgating feature flags
1613 *
1614 * Walks the list of IPs on the device and updates the clockgating
1615 * flags for each IP.
1616 * Updates @flags with the feature flags for each hardware IP where
1617 * clockgating is enabled.
1618 */
1619void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1620					    u32 *flags)
1621{
1622	int i;
1623
1624	for (i = 0; i < adev->num_ip_blocks; i++) {
1625		if (!adev->ip_blocks[i].status.valid)
1626			continue;
1627		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1628			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1629	}
1630}
1631
1632/**
1633 * amdgpu_device_ip_wait_for_idle - wait for idle
1634 *
1635 * @adev: amdgpu_device pointer
1636 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1637 *
1638 * Waits for the requested hardware IP to be idle.
1639 * Returns 0 for success or a negative error code on failure.
1640 */
1641int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1642				   enum amd_ip_block_type block_type)
1643{
1644	int i, r;
1645
1646	for (i = 0; i < adev->num_ip_blocks; i++) {
1647		if (!adev->ip_blocks[i].status.valid)
1648			continue;
1649		if (adev->ip_blocks[i].version->type == block_type) {
1650			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1651			if (r)
1652				return r;
1653			break;
1654		}
1655	}
1656	return 0;
1657
1658}
1659
1660/**
1661 * amdgpu_device_ip_is_idle - is the hardware IP idle
1662 *
1663 * @adev: amdgpu_device pointer
1664 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1665 *
1666 * Check if the hardware IP is idle or not.
1667 * Returns true if the IP is idle, false if not.
1668 */
1669bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1670			      enum amd_ip_block_type block_type)
1671{
1672	int i;
1673
1674	for (i = 0; i < adev->num_ip_blocks; i++) {
1675		if (!adev->ip_blocks[i].status.valid)
1676			continue;
1677		if (adev->ip_blocks[i].version->type == block_type)
1678			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1679	}
1680	return true;
1681
1682}
1683
1684/**
1685 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1686 *
1687 * @adev: amdgpu_device pointer
1688 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1689 *
1690 * Returns a pointer to the hardware IP block structure
1691 * if it exists for the asic, otherwise NULL.
1692 */
1693struct amdgpu_ip_block *
1694amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1695			      enum amd_ip_block_type type)
1696{
1697	int i;
1698
1699	for (i = 0; i < adev->num_ip_blocks; i++)
1700		if (adev->ip_blocks[i].version->type == type)
1701			return &adev->ip_blocks[i];
1702
1703	return NULL;
1704}
1705
1706/**
1707 * amdgpu_device_ip_block_version_cmp
1708 *
1709 * @adev: amdgpu_device pointer
1710 * @type: enum amd_ip_block_type
1711 * @major: major version
1712 * @minor: minor version
1713 *
1714 * Returns 0 if the IP block version is equal to or greater than the one
1715 * requested, or 1 if it is smaller or the ip_block doesn't exist.
1716 */
1717int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1718				       enum amd_ip_block_type type,
1719				       u32 major, u32 minor)
1720{
1721	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1722
1723	if (ip_block && ((ip_block->version->major > major) ||
1724			((ip_block->version->major == major) &&
1725			(ip_block->version->minor >= minor))))
1726		return 0;
1727
1728	return 1;
1729}
1730
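/*
 * Editor's illustrative sketch, not part of the driver: gating a
 * feature on a minimum IP version.  The comparison returns 0 when the
 * block is at least the requested version, so this hypothetical helper
 * is true for GFX v8.1 and newer.
 */
static bool __maybe_unused
amdgpu_example_gfx_at_least_v8_1(struct amdgpu_device *adev)
{
	return amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
						  8, 1) == 0;
}
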
1731/**
1732 * amdgpu_device_ip_block_add
1733 *
1734 * @adev: amdgpu_device pointer
1735 * @ip_block_version: pointer to the IP to add
1736 *
1737 * Adds the IP block driver information to the collection of IPs
1738 * on the asic.
1739 */
1740int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1741			       const struct amdgpu_ip_block_version *ip_block_version)
1742{
1743	if (!ip_block_version)
1744		return -EINVAL;
1745
1746	switch (ip_block_version->type) {
1747	case AMD_IP_BLOCK_TYPE_VCN:
1748		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1749			return 0;
1750		break;
1751	case AMD_IP_BLOCK_TYPE_JPEG:
1752		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1753			return 0;
1754		break;
1755	default:
1756		break;
1757	}
1758
1759	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1760		  ip_block_version->funcs->name);
1761
1762	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1763
1764	return 0;
1765}
1766
1767/**
1768 * amdgpu_device_enable_virtual_display - enable virtual display feature
1769 *
1770 * @adev: amdgpu_device pointer
1771 *
1772 * Enables the virtual display feature if the user has enabled it via
1773 * the module parameter virtual_display.  This feature provides a virtual
1774 * display hardware on headless boards or in virtualized environments.
1775 * This function parses and validates the configuration string specified by
1776 * the user and configures the virtual display configuration (number of
1777 * virtual connectors, crtcs, etc.) specified.
1778 */
1779static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1780{
1781	adev->enable_virtual_display = false;
1782
1783	if (amdgpu_virtual_display) {
1784		const char *pci_address_name = pci_name(adev->pdev);
1785		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1786
1787		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1788		pciaddstr_tmp = pciaddstr;
1789		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1790			pciaddname = strsep(&pciaddname_tmp, ",");
1791			if (!strcmp("all", pciaddname)
1792			    || !strcmp(pci_address_name, pciaddname)) {
1793				long num_crtc;
1794				int res = -1;
1795
1796				adev->enable_virtual_display = true;
1797
1798				if (pciaddname_tmp)
1799					res = kstrtol(pciaddname_tmp, 10,
1800						      &num_crtc);
1801
1802				if (!res) {
1803					if (num_crtc < 1)
1804						num_crtc = 1;
1805					if (num_crtc > 6)
1806						num_crtc = 6;
1807					adev->mode_info.num_crtc = num_crtc;
1808				} else {
1809					adev->mode_info.num_crtc = 1;
1810				}
1811				break;
1812			}
1813		}
1814
1815		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1816			 amdgpu_virtual_display, pci_address_name,
1817			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1818
1819		kfree(pciaddstr);
1820	}
1821}
1822
1823/**
1824 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1825 *
1826 * @adev: amdgpu_device pointer
1827 *
1828 * Parses the asic configuration parameters specified in the gpu info
1829 * firmware and makes them available to the driver for use in configuring
1830 * the asic.
1831 * Returns 0 on success, -EINVAL on failure.
1832 */
1833static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1834{
1835	const char *chip_name;
1836	char fw_name[40];
1837	int err;
1838	const struct gpu_info_firmware_header_v1_0 *hdr;
1839
1840	adev->firmware.gpu_info_fw = NULL;
1841
1842	if (adev->mman.discovery_bin) {
1843		amdgpu_discovery_get_gfx_info(adev);
1844
1845		/*
1846		 * FIXME: The bounding box is still needed by Navi12, so
1847		 * temporarily read it from gpu_info firmware. Should be dropped
1848		 * when DAL no longer needs it.
1849		 */
1850		if (adev->asic_type != CHIP_NAVI12)
1851			return 0;
1852	}
1853
1854	switch (adev->asic_type) {
1855#ifdef CONFIG_DRM_AMDGPU_SI
1856	case CHIP_VERDE:
1857	case CHIP_TAHITI:
1858	case CHIP_PITCAIRN:
1859	case CHIP_OLAND:
1860	case CHIP_HAINAN:
1861#endif
1862#ifdef CONFIG_DRM_AMDGPU_CIK
1863	case CHIP_BONAIRE:
1864	case CHIP_HAWAII:
1865	case CHIP_KAVERI:
1866	case CHIP_KABINI:
1867	case CHIP_MULLINS:
1868#endif
1869	case CHIP_TOPAZ:
1870	case CHIP_TONGA:
1871	case CHIP_FIJI:
1872	case CHIP_POLARIS10:
1873	case CHIP_POLARIS11:
1874	case CHIP_POLARIS12:
1875	case CHIP_VEGAM:
1876	case CHIP_CARRIZO:
1877	case CHIP_STONEY:
1878	case CHIP_VEGA20:
1879	case CHIP_ALDEBARAN:
1880	case CHIP_SIENNA_CICHLID:
1881	case CHIP_NAVY_FLOUNDER:
1882	case CHIP_DIMGREY_CAVEFISH:
1883	case CHIP_BEIGE_GOBY:
1884	default:
1885		return 0;
1886	case CHIP_VEGA10:
1887		chip_name = "vega10";
1888		break;
1889	case CHIP_VEGA12:
1890		chip_name = "vega12";
1891		break;
1892	case CHIP_RAVEN:
1893		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1894			chip_name = "raven2";
1895		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1896			chip_name = "picasso";
1897		else
1898			chip_name = "raven";
1899		break;
1900	case CHIP_ARCTURUS:
1901		chip_name = "arcturus";
1902		break;
1903	case CHIP_RENOIR:
1904		if (adev->apu_flags & AMD_APU_IS_RENOIR)
1905			chip_name = "renoir";
1906		else
1907			chip_name = "green_sardine";
1908		break;
1909	case CHIP_NAVI10:
1910		chip_name = "navi10";
1911		break;
1912	case CHIP_NAVI14:
1913		chip_name = "navi14";
1914		break;
1915	case CHIP_NAVI12:
1916		chip_name = "navi12";
1917		break;
1918	case CHIP_VANGOGH:
1919		chip_name = "vangogh";
1920		break;
1921	case CHIP_YELLOW_CARP:
1922		chip_name = "yellow_carp";
1923		break;
1924	}
1925
1926	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1927	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1928	if (err) {
1929		dev_err(adev->dev,
1930			"Failed to load gpu_info firmware \"%s\"\n",
1931			fw_name);
1932		goto out;
1933	}
1934	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1935	if (err) {
1936		dev_err(adev->dev,
1937			"Failed to validate gpu_info firmware \"%s\"\n",
1938			fw_name);
1939		goto out;
1940	}
1941
1942	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1943	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1944
1945	switch (hdr->version_major) {
1946	case 1:
1947	{
1948		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1949			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1950								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1951
1952		/*
1953		 * Should be dropped when DAL no longer needs it.
1954		 */
1955		if (adev->asic_type == CHIP_NAVI12)
1956			goto parse_soc_bounding_box;
1957
1958		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1959		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1960		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1961		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1962		adev->gfx.config.max_texture_channel_caches =
1963			le32_to_cpu(gpu_info_fw->gc_num_tccs);
1964		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1965		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1966		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1967		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1968		adev->gfx.config.double_offchip_lds_buf =
1969			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1970		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1971		adev->gfx.cu_info.max_waves_per_simd =
1972			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1973		adev->gfx.cu_info.max_scratch_slots_per_cu =
1974			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1975		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1976		if (hdr->version_minor >= 1) {
1977			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1978				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1979									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1980			adev->gfx.config.num_sc_per_sh =
1981				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1982			adev->gfx.config.num_packer_per_sc =
1983				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1984		}
1985
1986parse_soc_bounding_box:
1987		/*
1988		 * soc bounding box info is not integrated into the discovery table,
1989		 * so we still need to parse it from the gpu info firmware when needed.
1990		 */
1991		if (hdr->version_minor == 2) {
1992			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1993				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1994									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1995			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1996		}
1997		break;
1998	}
1999	default:
2000		dev_err(adev->dev,
2001			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2002		err = -EINVAL;
2003		goto out;
2004	}
2005out:
2006	return err;
2007}
2008
2009/**
2010 * amdgpu_device_ip_early_init - run early init for hardware IPs
2011 *
2012 * @adev: amdgpu_device pointer
2013 *
2014 * Early initialization pass for hardware IPs.  The hardware IPs that make
2015 * up each asic are discovered and each IP's early_init callback is run.  This
2016 * is the first stage in initializing the asic.
2017 * Returns 0 on success, negative error code on failure.
2018 */
2019static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2020{
2021	int i, r;
2022
2023	amdgpu_device_enable_virtual_display(adev);
2024
2025	if (amdgpu_sriov_vf(adev)) {
2026		r = amdgpu_virt_request_full_gpu(adev, true);
2027		if (r)
2028			return r;
2029	}
2030
2031	switch (adev->asic_type) {
2032#ifdef CONFIG_DRM_AMDGPU_SI
2033	case CHIP_VERDE:
2034	case CHIP_TAHITI:
2035	case CHIP_PITCAIRN:
2036	case CHIP_OLAND:
2037	case CHIP_HAINAN:
2038		adev->family = AMDGPU_FAMILY_SI;
2039		r = si_set_ip_blocks(adev);
2040		if (r)
2041			return r;
2042		break;
2043#endif
2044#ifdef CONFIG_DRM_AMDGPU_CIK
2045	case CHIP_BONAIRE:
2046	case CHIP_HAWAII:
2047	case CHIP_KAVERI:
2048	case CHIP_KABINI:
2049	case CHIP_MULLINS:
2050		if (adev->flags & AMD_IS_APU)
2051			adev->family = AMDGPU_FAMILY_KV;
2052		else
2053			adev->family = AMDGPU_FAMILY_CI;
2054
2055		r = cik_set_ip_blocks(adev);
2056		if (r)
2057			return r;
2058		break;
2059#endif
2060	case CHIP_TOPAZ:
2061	case CHIP_TONGA:
2062	case CHIP_FIJI:
2063	case CHIP_POLARIS10:
2064	case CHIP_POLARIS11:
2065	case CHIP_POLARIS12:
2066	case CHIP_VEGAM:
2067	case CHIP_CARRIZO:
2068	case CHIP_STONEY:
2069		if (adev->flags & AMD_IS_APU)
2070			adev->family = AMDGPU_FAMILY_CZ;
2071		else
2072			adev->family = AMDGPU_FAMILY_VI;
2073
2074		r = vi_set_ip_blocks(adev);
2075		if (r)
2076			return r;
2077		break;
2078	case CHIP_VEGA10:
2079	case CHIP_VEGA12:
2080	case CHIP_VEGA20:
2081	case CHIP_RAVEN:
2082	case CHIP_ARCTURUS:
2083	case CHIP_RENOIR:
2084	case CHIP_ALDEBARAN:
2085		if (adev->flags & AMD_IS_APU)
2086			adev->family = AMDGPU_FAMILY_RV;
2087		else
2088			adev->family = AMDGPU_FAMILY_AI;
2089
2090		r = soc15_set_ip_blocks(adev);
2091		if (r)
2092			return r;
2093		break;
2094	case  CHIP_NAVI10:
2095	case  CHIP_NAVI14:
2096	case  CHIP_NAVI12:
2097	case  CHIP_SIENNA_CICHLID:
2098	case  CHIP_NAVY_FLOUNDER:
2099	case  CHIP_DIMGREY_CAVEFISH:
2100	case  CHIP_BEIGE_GOBY:
2101	case CHIP_VANGOGH:
2102	case CHIP_YELLOW_CARP:
2103		if (adev->asic_type == CHIP_VANGOGH)
2104			adev->family = AMDGPU_FAMILY_VGH;
2105		else if (adev->asic_type == CHIP_YELLOW_CARP)
2106			adev->family = AMDGPU_FAMILY_YC;
2107		else
2108			adev->family = AMDGPU_FAMILY_NV;
2109
2110		r = nv_set_ip_blocks(adev);
2111		if (r)
2112			return r;
2113		break;
2114	default:
2115		/* FIXME: not supported yet */
2116		return -EINVAL;
2117	}
2118
2119	amdgpu_amdkfd_device_probe(adev);
2120
2121	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2122	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2123		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2124	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2125		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2126
2127	for (i = 0; i < adev->num_ip_blocks; i++) {
2128		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2129			DRM_ERROR("disabled ip block: %d <%s>\n",
2130				  i, adev->ip_blocks[i].version->funcs->name);
2131			adev->ip_blocks[i].status.valid = false;
2132		} else {
2133			if (adev->ip_blocks[i].version->funcs->early_init) {
2134				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2135				if (r == -ENOENT) {
2136					adev->ip_blocks[i].status.valid = false;
2137				} else if (r) {
2138					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2139						  adev->ip_blocks[i].version->funcs->name, r);
2140					return r;
2141				} else {
2142					adev->ip_blocks[i].status.valid = true;
2143				}
2144			} else {
2145				adev->ip_blocks[i].status.valid = true;
2146			}
2147		}
2148		/* get the vbios after the asic_funcs are set up */
2149		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2150			r = amdgpu_device_parse_gpu_info_fw(adev);
2151			if (r)
2152				return r;
2153
2154			/* Read BIOS */
2155			if (!amdgpu_get_bios(adev))
2156				return -EINVAL;
2157
2158			r = amdgpu_atombios_init(adev);
2159			if (r) {
2160				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2161				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2162				return r;
2163			}
2164
2165			/* get pf2vf msg info at the earliest possible time */
2166			if (amdgpu_sriov_vf(adev))
2167				amdgpu_virt_init_data_exchange(adev);
2168
2169		}
2170	}
2171
2172	adev->cg_flags &= amdgpu_cg_mask;
2173	adev->pg_flags &= amdgpu_pg_mask;
2174
2175	return 0;
2176}
2177
2178static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2179{
2180	int i, r;
2181
2182	for (i = 0; i < adev->num_ip_blocks; i++) {
2183		if (!adev->ip_blocks[i].status.sw)
2184			continue;
2185		if (adev->ip_blocks[i].status.hw)
2186			continue;
2187		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2188		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2189		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2190			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2191			if (r) {
2192				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2193					  adev->ip_blocks[i].version->funcs->name, r);
2194				return r;
2195			}
2196			adev->ip_blocks[i].status.hw = true;
2197		}
2198	}
2199
2200	return 0;
2201}
2202
2203static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2204{
2205	int i, r;
2206
2207	for (i = 0; i < adev->num_ip_blocks; i++) {
2208		if (!adev->ip_blocks[i].status.sw)
2209			continue;
2210		if (adev->ip_blocks[i].status.hw)
2211			continue;
2212		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2213		if (r) {
2214			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2215				  adev->ip_blocks[i].version->funcs->name, r);
2216			return r;
2217		}
2218		adev->ip_blocks[i].status.hw = true;
2219	}
2220
2221	return 0;
2222}
2223
2224static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2225{
2226	int r = 0;
2227	int i;
2228	uint32_t smu_version;
2229
2230	if (adev->asic_type >= CHIP_VEGA10) {
2231		for (i = 0; i < adev->num_ip_blocks; i++) {
2232			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2233				continue;
2234
2235			if (!adev->ip_blocks[i].status.sw)
2236				continue;
2237
2238			/* no need to do the fw loading again if already done */
2239			if (adev->ip_blocks[i].status.hw)
2240				break;
2241
2242			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2243				r = adev->ip_blocks[i].version->funcs->resume(adev);
2244				if (r) {
2245					DRM_ERROR("resume of IP block <%s> failed %d\n",
2246							  adev->ip_blocks[i].version->funcs->name, r);
2247					return r;
2248				}
2249			} else {
2250				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2251				if (r) {
2252					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2253							  adev->ip_blocks[i].version->funcs->name, r);
2254					return r;
2255				}
2256			}
2257
2258			adev->ip_blocks[i].status.hw = true;
2259			break;
2260		}
2261	}
2262
2263	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2264		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2265
2266	return r;
2267}
2268
2269/**
2270 * amdgpu_device_ip_init - run init for hardware IPs
2271 *
2272 * @adev: amdgpu_device pointer
2273 *
2274 * Main initialization pass for hardware IPs.  The list of all the hardware
2275 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2276 * are run.  sw_init initializes the software state associated with each IP
2277 * and hw_init initializes the hardware associated with each IP.
2278 * Returns 0 on success, negative error code on failure.
2279 */
2280static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2281{
2282	int i, r;
2283
2284	r = amdgpu_ras_init(adev);
2285	if (r)
2286		return r;
2287
2288	for (i = 0; i < adev->num_ip_blocks; i++) {
2289		if (!adev->ip_blocks[i].status.valid)
2290			continue;
2291		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2292		if (r) {
2293			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2294				  adev->ip_blocks[i].version->funcs->name, r);
2295			goto init_failed;
2296		}
2297		adev->ip_blocks[i].status.sw = true;
2298
2299		/* need to do gmc hw init early so we can allocate gpu mem */
2300		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2301			r = amdgpu_device_vram_scratch_init(adev);
2302			if (r) {
2303				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2304				goto init_failed;
2305			}
2306			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2307			if (r) {
2308				DRM_ERROR("hw_init %d failed %d\n", i, r);
2309				goto init_failed;
2310			}
2311			r = amdgpu_device_wb_init(adev);
2312			if (r) {
2313				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2314				goto init_failed;
2315			}
2316			adev->ip_blocks[i].status.hw = true;
2317
2318			/* right after GMC hw init, we create CSA */
2319			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2320				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2321								AMDGPU_GEM_DOMAIN_VRAM,
2322								AMDGPU_CSA_SIZE);
2323				if (r) {
2324					DRM_ERROR("allocate CSA failed %d\n", r);
2325					goto init_failed;
2326				}
2327			}
2328		}
2329	}
2330
2331	if (amdgpu_sriov_vf(adev))
2332		amdgpu_virt_init_data_exchange(adev);
2333
2334	r = amdgpu_ib_pool_init(adev);
2335	if (r) {
2336		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2337		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2338		goto init_failed;
2339	}
2340
2341	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2342	if (r)
2343		goto init_failed;
2344
2345	r = amdgpu_device_ip_hw_init_phase1(adev);
2346	if (r)
2347		goto init_failed;
2348
2349	r = amdgpu_device_fw_loading(adev);
2350	if (r)
2351		goto init_failed;
2352
2353	r = amdgpu_device_ip_hw_init_phase2(adev);
2354	if (r)
2355		goto init_failed;
2356
2357	/*
2358	 * retired pages will be loaded from eeprom and reserved here,
2359	 * it should be called after amdgpu_device_ip_hw_init_phase2 since
2360	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2361	 * functional for I2C communication, which is only true at this point.
2362	 *
2363	 * amdgpu_ras_recovery_init may fail, but the upper level only cares
2364	 * about failures caused by a bad gpu state and stops the amdgpu init
2365	 * process accordingly.  For other failures it still releases all the
2366	 * resources and prints an error message rather than returning a
2367	 * negative value to the upper level.
2368	 *
2369	 * Note: theoretically, this should be called before all vram allocations
2370	 * to protect retired pages from being abused.
2371	 */
2372	r = amdgpu_ras_recovery_init(adev);
2373	if (r)
2374		goto init_failed;
2375
2376	if (adev->gmc.xgmi.num_physical_nodes > 1)
2377		amdgpu_xgmi_add_device(adev);
2378
2379	/* Don't init kfd if the whole hive needs to be reset during init */
2380	if (!adev->gmc.xgmi.pending_reset)
2381		amdgpu_amdkfd_device_init(adev);
2382
2383	r = amdgpu_amdkfd_resume_iommu(adev);
2384	if (r)
2385		goto init_failed;
2386
2387	amdgpu_fru_get_product_info(adev);
2388
2389init_failed:
2390	if (amdgpu_sriov_vf(adev))
2391		amdgpu_virt_release_full_gpu(adev, true);
2392
2393	return r;
2394}
2395
2396/**
2397 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2398 *
2399 * @adev: amdgpu_device pointer
2400 *
2401 * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2402 * this function before a GPU reset.  If the value is retained after a
2403 * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2404 */
2405static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2406{
2407	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2408}
2409
2410/**
2411 * amdgpu_device_check_vram_lost - check if vram is valid
2412 *
2413 * @adev: amdgpu_device pointer
2414 *
2415 * Checks the reset magic value written to the gart pointer in VRAM.
2416 * The driver calls this after a GPU reset to see if the contents of
2417 * VRAM have been lost or not.
2418 * Returns true if vram is lost, false if not.
2419 */
2420static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2421{
2422	if (memcmp(adev->gart.ptr, adev->reset_magic,
2423			AMDGPU_RESET_MAGIC_NUM))
2424		return true;
2425
2426	if (!amdgpu_in_reset(adev))
2427		return false;
2428
2429	/*
2430	 * For all ASICs with baco/mode1 reset, the VRAM is
2431	 * always assumed to be lost.
2432	 */
2433	switch (amdgpu_asic_reset_method(adev)) {
2434	case AMD_RESET_METHOD_BACO:
2435	case AMD_RESET_METHOD_MODE1:
2436		return true;
2437	default:
2438		return false;
2439	}
2440}
2441
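/*
 * Editor's illustrative sketch, not part of the driver: the intended
 * pairing of the two helpers above around an ASIC reset.  The reset
 * itself is elided; only the magic-value bracketing is shown, and the
 * wrapper function is hypothetical.
 */
static bool __maybe_unused
amdgpu_example_vram_survived_reset(struct amdgpu_device *adev)
{
	amdgpu_device_fill_reset_magic(adev);

	/* ... the actual ASIC reset would be performed here ... */

	return !amdgpu_device_check_vram_lost(adev);
}
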
2442/**
2443 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2444 *
2445 * @adev: amdgpu_device pointer
2446 * @state: clockgating state (gate or ungate)
2447 *
2448 * The list of all the hardware IPs that make up the asic is walked and the
2449 * set_clockgating_state callbacks are run.
2450 * The late initialization pass enables clockgating for hardware IPs;
2451 * the fini or suspend pass disables it.
2452 * Returns 0 on success, negative error code on failure.
2453 */
2454
2455int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2456			       enum amd_clockgating_state state)
2457{
2458	int i, j, r;
2459
2460	if (amdgpu_emu_mode == 1)
2461		return 0;
2462
2463	for (j = 0; j < adev->num_ip_blocks; j++) {
2464		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2465		if (!adev->ip_blocks[i].status.late_initialized)
2466			continue;
2467		/* skip CG for GFX on S0ix */
2468		if (adev->in_s0ix &&
2469		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2470			continue;
2471		/* skip CG for VCE/UVD, it's handled specially */
2472		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2473		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2474		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2475		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2476		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2477			/* enable clockgating to save power */
2478			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2479										     state);
2480			if (r) {
2481				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2482					  adev->ip_blocks[i].version->funcs->name, r);
2483				return r;
2484			}
2485		}
2486	}
2487
2488	return 0;
2489}
2490
2491int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2492			       enum amd_powergating_state state)
2493{
2494	int i, j, r;
2495
2496	if (amdgpu_emu_mode == 1)
2497		return 0;
2498
2499	for (j = 0; j < adev->num_ip_blocks; j++) {
2500		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2501		if (!adev->ip_blocks[i].status.late_initialized)
2502			continue;
2503		/* skip PG for GFX on S0ix */
2504		if (adev->in_s0ix &&
2505		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2506			continue;
2507		/* skip PG for VCE/UVD, it's handled specially */
2508		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2509		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2510		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2511		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2512		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2513			/* enable powergating to save power */
2514			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2515											state);
2516			if (r) {
2517				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2518					  adev->ip_blocks[i].version->funcs->name, r);
2519				return r;
2520			}
2521		}
2522	}
2523	return 0;
2524}
2525
2526static int amdgpu_device_enable_mgpu_fan_boost(void)
2527{
2528	struct amdgpu_gpu_instance *gpu_ins;
2529	struct amdgpu_device *adev;
2530	int i, ret = 0;
2531
2532	mutex_lock(&mgpu_info.mutex);
2533
2534	/*
2535	 * MGPU fan boost feature should be enabled
2536	 * only when there are two or more dGPUs in
2537	 * the system
2538	 */
2539	if (mgpu_info.num_dgpu < 2)
2540		goto out;
2541
2542	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2543		gpu_ins = &(mgpu_info.gpu_ins[i]);
2544		adev = gpu_ins->adev;
2545		if (!(adev->flags & AMD_IS_APU) &&
2546		    !gpu_ins->mgpu_fan_enabled) {
2547			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2548			if (ret)
2549				break;
2550
2551			gpu_ins->mgpu_fan_enabled = 1;
2552		}
2553	}
2554
2555out:
2556	mutex_unlock(&mgpu_info.mutex);
2557
2558	return ret;
2559}
2560
2561/**
2562 * amdgpu_device_ip_late_init - run late init for hardware IPs
2563 *
2564 * @adev: amdgpu_device pointer
2565 *
2566 * Late initialization pass for hardware IPs.  The list of all the hardware
2567 * IPs that make up the asic is walked and the late_init callbacks are run.
2568 * late_init covers any special initialization that an IP requires
2569 * after all of the IPs have been initialized or something that needs to happen
2570 * late in the init process.
2571 * Returns 0 on success, negative error code on failure.
2572 */
2573static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2574{
2575	struct amdgpu_gpu_instance *gpu_instance;
2576	int i = 0, r;
2577
2578	for (i = 0; i < adev->num_ip_blocks; i++) {
2579		if (!adev->ip_blocks[i].status.hw)
2580			continue;
2581		if (adev->ip_blocks[i].version->funcs->late_init) {
2582			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2583			if (r) {
2584				DRM_ERROR("late_init of IP block <%s> failed %d\n",
2585					  adev->ip_blocks[i].version->funcs->name, r);
2586				return r;
2587			}
2588		}
2589		adev->ip_blocks[i].status.late_initialized = true;
2590	}
2591
2592	amdgpu_ras_set_error_query_ready(adev, true);
2593
2594	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2595	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2596
2597	amdgpu_device_fill_reset_magic(adev);
2598
2599	r = amdgpu_device_enable_mgpu_fan_boost();
2600	if (r)
2601		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2602
2603	/* For XGMI + passthrough configuration on arcturus, enable light SBR */
2604	if (adev->asic_type == CHIP_ARCTURUS &&
2605	    amdgpu_passthrough(adev) &&
2606	    adev->gmc.xgmi.num_physical_nodes > 1)
2607		smu_set_light_sbr(&adev->smu, true);
2608
2609	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2610		mutex_lock(&mgpu_info.mutex);
2611
2612		/*
2613		 * Reset device p-state to low as this was booted with high.
2614		 *
2615		 * This should be performed only after all devices from the same
2616		 * hive get initialized.
2617		 *
2618		 * However, the number of devices in a hive is not known in
2619		 * advance; it is counted one by one as devices initialize.
2620		 *
2621		 * So, we wait until all XGMI interlinked devices are initialized.
2622		 * This may bring some delays as those devices may come from
2623		 * different hives. But that should be OK.
2624		 */
2625		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2626			for (i = 0; i < mgpu_info.num_gpu; i++) {
2627				gpu_instance = &(mgpu_info.gpu_ins[i]);
2628				if (gpu_instance->adev->flags & AMD_IS_APU)
2629					continue;
2630
2631				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2632						AMDGPU_XGMI_PSTATE_MIN);
2633				if (r) {
2634					DRM_ERROR("pstate setting failed (%d).\n", r);
2635					break;
2636				}
2637			}
2638		}
2639
2640		mutex_unlock(&mgpu_info.mutex);
2641	}
2642
2643	return 0;
2644}
2645
2646static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2647{
2648	int i, r;
2649
2650	for (i = 0; i < adev->num_ip_blocks; i++) {
2651		if (!adev->ip_blocks[i].version->funcs->early_fini)
2652			continue;
2653
2654		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2655		if (r) {
2656			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2657				  adev->ip_blocks[i].version->funcs->name, r);
2658		}
2659	}
2660
2661	amdgpu_amdkfd_suspend(adev, false);
2662
2663	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2664	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2665
2666	/* need to disable SMC first */
2667	for (i = 0; i < adev->num_ip_blocks; i++) {
2668		if (!adev->ip_blocks[i].status.hw)
2669			continue;
2670		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2671			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2672			/* XXX handle errors */
2673			if (r) {
2674				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2675					  adev->ip_blocks[i].version->funcs->name, r);
2676			}
2677			adev->ip_blocks[i].status.hw = false;
2678			break;
2679		}
2680	}
2681
2682	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2683		if (!adev->ip_blocks[i].status.hw)
2684			continue;
2685
2686		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2687		/* XXX handle errors */
2688		if (r) {
2689			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2690				  adev->ip_blocks[i].version->funcs->name, r);
2691		}
2692
2693		adev->ip_blocks[i].status.hw = false;
2694	}
2695
2696	return 0;
2697}
2698
2699/**
2700 * amdgpu_device_ip_fini - run fini for hardware IPs
2701 *
2702 * @adev: amdgpu_device pointer
2703 *
2704 * Main teardown pass for hardware IPs.  The list of all the hardware
2705 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2706 * are run.  hw_fini tears down the hardware associated with each IP
2707 * and sw_fini tears down any software state associated with each IP.
2708 * Returns 0 on success, negative error code on failure.
2709 */
2710static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2711{
2712	int i, r;
2713
2714	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2715		amdgpu_virt_release_ras_err_handler_data(adev);
2716
2717	amdgpu_ras_pre_fini(adev);
2718
2719	if (adev->gmc.xgmi.num_physical_nodes > 1)
2720		amdgpu_xgmi_remove_device(adev);
2721
2722	amdgpu_amdkfd_device_fini_sw(adev);
2723
2724	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2725		if (!adev->ip_blocks[i].status.sw)
2726			continue;
2727
2728		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2729			amdgpu_ucode_free_bo(adev);
2730			amdgpu_free_static_csa(&adev->virt.csa_obj);
2731			amdgpu_device_wb_fini(adev);
2732			amdgpu_device_vram_scratch_fini(adev);
2733			amdgpu_ib_pool_fini(adev);
2734		}
2735
2736		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2737		/* XXX handle errors */
2738		if (r) {
2739			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2740				  adev->ip_blocks[i].version->funcs->name, r);
2741		}
2742		adev->ip_blocks[i].status.sw = false;
2743		adev->ip_blocks[i].status.valid = false;
2744	}
2745
2746	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2747		if (!adev->ip_blocks[i].status.late_initialized)
2748			continue;
2749		if (adev->ip_blocks[i].version->funcs->late_fini)
2750			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2751		adev->ip_blocks[i].status.late_initialized = false;
2752	}
2753
2754	amdgpu_ras_fini(adev);
2755
2756	if (amdgpu_sriov_vf(adev))
2757		if (amdgpu_virt_release_full_gpu(adev, false))
2758			DRM_ERROR("failed to release exclusive mode on fini\n");
2759
2760	return 0;
2761}
2762
2763/**
2764 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2765 *
2766 * @work: work_struct.
2767 */
2768static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2769{
2770	struct amdgpu_device *adev =
2771		container_of(work, struct amdgpu_device, delayed_init_work.work);
2772	int r;
2773
2774	r = amdgpu_ib_ring_tests(adev);
2775	if (r)
2776		DRM_ERROR("ib ring test failed (%d).\n", r);
2777}
2778
2779static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2780{
2781	struct amdgpu_device *adev =
2782		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2783
2784	WARN_ON_ONCE(adev->gfx.gfx_off_state);
2785	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2786
2787	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2788		adev->gfx.gfx_off_state = true;
2789}
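
/*
 * For context, a condensed sketch of the counterpart that arms the delayed
 * worker above.  This is an assumption about how the request counting in
 * amdgpu_gfx_off_ctrl() pairs with the worker, not a verbatim copy: the
 * last "allow" schedules the delayed work, the first "disallow" cancels it
 * and, if GFXOFF was already entered, leaves it again.
 */
#if 0
	mutex_lock(&adev->gfx.gfx_off_mutex);
	if (enable) {
		if (!--adev->gfx.gfx_off_req_count)	/* last release */
			schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
					      delay);
	} else {
		if (adev->gfx.gfx_off_req_count++ == 0) {	/* first hold */
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
			if (adev->gfx.gfx_off_state &&
			    !amdgpu_dpm_set_powergating_by_smu(adev,
					AMD_IP_BLOCK_TYPE_GFX, false))
				adev->gfx.gfx_off_state = false;
		}
	}
	mutex_unlock(&adev->gfx.gfx_off_mutex);
#endif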
2790
2791/**
2792 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2793 *
2794 * @adev: amdgpu_device pointer
2795 *
2796 * First phase of hardware IP suspend.  Clock and power gating are
2797 * ungated, then the list of all the hardware IPs that make up the asic
2798 * is walked and the suspend callbacks are run for the display (DCE)
2799 * blocks; everything else is handled in phase 2.
2800 * Returns 0 on success, negative error code on failure.
2801 */
2802static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2803{
2804	int i, r;
2805
2806	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2807	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2808
2809	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2810		if (!adev->ip_blocks[i].status.valid)
2811			continue;
2812
2813		/* displays are handled separately */
2814		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2815			continue;
2816
2818		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2819		/* XXX handle errors */
2820		if (r) {
2821			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2822				  adev->ip_blocks[i].version->funcs->name, r);
2823			return r;
2824		}
2825
2826		adev->ip_blocks[i].status.hw = false;
2827	}
2828
2829	return 0;
2830}
2831
2832/**
2833 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2834 *
2835 * @adev: amdgpu_device pointer
2836 *
2837 * Second phase of hardware IP suspend.  The list of all the hardware
2838 * IPs that make up the asic is walked and the suspend callbacks are run
2839 * for every block except the displays (handled in phase 1).  suspend
2840 * puts the hardware and software state of each IP into a state suitable for suspend.
2841 * Returns 0 on success, negative error code on failure.
2842 */
2843static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2844{
2845	int i, r;
2846
2847	if (adev->in_s0ix)
2848		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
2849
2850	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2851		if (!adev->ip_blocks[i].status.valid)
2852			continue;
2853		/* displays are handled in phase1 */
2854		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2855			continue;
2856		/* PSP lost connection when err_event_athub occurs */
2857		if (amdgpu_ras_intr_triggered() &&
2858		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2859			adev->ip_blocks[i].status.hw = false;
2860			continue;
2861		}
2862
2863		/* skip unnecessary suspend for blocks that have not been initialized yet */
2864		if (adev->gmc.xgmi.pending_reset &&
2865		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2866		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2867		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2868		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2869			adev->ip_blocks[i].status.hw = false;
2870			continue;
2871		}
2872
2873		/* skip suspend of gfx and psp for S0ix
2874		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
2875		 * like at runtime. PSP is also part of the always on hardware
2876		 * so no need to suspend it.
2877		 */
2878		if (adev->in_s0ix &&
2879		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2880		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2881			continue;
2882
2884		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2885		/* XXX handle errors */
2886		if (r) {
2887			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2888				  adev->ip_blocks[i].version->funcs->name, r);
2889		}
2890		adev->ip_blocks[i].status.hw = false;
2891		/* handle putting the SMC in the appropriate state */
2892		if (!amdgpu_sriov_vf(adev)) {
2893			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2894				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2895				if (r) {
2896					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2897							adev->mp1_state, r);
2898					return r;
2899				}
2900			}
2901		}
2902	}
2903
2904	return 0;
2905}
2906
2907/**
2908 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2909 *
2910 * @adev: amdgpu_device pointer
2911 *
2912 * Main suspend function for hardware IPs.  The list of all the hardware
2913 * IPs that make up the asic is walked, clockgating is disabled and the
2914 * suspend callbacks are run.  suspend puts the hardware and software state
2915 * in each IP into a state suitable for suspend.
2916 * Returns 0 on success, negative error code on failure.
2917 */
2918int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2919{
2920	int r;
2921
2922	if (amdgpu_sriov_vf(adev)) {
2923		amdgpu_virt_fini_data_exchange(adev);
2924		amdgpu_virt_request_full_gpu(adev, false);
2925	}
2926
2927	r = amdgpu_device_ip_suspend_phase1(adev);
2928	if (r)
2929		return r;
2930	r = amdgpu_device_ip_suspend_phase2(adev);
2931
2932	if (amdgpu_sriov_vf(adev))
2933		amdgpu_virt_release_full_gpu(adev, false);
2934
2935	return r;
2936}
2937
2938static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2939{
2940	int i, r;
2941
2942	static enum amd_ip_block_type ip_order[] = {
2943		AMD_IP_BLOCK_TYPE_GMC,
2944		AMD_IP_BLOCK_TYPE_COMMON,
2945		AMD_IP_BLOCK_TYPE_PSP,
2946		AMD_IP_BLOCK_TYPE_IH,
2947	};
2948
2949	for (i = 0; i < adev->num_ip_blocks; i++) {
2950		int j;
2951		struct amdgpu_ip_block *block;
2952
2953		block = &adev->ip_blocks[i];
2954		block->status.hw = false;
2955
2956		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2957
2958			if (block->version->type != ip_order[j] ||
2959				!block->status.valid)
2960				continue;
2961
2962			r = block->version->funcs->hw_init(adev);
2963			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2964			if (r)
2965				return r;
2966			block->status.hw = true;
2967		}
2968	}
2969
2970	return 0;
2971}
2972
2973static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2974{
2975	int i, r;
2976
2977	static enum amd_ip_block_type ip_order[] = {
2978		AMD_IP_BLOCK_TYPE_SMC,
2979		AMD_IP_BLOCK_TYPE_DCE,
2980		AMD_IP_BLOCK_TYPE_GFX,
2981		AMD_IP_BLOCK_TYPE_SDMA,
2982		AMD_IP_BLOCK_TYPE_UVD,
2983		AMD_IP_BLOCK_TYPE_VCE,
2984		AMD_IP_BLOCK_TYPE_VCN
2985	};
2986
2987	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2988		int j;
2989		struct amdgpu_ip_block *block;
2990
2991		for (j = 0; j < adev->num_ip_blocks; j++) {
2992			block = &adev->ip_blocks[j];
2993
2994			if (block->version->type != ip_order[i] ||
2995				!block->status.valid ||
2996				block->status.hw)
2997				continue;
2998
2999			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3000				r = block->version->funcs->resume(adev);
3001			else
3002				r = block->version->funcs->hw_init(adev);
3003
3004			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3005			if (r)
3006				return r;
3007			block->status.hw = true;
3008		}
3009	}
3010
3011	return 0;
3012}
3013
3014/**
3015 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3016 *
3017 * @adev: amdgpu_device pointer
3018 *
3019 * First resume function for hardware IPs.  The list of all the hardware
3020 * IPs that make up the asic is walked and the resume callbacks are run for
3021 * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3022 * after a suspend and updates the software state as necessary.  This
3023 * function is also used for restoring the GPU after a GPU reset.
3024 * Returns 0 on success, negative error code on failure.
3025 */
3026static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3027{
3028	int i, r;
3029
3030	for (i = 0; i < adev->num_ip_blocks; i++) {
3031		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3032			continue;
3033		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3034		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3035		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3036
3037			r = adev->ip_blocks[i].version->funcs->resume(adev);
3038			if (r) {
3039				DRM_ERROR("resume of IP block <%s> failed %d\n",
3040					  adev->ip_blocks[i].version->funcs->name, r);
3041				return r;
3042			}
3043			adev->ip_blocks[i].status.hw = true;
3044		}
3045	}
3046
3047	return 0;
3048}
3049
3050/**
3051 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3052 *
3053 * @adev: amdgpu_device pointer
3054 *
3055 * Second resume function for hardware IPs.  The list of all the hardware
3056 * IPs that make up the asic is walked and the resume callbacks are run for
3057 * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3058 * functional state after a suspend and updates the software state as
3059 * necessary.  This function is also used for restoring the GPU after a GPU
3060 * reset.
3061 * Returns 0 on success, negative error code on failure.
3062 */
3063static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3064{
3065	int i, r;
3066
3067	for (i = 0; i < adev->num_ip_blocks; i++) {
3068		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3069			continue;
3070		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3071		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3072		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3073		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3074			continue;
3075		r = adev->ip_blocks[i].version->funcs->resume(adev);
3076		if (r) {
3077			DRM_ERROR("resume of IP block <%s> failed %d\n",
3078				  adev->ip_blocks[i].version->funcs->name, r);
3079			return r;
3080		}
3081		adev->ip_blocks[i].status.hw = true;
3082	}
3083
3084	return 0;
3085}
3086
3087/**
3088 * amdgpu_device_ip_resume - run resume for hardware IPs
3089 *
3090 * @adev: amdgpu_device pointer
3091 *
3092 * Main resume function for hardware IPs.  The hardware IPs
3093 * are split into two resume functions because they are
3094 * also used in recovering from a GPU reset, where some additional
3095 * steps need to be taken between them.  In this case (S3/S4) they are
3096 * run sequentially.
3097 * Returns 0 on success, negative error code on failure.
3098 */
3099static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3100{
3101	int r;
3102
3103	r = amdgpu_amdkfd_resume_iommu(adev);
3104	if (r)
3105		return r;
3106
3107	r = amdgpu_device_ip_resume_phase1(adev);
3108	if (r)
3109		return r;
3110
3111	r = amdgpu_device_fw_loading(adev);
3112	if (r)
3113		return r;
3114
3115	r = amdgpu_device_ip_resume_phase2(adev);
3116
3117	return r;
3118}
3119
3120/**
3121 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3122 *
3123 * @adev: amdgpu_device pointer
3124 *
3125 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3126 */
3127static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3128{
3129	if (amdgpu_sriov_vf(adev)) {
3130		if (adev->is_atom_fw) {
3131			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3132				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3133		} else {
3134			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3135				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3136		}
3137
3138		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3139			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3140	}
3141}
3142
3143/**
3144 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3145 *
3146 * @asic_type: AMD asic type
3147 *
3148 * Check if there is DC (new modesetting infrastructure) support for an asic.
3149 * returns true if DC has support, false if not.
3150 */
3151bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3152{
3153	switch (asic_type) {
3154#if defined(CONFIG_DRM_AMD_DC)
3155#if defined(CONFIG_DRM_AMD_DC_SI)
3156	case CHIP_TAHITI:
3157	case CHIP_PITCAIRN:
3158	case CHIP_VERDE:
3159	case CHIP_OLAND:
3160#endif
3161	case CHIP_BONAIRE:
3162	case CHIP_KAVERI:
3163	case CHIP_KABINI:
3164	case CHIP_MULLINS:
3165		/*
3166		 * We have systems in the wild with these ASICs that require
3167		 * LVDS and VGA support which is not supported with DC.
3168		 *
3169		 * Fallback to the non-DC driver here by default so as not to
3170		 * cause regressions.
3171		 */
3172		return amdgpu_dc > 0;
3173	case CHIP_HAWAII:
3174	case CHIP_CARRIZO:
3175	case CHIP_STONEY:
3176	case CHIP_POLARIS10:
3177	case CHIP_POLARIS11:
3178	case CHIP_POLARIS12:
3179	case CHIP_VEGAM:
3180	case CHIP_TONGA:
3181	case CHIP_FIJI:
3182	case CHIP_VEGA10:
3183	case CHIP_VEGA12:
3184	case CHIP_VEGA20:
3185#if defined(CONFIG_DRM_AMD_DC_DCN)
3186	case CHIP_RAVEN:
3187	case CHIP_NAVI10:
3188	case CHIP_NAVI14:
3189	case CHIP_NAVI12:
3190	case CHIP_RENOIR:
3191	case CHIP_SIENNA_CICHLID:
3192	case CHIP_NAVY_FLOUNDER:
3193	case CHIP_DIMGREY_CAVEFISH:
3194	case CHIP_BEIGE_GOBY:
3195	case CHIP_VANGOGH:
3196	case CHIP_YELLOW_CARP:
3197#endif
3198		return amdgpu_dc != 0;
3199#endif
3200	default:
3201		if (amdgpu_dc > 0)
3202			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3203					 "but isn't supported by ASIC, ignoring\n");
3204		return false;
3205	}
3206}
3207
3208/**
3209 * amdgpu_device_has_dc_support - check if dc is supported
3210 *
3211 * @adev: amdgpu_device pointer
3212 *
3213 * Returns true for supported, false for not supported
3214 */
3215bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3216{
3217	if (amdgpu_sriov_vf(adev) || 
3218	    adev->enable_virtual_display ||
3219	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3220		return false;
3221
3222	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3223}
3224
3225static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3226{
3227	struct amdgpu_device *adev =
3228		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3229	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3230
3231	/* It's a bug to not have a hive within this function */
3232	if (WARN_ON(!hive))
3233		return;
3234
3235	/*
3236	 * Use task barrier to synchronize all xgmi reset works across the
3237	 * hive. task_barrier_enter and task_barrier_exit will block
3238	 * until all the threads running the xgmi reset works reach
3239	 * those points. task_barrier_full will do both blocks.
3240	 */
3241	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3242
3243		task_barrier_enter(&hive->tb);
3244		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3245
3246		if (adev->asic_reset_res)
3247			goto fail;
3248
3249		task_barrier_exit(&hive->tb);
3250		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3251
3252		if (adev->asic_reset_res)
3253			goto fail;
3254
3255		if (adev->mmhub.ras_funcs &&
3256		    adev->mmhub.ras_funcs->reset_ras_error_count)
3257			adev->mmhub.ras_funcs->reset_ras_error_count(adev);
3258	} else {
3259
3260		task_barrier_full(&hive->tb);
3261		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3262	}
3263
3264fail:
3265	if (adev->asic_reset_res)
3266		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3267			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3268	amdgpu_put_xgmi_hive(hive);
3269}
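
/*
 * Sketch of the barrier pairing above (illustrative only; the BACO helpers
 * are hypothetical): every reset worker in the hive must reach
 * task_barrier_enter() before any of them starts BACO entry, and reach
 * task_barrier_exit() before any of them starts BACO exit, so all hive
 * members cross the reset window together.
 */
#if 0
	task_barrier_enter(&hive->tb);	/* rendezvous: all workers arrived */
	enter_baco(adev);		/* hypothetical helper */
	task_barrier_exit(&hive->tb);	/* rendezvous again before leaving */
	exit_baco(adev);		/* hypothetical helper */
#endif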
3270
3271static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3272{
3273	char *input = amdgpu_lockup_timeout;
3274	char *timeout_setting = NULL;
3275	int index = 0;
3276	long timeout;
3277	int ret = 0;
3278
3279	/*
3280	 * By default the timeout for non-compute jobs is 10000
3281	 * and 60000 for compute jobs.
3282	 * In SR-IOV mode the compute timeout defaults to 60000 only in
3283	 * one-VF (pp_one_vf) mode, otherwise to 10000.
3284	 */
3285	adev->gfx_timeout = msecs_to_jiffies(10000);
3286	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3287	if (amdgpu_sriov_vf(adev))
3288		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3289					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3290	else
3291		adev->compute_timeout =  msecs_to_jiffies(60000);
3292
3293	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3294		while ((timeout_setting = strsep(&input, ",")) &&
3295				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3296			ret = kstrtol(timeout_setting, 0, &timeout);
3297			if (ret)
3298				return ret;
3299
3300			if (timeout == 0) {
3301				index++;
3302				continue;
3303			} else if (timeout < 0) {
3304				timeout = MAX_SCHEDULE_TIMEOUT;
3305			} else {
3306				timeout = msecs_to_jiffies(timeout);
3307			}
3308
3309			switch (index++) {
3310			case 0:
3311				adev->gfx_timeout = timeout;
3312				break;
3313			case 1:
3314				adev->compute_timeout = timeout;
3315				break;
3316			case 2:
3317				adev->sdma_timeout = timeout;
3318				break;
3319			case 3:
3320				adev->video_timeout = timeout;
3321				break;
3322			default:
3323				break;
3324			}
3325		}
3326		/*
3327		 * There is only one value specified and
3328		 * it should apply to all non-compute jobs.
3329		 */
3330		if (index == 1) {
3331			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3332			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3333				adev->compute_timeout = adev->gfx_timeout;
3334		}
3335	}
3336
3337	return ret;
3338}
3339
3340static const struct attribute *amdgpu_dev_attributes[] = {
3341	&dev_attr_product_name.attr,
3342	&dev_attr_product_number.attr,
3343	&dev_attr_serial_number.attr,
3344	&dev_attr_pcie_replay_count.attr,
3345	NULL
3346};
3347
3348/**
3349 * amdgpu_device_init - initialize the driver
3350 *
3351 * @adev: amdgpu_device pointer
3352 * @flags: driver flags
3353 *
3354 * Initializes the driver info and hw (all asics).
3355 * Returns 0 for success or an error on failure.
3356 * Called at driver startup.
3357 */
3358int amdgpu_device_init(struct amdgpu_device *adev,
3359		       uint32_t flags)
3360{
3361	struct drm_device *ddev = adev_to_drm(adev);
3362	struct pci_dev *pdev = adev->pdev;
3363	int r, i;
3364	bool px = false;
3365	u32 max_MBps;
3366
3367	adev->shutdown = false;
3368	adev->flags = flags;
3369
3370	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3371		adev->asic_type = amdgpu_force_asic_type;
3372	else
3373		adev->asic_type = flags & AMD_ASIC_MASK;
3374
3375	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3376	if (amdgpu_emu_mode == 1)
3377		adev->usec_timeout *= 10;
3378	adev->gmc.gart_size = 512 * 1024 * 1024;
3379	adev->accel_working = false;
3380	adev->num_rings = 0;
3381	adev->mman.buffer_funcs = NULL;
3382	adev->mman.buffer_funcs_ring = NULL;
3383	adev->vm_manager.vm_pte_funcs = NULL;
3384	adev->vm_manager.vm_pte_num_scheds = 0;
3385	adev->gmc.gmc_funcs = NULL;
3386	adev->harvest_ip_mask = 0x0;
3387	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3388	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3389
3390	adev->smc_rreg = &amdgpu_invalid_rreg;
3391	adev->smc_wreg = &amdgpu_invalid_wreg;
3392	adev->pcie_rreg = &amdgpu_invalid_rreg;
3393	adev->pcie_wreg = &amdgpu_invalid_wreg;
3394	adev->pciep_rreg = &amdgpu_invalid_rreg;
3395	adev->pciep_wreg = &amdgpu_invalid_wreg;
3396	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3397	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3398	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3399	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3400	adev->didt_rreg = &amdgpu_invalid_rreg;
3401	adev->didt_wreg = &amdgpu_invalid_wreg;
3402	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3403	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3404	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3405	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3406
3407	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3408		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3409		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3410
3411	/* mutex initializations are all done here so we
3412	 * can recall functions without having locking issues */
3413	mutex_init(&adev->firmware.mutex);
3414	mutex_init(&adev->pm.mutex);
3415	mutex_init(&adev->gfx.gpu_clock_mutex);
3416	mutex_init(&adev->srbm_mutex);
3417	mutex_init(&adev->gfx.pipe_reserve_mutex);
3418	mutex_init(&adev->gfx.gfx_off_mutex);
3419	mutex_init(&adev->grbm_idx_mutex);
3420	mutex_init(&adev->mn_lock);
3421	mutex_init(&adev->virt.vf_errors.lock);
3422	hash_init(adev->mn_hash);
3423	atomic_set(&adev->in_gpu_reset, 0);
3424	init_rwsem(&adev->reset_sem);
3425	mutex_init(&adev->psp.mutex);
3426	mutex_init(&adev->notifier_lock);
3427
3428	r = amdgpu_device_init_apu_flags(adev);
3429	if (r)
3430		return r;
3431
3432	r = amdgpu_device_check_arguments(adev);
3433	if (r)
3434		return r;
3435
3436	spin_lock_init(&adev->mmio_idx_lock);
3437	spin_lock_init(&adev->smc_idx_lock);
3438	spin_lock_init(&adev->pcie_idx_lock);
3439	spin_lock_init(&adev->uvd_ctx_idx_lock);
3440	spin_lock_init(&adev->didt_idx_lock);
3441	spin_lock_init(&adev->gc_cac_idx_lock);
3442	spin_lock_init(&adev->se_cac_idx_lock);
3443	spin_lock_init(&adev->audio_endpt_idx_lock);
3444	spin_lock_init(&adev->mm_stats.lock);
3445
3446	INIT_LIST_HEAD(&adev->shadow_list);
3447	mutex_init(&adev->shadow_list_lock);
3448
3449	INIT_LIST_HEAD(&adev->reset_list);
3450
3451	INIT_DELAYED_WORK(&adev->delayed_init_work,
3452			  amdgpu_device_delayed_init_work_handler);
3453	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3454			  amdgpu_device_delay_enable_gfx_off);
3455
3456	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3457
3458	adev->gfx.gfx_off_req_count = 1;
3459	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3460
3461	atomic_set(&adev->throttling_logging_enabled, 1);
3462	/*
3463	 * If throttling continues, logging will be performed every minute
3464	 * to avoid log flooding. "-1" is subtracted since the thermal
3465	 * throttling interrupt comes every second. Thus, the total logging
3466	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3467	 * for the throttling interrupt) = 60 seconds.
3468	 */
3469	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3470	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3471
3472	/* Registers mapping */
3473	/* TODO: block userspace mapping of io register */
3474	if (adev->asic_type >= CHIP_BONAIRE) {
3475		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3476		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3477	} else {
3478		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3479		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3480	}
3481
3482	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3483	if (adev->rmmio == NULL) {
3484		return -ENOMEM;
3485	}
3486	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3487	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3488
3489	/* enable PCIE atomic ops */
3490	r = pci_enable_atomic_ops_to_root(adev->pdev,
3491					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3492					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3493	if (r) {
3494		adev->have_atomics_support = false;
3495		DRM_INFO("PCIE atomic ops are not supported\n");
3496	} else {
3497		adev->have_atomics_support = true;
3498	}
3499
3500	amdgpu_device_get_pcie_info(adev);
3501
3502	if (amdgpu_mcbp)
3503		DRM_INFO("MCBP is enabled\n");
3504
3505	if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3506		adev->enable_mes = true;
3507
3508	/* detect hw virtualization here */
3509	amdgpu_detect_virtualization(adev);
3510
3511	r = amdgpu_device_get_job_timeout_settings(adev);
3512	if (r) {
3513		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3514		return r;
3515	}
3516
3517	/* early init functions */
3518	r = amdgpu_device_ip_early_init(adev);
3519	if (r)
3520		return r;
3521
3522	/* doorbell bar mapping and doorbell index init */
3523	amdgpu_device_doorbell_init(adev);
3524
3525	if (amdgpu_emu_mode == 1) {
3526		/* post the asic in emulation mode */
3527		emu_soc_asic_init(adev);
3528		goto fence_driver_init;
3529	}
3530
3531	amdgpu_reset_init(adev);
3532
3533	/* detect if we have an SR-IOV vbios */
3534	amdgpu_device_detect_sriov_bios(adev);
3535
3536	/* check if we need to reset the asic
3537	 *  E.g., driver was not cleanly unloaded previously, etc.
3538	 */
3539	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3540		if (adev->gmc.xgmi.num_physical_nodes) {
3541			dev_info(adev->dev, "Pending hive reset.\n");
3542			adev->gmc.xgmi.pending_reset = true;
3543			/* Only need to init necessary block for SMU to handle the reset */
3544			for (i = 0; i < adev->num_ip_blocks; i++) {
3545				if (!adev->ip_blocks[i].status.valid)
3546					continue;
3547				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3548				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3549				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3550				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3551					DRM_DEBUG("IP %s disabled for hw_init.\n",
3552						adev->ip_blocks[i].version->funcs->name);
3553					adev->ip_blocks[i].status.hw = true;
3554				}
3555			}
3556		} else {
3557			r = amdgpu_asic_reset(adev);
3558			if (r) {
3559				dev_err(adev->dev, "asic reset on init failed\n");
3560				goto failed;
3561			}
3562		}
3563	}
3564
3565	pci_enable_pcie_error_reporting(adev->pdev);
3566
3567	/* Post card if necessary */
3568	if (amdgpu_device_need_post(adev)) {
3569		if (!adev->bios) {
3570			dev_err(adev->dev, "no vBIOS found\n");
3571			r = -EINVAL;
3572			goto failed;
3573		}
3574		DRM_INFO("GPU posting now...\n");
3575		r = amdgpu_device_asic_init(adev);
3576		if (r) {
3577			dev_err(adev->dev, "gpu post error!\n");
3578			goto failed;
3579		}
3580	}
3581
3582	if (adev->is_atom_fw) {
3583		/* Initialize clocks */
3584		r = amdgpu_atomfirmware_get_clock_info(adev);
3585		if (r) {
3586			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3587			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3588			goto failed;
3589		}
3590	} else {
3591		/* Initialize clocks */
3592		r = amdgpu_atombios_get_clock_info(adev);
3593		if (r) {
3594			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3595			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3596			goto failed;
3597		}
3598		/* init i2c buses */
3599		if (!amdgpu_device_has_dc_support(adev))
3600			amdgpu_atombios_i2c_init(adev);
3601	}
3602
3603fence_driver_init:
3604	/* Fence driver */
3605	r = amdgpu_fence_driver_sw_init(adev);
3606	if (r) {
3607		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3608		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3609		goto failed;
3610	}
3611
3612	/* init the mode config */
3613	drm_mode_config_init(adev_to_drm(adev));
3614
3615	r = amdgpu_device_ip_init(adev);
3616	if (r) {
3617		/* failed in exclusive mode due to timeout */
3618		if (amdgpu_sriov_vf(adev) &&
3619		    !amdgpu_sriov_runtime(adev) &&
3620		    amdgpu_virt_mmio_blocked(adev) &&
3621		    !amdgpu_virt_wait_reset(adev)) {
3622			dev_err(adev->dev, "VF exclusive mode timeout\n");
3623			/* Don't send request since VF is inactive. */
3624			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3625			adev->virt.ops = NULL;
3626			r = -EAGAIN;
3627			goto release_ras_con;
3628		}
3629		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3630		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3631		goto release_ras_con;
3632	}
3633
3634	amdgpu_fence_driver_hw_init(adev);
3635
3636	dev_info(adev->dev,
3637		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3638			adev->gfx.config.max_shader_engines,
3639			adev->gfx.config.max_sh_per_se,
3640			adev->gfx.config.max_cu_per_sh,
3641			adev->gfx.cu_info.number);
3642
3643	adev->accel_working = true;
3644
3645	amdgpu_vm_check_compute_bug(adev);
3646
3647	/* Initialize the buffer migration limit. */
3648	if (amdgpu_moverate >= 0)
3649		max_MBps = amdgpu_moverate;
3650	else
3651		max_MBps = 8; /* Allow 8 MB/s. */
3652	/* Get a log2 for easy divisions. */
3653	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3654
3655	amdgpu_fbdev_init(adev);
3656
3657	r = amdgpu_pm_sysfs_init(adev);
3658	if (r) {
3659		adev->pm_sysfs_en = false;
3660		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3661	} else
3662		adev->pm_sysfs_en = true;
3663
3664	r = amdgpu_ucode_sysfs_init(adev);
3665	if (r) {
3666		adev->ucode_sysfs_en = false;
3667		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3668	} else
3669		adev->ucode_sysfs_en = true;
3670
3671	if (amdgpu_testing & 1) {
3672		if (adev->accel_working)
3673			amdgpu_test_moves(adev);
3674		else
3675			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3676	}
3677	if (amdgpu_benchmarking) {
3678		if (adev->accel_working)
3679			amdgpu_benchmark(adev, amdgpu_benchmarking);
3680		else
3681			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3682	}
3683
3684	/*
3685	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3686	 * Otherwise the mgpu fan boost feature will be skipped because
3687	 * the gpu instance count would still be too low.
3688	 */
3689	amdgpu_register_gpu_instance(adev);
3690
3691	/* enable clockgating, etc. after ib tests, etc. since some blocks require
3692	 * explicit gating rather than handling it automatically.
3693	 */
3694	if (!adev->gmc.xgmi.pending_reset) {
3695		r = amdgpu_device_ip_late_init(adev);
3696		if (r) {
3697			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3698			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3699			goto release_ras_con;
3700		}
3701		/* must succeed. */
3702		amdgpu_ras_resume(adev);
3703		queue_delayed_work(system_wq, &adev->delayed_init_work,
3704				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3705	}
3706
3707	if (amdgpu_sriov_vf(adev))
3708		flush_delayed_work(&adev->delayed_init_work);
3709
3710	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3711	if (r)
3712		dev_err(adev->dev, "Could not create amdgpu device attr\n");
3713
3714	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3715		r = amdgpu_pmu_init(adev);
3716		if (r)
3717			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}
3718
3719	/* Have the stored PCI config space at hand for restore after a sudden PCI error */
3720	if (amdgpu_device_cache_pci_state(adev->pdev))
3721		pci_restore_state(pdev);
3722
3723	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3724	/* this will fail for cards that aren't VGA class devices, just
3725	 * ignore it */
3726	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3727		vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3728
3729	if (amdgpu_device_supports_px(ddev)) {
3730		px = true;
3731		vga_switcheroo_register_client(adev->pdev,
3732					       &amdgpu_switcheroo_ops, px);
3733		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3734	}
3735
3736	if (adev->gmc.xgmi.pending_reset)
3737		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3738				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3739
3740	return 0;
3741
3742release_ras_con:
3743	amdgpu_release_ras_context(adev);
3744
3745failed:
3746	amdgpu_vf_error_trans_all(adev);
3747
3748	return r;
3749}
3750
3751static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3752{
3753	/* Clear all CPU mappings pointing to this device */
3754	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3755
3756	/* Unmap all mapped bars - Doorbell, registers and VRAM */
3757	amdgpu_device_doorbell_fini(adev);
3758
3759	iounmap(adev->rmmio);
3760	adev->rmmio = NULL;
3761	if (adev->mman.aper_base_kaddr)
3762		iounmap(adev->mman.aper_base_kaddr);
3763	adev->mman.aper_base_kaddr = NULL;
3764
3765	/* Memory manager related */
3766	if (!adev->gmc.xgmi.connected_to_cpu) {
3767		arch_phys_wc_del(adev->gmc.vram_mtrr);
3768		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3769	}
3770}
3771
3772/**
3773 * amdgpu_device_fini_hw - tear down the driver
3774 *
3775 * @adev: amdgpu_device pointer
3776 *
3777 * Tear down the driver info (all asics).
3778 * Called at driver shutdown.
3779 */
3780void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3781{
3782	dev_info(adev->dev, "amdgpu: finishing device.\n");
3783	flush_delayed_work(&adev->delayed_init_work);
3784	ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3785	adev->shutdown = true;
3786
3787	/* make sure IB test finished before entering exclusive mode
3788	 * to avoid preemption on IB test
3789	 */
3790	if (amdgpu_sriov_vf(adev)) {
3791		amdgpu_virt_request_full_gpu(adev, false);
3792		amdgpu_virt_fini_data_exchange(adev);
3793	}
3794
3795	/* disable all interrupts */
3796	amdgpu_irq_disable_all(adev);
3797	if (adev->mode_info.mode_config_initialized) {
3798		if (!amdgpu_device_has_dc_support(adev))
3799			drm_helper_force_disable_all(adev_to_drm(adev));
3800		else
3801			drm_atomic_helper_shutdown(adev_to_drm(adev));
3802	}
3803	amdgpu_fence_driver_hw_fini(adev);
3804
3805	if (adev->pm_sysfs_en)
3806		amdgpu_pm_sysfs_fini(adev);
3807	if (adev->ucode_sysfs_en)
3808		amdgpu_ucode_sysfs_fini(adev);
3809	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3810
3811	amdgpu_fbdev_fini(adev);
3812
3813	amdgpu_irq_fini_hw(adev);
3814
3815	amdgpu_device_ip_fini_early(adev);
3816
3817	amdgpu_gart_dummy_page_fini(adev);
3818
3819	amdgpu_device_unmap_mmio(adev);
3820}
3821
3822void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3823{
3824	amdgpu_device_ip_fini(adev);
3825	amdgpu_fence_driver_sw_fini(adev);
3826	release_firmware(adev->firmware.gpu_info_fw);
3827	adev->firmware.gpu_info_fw = NULL;
3828	adev->accel_working = false;
3829
3830	amdgpu_reset_fini(adev);
3831
3832	/* free i2c buses */
3833	if (!amdgpu_device_has_dc_support(adev))
3834		amdgpu_i2c_fini(adev);
3835
3836	if (amdgpu_emu_mode != 1)
3837		amdgpu_atombios_fini(adev);
3838
3839	kfree(adev->bios);
3840	adev->bios = NULL;
3841	if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3842		vga_switcheroo_unregister_client(adev->pdev);
3843		vga_switcheroo_fini_domain_pm_ops(adev->dev);
3844	}
3845	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3846		vga_client_register(adev->pdev, NULL, NULL, NULL);
3847
3848	if (IS_ENABLED(CONFIG_PERF_EVENTS))
3849		amdgpu_pmu_fini(adev);
3850	if (adev->mman.discovery_bin)
3851		amdgpu_discovery_fini(adev);
3852
3853	kfree(adev->pci_state);
3854
3855}
3856
3857
3858/*
3859 * Suspend & resume.
3860 */
3861/**
3862 * amdgpu_device_suspend - initiate device suspend
3863 *
3864 * @dev: drm dev pointer
3865 * @fbcon: notify the fbdev of suspend
3866 *
3867 * Puts the hw in the suspend state (all asics).
3868 * Returns 0 for success or an error on failure.
3869 * Called at driver suspend.
3870 */
3871int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3872{
3873	struct amdgpu_device *adev = drm_to_adev(dev);
3874
3875	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3876		return 0;
3877
3878	adev->in_suspend = true;
3879
3880	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
3881		DRM_WARN("smart shift update failed\n");
3882
3883	drm_kms_helper_poll_disable(dev);
3884
3885	if (fbcon)
3886		amdgpu_fbdev_set_suspend(adev, 1);
3887
3888	cancel_delayed_work_sync(&adev->delayed_init_work);
3889
3890	amdgpu_ras_suspend(adev);
3891
3892	amdgpu_device_ip_suspend_phase1(adev);
3893
3894	if (!adev->in_s0ix)
3895		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
3896
3897	/* evict vram memory */
3898	amdgpu_bo_evict_vram(adev);
3899
3900	amdgpu_fence_driver_hw_fini(adev);
3901
3902	amdgpu_device_ip_suspend_phase2(adev);
3903	/* evict remaining vram memory
3904	 * This second call to evict vram is to evict the gart page table
3905	 * using the CPU.
3906	 */
3907	amdgpu_bo_evict_vram(adev);
3908
3909	return 0;
3910}
3911
3912/**
3913 * amdgpu_device_resume - initiate device resume
3914 *
3915 * @dev: drm dev pointer
3916 * @fbcon: notify the fbdev of resume
3917 *
3918 * Bring the hw back to operating state (all asics).
3919 * Returns 0 for success or an error on failure.
3920 * Called at driver resume.
3921 */
3922int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3923{
3924	struct amdgpu_device *adev = drm_to_adev(dev);
3925	int r = 0;
3926
3927	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3928		return 0;
3929
3930	if (adev->in_s0ix)
3931		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3932
3933	/* post card */
3934	if (amdgpu_device_need_post(adev)) {
3935		r = amdgpu_device_asic_init(adev);
3936		if (r)
3937			dev_err(adev->dev, "amdgpu asic init failed\n");
3938	}
3939
3940	r = amdgpu_device_ip_resume(adev);
3941	if (r) {
3942		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3943		return r;
3944	}
3945	amdgpu_fence_driver_hw_init(adev);
3946
3947	r = amdgpu_device_ip_late_init(adev);
3948	if (r)
3949		return r;
3950
3951	queue_delayed_work(system_wq, &adev->delayed_init_work,
3952			   msecs_to_jiffies(AMDGPU_RESUME_MS));
3953
3954	if (!adev->in_s0ix) {
3955		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
3956		if (r)
3957			return r;
3958	}
3959
3960	/* Make sure IB tests flushed */
3961	flush_delayed_work(&adev->delayed_init_work);
3962
3963	if (fbcon)
3964		amdgpu_fbdev_set_suspend(adev, 0);
3965
3966	drm_kms_helper_poll_enable(dev);
3967
3968	amdgpu_ras_resume(adev);
3969
3970	/*
3971	 * Most of the connector probing functions try to acquire runtime pm
3972	 * refs to ensure that the GPU is powered on when connector polling is
3973	 * performed. Since we're calling this from a runtime PM callback,
3974	 * trying to acquire rpm refs will cause us to deadlock.
3975	 *
3976	 * Since we're guaranteed to be holding the rpm lock, it's safe to
3977	 * temporarily disable the rpm helpers so this doesn't deadlock us.
3978	 */
3979#ifdef CONFIG_PM
3980	dev->dev->power.disable_depth++;
3981#endif
3982	if (!amdgpu_device_has_dc_support(adev))
3983		drm_helper_hpd_irq_event(dev);
3984	else
3985		drm_kms_helper_hotplug_event(dev);
3986#ifdef CONFIG_PM
3987	dev->dev->power.disable_depth--;
3988#endif
3989	adev->in_suspend = false;
3990
3991	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
3992		DRM_WARN("smart shift update failed\n");
3993
3994	return 0;
3995}
3996
3997/**
3998 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3999 *
4000 * @adev: amdgpu_device pointer
4001 *
4002 * The list of all the hardware IPs that make up the asic is walked and
4003 * the check_soft_reset callbacks are run.  check_soft_reset determines
4004 * if the asic is still hung or not.
4005 * Returns true if any of the IPs are still in a hung state, false if not.
4006 */
4007static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4008{
4009	int i;
4010	bool asic_hang = false;
4011
4012	if (amdgpu_sriov_vf(adev))
4013		return true;
4014
4015	if (amdgpu_asic_need_full_reset(adev))
4016		return true;
4017
4018	for (i = 0; i < adev->num_ip_blocks; i++) {
4019		if (!adev->ip_blocks[i].status.valid)
4020			continue;
4021		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4022			adev->ip_blocks[i].status.hang =
4023				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4024		if (adev->ip_blocks[i].status.hang) {
4025			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4026			asic_hang = true;
4027		}
4028	}
4029	return asic_hang;
4030}
4031
4032/**
4033 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4034 *
4035 * @adev: amdgpu_device pointer
4036 *
4037 * The list of all the hardware IPs that make up the asic is walked and the
4038 * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4039 * handles any IP specific hardware or software state changes that are
4040 * necessary for a soft reset to succeed.
4041 * Returns 0 on success, negative error code on failure.
4042 */
4043static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4044{
4045	int i, r = 0;
4046
4047	for (i = 0; i < adev->num_ip_blocks; i++) {
4048		if (!adev->ip_blocks[i].status.valid)
4049			continue;
4050		if (adev->ip_blocks[i].status.hang &&
4051		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4052			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4053			if (r)
4054				return r;
4055		}
4056	}
4057
4058	return 0;
4059}
4060
4061/**
4062 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4063 *
4064 * @adev: amdgpu_device pointer
4065 *
4066 * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4067 * reset is necessary to recover.
4068 * Returns true if a full asic reset is required, false if not.
4069 */
4070static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4071{
4072	int i;
4073
4074	if (amdgpu_asic_need_full_reset(adev))
4075		return true;
4076
4077	for (i = 0; i < adev->num_ip_blocks; i++) {
4078		if (!adev->ip_blocks[i].status.valid)
4079			continue;
4080		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4081		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4082		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4083		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4084		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4085			if (adev->ip_blocks[i].status.hang) {
4086				dev_info(adev->dev, "Some blocks need a full reset!\n");
4087				return true;
4088			}
4089		}
4090	}
4091	return false;
4092}
4093
4094/**
4095 * amdgpu_device_ip_soft_reset - do a soft reset
4096 *
4097 * @adev: amdgpu_device pointer
4098 *
4099 * The list of all the hardware IPs that make up the asic is walked and the
4100 * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4101 * IP specific hardware or software state changes that are necessary to soft
4102 * reset the IP.
4103 * Returns 0 on success, negative error code on failure.
4104 */
4105static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4106{
4107	int i, r = 0;
4108
4109	for (i = 0; i < adev->num_ip_blocks; i++) {
4110		if (!adev->ip_blocks[i].status.valid)
4111			continue;
4112		if (adev->ip_blocks[i].status.hang &&
4113		    adev->ip_blocks[i].version->funcs->soft_reset) {
4114			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4115			if (r)
4116				return r;
4117		}
4118	}
4119
4120	return 0;
4121}
4122
4123/**
4124 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4125 *
4126 * @adev: amdgpu_device pointer
4127 *
4128 * The list of all the hardware IPs that make up the asic is walked and the
4129 * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4130 * handles any IP specific hardware or software state changes that are
4131 * necessary after the IP has been soft reset.
4132 * Returns 0 on success, negative error code on failure.
4133 */
4134static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4135{
4136	int i, r = 0;
4137
4138	for (i = 0; i < adev->num_ip_blocks; i++) {
4139		if (!adev->ip_blocks[i].status.valid)
4140			continue;
4141		if (adev->ip_blocks[i].status.hang &&
4142		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4143			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4144		if (r)
4145			return r;
4146	}
4147
4148	return 0;
4149}
4150
4151/**
4152 * amdgpu_device_recover_vram - Recover some VRAM contents
4153 *
4154 * @adev: amdgpu_device pointer
4155 *
4156 * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4157 * restore things like GPUVM page tables after a GPU reset where
4158 * the contents of VRAM might be lost.
4159 *
4160 * Returns:
4161 * 0 on success, negative error code on failure.
4162 */
4163static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4164{
4165	struct dma_fence *fence = NULL, *next = NULL;
4166	struct amdgpu_bo *shadow;
4167	struct amdgpu_bo_vm *vmbo;
4168	long r = 1, tmo;
4169
4170	if (amdgpu_sriov_runtime(adev))
4171		tmo = msecs_to_jiffies(8000);
4172	else
4173		tmo = msecs_to_jiffies(100);
4174
4175	dev_info(adev->dev, "recover vram bo from shadow start\n");
4176	mutex_lock(&adev->shadow_list_lock);
4177	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4178		shadow = &vmbo->bo;
4179		/* No need to recover an evicted BO */
4180		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4181		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4182		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4183			continue;
4184
4185		r = amdgpu_bo_restore_shadow(shadow, &next);
4186		if (r)
4187			break;
4188
4189		if (fence) {
4190			tmo = dma_fence_wait_timeout(fence, false, tmo);
4191			dma_fence_put(fence);
4192			fence = next;
4193			if (tmo == 0) {
4194				r = -ETIMEDOUT;
4195				break;
4196			} else if (tmo < 0) {
4197				r = tmo;
4198				break;
4199			}
4200		} else {
4201			fence = next;
4202		}
4203	}
4204	mutex_unlock(&adev->shadow_list_lock);
4205
4206	if (fence)
4207		tmo = dma_fence_wait_timeout(fence, false, tmo);
4208	dma_fence_put(fence);
4209
4210	if (r < 0 || tmo <= 0) {
4211		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4212		return -EIO;
4213	}
4214
4215	dev_info(adev->dev, "recover vram bo from shadow done\n");
4216	return 0;
4217}
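
/*
 * Sketch of the pipelining in the loop above (hypothetical helpers, not
 * driver code): restore N+1 is issued before the fence of restore N is
 * waited on, so the copy engine keeps working while the CPU blocks on the
 * previous fence.
 */
#if 0
	struct dma_fence *prev = NULL, *next = NULL;

	for_each_shadow_bo(shadow) {		/* hypothetical iterator */
		issue_restore(shadow, &next);	/* queue copy, take its fence */
		if (prev) {
			dma_fence_wait(prev, false);
			dma_fence_put(prev);
		}
		prev = next;
	}
	if (prev) {
		dma_fence_wait(prev, false);	/* drain the last restore */
		dma_fence_put(prev);
	}
#endif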
4218
4219
4220/**
4221 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4222 *
4223 * @adev: amdgpu_device pointer
4224 * @from_hypervisor: request from hypervisor
4225 *
4226 * Do a VF FLR and reinitialize the ASIC.
4227 * Returns 0 on success, negative error code on failure.
4228 */
4229static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4230				     bool from_hypervisor)
4231{
4232	int r;
4233
4234	if (from_hypervisor)
4235		r = amdgpu_virt_request_full_gpu(adev, true);
4236	else
4237		r = amdgpu_virt_reset_gpu(adev);
4238	if (r)
4239		return r;
4240
4241	amdgpu_amdkfd_pre_reset(adev);
4242
4243	/* Resume IP prior to SMC */
4244	r = amdgpu_device_ip_reinit_early_sriov(adev);
4245	if (r)
4246		goto error;
4247
4248	amdgpu_virt_init_data_exchange(adev);
4249	/* we need to recover the gart prior to running SMC/CP/SDMA resume */
4250	amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4251
4252	r = amdgpu_device_fw_loading(adev);
4253	if (r)
4254		return r;
4255
4256	/* now we are okay to resume SMC/CP/SDMA */
4257	r = amdgpu_device_ip_reinit_late_sriov(adev);
4258	if (r)
4259		goto error;
4260
4261	amdgpu_irq_gpu_reset_resume_helper(adev);
4262	r = amdgpu_ib_ring_tests(adev);
4263	amdgpu_amdkfd_post_reset(adev);
4264
4265error:
4266	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4267		amdgpu_inc_vram_lost(adev);
4268		r = amdgpu_device_recover_vram(adev);
4269	}
4270	amdgpu_virt_release_full_gpu(adev, true);
4271
4272	return r;
4273}
4274
4275/**
4276 * amdgpu_device_has_job_running - check if there is any job in mirror list
4277 *
4278 * @adev: amdgpu_device pointer
4279 *
4280 * check if there is any job in mirror list
4281 */
4282bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4283{
4284	int i;
4285	struct drm_sched_job *job;
4286
4287	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4288		struct amdgpu_ring *ring = adev->rings[i];
4289
4290		if (!ring || !ring->sched.thread)
4291			continue;
4292
4293		spin_lock(&ring->sched.job_list_lock);
4294		job = list_first_entry_or_null(&ring->sched.pending_list,
4295					       struct drm_sched_job, list);
4296		spin_unlock(&ring->sched.job_list_lock);
4297		if (job)
4298			return true;
4299	}
4300	return false;
4301}
4302
4303/**
4304 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4305 *
4306 * @adev: amdgpu_device pointer
4307 *
4308 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4309 * a hung GPU.
4310 */
4311bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4312{
4313	if (!amdgpu_device_ip_check_soft_reset(adev)) {
4314		dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4315		return false;
4316	}
4317
4318	if (amdgpu_gpu_recovery == 0)
4319		goto disabled;
4320
4321	if (amdgpu_sriov_vf(adev))
4322		return true;
4323
4324	if (amdgpu_gpu_recovery == -1) {
4325		switch (adev->asic_type) {
4326		case CHIP_BONAIRE:
4327		case CHIP_HAWAII:
4328		case CHIP_TOPAZ:
4329		case CHIP_TONGA:
4330		case CHIP_FIJI:
4331		case CHIP_POLARIS10:
4332		case CHIP_POLARIS11:
4333		case CHIP_POLARIS12:
4334		case CHIP_VEGAM:
4335		case CHIP_VEGA20:
4336		case CHIP_VEGA10:
4337		case CHIP_VEGA12:
4338		case CHIP_RAVEN:
4339		case CHIP_ARCTURUS:
4340		case CHIP_RENOIR:
4341		case CHIP_NAVI10:
4342		case CHIP_NAVI14:
4343		case CHIP_NAVI12:
4344		case CHIP_SIENNA_CICHLID:
4345		case CHIP_NAVY_FLOUNDER:
4346		case CHIP_DIMGREY_CAVEFISH:
4347		case CHIP_BEIGE_GOBY:
4348		case CHIP_VANGOGH:
4349		case CHIP_ALDEBARAN:
4350			break;
4351		default:
4352			goto disabled;
4353		}
4354	}
4355
4356	return true;
4357
4358disabled:
4359	dev_info(adev->dev, "GPU recovery disabled.\n");
4360	return false;
4361}
4362
4363int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4364{
4365	u32 i;
4366	int ret = 0;
4367
4368	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4369
4370	dev_info(adev->dev, "GPU mode1 reset\n");
4371
4372	/* disable BM */
4373	pci_clear_master(adev->pdev);
4374
4375	amdgpu_device_cache_pci_state(adev->pdev);
4376
4377	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4378		dev_info(adev->dev, "GPU smu mode1 reset\n");
4379		ret = amdgpu_dpm_mode1_reset(adev);
4380	} else {
4381		dev_info(adev->dev, "GPU psp mode1 reset\n");
4382		ret = psp_gpu_reset(adev);
4383	}
4384
4385	if (ret)
4386		dev_err(adev->dev, "GPU mode1 reset failed\n");
4387
4388	amdgpu_device_load_pci_state(adev->pdev);
4389
4390	/* wait for asic to come out of reset */
4391	for (i = 0; i < adev->usec_timeout; i++) {
4392		u32 memsize = adev->nbio.funcs->get_memsize(adev);
4393
4394		if (memsize != 0xffffffff)
4395			break;
4396		udelay(1);
4397	}
4398
4399	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4400	return ret;
4401}
4402
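/**
 * amdgpu_device_pre_asic_reset - prepare a device for asic reset
 *
 * @adev: amdgpu_device pointer
 * @reset_context: amdgpu reset context pointer
 *
 * Force-complete the hardware fences on all rings, mark the guilty
 * job if any, and, on bare metal, attempt a soft reset first. Sets
 * or clears AMDGPU_NEED_FULL_RESET in the reset context accordingly.
 * Returns 0 for success or an error on failure.
 */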
4403int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4404				 struct amdgpu_reset_context *reset_context)
4405{
4406	int i, r = 0;
4407	struct amdgpu_job *job = NULL;
4408	bool need_full_reset =
4409		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4410
4411	if (reset_context->reset_req_dev == adev)
4412		job = reset_context->job;
4413
4414	/* no need to dump if the device is not in a good state during the probe period */
4415	if (!adev->gmc.xgmi.pending_reset)
4416		amdgpu_debugfs_wait_dump(adev);
4417
4418	if (amdgpu_sriov_vf(adev)) {
4419		/* stop the data exchange thread */
4420		amdgpu_virt_fini_data_exchange(adev);
4421	}
4422
4423	/* block all schedulers and reset given job's ring */
4424	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4425		struct amdgpu_ring *ring = adev->rings[i];
4426
4427		if (!ring || !ring->sched.thread)
4428			continue;
4429
4430		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4431		amdgpu_fence_driver_force_completion(ring);
4432	}
4433
4434	if (job)
4435		drm_sched_increase_karma(&job->base);
4436
4437	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4438	/* If reset handler not implemented, continue; otherwise return */
4439	if (r == -ENOSYS)
4440		r = 0;
4441	else
4442		return r;
4443
4444	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4445	if (!amdgpu_sriov_vf(adev)) {
4446
4447		if (!need_full_reset)
4448			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4449
4450		if (!need_full_reset) {
4451			amdgpu_device_ip_pre_soft_reset(adev);
4452			r = amdgpu_device_ip_soft_reset(adev);
4453			amdgpu_device_ip_post_soft_reset(adev);
4454			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4455				dev_info(adev->dev, "soft reset failed, will fall back to full reset!\n");
4456				need_full_reset = true;
4457			}
4458		}
4459
4460		if (need_full_reset) {
4461			r = amdgpu_device_ip_suspend(adev);
4462			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4463		} else {
4464			clear_bit(AMDGPU_NEED_FULL_RESET,
4465				  &reset_context->flags);
4466		}
4467	}
4468
4469	return r;
4470}
4471
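/**
 * amdgpu_do_asic_reset - reset and re-initialize a list of devices
 *
 * @device_list_handle: list of devices to reset (all nodes of a hive for XGMI)
 * @reset_context: amdgpu reset context pointer
 *
 * Try the asic-specific reset handler first. If it is not implemented,
 * fall back to the default method: perform the asic reset on every
 * device in the list (in parallel for XGMI hives), re-post the cards
 * and resume their IP blocks, then run the IB ring tests.
 * Returns 0 for success, -EAGAIN if another full reset should be
 * attempted, or another error on failure.
 */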
4472int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4473			 struct amdgpu_reset_context *reset_context)
4474{
4475	struct amdgpu_device *tmp_adev = NULL;
4476	bool need_full_reset, skip_hw_reset, vram_lost = false;
4477	int r = 0;
4478
4479	/* Try reset handler method first */
4480	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4481				    reset_list);
4482	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4483	/* If reset handler not implemented, continue; otherwise return */
4484	if (r == -ENOSYS)
4485		r = 0;
4486	else
4487		return r;
4488
4489	/* Reset handler not implemented, use the default method */
4490	need_full_reset =
4491		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4492	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4493
4494	/*
4495	 * ASIC reset has to be done on all XGMI hive nodes ASAP
4496	 * to allow proper link negotiation in FW (within 1 sec)
4497	 */
4498	if (!skip_hw_reset && need_full_reset) {
4499		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4500			/* For XGMI run all resets in parallel to speed up the process */
4501			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4502				tmp_adev->gmc.xgmi.pending_reset = false;
4503				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4504					r = -EALREADY;
4505			} else {
4506				r = amdgpu_asic_reset(tmp_adev);
4507			}
4508			if (r) {
4509				dev_err(tmp_adev->dev, "ASIC reset failed with error %d for drm dev %s",
4510					r, adev_to_drm(tmp_adev)->unique);
4511				break;
4512			}
4513		}
4514
4515		/* For XGMI wait for all resets to complete before proceeding */
4516		if (!r) {
4517			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4518				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4519					flush_work(&tmp_adev->xgmi_reset_work);
4520					r = tmp_adev->asic_reset_res;
4521					if (r)
4522						break;
4523				}
4524			}
4525		}
4526	}
4527
4528	if (!r && amdgpu_ras_intr_triggered()) {
4529		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4530			if (tmp_adev->mmhub.ras_funcs &&
4531			    tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
4532				tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
4533		}
4534
4535		amdgpu_ras_intr_cleared();
4536	}
4537
4538	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4539		if (need_full_reset) {
4540			/* post card */
4541			r = amdgpu_device_asic_init(tmp_adev);
4542			if (r) {
4543				dev_warn(tmp_adev->dev, "asic atom init failed!");
4544			} else {
4545				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4546				r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4547				if (r)
4548					goto out;
4549
4550				r = amdgpu_device_ip_resume_phase1(tmp_adev);
4551				if (r)
4552					goto out;
4553
4554				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4555				if (vram_lost) {
4556					DRM_INFO("VRAM is lost due to GPU reset!\n");
4557					amdgpu_inc_vram_lost(tmp_adev);
4558				}
4559
4560				r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4561				if (r)
4562					goto out;
4563
4564				r = amdgpu_device_fw_loading(tmp_adev);
4565				if (r)
4566					return r;
4567
4568				r = amdgpu_device_ip_resume_phase2(tmp_adev);
4569				if (r)
4570					goto out;
4571
4572				if (vram_lost)
4573					amdgpu_device_fill_reset_magic(tmp_adev);
4574
4575				/*
4576				 * Add this ASIC back as tracked, as the reset
4577				 * already completed successfully.
4578				 */
4579				amdgpu_register_gpu_instance(tmp_adev);
4580
4581				if (!reset_context->hive &&
4582				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4583					amdgpu_xgmi_add_device(tmp_adev);
4584
4585				r = amdgpu_device_ip_late_init(tmp_adev);
4586				if (r)
4587					goto out;
4588
4589				amdgpu_fbdev_set_suspend(tmp_adev, 0);
4590
4591				/*
4592				 * The GPU enters a bad state once the number of
4593				 * faulty pages reported by ECC reaches the
4594				 * threshold, and ras recovery is scheduled next.
4595				 * So add a check here to break recovery if the
4596				 * bad page threshold has indeed been exceeded,
4597				 * and remind the user to retire this GPU or set
4598				 * a bigger bad_page_threshold value to fix this
4599				 * when probing the driver again.
4600				 */
4601				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4602					/* must succeed. */
4603					amdgpu_ras_resume(tmp_adev);
4604				} else {
4605					r = -EINVAL;
4606					goto out;
4607				}
4608
4609				/* Update PSP FW topology after reset */
4610				if (reset_context->hive &&
4611				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4612					r = amdgpu_xgmi_update_topology(
4613						reset_context->hive, tmp_adev);
4614			}
4615		}
4616
4617out:
4618		if (!r) {
4619			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4620			r = amdgpu_ib_ring_tests(tmp_adev);
4621			if (r) {
4622				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4623				need_full_reset = true;
4624				r = -EAGAIN;
4625				goto end;
4626			}
4627		}
4628
4629		if (!r)
4630			r = amdgpu_device_recover_vram(tmp_adev);
4631		else
4632			tmp_adev->asic_reset_res = r;
4633	}
4634
4635end:
4636	if (need_full_reset)
4637		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4638	else
4639		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4640	return r;
4641}
4642
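/*
 * Take the per-device reset lock and set the MP1 state expected by the
 * chosen reset method. Returns false if a reset is already in progress
 * on this device.
 */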
4643static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4644				struct amdgpu_hive_info *hive)
4645{
4646	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4647		return false;
4648
4649	if (hive) {
4650		down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4651	} else {
4652		down_write(&adev->reset_sem);
4653	}
4654
4655	switch (amdgpu_asic_reset_method(adev)) {
4656	case AMD_RESET_METHOD_MODE1:
4657		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4658		break;
4659	case AMD_RESET_METHOD_MODE2:
4660		adev->mp1_state = PP_MP1_STATE_RESET;
4661		break;
4662	default:
4663		adev->mp1_state = PP_MP1_STATE_NONE;
4664		break;
4665	}
4666
4667	return true;
4668}
4669
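/*
 * Counterpart of amdgpu_device_lock_adev: flush any pending VF errors,
 * restore the MP1 state and release the per-device reset lock.
 */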
4670static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4671{
4672	amdgpu_vf_error_trans_all(adev);
4673	adev->mp1_state = PP_MP1_STATE_NONE;
4674	atomic_set(&adev->in_gpu_reset, 0);
4675	up_write(&adev->reset_sem);
4676}
4677
4678/*
4679 * Lock a list of amdgpu devices in a hive safely. For a hive with only
4680 * one node, this behaves the same as amdgpu_device_lock_adev.
4681 *
4682 * Unlocking won't require a rollback.
4683 */
4684static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4685{
4686	struct amdgpu_device *tmp_adev = NULL;
4687
4688	if (adev->gmc.xgmi.num_physical_nodes > 1) {
4689		if (!hive) {
4690			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4691			return -ENODEV;
4692		}
4693		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4694			if (!amdgpu_device_lock_adev(tmp_adev, hive))
4695				goto roll_back;
4696		}
4697	} else if (!amdgpu_device_lock_adev(adev, hive))
4698		return -EAGAIN;
4699
4700	return 0;
4701roll_back:
4702	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
4703		/*
4704		 * If the locking iteration breaks in the middle of a hive,
4705		 * there may be a race issue, or a hive device may have
4706		 * locked up independently. We may or may not be in trouble,
4707		 * so roll back the locks already taken and give out a
4708		 * warning.
4709		 */
4710		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
4711		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4712			amdgpu_device_unlock_adev(tmp_adev);
4713		}
4714	}
4715	return -EAGAIN;
4716}
4717
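/*
 * Re-enable runtime PM for the audio function (function 1) of the GPU's
 * PCI device, which was suspended around the reset by
 * amdgpu_device_suspend_display_audio.
 */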
4718static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4719{
4720	struct pci_dev *p = NULL;
4721
4722	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4723			adev->pdev->bus->number, 1);
4724	if (p) {
4725		pm_runtime_enable(&(p->dev));
4726		pm_runtime_resume(&(p->dev));
4727	}
4728}
4729
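/*
 * Put the audio function (function 1) of the GPU's PCI device into
 * runtime suspend before a BACO or mode1 reset, waiting up to the
 * autosuspend expiration (or a fixed 4 seconds) for it to settle.
 */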
4730static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4731{
4732	enum amd_reset_method reset_method;
4733	struct pci_dev *p = NULL;
4734	u64 expires;
4735
4736	/*
4737	 * For now, only BACO and mode1 reset are confirmed
4738	 * to suffer the audio issue if not properly suspended.
4739	 */
4740	reset_method = amdgpu_asic_reset_method(adev);
4741	if ((reset_method != AMD_RESET_METHOD_BACO) &&
4742	     (reset_method != AMD_RESET_METHOD_MODE1))
4743		return -EINVAL;
4744
4745	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4746			adev->pdev->bus->number, 1);
4747	if (!p)
4748		return -ENODEV;
4749
4750	expires = pm_runtime_autosuspend_expiration(&(p->dev));
4751	if (!expires)
4752		/*
4753		 * If we cannot get the audio device autosuspend delay,
4754		 * a fixed 4S interval will be used. Since 3S is the
4755		 * audio controller's default autosuspend delay setting,
4756		 * the 4S used here is guaranteed to cover it.
4757		 */
4758		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4759
4760	while (!pm_runtime_status_suspended(&(p->dev))) {
4761		if (!pm_runtime_suspend(&(p->dev)))
4762			break;
4763
4764		if (expires < ktime_get_mono_fast_ns()) {
4765			dev_warn(adev->dev, "failed to suspend display audio\n");
4766			/* TODO: abort the succeeding gpu reset? */
4767			return -ETIMEDOUT;
4768		}
4769	}
4770
4771	pm_runtime_disable(&(p->dev));
4772
4773	return 0;
4774}
4775
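/*
 * Re-run the first pending job of each ring one at a time and wait for
 * its hardware fence. A job that times out again is marked as the real
 * guilty one and triggers another hw reset; jobs that complete are
 * signaled and freed.
 */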
4776static void amdgpu_device_recheck_guilty_jobs(
4777	struct amdgpu_device *adev, struct list_head *device_list_handle,
4778	struct amdgpu_reset_context *reset_context)
4779{
4780	int i, r = 0;
4781
4782	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4783		struct amdgpu_ring *ring = adev->rings[i];
4784		int ret = 0;
4785		struct drm_sched_job *s_job;
4786
4787		if (!ring || !ring->sched.thread)
4788			continue;
4789
4790		s_job = list_first_entry_or_null(&ring->sched.pending_list,
4791				struct drm_sched_job, list);
4792		if (s_job == NULL)
4793			continue;
4794
4795		/* clear the job's guilty flag and rely on the following step to decide the real one */
4796		drm_sched_reset_karma(s_job);
4797		drm_sched_resubmit_jobs_ext(&ring->sched, 1);
4798
4799		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
4800		if (ret == 0) { /* timeout */
4801			DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
4802						ring->sched.name, s_job->id);
4803
4804			/* set guilty */
4805			drm_sched_increase_karma(s_job);
4806retry:
4807			/* do hw reset */
4808			if (amdgpu_sriov_vf(adev)) {
4809				amdgpu_virt_fini_data_exchange(adev);
4810				r = amdgpu_device_reset_sriov(adev, false);
4811				if (r)
4812					adev->asic_reset_res = r;
4813			} else {
4814				clear_bit(AMDGPU_SKIP_HW_RESET,
4815					  &reset_context->flags);
4816				r = amdgpu_do_asic_reset(device_list_handle,
4817							 reset_context);
4818				if (r && r == -EAGAIN)
4819					goto retry;
4820			}
4821
4822			/*
4823			 * bump the reset counter so that the following
4824			 * resubmitted jobs can flush their vmid
4825			 */
4826			atomic_inc(&adev->gpu_reset_counter);
4827			continue;
4828		}
4829
4830		/* got the hw fence, signal finished fence */
4831		atomic_dec(ring->sched.score);
4832		dma_fence_get(&s_job->s_fence->finished);
4833		dma_fence_signal(&s_job->s_fence->finished);
4834		dma_fence_put(&s_job->s_fence->finished);
4835
4836		/* remove node from list and free the job */
4837		spin_lock(&ring->sched.job_list_lock);
4838		list_del_init(&s_job->list);
4839		spin_unlock(&ring->sched.job_list_lock);
4840		ring->sched.ops->free_job(s_job);
4841	}
4842}
4843
4844/**
4845 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4846 *
4847 * @adev: amdgpu_device pointer
4848 * @job: which job triggered the hang
4849 *
4850 * Attempt to reset the GPU if it has hung (all asics).
4851 * Attempt a soft reset or a full reset and reinitialize the ASIC.
4852 * Returns 0 for success or an error on failure.
4853 */
4855int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4856			      struct amdgpu_job *job)
4857{
4858	struct list_head device_list, *device_list_handle = NULL;
4859	bool job_signaled = false;
4860	struct amdgpu_hive_info *hive = NULL;
4861	struct amdgpu_device *tmp_adev = NULL;
4862	int i, r = 0;
4863	bool need_emergency_restart = false;
4864	bool audio_suspended = false;
4865	int tmp_vram_lost_counter;
4866	struct amdgpu_reset_context reset_context;
4867
4868	memset(&reset_context, 0, sizeof(reset_context));
4869
4870	/*
4871	 * Special case: RAS triggered and full reset isn't supported
4872	 */
4873	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4874
4875	/*
4876	 * Flush RAM to disk so that after reboot
4877	 * the user can read log and see why the system rebooted.
4878	 */
4879	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4880		DRM_WARN("Emergency reboot.");
4881
4882		ksys_sync_helper();
4883		emergency_restart();
4884	}
4885
4886	dev_info(adev->dev, "GPU %s begin!\n",
4887		need_emergency_restart ? "jobs stop" : "reset");
4888
4889	/*
4890	 * Here we trylock to avoid a chain of resets executing from
4891	 * either jobs triggered on different adevs in an XGMI hive or jobs on
4892	 * different schedulers for the same device while this TO handler runs.
4893	 * We always reset all schedulers for a device and all devices in an
4894	 * XGMI hive, so that should take care of them too.
4895	 */
4896	hive = amdgpu_get_xgmi_hive(adev);
4897	if (hive) {
4898		if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4899			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4900				job ? job->base.id : -1, hive->hive_id);
4901			amdgpu_put_xgmi_hive(hive);
4902			if (job)
4903				drm_sched_increase_karma(&job->base);
4904			return 0;
4905		}
4906		mutex_lock(&hive->hive_lock);
4907	}
4908
4909	reset_context.method = AMD_RESET_METHOD_NONE;
4910	reset_context.reset_req_dev = adev;
4911	reset_context.job = job;
4912	reset_context.hive = hive;
4913	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
4914
4915	/*
4916	 * Lock the device before we try to operate on the linked list.
4917	 * If we didn't get the device lock, don't touch the linked list
4918	 * since others may be iterating over it.
4919	 */
4920	r = amdgpu_device_lock_hive_adev(adev, hive);
4921	if (r) {
4922		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4923					job ? job->base.id : -1);
4924
4925		/* even though we skipped this reset, we still need to mark the job as guilty */
4926		if (job)
4927			drm_sched_increase_karma(&job->base);
4928		goto skip_recovery;
4929	}
4930
4931	/*
4932	 * Build list of devices to reset.
4933	 * In case we are in XGMI hive mode, resort the device list
4934	 * to put adev in the 1st position.
4935	 */
4936	INIT_LIST_HEAD(&device_list);
4937	if (adev->gmc.xgmi.num_physical_nodes > 1) {
4938		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
4939			list_add_tail(&tmp_adev->reset_list, &device_list);
4940		if (!list_is_first(&adev->reset_list, &device_list))
4941			list_rotate_to_front(&adev->reset_list, &device_list);
4942		device_list_handle = &device_list;
4943	} else {
4944		list_add_tail(&adev->reset_list, &device_list);
4945		device_list_handle = &device_list;
4946	}
4947
4948	/* block all schedulers and reset given job's ring */
4949	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4950		/*
4951		 * Try to put the audio codec into suspend state
4952		 * before the gpu reset starts.
4953		 *
4954		 * The power domain of the graphics device is shared
4955		 * with the AZ power domain. Without this, we may
4956		 * change the audio hardware from behind the audio
4957		 * driver's back and trigger some audio codec errors.
4959		 */
4960		if (!amdgpu_device_suspend_display_audio(tmp_adev))
4961			audio_suspended = true;
4962
4963		amdgpu_ras_set_error_query_ready(tmp_adev, false);
4964
4965		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
4966
4967		if (!amdgpu_sriov_vf(tmp_adev))
4968			amdgpu_amdkfd_pre_reset(tmp_adev);
4969
4970		/*
4971		 * Mark these ASICs to be reset as untracked first,
4972		 * and add them back after the reset completes.
4973		 */
4974		amdgpu_unregister_gpu_instance(tmp_adev);
4975
4976		amdgpu_fbdev_set_suspend(tmp_adev, 1);
4977
4978		/* disable ras on ALL IPs */
4979		if (!need_emergency_restart &&
4980		      amdgpu_device_ip_need_full_reset(tmp_adev))
4981			amdgpu_ras_suspend(tmp_adev);
4982
4983		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4984			struct amdgpu_ring *ring = tmp_adev->rings[i];
4985
4986			if (!ring || !ring->sched.thread)
4987				continue;
4988
4989			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
4990
4991			if (need_emergency_restart)
4992				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
4993		}
4994		atomic_inc(&tmp_adev->gpu_reset_counter);
4995	}
4996
4997	if (need_emergency_restart)
4998		goto skip_sched_resume;
4999
5000	/*
5001	 * Must check guilty signal here since after this point all old
5002	 * HW fences are force signaled.
5003	 *
5004	 * job->base holds a reference to parent fence
5005	 */
5006	if (job && job->base.s_fence->parent &&
5007	    dma_fence_is_signaled(job->base.s_fence->parent)) {
5008		job_signaled = true;
5009		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5010		goto skip_hw_reset;
5011	}
5012
5013retry:	/* Rest of adevs pre asic reset from XGMI hive. */
5014	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5015		r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
5016		/* TODO: should we stop? */
5017		if (r) {
5018			dev_err(tmp_adev->dev, "GPU pre asic reset failed with error %d for drm dev %s",
5019				r, adev_to_drm(tmp_adev)->unique);
5020			tmp_adev->asic_reset_res = r;
5021		}
5022	}
5023
5024	tmp_vram_lost_counter = atomic_read(&adev->vram_lost_counter);
5025	/* Actual ASIC resets if needed.*/
5026	/* TODO Implement XGMI hive reset logic for SRIOV */
5027	if (amdgpu_sriov_vf(adev)) {
5028		r = amdgpu_device_reset_sriov(adev, job ? false : true);
5029		if (r)
5030			adev->asic_reset_res = r;
5031	} else {
5032		r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
5033		if (r && r == -EAGAIN)
5034			goto retry;
5035	}
5036
5037skip_hw_reset:
5038
5039	/* Post ASIC reset for all devs. */
5040	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5041
5042		/*
5043		 * Sometimes a later bad compute job can block a good gfx job, as the
5044		 * gfx and compute rings share internal GC HW. We add an additional
5045		 * guilty-job recheck step to find the real guilty job: it synchronously
5046		 * resubmits each first pending job and waits for it to be signaled. If
5047		 * the wait times out, we identify that job as the real guilty one.
5048		 */
5049		if (amdgpu_gpu_recovery == 2 &&
5050			!(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5051			amdgpu_device_recheck_guilty_jobs(
5052				tmp_adev, device_list_handle, &reset_context);
5053
5054		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5055			struct amdgpu_ring *ring = tmp_adev->rings[i];
5056
5057			if (!ring || !ring->sched.thread)
5058				continue;
5059
5060			/* No point in resubmitting jobs if we didn't HW reset */
5061			if (!tmp_adev->asic_reset_res && !job_signaled)
5062				drm_sched_resubmit_jobs(&ring->sched);
5063
5064			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5065		}
5066
5067		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
5068			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5069		}
5070
5071		tmp_adev->asic_reset_res = 0;
5072
5073		if (r) {
5074			/* bad news, how to tell it to userspace? */
5075			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5076			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5077		} else {
5078			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5079			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5080				DRM_WARN("smart shift update failed\n");
5081		}
5082	}
5083
5084skip_sched_resume:
5085	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5086		/* unlock kfd: SRIOV would do it separately */
5087		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5088			amdgpu_amdkfd_post_reset(tmp_adev);
5089
5090		/* kfd_post_reset will do nothing if kfd device is not initialized;
5091		 * we need to bring up kfd here if it was not initialized before
5092		 */
5093		if (!tmp_adev->kfd.init_complete)
5094			amdgpu_amdkfd_device_init(tmp_adev);
5095
5096		if (audio_suspended)
5097			amdgpu_device_resume_display_audio(tmp_adev);
5098		amdgpu_device_unlock_adev(tmp_adev);
5099	}
5100
5101skip_recovery:
5102	if (hive) {
5103		atomic_set(&hive->in_reset, 0);
5104		mutex_unlock(&hive->hive_lock);
5105		amdgpu_put_xgmi_hive(hive);
5106	}
5107
5108	if (r && r != -EAGAIN)
5109		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5110	return r;
5111}
5112
5113/**
5114 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5115 *
5116 * @adev: amdgpu_device pointer
5117 *
5118 * Fetches and stores in the driver the PCIE capabilities (gen speed
5119 * and lanes) of the slot the device is in. Handles APUs and
5120 * virtualized environments where PCIE config space may not be available.
5121 */
5122static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5123{
5124	struct pci_dev *pdev;
5125	enum pci_bus_speed speed_cap, platform_speed_cap;
5126	enum pcie_link_width platform_link_width;
5127
5128	if (amdgpu_pcie_gen_cap)
5129		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5130
5131	if (amdgpu_pcie_lane_cap)
5132		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5133
5134	/* covers APUs as well */
5135	if (pci_is_root_bus(adev->pdev->bus)) {
5136		if (adev->pm.pcie_gen_mask == 0)
5137			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5138		if (adev->pm.pcie_mlw_mask == 0)
5139			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5140		return;
5141	}
5142
5143	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5144		return;
5145
5146	pcie_bandwidth_available(adev->pdev, NULL,
5147				 &platform_speed_cap, &platform_link_width);
5148
5149	if (adev->pm.pcie_gen_mask == 0) {
5150		/* asic caps */
5151		pdev = adev->pdev;
5152		speed_cap = pcie_get_speed_cap(pdev);
5153		if (speed_cap == PCI_SPEED_UNKNOWN) {
5154			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5155						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5156						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5157		} else {
5158			if (speed_cap == PCIE_SPEED_32_0GT)
5159				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5160							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5161							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5162							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5163							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5164			else if (speed_cap == PCIE_SPEED_16_0GT)
5165				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5166							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5167							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5168							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5169			else if (speed_cap == PCIE_SPEED_8_0GT)
5170				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5171							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5172							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5173			else if (speed_cap == PCIE_SPEED_5_0GT)
5174				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5175							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5176			else
5177				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5178		}
5179		/* platform caps */
5180		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5181			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5182						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5183		} else {
5184			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5185				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5186							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5187							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5188							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5189							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5190			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5191				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5192							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5193							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5194							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5195			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5196				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5197							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5198							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5199			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5200				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5201							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5202			else
5203				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5204
5205		}
5206	}
5207	if (adev->pm.pcie_mlw_mask == 0) {
5208		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5209			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5210		} else {
5211			switch (platform_link_width) {
5212			case PCIE_LNK_X32:
5213				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5214							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5215							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5216							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5217							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5218							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5219							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5220				break;
5221			case PCIE_LNK_X16:
5222				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5223							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5224							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5225							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5226							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5227							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5228				break;
5229			case PCIE_LNK_X12:
5230				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5231							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5232							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5233							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5234							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5235				break;
5236			case PCIE_LNK_X8:
5237				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5238							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5239							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5240							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5241				break;
5242			case PCIE_LNK_X4:
5243				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5244							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5245							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5246				break;
5247			case PCIE_LNK_X2:
5248				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5249							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5250				break;
5251			case PCIE_LNK_X1:
5252				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5253				break;
5254			default:
5255				break;
5256			}
5257		}
5258	}
5259}
5260
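/**
 * amdgpu_device_baco_enter - enter BACO (Bus Active, Chip Off) state
 *
 * @dev: drm_device pointer
 *
 * Disable the doorbell interrupt when RAS is enabled and ask the DPM
 * code to put the device into the BACO state.
 * Returns 0 for success or an error on failure.
 */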
5261int amdgpu_device_baco_enter(struct drm_device *dev)
5262{
5263	struct amdgpu_device *adev = drm_to_adev(dev);
5264	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5265
5266	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5267		return -ENOTSUPP;
5268
5269	if (ras && adev->ras_enabled &&
5270	    adev->nbio.funcs->enable_doorbell_interrupt)
5271		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5272
5273	return amdgpu_dpm_baco_enter(adev);
5274}
5275
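/**
 * amdgpu_device_baco_exit - exit the BACO state
 *
 * @dev: drm_device pointer
 *
 * Ask the DPM code to bring the device back out of the BACO state and
 * re-enable the doorbell interrupt when RAS is enabled.
 * Returns 0 for success or an error on failure.
 */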
5276int amdgpu_device_baco_exit(struct drm_device *dev)
5277{
5278	struct amdgpu_device *adev = drm_to_adev(dev);
5279	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5280	int ret = 0;
5281
5282	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5283		return -ENOTSUPP;
5284
5285	ret = amdgpu_dpm_baco_exit(adev);
5286	if (ret)
5287		return ret;
5288
5289	if (ras && adev->ras_enabled &&
5290	    adev->nbio.funcs->enable_doorbell_interrupt)
5291		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5292
5293	return 0;
5294}
5295
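/*
 * Cancel any pending scheduler timeout (TDR) work on all rings of the
 * device and wait for it to finish.
 */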
5296static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
5297{
5298	int i;
5299
5300	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5301		struct amdgpu_ring *ring = adev->rings[i];
5302
5303		if (!ring || !ring->sched.thread)
5304			continue;
5305
5306		cancel_delayed_work_sync(&ring->sched.work_tdr);
5307	}
5308}
5309
5310/**
5311 * amdgpu_pci_error_detected - Called when a PCI error is detected.
5312 * @pdev: PCI device struct
5313 * @state: PCI channel state
5314 *
5315 * Description: Called when a PCI error is detected.
5316 *
5317 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5318 */
5319pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5320{
5321	struct drm_device *dev = pci_get_drvdata(pdev);
5322	struct amdgpu_device *adev = drm_to_adev(dev);
5323	int i;
5324
5325	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5326
5327	if (adev->gmc.xgmi.num_physical_nodes > 1) {
5328		DRM_WARN("No support for XGMI hive yet...");
5329		return PCI_ERS_RESULT_DISCONNECT;
5330	}
5331
5332	adev->pci_channel_state = state;
5333
5334	switch (state) {
5335	case pci_channel_io_normal:
5336		return PCI_ERS_RESULT_CAN_RECOVER;
5337	/* Fatal error, prepare for slot reset */
5338	case pci_channel_io_frozen:
5339		/*
5340		 * Cancel and wait for all TDRs in progress if we fail to
5341		 * set adev->in_gpu_reset in amdgpu_device_lock_adev
5342		 *
5343		 * Locking adev->reset_sem will prevent any external access
5344		 * to GPU during PCI error recovery
5345		 */
5346		while (!amdgpu_device_lock_adev(adev, NULL))
5347			amdgpu_cancel_all_tdr(adev);
5348
5349		/*
5350		 * Block any work scheduling as we do for regular GPU reset
5351		 * for the duration of the recovery
5352		 */
5353		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5354			struct amdgpu_ring *ring = adev->rings[i];
5355
5356			if (!ring || !ring->sched.thread)
5357				continue;
5358
5359			drm_sched_stop(&ring->sched, NULL);
5360		}
5361		atomic_inc(&adev->gpu_reset_counter);
5362		return PCI_ERS_RESULT_NEED_RESET;
5363	case pci_channel_io_perm_failure:
5364		/* Permanent error, prepare for device removal */
5365		return PCI_ERS_RESULT_DISCONNECT;
5366	}
5367
5368	return PCI_ERS_RESULT_NEED_RESET;
5369}
5370
5371/**
5372 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5373 * @pdev: pointer to PCI device
5374 */
5375pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5376{
5378	DRM_INFO("PCI error: mmio enabled callback!!\n");
5379
5380	/* TODO - dump whatever for debugging purposes */
5381
5382	/* This is called only if amdgpu_pci_error_detected returns
5383	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5384	 * works, no need to reset slot.
5385	 */
5386
5387	return PCI_ERS_RESULT_RECOVERED;
5388}
5389
5390/**
5391 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5392 * @pdev: PCI device struct
5393 *
5394 * Description: This routine is called by the pci error recovery
5395 * code after the PCI slot has been reset, just before we
5396 * should resume normal operations.
5397 */
5398pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5399{
5400	struct drm_device *dev = pci_get_drvdata(pdev);
5401	struct amdgpu_device *adev = drm_to_adev(dev);
5402	int r, i;
5403	struct amdgpu_reset_context reset_context;
5404	u32 memsize;
5405	struct list_head device_list;
5406
5407	DRM_INFO("PCI error: slot reset callback!!\n");
5408
5409	memset(&reset_context, 0, sizeof(reset_context));
5410
5411	INIT_LIST_HEAD(&device_list);
5412	list_add_tail(&adev->reset_list, &device_list);
5413
5414	/* wait for asic to come out of reset */
5415	msleep(500);
5416
5417	/* Restore PCI config space */
5418	amdgpu_device_load_pci_state(pdev);
5419
5420	/* confirm ASIC came out of reset */
5421	for (i = 0; i < adev->usec_timeout; i++) {
5422		memsize = amdgpu_asic_get_config_memsize(adev);
5423
5424		if (memsize != 0xffffffff)
5425			break;
5426		udelay(1);
5427	}
5428	if (memsize == 0xffffffff) {
5429		r = -ETIME;
5430		goto out;
5431	}
5432
5433	reset_context.method = AMD_RESET_METHOD_NONE;
5434	reset_context.reset_req_dev = adev;
5435	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5436	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5437
5438	adev->no_hw_access = true;
5439	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5440	adev->no_hw_access = false;
5441	if (r)
5442		goto out;
5443
5444	r = amdgpu_do_asic_reset(&device_list, &reset_context);
5445
5446out:
5447	if (!r) {
5448		if (amdgpu_device_cache_pci_state(adev->pdev))
5449			pci_restore_state(adev->pdev);
5450
5451		DRM_INFO("PCIe error recovery succeeded\n");
5452	} else {
5453		DRM_ERROR("PCIe error recovery failed, err:%d", r);
5454		amdgpu_device_unlock_adev(adev);
5455	}
5456
5457	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5458}
5459
5460/**
5461 * amdgpu_pci_resume() - resume normal ops after PCI reset
5462 * @pdev: pointer to PCI device
5463 *
5464 * Called when the error recovery driver tells us that it's
5465 * OK to resume normal operation.
5466 */
5467void amdgpu_pci_resume(struct pci_dev *pdev)
5468{
5469	struct drm_device *dev = pci_get_drvdata(pdev);
5470	struct amdgpu_device *adev = drm_to_adev(dev);
5471	int i;
5472
5474	DRM_INFO("PCI error: resume callback!!\n");
5475
5476	/* Only continue execution for the case of pci_channel_io_frozen */
5477	if (adev->pci_channel_state != pci_channel_io_frozen)
5478		return;
5479
5480	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5481		struct amdgpu_ring *ring = adev->rings[i];
5482
5483		if (!ring || !ring->sched.thread)
5484			continue;
5485
5487		drm_sched_resubmit_jobs(&ring->sched);
5488		drm_sched_start(&ring->sched, true);
5489	}
5490
5491	amdgpu_device_unlock_adev(adev);
5492}
5493
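/**
 * amdgpu_device_cache_pci_state - cache the PCI state of a device
 *
 * @pdev: PCI device struct
 *
 * Save the current PCI configuration space and store a copy of it in
 * adev->pci_state so that it can be restored after a GPU reset.
 * Returns true on success, false on failure.
 */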
5494bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5495{
5496	struct drm_device *dev = pci_get_drvdata(pdev);
5497	struct amdgpu_device *adev = drm_to_adev(dev);
5498	int r;
5499
5500	r = pci_save_state(pdev);
5501	if (!r) {
5502		kfree(adev->pci_state);
5503
5504		adev->pci_state = pci_store_saved_state(pdev);
5505
5506		if (!adev->pci_state) {
5507			DRM_ERROR("Failed to store PCI saved state");
5508			return false;
5509		}
5510	} else {
5511		DRM_WARN("Failed to save PCI state, err:%d\n", r);
5512		return false;
5513	}
5514
5515	return true;
5516}
5517
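/**
 * amdgpu_device_load_pci_state - restore the cached PCI state
 *
 * @pdev: PCI device struct
 *
 * Reload the PCI configuration space previously saved by
 * amdgpu_device_cache_pci_state() and write it back to the device.
 * Returns true on success, false on failure.
 */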
5518bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5519{
5520	struct drm_device *dev = pci_get_drvdata(pdev);
5521	struct amdgpu_device *adev = drm_to_adev(dev);
5522	int r;
5523
5524	if (!adev->pci_state)
5525		return false;
5526
5527	r = pci_load_saved_state(pdev, adev->pci_state);
5528
5529	if (!r) {
5530		pci_restore_state(pdev);
5531	} else {
5532		DRM_WARN("Failed to load PCI state, err:%d\n", r);
5533		return false;
5534	}
5535
5536	return true;
5537}
5538
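/**
 * amdgpu_device_flush_hdp - flush the HDP cache
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to use for the HDP flush packet, or NULL
 *
 * Flush the HDP cache, either with the ring's HDP flush packet or
 * through the asic callback. Not needed on APUs or when the GPU is
 * connected to the CPU over XGMI.
 */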
5539void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5540		struct amdgpu_ring *ring)
5541{
5542#ifdef CONFIG_X86_64
5543	if (adev->flags & AMD_IS_APU)
5544		return;
5545#endif
5546	if (adev->gmc.xgmi.connected_to_cpu)
5547		return;
5548
5549	if (ring && ring->funcs->emit_hdp_flush)
5550		amdgpu_ring_emit_hdp_flush(ring);
5551	else
5552		amdgpu_asic_flush_hdp(adev, ring);
5553}
5554
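/**
 * amdgpu_device_invalidate_hdp - invalidate the HDP cache
 *
 * @adev: amdgpu_device pointer
 * @ring: ring the invalidation is issued for, or NULL
 *
 * Invalidate the HDP cache through the asic callback; like the flush,
 * this is skipped on APUs and on GPUs connected to the CPU over XGMI.
 */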
5555void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5556		struct amdgpu_ring *ring)
5557{
5558#ifdef CONFIG_X86_64
5559	if (adev->flags & AMD_IS_APU)
5560		return;
5561#endif
5562	if (adev->gmc.xgmi.connected_to_cpu)
5563		return;
5564
5565	amdgpu_asic_invalidate_hdp(adev, ring);
5566}