/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#include <linux/apple-gmux.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/device.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
#include "amdgpu_virt.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
#endif

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000
#define AMDGPU_MAX_RETRY_LIMIT		2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
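/*
 * Fallback PCIE index/data pair: raw byte offsets 0x38/0x3C (0x44 for the
 * high index) expressed as dword register offsets, used by the indirect
 * access helpers below when the NBIO callbacks are not yet available.
 */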
#define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2)
#define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
#define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)

static const struct drm_driver amdgpu_kms_driver;

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
	"NAVI10",
	"CYAN_SKILLFISH",
	"NAVI14",
	"NAVI12",
	"SIENNA_CICHLID",
	"NAVY_FLOUNDER",
	"VANGOGH",
	"DIMGREY_CAVEFISH",
	"BEIGE_GOBY",
	"YELLOW_CARP",
	"IP DISCOVERY",
	"LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, 0444,
		amdgpu_device_get_pcie_replay_count, NULL);
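
/*
 * Example (illustrative, not part of the driver): a minimal userspace reader
 * for the attribute above. The card0 path is an assumption; pick the card
 * index that matches your device.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long cnt;
	FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &cnt) == 1)
		printf("PCIe replay count: %llu\n", cnt);
	fclose(f);
	return 0;
}
#endif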

static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
					  struct bin_attribute *attr, char *buf,
					  loff_t ppos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t bytes_read;

	switch (ppos) {
	case AMDGPU_SYS_REG_STATE_XGMI:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_WAFL:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_PCIE:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_USR:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_USR_1:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
		break;
	default:
		return -EINVAL;
	}

	return bytes_read;
}

BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
	 AMDGPU_SYS_REG_STATE_END);
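
/*
 * Example (illustrative): the file offset of this binary attribute selects
 * which register-state dump is returned, not a byte position. A userspace
 * sketch, assuming the AMDGPU_SYS_REG_STATE_* selector values from the
 * driver headers (XGMI assumed to be 0 here):
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/sys/class/drm/card0/device/reg_state", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	n = pread(fd, buf, sizeof(buf), 0 /* AMDGPU_SYS_REG_STATE_XGMI */);
	if (n > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}
#endif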

int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (!amdgpu_asic_get_reg_state_supported(adev))
		return 0;

	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);

	return ret;
}

void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_asic_get_reg_state_supported(adev))
		return;
	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
}

/**
 * DOC: board_info
 *
 * The amdgpu driver provides a sysfs API for giving board-related information.
 * It provides the form factor information in the format
 *
 *   type : form factor
 *
 * Possible form factor values
 *
 * - "cem"		- PCIE CEM card
 * - "oam"		- Open Compute Accelerator Module
 * - "unknown"	- Not known
 *
 */

static ssize_t amdgpu_device_get_board_info(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
	const char *pkg;

	if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
		pkg_type = adev->smuio.funcs->get_pkg_type(adev);

	switch (pkg_type) {
	case AMDGPU_PKG_TYPE_CEM:
		pkg = "cem";
		break;
	case AMDGPU_PKG_TYPE_OAM:
		pkg = "oam";
		break;
	default:
		pkg = "unknown";
		break;
	}

	return sysfs_emit(buf, "%s : %s\n", "type", pkg);
}

static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);

static struct attribute *amdgpu_board_attrs[] = {
	&dev_attr_board_info.attr,
	NULL,
};

static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
					     struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (adev->flags & AMD_IS_APU)
		return 0;

	return attr->mode;
}

static const struct attribute_group amdgpu_board_attrs_group = {
	.attrs = amdgpu_board_attrs,
	.is_visible = amdgpu_board_attrs_is_visible
};

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);


/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise returns false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * Smart Shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
	return (amdgpu_device_supports_boco(dev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
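		/* split pos across the index pair: the low 31 bits go through
		 * MM_INDEX (bit 31 set to select the VRAM aperture), the
		 * remaining high bits through MM_INDEX_HI
		 */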
		tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}

	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}

/**
 * amdgpu_device_aper_access - access vram by vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes that have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			/* Make sure HDP write cache flush happens without any reordering
			 * after the system memory contents are sent over PCIe device
			 */
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			/* Make sure HDP read cache is invalidated before issuing a read
			 * to the PCIe device
			 */
			mb();
			memcpy_fromio(buf, addr, count);
		}

	}

	return count;
#else
	return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try using the vram aperture to access vram first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM_INDEX/MM_DATA to access the rest of vram */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}
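
/*
 * Example (illustrative): reading the first 256 bytes of VRAM into a local
 * buffer. Note the alignment requirement: the MM_INDEX fallback path BUGs
 * on offsets or sizes that are not dword aligned.
 */
#if 0
{
	u32 data[64];

	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
}
#endif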

/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore; if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_domain->sem))
			up_read(&adev->reset_domain->sem);
		else
			lockdep_assert_held(&adev->reset_domain->sem);
	}
#endif
	return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}

/*
 * MMIO register read with byte offset helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}


/**
 * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
				uint32_t reg, uint32_t acc_flags,
				uint32_t xcc_id)
{
	uint32_t ret, rlcg_flag;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    adev->gfx.rlc.rlcg_reg_access_supported &&
		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
							 GC_HWIP, false,
							 &rlcg_flag)) {
			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id);
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	return ret;
}

/*
 * MMIO register write with byte offset helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to write to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}
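
/*
 * Note (illustrative): most callers go through the register macros in
 * amdgpu.h rather than calling this directly; roughly, WREG32(reg, v)
 * expands to amdgpu_device_wreg(adev, reg, v, 0), and WREG32_NO_KIQ()
 * passes AMDGPU_REGS_NO_KIQ to bypass the KIQ path under SR-IOV.
 */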

/**
 * amdgpu_mm_wreg_mmio_rlc - write a register either with direct/indirect mmio or with the RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 * @xcc_id: xcc accelerated compute core id
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v,
			     uint32_t xcc_id)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
	} else if ((reg * 4) >= adev->rmmio_size) {
		adev->pcie_wreg(adev, reg * 4, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}

/**
 * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags, uint32_t xcc_id)
{
	uint32_t rlcg_flag;

	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    adev->gfx.rlc.rlcg_reg_access_supported &&
		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
							 GC_HWIP, true,
							 &rlcg_flag)) {
			amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id);
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v, xcc_id);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}
}

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u32 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

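	/* write the index, then post it with a dummy read so it reaches the
	 * hardware before the data register access below
	 */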
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
				    u64 reg_addr)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	if (unlikely(!adev->nbio.funcs)) {
		pcie_index = AMDGPU_PCIE_INDEX_FALLBACK;
		pcie_data = AMDGPU_PCIE_DATA_FALLBACK;
	} else {
		pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
		pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	}

	if (reg_addr >> 32) {
		if (unlikely(!adev->nbio.funcs))
			pcie_index_hi = AMDGPU_PCIE_INDEX_HI_FALLBACK;
		else
			pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
	} else {
		pcie_index_hi = 0;
	}

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r = readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64-bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u64 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
				  u64 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	unsigned long pcie_index_hi = 0;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;
	u64 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
			pcie_index_hi * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r |= ((u64)readl(pcie_data_offset) << 32);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
				     u64 reg_addr, u32 reg_data)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
	else
		pcie_index_hi = 0;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64-bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
				   u64 reg_addr, u64 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	unsigned long pcie_index_hi = 0;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_get_rev_id - query device rev_id
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the device rev_id.
 */
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	int ret;

	amdgpu_asic_pre_asic_init(adev);

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		amdgpu_psp_wait_for_bootloader(adev);
		ret = amdgpu_atomfirmware_asic_init(adev, true);
		return ret;
	} else {
		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
	}

	return 0;
}

/**
 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &adev->mem_scratch.robj,
				       &adev->mem_scratch.gpu_addr,
				       (void **)&adev->mem_scratch.ptr);
}

/**
 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND/OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
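
/*
 * Example (illustrative): golden-register tables are flat arrays of
 * {reg, and_mask, or_mask} triples. The register names here are made up.
 */
#if 0
static const u32 example_golden_settings[] = {
	/* and_mask 0xffffffff: write or_mask verbatim */
	mmEXAMPLE_REG_A, 0xffffffff, 0x00000001,
	/* read-modify-write: clear bits 15:8, then OR in the new field */
	mmEXAMPLE_REG_B, 0x0000ff00, 0x00001200,
};
#endif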

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics).
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
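
/*
 * Example (illustrative): a typical consumer grabs a slot, derives the CPU
 * and GPU addresses from the returned dword offset, and frees the slot on
 * teardown.
 */
#if 0
{
	u32 wb;
	u64 gpu_addr;
	volatile u32 *cpu_addr;

	if (!amdgpu_device_wb_get(adev, &wb)) {
		gpu_addr = adev->wb.gpu_addr + wb * 4; /* dw -> byte offset */
		cpu_addr = &adev->wb.wb[wb];
		/* ... hand gpu_addr to the hw, poll *cpu_addr ... */
		amdgpu_device_wb_free(adev, wb);
	}
}
#endif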

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned int i;
	u16 cmd;
	int r;

	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		return 0;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
		DRM_WARN("System can't access extended configuration space, please check!\n");

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
{
	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
		return false;

	return true;
}

/*
 * GPU helpers function.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if post is needed because a hw reset was performed.
 * Returns true if post is needed or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (!amdgpu_device_read_bios(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In the whole-GPU pass-through virtualization case, after
		 * a VM reboot some old SMC firmware still needs the driver to do
		 * vPost, otherwise the GPU hangs. SMC firmware versions above 22.15
		 * don't have this flaw, so we force vPost only for SMC versions
		 * below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			release_firmware(adev->pm.fw);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on DCE 3.0 or later.
 * If users report that it works on older ASICs as well, we may
 * loosen this.
 */
bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
{
	switch (amdgpu_seamless) {
	case -1:
		break;
	case 1:
		return true;
	case 0:
		return false;
	default:
		DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
			  amdgpu_seamless);
		return false;
	}

	if (!(adev->flags & AMD_IS_APU))
		return false;

	if (adev->mman.keep_stolen_vga_memory)
		return false;

	return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
}

/*
 * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
 * don't support dynamic speed switching. Until we have confirmation from Intel
 * that a specific host supports it, it's safer that we keep it disabled for all.
 *
 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
 */
static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* eGPUs change speeds based on USB4 fabric conditions */
	if (dev_is_removable(adev->dev))
		return true;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		return false;
#endif
	return true;
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
	switch (amdgpu_aspm) {
	case -1:
		break;
	case 0:
		return false;
	case 1:
		return true;
	default:
		return false;
	}
	if (adev->flags & AMD_IS_APU)
		return false;
	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
		return false;
	return pcie_aspm_enabled(adev->pdev);
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
		bool state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory; a page is 4KB, so we have a 12-bit offset, a minimum of
 * 9 bits in the page table, and the remaining bits in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines the number of bits in page table versus page directory,
	 * a page is 4KB so we have a 12-bit offset, a minimum of 9 bits in the
	 * page table and the remaining bits in the page directory
	 */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}
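
/*
 * Worked example: with amdgpu_vm_block_size = 9, a page table holds
 * 2^9 = 512 entries and covers 512 * 4KB = 2MB of address space; the
 * address bits above (9 + 12) select the page directory entry.
 */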

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
		(amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		(amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
	if (!(adev->flags & AMD_IS_APU) ||
	    adev->asic_type < CHIP_RAVEN)
		return 0;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		break;
	case CHIP_RENOIR:
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case CHIP_YELLOW_CARP:
		break;
	case CHIP_CYAN_SKILLFISH:
		if ((adev->pdev->device == 0x13FE) ||
		    (adev->pdev->device == 0x143F))
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
		break;
	default:
		break;
	}

	return 0;
}
1819
1820/**
1821 * amdgpu_device_check_arguments - validate module params
1822 *
1823 * @adev: amdgpu_device pointer
1824 *
1825 * Validates certain module parameters and updates
1826 * the associated values used by the driver (all asics).
1827 */
1828static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1829{
1830	if (amdgpu_sched_jobs < 4) {
1831		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1832			 amdgpu_sched_jobs);
1833		amdgpu_sched_jobs = 4;
1834	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
1835		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1836			 amdgpu_sched_jobs);
1837		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1838	}
1839
1840	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1841		/* gart size must be greater or equal to 32M */
1842		dev_warn(adev->dev, "gart size (%d) too small\n",
1843			 amdgpu_gart_size);
1844		amdgpu_gart_size = -1;
1845	}
1846
1847	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1848		/* gtt size must be greater than or equal to 32M */
1849		dev_warn(adev->dev, "gtt size (%d) too small\n",
1850				 amdgpu_gtt_size);
1851		amdgpu_gtt_size = -1;
1852	}
1853
1854	/* valid range is between 4 and 9 inclusive */
1855	if (amdgpu_vm_fragment_size != -1 &&
1856	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1857		dev_warn(adev->dev, "VM fragment size valid range is between 4 and 9\n");
1858		amdgpu_vm_fragment_size = -1;
1859	}
1860
1861	if (amdgpu_sched_hw_submission < 2) {
1862		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1863			 amdgpu_sched_hw_submission);
1864		amdgpu_sched_hw_submission = 2;
1865	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1866		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1867			 amdgpu_sched_hw_submission);
1868		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1869	}
1870
1871	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1872		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1873		amdgpu_reset_method = -1;
1874	}
1875
1876	amdgpu_device_check_smu_prv_buffer_size(adev);
1877
1878	amdgpu_device_check_vm_size(adev);
1879
1880	amdgpu_device_check_block_size(adev);
1881
1882	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1883
1884	return 0;
1885}
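
/*
 * Illustrative sketch (not part of the driver) of how the validation above
 * behaves for a few hypothetical module parameter values:
 *
 *	amdgpu_sched_jobs = 3;	// below the minimum, bumped up to 4
 *	amdgpu_sched_jobs = 48;	// not a power of 2, rounded up to 64
 *	amdgpu_gtt_size = 16;	// below 32M, reset to the default of -1
 */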
1886
1887/**
1888 * amdgpu_switcheroo_set_state - set switcheroo state
1889 *
1890 * @pdev: pci dev pointer
1891 * @state: vga_switcheroo state
1892 *
1893 * Callback for the switcheroo driver.  Suspends or resumes the asic
1894 * as it is powered down or up via ACPI methods.
1895 */
1896static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1897					enum vga_switcheroo_state state)
1898{
1899	struct drm_device *dev = pci_get_drvdata(pdev);
1900	int r;
1901
1902	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1903		return;
1904
1905	if (state == VGA_SWITCHEROO_ON) {
1906		pr_info("switched on\n");
1907		/* don't suspend or resume card normally */
1908		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1909
1910		pci_set_power_state(pdev, PCI_D0);
1911		amdgpu_device_load_pci_state(pdev);
1912		r = pci_enable_device(pdev);
1913		if (r)
1914			DRM_WARN("pci_enable_device failed (%d)\n", r);
1915		amdgpu_device_resume(dev, true);
1916
1917		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1918	} else {
1919		pr_info("switched off\n");
1920		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1921		amdgpu_device_prepare(dev);
1922		amdgpu_device_suspend(dev, true);
1923		amdgpu_device_cache_pci_state(pdev);
1924		/* Shut down the device */
1925		pci_disable_device(pdev);
1926		pci_set_power_state(pdev, PCI_D3cold);
1927		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1928	}
1929}
1930
1931/**
1932 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1933 *
1934 * @pdev: pci dev pointer
1935 *
1936 * Callback for the switcheroo driver.  Checks if the switcheroo
1937 * state can be changed.
1938 * Returns true if the state can be changed, false if not.
1939 */
1940static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1941{
1942	struct drm_device *dev = pci_get_drvdata(pdev);
1943
1944	/*
1945	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1946	 * locking inversion with the driver load path. And the access here is
1947	 * completely racy anyway. So don't bother with locking for now.
1948	 */
1949	return atomic_read(&dev->open_count) == 0;
1950}
1951
1952static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1953	.set_gpu_state = amdgpu_switcheroo_set_state,
1954	.reprobe = NULL,
1955	.can_switch = amdgpu_switcheroo_can_switch,
1956};
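
/*
 * A minimal sketch, assuming the standard vga_switcheroo API, of how the ops
 * table above would be registered; the actual registration in amdgpu happens
 * later in the device init path:
 *
 *	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops,
 *				       amdgpu_device_supports_px(dev));
 */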
1957
1958/**
1959 * amdgpu_device_ip_set_clockgating_state - set the CG state
1960 *
1961 * @dev: amdgpu_device pointer
1962 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1963 * @state: clockgating state (gate or ungate)
1964 *
1965 * Sets the requested clockgating state for all instances of
1966 * the hardware IP specified.
1967 * Returns the error code from the last instance.
1968 */
1969int amdgpu_device_ip_set_clockgating_state(void *dev,
1970					   enum amd_ip_block_type block_type,
1971					   enum amd_clockgating_state state)
1972{
1973	struct amdgpu_device *adev = dev;
1974	int i, r = 0;
1975
1976	for (i = 0; i < adev->num_ip_blocks; i++) {
1977		if (!adev->ip_blocks[i].status.valid)
1978			continue;
1979		if (adev->ip_blocks[i].version->type != block_type)
1980			continue;
1981		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1982			continue;
1983		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1984			(void *)adev, state);
1985		if (r)
1986			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1987				  adev->ip_blocks[i].version->funcs->name, r);
1988	}
1989	return r;
1990}
1991
1992/**
1993 * amdgpu_device_ip_set_powergating_state - set the PG state
1994 *
1995 * @dev: amdgpu_device pointer
1996 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1997 * @state: powergating state (gate or ungate)
1998 *
1999 * Sets the requested powergating state for all instances of
2000 * the hardware IP specified.
2001 * Returns the error code from the last instance.
2002 */
2003int amdgpu_device_ip_set_powergating_state(void *dev,
2004					   enum amd_ip_block_type block_type,
2005					   enum amd_powergating_state state)
2006{
2007	struct amdgpu_device *adev = dev;
2008	int i, r = 0;
2009
2010	for (i = 0; i < adev->num_ip_blocks; i++) {
2011		if (!adev->ip_blocks[i].status.valid)
2012			continue;
2013		if (adev->ip_blocks[i].version->type != block_type)
2014			continue;
2015		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
2016			continue;
2017		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
2018			(void *)adev, state);
2019		if (r)
2020			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
2021				  adev->ip_blocks[i].version->funcs->name, r);
2022	}
2023	return r;
2024}
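
/*
 * Illustrative usage of the two setters above (a sketch, not code from this
 * file): gating both clocks and power of all VCN instances:
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 *						   AMD_CG_STATE_GATE);
 *	if (!r)
 *		r = amdgpu_device_ip_set_powergating_state(adev,
 *							   AMD_IP_BLOCK_TYPE_VCN,
 *							   AMD_PG_STATE_GATE);
 */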
2025
2026/**
2027 * amdgpu_device_ip_get_clockgating_state - get the CG state
2028 *
2029 * @adev: amdgpu_device pointer
2030 * @flags: clockgating feature flags
2031 *
2032 * Walks the list of IPs on the device and updates the clockgating
2033 * flags for each IP.
2034 * Updates @flags with the feature flags for each hardware IP where
2035 * clockgating is enabled.
2036 */
2037void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
2038					    u64 *flags)
2039{
2040	int i;
2041
2042	for (i = 0; i < adev->num_ip_blocks; i++) {
2043		if (!adev->ip_blocks[i].status.valid)
2044			continue;
2045		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
2046			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
2047	}
2048}
2049
2050/**
2051 * amdgpu_device_ip_wait_for_idle - wait for idle
2052 *
2053 * @adev: amdgpu_device pointer
2054 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2055 *
2056 * Waits for the requested hardware IP to be idle.
2057 * Returns 0 for success or a negative error code on failure.
2058 */
2059int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
2060				   enum amd_ip_block_type block_type)
2061{
2062	int i, r;
2063
2064	for (i = 0; i < adev->num_ip_blocks; i++) {
2065		if (!adev->ip_blocks[i].status.valid)
2066			continue;
2067		if (adev->ip_blocks[i].version->type == block_type) {
2068			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
2069			if (r)
2070				return r;
2071			break;
2072		}
2073	}
2074	return 0;
2076}
2077
2078/**
2079 * amdgpu_device_ip_is_idle - is the hardware IP idle
2080 *
2081 * @adev: amdgpu_device pointer
2082 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2083 *
2084 * Check if the hardware IP is idle or not.
2085 * Returns true if the IP is idle, false if not.
2086 */
2087bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
2088			      enum amd_ip_block_type block_type)
2089{
2090	int i;
2091
2092	for (i = 0; i < adev->num_ip_blocks; i++) {
2093		if (!adev->ip_blocks[i].status.valid)
2094			continue;
2095		if (adev->ip_blocks[i].version->type == block_type)
2096			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
2097	}
2098	return true;
2100}
2101
2102/**
2103 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
2104 *
2105 * @adev: amdgpu_device pointer
2106 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
2107 *
2108 * Returns a pointer to the hardware IP block structure
2109 * if it exists for the asic, otherwise NULL.
2110 */
2111struct amdgpu_ip_block *
2112amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
2113			      enum amd_ip_block_type type)
2114{
2115	int i;
2116
2117	for (i = 0; i < adev->num_ip_blocks; i++)
2118		if (adev->ip_blocks[i].version->type == type)
2119			return &adev->ip_blocks[i];
2120
2121	return NULL;
2122}
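
/*
 * Example lookup (illustrative only): fetch the GFX block and print its
 * version, guarding against the NULL return on asics without that block:
 *
 *	struct amdgpu_ip_block *gfx =
 *		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 *	if (gfx)
 *		DRM_INFO("GFX IP v%u.%u\n", gfx->version->major,
 *			 gfx->version->minor);
 */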
2123
2124/**
2125 * amdgpu_device_ip_block_version_cmp
2126 *
2127 * @adev: amdgpu_device pointer
2128 * @type: enum amd_ip_block_type
2129 * @major: major version
2130 * @minor: minor version
2131 *
2132 * Returns 0 if the IP block version is equal to or greater than the
2133 * requested version, 1 if it is smaller or the ip_block doesn't exist.
2134 */
2135int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
2136				       enum amd_ip_block_type type,
2137				       u32 major, u32 minor)
2138{
2139	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
2140
2141	if (ip_block && ((ip_block->version->major > major) ||
2142			((ip_block->version->major == major) &&
2143			(ip_block->version->minor >= minor))))
2144		return 0;
2145
2146	return 1;
2147}
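
/*
 * Example (illustrative): note the inverted return convention above, so a
 * "GFX IP is at least version 9.0" check reads:
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       9, 0) == 0) {
 *		// GFX 9.0 or newer is present
 *	}
 */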
2148
2149/**
2150 * amdgpu_device_ip_block_add
2151 *
2152 * @adev: amdgpu_device pointer
2153 * @ip_block_version: pointer to the IP to add
2154 *
2155 * Adds the IP block driver information to the collection of IPs
2156 * on the asic.
2157 */
2158int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
2159			       const struct amdgpu_ip_block_version *ip_block_version)
2160{
2161	if (!ip_block_version)
2162		return -EINVAL;
2163
2164	switch (ip_block_version->type) {
2165	case AMD_IP_BLOCK_TYPE_VCN:
2166		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
2167			return 0;
2168		break;
2169	case AMD_IP_BLOCK_TYPE_JPEG:
2170		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
2171			return 0;
2172		break;
2173	default:
2174		break;
2175	}
2176
2177	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
2178		  ip_block_version->funcs->name);
2179
2180	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
2181
2182	return 0;
2183}
2184
2185/**
2186 * amdgpu_device_enable_virtual_display - enable virtual display feature
2187 *
2188 * @adev: amdgpu_device pointer
2189 *
2190 * Enables the virtual display feature if the user has enabled it via
2191 * the module parameter virtual_display.  This feature provides virtual
2192 * display hardware on headless boards or in virtualized environments.
2193 * This function parses and validates the configuration string specified by
2194 * the user and configures the virtual display configuration (number of
2195 * virtual connectors, crtcs, etc.) specified.
2196 */
2197static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
2198{
2199	adev->enable_virtual_display = false;
2200
2201	if (amdgpu_virtual_display) {
2202		const char *pci_address_name = pci_name(adev->pdev);
2203		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
2204
2205		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
2206		pciaddstr_tmp = pciaddstr;
2207		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
2208			pciaddname = strsep(&pciaddname_tmp, ",");
2209			if (!strcmp("all", pciaddname)
2210			    || !strcmp(pci_address_name, pciaddname)) {
2211				long num_crtc;
2212				int res = -1;
2213
2214				adev->enable_virtual_display = true;
2215
2216				if (pciaddname_tmp)
2217					res = kstrtol(pciaddname_tmp, 10,
2218						      &num_crtc);
2219
2220				if (!res) {
2221					if (num_crtc < 1)
2222						num_crtc = 1;
2223					if (num_crtc > 6)
2224						num_crtc = 6;
2225					adev->mode_info.num_crtc = num_crtc;
2226				} else {
2227					adev->mode_info.num_crtc = 1;
2228				}
2229				break;
2230			}
2231		}
2232
2233		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
2234			 amdgpu_virtual_display, pci_address_name,
2235			 adev->enable_virtual_display, adev->mode_info.num_crtc);
2236
2237		kfree(pciaddstr);
2238	}
2239}
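
/*
 * For reference, the configuration string parsed above takes the form
 * "<pci address>,<crtc count>;<pci address>,<crtc count>;..." where "all"
 * matches every bound device.  A hypothetical example:
 *
 *	modprobe amdgpu virtual_display=0000:03:00.0,2
 *
 * would enable two virtual crtcs on the device at PCI address 0000:03:00.0.
 */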
2240
2241void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
2242{
2243	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
2244		adev->mode_info.num_crtc = 1;
2245		adev->enable_virtual_display = true;
2246		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
2247			 adev->enable_virtual_display, adev->mode_info.num_crtc);
2248	}
2249}
2250
2251/**
2252 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2253 *
2254 * @adev: amdgpu_device pointer
2255 *
2256 * Parses the asic configuration parameters specified in the gpu info
2257 * firmware and makes them available to the driver for use in configuring
2258 * the asic.
2259 * Returns 0 on success, -EINVAL on failure.
2260 */
2261static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
2262{
2263	const char *chip_name;
2264	char fw_name[40];
2265	int err;
2266	const struct gpu_info_firmware_header_v1_0 *hdr;
2267
2268	adev->firmware.gpu_info_fw = NULL;
2269
2270	if (adev->mman.discovery_bin)
2271		return 0;
2272
2273	switch (adev->asic_type) {
2274	default:
2275		return 0;
2276	case CHIP_VEGA10:
2277		chip_name = "vega10";
2278		break;
2279	case CHIP_VEGA12:
2280		chip_name = "vega12";
2281		break;
2282	case CHIP_RAVEN:
2283		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2284			chip_name = "raven2";
2285		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
2286			chip_name = "picasso";
2287		else
2288			chip_name = "raven";
2289		break;
2290	case CHIP_ARCTURUS:
2291		chip_name = "arcturus";
2292		break;
2293	case CHIP_NAVI12:
2294		chip_name = "navi12";
2295		break;
2296	}
2297
2298	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
2299	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
2300	if (err) {
2301		dev_err(adev->dev,
2302			"Failed to get gpu_info firmware \"%s\"\n",
2303			fw_name);
2304		goto out;
2305	}
2306
2307	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2308	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2309
2310	switch (hdr->version_major) {
2311	case 1:
2312	{
2313		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2314			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2315								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2316
2317		/*
2318		 * Should be dropped when DAL no longer needs it.
2319		 */
2320		if (adev->asic_type == CHIP_NAVI12)
2321			goto parse_soc_bounding_box;
2322
2323		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2324		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2325		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2326		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2327		adev->gfx.config.max_texture_channel_caches =
2328			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2329		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2330		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2331		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2332		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2333		adev->gfx.config.double_offchip_lds_buf =
2334			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2335		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2336		adev->gfx.cu_info.max_waves_per_simd =
2337			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2338		adev->gfx.cu_info.max_scratch_slots_per_cu =
2339			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2340		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2341		if (hdr->version_minor >= 1) {
2342			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2343				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2344									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2345			adev->gfx.config.num_sc_per_sh =
2346				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2347			adev->gfx.config.num_packer_per_sc =
2348				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2349		}
2350
2351parse_soc_bounding_box:
2352		/*
2353		 * soc bounding box info is not integrated in the discovery table,
2354		 * so we always need to parse it from the gpu info firmware when needed.
2355		 */
2356		if (hdr->version_minor == 2) {
2357			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2358				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2359									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2360			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2361		}
2362		break;
2363	}
2364	default:
2365		dev_err(adev->dev,
2366			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2367		err = -EINVAL;
2368		goto out;
2369	}
2370out:
2371	return err;
2372}
2373
2374/**
2375 * amdgpu_device_ip_early_init - run early init for hardware IPs
2376 *
2377 * @adev: amdgpu_device pointer
2378 *
2379 * Early initialization pass for hardware IPs.  The hardware IPs that make
2380 * up each asic are discovered and each IP's early_init callback is run.  This
2381 * is the first stage in initializing the asic.
2382 * Returns 0 on success, negative error code on failure.
2383 */
2384static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2385{
2386	struct pci_dev *parent;
2387	int i, r;
2388	bool total;
2389
2390	amdgpu_device_enable_virtual_display(adev);
2391
2392	if (amdgpu_sriov_vf(adev)) {
2393		r = amdgpu_virt_request_full_gpu(adev, true);
2394		if (r)
2395			return r;
2396	}
2397
2398	switch (adev->asic_type) {
2399#ifdef CONFIG_DRM_AMDGPU_SI
2400	case CHIP_VERDE:
2401	case CHIP_TAHITI:
2402	case CHIP_PITCAIRN:
2403	case CHIP_OLAND:
2404	case CHIP_HAINAN:
2405		adev->family = AMDGPU_FAMILY_SI;
2406		r = si_set_ip_blocks(adev);
2407		if (r)
2408			return r;
2409		break;
2410#endif
2411#ifdef CONFIG_DRM_AMDGPU_CIK
2412	case CHIP_BONAIRE:
2413	case CHIP_HAWAII:
2414	case CHIP_KAVERI:
2415	case CHIP_KABINI:
2416	case CHIP_MULLINS:
2417		if (adev->flags & AMD_IS_APU)
2418			adev->family = AMDGPU_FAMILY_KV;
2419		else
2420			adev->family = AMDGPU_FAMILY_CI;
2421
2422		r = cik_set_ip_blocks(adev);
2423		if (r)
2424			return r;
2425		break;
2426#endif
2427	case CHIP_TOPAZ:
2428	case CHIP_TONGA:
2429	case CHIP_FIJI:
2430	case CHIP_POLARIS10:
2431	case CHIP_POLARIS11:
2432	case CHIP_POLARIS12:
2433	case CHIP_VEGAM:
2434	case CHIP_CARRIZO:
2435	case CHIP_STONEY:
2436		if (adev->flags & AMD_IS_APU)
2437			adev->family = AMDGPU_FAMILY_CZ;
2438		else
2439			adev->family = AMDGPU_FAMILY_VI;
2440
2441		r = vi_set_ip_blocks(adev);
2442		if (r)
2443			return r;
2444		break;
2445	default:
2446		r = amdgpu_discovery_set_ip_blocks(adev);
2447		if (r)
2448			return r;
2449		break;
2450	}
2451
2452	if (amdgpu_has_atpx() &&
2453	    (amdgpu_is_atpx_hybrid() ||
2454	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2455	    ((adev->flags & AMD_IS_APU) == 0) &&
2456	    !dev_is_removable(&adev->pdev->dev))
2457		adev->flags |= AMD_IS_PX;
2458
2459	if (!(adev->flags & AMD_IS_APU)) {
2460		parent = pcie_find_root_port(adev->pdev);
2461		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2462	}
2463
2465	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2466	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2467		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2468	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2469		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2470	if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
2471		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2472
2473	total = true;
2474	for (i = 0; i < adev->num_ip_blocks; i++) {
2475		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2476			DRM_WARN("disabled ip block: %d <%s>\n",
2477				  i, adev->ip_blocks[i].version->funcs->name);
2478			adev->ip_blocks[i].status.valid = false;
2479		} else {
2480			if (adev->ip_blocks[i].version->funcs->early_init) {
2481				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2482				if (r == -ENOENT) {
2483					adev->ip_blocks[i].status.valid = false;
2484				} else if (r) {
2485					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2486						  adev->ip_blocks[i].version->funcs->name, r);
2487					total = false;
2488				} else {
2489					adev->ip_blocks[i].status.valid = true;
2490				}
2491			} else {
2492				adev->ip_blocks[i].status.valid = true;
2493			}
2494		}
2495		/* get the vbios after the asic_funcs are set up */
2496		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2497			r = amdgpu_device_parse_gpu_info_fw(adev);
2498			if (r)
2499				return r;
2500
2501			/* Read BIOS */
2502			if (amdgpu_device_read_bios(adev)) {
2503				if (!amdgpu_get_bios(adev))
2504					return -EINVAL;
2505
2506				r = amdgpu_atombios_init(adev);
2507				if (r) {
2508					dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2509					amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2510					return r;
2511				}
2512			}
2513
2514			/* get pf2vf msg info at the earliest time */
2515			if (amdgpu_sriov_vf(adev))
2516				amdgpu_virt_init_data_exchange(adev);
2517
2518		}
2519	}
2520	if (!total)
2521		return -ENODEV;
2522
2523	amdgpu_amdkfd_device_probe(adev);
2524	adev->cg_flags &= amdgpu_cg_mask;
2525	adev->pg_flags &= amdgpu_pg_mask;
2526
2527	return 0;
2528}
2529
2530static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2531{
2532	int i, r;
2533
2534	for (i = 0; i < adev->num_ip_blocks; i++) {
2535		if (!adev->ip_blocks[i].status.sw)
2536			continue;
2537		if (adev->ip_blocks[i].status.hw)
2538			continue;
2539		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2540		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2541		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2542			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2543			if (r) {
2544				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2545					  adev->ip_blocks[i].version->funcs->name, r);
2546				return r;
2547			}
2548			adev->ip_blocks[i].status.hw = true;
2549		}
2550	}
2551
2552	return 0;
2553}
2554
2555static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2556{
2557	int i, r;
2558
2559	for (i = 0; i < adev->num_ip_blocks; i++) {
2560		if (!adev->ip_blocks[i].status.sw)
2561			continue;
2562		if (adev->ip_blocks[i].status.hw)
2563			continue;
2564		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2565		if (r) {
2566			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2567				  adev->ip_blocks[i].version->funcs->name, r);
2568			return r;
2569		}
2570		adev->ip_blocks[i].status.hw = true;
2571	}
2572
2573	return 0;
2574}
2575
2576static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2577{
2578	int r = 0;
2579	int i;
2580	uint32_t smu_version;
2581
2582	if (adev->asic_type >= CHIP_VEGA10) {
2583		for (i = 0; i < adev->num_ip_blocks; i++) {
2584			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2585				continue;
2586
2587			if (!adev->ip_blocks[i].status.sw)
2588				continue;
2589
2590			/* no need to do the fw loading again if already done */
2591			if (adev->ip_blocks[i].status.hw)
2592				break;
2593
2594			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2595				r = adev->ip_blocks[i].version->funcs->resume(adev);
2596				if (r) {
2597					DRM_ERROR("resume of IP block <%s> failed %d\n",
2598							  adev->ip_blocks[i].version->funcs->name, r);
2599					return r;
2600				}
2601			} else {
2602				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2603				if (r) {
2604					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2605							  adev->ip_blocks[i].version->funcs->name, r);
2606					return r;
2607				}
2608			}
2609
2610			adev->ip_blocks[i].status.hw = true;
2611			break;
2612		}
2613	}
2614
2615	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2616		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2617
2618	return r;
2619}
2620
2621static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2622{
2623	long timeout;
2624	int r, i;
2625
2626	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2627		struct amdgpu_ring *ring = adev->rings[i];
2628
2629		/* No need to set up the GPU scheduler for rings that don't need it */
2630		if (!ring || ring->no_scheduler)
2631			continue;
2632
2633		switch (ring->funcs->type) {
2634		case AMDGPU_RING_TYPE_GFX:
2635			timeout = adev->gfx_timeout;
2636			break;
2637		case AMDGPU_RING_TYPE_COMPUTE:
2638			timeout = adev->compute_timeout;
2639			break;
2640		case AMDGPU_RING_TYPE_SDMA:
2641			timeout = adev->sdma_timeout;
2642			break;
2643		default:
2644			timeout = adev->video_timeout;
2645			break;
2646		}
2647
2648		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
2649				   DRM_SCHED_PRIORITY_COUNT,
2650				   ring->num_hw_submission, 0,
2651				   timeout, adev->reset_domain->wq,
2652				   ring->sched_score, ring->name,
2653				   adev->dev);
2654		if (r) {
2655			DRM_ERROR("Failed to create scheduler on ring %s.\n",
2656				  ring->name);
2657			return r;
2658		}
2659		r = amdgpu_uvd_entity_init(adev, ring);
2660		if (r) {
2661			DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
2662				  ring->name);
2663			return r;
2664		}
2665		r = amdgpu_vce_entity_init(adev, ring);
2666		if (r) {
2667			DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
2668				  ring->name);
2669			return r;
2670		}
2671	}
2672
2673	amdgpu_xcp_update_partition_sched_list(adev);
2674
2675	return 0;
2676}
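
/*
 * Note (illustrative): the timeout selected above maps to the lockup_timeout
 * module parameter, whose bare-metal format is [GFX,Compute,SDMA,Video] in
 * milliseconds.  A hypothetical example:
 *
 *	modprobe amdgpu lockup_timeout=10000,20000,10000,60000
 */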
2677
2678
2679/**
2680 * amdgpu_device_ip_init - run init for hardware IPs
2681 *
2682 * @adev: amdgpu_device pointer
2683 *
2684 * Main initialization pass for hardware IPs.  The list of all the hardware
2685 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2686 * are run.  sw_init initializes the software state associated with each IP
2687 * and hw_init initializes the hardware associated with each IP.
2688 * Returns 0 on success, negative error code on failure.
2689 */
2690static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2691{
2692	int i, r;
2693
2694	r = amdgpu_ras_init(adev);
2695	if (r)
2696		return r;
2697
2698	for (i = 0; i < adev->num_ip_blocks; i++) {
2699		if (!adev->ip_blocks[i].status.valid)
2700			continue;
2701		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2702		if (r) {
2703			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2704				  adev->ip_blocks[i].version->funcs->name, r);
2705			goto init_failed;
2706		}
2707		adev->ip_blocks[i].status.sw = true;
2708
2709		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2710			/* need to do common hw init early so everything is set up for gmc */
2711			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2712			if (r) {
2713				DRM_ERROR("hw_init %d failed %d\n", i, r);
2714				goto init_failed;
2715			}
2716			adev->ip_blocks[i].status.hw = true;
2717		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2718			/* need to do gmc hw init early so we can allocate gpu mem */
2719			/* Try to reserve bad pages early */
2720			if (amdgpu_sriov_vf(adev))
2721				amdgpu_virt_exchange_data(adev);
2722
2723			r = amdgpu_device_mem_scratch_init(adev);
2724			if (r) {
2725				DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
2726				goto init_failed;
2727			}
2728			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2729			if (r) {
2730				DRM_ERROR("hw_init %d failed %d\n", i, r);
2731				goto init_failed;
2732			}
2733			r = amdgpu_device_wb_init(adev);
2734			if (r) {
2735				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2736				goto init_failed;
2737			}
2738			adev->ip_blocks[i].status.hw = true;
2739
2740			/* right after GMC hw init, we create CSA */
2741			if (adev->gfx.mcbp) {
2742				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2743							       AMDGPU_GEM_DOMAIN_VRAM |
2744							       AMDGPU_GEM_DOMAIN_GTT,
2745							       AMDGPU_CSA_SIZE);
2746				if (r) {
2747					DRM_ERROR("allocate CSA failed %d\n", r);
2748					goto init_failed;
2749				}
2750			}
2751
2752			r = amdgpu_seq64_init(adev);
2753			if (r) {
2754				DRM_ERROR("allocate seq64 failed %d\n", r);
2755				goto init_failed;
2756			}
2757		}
2758	}
2759
2760	if (amdgpu_sriov_vf(adev))
2761		amdgpu_virt_init_data_exchange(adev);
2762
2763	r = amdgpu_ib_pool_init(adev);
2764	if (r) {
2765		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2766		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2767		goto init_failed;
2768	}
2769
2770	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2771	if (r)
2772		goto init_failed;
2773
2774	r = amdgpu_device_ip_hw_init_phase1(adev);
2775	if (r)
2776		goto init_failed;
2777
2778	r = amdgpu_device_fw_loading(adev);
2779	if (r)
2780		goto init_failed;
2781
2782	r = amdgpu_device_ip_hw_init_phase2(adev);
2783	if (r)
2784		goto init_failed;
2785
2786	/*
2787	 * Retired pages will be loaded from eeprom and reserved here.
2788	 * This should be called after amdgpu_device_ip_hw_init_phase2 since
2789	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2790	 * functional for I2C communication, which is only true at this point.
2791	 *
2792	 * amdgpu_ras_recovery_init may fail, but the upper level only cares
2793	 * about failures caused by a bad gpu situation and stops the amdgpu
2794	 * init process accordingly. For other failures, it still releases all
2795	 * the resources and prints an error message, rather than returning a
2796	 * negative value to the upper level.
2797	 *
2798	 * Note: theoretically, this should be called before all vram allocations
2799	 * to protect retired pages from being abused.
2800	 */
2801	r = amdgpu_ras_recovery_init(adev);
2802	if (r)
2803		goto init_failed;
2804
2805	/*
2806	 * In case of XGMI, grab an extra reference on the reset domain for this device
2807	 */
2808	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2809		if (amdgpu_xgmi_add_device(adev) == 0) {
2810			if (!amdgpu_sriov_vf(adev)) {
2811				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2812
2813				if (WARN_ON(!hive)) {
2814					r = -ENOENT;
2815					goto init_failed;
2816				}
2817
2818				if (!hive->reset_domain ||
2819				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2820					r = -ENOENT;
2821					amdgpu_put_xgmi_hive(hive);
2822					goto init_failed;
2823				}
2824
2825				/* Drop the early temporary reset domain we created for device */
2826				amdgpu_reset_put_reset_domain(adev->reset_domain);
2827				adev->reset_domain = hive->reset_domain;
2828				amdgpu_put_xgmi_hive(hive);
2829			}
2830		}
2831	}
2832
2833	r = amdgpu_device_init_schedulers(adev);
2834	if (r)
2835		goto init_failed;
2836
2837	if (adev->mman.buffer_funcs_ring->sched.ready)
2838		amdgpu_ttm_set_buffer_funcs_status(adev, true);
2839
2840	/* Don't init kfd if whole hive needs to be reset during init */
2841	if (!adev->gmc.xgmi.pending_reset) {
2842		kgd2kfd_init_zone_device(adev);
2843		amdgpu_amdkfd_device_init(adev);
2844	}
2845
2846	amdgpu_fru_get_product_info(adev);
2847
2848init_failed:
2849
2850	return r;
2851}
2852
2853/**
2854 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2855 *
2856 * @adev: amdgpu_device pointer
2857 *
2858 * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2859 * this function before a GPU reset.  If the value is retained after a
2860 * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2861 */
2862static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2863{
2864	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2865}
2866
2867/**
2868 * amdgpu_device_check_vram_lost - check if vram is valid
2869 *
2870 * @adev: amdgpu_device pointer
2871 *
2872 * Checks the reset magic value written to the gart pointer in VRAM.
2873 * The driver calls this after a GPU reset to see if the contents of
2874 * VRAM are lost or not.
2875 * Returns true if vram is lost, false if not.
2876 */
2877static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2878{
2879	if (memcmp(adev->gart.ptr, adev->reset_magic,
2880			AMDGPU_RESET_MAGIC_NUM))
2881		return true;
2882
2883	if (!amdgpu_in_reset(adev))
2884		return false;
2885
2886	/*
2887	 * For all ASICs with baco/mode1 reset, the VRAM is
2888	 * always assumed to be lost.
2889	 */
2890	switch (amdgpu_asic_reset_method(adev)) {
2891	case AMD_RESET_METHOD_BACO:
2892	case AMD_RESET_METHOD_MODE1:
2893		return true;
2894	default:
2895		return false;
2896	}
2897}
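
/*
 * Sketch of the intended pairing (condensed, illustrative): the magic is
 * written after init/resume and compared after a reset to decide whether
 * buffer contents must be restored:
 *
 *	amdgpu_device_fill_reset_magic(adev);
 *	// ... GPU reset ...
 *	if (amdgpu_device_check_vram_lost(adev))
 *		DRM_INFO("VRAM is lost due to GPU reset!\n");
 */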
2898
2899/**
2900 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2901 *
2902 * @adev: amdgpu_device pointer
2903 * @state: clockgating state (gate or ungate)
2904 *
2905 * The list of all the hardware IPs that make up the asic is walked and the
2906 * set_clockgating_state callbacks are run.
2907 * During late init, this pass enables clockgating for the hardware IPs;
2908 * during fini or suspend, it disables clockgating for the hardware IPs.
2909 * Returns 0 on success, negative error code on failure.
2910 */
2911
2912int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2913			       enum amd_clockgating_state state)
2914{
2915	int i, j, r;
2916
2917	if (amdgpu_emu_mode == 1)
2918		return 0;
2919
2920	for (j = 0; j < adev->num_ip_blocks; j++) {
2921		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2922		if (!adev->ip_blocks[i].status.late_initialized)
2923			continue;
2924		/* skip CG for GFX, SDMA on S0ix */
2925		if (adev->in_s0ix &&
2926		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2927		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2928			continue;
2929		/* skip CG for VCE/UVD, it's handled specially */
2930		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2931		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2932		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2933		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2934		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2935			/* enable clockgating to save power */
2936			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2937										     state);
2938			if (r) {
2939				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2940					  adev->ip_blocks[i].version->funcs->name, r);
2941				return r;
2942			}
2943		}
2944	}
2945
2946	return 0;
2947}
2948
2949int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2950			       enum amd_powergating_state state)
2951{
2952	int i, j, r;
2953
2954	if (amdgpu_emu_mode == 1)
2955		return 0;
2956
2957	for (j = 0; j < adev->num_ip_blocks; j++) {
2958		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2959		if (!adev->ip_blocks[i].status.late_initialized)
2960			continue;
2961		/* skip PG for GFX, SDMA on S0ix */
2962		if (adev->in_s0ix &&
2963		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2964		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2965			continue;
2966		/* skip PG for VCE/UVD, it's handled specially */
2967		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2968		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2969		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2970		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2971		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2972			/* enable powergating to save power */
2973			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2974											state);
2975			if (r) {
2976				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2977					  adev->ip_blocks[i].version->funcs->name, r);
2978				return r;
2979			}
2980		}
2981	}
2982	return 0;
2983}
2984
2985static int amdgpu_device_enable_mgpu_fan_boost(void)
2986{
2987	struct amdgpu_gpu_instance *gpu_ins;
2988	struct amdgpu_device *adev;
2989	int i, ret = 0;
2990
2991	mutex_lock(&mgpu_info.mutex);
2992
2993	/*
2994	 * MGPU fan boost feature should be enabled
2995	 * only when there are two or more dGPUs in
2996	 * the system
2997	 */
2998	if (mgpu_info.num_dgpu < 2)
2999		goto out;
3000
3001	for (i = 0; i < mgpu_info.num_dgpu; i++) {
3002		gpu_ins = &(mgpu_info.gpu_ins[i]);
3003		adev = gpu_ins->adev;
3004		if (!(adev->flags & AMD_IS_APU) &&
3005		    !gpu_ins->mgpu_fan_enabled) {
3006			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
3007			if (ret)
3008				break;
3009
3010			gpu_ins->mgpu_fan_enabled = 1;
3011		}
3012	}
3013
3014out:
3015	mutex_unlock(&mgpu_info.mutex);
3016
3017	return ret;
3018}
3019
3020/**
3021 * amdgpu_device_ip_late_init - run late init for hardware IPs
3022 *
3023 * @adev: amdgpu_device pointer
3024 *
3025 * Late initialization pass for hardware IPs.  The list of all the hardware
3026 * IPs that make up the asic is walked and the late_init callbacks are run.
3027 * late_init covers any special initialization that an IP requires
3028 * after all of the IPs have been initialized or something that needs to happen
3029 * late in the init process.
3030 * Returns 0 on success, negative error code on failure.
3031 */
3032static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
3033{
3034	struct amdgpu_gpu_instance *gpu_instance;
3035	int i = 0, r;
3036
3037	for (i = 0; i < adev->num_ip_blocks; i++) {
3038		if (!adev->ip_blocks[i].status.hw)
3039			continue;
3040		if (adev->ip_blocks[i].version->funcs->late_init) {
3041			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
3042			if (r) {
3043				DRM_ERROR("late_init of IP block <%s> failed %d\n",
3044					  adev->ip_blocks[i].version->funcs->name, r);
3045				return r;
3046			}
3047		}
3048		adev->ip_blocks[i].status.late_initialized = true;
3049	}
3050
3051	r = amdgpu_ras_late_init(adev);
3052	if (r) {
3053		DRM_ERROR("amdgpu_ras_late_init failed %d", r);
3054		return r;
3055	}
3056
3057	amdgpu_ras_set_error_query_ready(adev, true);
3058
3059	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3060	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
3061
3062	amdgpu_device_fill_reset_magic(adev);
3063
3064	r = amdgpu_device_enable_mgpu_fan_boost();
3065	if (r)
3066		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
3067
3068	/* For passthrough configuration on arcturus and aldebaran, enable special handling for SBR */
3069	if (amdgpu_passthrough(adev) &&
3070	    ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
3071	     adev->asic_type == CHIP_ALDEBARAN))
3072		amdgpu_dpm_handle_passthrough_sbr(adev, true);
3073
3074	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3075		mutex_lock(&mgpu_info.mutex);
3076
3077		/*
3078		 * Reset device p-state to low as it was booted with high.
3079		 *
3080		 * This should be performed only after all devices from the same
3081		 * hive get initialized.
3082		 *
3083		 * However, the number of devices in the hive is not known in
3084		 * advance; it is counted one by one as the devices initialize.
3085		 *
3086		 * So, we wait until all XGMI interlinked devices are initialized.
3087		 * This may bring some delays as those devices may come from
3088		 * different hives. But that should be OK.
3089		 */
3090		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
3091			for (i = 0; i < mgpu_info.num_gpu; i++) {
3092				gpu_instance = &(mgpu_info.gpu_ins[i]);
3093				if (gpu_instance->adev->flags & AMD_IS_APU)
3094					continue;
3095
3096				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
3097						AMDGPU_XGMI_PSTATE_MIN);
3098				if (r) {
3099					DRM_ERROR("pstate setting failed (%d).\n", r);
3100					break;
3101				}
3102			}
3103		}
3104
3105		mutex_unlock(&mgpu_info.mutex);
3106	}
3107
3108	return 0;
3109}
3110
3111/**
3112 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
3113 *
3114 * @adev: amdgpu_device pointer
3115 *
3116 * For ASICs that need to disable the SMC first
3117 */
3118static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
3119{
3120	int i, r;
3121
3122	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
3123		return;
3124
3125	for (i = 0; i < adev->num_ip_blocks; i++) {
3126		if (!adev->ip_blocks[i].status.hw)
3127			continue;
3128		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3129			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3130			/* XXX handle errors */
3131			if (r) {
3132				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3133					  adev->ip_blocks[i].version->funcs->name, r);
3134			}
3135			adev->ip_blocks[i].status.hw = false;
3136			break;
3137		}
3138	}
3139}
3140
3141static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
3142{
3143	int i, r;
3144
3145	for (i = 0; i < adev->num_ip_blocks; i++) {
3146		if (!adev->ip_blocks[i].version->funcs->early_fini)
3147			continue;
3148
3149		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
3150		if (r) {
3151			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
3152				  adev->ip_blocks[i].version->funcs->name, r);
3153		}
3154	}
3155
3156	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3157	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3158
3159	amdgpu_amdkfd_suspend(adev, false);
3160
3161	/* Workaround for ASICs that need to disable SMC first */
3162	amdgpu_device_smu_fini_early(adev);
3163
3164	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3165		if (!adev->ip_blocks[i].status.hw)
3166			continue;
3167
3168		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3169		/* XXX handle errors */
3170		if (r) {
3171			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3172				  adev->ip_blocks[i].version->funcs->name, r);
3173		}
3174
3175		adev->ip_blocks[i].status.hw = false;
3176	}
3177
3178	if (amdgpu_sriov_vf(adev)) {
3179		if (amdgpu_virt_release_full_gpu(adev, false))
3180			DRM_ERROR("failed to release exclusive mode on fini\n");
3181	}
3182
3183	return 0;
3184}
3185
3186/**
3187 * amdgpu_device_ip_fini - run fini for hardware IPs
3188 *
3189 * @adev: amdgpu_device pointer
3190 *
3191 * Main teardown pass for hardware IPs.  The list of all the hardware
3192 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
3193 * are run.  hw_fini tears down the hardware associated with each IP
3194 * and sw_fini tears down any software state associated with each IP.
3195 * Returns 0 on success, negative error code on failure.
3196 */
3197static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
3198{
3199	int i, r;
3200
3201	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
3202		amdgpu_virt_release_ras_err_handler_data(adev);
3203
3204	if (adev->gmc.xgmi.num_physical_nodes > 1)
3205		amdgpu_xgmi_remove_device(adev);
3206
3207	amdgpu_amdkfd_device_fini_sw(adev);
3208
3209	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3210		if (!adev->ip_blocks[i].status.sw)
3211			continue;
3212
3213		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
3214			amdgpu_ucode_free_bo(adev);
3215			amdgpu_free_static_csa(&adev->virt.csa_obj);
3216			amdgpu_device_wb_fini(adev);
3217			amdgpu_device_mem_scratch_fini(adev);
3218			amdgpu_ib_pool_fini(adev);
3219			amdgpu_seq64_fini(adev);
3220		}
3221
3222		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
3223		/* XXX handle errors */
3224		if (r) {
3225			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
3226				  adev->ip_blocks[i].version->funcs->name, r);
3227		}
3228		adev->ip_blocks[i].status.sw = false;
3229		adev->ip_blocks[i].status.valid = false;
3230	}
3231
3232	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3233		if (!adev->ip_blocks[i].status.late_initialized)
3234			continue;
3235		if (adev->ip_blocks[i].version->funcs->late_fini)
3236			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
3237		adev->ip_blocks[i].status.late_initialized = false;
3238	}
3239
3240	amdgpu_ras_fini(adev);
3241
3242	return 0;
3243}
3244
3245/**
3246 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3247 *
3248 * @work: work_struct.
3249 */
3250static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
3251{
3252	struct amdgpu_device *adev =
3253		container_of(work, struct amdgpu_device, delayed_init_work.work);
3254	int r;
3255
3256	r = amdgpu_ib_ring_tests(adev);
3257	if (r)
3258		DRM_ERROR("ib ring test failed (%d).\n", r);
3259}
3260
3261static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
3262{
3263	struct amdgpu_device *adev =
3264		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
3265
3266	WARN_ON_ONCE(adev->gfx.gfx_off_state);
3267	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3268
3269	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
3270		adev->gfx.gfx_off_state = true;
3271}
3272
3273/**
3274 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3275 *
3276 * @adev: amdgpu_device pointer
3277 *
3278 * Main suspend function for hardware IPs.  The list of all the hardware
3279 * IPs that make up the asic is walked, clockgating is disabled and the
3280 * suspend callbacks are run.  suspend puts the hardware and software state
3281 * in each IP into a state suitable for suspend.
3282 * Returns 0 on success, negative error code on failure.
3283 */
3284static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3285{
3286	int i, r;
3287
3288	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3289	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3290
3291	/*
3292	 * Per the PMFW team's suggestion, the driver needs to handle disabling
3293	 * the gfxoff and df cstate features for the gpu reset (e.g. Mode1Reset)
3294	 * scenario. Add the missing df cstate disablement here.
3295	 */
3296	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3297		dev_warn(adev->dev, "Failed to disallow df cstate");
3298
3299	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3300		if (!adev->ip_blocks[i].status.valid)
3301			continue;
3302
3303		/* displays are handled separately */
3304		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3305			continue;
3306
3308		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3309		/* XXX handle errors */
3310		if (r) {
3311			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3312				  adev->ip_blocks[i].version->funcs->name, r);
3313			return r;
3314		}
3315
3316		adev->ip_blocks[i].status.hw = false;
3317	}
3318
3319	return 0;
3320}
3321
3322/**
3323 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3324 *
3325 * @adev: amdgpu_device pointer
3326 *
3327 * Main suspend function for hardware IPs.  The list of all the hardware
3328 * IPs that make up the asic is walked, clockgating is disabled and the
3329 * suspend callbacks are run.  suspend puts the hardware and software state
3330 * in each IP into a state suitable for suspend.
3331 * Returns 0 on success, negative error code on failure.
3332 */
3333static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3334{
3335	int i, r;
3336
3337	if (adev->in_s0ix)
3338		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3339
3340	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3341		if (!adev->ip_blocks[i].status.valid)
3342			continue;
3343		/* displays are handled in phase1 */
3344		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3345			continue;
3346		/* PSP lost connection when err_event_athub occurs */
3347		if (amdgpu_ras_intr_triggered() &&
3348		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3349			adev->ip_blocks[i].status.hw = false;
3350			continue;
3351		}
3352
3353		/* skip unnecessary suspend if we have not initialized them yet */
3354		if (adev->gmc.xgmi.pending_reset &&
3355		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3356		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3357		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3358		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3359			adev->ip_blocks[i].status.hw = false;
3360			continue;
3361		}
3362
3363		/* skip suspend of gfx/mes and psp for S0ix
3364		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3365		 * like at runtime. PSP is also part of the always on hardware
3366		 * so no need to suspend it.
3367		 */
3368		if (adev->in_s0ix &&
3369		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3370		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3371		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3372			continue;
3373
3374		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3375		if (adev->in_s0ix &&
3376		    (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3377		     IP_VERSION(5, 0, 0)) &&
3378		    (adev->ip_blocks[i].version->type ==
3379		     AMD_IP_BLOCK_TYPE_SDMA))
3380			continue;
3381
3382		/* During cold boot, swPSP provides the IMU and RLC FW binaries to TOS.
3383		 * These are in TMR, hence are expected to be reused by PSP-TOS to reload
3384		 * from this location, and RLC Autoload automatically also gets loaded
3385		 * from here based on the PMFW -> PSP message during the re-init sequence.
3386		 * Therefore, the psp suspend & resume should be skipped to avoid destroying
3387		 * the TMR and reloading FWs again for IMU enabled APU ASICs.
3388		 */
3389		if (amdgpu_in_reset(adev) &&
3390		    (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3391		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3392			continue;
3393
3395		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3396		/* XXX handle errors */
3397		if (r) {
3398			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3399				  adev->ip_blocks[i].version->funcs->name, r);
3400		}
3401		adev->ip_blocks[i].status.hw = false;
3402		/* handle putting the SMC in the appropriate state */
3403		if (!amdgpu_sriov_vf(adev)) {
3404			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3405				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3406				if (r) {
3407					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3408							adev->mp1_state, r);
3409					return r;
3410				}
3411			}
3412		}
3413	}
3414
3415	return 0;
3416}
3417
3418/**
3419 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3420 *
3421 * @adev: amdgpu_device pointer
3422 *
3423 * Main suspend function for hardware IPs.  The list of all the hardware
3424 * IPs that make up the asic is walked, clockgating is disabled and the
3425 * suspend callbacks are run.  suspend puts the hardware and software state
3426 * in each IP into a state suitable for suspend.
3427 * Returns 0 on success, negative error code on failure.
3428 */
3429int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3430{
3431	int r;
3432
3433	if (amdgpu_sriov_vf(adev)) {
3434		amdgpu_virt_fini_data_exchange(adev);
3435		amdgpu_virt_request_full_gpu(adev, false);
3436	}
3437
3438	amdgpu_ttm_set_buffer_funcs_status(adev, false);
3439
3440	r = amdgpu_device_ip_suspend_phase1(adev);
3441	if (r)
3442		return r;
3443	r = amdgpu_device_ip_suspend_phase2(adev);
3444
3445	if (amdgpu_sriov_vf(adev))
3446		amdgpu_virt_release_full_gpu(adev, false);
3447
3448	return r;
3449}
3450
3451static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3452{
3453	int i, r;
3454
3455	static enum amd_ip_block_type ip_order[] = {
3456		AMD_IP_BLOCK_TYPE_COMMON,
3457		AMD_IP_BLOCK_TYPE_GMC,
3458		AMD_IP_BLOCK_TYPE_PSP,
3459		AMD_IP_BLOCK_TYPE_IH,
3460	};
3461
3462	for (i = 0; i < adev->num_ip_blocks; i++) {
3463		int j;
3464		struct amdgpu_ip_block *block;
3465
3466		block = &adev->ip_blocks[i];
3467		block->status.hw = false;
3468
3469		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3470
3471			if (block->version->type != ip_order[j] ||
3472				!block->status.valid)
3473				continue;
3474
3475			r = block->version->funcs->hw_init(adev);
3476			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3477			if (r)
3478				return r;
3479			block->status.hw = true;
3480		}
3481	}
3482
3483	return 0;
3484}
3485
3486static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3487{
3488	int i, r;
3489
3490	static enum amd_ip_block_type ip_order[] = {
3491		AMD_IP_BLOCK_TYPE_SMC,
3492		AMD_IP_BLOCK_TYPE_DCE,
3493		AMD_IP_BLOCK_TYPE_GFX,
3494		AMD_IP_BLOCK_TYPE_SDMA,
3495		AMD_IP_BLOCK_TYPE_MES,
3496		AMD_IP_BLOCK_TYPE_UVD,
3497		AMD_IP_BLOCK_TYPE_VCE,
3498		AMD_IP_BLOCK_TYPE_VCN,
3499		AMD_IP_BLOCK_TYPE_JPEG
3500	};
3501
3502	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3503		int j;
3504		struct amdgpu_ip_block *block;
3505
3506		for (j = 0; j < adev->num_ip_blocks; j++) {
3507			block = &adev->ip_blocks[j];
3508
3509			if (block->version->type != ip_order[i] ||
3510				!block->status.valid ||
3511				block->status.hw)
3512				continue;
3513
3514			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3515				r = block->version->funcs->resume(adev);
3516			else
3517				r = block->version->funcs->hw_init(adev);
3518
3519			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3520			if (r)
3521				return r;
3522			block->status.hw = true;
3523		}
3524	}
3525
3526	return 0;
3527}
3528
3529/**
3530 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3531 *
3532 * @adev: amdgpu_device pointer
3533 *
3534 * First resume function for hardware IPs.  The list of all the hardware
3535 * IPs that make up the asic is walked and the resume callbacks are run for
3536 * COMMON, GMC, IH, and (under SR-IOV) PSP.  resume puts the hardware into a functional state
3537 * after a suspend and updates the software state as necessary.  This
3538 * function is also used for restoring the GPU after a GPU reset.
3539 * Returns 0 on success, negative error code on failure.
3540 */
3541static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3542{
3543	int i, r;
3544
3545	for (i = 0; i < adev->num_ip_blocks; i++) {
3546		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3547			continue;
3548		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3549		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3550		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3551		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3552
3553			r = adev->ip_blocks[i].version->funcs->resume(adev);
3554			if (r) {
3555				DRM_ERROR("resume of IP block <%s> failed %d\n",
3556					  adev->ip_blocks[i].version->funcs->name, r);
3557				return r;
3558			}
3559			adev->ip_blocks[i].status.hw = true;
3560		}
3561	}
3562
3563	return 0;
3564}
3565
3566/**
3567 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3568 *
3569 * @adev: amdgpu_device pointer
3570 *
3571 * Second resume function for hardware IPs.  The list of all the hardware
3572 * IPs that make up the asic is walked and the resume callbacks are run for
3573 * all blocks except COMMON, GMC, IH, and PSP.  resume puts the hardware into a
3574 * functional state after a suspend and updates the software state as
3575 * necessary.  This function is also used for restoring the GPU after a GPU
3576 * reset.
3577 * Returns 0 on success, negative error code on failure.
3578 */
3579static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3580{
3581	int i, r;
3582
3583	for (i = 0; i < adev->num_ip_blocks; i++) {
3584		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3585			continue;
3586		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3587		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3588		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3589		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3590			continue;
3591		r = adev->ip_blocks[i].version->funcs->resume(adev);
3592		if (r) {
3593			DRM_ERROR("resume of IP block <%s> failed %d\n",
3594				  adev->ip_blocks[i].version->funcs->name, r);
3595			return r;
3596		}
3597		adev->ip_blocks[i].status.hw = true;
3598	}
3599
3600	return 0;
3601}
3602
3603/**
3604 * amdgpu_device_ip_resume - run resume for hardware IPs
3605 *
3606 * @adev: amdgpu_device pointer
3607 *
3608 * Main resume function for hardware IPs.  The hardware IPs
3609 * are split into two resume functions because they are
3610 * also used in recovering from a GPU reset and some additional
3611 * steps need to be taken between them.  In this case (S3/S4) they are
3612 * run sequentially.
3613 * Returns 0 on success, negative error code on failure.
3614 */
3615static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3616{
3617	int r;
3618
3619	r = amdgpu_device_ip_resume_phase1(adev);
3620	if (r)
3621		return r;
3622
3623	r = amdgpu_device_fw_loading(adev);
3624	if (r)
3625		return r;
3626
3627	r = amdgpu_device_ip_resume_phase2(adev);
3628
3629	if (adev->mman.buffer_funcs_ring->sched.ready)
3630		amdgpu_ttm_set_buffer_funcs_status(adev, true);
3631
3632	return r;
3633}
3634
3635/**
3636 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3637 *
3638 * @adev: amdgpu_device pointer
3639 *
3640 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3641 */
3642static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3643{
3644	if (amdgpu_sriov_vf(adev)) {
3645		if (adev->is_atom_fw) {
3646			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3647				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3648		} else {
3649			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3650				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3651		}
3652
3653		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3654			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3655	}
3656}
3657
3658/**
3659 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3660 *
3661 * @asic_type: AMD asic type
3662 *
3663 * Check if there is DC (new modesetting infrastructure) support for an asic.
3664 * Returns true if DC has support, false if not.
3665 */
3666bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3667{
3668	switch (asic_type) {
3669#ifdef CONFIG_DRM_AMDGPU_SI
3670	case CHIP_HAINAN:
3671#endif
3672	case CHIP_TOPAZ:
3673		/* chips with no display hardware */
3674		return false;
3675#if defined(CONFIG_DRM_AMD_DC)
3676	case CHIP_TAHITI:
3677	case CHIP_PITCAIRN:
3678	case CHIP_VERDE:
3679	case CHIP_OLAND:
3680		/*
3681		 * We have systems in the wild with these ASICs that require
3682		 * LVDS and VGA support which is not supported with DC.
3683		 *
3684		 * Fall back to the non-DC driver here by default so as not to
3685		 * cause regressions.
3686		 */
3687#if defined(CONFIG_DRM_AMD_DC_SI)
3688		return amdgpu_dc > 0;
3689#else
3690		return false;
3691#endif
3692	case CHIP_BONAIRE:
3693	case CHIP_KAVERI:
3694	case CHIP_KABINI:
3695	case CHIP_MULLINS:
3696		/*
3697		 * We have systems in the wild with these ASICs that require
3698		 * VGA support which is not supported with DC.
3699		 *
3700		 * Fall back to the non-DC driver here by default so as not to
3701		 * cause regressions.
3702		 */
3703		return amdgpu_dc > 0;
3704	default:
3705		return amdgpu_dc != 0;
3706#else
3707	default:
3708		if (amdgpu_dc > 0)
3709			DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
3710		return false;
3711#endif
3712	}
3713}
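
/*
 * Illustrative note (not part of the driver): on the SI/CIK parts above,
 * DC is opt-in via the amdgpu.dc module parameter, which backs the
 * amdgpu_dc variable checked here. A minimal sketch of forcing DC on at
 * boot, assuming a kernel built with CONFIG_DRM_AMD_DC (and, for SI,
 * CONFIG_DRM_AMD_DC_SI):
 *
 *   amdgpu.dc=1          (on the kernel command line)
 *   modprobe amdgpu dc=1 (when loading the module by hand)
 *
 * With dc=-1 (the default) these chips keep the non-DC path per the
 * fallback logic above.
 */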
3714
3715/**
3716 * amdgpu_device_has_dc_support - check if dc is supported
3717 *
3718 * @adev: amdgpu_device pointer
3719 *
3720 * Returns true for supported, false for not supported
3721 */
3722bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3723{
3724	if (adev->enable_virtual_display ||
3725	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3726		return false;
3727
3728	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3729}
3730
3731static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3732{
3733	struct amdgpu_device *adev =
3734		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3735	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3736
3737	/* It's a bug to not have a hive within this function */
3738	if (WARN_ON(!hive))
3739		return;
3740
3741	/*
3742	 * Use task barrier to synchronize all xgmi reset works across the
3743	 * hive. task_barrier_enter and task_barrier_exit will block
3744	 * until all the threads running the xgmi reset works reach
3745	 * those points. task_barrier_full will do both blocks.
3746	 */
3747	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3748
3749		task_barrier_enter(&hive->tb);
3750		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3751
3752		if (adev->asic_reset_res)
3753			goto fail;
3754
3755		task_barrier_exit(&hive->tb);
3756		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3757
3758		if (adev->asic_reset_res)
3759			goto fail;
3760
3761		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
3762	} else {
3763
3764		task_barrier_full(&hive->tb);
3765		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3766	}
3767
3768fail:
3769	if (adev->asic_reset_res)
3770		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3771			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3772	amdgpu_put_xgmi_hive(hive);
3773}
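
/*
 * Illustrative sketch (not part of the driver) of the task barrier
 * pattern used above, assuming the <drm/task_barrier.h> helpers: every
 * node in the hive runs the same work function, and the barrier makes
 * all of them enter (and optionally exit) a critical step in lockstep.
 *
 *   static void example_reset_step(struct task_barrier *tb)
 *   {
 *           task_barrier_enter(tb);  // block until all nodes arrive
 *           // ... per-node step that must start simultaneously ...
 *           task_barrier_exit(tb);   // block until all nodes are done
 *           // task_barrier_full(tb) would do both waits back to back
 *   }
 */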
3774
3775static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3776{
3777	char *input = amdgpu_lockup_timeout;
3778	char *timeout_setting = NULL;
3779	int index = 0;
3780	long timeout;
3781	int ret = 0;
3782
3783	/*
3784	 * By default, the timeout for non-compute jobs is 10000 ms
3785	 * and 60000 ms for compute jobs.
3786	 * Under SR-IOV, compute jobs use the 60000 ms timeout only in
3787	 * one-VF mode; otherwise they fall back to 10000 ms.
3788	 */
3789	adev->gfx_timeout = msecs_to_jiffies(10000);
3790	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3791	if (amdgpu_sriov_vf(adev))
3792		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3793					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3794	else
3795		adev->compute_timeout =  msecs_to_jiffies(60000);
3796
3797	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3798		while ((timeout_setting = strsep(&input, ",")) &&
3799				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3800			ret = kstrtol(timeout_setting, 0, &timeout);
3801			if (ret)
3802				return ret;
3803
3804			if (timeout == 0) {
3805				index++;
3806				continue;
3807			} else if (timeout < 0) {
3808				timeout = MAX_SCHEDULE_TIMEOUT;
3809				dev_warn(adev->dev, "lockup timeout disabled");
3810				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3811			} else {
3812				timeout = msecs_to_jiffies(timeout);
3813			}
3814
3815			switch (index++) {
3816			case 0:
3817				adev->gfx_timeout = timeout;
3818				break;
3819			case 1:
3820				adev->compute_timeout = timeout;
3821				break;
3822			case 2:
3823				adev->sdma_timeout = timeout;
3824				break;
3825			case 3:
3826				adev->video_timeout = timeout;
3827				break;
3828			default:
3829				break;
3830			}
3831		}
3832		/*
3833		 * There is only one value specified and
3834		 * it should apply to all non-compute jobs.
3835		 */
3836		if (index == 1) {
3837			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3838			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3839				adev->compute_timeout = adev->gfx_timeout;
3840		}
3841	}
3842
3843	return ret;
3844}
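
/*
 * Usage example (illustrative, not part of the driver): the comma
 * separated amdgpu.lockup_timeout values are consumed in the order of
 * the switch above - gfx, compute, sdma, video - where 0 keeps the
 * default and a negative value disables the timeout. For instance:
 *
 *   amdgpu.lockup_timeout=10000,60000,10000,10000
 *
 * sets all four timeouts explicitly, while a single value such as
 *
 *   amdgpu.lockup_timeout=10000
 *
 * applies to all non-compute jobs (and to compute as well under SR-IOV
 * or passthrough), per the index == 1 special case above.
 */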
3845
3846/**
3847 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3848 *
3849 * @adev: amdgpu_device pointer
3850 *
3851 * RAM is direct mapped to the GPU if the IOMMU is disabled or in passthrough mode
3852 */
3853static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3854{
3855	struct iommu_domain *domain;
3856
3857	domain = iommu_get_domain_for_dev(adev->dev);
3858	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3859		adev->ram_is_direct_mapped = true;
3860}
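
/*
 * Illustrative note (not part of the driver): an identity (passthrough)
 * domain is what the check above looks for. On x86 this is typically
 * what you get with the IOMMU disabled in firmware, or with a boot
 * parameter such as:
 *
 *   iommu=pt
 *
 * in which case iommu_get_domain_for_dev() reports a NULL or
 * IOMMU_DOMAIN_IDENTITY domain and ram_is_direct_mapped is set.
 */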
3861
3862static const struct attribute *amdgpu_dev_attributes[] = {
3863	&dev_attr_pcie_replay_count.attr,
3864	NULL
3865};
3866
3867static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3868{
3869	if (amdgpu_mcbp == 1)
3870		adev->gfx.mcbp = true;
3871	else if (amdgpu_mcbp == 0)
3872		adev->gfx.mcbp = false;
3873
3874	if (amdgpu_sriov_vf(adev))
3875		adev->gfx.mcbp = true;
3876
3877	if (adev->gfx.mcbp)
3878		DRM_INFO("MCBP is enabled\n");
3879}
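
/*
 * Usage example (illustrative, not part of the driver): mid command
 * buffer preemption follows the amdgpu.mcbp module parameter checked
 * above - 1 forces it on, 0 forces it off, any other value keeps the
 * default - and SR-IOV VFs always enable it:
 *
 *   modprobe amdgpu mcbp=1
 */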
3880
3881/**
3882 * amdgpu_device_init - initialize the driver
3883 *
3884 * @adev: amdgpu_device pointer
3885 * @flags: driver flags
3886 *
3887 * Initializes the driver info and hw (all asics).
3888 * Returns 0 for success or an error on failure.
3889 * Called at driver startup.
3890 */
3891int amdgpu_device_init(struct amdgpu_device *adev,
3892		       uint32_t flags)
3893{
3894	struct drm_device *ddev = adev_to_drm(adev);
3895	struct pci_dev *pdev = adev->pdev;
3896	int r, i;
3897	bool px = false;
3898	u32 max_MBps;
3899	int tmp;
3900
3901	adev->shutdown = false;
3902	adev->flags = flags;
3903
3904	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3905		adev->asic_type = amdgpu_force_asic_type;
3906	else
3907		adev->asic_type = flags & AMD_ASIC_MASK;
3908
3909	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3910	if (amdgpu_emu_mode == 1)
3911		adev->usec_timeout *= 10;
3912	adev->gmc.gart_size = 512 * 1024 * 1024;
3913	adev->accel_working = false;
3914	adev->num_rings = 0;
3915	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3916	adev->mman.buffer_funcs = NULL;
3917	adev->mman.buffer_funcs_ring = NULL;
3918	adev->vm_manager.vm_pte_funcs = NULL;
3919	adev->vm_manager.vm_pte_num_scheds = 0;
3920	adev->gmc.gmc_funcs = NULL;
3921	adev->harvest_ip_mask = 0x0;
3922	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3923	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3924
3925	adev->smc_rreg = &amdgpu_invalid_rreg;
3926	adev->smc_wreg = &amdgpu_invalid_wreg;
3927	adev->pcie_rreg = &amdgpu_invalid_rreg;
3928	adev->pcie_wreg = &amdgpu_invalid_wreg;
3929	adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
3930	adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
3931	adev->pciep_rreg = &amdgpu_invalid_rreg;
3932	adev->pciep_wreg = &amdgpu_invalid_wreg;
3933	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3934	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3935	adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
3936	adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
3937	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3938	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3939	adev->didt_rreg = &amdgpu_invalid_rreg;
3940	adev->didt_wreg = &amdgpu_invalid_wreg;
3941	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3942	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3943	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3944	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3945
3946	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3947		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3948		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3949
3950	/* mutex initialization is all done here so we
3951	 * can call these functions again without locking issues
3952	 */
3953	mutex_init(&adev->firmware.mutex);
3954	mutex_init(&adev->pm.mutex);
3955	mutex_init(&adev->gfx.gpu_clock_mutex);
3956	mutex_init(&adev->srbm_mutex);
3957	mutex_init(&adev->gfx.pipe_reserve_mutex);
3958	mutex_init(&adev->gfx.gfx_off_mutex);
3959	mutex_init(&adev->gfx.partition_mutex);
3960	mutex_init(&adev->grbm_idx_mutex);
3961	mutex_init(&adev->mn_lock);
3962	mutex_init(&adev->virt.vf_errors.lock);
3963	hash_init(adev->mn_hash);
3964	mutex_init(&adev->psp.mutex);
3965	mutex_init(&adev->notifier_lock);
3966	mutex_init(&adev->pm.stable_pstate_ctx_lock);
3967	mutex_init(&adev->benchmark_mutex);
3968
3969	amdgpu_device_init_apu_flags(adev);
3970
3971	r = amdgpu_device_check_arguments(adev);
3972	if (r)
3973		return r;
3974
3975	spin_lock_init(&adev->mmio_idx_lock);
3976	spin_lock_init(&adev->smc_idx_lock);
3977	spin_lock_init(&adev->pcie_idx_lock);
3978	spin_lock_init(&adev->uvd_ctx_idx_lock);
3979	spin_lock_init(&adev->didt_idx_lock);
3980	spin_lock_init(&adev->gc_cac_idx_lock);
3981	spin_lock_init(&adev->se_cac_idx_lock);
3982	spin_lock_init(&adev->audio_endpt_idx_lock);
3983	spin_lock_init(&adev->mm_stats.lock);
3984
3985	INIT_LIST_HEAD(&adev->shadow_list);
3986	mutex_init(&adev->shadow_list_lock);
3987
3988	INIT_LIST_HEAD(&adev->reset_list);
3989
3990	INIT_LIST_HEAD(&adev->ras_list);
3991
3992	INIT_LIST_HEAD(&adev->pm.od_kobj_list);
3993
3994	INIT_DELAYED_WORK(&adev->delayed_init_work,
3995			  amdgpu_device_delayed_init_work_handler);
3996	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3997			  amdgpu_device_delay_enable_gfx_off);
3998
3999	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
4000
4001	adev->gfx.gfx_off_req_count = 1;
4002	adev->gfx.gfx_off_residency = 0;
4003	adev->gfx.gfx_off_entrycount = 0;
4004	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
4005
4006	atomic_set(&adev->throttling_logging_enabled, 1);
4007	/*
4008	 * If throttling continues, logging will be performed every minute
4009	 * to avoid log flooding. "-1" is subtracted since the thermal
4010	 * throttling interrupt comes every second. Thus, the total logging
4011	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
4012	 * for the throttling interrupt) = 60 seconds.
4013	 */
4014	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
4015	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
4016
4017	/* Registers mapping */
4018	/* TODO: block userspace mapping of io register */
4019	if (adev->asic_type >= CHIP_BONAIRE) {
4020		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
4021		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
4022	} else {
4023		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
4024		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
4025	}
4026
4027	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
4028		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
4029
4030	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
4031	if (!adev->rmmio)
4032		return -ENOMEM;
4033
4034	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
4035	DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
4036
4037	/*
4038	 * The reset domain needs to be present early, before any XGMI hive is
4039	 * discovered and initialized, so the reset semaphore and in_gpu_reset
4040	 * flag can be used early during init and before any call to RREG32.
4041	 */
4042	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
4043	if (!adev->reset_domain)
4044		return -ENOMEM;
4045
4046	/* detect hw virtualization here */
4047	amdgpu_detect_virtualization(adev);
4048
4049	amdgpu_device_get_pcie_info(adev);
4050
4051	r = amdgpu_device_get_job_timeout_settings(adev);
4052	if (r) {
4053		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
4054		return r;
4055	}
4056
4057	amdgpu_device_set_mcbp(adev);
4058
4059	/* early init functions */
4060	r = amdgpu_device_ip_early_init(adev);
4061	if (r)
4062		return r;
4063
4064	/* Get rid of things like offb */
4065	r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
4066	if (r)
4067		return r;
4068
4069	/* Enable TMZ based on IP_VERSION */
4070	amdgpu_gmc_tmz_set(adev);
4071
4072	amdgpu_gmc_noretry_set(adev);
4073	/* Need to get xgmi info early to decide the reset behavior*/
4074	if (adev->gmc.xgmi.supported) {
4075		r = adev->gfxhub.funcs->get_xgmi_info(adev);
4076		if (r)
4077			return r;
4078	}
4079
4080	/* enable PCIE atomic ops */
4081	if (amdgpu_sriov_vf(adev)) {
4082		if (adev->virt.fw_reserve.p_pf2vf)
4083			adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
4084						      adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
4085				(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4086	/* APUs from gfx9 onwards don't rely on PCIe atomics; their internal
4087	 * path natively supports atomics, so set have_atomics_support to true.
4088	 */
4089	} else if ((adev->flags & AMD_IS_APU) &&
4090		   (amdgpu_ip_version(adev, GC_HWIP, 0) >
4091		    IP_VERSION(9, 0, 0))) {
4092		adev->have_atomics_support = true;
4093	} else {
4094		adev->have_atomics_support =
4095			!pci_enable_atomic_ops_to_root(adev->pdev,
4096					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
4097					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4098	}
4099
4100	if (!adev->have_atomics_support)
4101		dev_info(adev->dev, "PCIe atomic ops are not supported\n");
4102
4103	/* doorbell bar mapping and doorbell index init */
4104	amdgpu_doorbell_init(adev);
4105
4106	if (amdgpu_emu_mode == 1) {
4107		/* post the asic on emulation mode */
4108		emu_soc_asic_init(adev);
4109		goto fence_driver_init;
4110	}
4111
4112	amdgpu_reset_init(adev);
4113
4114	/* detect if we are running with an SR-IOV vbios */
4115	if (adev->bios)
4116		amdgpu_device_detect_sriov_bios(adev);
4117
4118	/* check if we need to reset the asic
4119	 *  E.g., driver was not cleanly unloaded previously, etc.
4120	 */
4121	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
4122		if (adev->gmc.xgmi.num_physical_nodes) {
4123			dev_info(adev->dev, "Pending hive reset.\n");
4124			adev->gmc.xgmi.pending_reset = true;
4125			/* Only need to init the blocks necessary for SMU to handle the reset */
4126			for (i = 0; i < adev->num_ip_blocks; i++) {
4127				if (!adev->ip_blocks[i].status.valid)
4128					continue;
4129				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
4130				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
4131				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
4132				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
4133					DRM_DEBUG("IP %s disabled for hw_init.\n",
4134						adev->ip_blocks[i].version->funcs->name);
4135					adev->ip_blocks[i].status.hw = true;
4136				}
4137			}
4138		} else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
4139			   !amdgpu_device_has_display_hardware(adev)) {
4140			r = psp_gpu_reset(adev);
4141		} else {
4142			tmp = amdgpu_reset_method;
4143			/* It should do a default reset when loading or reloading the driver,
4144			 * regardless of the module parameter reset_method.
4145			 */
4146			amdgpu_reset_method = AMD_RESET_METHOD_NONE;
4147			r = amdgpu_asic_reset(adev);
4148			amdgpu_reset_method = tmp;
4149		}
4150
4151		if (r) {
4152			dev_err(adev->dev, "asic reset on init failed\n");
4153			goto failed;
4154		}
4155	}
4156
4157	/* Post card if necessary */
4158	if (amdgpu_device_need_post(adev)) {
4159		if (!adev->bios) {
4160			dev_err(adev->dev, "no vBIOS found\n");
4161			r = -EINVAL;
4162			goto failed;
4163		}
4164		DRM_INFO("GPU posting now...\n");
4165		r = amdgpu_device_asic_init(adev);
4166		if (r) {
4167			dev_err(adev->dev, "gpu post error!\n");
4168			goto failed;
4169		}
4170	}
4171
4172	if (adev->bios) {
4173		if (adev->is_atom_fw) {
4174			/* Initialize clocks */
4175			r = amdgpu_atomfirmware_get_clock_info(adev);
4176			if (r) {
4177				dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
4178				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4179				goto failed;
4180			}
4181		} else {
4182			/* Initialize clocks */
4183			r = amdgpu_atombios_get_clock_info(adev);
4184			if (r) {
4185				dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
4186				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4187				goto failed;
4188			}
4189			/* init i2c buses */
4190			if (!amdgpu_device_has_dc_support(adev))
4191				amdgpu_atombios_i2c_init(adev);
4192		}
4193	}
4194
4195fence_driver_init:
4196	/* Fence driver */
4197	r = amdgpu_fence_driver_sw_init(adev);
4198	if (r) {
4199		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
4200		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
4201		goto failed;
4202	}
4203
4204	/* init the mode config */
4205	drm_mode_config_init(adev_to_drm(adev));
4206
4207	r = amdgpu_device_ip_init(adev);
4208	if (r) {
4209		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
4210		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
4211		goto release_ras_con;
4212	}
4213
4214	amdgpu_fence_driver_hw_init(adev);
4215
4216	dev_info(adev->dev,
4217		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
4218			adev->gfx.config.max_shader_engines,
4219			adev->gfx.config.max_sh_per_se,
4220			adev->gfx.config.max_cu_per_sh,
4221			adev->gfx.cu_info.number);
4222
4223	adev->accel_working = true;
4224
4225	amdgpu_vm_check_compute_bug(adev);
4226
4227	/* Initialize the buffer migration limit. */
4228	if (amdgpu_moverate >= 0)
4229		max_MBps = amdgpu_moverate;
4230	else
4231		max_MBps = 8; /* Allow 8 MB/s. */
4232	/* Get a log2 for easy divisions. */
4233	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
4234
4235	/*
4236	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
4237	 * Otherwise the mgpu fan boost feature will be skipped because the
4238	 * gpu instance count would be too low.
4239	 */
4240	amdgpu_register_gpu_instance(adev);
4241
4242	/* enable clockgating, etc. after ib tests, etc. since some blocks require
4243	 * explicit gating rather than handling it automatically.
4244	 */
4245	if (!adev->gmc.xgmi.pending_reset) {
4246		r = amdgpu_device_ip_late_init(adev);
4247		if (r) {
4248			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4249			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4250			goto release_ras_con;
4251		}
4252		/* must succeed. */
4253		amdgpu_ras_resume(adev);
4254		queue_delayed_work(system_wq, &adev->delayed_init_work,
4255				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4256	}
4257
4258	if (amdgpu_sriov_vf(adev)) {
4259		amdgpu_virt_release_full_gpu(adev, true);
4260		flush_delayed_work(&adev->delayed_init_work);
4261	}
4262
4263	/*
4264	 * Register these sysfs interfaces after `late_init`, since some of
4265	 * the operations performed in `late_init` might affect the creation
4266	 * of the sysfs interfaces.
4267	 */
4268	r = amdgpu_atombios_sysfs_init(adev);
4269	if (r)
4270		drm_err(&adev->ddev,
4271			"registering atombios sysfs failed (%d).\n", r);
4272
4273	r = amdgpu_pm_sysfs_init(adev);
4274	if (r)
4275		DRM_ERROR("registering pm sysfs failed (%d).\n", r);
4276
4277	r = amdgpu_ucode_sysfs_init(adev);
4278	if (r) {
4279		adev->ucode_sysfs_en = false;
4280		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
4281	} else
4282		adev->ucode_sysfs_en = true;
4283
4284	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
4285	if (r)
4286		dev_err(adev->dev, "Could not create amdgpu device attr\n");
4287
4288	r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
4289	if (r)
4290		dev_err(adev->dev,
4291			"Could not create amdgpu board attributes\n");
4292
4293	amdgpu_fru_sysfs_init(adev);
4294	amdgpu_reg_state_sysfs_init(adev);
4295
4296	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
4297		r = amdgpu_pmu_init(adev);
4298		if (r)
4299			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
4300	}
4301	/* Keep the stored PCI config space at hand to restore it after a sudden PCI error */
4302	if (amdgpu_device_cache_pci_state(adev->pdev))
4303		pci_restore_state(pdev);
4304
4305	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
4306	/* this will fail for cards that aren't VGA class devices, just
4307	 * ignore it
4308	 */
4309	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4310		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4311
4312	px = amdgpu_device_supports_px(ddev);
4313
4314	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4315				apple_gmux_detect(NULL, NULL)))
4316		vga_switcheroo_register_client(adev->pdev,
4317					       &amdgpu_switcheroo_ops, px);
4318
4319	if (px)
4320		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4321
4322	if (adev->gmc.xgmi.pending_reset)
4323		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
4324				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4325
4326	amdgpu_device_check_iommu_direct_map(adev);
4327
4328	return 0;
4329
4330release_ras_con:
4331	if (amdgpu_sriov_vf(adev))
4332		amdgpu_virt_release_full_gpu(adev, true);
4333
4334	/* failed in exclusive mode due to timeout */
4335	if (amdgpu_sriov_vf(adev) &&
4336		!amdgpu_sriov_runtime(adev) &&
4337		amdgpu_virt_mmio_blocked(adev) &&
4338		!amdgpu_virt_wait_reset(adev)) {
4339		dev_err(adev->dev, "VF exclusive mode timeout\n");
4340		/* Don't send request since VF is inactive. */
4341		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4342		adev->virt.ops = NULL;
4343		r = -EAGAIN;
4344	}
4345	amdgpu_release_ras_context(adev);
4346
4347failed:
4348	amdgpu_vf_error_trans_all(adev);
4349
4350	return r;
4351}
4352
4353static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4354{
4355
4356	/* Clear all CPU mappings pointing to this device */
4357	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4358
4359	/* Unmap all mapped bars - Doorbell, registers and VRAM */
4360	amdgpu_doorbell_fini(adev);
4361
4362	iounmap(adev->rmmio);
4363	adev->rmmio = NULL;
4364	if (adev->mman.aper_base_kaddr)
4365		iounmap(adev->mman.aper_base_kaddr);
4366	adev->mman.aper_base_kaddr = NULL;
4367
4368	/* Memory manager related */
4369	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4370		arch_phys_wc_del(adev->gmc.vram_mtrr);
4371		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4372	}
4373}
4374
4375/**
4376 * amdgpu_device_fini_hw - tear down the driver
4377 *
4378 * @adev: amdgpu_device pointer
4379 *
4380 * Tear down the driver info (all asics).
4381 * Called at driver shutdown.
4382 */
4383void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4384{
4385	dev_info(adev->dev, "amdgpu: finishing device.\n");
4386	flush_delayed_work(&adev->delayed_init_work);
4387	adev->shutdown = true;
4388
4389	/* make sure the IB tests have finished before entering exclusive mode
4390	 * to avoid preemption during the IB tests
4391	 */
4392	if (amdgpu_sriov_vf(adev)) {
4393		amdgpu_virt_request_full_gpu(adev, false);
4394		amdgpu_virt_fini_data_exchange(adev);
4395	}
4396
4397	/* disable all interrupts */
4398	amdgpu_irq_disable_all(adev);
4399	if (adev->mode_info.mode_config_initialized) {
4400		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4401			drm_helper_force_disable_all(adev_to_drm(adev));
4402		else
4403			drm_atomic_helper_shutdown(adev_to_drm(adev));
4404	}
4405	amdgpu_fence_driver_hw_fini(adev);
4406
4407	if (adev->mman.initialized)
4408		drain_workqueue(adev->mman.bdev.wq);
4409
4410	if (adev->pm.sysfs_initialized)
4411		amdgpu_pm_sysfs_fini(adev);
4412	if (adev->ucode_sysfs_en)
4413		amdgpu_ucode_sysfs_fini(adev);
4414	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4415	amdgpu_fru_sysfs_fini(adev);
4416
4417	amdgpu_reg_state_sysfs_fini(adev);
4418
4419	/* RAS features must be disabled before hw fini */
4420	amdgpu_ras_pre_fini(adev);
4421
4422	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4423
4424	amdgpu_device_ip_fini_early(adev);
4425
4426	amdgpu_irq_fini_hw(adev);
4427
4428	if (adev->mman.initialized)
4429		ttm_device_clear_dma_mappings(&adev->mman.bdev);
4430
4431	amdgpu_gart_dummy_page_fini(adev);
4432
4433	if (drm_dev_is_unplugged(adev_to_drm(adev)))
4434		amdgpu_device_unmap_mmio(adev);
4435
4436}
4437
4438void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4439{
4440	int idx;
4441	bool px;
4442
4443	amdgpu_fence_driver_sw_fini(adev);
4444	amdgpu_device_ip_fini(adev);
4445	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4446	adev->accel_working = false;
4447	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4448
4449	amdgpu_reset_fini(adev);
4450
4451	/* free i2c buses */
4452	if (!amdgpu_device_has_dc_support(adev))
4453		amdgpu_i2c_fini(adev);
4454
4455	if (amdgpu_emu_mode != 1)
4456		amdgpu_atombios_fini(adev);
4457
4458	kfree(adev->bios);
4459	adev->bios = NULL;
4460
4461	kfree(adev->fru_info);
4462	adev->fru_info = NULL;
4463
4464	px = amdgpu_device_supports_px(adev_to_drm(adev));
4465
4466	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4467				apple_gmux_detect(NULL, NULL)))
4468		vga_switcheroo_unregister_client(adev->pdev);
4469
4470	if (px)
4471		vga_switcheroo_fini_domain_pm_ops(adev->dev);
4472
4473	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4474		vga_client_unregister(adev->pdev);
4475
4476	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4477
4478		iounmap(adev->rmmio);
4479		adev->rmmio = NULL;
4480		amdgpu_doorbell_fini(adev);
4481		drm_dev_exit(idx);
4482	}
4483
4484	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4485		amdgpu_pmu_fini(adev);
4486	if (adev->mman.discovery_bin)
4487		amdgpu_discovery_fini(adev);
4488
4489	amdgpu_reset_put_reset_domain(adev->reset_domain);
4490	adev->reset_domain = NULL;
4491
4492	kfree(adev->pci_state);
4493
4494}
4495
4496/**
4497 * amdgpu_device_evict_resources - evict device resources
4498 * @adev: amdgpu device object
4499 *
4500 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4501 * of the vram memory type. Mainly used for evicting device resources
4502 * at suspend time.
4503 *
4504 */
4505static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4506{
4507	int ret;
4508
4509	/* No need to evict vram on APUs for suspend to ram or s2idle */
4510	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4511		return 0;
4512
4513	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4514	if (ret)
4515		DRM_WARN("evicting device resources failed\n");
4516	return ret;
4517}
4518
4519/*
4520 * Suspend & resume.
4521 */
4522/**
4523 * amdgpu_device_prepare - prepare for device suspend
4524 *
4525 * @dev: drm dev pointer
4526 *
4527 * Prepare to put the hw in the suspend state (all asics).
4528 * Returns 0 for success or an error on failure.
4529 * Called at driver suspend.
4530 */
4531int amdgpu_device_prepare(struct drm_device *dev)
4532{
4533	struct amdgpu_device *adev = drm_to_adev(dev);
4534	int i, r;
4535
4536	amdgpu_choose_low_power_state(adev);
4537
4538	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4539		return 0;
4540
4541	/* Evict the majority of BOs before starting suspend sequence */
4542	r = amdgpu_device_evict_resources(adev);
4543	if (r)
4544		goto unprepare;
4545
4546	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
4547
4548	for (i = 0; i < adev->num_ip_blocks; i++) {
4549		if (!adev->ip_blocks[i].status.valid)
4550			continue;
4551		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
4552			continue;
4553		r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
4554		if (r)
4555			goto unprepare;
4556	}
4557
4558	return 0;
4559
4560unprepare:
4561	adev->in_s0ix = adev->in_s3 = false;
4562
4563	return r;
4564}
4565
4566/**
4567 * amdgpu_device_suspend - initiate device suspend
4568 *
4569 * @dev: drm dev pointer
4570 * @fbcon: notify the fbdev of suspend
4571 *
4572 * Puts the hw in the suspend state (all asics).
4573 * Returns 0 for success or an error on failure.
4574 * Called at driver suspend.
4575 */
4576int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4577{
4578	struct amdgpu_device *adev = drm_to_adev(dev);
4579	int r = 0;
4580
4581	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4582		return 0;
4583
4584	adev->in_suspend = true;
4585
4586	if (amdgpu_sriov_vf(adev)) {
4587		amdgpu_virt_fini_data_exchange(adev);
4588		r = amdgpu_virt_request_full_gpu(adev, false);
4589		if (r)
4590			return r;
4591	}
4592
4593	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4594		DRM_WARN("smart shift update failed\n");
4595
4596	if (fbcon)
4597		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4598
4599	cancel_delayed_work_sync(&adev->delayed_init_work);
4600
4601	amdgpu_ras_suspend(adev);
4602
4603	amdgpu_device_ip_suspend_phase1(adev);
4604
4605	if (!adev->in_s0ix)
4606		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4607
4608	r = amdgpu_device_evict_resources(adev);
4609	if (r)
4610		return r;
4611
4612	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4613
4614	amdgpu_fence_driver_hw_fini(adev);
4615
4616	amdgpu_device_ip_suspend_phase2(adev);
4617
4618	if (amdgpu_sriov_vf(adev))
4619		amdgpu_virt_release_full_gpu(adev, false);
4620
4621	r = amdgpu_dpm_notify_rlc_state(adev, false);
4622	if (r)
4623		return r;
4624
4625	return 0;
4626}
4627
4628/**
4629 * amdgpu_device_resume - initiate device resume
4630 *
4631 * @dev: drm dev pointer
4632 * @fbcon: notify the fbdev of resume
4633 *
4634 * Bring the hw back to operating state (all asics).
4635 * Returns 0 for success or an error on failure.
4636 * Called at driver resume.
4637 */
4638int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4639{
4640	struct amdgpu_device *adev = drm_to_adev(dev);
4641	int r = 0;
4642
4643	if (amdgpu_sriov_vf(adev)) {
4644		r = amdgpu_virt_request_full_gpu(adev, true);
4645		if (r)
4646			return r;
4647	}
4648
4649	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4650		return 0;
4651
4652	if (adev->in_s0ix)
4653		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4654
4655	/* post card */
4656	if (amdgpu_device_need_post(adev)) {
4657		r = amdgpu_device_asic_init(adev);
4658		if (r)
4659			dev_err(adev->dev, "amdgpu asic init failed\n");
4660	}
4661
4662	r = amdgpu_device_ip_resume(adev);
4663
4664	if (r) {
4665		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4666		goto exit;
4667	}
4668	amdgpu_fence_driver_hw_init(adev);
4669
4670	if (!adev->in_s0ix) {
4671		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4672		if (r)
4673			goto exit;
4674	}
4675
4676	r = amdgpu_device_ip_late_init(adev);
4677	if (r)
4678		goto exit;
4679
4680	queue_delayed_work(system_wq, &adev->delayed_init_work,
4681			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4682exit:
4683	if (amdgpu_sriov_vf(adev)) {
4684		amdgpu_virt_init_data_exchange(adev);
4685		amdgpu_virt_release_full_gpu(adev, true);
4686	}
4687
4688	if (r)
4689		return r;
4690
4691	/* Make sure IB tests flushed */
4692	flush_delayed_work(&adev->delayed_init_work);
4693
4694	if (fbcon)
4695		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4696
4697	amdgpu_ras_resume(adev);
4698
4699	if (adev->mode_info.num_crtc) {
4700		/*
4701		 * Most of the connector probing functions try to acquire runtime pm
4702		 * refs to ensure that the GPU is powered on when connector polling is
4703		 * performed. Since we're calling this from a runtime PM callback,
4704		 * trying to acquire rpm refs will cause us to deadlock.
4705		 *
4706		 * Since we're guaranteed to be holding the rpm lock, it's safe to
4707		 * temporarily disable the rpm helpers so this doesn't deadlock us.
4708		 */
4709#ifdef CONFIG_PM
4710		dev->dev->power.disable_depth++;
4711#endif
4712		if (!adev->dc_enabled)
4713			drm_helper_hpd_irq_event(dev);
4714		else
4715			drm_kms_helper_hotplug_event(dev);
4716#ifdef CONFIG_PM
4717		dev->dev->power.disable_depth--;
4718#endif
4719	}
4720	adev->in_suspend = false;
4721
4722	if (adev->enable_mes)
4723		amdgpu_mes_self_test(adev);
4724
4725	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4726		DRM_WARN("smart shift update failed\n");
4727
4728	return 0;
4729}
4730
4731/**
4732 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4733 *
4734 * @adev: amdgpu_device pointer
4735 *
4736 * The list of all the hardware IPs that make up the asic is walked and
4737 * the check_soft_reset callbacks are run.  check_soft_reset determines
4738 * if the asic is still hung or not.
4739 * Returns true if any of the IPs are still in a hung state, false if not.
4740 */
4741static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4742{
4743	int i;
4744	bool asic_hang = false;
4745
4746	if (amdgpu_sriov_vf(adev))
4747		return true;
4748
4749	if (amdgpu_asic_need_full_reset(adev))
4750		return true;
4751
4752	for (i = 0; i < adev->num_ip_blocks; i++) {
4753		if (!adev->ip_blocks[i].status.valid)
4754			continue;
4755		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4756			adev->ip_blocks[i].status.hang =
4757				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4758		if (adev->ip_blocks[i].status.hang) {
4759			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4760			asic_hang = true;
4761		}
4762	}
4763	return asic_hang;
4764}
4765
4766/**
4767 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4768 *
4769 * @adev: amdgpu_device pointer
4770 *
4771 * The list of all the hardware IPs that make up the asic is walked and the
4772 * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4773 * handles any IP specific hardware or software state changes that are
4774 * necessary for a soft reset to succeed.
4775 * Returns 0 on success, negative error code on failure.
4776 */
4777static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4778{
4779	int i, r = 0;
4780
4781	for (i = 0; i < adev->num_ip_blocks; i++) {
4782		if (!adev->ip_blocks[i].status.valid)
4783			continue;
4784		if (adev->ip_blocks[i].status.hang &&
4785		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4786			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4787			if (r)
4788				return r;
4789		}
4790	}
4791
4792	return 0;
4793}
4794
4795/**
4796 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4797 *
4798 * @adev: amdgpu_device pointer
4799 *
4800 * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4801 * reset is necessary to recover.
4802 * Returns true if a full asic reset is required, false if not.
4803 */
4804static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4805{
4806	int i;
4807
4808	if (amdgpu_asic_need_full_reset(adev))
4809		return true;
4810
4811	for (i = 0; i < adev->num_ip_blocks; i++) {
4812		if (!adev->ip_blocks[i].status.valid)
4813			continue;
4814		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4815		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4816		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4817		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4818		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4819			if (adev->ip_blocks[i].status.hang) {
4820				dev_info(adev->dev, "Some blocks need a full reset!\n");
4821				return true;
4822			}
4823		}
4824	}
4825	return false;
4826}
4827
4828/**
4829 * amdgpu_device_ip_soft_reset - do a soft reset
4830 *
4831 * @adev: amdgpu_device pointer
4832 *
4833 * The list of all the hardware IPs that make up the asic is walked and the
4834 * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4835 * IP specific hardware or software state changes that are necessary to soft
4836 * reset the IP.
4837 * Returns 0 on success, negative error code on failure.
4838 */
4839static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4840{
4841	int i, r = 0;
4842
4843	for (i = 0; i < adev->num_ip_blocks; i++) {
4844		if (!adev->ip_blocks[i].status.valid)
4845			continue;
4846		if (adev->ip_blocks[i].status.hang &&
4847		    adev->ip_blocks[i].version->funcs->soft_reset) {
4848			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4849			if (r)
4850				return r;
4851		}
4852	}
4853
4854	return 0;
4855}
4856
4857/**
4858 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4859 *
4860 * @adev: amdgpu_device pointer
4861 *
4862 * The list of all the hardware IPs that make up the asic is walked and the
4863 * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4864 * handles any IP specific hardware or software state changes that are
4865 * necessary after the IP has been soft reset.
4866 * Returns 0 on success, negative error code on failure.
4867 */
4868static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4869{
4870	int i, r = 0;
4871
4872	for (i = 0; i < adev->num_ip_blocks; i++) {
4873		if (!adev->ip_blocks[i].status.valid)
4874			continue;
4875		if (adev->ip_blocks[i].status.hang &&
4876		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4877			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4878		if (r)
4879			return r;
4880	}
4881
4882	return 0;
4883}
4884
4885/**
4886 * amdgpu_device_recover_vram - Recover some VRAM contents
4887 *
4888 * @adev: amdgpu_device pointer
4889 *
4890 * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4891 * restore things like GPUVM page tables after a GPU reset where
4892 * the contents of VRAM might be lost.
4893 *
4894 * Returns:
4895 * 0 on success, negative error code on failure.
4896 */
4897static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4898{
4899	struct dma_fence *fence = NULL, *next = NULL;
4900	struct amdgpu_bo *shadow;
4901	struct amdgpu_bo_vm *vmbo;
4902	long r = 1, tmo;
4903
4904	if (amdgpu_sriov_runtime(adev))
4905		tmo = msecs_to_jiffies(8000);
4906	else
4907		tmo = msecs_to_jiffies(100);
4908
4909	dev_info(adev->dev, "recover vram bo from shadow start\n");
4910	mutex_lock(&adev->shadow_list_lock);
4911	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4912		/* If the vm is a compute context or the adev is an APU, the shadow will be NULL */
4913		if (!vmbo->shadow)
4914			continue;
4915		shadow = vmbo->shadow;
4916
4917		/* No need to recover an evicted BO */
4918		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4919		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4920		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4921			continue;
4922
4923		r = amdgpu_bo_restore_shadow(shadow, &next);
4924		if (r)
4925			break;
4926
4927		if (fence) {
4928			tmo = dma_fence_wait_timeout(fence, false, tmo);
4929			dma_fence_put(fence);
4930			fence = next;
4931			if (tmo == 0) {
4932				r = -ETIMEDOUT;
4933				break;
4934			} else if (tmo < 0) {
4935				r = tmo;
4936				break;
4937			}
4938		} else {
4939			fence = next;
4940		}
4941	}
4942	mutex_unlock(&adev->shadow_list_lock);
4943
4944	if (fence)
4945		tmo = dma_fence_wait_timeout(fence, false, tmo);
4946	dma_fence_put(fence);
4947
4948	if (r < 0 || tmo <= 0) {
4949		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4950		return -EIO;
4951	}
4952
4953	dev_info(adev->dev, "recover vram bo from shadow done\n");
4954	return 0;
4955}
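
/*
 * Illustrative sketch (not part of the driver) of the pipelined wait
 * used above: restore N+1 is issued before waiting on restore N, so one
 * copy is always in flight while the previous fence is being waited on.
 * for_each_shadow() and issue_restore() are hypothetical helpers.
 *
 *   struct dma_fence *fence = NULL, *next = NULL;
 *
 *   for_each_shadow(shadow) {
 *           issue_restore(shadow, &next);   // hypothetical; kicks a copy
 *           if (fence) {
 *                   tmo = dma_fence_wait_timeout(fence, false, tmo);
 *                   dma_fence_put(fence);
 *           }
 *           fence = next;                   // wait for it next round
 *   }
 *   if (fence)
 *           tmo = dma_fence_wait_timeout(fence, false, tmo);
 *   dma_fence_put(fence);
 */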
4956
4957
4958/**
4959 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4960 *
4961 * @adev: amdgpu_device pointer
4962 * @from_hypervisor: request from hypervisor
4963 *
4964 * Do a VF FLR and reinitialize the asic.
4965 * Returns 0 on success, negative error code otherwise.
4966 */
4967static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4968				     bool from_hypervisor)
4969{
4970	int r;
4971	struct amdgpu_hive_info *hive = NULL;
4972	int retry_limit = 0;
4973
4974retry:
4975	amdgpu_amdkfd_pre_reset(adev);
4976
4977	if (from_hypervisor)
4978		r = amdgpu_virt_request_full_gpu(adev, true);
4979	else
4980		r = amdgpu_virt_reset_gpu(adev);
4981	if (r)
4982		return r;
4983	amdgpu_irq_gpu_reset_resume_helper(adev);
4984
4985	/* some SW cleanup the VF needs to do before recovery */
4986	amdgpu_virt_post_reset(adev);
4987
4988	/* Resume IP prior to SMC */
4989	r = amdgpu_device_ip_reinit_early_sriov(adev);
4990	if (r)
4991		goto error;
4992
4993	amdgpu_virt_init_data_exchange(adev);
4994
4995	r = amdgpu_device_fw_loading(adev);
4996	if (r)
4997		return r;
4998
4999	/* now we are okay to resume SMC/CP/SDMA */
5000	r = amdgpu_device_ip_reinit_late_sriov(adev);
5001	if (r)
5002		goto error;
5003
5004	hive = amdgpu_get_xgmi_hive(adev);
5005	/* Update PSP FW topology after reset */
5006	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
5007		r = amdgpu_xgmi_update_topology(hive, adev);
5008
5009	if (hive)
5010		amdgpu_put_xgmi_hive(hive);
5011
5012	if (!r) {
5013		r = amdgpu_ib_ring_tests(adev);
5014
5015		amdgpu_amdkfd_post_reset(adev);
5016	}
5017
5018error:
5019	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
5020		amdgpu_inc_vram_lost(adev);
5021		r = amdgpu_device_recover_vram(adev);
5022	}
5023	amdgpu_virt_release_full_gpu(adev, true);
5024
5025	if (AMDGPU_RETRY_SRIOV_RESET(r)) {
5026		if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
5027			retry_limit++;
5028			goto retry;
5029		} else
5030			DRM_ERROR("GPU reset retry is beyond the retry limit\n");
5031	}
5032
5033	return r;
5034}
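
/*
 * Illustrative note (not part of the driver): the retry above is gated,
 * in spirit, by
 *
 *   if (AMDGPU_RETRY_SRIOV_RESET(r) && retry_limit < AMDGPU_MAX_RETRY_LIMIT)
 *           goto retry;
 *
 * so only -EBUSY, -ETIMEDOUT and -EINVAL are retried, and at most
 * AMDGPU_MAX_RETRY_LIMIT (2) extra attempts are made. A host still busy
 * finishing a previous FLR that returns -EBUSY therefore gets two more
 * chances before the reset is reported as failed.
 */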
5035
5036/**
5037 * amdgpu_device_has_job_running - check if there is any job in mirror list
5038 *
5039 * @adev: amdgpu_device pointer
5040 *
5041 * Check if there is any job in the mirror (pending) list.
5042 */
5043bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
5044{
5045	int i;
5046	struct drm_sched_job *job;
5047
5048	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5049		struct amdgpu_ring *ring = adev->rings[i];
5050
5051		if (!amdgpu_ring_sched_ready(ring))
5052			continue;
5053
5054		spin_lock(&ring->sched.job_list_lock);
5055		job = list_first_entry_or_null(&ring->sched.pending_list,
5056					       struct drm_sched_job, list);
5057		spin_unlock(&ring->sched.job_list_lock);
5058		if (job)
5059			return true;
5060	}
5061	return false;
5062}
5063
5064/**
5065 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
5066 *
5067 * @adev: amdgpu_device pointer
5068 *
5069 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
5070 * a hung GPU.
5071 */
5072bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
5073{
5074
5075	if (amdgpu_gpu_recovery == 0)
5076		goto disabled;
5077
5078	/* Skip soft reset check in fatal error mode */
5079	if (!amdgpu_ras_is_poison_mode_supported(adev))
5080		return true;
5081
5082	if (amdgpu_sriov_vf(adev))
5083		return true;
5084
5085	if (amdgpu_gpu_recovery == -1) {
5086		switch (adev->asic_type) {
5087#ifdef CONFIG_DRM_AMDGPU_SI
5088		case CHIP_VERDE:
5089		case CHIP_TAHITI:
5090		case CHIP_PITCAIRN:
5091		case CHIP_OLAND:
5092		case CHIP_HAINAN:
5093#endif
5094#ifdef CONFIG_DRM_AMDGPU_CIK
5095		case CHIP_KAVERI:
5096		case CHIP_KABINI:
5097		case CHIP_MULLINS:
5098#endif
5099		case CHIP_CARRIZO:
5100		case CHIP_STONEY:
5101		case CHIP_CYAN_SKILLFISH:
5102			goto disabled;
5103		default:
5104			break;
5105		}
5106	}
5107
5108	return true;
5109
5110disabled:
5111	dev_info(adev->dev, "GPU recovery disabled.\n");
5112	return false;
5113}
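
/*
 * Usage example (illustrative, not part of the driver): the
 * amdgpu.gpu_recovery module parameter drives this check - 0 disables
 * recovery, -1 (the default) applies the per-ASIC policy above, which
 * opts the listed SI/CIK/APU parts out, and any other value enables
 * recovery unconditionally. To force recovery on such a part:
 *
 *   modprobe amdgpu gpu_recovery=1
 */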
5114
5115int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
5116{
5117	u32 i;
5118	int ret = 0;
5119
5120	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
5121
5122	dev_info(adev->dev, "GPU mode1 reset\n");
5123
5124	/* disable BM */
5125	pci_clear_master(adev->pdev);
5126
5127	amdgpu_device_cache_pci_state(adev->pdev);
5128
5129	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
5130		dev_info(adev->dev, "GPU smu mode1 reset\n");
5131		ret = amdgpu_dpm_mode1_reset(adev);
5132	} else {
5133		dev_info(adev->dev, "GPU psp mode1 reset\n");
5134		ret = psp_gpu_reset(adev);
5135	}
5136
5137	if (ret)
5138		goto mode1_reset_failed;
5139
5140	amdgpu_device_load_pci_state(adev->pdev);
5141	ret = amdgpu_psp_wait_for_bootloader(adev);
5142	if (ret)
5143		goto mode1_reset_failed;
5144
5145	/* wait for asic to come out of reset */
5146	for (i = 0; i < adev->usec_timeout; i++) {
5147		u32 memsize = adev->nbio.funcs->get_memsize(adev);
5148
5149		if (memsize != 0xffffffff)
5150			break;
5151		udelay(1);
5152	}
5153
5154	if (i >= adev->usec_timeout) {
5155		ret = -ETIMEDOUT;
5156		goto mode1_reset_failed;
5157	}
5158
5159	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
5160
5161	return 0;
5162
5163mode1_reset_failed:
5164	dev_err(adev->dev, "GPU mode1 reset failed\n");
5165	return ret;
5166}
5167
5168int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
5169				 struct amdgpu_reset_context *reset_context)
5170{
5171	int i, r = 0;
5172	struct amdgpu_job *job = NULL;
5173	bool need_full_reset =
5174		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5175
5176	if (reset_context->reset_req_dev == adev)
5177		job = reset_context->job;
5178
5179	if (amdgpu_sriov_vf(adev)) {
5180		/* stop the data exchange thread */
5181		amdgpu_virt_fini_data_exchange(adev);
5182	}
5183
5184	amdgpu_fence_driver_isr_toggle(adev, true);
5185
5186	/* block all schedulers and reset given job's ring */
5187	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5188		struct amdgpu_ring *ring = adev->rings[i];
5189
5190		if (!amdgpu_ring_sched_ready(ring))
5191			continue;
5192
5193		/* Clear the job fences from the fence driver so that
5194		 * force_completion doesn't leave NULL and vm flush fences behind
5195		 */
5196		amdgpu_fence_driver_clear_job_fences(ring);
5197
5198		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
5199		amdgpu_fence_driver_force_completion(ring);
5200	}
5201
5202	amdgpu_fence_driver_isr_toggle(adev, false);
5203
5204	if (job && job->vm)
5205		drm_sched_increase_karma(&job->base);
5206
5207	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
5208	/* If reset handler not implemented, continue; otherwise return */
5209	if (r == -EOPNOTSUPP)
5210		r = 0;
5211	else
5212		return r;
5213
5214	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
5215	if (!amdgpu_sriov_vf(adev)) {
5216
5217		if (!need_full_reset)
5218			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
5219
5220		if (!need_full_reset && amdgpu_gpu_recovery &&
5221		    amdgpu_device_ip_check_soft_reset(adev)) {
5222			amdgpu_device_ip_pre_soft_reset(adev);
5223			r = amdgpu_device_ip_soft_reset(adev);
5224			amdgpu_device_ip_post_soft_reset(adev);
5225			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5226				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
5227				need_full_reset = true;
5228			}
5229		}
5230
5231		if (need_full_reset) {
5232			r = amdgpu_device_ip_suspend(adev);
5233			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5234		} else {
5235			clear_bit(AMDGPU_NEED_FULL_RESET,
5236				  &reset_context->flags);
5237		}
5238	}
5239
5240	return r;
5241}
5242
5243static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
5244{
5245	int i;
5246
5247	lockdep_assert_held(&adev->reset_domain->sem);
5248
5249	for (i = 0; i < adev->reset_info.num_regs; i++) {
5250		adev->reset_info.reset_dump_reg_value[i] =
5251			RREG32(adev->reset_info.reset_dump_reg_list[i]);
5252
5253		trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i],
5254					     adev->reset_info.reset_dump_reg_value[i]);
5255	}
5256
5257	return 0;
5258}
5259
5260int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5261			 struct amdgpu_reset_context *reset_context)
5262{
5263	struct amdgpu_device *tmp_adev = NULL;
5264	bool need_full_reset, skip_hw_reset, vram_lost = false;
5265	int r = 0;
5266
5267	/* Try reset handler method first */
5268	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5269				    reset_list);
5270	amdgpu_reset_reg_dumps(tmp_adev);
5271
5272	reset_context->reset_device_list = device_list_handle;
5273	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5274	/* If reset handler not implemented, continue; otherwise return */
5275	if (r == -EOPNOTSUPP)
5276		r = 0;
5277	else
5278		return r;
5279
5280	/* Reset handler not implemented, use the default method */
5281	need_full_reset =
5282		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5283	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5284
5285	/*
5286	 * ASIC reset has to be done on all XGMI hive nodes ASAP
5287	 * to allow proper link negotiation in the FW (within 1 sec)
5288	 */
5289	if (!skip_hw_reset && need_full_reset) {
5290		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5291			/* For XGMI run all resets in parallel to speed up the process */
5292			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5293				tmp_adev->gmc.xgmi.pending_reset = false;
5294				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
5295					r = -EALREADY;
5296			} else
5297				r = amdgpu_asic_reset(tmp_adev);
5298
5299			if (r) {
5300				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
5301					 r, adev_to_drm(tmp_adev)->unique);
5302				goto out;
5303			}
5304		}
5305
5306		/* For XGMI wait for all resets to complete before proceed */
5307		if (!r) {
5308			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5309				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5310					flush_work(&tmp_adev->xgmi_reset_work);
5311					r = tmp_adev->asic_reset_res;
5312					if (r)
5313						break;
5314				}
5315			}
5316		}
5317	}
5318
5319	if (!r && amdgpu_ras_intr_triggered()) {
5320		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5321			amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB);
5322		}
5323
5324		amdgpu_ras_intr_cleared();
5325	}
5326
5327	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5328		if (need_full_reset) {
5329			/* post card */
5330			amdgpu_ras_set_fed(tmp_adev, false);
5331			r = amdgpu_device_asic_init(tmp_adev);
5332			if (r) {
5333				dev_warn(tmp_adev->dev, "asic atom init failed!");
5334			} else {
5335				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5336
5337				r = amdgpu_device_ip_resume_phase1(tmp_adev);
5338				if (r)
5339					goto out;
5340
5341				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5342
5343				amdgpu_coredump(tmp_adev, vram_lost, reset_context);
5344
5345				if (vram_lost) {
5346					DRM_INFO("VRAM is lost due to GPU reset!\n");
5347					amdgpu_inc_vram_lost(tmp_adev);
5348				}
5349
5350				r = amdgpu_device_fw_loading(tmp_adev);
5351				if (r)
5352					return r;
5353
5354				r = amdgpu_xcp_restore_partition_mode(
5355					tmp_adev->xcp_mgr);
5356				if (r)
5357					goto out;
5358
5359				r = amdgpu_device_ip_resume_phase2(tmp_adev);
5360				if (r)
5361					goto out;
5362
5363				if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
5364					amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
5365
5366				if (vram_lost)
5367					amdgpu_device_fill_reset_magic(tmp_adev);
5368
5369				/*
5370				 * Add this ASIC as tracked, as the reset has
5371				 * already completed successfully.
5372				 */
5373				amdgpu_register_gpu_instance(tmp_adev);
5374
5375				if (!reset_context->hive &&
5376				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5377					amdgpu_xgmi_add_device(tmp_adev);
5378
5379				r = amdgpu_device_ip_late_init(tmp_adev);
5380				if (r)
5381					goto out;
5382
5383				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
5384
5385				/*
5386				 * The GPU enters a bad state once the number of
5387				 * faulty pages flagged by ECC reaches the
5388				 * threshold, and RAS recovery is scheduled next.
5389				 * So add one check here to break recovery if the
5390				 * bad page threshold has indeed been exceeded,
5391				 * and remind the user to retire this GPU or set
5392				 * a bigger bad_page_threshold value to fix this
5393				 * when probing the driver again.
5394				 */
5395				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
5396					/* must succeed. */
5397					amdgpu_ras_resume(tmp_adev);
5398				} else {
5399					r = -EINVAL;
5400					goto out;
5401				}
5402
5403				/* Update PSP FW topology after reset */
5404				if (reset_context->hive &&
5405				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5406					r = amdgpu_xgmi_update_topology(
5407						reset_context->hive, tmp_adev);
5408			}
5409		}
5410
5411out:
5412		if (!r) {
5413			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5414			r = amdgpu_ib_ring_tests(tmp_adev);
5415			if (r) {
5416				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5417				need_full_reset = true;
5418				r = -EAGAIN;
5419				goto end;
5420			}
5421		}
5422
5423		if (!r)
5424			r = amdgpu_device_recover_vram(tmp_adev);
5425		else
5426			tmp_adev->asic_reset_res = r;
5427	}
5428
5429end:
5430	if (need_full_reset)
5431		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5432	else
5433		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5434	return r;
5435}
5436
5437static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5438{
5439
5440	switch (amdgpu_asic_reset_method(adev)) {
5441	case AMD_RESET_METHOD_MODE1:
5442		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5443		break;
5444	case AMD_RESET_METHOD_MODE2:
5445		adev->mp1_state = PP_MP1_STATE_RESET;
5446		break;
5447	default:
5448		adev->mp1_state = PP_MP1_STATE_NONE;
5449		break;
5450	}
5451}
5452
5453static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5454{
5455	amdgpu_vf_error_trans_all(adev);
5456	adev->mp1_state = PP_MP1_STATE_NONE;
5457}
5458
5459static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5460{
5461	struct pci_dev *p = NULL;
5462
5463	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5464			adev->pdev->bus->number, 1);
5465	if (p) {
5466		pm_runtime_enable(&(p->dev));
5467		pm_runtime_resume(&(p->dev));
5468	}
5469
5470	pci_dev_put(p);
5471}
5472
5473static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5474{
5475	enum amd_reset_method reset_method;
5476	struct pci_dev *p = NULL;
5477	u64 expires;
5478
5479	/*
5480	 * For now, only BACO and mode1 reset are confirmed to suffer
5481	 * the audio issue if the audio device is not properly suspended.
5482	 */
5483	reset_method = amdgpu_asic_reset_method(adev);
5484	if ((reset_method != AMD_RESET_METHOD_BACO) &&
5485	     (reset_method != AMD_RESET_METHOD_MODE1))
5486		return -EINVAL;
5487
5488	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5489			adev->pdev->bus->number, 1);
5490	if (!p)
5491		return -ENODEV;
5492
5493	expires = pm_runtime_autosuspend_expiration(&(p->dev));
5494	if (!expires)
5495		/*
5496		 * If we cannot get the audio device autosuspend delay,
5497		 * a fixed 4s interval is used. Since 3s is the audio
5498		 * controller's default autosuspend delay setting, the
5499		 * 4s used here is guaranteed to cover it.
5500		 */
5501		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5502
5503	while (!pm_runtime_status_suspended(&(p->dev))) {
5504		if (!pm_runtime_suspend(&(p->dev)))
5505			break;
5506
5507		if (expires < ktime_get_mono_fast_ns()) {
5508			dev_warn(adev->dev, "failed to suspend display audio\n");
5509			pci_dev_put(p);
5510			/* TODO: abort the succeeding gpu reset? */
5511			return -ETIMEDOUT;
5512		}
5513	}
5514
5515	pm_runtime_disable(&(p->dev));
5516
5517	pci_dev_put(p);
5518	return 0;
5519}
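/*
 * In the two helpers above, function 1 of the GPU's PCI device (looked
 * up with pci_get_domain_bus_and_slot(..., 1)) is the dGPU's HDMI/DP
 * audio controller. Suspending it through runtime PM before the reset,
 * and disabling runtime PM until the reset is done, keeps the audio
 * driver from touching hardware that is about to be reset underneath it.
 */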
5520
5521static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5522{
5523	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5524
5525#if defined(CONFIG_DEBUG_FS)
5526	if (!amdgpu_sriov_vf(adev))
5527		cancel_work(&adev->reset_work);
5528#endif
5529
5530	if (adev->kfd.dev)
5531		cancel_work(&adev->kfd.reset_work);
5532
5533	if (amdgpu_sriov_vf(adev))
5534		cancel_work(&adev->virt.flr_work);
5535
5536	if (con && adev->ras_enabled)
5537		cancel_work(&con->recovery_work);
5538
5539}
5540
5541/**
5542 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5543 *
5544 * @adev: amdgpu_device pointer
5545 * @job: which job triggered the hang
5546 * @reset_context: amdgpu reset context pointer
5547 *
5548 * Attempt to reset the GPU if it has hung (all ASICs), doing either a
5549 * soft reset or a full reset, and reinitialize the ASIC.
5550 * Returns 0 for success or an error on failure.
5551 */
5552
5553int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5554			      struct amdgpu_job *job,
5555			      struct amdgpu_reset_context *reset_context)
5556{
5557	struct list_head device_list, *device_list_handle =  NULL;
5558	bool job_signaled = false;
5559	struct amdgpu_hive_info *hive = NULL;
5560	struct amdgpu_device *tmp_adev = NULL;
5561	int i, r = 0;
5562	bool need_emergency_restart = false;
5563	bool audio_suspended = false;
5564
5565	/*
5566	 * Special case: RAS triggered and full reset isn't supported
5567	 */
5568	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5569
5570	/*
5571	 * Flush RAM to disk so that after reboot
5572	 * the user can read the log and see why the system rebooted.
5573	 */
5574	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
5575		amdgpu_ras_get_context(adev)->reboot) {
5576		DRM_WARN("Emergency reboot.");
5577
5578		ksys_sync_helper();
5579		emergency_restart();
5580	}
5581
5582	dev_info(adev->dev, "GPU %s begin!\n",
5583		need_emergency_restart ? "jobs stop" : "reset");
5584
5585	if (!amdgpu_sriov_vf(adev))
5586		hive = amdgpu_get_xgmi_hive(adev);
5587	if (hive)
5588		mutex_lock(&hive->hive_lock);
5589
5590	reset_context->job = job;
5591	reset_context->hive = hive;
5592	/*
5593	 * Build list of devices to reset.
5594	 * In case we are in XGMI hive mode, re-sort the device list
5595	 * to put adev in the first position.
5596	 */
5597	INIT_LIST_HEAD(&device_list);
5598	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5599		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5600			list_add_tail(&tmp_adev->reset_list, &device_list);
5601			if (adev->shutdown)
5602				tmp_adev->shutdown = true;
5603		}
5604		if (!list_is_first(&adev->reset_list, &device_list))
5605			list_rotate_to_front(&adev->reset_list, &device_list);
5606		device_list_handle = &device_list;
5607	} else {
5608		list_add_tail(&adev->reset_list, &device_list);
5609		device_list_handle = &device_list;
5610	}
5611
5612	/* We need to lock reset domain only once both for XGMI and single device */
5613	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5614				    reset_list);
5615	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5616
5617	/* block all schedulers and reset given job's ring */
5618	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5619
5620		amdgpu_device_set_mp1_state(tmp_adev);
5621
5622		/*
5623		 * Try to put the audio codec into suspend state
5624		 * before the gpu reset starts.
5625		 *
5626		 * Since the power domain of the graphics device is
5627		 * shared with the AZ power domain, without this we
5628		 * may change the audio hardware behind the audio
5629		 * driver's back, which would trigger audio codec
5630		 * errors.
5631		 */
5632		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5633			audio_suspended = true;
5634
5635		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5636
5637		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5638
5639		if (!amdgpu_sriov_vf(tmp_adev))
5640			amdgpu_amdkfd_pre_reset(tmp_adev);
5641
5642		/*
5643		 * Mark these ASICs to be reset as untracked first,
5644		 * and add them back after the reset completes.
5645		 */
5646		amdgpu_unregister_gpu_instance(tmp_adev);
5647
5648		drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5649
5650		/* disable ras on ALL IPs */
5651		if (!need_emergency_restart &&
5652		      amdgpu_device_ip_need_full_reset(tmp_adev))
5653			amdgpu_ras_suspend(tmp_adev);
5654
5655		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5656			struct amdgpu_ring *ring = tmp_adev->rings[i];
5657
5658			if (!amdgpu_ring_sched_ready(ring))
5659				continue;
5660
5661			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5662
5663			if (need_emergency_restart)
5664				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5665		}
5666		atomic_inc(&tmp_adev->gpu_reset_counter);
5667	}
5668
5669	if (need_emergency_restart)
5670		goto skip_sched_resume;
5671
5672	/*
5673	 * Must check guilty signal here since after this point all old
5674	 * HW fences are force signaled.
5675	 *
5676	 * job->base holds a reference to parent fence
5677	 */
5678	if (job && dma_fence_is_signaled(&job->hw_fence)) {
5679		job_signaled = true;
5680		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset\n");
5681		goto skip_hw_reset;
5682	}
5683
5684retry:	/* Pre ASIC reset for the remaining adevs in the XGMI hive. */
5685	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5686		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5687		/* TODO: Should we stop? */
5688		if (r) {
5689			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s ",
5690				  r, adev_to_drm(tmp_adev)->unique);
5691			tmp_adev->asic_reset_res = r;
5692		}
5693
5694		/*
5695		 * Drop all pending non scheduler resets. Scheduler resets
5696		 * were already dropped during drm_sched_stop
5697		 */
5698		amdgpu_device_stop_pending_resets(tmp_adev);
5699	}
5700
5701	/* Actual ASIC resets if needed.*/
5702	/* Host driver will handle XGMI hive reset for SRIOV */
5703	if (amdgpu_sriov_vf(adev)) {
5704		r = amdgpu_device_reset_sriov(adev, !job);
5705		if (r)
5706			adev->asic_reset_res = r;
5707
5708		/* Aldebaran and gfx_11_0_3 support RAS in SRIOV, so we need to resume RAS during reset */
5709		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
5710			    IP_VERSION(9, 4, 2) ||
5711		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
5712		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
5713			amdgpu_ras_resume(adev);
5714	} else {
5715		r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5716		if (r == -EAGAIN)
5717			goto retry;
5718	}
5719
5720skip_hw_reset:
5721
5722	/* Post ASIC reset for all devs. */
5723	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5724
5725		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5726			struct amdgpu_ring *ring = tmp_adev->rings[i];
5727
5728			if (!amdgpu_ring_sched_ready(ring))
5729				continue;
5730
5731			drm_sched_start(&ring->sched, true);
5732		}
5733
5734		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
5735			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5736
5737		if (tmp_adev->asic_reset_res)
5738			r = tmp_adev->asic_reset_res;
5739
5740		tmp_adev->asic_reset_res = 0;
5741
5742		if (r) {
5743			/* bad news, how do we tell it to userspace? */
5744			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5745			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5746		} else {
5747			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5748			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5749				DRM_WARN("smart shift update failed\n");
5750		}
5751	}
5752
5753skip_sched_resume:
5754	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5755		/* unlock kfd: SRIOV would do it separately */
5756		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5757			amdgpu_amdkfd_post_reset(tmp_adev);
5758
5759		/* kfd_post_reset will do nothing if the kfd device is not initialized,
5760		 * so we need to bring up kfd here if it wasn't initialized before
5761		 */
5762		if (!adev->kfd.init_complete)
5763			amdgpu_amdkfd_device_init(adev);
5764
5765		if (audio_suspended)
5766			amdgpu_device_resume_display_audio(tmp_adev);
5767
5768		amdgpu_device_unset_mp1_state(tmp_adev);
5769
5770		amdgpu_ras_set_error_query_ready(tmp_adev, true);
5771	}
5772
5773	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5774					    reset_list);
5775	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5776
5777	if (hive) {
5778		mutex_unlock(&hive->hive_lock);
5779		amdgpu_put_xgmi_hive(hive);
5780	}
5781
5782	if (r)
5783		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5784
5785	atomic_set(&adev->reset_domain->reset_res, r);
5786	return r;
5787}
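/*
 * Recovery sequencing recap for amdgpu_device_gpu_recover(): build the
 * device list (whole XGMI hive or single adev), lock the shared reset
 * domain once, then per device set the MP1 state, suspend display
 * audio, stop the schedulers and cancel pending work; do the actual
 * ASIC reset (delegated to the host for SR-IOV); then resume IP blocks,
 * restart the schedulers and undo the bookkeeping in reverse order.
 */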
5788
5789/**
5790 * amdgpu_device_partner_bandwidth - find the bandwidth of the appropriate partner
5791 *
5792 * @adev: amdgpu_device pointer
5793 * @speed: pointer to the speed of the link
5794 * @width: pointer to the width of the link
5795 *
5796 * Evaluate the hierarchy to find the speed and link width capabilities of the
5797 * first physical partner to an AMD dGPU.
5798 * This will exclude any virtual switches and links.
5799 */
5800static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
5801					    enum pci_bus_speed *speed,
5802					    enum pcie_link_width *width)
5803{
5804	struct pci_dev *parent = adev->pdev;
5805
5806	if (!speed || !width)
5807		return;
5808
5809	*speed = PCI_SPEED_UNKNOWN;
5810	*width = PCIE_LNK_WIDTH_UNKNOWN;
5811
5812	if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
5813		while ((parent = pci_upstream_bridge(parent))) {
5814			/* skip upstream/downstream switches internal to the dGPU */
5815			if (parent->vendor == PCI_VENDOR_ID_ATI)
5816				continue;
5817			*speed = pcie_get_speed_cap(parent);
5818			*width = pcie_get_width_cap(parent);
5819			break;
5820		}
5821	} else {
5822		/* use the current speeds rather than max if switching is not supported */
5823		pcie_bandwidth_available(adev->pdev, NULL, speed, width);
5824	}
5825}
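/*
 * Walking pci_upstream_bridge() while skipping PCI_VENDOR_ID_ATI ports
 * steps over the switch that many dGPU boards integrate on the card
 * itself, so the caps reported above are those of the first upstream
 * port that actually belongs to the platform.
 */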
5826
5827/**
5828 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5829 *
5830 * @adev: amdgpu_device pointer
5831 *
5832 * Fetches and stores in the driver the PCIE capabilities (gen speed
5833 * and lanes) of the slot the device is in. Handles APUs and
5834 * virtualized environments where PCIE config space may not be available.
5835 */
5836static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5837{
5838	struct pci_dev *pdev;
5839	enum pci_bus_speed speed_cap, platform_speed_cap;
5840	enum pcie_link_width platform_link_width;
5841
5842	if (amdgpu_pcie_gen_cap)
5843		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5844
5845	if (amdgpu_pcie_lane_cap)
5846		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5847
5848	/* covers APUs as well */
5849	if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5850		if (adev->pm.pcie_gen_mask == 0)
5851			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5852		if (adev->pm.pcie_mlw_mask == 0)
5853			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5854		return;
5855	}
5856
5857	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5858		return;
5859
5860	amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
5861					&platform_link_width);
5862
5863	if (adev->pm.pcie_gen_mask == 0) {
5864		/* asic caps */
5865		pdev = adev->pdev;
5866		speed_cap = pcie_get_speed_cap(pdev);
5867		if (speed_cap == PCI_SPEED_UNKNOWN) {
5868			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5869						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5870						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5871		} else {
5872			if (speed_cap == PCIE_SPEED_32_0GT)
5873				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5874							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5875							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5876							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5877							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5878			else if (speed_cap == PCIE_SPEED_16_0GT)
5879				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5880							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5881							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5882							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5883			else if (speed_cap == PCIE_SPEED_8_0GT)
5884				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5885							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5886							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5887			else if (speed_cap == PCIE_SPEED_5_0GT)
5888				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5889							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5890			else
5891				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5892		}
5893		/* platform caps */
5894		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5895			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5896						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5897		} else {
5898			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5899				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5900							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5901							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5902							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5903							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5904			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5905				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5906							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5907							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5908							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5909			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5910				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5911							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5912							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5913			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5914				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5915							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5916			else
5917				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5918
5919		}
5920	}
5921	if (adev->pm.pcie_mlw_mask == 0) {
5922		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5923			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5924		} else {
5925			switch (platform_link_width) {
5926			case PCIE_LNK_X32:
5927				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5928							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5929							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5930							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5931							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5932							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5933							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5934				break;
5935			case PCIE_LNK_X16:
5936				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5937							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5938							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5939							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5940							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5941							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5942				break;
5943			case PCIE_LNK_X12:
5944				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5945							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5946							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5947							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5948							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5949				break;
5950			case PCIE_LNK_X8:
5951				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5952							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5953							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5954							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5955				break;
5956			case PCIE_LNK_X4:
5957				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5958							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5959							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5960				break;
5961			case PCIE_LNK_X2:
5962				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5963							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5964				break;
5965			case PCIE_LNK_X1:
5966				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5967				break;
5968			default:
5969				break;
5970			}
5971		}
5972	}
5973}
5974
5975/**
5976 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5977 *
5978 * @adev: amdgpu_device pointer
5979 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5980 *
5981 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5982 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5983 * @peer_adev.
5984 */
5985bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5986				      struct amdgpu_device *peer_adev)
5987{
5988#ifdef CONFIG_HSA_AMD_P2P
5989	uint64_t address_mask = peer_adev->dev->dma_mask ?
5990		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5991	resource_size_t aper_limit =
5992		adev->gmc.aper_base + adev->gmc.aper_size - 1;
5993	bool p2p_access =
5994		!adev->gmc.xgmi.connected_to_cpu &&
5995		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5996
5997	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5998		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5999		!(adev->gmc.aper_base & address_mask ||
6000		  aper_limit & address_mask));
6001#else
6002	return false;
6003#endif
6004}
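/*
 * Worked example for the mask check above: a peer with a 32-bit
 * dma_mask yields address_mask = ~0xffffffffULL, so the test passes
 * only when both aper_base and aper_limit sit below 4 GiB, i.e. when
 * the whole VRAM aperture is reachable by the peer's DMA engine. The
 * visible_vram_size == real_vram_size condition additionally requires
 * a "large BAR" board where all of VRAM is exposed through the BAR.
 */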
6005
6006int amdgpu_device_baco_enter(struct drm_device *dev)
6007{
6008	struct amdgpu_device *adev = drm_to_adev(dev);
6009	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6010
6011	if (!amdgpu_device_supports_baco(dev))
6012		return -ENOTSUPP;
6013
6014	if (ras && adev->ras_enabled &&
6015	    adev->nbio.funcs->enable_doorbell_interrupt)
6016		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
6017
6018	return amdgpu_dpm_baco_enter(adev);
6019}
6020
6021int amdgpu_device_baco_exit(struct drm_device *dev)
6022{
6023	struct amdgpu_device *adev = drm_to_adev(dev);
6024	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6025	int ret = 0;
6026
6027	if (!amdgpu_device_supports_baco(dev))
6028		return -ENOTSUPP;
6029
6030	ret = amdgpu_dpm_baco_exit(adev);
6031	if (ret)
6032		return ret;
6033
6034	if (ras && adev->ras_enabled &&
6035	    adev->nbio.funcs->enable_doorbell_interrupt)
6036		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
6037
6038	if (amdgpu_passthrough(adev) &&
6039	    adev->nbio.funcs->clear_doorbell_interrupt)
6040		adev->nbio.funcs->clear_doorbell_interrupt(adev);
6041
6042	return 0;
6043}
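/*
 * BACO ("Bus Active, Chip Off") powers down the chip while keeping the
 * PCIe link alive. On RAS-enabled parts the doorbell interrupt is
 * masked across the transition above so that doorbell traffic cannot
 * raise interrupts while the chip is off, and is re-enabled on exit.
 */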
6044
6045/**
6046 * amdgpu_pci_error_detected - Called when a PCI error is detected.
6047 * @pdev: PCI device struct
6048 * @state: PCI channel state
6049 *
6050 * Description: Called when a PCI error is detected.
6051 *
6052 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
6053 */
6054pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6055{
6056	struct drm_device *dev = pci_get_drvdata(pdev);
6057	struct amdgpu_device *adev = drm_to_adev(dev);
6058	int i;
6059
6060	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
6061
6062	if (adev->gmc.xgmi.num_physical_nodes > 1) {
6063		DRM_WARN("No support for XGMI hive yet...");
6064		return PCI_ERS_RESULT_DISCONNECT;
6065	}
6066
6067	adev->pci_channel_state = state;
6068
6069	switch (state) {
6070	case pci_channel_io_normal:
6071		return PCI_ERS_RESULT_CAN_RECOVER;
6072	/* Fatal error, prepare for slot reset */
6073	case pci_channel_io_frozen:
6074		/*
6075		 * Locking adev->reset_domain->sem will prevent any external access
6076		 * to GPU during PCI error recovery
6077		 */
6078		amdgpu_device_lock_reset_domain(adev->reset_domain);
6079		amdgpu_device_set_mp1_state(adev);
6080
6081		/*
6082		 * Block any work scheduling as we do for regular GPU reset
6083		 * for the duration of the recovery
6084		 */
6085		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6086			struct amdgpu_ring *ring = adev->rings[i];
6087
6088			if (!amdgpu_ring_sched_ready(ring))
6089				continue;
6090
6091			drm_sched_stop(&ring->sched, NULL);
6092		}
6093		atomic_inc(&adev->gpu_reset_counter);
6094		return PCI_ERS_RESULT_NEED_RESET;
6095	case pci_channel_io_perm_failure:
6096		/* Permanent error, prepare for device removal */
6097		return PCI_ERS_RESULT_DISCONNECT;
6098	}
6099
6100	return PCI_ERS_RESULT_NEED_RESET;
6101}
6102
6103/**
6104 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
6105 * @pdev: pointer to PCI device
6106 */
6107pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
6108{
6109
6110	DRM_INFO("PCI error: mmio enabled callback!!\n");
6111
6112	/* TODO - dump whatever for debugging purposes */
6113
6114	/* This is called only if amdgpu_pci_error_detected returns
6115	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
6116	 * works, no need to reset slot.
6117	 */
6118
6119	return PCI_ERS_RESULT_RECOVERED;
6120}
6121
6122/**
6123 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
6124 * @pdev: PCI device struct
6125 *
6126 * Description: This routine is called by the pci error recovery
6127 * code after the PCI slot has been reset, just before we
6128 * should resume normal operations.
6129 */
6130pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
6131{
6132	struct drm_device *dev = pci_get_drvdata(pdev);
6133	struct amdgpu_device *adev = drm_to_adev(dev);
6134	int r, i;
6135	struct amdgpu_reset_context reset_context;
6136	u32 memsize;
6137	struct list_head device_list;
6138	struct amdgpu_hive_info *hive;
6139	int hive_ras_recovery = 0;
6140	struct amdgpu_ras *ras;
6141
6142	/* PCI error slot reset should be skipped during RAS recovery */
6143	hive = amdgpu_get_xgmi_hive(adev);
6144	if (hive) {
6145		hive_ras_recovery = atomic_read(&hive->ras_recovery);
6146		amdgpu_put_xgmi_hive(hive);
6147	}
6148	ras = amdgpu_ras_get_context(adev);
6149	if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) &&
6150		 ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
6151		return PCI_ERS_RESULT_RECOVERED;
6152
6153	DRM_INFO("PCI error: slot reset callback!!\n");
6154
6155	memset(&reset_context, 0, sizeof(reset_context));
6156
6157	INIT_LIST_HEAD(&device_list);
6158	list_add_tail(&adev->reset_list, &device_list);
6159
6160	/* wait for asic to come out of reset */
6161	msleep(500);
6162
6163	/* Restore PCI config space */
6164	amdgpu_device_load_pci_state(pdev);
6165
6166	/* confirm the ASIC came out of reset */
6167	for (i = 0; i < adev->usec_timeout; i++) {
6168		memsize = amdgpu_asic_get_config_memsize(adev);
6169
6170		if (memsize != 0xffffffff)
6171			break;
6172		udelay(1);
6173	}
6174	if (memsize == 0xffffffff) {
6175		r = -ETIME;
6176		goto out;
6177	}
6178
6179	reset_context.method = AMD_RESET_METHOD_NONE;
6180	reset_context.reset_req_dev = adev;
6181	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
6182	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
6183
6184	adev->no_hw_access = true;
6185	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
6186	adev->no_hw_access = false;
6187	if (r)
6188		goto out;
6189
6190	r = amdgpu_do_asic_reset(&device_list, &reset_context);
6191
6192out:
6193	if (!r) {
6194		if (amdgpu_device_cache_pci_state(adev->pdev))
6195			pci_restore_state(adev->pdev);
6196
6197		DRM_INFO("PCIe error recovery succeeded\n");
6198	} else {
6199		DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
6200		amdgpu_device_unset_mp1_state(adev);
6201		amdgpu_device_unlock_reset_domain(adev->reset_domain);
6202	}
6203
6204	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
6205}
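/*
 * The slot-reset path above mirrors a regular GPU reset, but with
 * AMDGPU_SKIP_HW_RESET set: the PCI core has already reset the slot,
 * so once the config-space memsize reads confirm the ASIC is back,
 * only the software side (pre-reset teardown plus the
 * amdgpu_do_asic_reset() re-init) needs to run.
 */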
6206
6207/**
6208 * amdgpu_pci_resume() - resume normal ops after PCI reset
6209 * @pdev: pointer to PCI device
6210 *
6211 * Called when the error recovery driver tells us that it's
6212 * OK to resume normal operation.
6213 */
6214void amdgpu_pci_resume(struct pci_dev *pdev)
6215{
6216	struct drm_device *dev = pci_get_drvdata(pdev);
6217	struct amdgpu_device *adev = drm_to_adev(dev);
6218	int i;
6219
6220
6221	DRM_INFO("PCI error: resume callback!!\n");
6222
6223	/* Only continue execution for the case of pci_channel_io_frozen */
6224	if (adev->pci_channel_state != pci_channel_io_frozen)
6225		return;
6226
6227	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6228		struct amdgpu_ring *ring = adev->rings[i];
6229
6230		if (!amdgpu_ring_sched_ready(ring))
6231			continue;
6232
6233		drm_sched_start(&ring->sched, true);
6234	}
6235
6236	amdgpu_device_unset_mp1_state(adev);
6237	amdgpu_device_unlock_reset_domain(adev->reset_domain);
6238}
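/*
 * Together, amdgpu_pci_error_detected(), amdgpu_pci_mmio_enabled(),
 * amdgpu_pci_slot_reset() and amdgpu_pci_resume() implement the
 * pci_error_handlers contract described in
 * Documentation/PCI/pci-error-recovery.rst: detect, optionally probe
 * MMIO, reset the slot, then resume, with the schedulers kept stopped
 * for the whole frozen window.
 */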
6239
6240bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
6241{
6242	struct drm_device *dev = pci_get_drvdata(pdev);
6243	struct amdgpu_device *adev = drm_to_adev(dev);
6244	int r;
6245
6246	r = pci_save_state(pdev);
6247	if (!r) {
6248		kfree(adev->pci_state);
6249
6250		adev->pci_state = pci_store_saved_state(pdev);
6251
6252		if (!adev->pci_state) {
6253			DRM_ERROR("Failed to store PCI saved state");
6254			return false;
6255		}
6256	} else {
6257		DRM_WARN("Failed to save PCI state, err:%d\n", r);
6258		return false;
6259	}
6260
6261	return true;
6262}
6263
6264bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
6265{
6266	struct drm_device *dev = pci_get_drvdata(pdev);
6267	struct amdgpu_device *adev = drm_to_adev(dev);
6268	int r;
6269
6270	if (!adev->pci_state)
6271		return false;
6272
6273	r = pci_load_saved_state(pdev, adev->pci_state);
6274
6275	if (!r) {
6276		pci_restore_state(pdev);
6277	} else {
6278		DRM_WARN("Failed to load PCI state, err:%d\n", r);
6279		return false;
6280	}
6281
6282	return true;
6283}
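/*
 * The two helpers above pair up: amdgpu_device_cache_pci_state() saves
 * config space into an allocation we own (hence the kfree() of any
 * previous copy), and amdgpu_device_load_pci_state() feeds it back via
 * pci_load_saved_state() before pci_restore_state() writes it to the
 * device, e.g. from the slot-reset handler.
 */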
6284
6285void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6286		struct amdgpu_ring *ring)
6287{
6288#ifdef CONFIG_X86_64
6289	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6290		return;
6291#endif
6292	if (adev->gmc.xgmi.connected_to_cpu)
6293		return;
6294
6295	if (ring && ring->funcs->emit_hdp_flush)
6296		amdgpu_ring_emit_hdp_flush(ring);
6297	else
6298		amdgpu_asic_flush_hdp(adev, ring);
6299}
6300
6301void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6302		struct amdgpu_ring *ring)
6303{
6304#ifdef CONFIG_X86_64
6305	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6306		return;
6307#endif
6308	if (adev->gmc.xgmi.connected_to_cpu)
6309		return;
6310
6311	amdgpu_asic_invalidate_hdp(adev, ring);
6312}
6313
6314int amdgpu_in_reset(struct amdgpu_device *adev)
6315{
6316	return atomic_read(&adev->reset_domain->in_gpu_reset);
6317}
6318
6319/**
6320 * amdgpu_device_halt() - bring hardware to some kind of halt state
6321 *
6322 * @adev: amdgpu_device pointer
6323 *
6324 * Bring hardware to some kind of halt state so that no one can touch it
6325 * any more. It helps to maintain the error context when an error occurs.
6326 * Compared to a simple hang, the system stays stable at least for SSH
6327 * access. Then it should be trivial to inspect the hardware state and
6328 * see what's going on. Implemented as follows:
6329 *
6330 * 1. drm_dev_unplug() makes device inaccessible to user space (IOCTLs, etc.),
6331 *    clears all CPU mappings to device, disallows remappings through page faults
6332 * 2. amdgpu_irq_disable_all() disables all interrupts
6333 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
6334 * 4. set adev->no_hw_access to avoid potential crashes after step 5
6335 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
6336 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
6337 *    flush any in flight DMA operations
6338 */
6339void amdgpu_device_halt(struct amdgpu_device *adev)
6340{
6341	struct pci_dev *pdev = adev->pdev;
6342	struct drm_device *ddev = adev_to_drm(adev);
6343
6344	amdgpu_xcp_dev_unplug(adev);
6345	drm_dev_unplug(ddev);
6346
6347	amdgpu_irq_disable_all(adev);
6348
6349	amdgpu_fence_driver_hw_fini(adev);
6350
6351	adev->no_hw_access = true;
6352
6353	amdgpu_device_unmap_mmio(adev);
6354
6355	pci_disable_device(pdev);
6356	pci_wait_for_pending_transaction(pdev);
6357}
6358
6359u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
6360				u32 reg)
6361{
6362	unsigned long flags, address, data;
6363	u32 r;
6364
6365	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6366	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6367
6368	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6369	WREG32(address, reg * 4);
6370	(void)RREG32(address);
6371	r = RREG32(data);
6372	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6373	return r;
6374}
6375
6376void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6377				u32 reg, u32 v)
6378{
6379	unsigned long flags, address, data;
6380
6381	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6382	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6383
6384	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6385	WREG32(address, reg * 4);
6386	(void)RREG32(address);
6387	WREG32(data, v);
6388	(void)RREG32(data);
6389	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6390}
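/*
 * Both port accessors above use the classic index/data pair: write the
 * register's dword index to the index offset, read it back once to
 * flush the posted write, then access the data offset (with another
 * flushing read after a write). The sequence runs under pcie_idx_lock
 * because the pair is a single shared window into port register space.
 */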
6391
6392/**
6393 * amdgpu_device_switch_gang - switch to a new gang
6394 * @adev: amdgpu_device pointer
6395 * @gang: the gang to switch to
6396 *
6397 * Try to switch to a new gang.
6398 * Returns: NULL if we switched to the new gang or a reference to the current
6399 * gang leader.
6400 */
6401struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6402					    struct dma_fence *gang)
6403{
6404	struct dma_fence *old = NULL;
6405
6406	do {
6407		dma_fence_put(old);
6408		rcu_read_lock();
6409		old = dma_fence_get_rcu_safe(&adev->gang_submit);
6410		rcu_read_unlock();
6411
6412		if (old == gang)
6413			break;
6414
6415		if (!dma_fence_is_signaled(old))
6416			return old;
6417
6418	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6419			 old, gang) != old);
6420
6421	dma_fence_put(old);
6422	return NULL;
6423}
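/*
 * amdgpu_device_switch_gang() is a lock-free publish: grab a reference
 * to the current gang leader under RCU, return it if it has not
 * signaled yet (the caller must wait on it before retrying), otherwise
 * try to install the new gang with cmpxchg(). If another thread raced
 * in between, the cmpxchg() fails and the loop re-reads the fresh
 * leader.
 */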
6424
6425bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6426{
6427	switch (adev->asic_type) {
6428#ifdef CONFIG_DRM_AMDGPU_SI
6429	case CHIP_HAINAN:
6430#endif
6431	case CHIP_TOPAZ:
6432		/* chips with no display hardware */
6433		return false;
6434#ifdef CONFIG_DRM_AMDGPU_SI
6435	case CHIP_TAHITI:
6436	case CHIP_PITCAIRN:
6437	case CHIP_VERDE:
6438	case CHIP_OLAND:
6439#endif
6440#ifdef CONFIG_DRM_AMDGPU_CIK
6441	case CHIP_BONAIRE:
6442	case CHIP_HAWAII:
6443	case CHIP_KAVERI:
6444	case CHIP_KABINI:
6445	case CHIP_MULLINS:
6446#endif
6447	case CHIP_TONGA:
6448	case CHIP_FIJI:
6449	case CHIP_POLARIS10:
6450	case CHIP_POLARIS11:
6451	case CHIP_POLARIS12:
6452	case CHIP_VEGAM:
6453	case CHIP_CARRIZO:
6454	case CHIP_STONEY:
6455		/* chips with display hardware */
6456		return true;
6457	default:
6458		/* IP discovery */
6459		if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
6460		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6461			return false;
6462		return true;
6463	}
6464}
6465
6466uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
6467		uint32_t inst, uint32_t reg_addr, char reg_name[],
6468		uint32_t expected_value, uint32_t mask)
6469{
6470	uint32_t ret = 0;
6471	uint32_t old_ = 0;
6472	uint32_t tmp_ = RREG32(reg_addr);
6473	uint32_t loop = adev->usec_timeout;
6474
6475	while ((tmp_ & (mask)) != (expected_value)) {
6476		if (old_ != tmp_) {
6477			loop = adev->usec_timeout;
6478			old_ = tmp_;
6479		} else
6480			udelay(1);
6481		tmp_ = RREG32(reg_addr);
6482		loop--;
6483		if (!loop) {
6484			DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
6485				  inst, reg_name, (uint32_t)expected_value,
6486				  (uint32_t)(tmp_ & (mask)));
6487			ret = -ETIMEDOUT;
6488			break;
6489		}
6490	}
6491	return ret;
6492}
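/*
 * Usage sketch for amdgpu_device_wait_on_rreg() (the register offset,
 * name and values are purely illustrative, not taken from any real IP
 * block):
 *
 *	r = amdgpu_device_wait_on_rreg(adev, 0, reg_offset, "MY_STATUS",
 *				       0x80000000, 0x80000000);
 *
 * This spins until bit 31 of the register reads back as set, restarting
 * the timeout whenever the value changes, and returns -ETIMEDOUT after
 * adev->usec_timeout microseconds without progress.
 */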
v6.8
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
  28#include <linux/power_supply.h>
  29#include <linux/kthread.h>
  30#include <linux/module.h>
  31#include <linux/console.h>
  32#include <linux/slab.h>
  33#include <linux/iommu.h>
  34#include <linux/pci.h>
  35#include <linux/pci-p2pdma.h>
  36#include <linux/apple-gmux.h>
  37
  38#include <drm/drm_aperture.h>
  39#include <drm/drm_atomic_helper.h>
  40#include <drm/drm_crtc_helper.h>
  41#include <drm/drm_fb_helper.h>
  42#include <drm/drm_probe_helper.h>
  43#include <drm/amdgpu_drm.h>
  44#include <linux/device.h>
  45#include <linux/vgaarb.h>
  46#include <linux/vga_switcheroo.h>
  47#include <linux/efi.h>
  48#include "amdgpu.h"
  49#include "amdgpu_trace.h"
  50#include "amdgpu_i2c.h"
  51#include "atom.h"
  52#include "amdgpu_atombios.h"
  53#include "amdgpu_atomfirmware.h"
  54#include "amd_pcie.h"
  55#ifdef CONFIG_DRM_AMDGPU_SI
  56#include "si.h"
  57#endif
  58#ifdef CONFIG_DRM_AMDGPU_CIK
  59#include "cik.h"
  60#endif
  61#include "vi.h"
  62#include "soc15.h"
  63#include "nv.h"
  64#include "bif/bif_4_1_d.h"
  65#include <linux/firmware.h>
  66#include "amdgpu_vf_error.h"
  67
  68#include "amdgpu_amdkfd.h"
  69#include "amdgpu_pm.h"
  70
  71#include "amdgpu_xgmi.h"
  72#include "amdgpu_ras.h"
  73#include "amdgpu_pmu.h"
  74#include "amdgpu_fru_eeprom.h"
  75#include "amdgpu_reset.h"
  76#include "amdgpu_virt.h"
  77
  78#include <linux/suspend.h>
  79#include <drm/task_barrier.h>
  80#include <linux/pm_runtime.h>
  81
  82#include <drm/drm_drv.h>
  83
  84#if IS_ENABLED(CONFIG_X86)
  85#include <asm/intel-family.h>
  86#endif
  87
  88MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
  89MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
  90MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
  91MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
  92MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
  93MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
  94MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
  95
  96#define AMDGPU_RESUME_MS		2000
  97#define AMDGPU_MAX_RETRY_LIMIT		2
  98#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
 
 
 
  99
 100static const struct drm_driver amdgpu_kms_driver;
 101
 102const char *amdgpu_asic_name[] = {
 103	"TAHITI",
 104	"PITCAIRN",
 105	"VERDE",
 106	"OLAND",
 107	"HAINAN",
 108	"BONAIRE",
 109	"KAVERI",
 110	"KABINI",
 111	"HAWAII",
 112	"MULLINS",
 113	"TOPAZ",
 114	"TONGA",
 115	"FIJI",
 116	"CARRIZO",
 117	"STONEY",
 118	"POLARIS10",
 119	"POLARIS11",
 120	"POLARIS12",
 121	"VEGAM",
 122	"VEGA10",
 123	"VEGA12",
 124	"VEGA20",
 125	"RAVEN",
 126	"ARCTURUS",
 127	"RENOIR",
 128	"ALDEBARAN",
 129	"NAVI10",
 130	"CYAN_SKILLFISH",
 131	"NAVI14",
 132	"NAVI12",
 133	"SIENNA_CICHLID",
 134	"NAVY_FLOUNDER",
 135	"VANGOGH",
 136	"DIMGREY_CAVEFISH",
 137	"BEIGE_GOBY",
 138	"YELLOW_CARP",
 139	"IP DISCOVERY",
 140	"LAST",
 141};
 142
 143/**
 144 * DOC: pcie_replay_count
 145 *
 146 * The amdgpu driver provides a sysfs API for reporting the total number
 147 * of PCIe replays (NAKs)
 148 * The file pcie_replay_count is used for this and returns the total
 149 * number of replays as a sum of the NAKs generated and NAKs received
 150 */
 151
 152static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
 153		struct device_attribute *attr, char *buf)
 154{
 155	struct drm_device *ddev = dev_get_drvdata(dev);
 156	struct amdgpu_device *adev = drm_to_adev(ddev);
 157	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
 158
 159	return sysfs_emit(buf, "%llu\n", cnt);
 160}
 161
 162static DEVICE_ATTR(pcie_replay_count, 0444,
 163		amdgpu_device_get_pcie_replay_count, NULL);
 164
 165static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
 166					  struct bin_attribute *attr, char *buf,
 167					  loff_t ppos, size_t count)
 168{
 169	struct device *dev = kobj_to_dev(kobj);
 170	struct drm_device *ddev = dev_get_drvdata(dev);
 171	struct amdgpu_device *adev = drm_to_adev(ddev);
 172	ssize_t bytes_read;
 173
 174	switch (ppos) {
 175	case AMDGPU_SYS_REG_STATE_XGMI:
 176		bytes_read = amdgpu_asic_get_reg_state(
 177			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
 178		break;
 179	case AMDGPU_SYS_REG_STATE_WAFL:
 180		bytes_read = amdgpu_asic_get_reg_state(
 181			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
 182		break;
 183	case AMDGPU_SYS_REG_STATE_PCIE:
 184		bytes_read = amdgpu_asic_get_reg_state(
 185			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
 186		break;
 187	case AMDGPU_SYS_REG_STATE_USR:
 188		bytes_read = amdgpu_asic_get_reg_state(
 189			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
 190		break;
 191	case AMDGPU_SYS_REG_STATE_USR_1:
 192		bytes_read = amdgpu_asic_get_reg_state(
 193			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
 194		break;
 195	default:
 196		return -EINVAL;
 197	}
 198
 199	return bytes_read;
 200}
 201
 202BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
 203	 AMDGPU_SYS_REG_STATE_END);
 204
 205int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
 206{
 207	int ret;
 208
 209	if (!amdgpu_asic_get_reg_state_supported(adev))
 210		return 0;
 211
 212	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
 213
 214	return ret;
 215}
 216
 217void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
 218{
 219	if (!amdgpu_asic_get_reg_state_supported(adev))
 220		return;
 221	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
 222}
 223
 224/**
 225 * DOC: board_info
 226 *
 227 * The amdgpu driver provides a sysfs API for giving board related information.
 228 * It provides the form factor information in the format
 229 *
 230 *   type : form factor
 231 *
 232 * Possible form factor values
 233 *
 234 * - "cem"		- PCIE CEM card
 235 * - "oam"		- Open Compute Accelerator Module
 236 * - "unknown"	- Not known
 237 *
 238 */
 239
 240static ssize_t amdgpu_device_get_board_info(struct device *dev,
 241					    struct device_attribute *attr,
 242					    char *buf)
 243{
 244	struct drm_device *ddev = dev_get_drvdata(dev);
 245	struct amdgpu_device *adev = drm_to_adev(ddev);
 246	enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
 247	const char *pkg;
 248
 249	if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
 250		pkg_type = adev->smuio.funcs->get_pkg_type(adev);
 251
 252	switch (pkg_type) {
 253	case AMDGPU_PKG_TYPE_CEM:
 254		pkg = "cem";
 255		break;
 256	case AMDGPU_PKG_TYPE_OAM:
 257		pkg = "oam";
 258		break;
 259	default:
 260		pkg = "unknown";
 261		break;
 262	}
 263
 264	return sysfs_emit(buf, "%s : %s\n", "type", pkg);
 265}
 266
 267static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);
 268
 269static struct attribute *amdgpu_board_attrs[] = {
 270	&dev_attr_board_info.attr,
 271	NULL,
 272};
 273
 274static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
 275					     struct attribute *attr, int n)
 276{
 277	struct device *dev = kobj_to_dev(kobj);
 278	struct drm_device *ddev = dev_get_drvdata(dev);
 279	struct amdgpu_device *adev = drm_to_adev(ddev);
 280
 281	if (adev->flags & AMD_IS_APU)
 282		return 0;
 283
 284	return attr->mode;
 285}
 286
 287static const struct attribute_group amdgpu_board_attrs_group = {
 288	.attrs = amdgpu_board_attrs,
 289	.is_visible = amdgpu_board_attrs_is_visible
 290};
 291
 292static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 293
 294
 295/**
 296 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 297 *
 298 * @dev: drm_device pointer
 299 *
 300 * Returns true if the device is a dGPU with ATPX power control,
 301 * otherwise return false.
 302 */
 303bool amdgpu_device_supports_px(struct drm_device *dev)
 304{
 305	struct amdgpu_device *adev = drm_to_adev(dev);
 306
 307	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
 308		return true;
 309	return false;
 310}
 311
 312/**
 313 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 314 *
 315 * @dev: drm_device pointer
 316 *
 317 * Returns true if the device is a dGPU with ACPI power control,
 318 * otherwise return false.
 319 */
 320bool amdgpu_device_supports_boco(struct drm_device *dev)
 321{
 322	struct amdgpu_device *adev = drm_to_adev(dev);
 323
 324	if (adev->has_pr3 ||
 325	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
 326		return true;
 327	return false;
 328}
 329
 330/**
 331 * amdgpu_device_supports_baco - Does the device support BACO
 332 *
 333 * @dev: drm_device pointer
 334 *
 335 * Returns true if the device supporte BACO,
 336 * otherwise return false.
 337 */
 338bool amdgpu_device_supports_baco(struct drm_device *dev)
 339{
 340	struct amdgpu_device *adev = drm_to_adev(dev);
 341
 342	return amdgpu_asic_supports_baco(adev);
 343}
 344
 345/**
 346 * amdgpu_device_supports_smart_shift - Is the device dGPU with
 347 * smart shift support
 348 *
 349 * @dev: drm_device pointer
 350 *
 351 * Returns true if the device is a dGPU with Smart Shift support,
 352 * otherwise returns false.
 353 */
 354bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
 355{
 356	return (amdgpu_device_supports_boco(dev) &&
 357		amdgpu_acpi_is_power_shift_control_supported());
 358}
 359
 360/*
 361 * VRAM access helper functions
 362 */
 363
 364/**
 365 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 366 *
 367 * @adev: amdgpu_device pointer
 368 * @pos: offset of the buffer in vram
 369 * @buf: virtual address of the buffer in system memory
 370 * @size: read/write size, sizeof(@buf) must > @size
 371 * @write: true - write to vram, otherwise - read from vram
 372 */
 373void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
 374			     void *buf, size_t size, bool write)
 375{
 376	unsigned long flags;
 377	uint32_t hi = ~0, tmp = 0;
 378	uint32_t *data = buf;
 379	uint64_t last;
 380	int idx;
 381
 382	if (!drm_dev_enter(adev_to_drm(adev), &idx))
 383		return;
 384
 385	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
 386
 387	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 388	for (last = pos + size; pos < last; pos += 4) {
 389		tmp = pos >> 31;
 390
 391		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
 392		if (tmp != hi) {
 393			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
 394			hi = tmp;
 395		}
 396		if (write)
 397			WREG32_NO_KIQ(mmMM_DATA, *data++);
 398		else
 399			*data++ = RREG32_NO_KIQ(mmMM_DATA);
 400	}
 401
 402	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 403	drm_dev_exit(idx);
 404}
 405
 406/**
 407 * amdgpu_device_aper_access - access vram by vram aperature
 408 *
 409 * @adev: amdgpu_device pointer
 410 * @pos: offset of the buffer in vram
 411 * @buf: virtual address of the buffer in system memory
 412 * @size: read/write size, sizeof(@buf) must > @size
 413 * @write: true - write to vram, otherwise - read from vram
 414 *
 415 * The return value means how many bytes have been transferred.
 416 */
 417size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
 418				 void *buf, size_t size, bool write)
 419{
 420#ifdef CONFIG_64BIT
 421	void __iomem *addr;
 422	size_t count = 0;
 423	uint64_t last;
 424
 425	if (!adev->mman.aper_base_kaddr)
 426		return 0;
 427
 428	last = min(pos + size, adev->gmc.visible_vram_size);
 429	if (last > pos) {
 430		addr = adev->mman.aper_base_kaddr + pos;
 431		count = last - pos;
 432
 433		if (write) {
 434			memcpy_toio(addr, buf, count);
 435			/* Make sure HDP write cache flush happens without any reordering
 436			 * after the system memory contents are sent over PCIe device
 437			 */
 438			mb();
 439			amdgpu_device_flush_hdp(adev, NULL);
 440		} else {
 441			amdgpu_device_invalidate_hdp(adev, NULL);
 442			/* Make sure HDP read cache is invalidated before issuing a read
 443			 * to the PCIe device
 444			 */
 445			mb();
 446			memcpy_fromio(buf, addr, count);
 447		}
 448
 449	}
 450
 451	return count;
 452#else
 453	return 0;
 454#endif
 455}
 456
 457/**
 458 * amdgpu_device_vram_access - read/write a buffer in vram
 459 *
 460 * @adev: amdgpu_device pointer
 461 * @pos: offset of the buffer in vram
 462 * @buf: virtual address of the buffer in system memory
 463 * @size: read/write size, sizeof(@buf) must > @size
 464 * @write: true - write to vram, otherwise - read from vram
 465 */
 466void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 467			       void *buf, size_t size, bool write)
 468{
 469	size_t count;
 470
 471	/* try to using vram apreature to access vram first */
 472	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
 473	size -= count;
 474	if (size) {
 475		/* using MM to access rest vram */
 476		pos += count;
 477		buf += count;
 478		amdgpu_device_mm_access(adev, pos, buf, size, write);
 479	}
 480}
 481
 482/*
 483 * register access helper functions.
 484 */
 485
 486/* Check if hw access should be skipped because of hotplug or device error */
 487bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
 488{
 489	if (adev->no_hw_access)
 490		return true;
 491
 492#ifdef CONFIG_LOCKDEP
 493	/*
 494	 * This is a bit complicated to understand, so worth a comment. What we assert
 495	 * here is that the GPU reset is not running on another thread in parallel.
 496	 *
 497	 * For this we trylock the read side of the reset semaphore, if that succeeds
 498	 * we know that the reset is not running in paralell.
 499	 *
 500	 * If the trylock fails we assert that we are either already holding the read
 501	 * side of the lock or are the reset thread itself and hold the write side of
 502	 * the lock.
 503	 */
 504	if (in_task()) {
 505		if (down_read_trylock(&adev->reset_domain->sem))
 506			up_read(&adev->reset_domain->sem);
 507		else
 508			lockdep_assert_held(&adev->reset_domain->sem);
 509	}
 510#endif
 511	return false;
 512}
 513
 514/**
 515 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 516 *
 517 * @adev: amdgpu_device pointer
 518 * @reg: dword aligned register offset
 519 * @acc_flags: access flags which require special behavior
 520 *
 521 * Returns the 32 bit value from the offset specified.
 522 */
 523uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
 524			    uint32_t reg, uint32_t acc_flags)
 525{
 526	uint32_t ret;
 527
 528	if (amdgpu_device_skip_hw_access(adev))
 529		return 0;
 530
 531	if ((reg * 4) < adev->rmmio_size) {
 532		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 533		    amdgpu_sriov_runtime(adev) &&
 534		    down_read_trylock(&adev->reset_domain->sem)) {
 535			ret = amdgpu_kiq_rreg(adev, reg, 0);
 536			up_read(&adev->reset_domain->sem);
 537		} else {
 538			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 539		}
 540	} else {
 541		ret = adev->pcie_rreg(adev, reg * 4);
 542	}
 543
 544	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
 545
 546	return ret;
 547}
 548
 549/*
 550 * MMIO register read with bytes helper functions
 551 * @offset:bytes offset from MMIO start
 552 */
 553
 554/**
 555 * amdgpu_mm_rreg8 - read a memory mapped IO register
 556 *
 557 * @adev: amdgpu_device pointer
 558 * @offset: byte aligned register offset
 559 *
 560 * Returns the 8 bit value from the offset specified.
 561 */
 562uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
 563{
 564	if (amdgpu_device_skip_hw_access(adev))
 565		return 0;
 566
 567	if (offset < adev->rmmio_size)
 568		return (readb(adev->rmmio + offset));
 569	BUG();
 570}
 571
 572
 573/**
 574 * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
 575 *
 576 * @adev: amdgpu_device pointer
 577 * @reg: dword aligned register offset
 578 * @acc_flags: access flags which require special behavior
 579 * @xcc_id: xcc accelerated compute core id
 580 *
 581 * Returns the 32 bit value from the offset specified.
 582 */
 583uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
 584				uint32_t reg, uint32_t acc_flags,
 585				uint32_t xcc_id)
 586{
 587	uint32_t ret, rlcg_flag;
 588
 589	if (amdgpu_device_skip_hw_access(adev))
 590		return 0;
 591
 592	if ((reg * 4) < adev->rmmio_size) {
 593		if (amdgpu_sriov_vf(adev) &&
 594		    !amdgpu_sriov_runtime(adev) &&
 595		    adev->gfx.rlc.rlcg_reg_access_supported &&
 596		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
 597							 GC_HWIP, false,
 598							 &rlcg_flag)) {
 599			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id);
 600		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 601		    amdgpu_sriov_runtime(adev) &&
 602		    down_read_trylock(&adev->reset_domain->sem)) {
 603			ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
 604			up_read(&adev->reset_domain->sem);
 605		} else {
 606			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 607		}
 608	} else {
 609		ret = adev->pcie_rreg(adev, reg * 4);
 610	}
 611
 612	return ret;
 613}
 614
 615/*
 616 * MMIO register write with bytes helper functions
 617 * @offset:bytes offset from MMIO start
 618 * @value: the value want to be written to the register
 619 */
 620
 621/**
 622 * amdgpu_mm_wreg8 - read a memory mapped IO register
 623 *
 624 * @adev: amdgpu_device pointer
 625 * @offset: byte aligned register offset
 626 * @value: 8 bit value to write
 627 *
 628 * Writes the value specified to the offset specified.
 629 */
 630void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
 631{
 632	if (amdgpu_device_skip_hw_access(adev))
 633		return;
 634
 635	if (offset < adev->rmmio_size)
 636		writeb(value, adev->rmmio + offset);
 637	else
 638		BUG();
 639}
 640
 641/**
 642 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 643 *
 644 * @adev: amdgpu_device pointer
 645 * @reg: dword aligned register offset
 646 * @v: 32 bit value to write to the register
 647 * @acc_flags: access flags which require special behavior
 648 *
 649 * Writes the value specified to the offset specified.
 650 */
 651void amdgpu_device_wreg(struct amdgpu_device *adev,
 652			uint32_t reg, uint32_t v,
 653			uint32_t acc_flags)
 654{
 655	if (amdgpu_device_skip_hw_access(adev))
 656		return;
 657
 658	if ((reg * 4) < adev->rmmio_size) {
 659		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 660		    amdgpu_sriov_runtime(adev) &&
 661		    down_read_trylock(&adev->reset_domain->sem)) {
 662			amdgpu_kiq_wreg(adev, reg, v, 0);
 663			up_read(&adev->reset_domain->sem);
 664		} else {
 665			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 666		}
 667	} else {
 668		adev->pcie_wreg(adev, reg * 4, v);
 669	}
 670
 671	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
 672}
 673
 674/**
 675 * amdgpu_mm_wreg_mmio_rlc -  write register either with direct/indirect mmio or with RLC path if in range
 676 *
 677 * @adev: amdgpu_device pointer
 678 * @reg: mmio/rlc register
 679 * @v: value to write
 680 * @xcc_id: xcc accelerated compute core id
 681 *
 682 * this function is invoked only for the debugfs register access
 683 */
 684void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
 685			     uint32_t reg, uint32_t v,
 686			     uint32_t xcc_id)
 687{
 688	if (amdgpu_device_skip_hw_access(adev))
 689		return;
 690
 691	if (amdgpu_sriov_fullaccess(adev) &&
 692	    adev->gfx.rlc.funcs &&
 693	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
 694		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
 695			return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
 696	} else if ((reg * 4) >= adev->rmmio_size) {
 697		adev->pcie_wreg(adev, reg * 4, v);
 698	} else {
 699		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 700	}
 701}
 702
 703/**
 704 * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
 705 *
 706 * @adev: amdgpu_device pointer
 707 * @reg: dword aligned register offset
 708 * @v: 32 bit value to write to the register
 709 * @acc_flags: access flags which require special behavior
 710 * @xcc_id: xcc accelerated compute core id
 711 *
 712 * Writes the value specified to the offset specified.
 713 */
 714void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
 715			uint32_t reg, uint32_t v,
 716			uint32_t acc_flags, uint32_t xcc_id)
 717{
 718	uint32_t rlcg_flag;
 719
 720	if (amdgpu_device_skip_hw_access(adev))
 721		return;
 722
 723	if ((reg * 4) < adev->rmmio_size) {
 724		if (amdgpu_sriov_vf(adev) &&
 725		    !amdgpu_sriov_runtime(adev) &&
 726		    adev->gfx.rlc.rlcg_reg_access_supported &&
 727		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
 728							 GC_HWIP, true,
 729							 &rlcg_flag)) {
 730			amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id);
 731		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 732		    amdgpu_sriov_runtime(adev) &&
 733		    down_read_trylock(&adev->reset_domain->sem)) {
 734			amdgpu_kiq_wreg(adev, reg, v, xcc_id);
 735			up_read(&adev->reset_domain->sem);
 736		} else {
 737			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 738		}
 739	} else {
 740		adev->pcie_wreg(adev, reg * 4, v);
 741	}
 742}
 743
 744/**
 745 * amdgpu_device_indirect_rreg - read an indirect register
 746 *
 747 * @adev: amdgpu_device pointer
 748 * @reg_addr: indirect register address to read from
 749 *
 750 * Returns the value of indirect register @reg_addr
 751 */
 752u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
 753				u32 reg_addr)
 754{
 755	unsigned long flags, pcie_index, pcie_data;
 756	void __iomem *pcie_index_offset;
 757	void __iomem *pcie_data_offset;
 758	u32 r;
 759
 760	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 761	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 762
 763	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 764	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 765	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 766
 767	writel(reg_addr, pcie_index_offset);
 768	readl(pcie_index_offset);
 769	r = readl(pcie_data_offset);
 770	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 771
 772	return r;
 773}
 774
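/*
 * A minimal usage sketch (hypothetical, not from this file): an ASIC
 * specific file would typically point the device's indirect PCIe register
 * callbacks at these helpers, possibly converting the offset first:
 *
 *	static u32 example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		return amdgpu_device_indirect_rreg(adev, reg);
 *	}
 *
 *	adev->pcie_rreg = example_pcie_rreg;
 *
 * "example_pcie_rreg" is an illustrative name only.
 */
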
 775u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
 776				    u64 reg_addr)
 777{
 778	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
 779	u32 r;
 780	void __iomem *pcie_index_offset;
 781	void __iomem *pcie_index_hi_offset;
 782	void __iomem *pcie_data_offset;
 783
 784	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 785	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 786	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
 787		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
 788	else
 789		pcie_index_hi = 0;
 790
 791	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 792	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 793	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 794	if (pcie_index_hi != 0)
 795		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
 796				pcie_index_hi * 4;
 797
 798	writel(reg_addr, pcie_index_offset);
 799	readl(pcie_index_offset);
 800	if (pcie_index_hi != 0) {
 801		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 802		readl(pcie_index_hi_offset);
 803	}
 804	r = readl(pcie_data_offset);
 805
 806	/* clear the high bits */
 807	if (pcie_index_hi != 0) {
 808		writel(0, pcie_index_hi_offset);
 809		readl(pcie_index_hi_offset);
 810	}
 811
 812	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 813
 814	return r;
 815}
 816
 817/**
 818 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
 819 *
 820 * @adev: amdgpu_device pointer
 821 * @reg_addr: indirect register address to read from
 822 *
 823 * Returns the value of indirect register @reg_addr
 824 */
 825u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
 826				  u32 reg_addr)
 827{
 828	unsigned long flags, pcie_index, pcie_data;
 829	void __iomem *pcie_index_offset;
 830	void __iomem *pcie_data_offset;
 831	u64 r;
 832
 833	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 834	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 835
 836	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 837	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 838	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 839
 840	/* read low 32 bits */
 841	writel(reg_addr, pcie_index_offset);
 842	readl(pcie_index_offset);
 843	r = readl(pcie_data_offset);
 844	/* read high 32 bits */
 845	writel(reg_addr + 4, pcie_index_offset);
 846	readl(pcie_index_offset);
 847	r |= ((u64)readl(pcie_data_offset) << 32);
 848	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 849
 850	return r;
 851}
 852
 853u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
 854				  u64 reg_addr)
 855{
 856	unsigned long flags, pcie_index, pcie_data;
 857	unsigned long pcie_index_hi = 0;
 858	void __iomem *pcie_index_offset;
 859	void __iomem *pcie_index_hi_offset;
 860	void __iomem *pcie_data_offset;
 861	u64 r;
 862
 863	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 864	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 865	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
 866		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
 867
 868	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 869	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 870	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 871	if (pcie_index_hi != 0)
 872		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
 873			pcie_index_hi * 4;
 874
 875	/* read low 32 bits */
 876	writel(reg_addr, pcie_index_offset);
 877	readl(pcie_index_offset);
 878	if (pcie_index_hi != 0) {
 879		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 880		readl(pcie_index_hi_offset);
 881	}
 882	r = readl(pcie_data_offset);
 883	/* read high 32 bits */
 884	writel(reg_addr + 4, pcie_index_offset);
 885	readl(pcie_index_offset);
 886	if (pcie_index_hi != 0) {
 887		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 888		readl(pcie_index_hi_offset);
 889	}
 890	r |= ((u64)readl(pcie_data_offset) << 32);
 891
 892	/* clear the high bits */
 893	if (pcie_index_hi != 0) {
 894		writel(0, pcie_index_hi_offset);
 895		readl(pcie_index_hi_offset);
 896	}
 897
 898	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 899
 900	return r;
 901}
 902
 903/**
 904 * amdgpu_device_indirect_wreg - write an indirect register
 905 *
 906 * @adev: amdgpu_device pointer
 907 * @reg_addr: indirect register offset
 908 * @reg_data: indirect register data
 909 *
 910 */
 911void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
 912				 u32 reg_addr, u32 reg_data)
 913{
 914	unsigned long flags, pcie_index, pcie_data;
 915	void __iomem *pcie_index_offset;
 916	void __iomem *pcie_data_offset;
 917
 918	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 919	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 920
 921	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 922	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 923	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 924
 925	writel(reg_addr, pcie_index_offset);
 926	readl(pcie_index_offset);
 927	writel(reg_data, pcie_data_offset);
 928	readl(pcie_data_offset);
 929	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 930}
 931
 932void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
 933				     u64 reg_addr, u32 reg_data)
 934{
 935	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
 936	void __iomem *pcie_index_offset;
 937	void __iomem *pcie_index_hi_offset;
 938	void __iomem *pcie_data_offset;
 939
 940	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 941	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 942	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
 943		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
 944	else
 945		pcie_index_hi = 0;
 946
 947	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 948	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 949	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 950	if (pcie_index_hi != 0)
 951		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
 952				pcie_index_hi * 4;
 953
 954	writel(reg_addr, pcie_index_offset);
 955	readl(pcie_index_offset);
 956	if (pcie_index_hi != 0) {
 957		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 958		readl(pcie_index_hi_offset);
 959	}
 960	writel(reg_data, pcie_data_offset);
 961	readl(pcie_data_offset);
 962
 963	/* clear the high bits */
 964	if (pcie_index_hi != 0) {
 965		writel(0, pcie_index_hi_offset);
 966		readl(pcie_index_hi_offset);
 967	}
 968
 969	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 970}
 971
 972/**
 973 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
 974 *
 975 * @adev: amdgpu_device pointer
 976 * @reg_addr: indirect register offset
 977 * @reg_data: indirect register data
 978 *
 979 */
 980void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
 981				   u32 reg_addr, u64 reg_data)
 982{
 983	unsigned long flags, pcie_index, pcie_data;
 984	void __iomem *pcie_index_offset;
 985	void __iomem *pcie_data_offset;
 986
 987	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 988	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 989
 990	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 991	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 992	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 993
 994	/* write low 32 bits */
 995	writel(reg_addr, pcie_index_offset);
 996	readl(pcie_index_offset);
 997	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
 998	readl(pcie_data_offset);
 999	/* write high 32 bits */
1000	writel(reg_addr + 4, pcie_index_offset);
1001	readl(pcie_index_offset);
1002	writel((u32)(reg_data >> 32), pcie_data_offset);
1003	readl(pcie_data_offset);
1004	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1005}
1006
1007void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
1008				   u64 reg_addr, u64 reg_data)
1009{
1010	unsigned long flags, pcie_index, pcie_data;
1011	unsigned long pcie_index_hi = 0;
1012	void __iomem *pcie_index_offset;
1013	void __iomem *pcie_index_hi_offset;
1014	void __iomem *pcie_data_offset;
1015
1016	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1017	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1018	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
1019		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
1020
1021	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1022	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1023	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1024	if (pcie_index_hi != 0)
1025		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
1026				pcie_index_hi * 4;
1027
1028	/* write low 32 bits */
1029	writel(reg_addr, pcie_index_offset);
1030	readl(pcie_index_offset);
1031	if (pcie_index_hi != 0) {
1032		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1033		readl(pcie_index_hi_offset);
1034	}
1035	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
1036	readl(pcie_data_offset);
1037	/* write high 32 bits */
1038	writel(reg_addr + 4, pcie_index_offset);
1039	readl(pcie_index_offset);
1040	if (pcie_index_hi != 0) {
1041		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1042		readl(pcie_index_hi_offset);
1043	}
1044	writel((u32)(reg_data >> 32), pcie_data_offset);
1045	readl(pcie_data_offset);
1046
1047	/* clear the high bits */
1048	if (pcie_index_hi != 0) {
1049		writel(0, pcie_index_hi_offset);
1050		readl(pcie_index_hi_offset);
1051	}
1052
1053	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1054}
1055
1056/**
1057 * amdgpu_device_get_rev_id - query device rev_id
1058 *
1059 * @adev: amdgpu_device pointer
1060 *
1061 * Returns the device rev_id.
1062 */
1063u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
1064{
1065	return adev->nbio.funcs->get_rev_id(adev);
1066}
1067
1068/**
1069 * amdgpu_invalid_rreg - dummy reg read function
1070 *
1071 * @adev: amdgpu_device pointer
1072 * @reg: offset of register
1073 *
1074 * Dummy register read function.  Used for register blocks
1075 * that certain asics don't have (all asics).
1076 * Returns the value in the register.
1077 */
1078static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
1079{
1080	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
1081	BUG();
1082	return 0;
1083}
1084
1085static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
1086{
1087	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
1088	BUG();
1089	return 0;
1090}
1091
1092/**
1093 * amdgpu_invalid_wreg - dummy reg write function
1094 *
1095 * @adev: amdgpu_device pointer
1096 * @reg: offset of register
1097 * @v: value to write to the register
1098 *
1099 * Dummy register write function.  Used for register blocks
1100 * that certain asics don't have (all asics).
1101 */
1102static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
1103{
1104	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
1105		  reg, v);
1106	BUG();
1107}
1108
1109static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
1110{
1111	DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
1112		  reg, v);
1113	BUG();
1114}
1115
1116/**
1117 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
1118 *
1119 * @adev: amdgpu_device pointer
1120 * @reg: offset of register
1121 *
1122 * Dummy register read function.  Used for register blocks
1123 * that certain asics don't have (all asics).
1124 * Returns the value in the register.
1125 */
1126static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
1127{
1128	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
1129	BUG();
1130	return 0;
1131}
1132
1133static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
1134{
1135	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
1136	BUG();
1137	return 0;
1138}
1139
1140/**
1141 * amdgpu_invalid_wreg64 - dummy reg write function
1142 *
1143 * @adev: amdgpu_device pointer
1144 * @reg: offset of register
1145 * @v: value to write to the register
1146 *
1147 * Dummy register write function.  Used for register blocks
1148 * that certain asics don't have (all asics).
1149 */
1150static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
1151{
1152	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
1153		  reg, v);
1154	BUG();
1155}
1156
1157static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
1158{
1159	DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
1160		  reg, v);
1161	BUG();
1162}
1163
1164/**
1165 * amdgpu_block_invalid_rreg - dummy reg read function
1166 *
1167 * @adev: amdgpu_device pointer
1168 * @block: offset of instance
1169 * @reg: offset of register
1170 *
1171 * Dummy register read function.  Used for register blocks
1172 * that certain asics don't have (all asics).
1173 * Returns the value in the register.
1174 */
1175static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
1176					  uint32_t block, uint32_t reg)
1177{
1178	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
1179		  reg, block);
1180	BUG();
1181	return 0;
1182}
1183
1184/**
1185 * amdgpu_block_invalid_wreg - dummy reg write function
1186 *
1187 * @adev: amdgpu_device pointer
1188 * @block: offset of instance
1189 * @reg: offset of register
1190 * @v: value to write to the register
1191 *
1192 * Dummy register write function.  Used for register blocks
1193 * that certain asics don't have (all asics).
1194 */
1195static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
1196				      uint32_t block,
1197				      uint32_t reg, uint32_t v)
1198{
1199	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
1200		  reg, block, v);
1201	BUG();
1202}
1203
1204/**
1205 * amdgpu_device_asic_init - Wrapper for atom asic_init
1206 *
1207 * @adev: amdgpu_device pointer
1208 *
1209 * Does any asic specific work and then calls atom asic init.
1210 */
1211static int amdgpu_device_asic_init(struct amdgpu_device *adev)
1212{
1213	int ret;
1214
1215	amdgpu_asic_pre_asic_init(adev);
1216
1217	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1218	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
1219		amdgpu_psp_wait_for_bootloader(adev);
1220		ret = amdgpu_atomfirmware_asic_init(adev, true);
1221		/* TODO: check the return val and stop device initialization if boot fails */
1222		amdgpu_psp_query_boot_status(adev);
1223		return ret;
1224	} else {
1225		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
1226	}
1229}
1230
1231/**
1232 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
1233 *
1234 * @adev: amdgpu_device pointer
1235 *
1236 * Allocates a scratch page of VRAM for use by various things in the
1237 * driver.
1238 */
1239static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
1240{
1241	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
1242				       AMDGPU_GEM_DOMAIN_VRAM |
1243				       AMDGPU_GEM_DOMAIN_GTT,
1244				       &adev->mem_scratch.robj,
1245				       &adev->mem_scratch.gpu_addr,
1246				       (void **)&adev->mem_scratch.ptr);
1247}
1248
1249/**
1250 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
1251 *
1252 * @adev: amdgpu_device pointer
1253 *
1254 * Frees the VRAM scratch page.
1255 */
1256static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
1257{
1258	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
1259}
1260
1261/**
1262 * amdgpu_device_program_register_sequence - program an array of registers.
1263 *
1264 * @adev: amdgpu_device pointer
1265 * @registers: pointer to the register array
1266 * @array_size: size of the register array
1267 *
1268 * Programs an array of registers with AND/OR masks.
1269 * This is a helper for setting golden registers.
1270 */
1271void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
1272					     const u32 *registers,
1273					     const u32 array_size)
1274{
1275	u32 tmp, reg, and_mask, or_mask;
1276	int i;
1277
1278	if (array_size % 3)
1279		return;
1280
1281	for (i = 0; i < array_size; i += 3) {
1282		reg = registers[i + 0];
1283		and_mask = registers[i + 1];
1284		or_mask = registers[i + 2];
1285
1286		if (and_mask == 0xffffffff) {
1287			tmp = or_mask;
1288		} else {
1289			tmp = RREG32(reg);
1290			tmp &= ~and_mask;
1291			if (adev->family >= AMDGPU_FAMILY_AI)
1292				tmp |= (or_mask & and_mask);
1293			else
1294				tmp |= or_mask;
1295		}
1296		WREG32(reg, tmp);
1297	}
1298}
1299
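/*
 * A minimal sketch of a register list as consumed above: entries come in
 * (offset, and_mask, or_mask) triples, so array_size must be a multiple
 * of 3. The offsets and values here are hypothetical:
 *
 *	static const u32 example_golden_settings[] = {
 *		0x1234, 0xffffffff, 0x00000001,	   (full overwrite)
 *		0x5678, 0x0000ff00, 0x00002100,	   (read-modify-write)
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *						ARRAY_SIZE(example_golden_settings));
 */
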
1300/**
1301 * amdgpu_device_pci_config_reset - reset the GPU
1302 *
1303 * @adev: amdgpu_device pointer
1304 *
1305 * Resets the GPU using the pci config reset sequence.
1306 * Only applicable to asics prior to vega10.
1307 */
1308void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1309{
1310	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1311}
1312
1313/**
1314 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1315 *
1316 * @adev: amdgpu_device pointer
1317 *
1318 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1319 */
1320int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1321{
1322	return pci_reset_function(adev->pdev);
1323}
1324
1325/*
1326 * amdgpu_device_wb_*()
1327 * Writeback is the method by which the GPU updates special pages in memory
1328 * with the status of certain GPU events (fences, ring pointers, etc.).
1329 */
1330
1331/**
1332 * amdgpu_device_wb_fini - Disable Writeback and free memory
1333 *
1334 * @adev: amdgpu_device pointer
1335 *
1336 * Disables Writeback and frees the Writeback memory (all asics).
1337 * Used at driver shutdown.
1338 */
1339static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1340{
1341	if (adev->wb.wb_obj) {
1342		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1343				      &adev->wb.gpu_addr,
1344				      (void **)&adev->wb.wb);
1345		adev->wb.wb_obj = NULL;
1346	}
1347}
1348
1349/**
1350 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1351 *
1352 * @adev: amdgpu_device pointer
1353 *
1354 * Initializes writeback and allocates writeback memory (all asics).
1355 * Used at driver startup.
1356 * Returns 0 on success or a negative error code on failure.
1357 */
1358static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1359{
1360	int r;
1361
1362	if (adev->wb.wb_obj == NULL) {
1363		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1364		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1365					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1366					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1367					    (void **)&adev->wb.wb);
1368		if (r) {
1369			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1370			return r;
1371		}
1372
1373		adev->wb.num_wb = AMDGPU_MAX_WB;
1374		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1375
1376		/* clear wb memory */
1377		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1378	}
1379
1380	return 0;
1381}
1382
1383/**
1384 * amdgpu_device_wb_get - Allocate a wb entry
1385 *
1386 * @adev: amdgpu_device pointer
1387 * @wb: wb index
1388 *
1389 * Allocate a wb slot for use by the driver (all asics).
1390 * Returns 0 on success or -EINVAL on failure.
1391 */
1392int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1393{
1394	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1395
1396	if (offset < adev->wb.num_wb) {
1397		__set_bit(offset, adev->wb.used);
1398		*wb = offset << 3; /* convert to dw offset */
1399		return 0;
1400	} else {
1401		return -EINVAL;
1402	}
1403}
1404
1405/**
1406 * amdgpu_device_wb_free - Free a wb entry
1407 *
1408 * @adev: amdgpu_device pointer
1409 * @wb: wb index
1410 *
1411 * Free a wb slot allocated for use by the driver (all asics)
1412 */
1413void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1414{
1415	wb >>= 3;
1416	if (wb < adev->wb.num_wb)
1417		__clear_bit(wb, adev->wb.used);
1418}
1419
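/*
 * A minimal usage sketch for the wb helpers above (error handling
 * elided). The index returned by amdgpu_device_wb_get() is in dwords,
 * so the CPU view of a slot is adev->wb.wb[index] and its GPU address
 * is adev->wb.gpu_addr + index * 4:
 *
 *	u32 index;
 *
 *	if (!amdgpu_device_wb_get(adev, &index)) {
 *		volatile u32 *cpu_ptr = &adev->wb.wb[index];
 *		u64 gpu_addr = adev->wb.gpu_addr + (index * 4);
 *
 *		(have the GPU write status to gpu_addr, poll *cpu_ptr)
 *
 *		amdgpu_device_wb_free(adev, index);
 *	}
 */
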
1420/**
1421 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1422 *
1423 * @adev: amdgpu_device pointer
1424 *
1425 * Try to resize the FB BAR to make all VRAM CPU accessible. We try very hard
1426 * not to fail, but if any of the BARs is not accessible after the resize we
1427 * abort driver loading by returning -ENODEV.
1428 */
1429int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1430{
1431	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1432	struct pci_bus *root;
1433	struct resource *res;
1434	unsigned int i;
1435	u16 cmd;
1436	int r;
1437
1438	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
1439		return 0;
1440
1441	/* Bypass for VF */
1442	if (amdgpu_sriov_vf(adev))
1443		return 0;
1444
1445	/* skip if the bios has already enabled large BAR */
1446	if (adev->gmc.real_vram_size &&
1447	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1448		return 0;
1449
1450	/* Check if the root BUS has 64bit memory resources */
1451	root = adev->pdev->bus;
1452	while (root->parent)
1453		root = root->parent;
1454
1455	pci_bus_for_each_resource(root, res, i) {
1456		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1457		    res->start > 0x100000000ull)
1458			break;
1459	}
1460
1461	/* Trying to resize is pointless without a root hub window above 4GB */
1462	if (!res)
1463		return 0;
1464
1465	/* Limit the BAR size to what is available */
1466	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1467			rbar_size);
1468
1469	/* Disable memory decoding while we change the BAR addresses and size */
1470	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1471	pci_write_config_word(adev->pdev, PCI_COMMAND,
1472			      cmd & ~PCI_COMMAND_MEMORY);
1473
1474	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1475	amdgpu_doorbell_fini(adev);
1476	if (adev->asic_type >= CHIP_BONAIRE)
1477		pci_release_resource(adev->pdev, 2);
1478
1479	pci_release_resource(adev->pdev, 0);
1480
1481	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1482	if (r == -ENOSPC)
1483		DRM_INFO("Not enough PCI address space for a large BAR.");
1484	else if (r && r != -ENOTSUPP)
1485		DRM_ERROR("Problem resizing BAR0 (%d).", r);
1486
1487	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1488
1489	/* When the doorbell or fb BAR isn't available we have no chance of
1490	 * using the device.
1491	 */
1492	r = amdgpu_doorbell_init(adev);
1493	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1494		return -ENODEV;
1495
1496	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1497
1498	return 0;
1499}
1500
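/*
 * Note on the sizing above: pci_rebar_bytes_to_size() maps a byte count
 * to the PCI resizable BAR size encoding, log2(bytes) - 20, so for
 * example 256MB of VRAM maps to 8 and 8GB maps to 13; the min() against
 * pci_rebar_get_possible_sizes() then clamps that to the largest size
 * the device actually advertises.
 */
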
1501static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
1502{
1503	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
1504		return false;
1505
1506	return true;
1507}
1508
1509/*
1510 * GPU helpers function.
1511 */
1512/**
1513 * amdgpu_device_need_post - check if the hw need post or not
1514 *
1515 * @adev: amdgpu_device pointer
1516 *
1517 * Check if the asic has been initialized (all asics) at driver startup,
1518 * or if post is needed because a hw reset was performed.
1519 * Returns true if post is needed or false if not.
1520 */
1521bool amdgpu_device_need_post(struct amdgpu_device *adev)
1522{
1523	uint32_t reg;
1524
1525	if (amdgpu_sriov_vf(adev))
1526		return false;
1527
1528	if (!amdgpu_device_read_bios(adev))
1529		return false;
1530
1531	if (amdgpu_passthrough(adev)) {
1532		/* for FIJI: In the whole-GPU pass-through virtualization case, after a VM
1533		 * reboot some old SMC firmware still needs the driver to perform vPost,
1534		 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
1535		 * this flaw, so we force vPost for SMC versions below 22.15.
1536		 */
1537		if (adev->asic_type == CHIP_FIJI) {
1538			int err;
1539			uint32_t fw_ver;
1540
1541			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1542		/* force vPost if error occurred */
1543			if (err)
1544				return true;
1545
1546			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1547			release_firmware(adev->pm.fw);
1548			if (fw_ver < 0x00160e00)
1549				return true;
1550		}
1551	}
1552
1553	/* Don't post if we need to reset whole hive on init */
1554	if (adev->gmc.xgmi.pending_reset)
1555		return false;
1556
1557	if (adev->has_hw_reset) {
1558		adev->has_hw_reset = false;
1559		return true;
1560	}
1561
1562	/* bios scratch used on CIK+ */
1563	if (adev->asic_type >= CHIP_BONAIRE)
1564		return amdgpu_atombios_scratch_need_asic_init(adev);
1565
1566	/* check MEM_SIZE for older asics */
1567	reg = amdgpu_asic_get_config_memsize(adev);
1568
1569	if ((reg != 0) && (reg != 0xffffffff))
1570		return false;
1571
1572	return true;
1573}
1574
1575/*
1576 * Check whether seamless boot is supported.
1577 *
1578 * So far we only support seamless boot on DCE 3.0 or later.
1579 * If users report that it works on older ASICs as well, we may
1580 * loosen this.
1581 */
1582bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
1583{
1584	switch (amdgpu_seamless) {
1585	case -1:
1586		break;
1587	case 1:
1588		return true;
1589	case 0:
1590		return false;
1591	default:
1592		DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
1593			  amdgpu_seamless);
1594		return false;
1595	}
1596
1597	if (!(adev->flags & AMD_IS_APU))
1598		return false;
1599
1600	if (adev->mman.keep_stolen_vga_memory)
1601		return false;
1602
1603	return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
1604}
1605
1606/*
1607 * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
1608 * don't support dynamic speed switching. Until we have confirmation from Intel
1609 * that a specific host supports it, it's safer to keep it disabled for all.
1610 *
1611 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
1612 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
1613 */
1614static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
1615{
1616#if IS_ENABLED(CONFIG_X86)
1617	struct cpuinfo_x86 *c = &cpu_data(0);
1618
1619	/* eGPU change speeds based on USB4 fabric conditions */
1620	if (dev_is_removable(adev->dev))
1621		return true;
1622
1623	if (c->x86_vendor == X86_VENDOR_INTEL)
1624		return false;
1625#endif
1626	return true;
1627}
1628
1629/**
1630 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1631 *
1632 * @adev: amdgpu_device pointer
1633 *
1634 * Confirm whether the module parameter and pcie bridge agree that ASPM should
1635 * be set for this device.
1636 *
1637 * Returns true if it should be used or false if not.
1638 */
1639bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1640{
1641	switch (amdgpu_aspm) {
1642	case -1:
1643		break;
1644	case 0:
1645		return false;
1646	case 1:
1647		return true;
1648	default:
1649		return false;
1650	}
1651	if (adev->flags & AMD_IS_APU)
1652		return false;
1653	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
1654		return false;
1655	return pcie_aspm_enabled(adev->pdev);
1656}
1657
1658/* if we get transitioned to only one device, take VGA back */
1659/**
1660 * amdgpu_device_vga_set_decode - enable/disable vga decode
1661 *
1662 * @pdev: PCI device pointer
1663 * @state: enable/disable vga decode
1664 *
1665 * Enable/disable vga decode (all asics).
1666 * Returns VGA resource flags.
1667 */
1668static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1669		bool state)
1670{
1671	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1672
1673	amdgpu_asic_set_vga_state(adev, state);
1674	if (state)
1675		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1676		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1677	else
1678		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1679}
1680
1681/**
1682 * amdgpu_device_check_block_size - validate the vm block size
1683 *
1684 * @adev: amdgpu_device pointer
1685 *
1686 * Validates the vm block size specified via module parameter.
1687 * The vm block size defines the number of bits in the page table versus the
1688 * page directory. A page is 4KB, so we have 12 bits of offset, a minimum of
1689 * 9 bits in the page table, and the remaining bits in the page directory.
1690 */
1691static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1692{
1693	/* defines number of bits in page table versus page directory,
1694	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1695	 * page table and the remaining bits are in the page directory
1696	 */
1697	if (amdgpu_vm_block_size == -1)
1698		return;
1699
1700	if (amdgpu_vm_block_size < 9) {
1701		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1702			 amdgpu_vm_block_size);
1703		amdgpu_vm_block_size = -1;
1704	}
1705}
1706
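/*
 * Worked example for the block size math above: with the minimum
 * amdgpu_vm_block_size of 9, each page table maps 2^9 4KB pages, i.e.
 * 9 + 12 = 21 bits or 2MB of address space per page table; the
 * remaining virtual address bits are resolved by the page directory.
 */
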
1707/**
1708 * amdgpu_device_check_vm_size - validate the vm size
1709 *
1710 * @adev: amdgpu_device pointer
1711 *
1712 * Validates the vm size in GB specified via module parameter.
1713 * The VM size is the size of the GPU virtual memory space in GB.
1714 */
1715static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1716{
1717	/* no need to check the default value */
1718	if (amdgpu_vm_size == -1)
1719		return;
1720
1721	if (amdgpu_vm_size < 1) {
1722		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1723			 amdgpu_vm_size);
1724		amdgpu_vm_size = -1;
1725	}
1726}
1727
1728static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1729{
1730	struct sysinfo si;
1731	bool is_os_64 = (sizeof(void *) == 8);
1732	uint64_t total_memory;
1733	uint64_t dram_size_seven_GB = 0x1B8000000;
1734	uint64_t dram_size_three_GB = 0xB8000000;
1735
1736	if (amdgpu_smu_memory_pool_size == 0)
1737		return;
1738
1739	if (!is_os_64) {
1740		DRM_WARN("Not 64-bit OS, feature not supported\n");
1741		goto def_value;
1742	}
1743	si_meminfo(&si);
1744	total_memory = (uint64_t)si.totalram * si.mem_unit;
1745
1746	if ((amdgpu_smu_memory_pool_size == 1) ||
1747		(amdgpu_smu_memory_pool_size == 2)) {
1748		if (total_memory < dram_size_three_GB)
1749			goto def_value1;
1750	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1751		(amdgpu_smu_memory_pool_size == 8)) {
1752		if (total_memory < dram_size_seven_GB)
1753			goto def_value1;
1754	} else {
1755		DRM_WARN("Smu memory pool size not supported\n");
1756		goto def_value;
1757	}
1758	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1759
1760	return;
1761
1762def_value1:
1763	DRM_WARN("Not enough system memory\n");
1764def_value:
1765	adev->pm.smu_prv_buffer_size = 0;
1766}
1767
1768static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1769{
1770	if (!(adev->flags & AMD_IS_APU) ||
1771	    adev->asic_type < CHIP_RAVEN)
1772		return 0;
1773
1774	switch (adev->asic_type) {
1775	case CHIP_RAVEN:
1776		if (adev->pdev->device == 0x15dd)
1777			adev->apu_flags |= AMD_APU_IS_RAVEN;
1778		if (adev->pdev->device == 0x15d8)
1779			adev->apu_flags |= AMD_APU_IS_PICASSO;
1780		break;
1781	case CHIP_RENOIR:
1782		if ((adev->pdev->device == 0x1636) ||
1783		    (adev->pdev->device == 0x164c))
1784			adev->apu_flags |= AMD_APU_IS_RENOIR;
1785		else
1786			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1787		break;
1788	case CHIP_VANGOGH:
1789		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1790		break;
1791	case CHIP_YELLOW_CARP:
1792		break;
1793	case CHIP_CYAN_SKILLFISH:
1794		if ((adev->pdev->device == 0x13FE) ||
1795		    (adev->pdev->device == 0x143F))
1796			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1797		break;
1798	default:
1799		break;
1800	}
1801
1802	return 0;
1803}
1804
1805/**
1806 * amdgpu_device_check_arguments - validate module params
1807 *
1808 * @adev: amdgpu_device pointer
1809 *
1810 * Validates certain module parameters and updates
1811 * the associated values used by the driver (all asics).
1812 */
1813static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1814{
1815	if (amdgpu_sched_jobs < 4) {
1816		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1817			 amdgpu_sched_jobs);
1818		amdgpu_sched_jobs = 4;
1819	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
1820		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1821			 amdgpu_sched_jobs);
1822		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1823	}
1824
1825	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1826		/* gart size must be greater than or equal to 32M */
1827		dev_warn(adev->dev, "gart size (%d) too small\n",
1828			 amdgpu_gart_size);
1829		amdgpu_gart_size = -1;
1830	}
1831
1832	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1833		/* gtt size must be greater than or equal to 32M */
1834		dev_warn(adev->dev, "gtt size (%d) too small\n",
1835				 amdgpu_gtt_size);
1836		amdgpu_gtt_size = -1;
1837	}
1838
1839	/* valid range is between 4 and 9 inclusive */
1840	if (amdgpu_vm_fragment_size != -1 &&
1841	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1842		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1843		amdgpu_vm_fragment_size = -1;
1844	}
1845
1846	if (amdgpu_sched_hw_submission < 2) {
1847		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1848			 amdgpu_sched_hw_submission);
1849		amdgpu_sched_hw_submission = 2;
1850	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1851		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1852			 amdgpu_sched_hw_submission);
1853		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1854	}
1855
1856	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1857		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1858		amdgpu_reset_method = -1;
1859	}
1860
1861	amdgpu_device_check_smu_prv_buffer_size(adev);
1862
1863	amdgpu_device_check_vm_size(adev);
1864
1865	amdgpu_device_check_block_size(adev);
1866
1867	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1868
1869	return 0;
1870}
1871
1872/**
1873 * amdgpu_switcheroo_set_state - set switcheroo state
1874 *
1875 * @pdev: pci dev pointer
1876 * @state: vga_switcheroo state
1877 *
1878 * Callback for the switcheroo driver.  Suspends or resumes
1879 * the asics before or after it is powered up using ACPI methods.
1880 */
1881static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1882					enum vga_switcheroo_state state)
1883{
1884	struct drm_device *dev = pci_get_drvdata(pdev);
1885	int r;
1886
1887	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1888		return;
1889
1890	if (state == VGA_SWITCHEROO_ON) {
1891		pr_info("switched on\n");
1892		/* don't suspend or resume card normally */
1893		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1894
1895		pci_set_power_state(pdev, PCI_D0);
1896		amdgpu_device_load_pci_state(pdev);
1897		r = pci_enable_device(pdev);
1898		if (r)
1899			DRM_WARN("pci_enable_device failed (%d)\n", r);
1900		amdgpu_device_resume(dev, true);
1901
1902		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1903	} else {
1904		pr_info("switched off\n");
1905		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1906		amdgpu_device_prepare(dev);
1907		amdgpu_device_suspend(dev, true);
1908		amdgpu_device_cache_pci_state(pdev);
1909		/* Shut down the device */
1910		pci_disable_device(pdev);
1911		pci_set_power_state(pdev, PCI_D3cold);
1912		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1913	}
1914}
1915
1916/**
1917 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1918 *
1919 * @pdev: pci dev pointer
1920 *
1921 * Callback for the switcheroo driver.  Checks if the switcheroo
1922 * state can be changed.
1923 * Returns true if the state can be changed, false if not.
1924 */
1925static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1926{
1927	struct drm_device *dev = pci_get_drvdata(pdev);
1928
1929       /*
1930	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1931	* locking inversion with the driver load path. And the access here is
1932	* completely racy anyway. So don't bother with locking for now.
1933	*/
1934	return atomic_read(&dev->open_count) == 0;
1935}
1936
1937static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1938	.set_gpu_state = amdgpu_switcheroo_set_state,
1939	.reprobe = NULL,
1940	.can_switch = amdgpu_switcheroo_can_switch,
1941};
1942
1943/**
1944 * amdgpu_device_ip_set_clockgating_state - set the CG state
1945 *
1946 * @dev: amdgpu_device pointer
1947 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1948 * @state: clockgating state (gate or ungate)
1949 *
1950 * Sets the requested clockgating state for all instances of
1951 * the hardware IP specified.
1952 * Returns the error code from the last instance.
1953 */
1954int amdgpu_device_ip_set_clockgating_state(void *dev,
1955					   enum amd_ip_block_type block_type,
1956					   enum amd_clockgating_state state)
1957{
1958	struct amdgpu_device *adev = dev;
1959	int i, r = 0;
1960
1961	for (i = 0; i < adev->num_ip_blocks; i++) {
1962		if (!adev->ip_blocks[i].status.valid)
1963			continue;
1964		if (adev->ip_blocks[i].version->type != block_type)
1965			continue;
1966		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1967			continue;
1968		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1969			(void *)adev, state);
1970		if (r)
1971			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1972				  adev->ip_blocks[i].version->funcs->name, r);
1973	}
1974	return r;
1975}
1976
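/*
 * A minimal usage sketch (block type and state constants come from
 * amd_shared.h):
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_CG_STATE_GATE);
 */
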
1977/**
1978 * amdgpu_device_ip_set_powergating_state - set the PG state
1979 *
1980 * @dev: amdgpu_device pointer
1981 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1982 * @state: powergating state (gate or ungate)
1983 *
1984 * Sets the requested powergating state for all instances of
1985 * the hardware IP specified.
1986 * Returns the error code from the last instance.
1987 */
1988int amdgpu_device_ip_set_powergating_state(void *dev,
1989					   enum amd_ip_block_type block_type,
1990					   enum amd_powergating_state state)
1991{
1992	struct amdgpu_device *adev = dev;
1993	int i, r = 0;
1994
1995	for (i = 0; i < adev->num_ip_blocks; i++) {
1996		if (!adev->ip_blocks[i].status.valid)
1997			continue;
1998		if (adev->ip_blocks[i].version->type != block_type)
1999			continue;
2000		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
2001			continue;
2002		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
2003			(void *)adev, state);
2004		if (r)
2005			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
2006				  adev->ip_blocks[i].version->funcs->name, r);
2007	}
2008	return r;
2009}
2010
2011/**
2012 * amdgpu_device_ip_get_clockgating_state - get the CG state
2013 *
2014 * @adev: amdgpu_device pointer
2015 * @flags: clockgating feature flags
2016 *
2017 * Walks the list of IPs on the device and updates the clockgating
2018 * flags for each IP.
2019 * Updates @flags with the feature flags for each hardware IP where
2020 * clockgating is enabled.
2021 */
2022void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
2023					    u64 *flags)
2024{
2025	int i;
2026
2027	for (i = 0; i < adev->num_ip_blocks; i++) {
2028		if (!adev->ip_blocks[i].status.valid)
2029			continue;
2030		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
2031			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
2032	}
2033}
2034
2035/**
2036 * amdgpu_device_ip_wait_for_idle - wait for idle
2037 *
2038 * @adev: amdgpu_device pointer
2039 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2040 *
2041 * Waits for the requested hardware IP to be idle.
2042 * Returns 0 for success or a negative error code on failure.
2043 */
2044int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
2045				   enum amd_ip_block_type block_type)
2046{
2047	int i, r;
2048
2049	for (i = 0; i < adev->num_ip_blocks; i++) {
2050		if (!adev->ip_blocks[i].status.valid)
2051			continue;
2052		if (adev->ip_blocks[i].version->type == block_type) {
2053			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
2054			if (r)
2055				return r;
2056			break;
2057		}
2058	}
2059	return 0;
2061}
2062
2063/**
2064 * amdgpu_device_ip_is_idle - is the hardware IP idle
2065 *
2066 * @adev: amdgpu_device pointer
2067 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2068 *
2069 * Check if the hardware IP is idle or not.
2070 * Returns true if the IP is idle, false if not.
2071 */
2072bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
2073			      enum amd_ip_block_type block_type)
2074{
2075	int i;
2076
2077	for (i = 0; i < adev->num_ip_blocks; i++) {
2078		if (!adev->ip_blocks[i].status.valid)
2079			continue;
2080		if (adev->ip_blocks[i].version->type == block_type)
2081			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
2082	}
2083	return true;
2085}
2086
2087/**
2088 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
2089 *
2090 * @adev: amdgpu_device pointer
2091 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
2092 *
2093 * Returns a pointer to the hardware IP block structure
2094 * if it exists for the asic, otherwise NULL.
2095 */
2096struct amdgpu_ip_block *
2097amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
2098			      enum amd_ip_block_type type)
2099{
2100	int i;
2101
2102	for (i = 0; i < adev->num_ip_blocks; i++)
2103		if (adev->ip_blocks[i].version->type == type)
2104			return &adev->ip_blocks[i];
2105
2106	return NULL;
2107}
2108
2109/**
2110 * amdgpu_device_ip_block_version_cmp
2111 *
2112 * @adev: amdgpu_device pointer
2113 * @type: enum amd_ip_block_type
2114 * @major: major version
2115 * @minor: minor version
2116 *
2117 * Returns 0 if the IP block's version is equal to or greater than the one
2118 * specified; returns 1 if it is smaller or the ip_block doesn't exist.
2119 */
2120int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
2121				       enum amd_ip_block_type type,
2122				       u32 major, u32 minor)
2123{
2124	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
2125
2126	if (ip_block && ((ip_block->version->major > major) ||
2127			((ip_block->version->major == major) &&
2128			(ip_block->version->minor >= minor))))
2129		return 0;
2130
2131	return 1;
2132}
2133
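/*
 * A minimal usage sketch for the compare helper above (block type and
 * version numbers are illustrative):
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
 *						7, 0)) {
 *		(the SMC IP block is at least version 7.0)
 *	}
 */
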
2134/**
2135 * amdgpu_device_ip_block_add
2136 *
2137 * @adev: amdgpu_device pointer
2138 * @ip_block_version: pointer to the IP to add
2139 *
2140 * Adds the IP block driver information to the collection of IPs
2141 * on the asic.
2142 */
2143int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
2144			       const struct amdgpu_ip_block_version *ip_block_version)
2145{
2146	if (!ip_block_version)
2147		return -EINVAL;
2148
2149	switch (ip_block_version->type) {
2150	case AMD_IP_BLOCK_TYPE_VCN:
2151		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
2152			return 0;
2153		break;
2154	case AMD_IP_BLOCK_TYPE_JPEG:
2155		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
2156			return 0;
2157		break;
2158	default:
2159		break;
2160	}
2161
2162	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
2163		  ip_block_version->funcs->name);
2164
2165	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
2166
2167	return 0;
2168}
2169
2170/**
2171 * amdgpu_device_enable_virtual_display - enable virtual display feature
2172 *
2173 * @adev: amdgpu_device pointer
2174 *
2175 * Enables the virtual display feature if the user has enabled it via
2176 * the module parameter virtual_display.  This feature provides virtual
2177 * display hardware on headless boards or in virtualized environments.
2178 * This function parses and validates the configuration string specified by
2179 * the user and configures the virtual display configuration (number of
2180 * virtual connectors, crtcs, etc.) specified.
2181 */
2182static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
2183{
2184	adev->enable_virtual_display = false;
2185
2186	if (amdgpu_virtual_display) {
2187		const char *pci_address_name = pci_name(adev->pdev);
2188		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
2189
2190		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
2191		pciaddstr_tmp = pciaddstr;
2192		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
2193			pciaddname = strsep(&pciaddname_tmp, ",");
2194			if (!strcmp("all", pciaddname)
2195			    || !strcmp(pci_address_name, pciaddname)) {
2196				long num_crtc;
2197				int res = -1;
2198
2199				adev->enable_virtual_display = true;
2200
2201				if (pciaddname_tmp)
2202					res = kstrtol(pciaddname_tmp, 10,
2203						      &num_crtc);
2204
2205				if (!res) {
2206					if (num_crtc < 1)
2207						num_crtc = 1;
2208					if (num_crtc > 6)
2209						num_crtc = 6;
2210					adev->mode_info.num_crtc = num_crtc;
2211				} else {
2212					adev->mode_info.num_crtc = 1;
2213				}
2214				break;
2215			}
2216		}
2217
2218		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
2219			 amdgpu_virtual_display, pci_address_name,
2220			 adev->enable_virtual_display, adev->mode_info.num_crtc);
2221
2222		kfree(pciaddstr);
2223	}
2224}
2225
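/*
 * The virtual_display string parsed above is a semicolon-separated list
 * of "<pci address>[,<num_crtc>]" entries, where "all" matches every
 * device and num_crtc is clamped to the range 1-6. Hypothetical
 * examples:
 *
 *	amdgpu.virtual_display=0000:04:00.0,2
 *	amdgpu.virtual_display=all,1
 *	amdgpu.virtual_display=0000:04:00.0;0000:05:00.0,4
 */
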
2226void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
2227{
2228	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
2229		adev->mode_info.num_crtc = 1;
2230		adev->enable_virtual_display = true;
2231		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
2232			 adev->enable_virtual_display, adev->mode_info.num_crtc);
2233	}
2234}
2235
2236/**
2237 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2238 *
2239 * @adev: amdgpu_device pointer
2240 *
2241 * Parses the asic configuration parameters specified in the gpu info
2242 * firmware and makes them available to the driver for use in configuring
2243 * the asic.
2244 * Returns 0 on success, -EINVAL on failure.
2245 */
2246static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
2247{
2248	const char *chip_name;
2249	char fw_name[40];
2250	int err;
2251	const struct gpu_info_firmware_header_v1_0 *hdr;
2252
2253	adev->firmware.gpu_info_fw = NULL;
2254
2255	if (adev->mman.discovery_bin)
2256		return 0;
2257
2258	switch (adev->asic_type) {
2259	default:
2260		return 0;
2261	case CHIP_VEGA10:
2262		chip_name = "vega10";
2263		break;
2264	case CHIP_VEGA12:
2265		chip_name = "vega12";
2266		break;
2267	case CHIP_RAVEN:
2268		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2269			chip_name = "raven2";
2270		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
2271			chip_name = "picasso";
2272		else
2273			chip_name = "raven";
2274		break;
2275	case CHIP_ARCTURUS:
2276		chip_name = "arcturus";
2277		break;
2278	case CHIP_NAVI12:
2279		chip_name = "navi12";
2280		break;
2281	}
2282
2283	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
2284	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
2285	if (err) {
2286		dev_err(adev->dev,
2287			"Failed to get gpu_info firmware \"%s\"\n",
2288			fw_name);
2289		goto out;
2290	}
2291
2292	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2293	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2294
2295	switch (hdr->version_major) {
2296	case 1:
2297	{
2298		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2299			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2300								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2301
2302		/*
2303		 * Should be dropped when DAL no longer needs it.
2304		 */
2305		if (adev->asic_type == CHIP_NAVI12)
2306			goto parse_soc_bounding_box;
2307
2308		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2309		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2310		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2311		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2312		adev->gfx.config.max_texture_channel_caches =
2313			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2314		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2315		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2316		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2317		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2318		adev->gfx.config.double_offchip_lds_buf =
2319			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2320		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2321		adev->gfx.cu_info.max_waves_per_simd =
2322			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2323		adev->gfx.cu_info.max_scratch_slots_per_cu =
2324			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2325		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2326		if (hdr->version_minor >= 1) {
2327			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2328				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2329									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2330			adev->gfx.config.num_sc_per_sh =
2331				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2332			adev->gfx.config.num_packer_per_sc =
2333				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2334		}
2335
2336parse_soc_bounding_box:
2337		/*
2338		 * soc bounding box info is not integrated in the discovery table,
2339		 * so we always need to parse it from the gpu info firmware if needed.
2340		 */
2341		if (hdr->version_minor == 2) {
2342			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2343				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2344									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2345			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2346		}
2347		break;
2348	}
2349	default:
2350		dev_err(adev->dev,
2351			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2352		err = -EINVAL;
2353		goto out;
2354	}
2355out:
2356	return err;
2357}
2358
2359/**
2360 * amdgpu_device_ip_early_init - run early init for hardware IPs
2361 *
2362 * @adev: amdgpu_device pointer
2363 *
2364 * Early initialization pass for hardware IPs.  The hardware IPs that make
2365 * up each asic are discovered and each IP's early_init callback is run.  This
2366 * is the first stage in initializing the asic.
2367 * Returns 0 on success, negative error code on failure.
2368 */
2369static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2370{
2371	struct pci_dev *parent;
2372	int i, r;
2373	bool total;
2374
2375	amdgpu_device_enable_virtual_display(adev);
2376
2377	if (amdgpu_sriov_vf(adev)) {
2378		r = amdgpu_virt_request_full_gpu(adev, true);
2379		if (r)
2380			return r;
2381	}
2382
2383	switch (adev->asic_type) {
2384#ifdef CONFIG_DRM_AMDGPU_SI
2385	case CHIP_VERDE:
2386	case CHIP_TAHITI:
2387	case CHIP_PITCAIRN:
2388	case CHIP_OLAND:
2389	case CHIP_HAINAN:
2390		adev->family = AMDGPU_FAMILY_SI;
2391		r = si_set_ip_blocks(adev);
2392		if (r)
2393			return r;
2394		break;
2395#endif
2396#ifdef CONFIG_DRM_AMDGPU_CIK
2397	case CHIP_BONAIRE:
2398	case CHIP_HAWAII:
2399	case CHIP_KAVERI:
2400	case CHIP_KABINI:
2401	case CHIP_MULLINS:
2402		if (adev->flags & AMD_IS_APU)
2403			adev->family = AMDGPU_FAMILY_KV;
2404		else
2405			adev->family = AMDGPU_FAMILY_CI;
2406
2407		r = cik_set_ip_blocks(adev);
2408		if (r)
2409			return r;
2410		break;
2411#endif
2412	case CHIP_TOPAZ:
2413	case CHIP_TONGA:
2414	case CHIP_FIJI:
2415	case CHIP_POLARIS10:
2416	case CHIP_POLARIS11:
2417	case CHIP_POLARIS12:
2418	case CHIP_VEGAM:
2419	case CHIP_CARRIZO:
2420	case CHIP_STONEY:
2421		if (adev->flags & AMD_IS_APU)
2422			adev->family = AMDGPU_FAMILY_CZ;
2423		else
2424			adev->family = AMDGPU_FAMILY_VI;
2425
2426		r = vi_set_ip_blocks(adev);
2427		if (r)
2428			return r;
2429		break;
2430	default:
2431		r = amdgpu_discovery_set_ip_blocks(adev);
2432		if (r)
2433			return r;
2434		break;
2435	}
2436
2437	if (amdgpu_has_atpx() &&
2438	    (amdgpu_is_atpx_hybrid() ||
2439	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2440	    ((adev->flags & AMD_IS_APU) == 0) &&
2441	    !dev_is_removable(&adev->pdev->dev))
2442		adev->flags |= AMD_IS_PX;
2443
2444	if (!(adev->flags & AMD_IS_APU)) {
2445		parent = pcie_find_root_port(adev->pdev);
2446		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2447	}
2448
2450	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2451	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2452		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2453	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2454		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2455	if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
2456		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2457
2458	total = true;
2459	for (i = 0; i < adev->num_ip_blocks; i++) {
2460		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2461			DRM_WARN("disabled ip block: %d <%s>\n",
2462				  i, adev->ip_blocks[i].version->funcs->name);
2463			adev->ip_blocks[i].status.valid = false;
2464		} else {
2465			if (adev->ip_blocks[i].version->funcs->early_init) {
2466				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2467				if (r == -ENOENT) {
2468					adev->ip_blocks[i].status.valid = false;
2469				} else if (r) {
2470					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2471						  adev->ip_blocks[i].version->funcs->name, r);
2472					total = false;
2473				} else {
2474					adev->ip_blocks[i].status.valid = true;
2475				}
2476			} else {
2477				adev->ip_blocks[i].status.valid = true;
2478			}
2479		}
2480		/* get the vbios after the asic_funcs are set up */
2481		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2482			r = amdgpu_device_parse_gpu_info_fw(adev);
2483			if (r)
2484				return r;
2485
2486			/* Read BIOS */
2487			if (amdgpu_device_read_bios(adev)) {
2488				if (!amdgpu_get_bios(adev))
2489					return -EINVAL;
2490
2491				r = amdgpu_atombios_init(adev);
2492				if (r) {
2493					dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2494					amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2495					return r;
2496				}
2497			}
2498
2499			/* get pf2vf msg info at its earliest time */
2500			if (amdgpu_sriov_vf(adev))
2501				amdgpu_virt_init_data_exchange(adev);
2502
2503		}
2504	}
2505	if (!total)
2506		return -ENODEV;
2507
2508	amdgpu_amdkfd_device_probe(adev);
2509	adev->cg_flags &= amdgpu_cg_mask;
2510	adev->pg_flags &= amdgpu_pg_mask;
2511
2512	return 0;
2513}
2514
2515static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2516{
2517	int i, r;
2518
2519	for (i = 0; i < adev->num_ip_blocks; i++) {
2520		if (!adev->ip_blocks[i].status.sw)
2521			continue;
2522		if (adev->ip_blocks[i].status.hw)
2523			continue;
2524		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2525		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2526		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2527			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2528			if (r) {
2529				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2530					  adev->ip_blocks[i].version->funcs->name, r);
2531				return r;
2532			}
2533			adev->ip_blocks[i].status.hw = true;
2534		}
2535	}
2536
2537	return 0;
2538}
2539
2540static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2541{
2542	int i, r;
2543
2544	for (i = 0; i < adev->num_ip_blocks; i++) {
2545		if (!adev->ip_blocks[i].status.sw)
2546			continue;
2547		if (adev->ip_blocks[i].status.hw)
2548			continue;
2549		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2550		if (r) {
2551			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2552				  adev->ip_blocks[i].version->funcs->name, r);
2553			return r;
2554		}
2555		adev->ip_blocks[i].status.hw = true;
2556	}
2557
2558	return 0;
2559}
2560
2561static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2562{
2563	int r = 0;
2564	int i;
2565	uint32_t smu_version;
2566
2567	if (adev->asic_type >= CHIP_VEGA10) {
2568		for (i = 0; i < adev->num_ip_blocks; i++) {
2569			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2570				continue;
2571
2572			if (!adev->ip_blocks[i].status.sw)
2573				continue;
2574
2575			/* no need to do the fw loading again if already done */
2576			if (adev->ip_blocks[i].status.hw)
2577				break;
2578
2579			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2580				r = adev->ip_blocks[i].version->funcs->resume(adev);
2581				if (r) {
2582					DRM_ERROR("resume of IP block <%s> failed %d\n",
2583							  adev->ip_blocks[i].version->funcs->name, r);
2584					return r;
2585				}
2586			} else {
2587				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2588				if (r) {
2589					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2590							  adev->ip_blocks[i].version->funcs->name, r);
2591					return r;
2592				}
2593			}
2594
2595			adev->ip_blocks[i].status.hw = true;
2596			break;
2597		}
2598	}
2599
2600	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2601		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2602
2603	return r;
2604}
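
/*
 * A minimal caller-side sketch (hypothetical helper, not part of the
 * driver) of how the two hw_init phases and the firmware loading step
 * above are sequenced by amdgpu_device_ip_init() further below, assuming
 * a fully sw-initialized adev:
 */
static inline int amdgpu_example_hw_bringup(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_ip_hw_init_phase1(adev);	/* COMMON, IH (and PSP on SR-IOV) */
	if (r)
		return r;
	r = amdgpu_device_fw_loading(adev);		/* PSP/SMU firmware first */
	if (r)
		return r;
	return amdgpu_device_ip_hw_init_phase2(adev);	/* all remaining blocks */
}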
2605
2606static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2607{
2608	long timeout;
2609	int r, i;
2610
2611	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2612		struct amdgpu_ring *ring = adev->rings[i];
2613
2614		/* No need to setup the GPU scheduler for rings that don't need it */
2615		if (!ring || ring->no_scheduler)
2616			continue;
2617
2618		switch (ring->funcs->type) {
2619		case AMDGPU_RING_TYPE_GFX:
2620			timeout = adev->gfx_timeout;
2621			break;
2622		case AMDGPU_RING_TYPE_COMPUTE:
2623			timeout = adev->compute_timeout;
2624			break;
2625		case AMDGPU_RING_TYPE_SDMA:
2626			timeout = adev->sdma_timeout;
2627			break;
2628		default:
2629			timeout = adev->video_timeout;
2630			break;
2631		}
2632
2633		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
2634				   DRM_SCHED_PRIORITY_COUNT,
2635				   ring->num_hw_submission, 0,
2636				   timeout, adev->reset_domain->wq,
2637				   ring->sched_score, ring->name,
2638				   adev->dev);
2639		if (r) {
2640			DRM_ERROR("Failed to create scheduler on ring %s.\n",
2641				  ring->name);
2642			return r;
2643		}
2644		r = amdgpu_uvd_entity_init(adev, ring);
2645		if (r) {
2646			DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
2647				  ring->name);
2648			return r;
2649		}
2650		r = amdgpu_vce_entity_init(adev, ring);
2651		if (r) {
2652			DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
2653				  ring->name);
2654			return r;
2655		}
2656	}
2657
2658	amdgpu_xcp_update_partition_sched_list(adev);
2659
2660	return 0;
2661}
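
/*
 * The ring-type-to-timeout mapping chosen in the switch above, shown with
 * the driver defaults (before any lockup_timeout override; see
 * amdgpu_device_get_job_timeout_settings() below):
 *
 *	GFX     -> adev->gfx_timeout     (10000 ms)
 *	COMPUTE -> adev->compute_timeout (60000 ms)
 *	SDMA    -> adev->sdma_timeout    (10000 ms)
 *	other   -> adev->video_timeout   (10000 ms)
 */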
2662
2663
2664/**
2665 * amdgpu_device_ip_init - run init for hardware IPs
2666 *
2667 * @adev: amdgpu_device pointer
2668 *
2669 * Main initialization pass for hardware IPs.  The list of all the hardware
2670 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2671 * are run.  sw_init initializes the software state associated with each IP
2672 * and hw_init initializes the hardware associated with each IP.
2673 * Returns 0 on success, negative error code on failure.
2674 */
2675static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2676{
2677	int i, r;
2678
2679	r = amdgpu_ras_init(adev);
2680	if (r)
2681		return r;
2682
2683	for (i = 0; i < adev->num_ip_blocks; i++) {
2684		if (!adev->ip_blocks[i].status.valid)
2685			continue;
2686		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2687		if (r) {
2688			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2689				  adev->ip_blocks[i].version->funcs->name, r);
2690			goto init_failed;
2691		}
2692		adev->ip_blocks[i].status.sw = true;
2693
2694		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2695			/* need to do common hw init early so everything is set up for gmc */
2696			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2697			if (r) {
2698				DRM_ERROR("hw_init %d failed %d\n", i, r);
2699				goto init_failed;
2700			}
2701			adev->ip_blocks[i].status.hw = true;
2702		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2703			/* need to do gmc hw init early so we can allocate gpu mem */
2704			/* Try to reserve bad pages early */
2705			if (amdgpu_sriov_vf(adev))
2706				amdgpu_virt_exchange_data(adev);
2707
2708			r = amdgpu_device_mem_scratch_init(adev);
2709			if (r) {
2710				DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
2711				goto init_failed;
2712			}
2713			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2714			if (r) {
2715				DRM_ERROR("hw_init %d failed %d\n", i, r);
2716				goto init_failed;
2717			}
2718			r = amdgpu_device_wb_init(adev);
2719			if (r) {
2720				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2721				goto init_failed;
2722			}
2723			adev->ip_blocks[i].status.hw = true;
2724
2725			/* right after GMC hw init, we create CSA */
2726			if (adev->gfx.mcbp) {
2727				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2728							       AMDGPU_GEM_DOMAIN_VRAM |
2729							       AMDGPU_GEM_DOMAIN_GTT,
2730							       AMDGPU_CSA_SIZE);
2731				if (r) {
2732					DRM_ERROR("allocate CSA failed %d\n", r);
2733					goto init_failed;
2734				}
2735			}
2736
2737			r = amdgpu_seq64_init(adev);
2738			if (r) {
2739				DRM_ERROR("allocate seq64 failed %d\n", r);
2740				goto init_failed;
2741			}
2742		}
2743	}
2744
2745	if (amdgpu_sriov_vf(adev))
2746		amdgpu_virt_init_data_exchange(adev);
2747
2748	r = amdgpu_ib_pool_init(adev);
2749	if (r) {
2750		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2751		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2752		goto init_failed;
2753	}
2754
2755	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init completes */
2756	if (r)
2757		goto init_failed;
2758
2759	r = amdgpu_device_ip_hw_init_phase1(adev);
2760	if (r)
2761		goto init_failed;
2762
2763	r = amdgpu_device_fw_loading(adev);
2764	if (r)
2765		goto init_failed;
2766
2767	r = amdgpu_device_ip_hw_init_phase2(adev);
2768	if (r)
2769		goto init_failed;
2770
2771	/*
2772	 * Retired pages will be loaded from eeprom and reserved here;
2773	 * this must be called after amdgpu_device_ip_hw_init_phase2 since
2774	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2775	 * functional for I2C communication, which is only true at this point.
2776	 *
2777	 * amdgpu_ras_recovery_init may fail, but the upper layer only cares
2778	 * about failures caused by a bad gpu and stops the amdgpu init
2779	 * process accordingly.  For other failures it still releases all
2780	 * the resources and prints an error message rather than returning a
2781	 * negative value to the upper level.
2782	 *
2783	 * Note: theoretically, this should be called before all vram
2784	 * allocations to protect retired pages from being abused.
2785	 */
2786	r = amdgpu_ras_recovery_init(adev);
2787	if (r)
2788		goto init_failed;
2789
2790	/*
2791	 * In the case of XGMI, grab an extra reference on the reset domain for this device.
2792	 */
2793	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2794		if (amdgpu_xgmi_add_device(adev) == 0) {
2795			if (!amdgpu_sriov_vf(adev)) {
2796				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2797
2798				if (WARN_ON(!hive)) {
2799					r = -ENOENT;
2800					goto init_failed;
2801				}
2802
2803				if (!hive->reset_domain ||
2804				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2805					r = -ENOENT;
2806					amdgpu_put_xgmi_hive(hive);
2807					goto init_failed;
2808				}
2809
2810				/* Drop the early temporary reset domain we created for device */
2811				amdgpu_reset_put_reset_domain(adev->reset_domain);
2812				adev->reset_domain = hive->reset_domain;
2813				amdgpu_put_xgmi_hive(hive);
2814			}
2815		}
2816	}
2817
2818	r = amdgpu_device_init_schedulers(adev);
2819	if (r)
2820		goto init_failed;
2821
2822	if (adev->mman.buffer_funcs_ring->sched.ready)
2823		amdgpu_ttm_set_buffer_funcs_status(adev, true);
2824
2825	/* Don't init kfd if the whole hive needs to be reset during init */
2826	if (!adev->gmc.xgmi.pending_reset) {
2827		kgd2kfd_init_zone_device(adev);
2828		amdgpu_amdkfd_device_init(adev);
2829	}
2830
2831	amdgpu_fru_get_product_info(adev);
2832
2833init_failed:
2834
2835	return r;
2836}
2837
2838/**
2839 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2840 *
2841 * @adev: amdgpu_device pointer
2842 *
2843 * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2844 * this function before a GPU reset.  If the value is retained after a
2845 * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2846 */
2847static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2848{
2849	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2850}
2851
2852/**
2853 * amdgpu_device_check_vram_lost - check if vram is valid
2854 *
2855 * @adev: amdgpu_device pointer
2856 *
2857 * Checks the reset magic value written to the gart pointer in VRAM.
2858 * The driver calls this after a GPU reset to see if the contents of
2859 * VRAM have been lost or not.
2860 * Returns true if vram is lost, false if not.
2861 */
2862static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2863{
2864	if (memcmp(adev->gart.ptr, adev->reset_magic,
2865			AMDGPU_RESET_MAGIC_NUM))
2866		return true;
2867
2868	if (!amdgpu_in_reset(adev))
2869		return false;
2870
2871	/*
2872	 * For all ASICs with baco/mode1 reset, the VRAM is
2873	 * always assumed to be lost.
2874	 */
2875	switch (amdgpu_asic_reset_method(adev)) {
2876	case AMD_RESET_METHOD_BACO:
2877	case AMD_RESET_METHOD_MODE1:
2878		return true;
2879	default:
2880		return false;
2881	}
2882}
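
/*
 * Usage sketch (illustrative only; amdgpu_example_reset_kept_vram is a
 * hypothetical helper): pair the two helpers above around an ASIC reset
 * to detect whether VRAM contents survived.
 */
static inline bool amdgpu_example_reset_kept_vram(struct amdgpu_device *adev)
{
	amdgpu_device_fill_reset_magic(adev);		/* before the reset */
	amdgpu_asic_reset(adev);			/* the reset itself */
	return !amdgpu_device_check_vram_lost(adev);	/* true if VRAM kept */
}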
2883
2884/**
2885 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2886 *
2887 * @adev: amdgpu_device pointer
2888 * @state: clockgating state (gate or ungate)
2889 *
2890 * The list of all the hardware IPs that make up the asic is walked and the
2891 * set_clockgating_state callbacks are run.
2892 * During the late init pass, clockgating is enabled for the hardware IPs;
2893 * during fini or suspend, clockgating is disabled.
2894 * Returns 0 on success, negative error code on failure.
2895 */
2896
2897int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2898			       enum amd_clockgating_state state)
2899{
2900	int i, j, r;
2901
2902	if (amdgpu_emu_mode == 1)
2903		return 0;
2904
2905	for (j = 0; j < adev->num_ip_blocks; j++) {
2906		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2907		if (!adev->ip_blocks[i].status.late_initialized)
2908			continue;
2909		/* skip CG for GFX, SDMA on S0ix */
2910		if (adev->in_s0ix &&
2911		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2912		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2913			continue;
2914		/* skip CG for VCE/UVD, it's handled specially */
2915		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2916		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2917		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2918		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2919		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2920			/* enable clockgating to save power */
2921			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2922										     state);
2923			if (r) {
2924				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2925					  adev->ip_blocks[i].version->funcs->name, r);
2926				return r;
2927			}
2928		}
2929	}
2930
2931	return 0;
2932}
2933
2934int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2935			       enum amd_powergating_state state)
2936{
2937	int i, j, r;
2938
2939	if (amdgpu_emu_mode == 1)
2940		return 0;
2941
2942	for (j = 0; j < adev->num_ip_blocks; j++) {
2943		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2944		if (!adev->ip_blocks[i].status.late_initialized)
2945			continue;
2946		/* skip PG for GFX, SDMA on S0ix */
2947		if (adev->in_s0ix &&
2948		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2949		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2950			continue;
2951		/* skip PG for VCE/UVD, it's handled specially */
2952		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2953		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2954		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2955		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2956		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2957			/* enable powergating to save power */
2958			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2959											state);
2960			if (r) {
2961				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2962					  adev->ip_blocks[i].version->funcs->name, r);
2963				return r;
2964			}
2965		}
2966	}
2967	return 0;
2968}
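
/*
 * The index mirroring used by both walkers above, in isolation
 * (hypothetical helper, not part of the driver): gating visits blocks in
 * init order, ungating visits them in reverse.
 */
static inline int amdgpu_example_gating_index(int j, int num_blocks, bool gate)
{
	return gate ? j : num_blocks - j - 1;
}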
2969
2970static int amdgpu_device_enable_mgpu_fan_boost(void)
2971{
2972	struct amdgpu_gpu_instance *gpu_ins;
2973	struct amdgpu_device *adev;
2974	int i, ret = 0;
2975
2976	mutex_lock(&mgpu_info.mutex);
2977
2978	/*
2979	 * MGPU fan boost feature should be enabled
2980	 * only when there are two or more dGPUs in
2981	 * the system
2982	 */
2983	if (mgpu_info.num_dgpu < 2)
2984		goto out;
2985
2986	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2987		gpu_ins = &(mgpu_info.gpu_ins[i]);
2988		adev = gpu_ins->adev;
2989		if (!(adev->flags & AMD_IS_APU) &&
2990		    !gpu_ins->mgpu_fan_enabled) {
2991			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2992			if (ret)
2993				break;
2994
2995			gpu_ins->mgpu_fan_enabled = 1;
2996		}
2997	}
2998
2999out:
3000	mutex_unlock(&mgpu_info.mutex);
3001
3002	return ret;
3003}
3004
3005/**
3006 * amdgpu_device_ip_late_init - run late init for hardware IPs
3007 *
3008 * @adev: amdgpu_device pointer
3009 *
3010 * Late initialization pass for hardware IPs.  The list of all the hardware
3011 * IPs that make up the asic is walked and the late_init callbacks are run.
3012 * late_init covers any special initialization that an IP requires
3013 * after all of the IPs have been initialized or something that needs to happen
3014 * late in the init process.
3015 * Returns 0 on success, negative error code on failure.
3016 */
3017static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
3018{
3019	struct amdgpu_gpu_instance *gpu_instance;
3020	int i = 0, r;
3021
3022	for (i = 0; i < adev->num_ip_blocks; i++) {
3023		if (!adev->ip_blocks[i].status.hw)
3024			continue;
3025		if (adev->ip_blocks[i].version->funcs->late_init) {
3026			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
3027			if (r) {
3028				DRM_ERROR("late_init of IP block <%s> failed %d\n",
3029					  adev->ip_blocks[i].version->funcs->name, r);
3030				return r;
3031			}
3032		}
3033		adev->ip_blocks[i].status.late_initialized = true;
3034	}
3035
3036	r = amdgpu_ras_late_init(adev);
3037	if (r) {
3038		DRM_ERROR("amdgpu_ras_late_init failed %d\n", r);
3039		return r;
3040	}
3041
3042	amdgpu_ras_set_error_query_ready(adev, true);
3043
3044	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3045	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
3046
3047	amdgpu_device_fill_reset_magic(adev);
3048
3049	r = amdgpu_device_enable_mgpu_fan_boost();
3050	if (r)
3051		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
3052
3053	/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
3054	if (amdgpu_passthrough(adev) &&
3055	    ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
3056	     adev->asic_type == CHIP_ALDEBARAN))
3057		amdgpu_dpm_handle_passthrough_sbr(adev, true);
3058
3059	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3060		mutex_lock(&mgpu_info.mutex);
3061
3062		/*
3063		 * Reset the device p-state to low, as it boots at high.
3064		 *
3065		 * This should be performed only after all devices from the same
3066		 * hive get initialized.
3067		 *
3068		 * However, the number of devices in the hive is not known in
3069		 * advance; it is counted one by one during device initialization.
3070		 *
3071		 * So we wait until all XGMI interlinked devices are initialized.
3072		 * This may introduce some delay, as those devices may come from
3073		 * different hives, but that should be OK.
3074		 */
3075		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
3076			for (i = 0; i < mgpu_info.num_gpu; i++) {
3077				gpu_instance = &(mgpu_info.gpu_ins[i]);
3078				if (gpu_instance->adev->flags & AMD_IS_APU)
3079					continue;
3080
3081				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
3082						AMDGPU_XGMI_PSTATE_MIN);
3083				if (r) {
3084					DRM_ERROR("pstate setting failed (%d).\n", r);
3085					break;
3086				}
3087			}
3088		}
3089
3090		mutex_unlock(&mgpu_info.mutex);
3091	}
3092
3093	return 0;
3094}
3095
3096/**
3097 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
3098 *
3099 * @adev: amdgpu_device pointer
3100 *
3101 * For ASICs that need to disable the SMC first
3102 */
3103static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
3104{
3105	int i, r;
3106
3107	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
3108		return;
3109
3110	for (i = 0; i < adev->num_ip_blocks; i++) {
3111		if (!adev->ip_blocks[i].status.hw)
3112			continue;
3113		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3114			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3115			/* XXX handle errors */
3116			if (r) {
3117				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3118					  adev->ip_blocks[i].version->funcs->name, r);
3119			}
3120			adev->ip_blocks[i].status.hw = false;
3121			break;
3122		}
3123	}
3124}
3125
3126static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
3127{
3128	int i, r;
3129
3130	for (i = 0; i < adev->num_ip_blocks; i++) {
3131		if (!adev->ip_blocks[i].version->funcs->early_fini)
3132			continue;
3133
3134		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
3135		if (r) {
3136			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
3137				  adev->ip_blocks[i].version->funcs->name, r);
3138		}
3139	}
3140
3141	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3142	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3143
3144	amdgpu_amdkfd_suspend(adev, false);
3145
3146	/* Workaround for ASICs that need to disable the SMC first */
3147	amdgpu_device_smu_fini_early(adev);
3148
3149	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3150		if (!adev->ip_blocks[i].status.hw)
3151			continue;
3152
3153		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3154		/* XXX handle errors */
3155		if (r) {
3156			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3157				  adev->ip_blocks[i].version->funcs->name, r);
3158		}
3159
3160		adev->ip_blocks[i].status.hw = false;
3161	}
3162
3163	if (amdgpu_sriov_vf(adev)) {
3164		if (amdgpu_virt_release_full_gpu(adev, false))
3165			DRM_ERROR("failed to release exclusive mode on fini\n");
3166	}
3167
3168	return 0;
3169}
3170
3171/**
3172 * amdgpu_device_ip_fini - run fini for hardware IPs
3173 *
3174 * @adev: amdgpu_device pointer
3175 *
3176 * Main teardown pass for hardware IPs.  The list of all the hardware
3177 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
3178 * are run.  hw_fini tears down the hardware associated with each IP
3179 * and sw_fini tears down any software state associated with each IP.
3180 * Returns 0 on success, negative error code on failure.
3181 */
3182static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
3183{
3184	int i, r;
3185
3186	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
3187		amdgpu_virt_release_ras_err_handler_data(adev);
3188
3189	if (adev->gmc.xgmi.num_physical_nodes > 1)
3190		amdgpu_xgmi_remove_device(adev);
3191
3192	amdgpu_amdkfd_device_fini_sw(adev);
3193
3194	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3195		if (!adev->ip_blocks[i].status.sw)
3196			continue;
3197
3198		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
3199			amdgpu_ucode_free_bo(adev);
3200			amdgpu_free_static_csa(&adev->virt.csa_obj);
3201			amdgpu_device_wb_fini(adev);
3202			amdgpu_device_mem_scratch_fini(adev);
3203			amdgpu_ib_pool_fini(adev);
3204			amdgpu_seq64_fini(adev);
3205		}
3206
3207		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
3208		/* XXX handle errors */
3209		if (r) {
3210			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
3211				  adev->ip_blocks[i].version->funcs->name, r);
3212		}
3213		adev->ip_blocks[i].status.sw = false;
3214		adev->ip_blocks[i].status.valid = false;
3215	}
3216
3217	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3218		if (!adev->ip_blocks[i].status.late_initialized)
3219			continue;
3220		if (adev->ip_blocks[i].version->funcs->late_fini)
3221			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
3222		adev->ip_blocks[i].status.late_initialized = false;
3223	}
3224
3225	amdgpu_ras_fini(adev);
3226
3227	return 0;
3228}
3229
3230/**
3231 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3232 *
3233 * @work: work_struct.
3234 */
3235static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
3236{
3237	struct amdgpu_device *adev =
3238		container_of(work, struct amdgpu_device, delayed_init_work.work);
3239	int r;
3240
3241	r = amdgpu_ib_ring_tests(adev);
3242	if (r)
3243		DRM_ERROR("ib ring test failed (%d).\n", r);
3244}
3245
3246static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
3247{
3248	struct amdgpu_device *adev =
3249		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
3250
3251	WARN_ON_ONCE(adev->gfx.gfx_off_state);
3252	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3253
3254	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
3255		adev->gfx.gfx_off_state = true;
3256}
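
/*
 * Counting scheme behind the delayed work above (illustrative summary,
 * not normative): gfx_off_req_count starts at 1 so GFXOFF stays blocked
 * during init; each "allow" request decrements it, and only when it
 * reaches 0 is gfx_off_delay_work scheduled to actually enter GFXOFF.
 *
 *	amdgpu_gfx_off_ctrl(adev, true);    allow: decrement req_count
 *	amdgpu_gfx_off_ctrl(adev, false);   disallow: increment req_count
 */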
3257
3258/**
3259 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3260 *
3261 * @adev: amdgpu_device pointer
3262 *
3263 * Main suspend function for hardware IPs.  The list of all the hardware
3264 * IPs that make up the asic is walked, clockgating is disabled and the
3265 * suspend callbacks are run.  suspend puts the hardware and software state
3266 * in each IP into a state suitable for suspend.
3267 * Returns 0 on success, negative error code on failure.
3268 */
3269static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3270{
3271	int i, r;
3272
3273	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3274	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3275
3276	/*
3277	 * Per the PMFW team's suggestion, the driver needs to disable the
3278	 * gfxoff and df cstate features in the gpu reset (e.g. Mode1Reset)
3279	 * scenario. Add the missing df cstate disablement here.
3280	 */
3281	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3282		dev_warn(adev->dev, "Failed to disallow df cstate");
3283
3284	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3285		if (!adev->ip_blocks[i].status.valid)
3286			continue;
3287
3288		/* displays are handled separately */
3289		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3290			continue;
3291
3293		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3294		/* XXX handle errors */
3295		if (r) {
3296			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3297				  adev->ip_blocks[i].version->funcs->name, r);
3298			return r;
3299		}
3300
3301		adev->ip_blocks[i].status.hw = false;
3302	}
3303
3304	return 0;
3305}
3306
3307/**
3308 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3309 *
3310 * @adev: amdgpu_device pointer
3311 *
3312 * Main suspend function for hardware IPs.  The list of all the hardware
3313 * IPs that make up the asic is walked, clockgating is disabled and the
3314 * suspend callbacks are run.  suspend puts the hardware and software state
3315 * in each IP into a state suitable for suspend.
3316 * Returns 0 on success, negative error code on failure.
3317 */
3318static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3319{
3320	int i, r;
3321
3322	if (adev->in_s0ix)
3323		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3324
3325	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3326		if (!adev->ip_blocks[i].status.valid)
3327			continue;
3328		/* displays are handled in phase1 */
3329		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3330			continue;
3331		/* PSP lost connection when err_event_athub occurs */
3332		if (amdgpu_ras_intr_triggered() &&
3333		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3334			adev->ip_blocks[i].status.hw = false;
3335			continue;
3336		}
3337
3338		/* skip unnecessary suspend if we have not initialized them yet */
3339		if (adev->gmc.xgmi.pending_reset &&
3340		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3341		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3342		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3343		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3344			adev->ip_blocks[i].status.hw = false;
3345			continue;
3346		}
3347
3348		/* skip suspend of gfx/mes and psp for S0ix
3349		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3350		 * like at runtime. PSP is also part of the always on hardware
3351		 * so no need to suspend it.
3352		 */
3353		if (adev->in_s0ix &&
3354		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3355		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3356		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3357			continue;
3358
3359		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3360		if (adev->in_s0ix &&
3361		    (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3362		     IP_VERSION(5, 0, 0)) &&
3363		    (adev->ip_blocks[i].version->type ==
3364		     AMD_IP_BLOCK_TYPE_SDMA))
3365			continue;
3366
3367		/* During cold boot, swPSP provides the IMU and RLC FW binaries to TOS.
3368		 * These live in the TMR and are expected to be reused by PSP-TOS, which
3369		 * reloads from this location; RLC Autoload is also loaded from here
3370		 * automatically, based on the PMFW -> PSP message during the re-init
3371		 * sequence.  Therefore, psp suspend & resume should be skipped on
3372		 * IMU-enabled APU ASICs, to avoid destroying the TMR and reloading FWs.
3373		 */
3374		if (amdgpu_in_reset(adev) &&
3375		    (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3376		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3377			continue;
3378
3380		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3381		/* XXX handle errors */
3382		if (r) {
3383			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3384				  adev->ip_blocks[i].version->funcs->name, r);
3385		}
3386		adev->ip_blocks[i].status.hw = false;
3387		/* handle putting the SMC in the appropriate state */
3388		if (!amdgpu_sriov_vf(adev)) {
3389			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3390				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3391				if (r) {
3392					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3393							adev->mp1_state, r);
3394					return r;
3395				}
3396			}
3397		}
3398	}
3399
3400	return 0;
3401}
3402
3403/**
3404 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3405 *
3406 * @adev: amdgpu_device pointer
3407 *
3408 * Main suspend function for hardware IPs.  The list of all the hardware
3409 * IPs that make up the asic is walked, clockgating is disabled and the
3410 * suspend callbacks are run.  suspend puts the hardware and software state
3411 * in each IP into a state suitable for suspend.
3412 * Returns 0 on success, negative error code on failure.
3413 */
3414int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3415{
3416	int r;
3417
3418	if (amdgpu_sriov_vf(adev)) {
3419		amdgpu_virt_fini_data_exchange(adev);
3420		amdgpu_virt_request_full_gpu(adev, false);
3421	}
3422
3423	amdgpu_ttm_set_buffer_funcs_status(adev, false);
3424
3425	r = amdgpu_device_ip_suspend_phase1(adev);
3426	if (r)
3427		return r;
3428	r = amdgpu_device_ip_suspend_phase2(adev);
3429
3430	if (amdgpu_sriov_vf(adev))
3431		amdgpu_virt_release_full_gpu(adev, false);
3432
3433	return r;
3434}
3435
3436static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3437{
3438	int i, r;
3439
3440	static enum amd_ip_block_type ip_order[] = {
3441		AMD_IP_BLOCK_TYPE_COMMON,
3442		AMD_IP_BLOCK_TYPE_GMC,
3443		AMD_IP_BLOCK_TYPE_PSP,
3444		AMD_IP_BLOCK_TYPE_IH,
3445	};
3446
3447	for (i = 0; i < adev->num_ip_blocks; i++) {
3448		int j;
3449		struct amdgpu_ip_block *block;
3450
3451		block = &adev->ip_blocks[i];
3452		block->status.hw = false;
3453
3454		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3455
3456			if (block->version->type != ip_order[j] ||
3457				!block->status.valid)
3458				continue;
3459
3460			r = block->version->funcs->hw_init(adev);
3461			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3462			if (r)
3463				return r;
3464			block->status.hw = true;
3465		}
3466	}
3467
3468	return 0;
3469}
3470
3471static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3472{
3473	int i, r;
3474
3475	static enum amd_ip_block_type ip_order[] = {
3476		AMD_IP_BLOCK_TYPE_SMC,
3477		AMD_IP_BLOCK_TYPE_DCE,
3478		AMD_IP_BLOCK_TYPE_GFX,
3479		AMD_IP_BLOCK_TYPE_SDMA,
3480		AMD_IP_BLOCK_TYPE_MES,
3481		AMD_IP_BLOCK_TYPE_UVD,
3482		AMD_IP_BLOCK_TYPE_VCE,
3483		AMD_IP_BLOCK_TYPE_VCN,
3484		AMD_IP_BLOCK_TYPE_JPEG
3485	};
3486
3487	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3488		int j;
3489		struct amdgpu_ip_block *block;
3490
3491		for (j = 0; j < adev->num_ip_blocks; j++) {
3492			block = &adev->ip_blocks[j];
3493
3494			if (block->version->type != ip_order[i] ||
3495				!block->status.valid ||
3496				block->status.hw)
3497				continue;
3498
3499			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3500				r = block->version->funcs->resume(adev);
3501			else
3502				r = block->version->funcs->hw_init(adev);
3503
3504			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3505			if (r)
3506				return r;
3507			block->status.hw = true;
3508		}
3509	}
3510
3511	return 0;
3512}
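
/*
 * Sketch of the SR-IOV re-init sequence the two helpers above implement
 * (illustrative; the exact call sites live in the reset path): early
 * re-init brings up the blocks everything else depends on, firmware is
 * reloaded, and late re-init restores the remainder in the fixed ip_order.
 *
 *	amdgpu_device_ip_reinit_early_sriov(adev);   COMMON/GMC/PSP/IH
 *	amdgpu_device_fw_loading(adev);
 *	amdgpu_device_ip_reinit_late_sriov(adev);    SMC/DCE/GFX/SDMA/...
 */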
3513
3514/**
3515 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3516 *
3517 * @adev: amdgpu_device pointer
3518 *
3519 * First resume function for hardware IPs.  The list of all the hardware
3520 * IPs that make up the asic is walked and the resume callbacks are run for
3521 * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3522 * after a suspend and updates the software state as necessary.  This
3523 * function is also used for restoring the GPU after a GPU reset.
3524 * Returns 0 on success, negative error code on failure.
3525 */
3526static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3527{
3528	int i, r;
3529
3530	for (i = 0; i < adev->num_ip_blocks; i++) {
3531		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3532			continue;
3533		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3534		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3535		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3536		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3537
3538			r = adev->ip_blocks[i].version->funcs->resume(adev);
3539			if (r) {
3540				DRM_ERROR("resume of IP block <%s> failed %d\n",
3541					  adev->ip_blocks[i].version->funcs->name, r);
3542				return r;
3543			}
3544			adev->ip_blocks[i].status.hw = true;
3545		}
3546	}
3547
3548	return 0;
3549}
3550
3551/**
3552 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3553 *
3554 * @adev: amdgpu_device pointer
3555 *
3556 * Second resume function for hardware IPs.  The list of all the hardware
3557 * IPs that make up the asic is walked and the resume callbacks are run for
3558 * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3559 * functional state after a suspend and updates the software state as
3560 * necessary.  This function is also used for restoring the GPU after a GPU
3561 * reset.
3562 * Returns 0 on success, negative error code on failure.
3563 */
3564static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3565{
3566	int i, r;
3567
3568	for (i = 0; i < adev->num_ip_blocks; i++) {
3569		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3570			continue;
3571		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3572		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3573		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3574		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3575			continue;
3576		r = adev->ip_blocks[i].version->funcs->resume(adev);
3577		if (r) {
3578			DRM_ERROR("resume of IP block <%s> failed %d\n",
3579				  adev->ip_blocks[i].version->funcs->name, r);
3580			return r;
3581		}
3582		adev->ip_blocks[i].status.hw = true;
3583	}
3584
3585	return 0;
3586}
3587
3588/**
3589 * amdgpu_device_ip_resume - run resume for hardware IPs
3590 *
3591 * @adev: amdgpu_device pointer
3592 *
3593 * Main resume function for hardware IPs.  The hardware IPs
3594 * are split into two resume functions because they are
3595 * also used in recovering from a GPU reset and some additional
3596 * steps need to be taken between them.  In this case (S3/S4) they are
3597 * run sequentially.
3598 * Returns 0 on success, negative error code on failure.
3599 */
3600static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3601{
3602	int r;
3603
3604	r = amdgpu_device_ip_resume_phase1(adev);
3605	if (r)
3606		return r;
3607
3608	r = amdgpu_device_fw_loading(adev);
3609	if (r)
3610		return r;
3611
3612	r = amdgpu_device_ip_resume_phase2(adev);
3613
3614	if (adev->mman.buffer_funcs_ring->sched.ready)
3615		amdgpu_ttm_set_buffer_funcs_status(adev, true);
3616
3617	return r;
3618}
3619
3620/**
3621 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3622 *
3623 * @adev: amdgpu_device pointer
3624 *
3625 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3626 */
3627static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3628{
3629	if (amdgpu_sriov_vf(adev)) {
3630		if (adev->is_atom_fw) {
3631			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3632				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3633		} else {
3634			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3635				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3636		}
3637
3638		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3639			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3640	}
3641}
3642
3643/**
3644 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3645 *
3646 * @asic_type: AMD asic type
3647 *
3648 * Check if there is DC (new modesetting infrastructure) support for an asic.
3649 * returns true if DC has support, false if not.
3650 */
3651bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3652{
3653	switch (asic_type) {
3654#ifdef CONFIG_DRM_AMDGPU_SI
3655	case CHIP_HAINAN:
3656#endif
3657	case CHIP_TOPAZ:
3658		/* chips with no display hardware */
3659		return false;
3660#if defined(CONFIG_DRM_AMD_DC)
3661	case CHIP_TAHITI:
3662	case CHIP_PITCAIRN:
3663	case CHIP_VERDE:
3664	case CHIP_OLAND:
3665		/*
3666		 * We have systems in the wild with these ASICs that require
3667		 * LVDS and VGA support which is not supported with DC.
3668		 *
3669		 * Fallback to the non-DC driver here by default so as not to
3670		 * cause regressions.
3671		 */
3672#if defined(CONFIG_DRM_AMD_DC_SI)
3673		return amdgpu_dc > 0;
3674#else
3675		return false;
3676#endif
3677	case CHIP_BONAIRE:
3678	case CHIP_KAVERI:
3679	case CHIP_KABINI:
3680	case CHIP_MULLINS:
3681		/*
3682		 * We have systems in the wild with these ASICs that require
3683		 * VGA support which is not supported with DC.
3684		 *
3685		 * Fallback to the non-DC driver here by default so as not to
3686		 * cause regressions.
3687		 */
3688		return amdgpu_dc > 0;
3689	default:
3690		return amdgpu_dc != 0;
3691#else
3692	default:
3693		if (amdgpu_dc > 0)
3694			DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
3695		return false;
3696#endif
3697	}
3698}
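
/*
 * The amdgpu_dc module parameter steers the checks above (illustrative
 * summary): dc=-1 (the default) lets the per-ASIC logic decide, dc=0
 * forces the legacy non-DC path, and dc=1 opts the listed legacy ASICs
 * into DC where it is compiled in, e.g.:
 *
 *	modprobe amdgpu dc=1
 */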
3699
3700/**
3701 * amdgpu_device_has_dc_support - check if dc is supported
3702 *
3703 * @adev: amdgpu_device pointer
3704 *
3705 * Returns true for supported, false for not supported
3706 */
3707bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3708{
3709	if (adev->enable_virtual_display ||
3710	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3711		return false;
3712
3713	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3714}
3715
3716static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3717{
3718	struct amdgpu_device *adev =
3719		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3720	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3721
3722	/* It's a bug to not have a hive within this function */
3723	if (WARN_ON(!hive))
3724		return;
3725
3726	/*
3727	 * Use task barrier to synchronize all xgmi reset works across the
3728	 * hive. task_barrier_enter and task_barrier_exit will block
3729	 * until all the threads running the xgmi reset works reach
3730	 * those points. task_barrier_full will do both blocks.
3731	 */
3732	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3733
3734		task_barrier_enter(&hive->tb);
3735		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3736
3737		if (adev->asic_reset_res)
3738			goto fail;
3739
3740		task_barrier_exit(&hive->tb);
3741		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3742
3743		if (adev->asic_reset_res)
3744			goto fail;
3745
3746		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
3747	} else {
3748
3749		task_barrier_full(&hive->tb);
3750		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3751	}
3752
3753fail:
3754	if (adev->asic_reset_res)
3755		DRM_WARN("ASIC reset failed with error %d for drm dev %s",
3756			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3757	amdgpu_put_xgmi_hive(hive);
3758}
3759
3760static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3761{
3762	char *input = amdgpu_lockup_timeout;
3763	char *timeout_setting = NULL;
3764	int index = 0;
3765	long timeout;
3766	int ret = 0;
3767
3768	/*
3769	 * By default the timeout for non-compute jobs is 10000
3770	 * and 60000 for compute jobs.
3771	 * In SR-IOV or passthrough mode, the timeout for compute
3772	 * jobs is 60000 by default.
3773	 */
3774	adev->gfx_timeout = msecs_to_jiffies(10000);
3775	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3776	if (amdgpu_sriov_vf(adev))
3777		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3778					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3779	else
3780		adev->compute_timeout =  msecs_to_jiffies(60000);
3781
3782	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3783		while ((timeout_setting = strsep(&input, ",")) &&
3784				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3785			ret = kstrtol(timeout_setting, 0, &timeout);
3786			if (ret)
3787				return ret;
3788
3789			if (timeout == 0) {
3790				index++;
3791				continue;
3792			} else if (timeout < 0) {
3793				timeout = MAX_SCHEDULE_TIMEOUT;
3794				dev_warn(adev->dev, "lockup timeout disabled");
3795				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3796			} else {
3797				timeout = msecs_to_jiffies(timeout);
3798			}
3799
3800			switch (index++) {
3801			case 0:
3802				adev->gfx_timeout = timeout;
3803				break;
3804			case 1:
3805				adev->compute_timeout = timeout;
3806				break;
3807			case 2:
3808				adev->sdma_timeout = timeout;
3809				break;
3810			case 3:
3811				adev->video_timeout = timeout;
3812				break;
3813			default:
3814				break;
3815			}
3816		}
3817		/*
3818		 * There is only one value specified and
3819		 * it should apply to all non-compute jobs.
3820		 */
3821		if (index == 1) {
3822			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3823			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3824				adev->compute_timeout = adev->gfx_timeout;
3825		}
3826	}
3827
3828	return ret;
3829}
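
/*
 * Example values for the lockup_timeout parameter parsed above
 * (illustrative): one value applies to all non-compute queues (and to
 * compute as well under SR-IOV/passthrough); up to four comma-separated
 * values map to gfx,compute,sdma,video in that order.  0 keeps the
 * default for that slot and a negative value disables the timeout.
 *
 *	modprobe amdgpu lockup_timeout=10000
 *	modprobe amdgpu lockup_timeout=10000,60000,10000,10000
 */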
3830
3831/**
3832 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3833 *
3834 * @adev: amdgpu_device pointer
3835 *
3836 * RAM is direct mapped to the GPU if the IOMMU is disabled or in passthrough mode
3837 */
3838static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3839{
3840	struct iommu_domain *domain;
3841
3842	domain = iommu_get_domain_for_dev(adev->dev);
3843	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3844		adev->ram_is_direct_mapped = true;
3845}
3846
3847static const struct attribute *amdgpu_dev_attributes[] = {
3848	&dev_attr_pcie_replay_count.attr,
3849	NULL
3850};
3851
3852static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3853{
3854	if (amdgpu_mcbp == 1)
3855		adev->gfx.mcbp = true;
3856	else if (amdgpu_mcbp == 0)
3857		adev->gfx.mcbp = false;
3858
3859	if (amdgpu_sriov_vf(adev))
3860		adev->gfx.mcbp = true;
3861
3862	if (adev->gfx.mcbp)
3863		DRM_INFO("MCBP is enabled\n");
3864}
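
/*
 * Resulting policy of the helper above (illustrative summary): the
 * amdgpu_mcbp parameter forces mid-command-buffer preemption on (1) or
 * off (0), and SR-IOV enables it unconditionally, e.g.:
 *
 *	modprobe amdgpu mcbp=1
 */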
3865
3866/**
3867 * amdgpu_device_init - initialize the driver
3868 *
3869 * @adev: amdgpu_device pointer
3870 * @flags: driver flags
3871 *
3872 * Initializes the driver info and hw (all asics).
3873 * Returns 0 for success or an error on failure.
3874 * Called at driver startup.
3875 */
3876int amdgpu_device_init(struct amdgpu_device *adev,
3877		       uint32_t flags)
3878{
3879	struct drm_device *ddev = adev_to_drm(adev);
3880	struct pci_dev *pdev = adev->pdev;
3881	int r, i;
3882	bool px = false;
3883	u32 max_MBps;
3884	int tmp;
3885
3886	adev->shutdown = false;
3887	adev->flags = flags;
3888
3889	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3890		adev->asic_type = amdgpu_force_asic_type;
3891	else
3892		adev->asic_type = flags & AMD_ASIC_MASK;
3893
3894	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3895	if (amdgpu_emu_mode == 1)
3896		adev->usec_timeout *= 10;
3897	adev->gmc.gart_size = 512 * 1024 * 1024;
3898	adev->accel_working = false;
3899	adev->num_rings = 0;
3900	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3901	adev->mman.buffer_funcs = NULL;
3902	adev->mman.buffer_funcs_ring = NULL;
3903	adev->vm_manager.vm_pte_funcs = NULL;
3904	adev->vm_manager.vm_pte_num_scheds = 0;
3905	adev->gmc.gmc_funcs = NULL;
3906	adev->harvest_ip_mask = 0x0;
3907	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3908	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3909
3910	adev->smc_rreg = &amdgpu_invalid_rreg;
3911	adev->smc_wreg = &amdgpu_invalid_wreg;
3912	adev->pcie_rreg = &amdgpu_invalid_rreg;
3913	adev->pcie_wreg = &amdgpu_invalid_wreg;
3914	adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
3915	adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
3916	adev->pciep_rreg = &amdgpu_invalid_rreg;
3917	adev->pciep_wreg = &amdgpu_invalid_wreg;
3918	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3919	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3920	adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
3921	adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
3922	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3923	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3924	adev->didt_rreg = &amdgpu_invalid_rreg;
3925	adev->didt_wreg = &amdgpu_invalid_wreg;
3926	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3927	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3928	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3929	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3930
3931	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3932		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3933		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3934
3935	/* mutex initialization is all done here so we
3936	 * can call these functions again without locking issues
3937	 */
3938	mutex_init(&adev->firmware.mutex);
3939	mutex_init(&adev->pm.mutex);
3940	mutex_init(&adev->gfx.gpu_clock_mutex);
3941	mutex_init(&adev->srbm_mutex);
3942	mutex_init(&adev->gfx.pipe_reserve_mutex);
3943	mutex_init(&adev->gfx.gfx_off_mutex);
3944	mutex_init(&adev->gfx.partition_mutex);
3945	mutex_init(&adev->grbm_idx_mutex);
3946	mutex_init(&adev->mn_lock);
3947	mutex_init(&adev->virt.vf_errors.lock);
3948	hash_init(adev->mn_hash);
3949	mutex_init(&adev->psp.mutex);
3950	mutex_init(&adev->notifier_lock);
3951	mutex_init(&adev->pm.stable_pstate_ctx_lock);
3952	mutex_init(&adev->benchmark_mutex);
3953
3954	amdgpu_device_init_apu_flags(adev);
3955
3956	r = amdgpu_device_check_arguments(adev);
3957	if (r)
3958		return r;
3959
3960	spin_lock_init(&adev->mmio_idx_lock);
3961	spin_lock_init(&adev->smc_idx_lock);
3962	spin_lock_init(&adev->pcie_idx_lock);
3963	spin_lock_init(&adev->uvd_ctx_idx_lock);
3964	spin_lock_init(&adev->didt_idx_lock);
3965	spin_lock_init(&adev->gc_cac_idx_lock);
3966	spin_lock_init(&adev->se_cac_idx_lock);
3967	spin_lock_init(&adev->audio_endpt_idx_lock);
3968	spin_lock_init(&adev->mm_stats.lock);
3969
3970	INIT_LIST_HEAD(&adev->shadow_list);
3971	mutex_init(&adev->shadow_list_lock);
3972
3973	INIT_LIST_HEAD(&adev->reset_list);
3974
3975	INIT_LIST_HEAD(&adev->ras_list);
3976
3977	INIT_LIST_HEAD(&adev->pm.od_kobj_list);
3978
3979	INIT_DELAYED_WORK(&adev->delayed_init_work,
3980			  amdgpu_device_delayed_init_work_handler);
3981	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3982			  amdgpu_device_delay_enable_gfx_off);
3983
3984	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3985
3986	adev->gfx.gfx_off_req_count = 1;
3987	adev->gfx.gfx_off_residency = 0;
3988	adev->gfx.gfx_off_entrycount = 0;
3989	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3990
3991	atomic_set(&adev->throttling_logging_enabled, 1);
3992	/*
3993	 * If throttling continues, logging will be performed every minute
3994	 * to avoid log flooding. "-1" is subtracted since the thermal
3995	 * throttling interrupt comes every second. Thus, the total logging
3996	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3997	 * for throttling interrupt) = 60 seconds.
3998	 */
3999	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
4000	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
4001
4002	/* Registers mapping */
4003	/* TODO: block userspace mapping of io register */
4004	if (adev->asic_type >= CHIP_BONAIRE) {
4005		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
4006		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
4007	} else {
4008		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
4009		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
4010	}
4011
4012	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
4013		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
4014
4015	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
4016	if (!adev->rmmio)
4017		return -ENOMEM;
4018
4019	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
4020	DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
4021
4022	/*
4023	 * Reset domain needs to be present early, before the XGMI hive is
4024	 * discovered (if any) and initialized, so that the reset sem and in_gpu
4025	 * reset flag can be used early during init and before calling RREG32.
4026	 */
4027	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
4028	if (!adev->reset_domain)
4029		return -ENOMEM;
4030
4031	/* detect hw virtualization here */
4032	amdgpu_detect_virtualization(adev);
4033
4034	amdgpu_device_get_pcie_info(adev);
4035
4036	r = amdgpu_device_get_job_timeout_settings(adev);
4037	if (r) {
4038		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
4039		return r;
4040	}
4041
4042	/* early init functions */
4043	r = amdgpu_device_ip_early_init(adev);
4044	if (r)
4045		return r;
4046
4047	amdgpu_device_set_mcbp(adev);
4048
4049	/* Get rid of things like offb */
4050	r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
4051	if (r)
4052		return r;
4053
4054	/* Enable TMZ based on IP_VERSION */
4055	amdgpu_gmc_tmz_set(adev);
4056
4057	amdgpu_gmc_noretry_set(adev);
4058	/* Need to get xgmi info early to decide the reset behavior*/
4059	if (adev->gmc.xgmi.supported) {
4060		r = adev->gfxhub.funcs->get_xgmi_info(adev);
4061		if (r)
4062			return r;
4063	}
4064
4065	/* enable PCIE atomic ops */
4066	if (amdgpu_sriov_vf(adev)) {
4067		if (adev->virt.fw_reserve.p_pf2vf)
4068			adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
4069						      adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
4070				(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4071	/* APUs with gfx9 onwards don't rely on PCIe atomics; rather, an
4072	 * internal path natively supports atomics, so set have_atomics_support to true.
4073	 */
4074	} else if ((adev->flags & AMD_IS_APU) &&
4075		   (amdgpu_ip_version(adev, GC_HWIP, 0) >
4076		    IP_VERSION(9, 0, 0))) {
4077		adev->have_atomics_support = true;
4078	} else {
4079		adev->have_atomics_support =
4080			!pci_enable_atomic_ops_to_root(adev->pdev,
4081					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
4082					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4083	}
4084
4085	if (!adev->have_atomics_support)
4086		dev_info(adev->dev, "PCIE atomic ops is not supported\n");
4087
4088	/* doorbell bar mapping and doorbell index init*/
4089	amdgpu_doorbell_init(adev);
4090
4091	if (amdgpu_emu_mode == 1) {
4092		/* post the asic on emulation mode */
4093		emu_soc_asic_init(adev);
4094		goto fence_driver_init;
4095	}
4096
4097	amdgpu_reset_init(adev);
4098
4099	/* detect if we are with an SRIOV vbios */
4100	if (adev->bios)
4101		amdgpu_device_detect_sriov_bios(adev);
4102
4103	/* check if we need to reset the asic
4104	 *  E.g., driver was not cleanly unloaded previously, etc.
4105	 */
4106	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
4107		if (adev->gmc.xgmi.num_physical_nodes) {
4108			dev_info(adev->dev, "Pending hive reset.\n");
4109			adev->gmc.xgmi.pending_reset = true;
4110			/* Only need to init necessary block for SMU to handle the reset */
4111			for (i = 0; i < adev->num_ip_blocks; i++) {
4112				if (!adev->ip_blocks[i].status.valid)
4113					continue;
4114				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
4115				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
4116				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
4117				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
4118					DRM_DEBUG("IP %s disabled for hw_init.\n",
4119						adev->ip_blocks[i].version->funcs->name);
4120					adev->ip_blocks[i].status.hw = true;
4121				}
4122			}
4123		} else {
4124			tmp = amdgpu_reset_method;
4125			/* It should do a default reset when loading or reloading the driver,
4126			 * regardless of the module parameter reset_method.
4127			 */
4128			amdgpu_reset_method = AMD_RESET_METHOD_NONE;
4129			r = amdgpu_asic_reset(adev);
4130			amdgpu_reset_method = tmp;
4131			if (r) {
4132				dev_err(adev->dev, "asic reset on init failed\n");
4133				goto failed;
4134			}
4135		}
4136	}
4137
4138	/* Post card if necessary */
4139	if (amdgpu_device_need_post(adev)) {
4140		if (!adev->bios) {
4141			dev_err(adev->dev, "no vBIOS found\n");
4142			r = -EINVAL;
4143			goto failed;
4144		}
4145		DRM_INFO("GPU posting now...\n");
4146		r = amdgpu_device_asic_init(adev);
4147		if (r) {
4148			dev_err(adev->dev, "gpu post error!\n");
4149			goto failed;
4150		}
4151	}
4152
4153	if (adev->bios) {
4154		if (adev->is_atom_fw) {
4155			/* Initialize clocks */
4156			r = amdgpu_atomfirmware_get_clock_info(adev);
4157			if (r) {
4158				dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
4159				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4160				goto failed;
4161			}
4162		} else {
4163			/* Initialize clocks */
4164			r = amdgpu_atombios_get_clock_info(adev);
4165			if (r) {
4166				dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
4167				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4168				goto failed;
4169			}
4170			/* init i2c buses */
4171			if (!amdgpu_device_has_dc_support(adev))
4172				amdgpu_atombios_i2c_init(adev);
4173		}
4174	}
4175
4176fence_driver_init:
4177	/* Fence driver */
4178	r = amdgpu_fence_driver_sw_init(adev);
4179	if (r) {
4180		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
4181		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
4182		goto failed;
4183	}
4184
4185	/* init the mode config */
4186	drm_mode_config_init(adev_to_drm(adev));
4187
4188	r = amdgpu_device_ip_init(adev);
4189	if (r) {
4190		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
4191		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
4192		goto release_ras_con;
4193	}
4194
4195	amdgpu_fence_driver_hw_init(adev);
4196
4197	dev_info(adev->dev,
4198		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
4199			adev->gfx.config.max_shader_engines,
4200			adev->gfx.config.max_sh_per_se,
4201			adev->gfx.config.max_cu_per_sh,
4202			adev->gfx.cu_info.number);
4203
4204	adev->accel_working = true;
4205
4206	amdgpu_vm_check_compute_bug(adev);
4207
4208	/* Initialize the buffer migration limit. */
4209	if (amdgpu_moverate >= 0)
4210		max_MBps = amdgpu_moverate;
4211	else
4212		max_MBps = 8; /* Allow 8 MB/s. */
4213	/* Get a log2 for easy divisions. */
4214	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
4215
4216	/*
4217	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
4218	 * Otherwise the mgpu fan boost feature will be skipped because the
4219	 * gpu instance count would be too low.
4220	 */
4221	amdgpu_register_gpu_instance(adev);
4222
4223	/* enable clockgating, etc. after ib tests, etc. since some blocks require
4224	 * explicit gating rather than handling it automatically.
4225	 */
4226	if (!adev->gmc.xgmi.pending_reset) {
4227		r = amdgpu_device_ip_late_init(adev);
4228		if (r) {
4229			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4230			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4231			goto release_ras_con;
4232		}
4233		/* must succeed. */
4234		amdgpu_ras_resume(adev);
4235		queue_delayed_work(system_wq, &adev->delayed_init_work,
4236				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4237	}
4238
4239	if (amdgpu_sriov_vf(adev)) {
4240		amdgpu_virt_release_full_gpu(adev, true);
4241		flush_delayed_work(&adev->delayed_init_work);
4242	}
4243
4244	/*
4245	 * Register these sysfs interfaces after `late_init`, as some of the
4246	 * operations performed in `late_init` might affect how the sysfs
4247	 * interfaces are created.
4248	 */
4249	r = amdgpu_atombios_sysfs_init(adev);
4250	if (r)
4251		drm_err(&adev->ddev,
4252			"registering atombios sysfs failed (%d).\n", r);
4253
4254	r = amdgpu_pm_sysfs_init(adev);
4255	if (r)
4256		DRM_ERROR("registering pm sysfs failed (%d).\n", r);
4257
4258	r = amdgpu_ucode_sysfs_init(adev);
4259	if (r) {
4260		adev->ucode_sysfs_en = false;
4261		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
4262	} else
4263		adev->ucode_sysfs_en = true;
4264
4265	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
4266	if (r)
4267		dev_err(adev->dev, "Could not create amdgpu device attr\n");
4268
4269	r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
4270	if (r)
4271		dev_err(adev->dev,
4272			"Could not create amdgpu board attributes\n");
4273
4274	amdgpu_fru_sysfs_init(adev);
4275	amdgpu_reg_state_sysfs_init(adev);
4276
4277	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
4278		r = amdgpu_pmu_init(adev);
4279		if (r)
4280			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}
4281
4282	/* Have the stored pci confspace at hand to restore after a sudden PCI error */
4283	if (amdgpu_device_cache_pci_state(adev->pdev))
4284		pci_restore_state(pdev);
4285
4286	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
4287	/* this will fail for cards that aren't VGA class devices, just
4288	 * ignore it
4289	 */
4290	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4291		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4292
4293	px = amdgpu_device_supports_px(ddev);
4294
4295	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4296				apple_gmux_detect(NULL, NULL)))
4297		vga_switcheroo_register_client(adev->pdev,
4298					       &amdgpu_switcheroo_ops, px);
4299
4300	if (px)
4301		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4302
4303	if (adev->gmc.xgmi.pending_reset)
4304		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
4305				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4306
4307	amdgpu_device_check_iommu_direct_map(adev);
4308
4309	return 0;
4310
4311release_ras_con:
4312	if (amdgpu_sriov_vf(adev))
4313		amdgpu_virt_release_full_gpu(adev, true);
4314
4315	/* failed in exclusive mode due to timeout */
4316	if (amdgpu_sriov_vf(adev) &&
4317		!amdgpu_sriov_runtime(adev) &&
4318		amdgpu_virt_mmio_blocked(adev) &&
4319		!amdgpu_virt_wait_reset(adev)) {
4320		dev_err(adev->dev, "VF exclusive mode timeout\n");
4321		/* Don't send request since VF is inactive. */
4322		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4323		adev->virt.ops = NULL;
4324		r = -EAGAIN;
4325	}
4326	amdgpu_release_ras_context(adev);
4327
4328failed:
4329	amdgpu_vf_error_trans_all(adev);
4330
4331	return r;
4332}
4333
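/**
 * amdgpu_device_unmap_mmio - unmap all device MMIO mappings
 *
 * @adev: amdgpu_device pointer
 *
 * Clears all CPU mappings pointing at the device and unmaps the
 * doorbell, register and VRAM apertures.  Used when the device is
 * unplugged and its BARs may no longer be accessible.
 */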
4334static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4335{
4336
4337	/* Clear all CPU mappings pointing to this device */
4338	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4339
4340	/* Unmap all mapped bars - Doorbell, registers and VRAM */
4341	amdgpu_doorbell_fini(adev);
4342
4343	iounmap(adev->rmmio);
4344	adev->rmmio = NULL;
4345	if (adev->mman.aper_base_kaddr)
4346		iounmap(adev->mman.aper_base_kaddr);
4347	adev->mman.aper_base_kaddr = NULL;
4348
4349	/* Memory manager related */
4350	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4351		arch_phys_wc_del(adev->gmc.vram_mtrr);
4352		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4353	}
4354}
4355
4356/**
4357 * amdgpu_device_fini_hw - tear down the driver
4358 *
4359 * @adev: amdgpu_device pointer
4360 *
4361 * Tear down the driver info (all asics).
4362 * Called at driver shutdown.
4363 */
4364void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4365{
4366	dev_info(adev->dev, "amdgpu: finishing device.\n");
4367	flush_delayed_work(&adev->delayed_init_work);
4368	adev->shutdown = true;
4369
4370	/* make sure IB tests are finished before entering exclusive mode
4371	 * to avoid preemption during an IB test
4372	 */
4373	if (amdgpu_sriov_vf(adev)) {
4374		amdgpu_virt_request_full_gpu(adev, false);
4375		amdgpu_virt_fini_data_exchange(adev);
4376	}
4377
4378	/* disable all interrupts */
4379	amdgpu_irq_disable_all(adev);
4380	if (adev->mode_info.mode_config_initialized) {
4381		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4382			drm_helper_force_disable_all(adev_to_drm(adev));
4383		else
4384			drm_atomic_helper_shutdown(adev_to_drm(adev));
4385	}
4386	amdgpu_fence_driver_hw_fini(adev);
4387
4388	if (adev->mman.initialized)
4389		drain_workqueue(adev->mman.bdev.wq);
4390
4391	if (adev->pm.sysfs_initialized)
4392		amdgpu_pm_sysfs_fini(adev);
4393	if (adev->ucode_sysfs_en)
4394		amdgpu_ucode_sysfs_fini(adev);
4395	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4396	amdgpu_fru_sysfs_fini(adev);
4397
4398	amdgpu_reg_state_sysfs_fini(adev);
4399
4400	/* ras features must be disabled before hw fini */
4401	amdgpu_ras_pre_fini(adev);
4402
4403	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4404
4405	amdgpu_device_ip_fini_early(adev);
4406
4407	amdgpu_irq_fini_hw(adev);
4408
4409	if (adev->mman.initialized)
4410		ttm_device_clear_dma_mappings(&adev->mman.bdev);
4411
4412	amdgpu_gart_dummy_page_fini(adev);
4413
4414	if (drm_dev_is_unplugged(adev_to_drm(adev)))
4415		amdgpu_device_unmap_mmio(adev);
4416
4417}
4418
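/**
 * amdgpu_device_fini_sw - tear down driver software state
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the remaining software state (all asics) after
 * amdgpu_device_fini_hw has torn down the hardware side.
 * Called at driver shutdown.
 */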
4419void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4420{
4421	int idx;
4422	bool px;
4423
4424	amdgpu_fence_driver_sw_fini(adev);
4425	amdgpu_device_ip_fini(adev);
4426	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4427	adev->accel_working = false;
4428	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4429
4430	amdgpu_reset_fini(adev);
4431
4432	/* free i2c buses */
4433	if (!amdgpu_device_has_dc_support(adev))
4434		amdgpu_i2c_fini(adev);
4435
4436	if (amdgpu_emu_mode != 1)
4437		amdgpu_atombios_fini(adev);
4438
4439	kfree(adev->bios);
4440	adev->bios = NULL;
4441
4442	kfree(adev->fru_info);
4443	adev->fru_info = NULL;
4444
4445	px = amdgpu_device_supports_px(adev_to_drm(adev));
4446
4447	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4448				apple_gmux_detect(NULL, NULL)))
4449		vga_switcheroo_unregister_client(adev->pdev);
4450
4451	if (px)
4452		vga_switcheroo_fini_domain_pm_ops(adev->dev);
4453
4454	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4455		vga_client_unregister(adev->pdev);
4456
4457	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4458
4459		iounmap(adev->rmmio);
4460		adev->rmmio = NULL;
4461		amdgpu_doorbell_fini(adev);
4462		drm_dev_exit(idx);
4463	}
4464
4465	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4466		amdgpu_pmu_fini(adev);
4467	if (adev->mman.discovery_bin)
4468		amdgpu_discovery_fini(adev);
4469
4470	amdgpu_reset_put_reset_domain(adev->reset_domain);
4471	adev->reset_domain = NULL;
4472
4473	kfree(adev->pci_state);
4474
4475}
4476
4477/**
4478 * amdgpu_device_evict_resources - evict device resources
4479 * @adev: amdgpu device object
4480 *
4481 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4482 * of the vram memory type. Mainly used for evicting device resources
4483 * at suspend time.
4484 *
4485 */
4486static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4487{
4488	int ret;
4489
4490	/* No need to evict vram on APUs for suspend to ram or s2idle */
4491	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4492		return 0;
4493
4494	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4495	if (ret)
4496		DRM_WARN("evicting device resources failed\n");
4497	return ret;
4498}
4499
4500/*
4501 * Suspend & resume.
4502 */
4503/**
4504 * amdgpu_device_prepare - prepare for device suspend
4505 *
4506 * @dev: drm dev pointer
4507 *
4508 * Prepare to put the hw in the suspend state (all asics).
4509 * Returns 0 for success or an error on failure.
4510 * Called at driver suspend.
4511 */
4512int amdgpu_device_prepare(struct drm_device *dev)
4513{
4514	struct amdgpu_device *adev = drm_to_adev(dev);
4515	int i, r;
4516
4517	amdgpu_choose_low_power_state(adev);
4518
4519	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4520		return 0;
4521
4522	/* Evict the majority of BOs before starting suspend sequence */
4523	r = amdgpu_device_evict_resources(adev);
4524	if (r)
4525		goto unprepare;
4526
4527	for (i = 0; i < adev->num_ip_blocks; i++) {
4528		if (!adev->ip_blocks[i].status.valid)
4529			continue;
4530		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
4531			continue;
4532		r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
4533		if (r)
4534			goto unprepare;
4535	}
4536
4537	return 0;
4538
4539unprepare:
4540	adev->in_s0ix = adev->in_s3 = false;
4541
4542	return r;
4543}
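
/*
 * Each IP block may optionally provide a ->prepare_suspend callback in its
 * amd_ip_funcs table.  A minimal sketch of such a callback (the name below
 * is illustrative, not taken from this file):
 *
 *   static int foo_prepare_suspend(void *handle)
 *   {
 *           struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 *
 *           // quiesce anything that must be idle before suspend phase 1
 *           return 0;
 *   }
 */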
4544
4545/**
4546 * amdgpu_device_suspend - initiate device suspend
4547 *
4548 * @dev: drm dev pointer
4549 * @fbcon: notify the fbdev of suspend
4550 *
4551 * Puts the hw in the suspend state (all asics).
4552 * Returns 0 for success or an error on failure.
4553 * Called at driver suspend.
4554 */
4555int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4556{
4557	struct amdgpu_device *adev = drm_to_adev(dev);
4558	int r = 0;
4559
4560	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4561		return 0;
4562
4563	adev->in_suspend = true;
4564
4565	if (amdgpu_sriov_vf(adev)) {
4566		amdgpu_virt_fini_data_exchange(adev);
4567		r = amdgpu_virt_request_full_gpu(adev, false);
4568		if (r)
4569			return r;
4570	}
4571
4572	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4573		DRM_WARN("smart shift update failed\n");
4574
4575	if (fbcon)
4576		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4577
4578	cancel_delayed_work_sync(&adev->delayed_init_work);
4579
4580	amdgpu_ras_suspend(adev);
4581
4582	amdgpu_device_ip_suspend_phase1(adev);
4583
4584	if (!adev->in_s0ix)
4585		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4586
4587	r = amdgpu_device_evict_resources(adev);
4588	if (r)
4589		return r;
4590
4591	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4592
4593	amdgpu_fence_driver_hw_fini(adev);
4594
4595	amdgpu_device_ip_suspend_phase2(adev);
4596
4597	if (amdgpu_sriov_vf(adev))
4598		amdgpu_virt_release_full_gpu(adev, false);
4599
4600	r = amdgpu_dpm_notify_rlc_state(adev, false);
4601	if (r)
4602		return r;
4603
4604	return 0;
4605}
4606
4607/**
4608 * amdgpu_device_resume - initiate device resume
4609 *
4610 * @dev: drm dev pointer
4611 * @fbcon: notify the fbdev of resume
4612 *
4613 * Bring the hw back to operating state (all asics).
4614 * Returns 0 for success or an error on failure.
4615 * Called at driver resume.
4616 */
4617int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4618{
4619	struct amdgpu_device *adev = drm_to_adev(dev);
4620	int r = 0;
4621
4622	if (amdgpu_sriov_vf(adev)) {
4623		r = amdgpu_virt_request_full_gpu(adev, true);
4624		if (r)
4625			return r;
4626	}
4627
4628	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4629		return 0;
4630
4631	if (adev->in_s0ix)
4632		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4633
4634	/* post card */
4635	if (amdgpu_device_need_post(adev)) {
4636		r = amdgpu_device_asic_init(adev);
4637		if (r)
4638			dev_err(adev->dev, "amdgpu asic init failed\n");
4639	}
4640
4641	r = amdgpu_device_ip_resume(adev);
4642
4643	if (r) {
4644		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4645		goto exit;
4646	}
4647	amdgpu_fence_driver_hw_init(adev);
4648
4649	if (!adev->in_s0ix) {
4650		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4651		if (r)
4652			goto exit;
4653	}
4654
4655	r = amdgpu_device_ip_late_init(adev);
4656	if (r)
4657		goto exit;
4658
4659	queue_delayed_work(system_wq, &adev->delayed_init_work,
4660			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4661exit:
4662	if (amdgpu_sriov_vf(adev)) {
4663		amdgpu_virt_init_data_exchange(adev);
4664		amdgpu_virt_release_full_gpu(adev, true);
4665	}
4666
4667	if (r)
4668		return r;
4669
4670	/* Make sure IB tests flushed */
4671	flush_delayed_work(&adev->delayed_init_work);
4672
4673	if (fbcon)
4674		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4675
4676	amdgpu_ras_resume(adev);
4677
4678	if (adev->mode_info.num_crtc) {
4679		/*
4680		 * Most of the connector probing functions try to acquire runtime pm
4681		 * refs to ensure that the GPU is powered on when connector polling is
4682		 * performed. Since we're calling this from a runtime PM callback,
4683		 * trying to acquire rpm refs will cause us to deadlock.
4684		 *
4685		 * Since we're guaranteed to be holding the rpm lock, it's safe to
4686		 * temporarily disable the rpm helpers so this doesn't deadlock us.
4687		 */
4688#ifdef CONFIG_PM
4689		dev->dev->power.disable_depth++;
4690#endif
4691		if (!adev->dc_enabled)
4692			drm_helper_hpd_irq_event(dev);
4693		else
4694			drm_kms_helper_hotplug_event(dev);
4695#ifdef CONFIG_PM
4696		dev->dev->power.disable_depth--;
4697#endif
4698	}
4699	adev->in_suspend = false;
4700
4701	if (adev->enable_mes)
4702		amdgpu_mes_self_test(adev);
4703
4704	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4705		DRM_WARN("smart shift update failed\n");
4706
4707	return 0;
4708}
4709
4710/**
4711 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4712 *
4713 * @adev: amdgpu_device pointer
4714 *
4715 * The list of all the hardware IPs that make up the asic is walked and
4716 * the check_soft_reset callbacks are run.  check_soft_reset determines
4717 * if the asic is still hung or not.
4718 * Returns true if any of the IPs are still in a hung state, false if not.
4719 */
4720static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4721{
4722	int i;
4723	bool asic_hang = false;
4724
4725	if (amdgpu_sriov_vf(adev))
4726		return true;
4727
4728	if (amdgpu_asic_need_full_reset(adev))
4729		return true;
4730
4731	for (i = 0; i < adev->num_ip_blocks; i++) {
4732		if (!adev->ip_blocks[i].status.valid)
4733			continue;
4734		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4735			adev->ip_blocks[i].status.hang =
4736				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4737		if (adev->ip_blocks[i].status.hang) {
4738			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4739			asic_hang = true;
4740		}
4741	}
4742	return asic_hang;
4743}
4744
4745/**
4746 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4747 *
4748 * @adev: amdgpu_device pointer
4749 *
4750 * The list of all the hardware IPs that make up the asic is walked and the
4751 * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4752 * handles any IP specific hardware or software state changes that are
4753 * necessary for a soft reset to succeed.
4754 * Returns 0 on success, negative error code on failure.
4755 */
4756static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4757{
4758	int i, r = 0;
4759
4760	for (i = 0; i < adev->num_ip_blocks; i++) {
4761		if (!adev->ip_blocks[i].status.valid)
4762			continue;
4763		if (adev->ip_blocks[i].status.hang &&
4764		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4765			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4766			if (r)
4767				return r;
4768		}
4769	}
4770
4771	return 0;
4772}
4773
4774/**
4775 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4776 *
4777 * @adev: amdgpu_device pointer
4778 *
4779 * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4780 * reset is necessary to recover.
4781 * Returns true if a full asic reset is required, false if not.
4782 */
4783static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4784{
4785	int i;
4786
4787	if (amdgpu_asic_need_full_reset(adev))
4788		return true;
4789
4790	for (i = 0; i < adev->num_ip_blocks; i++) {
4791		if (!adev->ip_blocks[i].status.valid)
4792			continue;
4793		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4794		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4795		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4796		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4797		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4798			if (adev->ip_blocks[i].status.hang) {
4799				dev_info(adev->dev, "Some blocks need a full reset!\n");
4800				return true;
4801			}
4802		}
4803	}
4804	return false;
4805}
4806
4807/**
4808 * amdgpu_device_ip_soft_reset - do a soft reset
4809 *
4810 * @adev: amdgpu_device pointer
4811 *
4812 * The list of all the hardware IPs that make up the asic is walked and the
4813 * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4814 * IP specific hardware or software state changes that are necessary to soft
4815 * reset the IP.
4816 * Returns 0 on success, negative error code on failure.
4817 */
4818static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4819{
4820	int i, r = 0;
4821
4822	for (i = 0; i < adev->num_ip_blocks; i++) {
4823		if (!adev->ip_blocks[i].status.valid)
4824			continue;
4825		if (adev->ip_blocks[i].status.hang &&
4826		    adev->ip_blocks[i].version->funcs->soft_reset) {
4827			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4828			if (r)
4829				return r;
4830		}
4831	}
4832
4833	return 0;
4834}
4835
4836/**
4837 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4838 *
4839 * @adev: amdgpu_device pointer
4840 *
4841 * The list of all the hardware IPs that make up the asic is walked and the
4842 * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4843 * handles any IP specific hardware or software state changes that are
4844 * necessary after the IP has been soft reset.
4845 * Returns 0 on success, negative error code on failure.
4846 */
4847static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4848{
4849	int i, r = 0;
4850
4851	for (i = 0; i < adev->num_ip_blocks; i++) {
4852		if (!adev->ip_blocks[i].status.valid)
4853			continue;
4854		if (adev->ip_blocks[i].status.hang &&
4855		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4856			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4857		if (r)
4858			return r;
4859	}
4860
4861	return 0;
4862}
4863
4864/**
4865 * amdgpu_device_recover_vram - Recover some VRAM contents
4866 *
4867 * @adev: amdgpu_device pointer
4868 *
4869 * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4870 * restore things like GPUVM page tables after a GPU reset where
4871 * the contents of VRAM might be lost.
4872 *
4873 * Returns:
4874 * 0 on success, negative error code on failure.
4875 */
4876static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4877{
4878	struct dma_fence *fence = NULL, *next = NULL;
4879	struct amdgpu_bo *shadow;
4880	struct amdgpu_bo_vm *vmbo;
4881	long r = 1, tmo;
4882
4883	if (amdgpu_sriov_runtime(adev))
4884		tmo = msecs_to_jiffies(8000);
4885	else
4886		tmo = msecs_to_jiffies(100);
4887
4888	dev_info(adev->dev, "recover vram bo from shadow start\n");
4889	mutex_lock(&adev->shadow_list_lock);
4890	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4891		/* If vm is compute context or adev is APU, shadow will be NULL */
4892		if (!vmbo->shadow)
4893			continue;
4894		shadow = vmbo->shadow;
4895
4896		/* No need to recover an evicted BO */
4897		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4898		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4899		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4900			continue;
4901
4902		r = amdgpu_bo_restore_shadow(shadow, &next);
4903		if (r)
4904			break;
4905
4906		if (fence) {
4907			tmo = dma_fence_wait_timeout(fence, false, tmo);
4908			dma_fence_put(fence);
4909			fence = next;
4910			if (tmo == 0) {
4911				r = -ETIMEDOUT;
4912				break;
4913			} else if (tmo < 0) {
4914				r = tmo;
4915				break;
4916			}
4917		} else {
4918			fence = next;
4919		}
4920	}
4921	mutex_unlock(&adev->shadow_list_lock);
4922
4923	if (fence)
4924		tmo = dma_fence_wait_timeout(fence, false, tmo);
4925	dma_fence_put(fence);
4926
4927	if (r < 0 || tmo <= 0) {
4928		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4929		return -EIO;
4930	}
4931
4932	dev_info(adev->dev, "recover vram bo from shadow done\n");
4933	return 0;
4934}
4935
4936
4937/**
4938 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4939 *
4940 * @adev: amdgpu_device pointer
4941 * @from_hypervisor: request from hypervisor
4942 *
4943 * Do a VF FLR and reinitialize the ASIC.
4944 * Returns 0 on success, negative error code on failure.
4945 */
4946static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4947				     bool from_hypervisor)
4948{
4949	int r;
4950	struct amdgpu_hive_info *hive = NULL;
4951	int retry_limit = 0;
4952
4953retry:
4954	amdgpu_amdkfd_pre_reset(adev);
4955
4956	if (from_hypervisor)
4957		r = amdgpu_virt_request_full_gpu(adev, true);
4958	else
4959		r = amdgpu_virt_reset_gpu(adev);
4960	if (r)
4961		return r;
4962	amdgpu_irq_gpu_reset_resume_helper(adev);
4963
4964	/* some software cleanup the VF needs to do before recovery */
4965	amdgpu_virt_post_reset(adev);
4966
4967	/* Resume IP prior to SMC */
4968	r = amdgpu_device_ip_reinit_early_sriov(adev);
4969	if (r)
4970		goto error;
4971
4972	amdgpu_virt_init_data_exchange(adev);
4973
4974	r = amdgpu_device_fw_loading(adev);
4975	if (r)
4976		return r;
4977
4978	/* now we are okay to resume SMC/CP/SDMA */
4979	r = amdgpu_device_ip_reinit_late_sriov(adev);
4980	if (r)
4981		goto error;
4982
4983	hive = amdgpu_get_xgmi_hive(adev);
4984	/* Update PSP FW topology after reset */
4985	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4986		r = amdgpu_xgmi_update_topology(hive, adev);
4987
4988	if (hive)
4989		amdgpu_put_xgmi_hive(hive);
4990
4991	if (!r) {
4992		r = amdgpu_ib_ring_tests(adev);
4993
4994		amdgpu_amdkfd_post_reset(adev);
4995	}
4996
4997error:
4998	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4999		amdgpu_inc_vram_lost(adev);
5000		r = amdgpu_device_recover_vram(adev);
5001	}
5002	amdgpu_virt_release_full_gpu(adev, true);
5003
5004	if (AMDGPU_RETRY_SRIOV_RESET(r)) {
5005		if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
5006			retry_limit++;
5007			goto retry;
5008		} else
5009			DRM_ERROR("GPU reset retry is beyond the retry limit\n");
5010	}
5011
5012	return r;
5013}
5014
5015/**
5016 * amdgpu_device_has_job_running - check if there is any running job
5017 *
5018 * @adev: amdgpu_device pointer
5019 *
5020 * Check if any ring still has a job in its pending list.
5021 */
5022bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
5023{
5024	int i;
5025	struct drm_sched_job *job;
5026
5027	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5028		struct amdgpu_ring *ring = adev->rings[i];
5029
5030		if (!amdgpu_ring_sched_ready(ring))
5031			continue;
5032
5033		spin_lock(&ring->sched.job_list_lock);
5034		job = list_first_entry_or_null(&ring->sched.pending_list,
5035					       struct drm_sched_job, list);
5036		spin_unlock(&ring->sched.job_list_lock);
5037		if (job)
5038			return true;
5039	}
5040	return false;
5041}
5042
5043/**
5044 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
5045 *
5046 * @adev: amdgpu_device pointer
5047 *
5048 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
5049 * a hung GPU.
5050 */
5051bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
5052{
5053
5054	if (amdgpu_gpu_recovery == 0)
5055		goto disabled;
5056
5057	/* Skip soft reset check in fatal error mode */
5058	if (!amdgpu_ras_is_poison_mode_supported(adev))
5059		return true;
5060
5061	if (amdgpu_sriov_vf(adev))
5062		return true;
5063
5064	if (amdgpu_gpu_recovery == -1) {
5065		switch (adev->asic_type) {
5066#ifdef CONFIG_DRM_AMDGPU_SI
5067		case CHIP_VERDE:
5068		case CHIP_TAHITI:
5069		case CHIP_PITCAIRN:
5070		case CHIP_OLAND:
5071		case CHIP_HAINAN:
5072#endif
5073#ifdef CONFIG_DRM_AMDGPU_CIK
5074		case CHIP_KAVERI:
5075		case CHIP_KABINI:
5076		case CHIP_MULLINS:
5077#endif
5078		case CHIP_CARRIZO:
5079		case CHIP_STONEY:
5080		case CHIP_CYAN_SKILLFISH:
5081			goto disabled;
5082		default:
5083			break;
5084		}
5085	}
5086
5087	return true;
5088
5089disabled:
5090	dev_info(adev->dev, "GPU recovery disabled.\n");
5091	return false;
5092}
5093
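/**
 * amdgpu_device_mode1_reset - perform a mode1 (full ASIC) reset
 *
 * @adev: amdgpu_device pointer
 *
 * Disables bus mastering, caches the PCI config space, triggers the
 * reset through either the SMU or the PSP, then restores the config
 * space and waits for the ASIC to come back out of reset.
 * Returns 0 for success or an error on failure.
 */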
5094int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
5095{
5096	u32 i;
5097	int ret = 0;
5098
5099	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
5100
5101	dev_info(adev->dev, "GPU mode1 reset\n");
5102
5103	/* disable BM */
5104	pci_clear_master(adev->pdev);
5105
5106	amdgpu_device_cache_pci_state(adev->pdev);
5107
5108	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
5109		dev_info(adev->dev, "GPU smu mode1 reset\n");
5110		ret = amdgpu_dpm_mode1_reset(adev);
5111	} else {
5112		dev_info(adev->dev, "GPU psp mode1 reset\n");
5113		ret = psp_gpu_reset(adev);
5114	}
5115
5116	if (ret)
5117		goto mode1_reset_failed;
5118
5119	amdgpu_device_load_pci_state(adev->pdev);
5120	ret = amdgpu_psp_wait_for_bootloader(adev);
5121	if (ret)
5122		goto mode1_reset_failed;
5123
5124	/* wait for asic to come out of reset */
5125	for (i = 0; i < adev->usec_timeout; i++) {
5126		u32 memsize = adev->nbio.funcs->get_memsize(adev);
5127
5128		if (memsize != 0xffffffff)
5129			break;
5130		udelay(1);
5131	}
5132
5133	if (i >= adev->usec_timeout) {
5134		ret = -ETIMEDOUT;
5135		goto mode1_reset_failed;
5136	}
5137
5138	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
5139
5140	return 0;
5141
5142mode1_reset_failed:
5143	dev_err(adev->dev, "GPU mode1 reset failed\n");
5144	return ret;
5145}
5146
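/**
 * amdgpu_device_pre_asic_reset - prepare for an ASIC reset
 *
 * @adev: amdgpu_device pointer
 * @reset_context: amdgpu reset context pointer
 *
 * Quiesces the fence drivers, force-completes outstanding hardware
 * fences and, on bare metal, attempts a soft reset first, falling back
 * to suspending the IP blocks when a full reset is needed.
 * Returns 0 for success or an error on failure.
 */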
5147int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
5148				 struct amdgpu_reset_context *reset_context)
5149{
5150	int i, r = 0;
5151	struct amdgpu_job *job = NULL;
5152	bool need_full_reset =
5153		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5154
5155	if (reset_context->reset_req_dev == adev)
5156		job = reset_context->job;
5157
5158	if (amdgpu_sriov_vf(adev)) {
5159		/* stop the data exchange thread */
5160		amdgpu_virt_fini_data_exchange(adev);
5161	}
5162
5163	amdgpu_fence_driver_isr_toggle(adev, true);
5164
5165	/* block all schedulers and reset given job's ring */
5166	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5167		struct amdgpu_ring *ring = adev->rings[i];
5168
5169		if (!amdgpu_ring_sched_ready(ring))
5170			continue;
5171
5172		/* Clear job fences from fence drv to avoid force_completion
5173		 * leaving NULL and vm flush fences in fence drv
5174		 */
5175		amdgpu_fence_driver_clear_job_fences(ring);
5176
5177		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
5178		amdgpu_fence_driver_force_completion(ring);
5179	}
5180
5181	amdgpu_fence_driver_isr_toggle(adev, false);
5182
5183	if (job && job->vm)
5184		drm_sched_increase_karma(&job->base);
5185
5186	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
5187	/* If reset handler not implemented, continue; otherwise return */
5188	if (r == -EOPNOTSUPP)
5189		r = 0;
5190	else
5191		return r;
5192
5193	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
5194	if (!amdgpu_sriov_vf(adev)) {
5195
5196		if (!need_full_reset)
5197			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
5198
5199		if (!need_full_reset && amdgpu_gpu_recovery &&
5200		    amdgpu_device_ip_check_soft_reset(adev)) {
5201			amdgpu_device_ip_pre_soft_reset(adev);
5202			r = amdgpu_device_ip_soft_reset(adev);
5203			amdgpu_device_ip_post_soft_reset(adev);
5204			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5205				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
5206				need_full_reset = true;
5207			}
5208		}
5209
5210		if (need_full_reset)
5211			r = amdgpu_device_ip_suspend(adev);
5212		if (need_full_reset)
5213			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5214		else
5215			clear_bit(AMDGPU_NEED_FULL_RESET,
5216				  &reset_context->flags);
5217	}
5218
5219	return r;
5220}
5221
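/*
 * Snapshot the configured reset dump registers before the reset so their
 * pre-reset values can be inspected afterwards; the caller must hold the
 * reset domain semaphore.
 */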
5222static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
5223{
5224	int i;
5225
5226	lockdep_assert_held(&adev->reset_domain->sem);
5227
5228	for (i = 0; i < adev->reset_info.num_regs; i++) {
5229		adev->reset_info.reset_dump_reg_value[i] =
5230			RREG32(adev->reset_info.reset_dump_reg_list[i]);
5231
5232		trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i],
5233					     adev->reset_info.reset_dump_reg_value[i]);
5234	}
5235
5236	return 0;
5237}
5238
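/**
 * amdgpu_do_asic_reset - reset and reinitialize a list of devices
 *
 * @device_list_handle: list of devices to reset (the whole hive for XGMI)
 * @reset_context: amdgpu reset context pointer
 *
 * Tries a dedicated reset handler first; if none is implemented, falls
 * back to the default full reset and resume sequence for each device.
 * Returns 0 for success or an error on failure.
 */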
5239int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5240			 struct amdgpu_reset_context *reset_context)
5241{
5242	struct amdgpu_device *tmp_adev = NULL;
5243	bool need_full_reset, skip_hw_reset, vram_lost = false;
5244	int r = 0;
5245
5246	/* Try reset handler method first */
5247	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5248				    reset_list);
5249	amdgpu_reset_reg_dumps(tmp_adev);
5250
5251	reset_context->reset_device_list = device_list_handle;
5252	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5253	/* If reset handler not implemented, continue; otherwise return */
5254	if (r == -EOPNOTSUPP)
5255		r = 0;
5256	else
5257		return r;
5258
5259	/* Reset handler not implemented, use the default method */
5260	need_full_reset =
5261		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5262	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5263
5264	/*
5265	 * ASIC reset has to be done on all XGMI hive nodes ASAP
5266	 * to allow proper link negotiation in FW (within 1 sec)
5267	 */
5268	if (!skip_hw_reset && need_full_reset) {
5269		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5270			/* For XGMI run all resets in parallel to speed up the process */
5271			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5272				tmp_adev->gmc.xgmi.pending_reset = false;
5273				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
5274					r = -EALREADY;
5275			} else
5276				r = amdgpu_asic_reset(tmp_adev);
5277
5278			if (r) {
5279				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
5280					 r, adev_to_drm(tmp_adev)->unique);
5281				goto out;
5282			}
5283		}
5284
5285		/* For XGMI wait for all resets to complete before proceed */
5286		if (!r) {
5287			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5288				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5289					flush_work(&tmp_adev->xgmi_reset_work);
5290					r = tmp_adev->asic_reset_res;
5291					if (r)
5292						break;
5293				}
5294			}
5295		}
5296	}
5297
5298	if (!r && amdgpu_ras_intr_triggered()) {
5299		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5300			amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB);
5301		}
5302
5303		amdgpu_ras_intr_cleared();
5304	}
5305
5306	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5307		if (need_full_reset) {
5308			/* post card */
5309			r = amdgpu_device_asic_init(tmp_adev);
5310			if (r) {
5311				dev_warn(tmp_adev->dev, "asic atom init failed!");
5312			} else {
5313				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5314
5315				r = amdgpu_device_ip_resume_phase1(tmp_adev);
5316				if (r)
5317					goto out;
5318
5319				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5320
5321				amdgpu_coredump(tmp_adev, vram_lost, reset_context);
5322
5323				if (vram_lost) {
5324					DRM_INFO("VRAM is lost due to GPU reset!\n");
5325					amdgpu_inc_vram_lost(tmp_adev);
5326				}
5327
5328				r = amdgpu_device_fw_loading(tmp_adev);
5329				if (r)
5330					return r;
5331
5332				r = amdgpu_xcp_restore_partition_mode(
5333					tmp_adev->xcp_mgr);
5334				if (r)
5335					goto out;
5336
5337				r = amdgpu_device_ip_resume_phase2(tmp_adev);
5338				if (r)
5339					goto out;
5340
5341				if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
5342					amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
5343
5344				if (vram_lost)
5345					amdgpu_device_fill_reset_magic(tmp_adev);
5346
5347				/*
5348				 * Add this ASIC back as tracked, since the reset
5349				 * already completed successfully.
5350				 */
5351				amdgpu_register_gpu_instance(tmp_adev);
5352
5353				if (!reset_context->hive &&
5354				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5355					amdgpu_xgmi_add_device(tmp_adev);
5356
5357				r = amdgpu_device_ip_late_init(tmp_adev);
5358				if (r)
5359					goto out;
5360
5361				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
5362
5363				/*
5364				 * The GPU enters a bad state once the number of
5365				 * faulty pages found by ECC reaches the
5366				 * threshold, and ras recovery is scheduled next.
5367				 * So check here and break out of recovery if the
5368				 * bad page threshold has indeed been exceeded,
5369				 * and remind the user to either retire this GPU
5370				 * or set a bigger bad_page_threshold before
5371				 * probing the driver again.
5372				 */
5373				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
5374					/* must succeed. */
5375					amdgpu_ras_resume(tmp_adev);
5376				} else {
5377					r = -EINVAL;
5378					goto out;
5379				}
5380
5381				/* Update PSP FW topology after reset */
5382				if (reset_context->hive &&
5383				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5384					r = amdgpu_xgmi_update_topology(
5385						reset_context->hive, tmp_adev);
5386			}
5387		}
5388
5389out:
5390		if (!r) {
5391			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5392			r = amdgpu_ib_ring_tests(tmp_adev);
5393			if (r) {
5394				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5395				need_full_reset = true;
5396				r = -EAGAIN;
5397				goto end;
5398			}
5399		}
5400
5401		if (!r)
5402			r = amdgpu_device_recover_vram(tmp_adev);
5403		else
5404			tmp_adev->asic_reset_res = r;
5405	}
5406
5407end:
5408	if (need_full_reset)
5409		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5410	else
5411		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5412	return r;
5413}
5414
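/*
 * Tell the SMU which kind of reset is about to happen so MP1 can be put
 * in the matching state (shutdown for mode1, reset for mode2).
 */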
5415static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5416{
5417
5418	switch (amdgpu_asic_reset_method(adev)) {
5419	case AMD_RESET_METHOD_MODE1:
5420		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5421		break;
5422	case AMD_RESET_METHOD_MODE2:
5423		adev->mp1_state = PP_MP1_STATE_RESET;
5424		break;
5425	default:
5426		adev->mp1_state = PP_MP1_STATE_NONE;
5427		break;
5428	}
5429}
5430
5431static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5432{
5433	amdgpu_vf_error_trans_all(adev);
5434	adev->mp1_state = PP_MP1_STATE_NONE;
5435}
5436
5437static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5438{
5439	struct pci_dev *p = NULL;
5440
5441	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5442			adev->pdev->bus->number, 1);
5443	if (p) {
5444		pm_runtime_enable(&(p->dev));
5445		pm_runtime_resume(&(p->dev));
5446	}
5447
5448	pci_dev_put(p);
5449}
5450
5451static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5452{
5453	enum amd_reset_method reset_method;
5454	struct pci_dev *p = NULL;
5455	u64 expires;
5456
5457	/*
5458	 * For now, only BACO and mode1 reset are confirmed to suffer
5459	 * from the audio issue if the audio device is not properly suspended.
5460	 */
5461	reset_method = amdgpu_asic_reset_method(adev);
5462	if ((reset_method != AMD_RESET_METHOD_BACO) &&
5463	     (reset_method != AMD_RESET_METHOD_MODE1))
5464		return -EINVAL;
5465
5466	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5467			adev->pdev->bus->number, 1);
5468	if (!p)
5469		return -ENODEV;
5470
5471	expires = pm_runtime_autosuspend_expiration(&(p->dev));
5472	if (!expires)
5473		/*
5474		 * If we cannot get the audio device autosuspend delay,
5475		 * a fixed 4s interval will be used. Since 3s is the audio
5476		 * controller's default autosuspend delay, the 4s used here
5477		 * is guaranteed to cover it.
5478		 */
5479		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5480
5481	while (!pm_runtime_status_suspended(&(p->dev))) {
5482		if (!pm_runtime_suspend(&(p->dev)))
5483			break;
5484
5485		if (expires < ktime_get_mono_fast_ns()) {
5486			dev_warn(adev->dev, "failed to suspend display audio\n");
5487			pci_dev_put(p);
5488			/* TODO: abort the succeeding gpu reset? */
5489			return -ETIMEDOUT;
5490		}
5491	}
5492
5493	pm_runtime_disable(&(p->dev));
5494
5495	pci_dev_put(p);
5496	return 0;
5497}
5498
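/*
 * Cancel any reset work that is still queued so that only the recovery
 * currently in progress touches the hardware.
 */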
5499static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5500{
5501	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5502
5503#if defined(CONFIG_DEBUG_FS)
5504	if (!amdgpu_sriov_vf(adev))
5505		cancel_work(&adev->reset_work);
5506#endif
5507
5508	if (adev->kfd.dev)
5509		cancel_work(&adev->kfd.reset_work);
5510
5511	if (amdgpu_sriov_vf(adev))
5512		cancel_work(&adev->virt.flr_work);
5513
5514	if (con && adev->ras_enabled)
5515		cancel_work(&con->recovery_work);
5516
5517}
5518
5519/**
5520 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5521 *
5522 * @adev: amdgpu_device pointer
5523 * @job: the job that triggered the hang
5524 * @reset_context: amdgpu reset context pointer
5525 *
5526 * Attempt to reset the GPU if it has hung (all asics).
5527 * Attempt to do a soft reset or full reset and reinitialize the ASIC.
5528 * Returns 0 for success or an error on failure.
5529 */
5530
5531int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5532			      struct amdgpu_job *job,
5533			      struct amdgpu_reset_context *reset_context)
5534{
5535	struct list_head device_list, *device_list_handle =  NULL;
5536	bool job_signaled = false;
5537	struct amdgpu_hive_info *hive = NULL;
5538	struct amdgpu_device *tmp_adev = NULL;
5539	int i, r = 0;
5540	bool need_emergency_restart = false;
5541	bool audio_suspended = false;
5542
5543	/*
5544	 * Special case: RAS triggered and full reset isn't supported
5545	 */
5546	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5547
5548	/*
5549	 * Flush RAM to disk so that after reboot
5550	 * the user can read the log and see why the system rebooted.
5551	 */
5552	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
5553		amdgpu_ras_get_context(adev)->reboot) {
5554		DRM_WARN("Emergency reboot.");
5555
5556		ksys_sync_helper();
5557		emergency_restart();
5558	}
5559
5560	dev_info(adev->dev, "GPU %s begin!\n",
5561		need_emergency_restart ? "jobs stop":"reset");
5562
5563	if (!amdgpu_sriov_vf(adev))
5564		hive = amdgpu_get_xgmi_hive(adev);
5565	if (hive)
5566		mutex_lock(&hive->hive_lock);
5567
5568	reset_context->job = job;
5569	reset_context->hive = hive;
5570	/*
5571	 * Build list of devices to reset.
5572	 * In case we are in XGMI hive mode, resort the device list
5573	 * to put adev in the 1st position.
5574	 */
5575	INIT_LIST_HEAD(&device_list);
5576	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5577		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5578			list_add_tail(&tmp_adev->reset_list, &device_list);
5579			if (adev->shutdown)
5580				tmp_adev->shutdown = true;
5581		}
5582		if (!list_is_first(&adev->reset_list, &device_list))
5583			list_rotate_to_front(&adev->reset_list, &device_list);
5584		device_list_handle = &device_list;
5585	} else {
5586		list_add_tail(&adev->reset_list, &device_list);
5587		device_list_handle = &device_list;
5588	}
5589
5590	/* We need to lock reset domain only once both for XGMI and single device */
5591	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5592				    reset_list);
5593	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5594
5595	/* block all schedulers and reset given job's ring */
5596	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5597
5598		amdgpu_device_set_mp1_state(tmp_adev);
5599
5600		/*
5601		 * Try to put the audio codec into suspend state
5602		 * before gpu reset started.
5603		 *
5604		 * The power domain of the graphics device is shared
5605		 * with the AZ (audio) power domain. Without this,
5606		 * we may change the audio hardware from behind
5607		 * the audio driver's back and trigger
5608		 * audio codec errors.
5609		 */
5610		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5611			audio_suspended = true;
5612
5613		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5614
5615		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5616
5617		if (!amdgpu_sriov_vf(tmp_adev))
5618			amdgpu_amdkfd_pre_reset(tmp_adev);
5619
5620		/*
5621		 * Mark these ASICs to be reset as untracked first,
5622		 * and add them back after the reset completes
5623		 */
5624		amdgpu_unregister_gpu_instance(tmp_adev);
5625
5626		drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5627
5628		/* disable ras on ALL IPs */
5629		if (!need_emergency_restart &&
5630		      amdgpu_device_ip_need_full_reset(tmp_adev))
5631			amdgpu_ras_suspend(tmp_adev);
5632
5633		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5634			struct amdgpu_ring *ring = tmp_adev->rings[i];
5635
5636			if (!amdgpu_ring_sched_ready(ring))
5637				continue;
5638
5639			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5640
5641			if (need_emergency_restart)
5642				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5643		}
5644		atomic_inc(&tmp_adev->gpu_reset_counter);
5645	}
5646
5647	if (need_emergency_restart)
5648		goto skip_sched_resume;
5649
5650	/*
5651	 * Must check guilty signal here since after this point all old
5652	 * HW fences are force signaled.
5653	 *
5654	 * job->base holds a reference to parent fence
5655	 */
5656	if (job && dma_fence_is_signaled(&job->hw_fence)) {
5657		job_signaled = true;
5658		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5659		goto skip_hw_reset;
5660	}
5661
5662retry:	/* Rest of adevs pre asic reset from XGMI hive. */
5663	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5664		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5665		/* TODO: should we stop here? */
5666		if (r) {
5667			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5668				  r, adev_to_drm(tmp_adev)->unique);
5669			tmp_adev->asic_reset_res = r;
5670		}
5671
5672		/*
5673		 * Drop all pending non scheduler resets. Scheduler resets
5674		 * were already dropped during drm_sched_stop
5675		 */
5676		amdgpu_device_stop_pending_resets(tmp_adev);
5677	}
5678
5679	/* Actual ASIC resets if needed.*/
5680	/* Host driver will handle XGMI hive reset for SRIOV */
5681	if (amdgpu_sriov_vf(adev)) {
5682		r = amdgpu_device_reset_sriov(adev, job ? false : true);
5683		if (r)
5684			adev->asic_reset_res = r;
5685
5686		/* Aldebaran and gfx_11_0_3 support ras in SRIOV, so ras needs to be resumed during reset */
5687		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
5688			    IP_VERSION(9, 4, 2) ||
5689		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
5690			amdgpu_ras_resume(adev);
5691	} else {
5692		r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5693		if (r == -EAGAIN)
5694			goto retry;
5695	}
5696
5697skip_hw_reset:
5698
5699	/* Post ASIC reset for all devs. */
5700	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5701
5702		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5703			struct amdgpu_ring *ring = tmp_adev->rings[i];
5704
5705			if (!amdgpu_ring_sched_ready(ring))
5706				continue;
5707
5708			drm_sched_start(&ring->sched, true);
5709		}
5710
5711		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
5712			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5713
5714		if (tmp_adev->asic_reset_res)
5715			r = tmp_adev->asic_reset_res;
5716
5717		tmp_adev->asic_reset_res = 0;
5718
5719		if (r) {
5720			/* bad news, how to tell it to userspace ? */
5721			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5722			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5723		} else {
5724			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5725			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5726				DRM_WARN("smart shift update failed\n");
5727		}
5728	}
5729
5730skip_sched_resume:
5731	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5732		/* unlock kfd: SRIOV would do it separately */
5733		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5734			amdgpu_amdkfd_post_reset(tmp_adev);
5735
5736		/* kfd_post_reset will do nothing if kfd device is not initialized,
5737		 * so bring up kfd here if it was not initialized before
5738		 */
5739		if (!adev->kfd.init_complete)
5740			amdgpu_amdkfd_device_init(adev);
5741
5742		if (audio_suspended)
5743			amdgpu_device_resume_display_audio(tmp_adev);
5744
5745		amdgpu_device_unset_mp1_state(tmp_adev);
5746
5747		amdgpu_ras_set_error_query_ready(tmp_adev, true);
5748	}
5749
5750	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5751					    reset_list);
5752	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5753
5754	if (hive) {
5755		mutex_unlock(&hive->hive_lock);
5756		amdgpu_put_xgmi_hive(hive);
5757	}
5758
5759	if (r)
5760		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5761
5762	atomic_set(&adev->reset_domain->reset_res, r);
5763	return r;
5764}
5765
5766/**
5767 * amdgpu_device_partner_bandwidth - find the bandwidth of the appropriate partner
5768 *
5769 * @adev: amdgpu_device pointer
5770 * @speed: pointer to the speed of the link
5771 * @width: pointer to the width of the link
5772 *
5773 * Evaluate the hierarchy to find the speed and width capabilities of the
5774 * first physical partner to an AMD dGPU.
5775 * This will exclude any virtual switches and links.
5776 */
5777static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
5778					    enum pci_bus_speed *speed,
5779					    enum pcie_link_width *width)
5780{
5781	struct pci_dev *parent = adev->pdev;
5782
5783	if (!speed || !width)
5784		return;
5785
5786	*speed = PCI_SPEED_UNKNOWN;
5787	*width = PCIE_LNK_WIDTH_UNKNOWN;
5788
5789	while ((parent = pci_upstream_bridge(parent))) {
5790		/* skip upstream/downstream switches internal to dGPU */
5791		if (parent->vendor == PCI_VENDOR_ID_ATI)
5792			continue;
5793		*speed = pcie_get_speed_cap(parent);
5794		*width = pcie_get_width_cap(parent);
5795		break;
5796	}
5797}
5798
5799/**
5800 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5801 *
5802 * @adev: amdgpu_device pointer
5803 *
5804 * Fetches and stores in the driver the PCIE capabilities (gen speed
5805 * and lanes) of the slot the device is in. Handles APUs and
5806 * virtualized environments where PCIE config space may not be available.
5807 */
5808static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5809{
5810	struct pci_dev *pdev;
5811	enum pci_bus_speed speed_cap, platform_speed_cap;
5812	enum pcie_link_width platform_link_width;
5813
5814	if (amdgpu_pcie_gen_cap)
5815		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5816
5817	if (amdgpu_pcie_lane_cap)
5818		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5819
5820	/* covers APUs as well */
5821	if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5822		if (adev->pm.pcie_gen_mask == 0)
5823			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5824		if (adev->pm.pcie_mlw_mask == 0)
5825			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5826		return;
5827	}
5828
5829	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5830		return;
5831
5832	amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
5833					&platform_link_width);
5834
5835	if (adev->pm.pcie_gen_mask == 0) {
5836		/* asic caps */
5837		pdev = adev->pdev;
5838		speed_cap = pcie_get_speed_cap(pdev);
5839		if (speed_cap == PCI_SPEED_UNKNOWN) {
5840			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5841						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5842						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5843		} else {
5844			if (speed_cap == PCIE_SPEED_32_0GT)
5845				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5846							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5847							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5848							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5849							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5850			else if (speed_cap == PCIE_SPEED_16_0GT)
5851				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5852							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5853							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5854							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5855			else if (speed_cap == PCIE_SPEED_8_0GT)
5856				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5857							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5858							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5859			else if (speed_cap == PCIE_SPEED_5_0GT)
5860				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5861							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5862			else
5863				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5864		}
5865		/* platform caps */
5866		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5867			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5868						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5869		} else {
5870			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5871				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5872							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5873							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5874							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5875							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5876			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5877				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5878							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5879							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5880							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5881			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5882				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5883							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5884							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5885			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5886				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5887							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5888			else
5889				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5890
5891		}
5892	}
5893	if (adev->pm.pcie_mlw_mask == 0) {
5894		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5895			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5896		} else {
5897			switch (platform_link_width) {
5898			case PCIE_LNK_X32:
5899				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5900							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5901							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5902							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5903							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5904							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5905							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5906				break;
5907			case PCIE_LNK_X16:
5908				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5909							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5910							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5911							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5912							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5913							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5914				break;
5915			case PCIE_LNK_X12:
5916				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5917							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5918							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5919							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5920							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5921				break;
5922			case PCIE_LNK_X8:
5923				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5924							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5925							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5926							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5927				break;
5928			case PCIE_LNK_X4:
5929				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5930							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5931							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5932				break;
5933			case PCIE_LNK_X2:
5934				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5935							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5936				break;
5937			case PCIE_LNK_X1:
5938				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5939				break;
5940			default:
5941				break;
5942			}
5943		}
5944	}
5945}
5946
5947/**
5948 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5949 *
5950 * @adev: amdgpu_device pointer
5951 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5952 *
5953 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5954 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5955 * @peer_adev.
5956 */
5957bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5958				      struct amdgpu_device *peer_adev)
5959{
5960#ifdef CONFIG_HSA_AMD_P2P
5961	uint64_t address_mask = peer_adev->dev->dma_mask ?
5962		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5963	resource_size_t aper_limit =
5964		adev->gmc.aper_base + adev->gmc.aper_size - 1;
5965	bool p2p_access =
5966		!adev->gmc.xgmi.connected_to_cpu &&
5967		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5968
5969	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5970		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5971		!(adev->gmc.aper_base & address_mask ||
5972		  aper_limit & address_mask));
5973#else
5974	return false;
5975#endif
5976}
5977
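/**
 * amdgpu_device_baco_enter - enter BACO (Bus Active, Chip Off)
 *
 * @dev: drm dev pointer
 *
 * Puts the chip into the BACO low power state, disabling doorbell
 * interrupts first when RAS is enabled.
 * Returns 0 for success or an error on failure.
 */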
5978int amdgpu_device_baco_enter(struct drm_device *dev)
5979{
5980	struct amdgpu_device *adev = drm_to_adev(dev);
5981	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5982
5983	if (!amdgpu_device_supports_baco(dev))
5984		return -ENOTSUPP;
5985
5986	if (ras && adev->ras_enabled &&
5987	    adev->nbio.funcs->enable_doorbell_interrupt)
5988		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5989
5990	return amdgpu_dpm_baco_enter(adev);
5991}
5992
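/**
 * amdgpu_device_baco_exit - exit BACO (Bus Active, Chip Off)
 *
 * @dev: drm dev pointer
 *
 * Brings the chip back out of the BACO low power state and re-enables
 * doorbell interrupts when RAS is enabled.
 * Returns 0 for success or an error on failure.
 */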
5993int amdgpu_device_baco_exit(struct drm_device *dev)
5994{
5995	struct amdgpu_device *adev = drm_to_adev(dev);
5996	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5997	int ret = 0;
5998
5999	if (!amdgpu_device_supports_baco(dev))
6000		return -ENOTSUPP;
6001
6002	ret = amdgpu_dpm_baco_exit(adev);
6003	if (ret)
6004		return ret;
6005
6006	if (ras && adev->ras_enabled &&
6007	    adev->nbio.funcs->enable_doorbell_interrupt)
6008		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
6009
6010	if (amdgpu_passthrough(adev) &&
6011	    adev->nbio.funcs->clear_doorbell_interrupt)
6012		adev->nbio.funcs->clear_doorbell_interrupt(adev);
6013
6014	return 0;
6015}
6016
6017/**
6018 * amdgpu_pci_error_detected - Called when a PCI error is detected.
6019 * @pdev: PCI device struct
6020 * @state: PCI channel state
6021 *
6022 * Description: Called when a PCI error is detected.
6023 *
6024 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
6025 */
6026pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6027{
6028	struct drm_device *dev = pci_get_drvdata(pdev);
6029	struct amdgpu_device *adev = drm_to_adev(dev);
6030	int i;
6031
6032	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
6033
6034	if (adev->gmc.xgmi.num_physical_nodes > 1) {
6035		DRM_WARN("No support for XGMI hive yet...");
6036		return PCI_ERS_RESULT_DISCONNECT;
6037	}
6038
6039	adev->pci_channel_state = state;
6040
6041	switch (state) {
6042	case pci_channel_io_normal:
6043		return PCI_ERS_RESULT_CAN_RECOVER;
6044	/* Fatal error, prepare for slot reset */
6045	case pci_channel_io_frozen:
6046		/*
6047		 * Locking adev->reset_domain->sem will prevent any external access
6048		 * to GPU during PCI error recovery
6049		 */
6050		amdgpu_device_lock_reset_domain(adev->reset_domain);
6051		amdgpu_device_set_mp1_state(adev);
6052
6053		/*
6054		 * Block any work scheduling as we do for regular GPU reset
6055		 * for the duration of the recovery
6056		 */
6057		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6058			struct amdgpu_ring *ring = adev->rings[i];
6059
6060			if (!amdgpu_ring_sched_ready(ring))
6061				continue;
6062
6063			drm_sched_stop(&ring->sched, NULL);
6064		}
6065		atomic_inc(&adev->gpu_reset_counter);
6066		return PCI_ERS_RESULT_NEED_RESET;
6067	case pci_channel_io_perm_failure:
6068		/* Permanent error, prepare for device removal */
6069		return PCI_ERS_RESULT_DISCONNECT;
6070	}
6071
6072	return PCI_ERS_RESULT_NEED_RESET;
6073}
6074
6075/**
6076 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
6077 * @pdev: pointer to PCI device
6078 */
6079pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
6080{
6081
6082	DRM_INFO("PCI error: mmio enabled callback!!\n");
6083
6084	/* TODO - dump whatever for debugging purposes */
6085
6086	/* This is called only if amdgpu_pci_error_detected returns
6087	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
6088	 * works, no need to reset slot.
6089	 */
6090
6091	return PCI_ERS_RESULT_RECOVERED;
6092}
6093
6094/**
6095 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
6096 * @pdev: PCI device struct
6097 *
6098 * Description: This routine is called by the pci error recovery
6099 * code after the PCI slot has been reset, just before we
6100 * should resume normal operations.
6101 */
6102pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
6103{
6104	struct drm_device *dev = pci_get_drvdata(pdev);
6105	struct amdgpu_device *adev = drm_to_adev(dev);
6106	int r, i;
6107	struct amdgpu_reset_context reset_context;
6108	u32 memsize;
6109	struct list_head device_list;
6110
6111	DRM_INFO("PCI error: slot reset callback!!\n");
6112
6113	memset(&reset_context, 0, sizeof(reset_context));
6114
6115	INIT_LIST_HEAD(&device_list);
6116	list_add_tail(&adev->reset_list, &device_list);
6117
6118	/* wait for asic to come out of reset */
6119	msleep(500);
6120
6121	/* Restore PCI confspace */
6122	amdgpu_device_load_pci_state(pdev);
6123
6124	/* confirm ASIC came out of reset */
6125	for (i = 0; i < adev->usec_timeout; i++) {
6126		memsize = amdgpu_asic_get_config_memsize(adev);
6127
6128		if (memsize != 0xffffffff)
6129			break;
6130		udelay(1);
6131	}
6132	if (memsize == 0xffffffff) {
6133		r = -ETIME;
6134		goto out;
6135	}
6136
6137	reset_context.method = AMD_RESET_METHOD_NONE;
6138	reset_context.reset_req_dev = adev;
6139	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
6140	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
6141
6142	adev->no_hw_access = true;
6143	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
6144	adev->no_hw_access = false;
6145	if (r)
6146		goto out;
6147
6148	r = amdgpu_do_asic_reset(&device_list, &reset_context);
6149
6150out:
6151	if (!r) {
6152		if (amdgpu_device_cache_pci_state(adev->pdev))
6153			pci_restore_state(adev->pdev);
6154
6155		DRM_INFO("PCIe error recovery succeeded\n");
6156	} else {
6157		DRM_ERROR("PCIe error recovery failed, err:%d", r);
6158		amdgpu_device_unset_mp1_state(adev);
6159		amdgpu_device_unlock_reset_domain(adev->reset_domain);
6160	}
6161
6162	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
6163}

/**
 * amdgpu_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that it is
 * OK to resume normal operation.
 */
void amdgpu_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: resume callback!!\n");

	/* Only continue execution for the case of pci_channel_io_frozen */
	if (adev->pci_channel_state != pci_channel_io_frozen)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!amdgpu_ring_sched_ready(ring))
			continue;

		drm_sched_start(&ring->sched, true);
	}

	amdgpu_device_unset_mp1_state(adev);
	amdgpu_device_unlock_reset_domain(adev->reset_domain);
}

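/*
 * For reference, a minimal sketch (not part of this file) of how the four
 * callbacks above are typically wired up. The struct pci_error_handlers
 * field names come from include/linux/pci.h; amdgpu installs its table
 * through the .err_handler member of its struct pci_driver in amdgpu_drv.c:
 *
 *	static const struct pci_error_handlers amdgpu_pci_err_handler = {
 *		.error_detected	= amdgpu_pci_error_detected,
 *		.mmio_enabled	= amdgpu_pci_mmio_enabled,
 *		.slot_reset	= amdgpu_pci_slot_reset,
 *		.resume		= amdgpu_pci_resume,
 *	};
 */
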
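/**
 * amdgpu_device_cache_pci_state - cache the PCI configuration space
 * @pdev: pointer to PCI device
 *
 * Saves the current PCI configuration space and stashes it in
 * adev->pci_state so that it can be restored after a reset. Any
 * previously cached state is freed first.
 *
 * Return: true on success, false otherwise.
 */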
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pci_save_state(pdev);
	if (!r) {
		kfree(adev->pci_state);

		adev->pci_state = pci_store_saved_state(pdev);

		if (!adev->pci_state) {
			DRM_ERROR("Failed to store PCI saved state\n");
			return false;
		}
	} else {
		DRM_WARN("Failed to save PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

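/**
 * amdgpu_device_load_pci_state - restore the cached PCI configuration space
 * @pdev: pointer to PCI device
 *
 * Reloads the configuration space previously saved by
 * amdgpu_device_cache_pci_state() and applies it to the device.
 *
 * Return: true on success, false if nothing was cached or the load failed.
 */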
bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (!adev->pci_state)
		return false;

	r = pci_load_saved_state(pdev, adev->pci_state);

	if (!r) {
		pci_restore_state(pdev);
	} else {
		DRM_WARN("Failed to load PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

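/**
 * amdgpu_device_flush_hdp - flush the HDP write cache
 * @adev: amdgpu_device pointer
 * @ring: ring to emit the flush on, or NULL to use the ASIC callback
 *
 * Flushes the Host Data Path block so that CPU writes become visible to
 * the GPU. The flush is skipped where it is not needed: on bare-metal
 * APUs and on ASICs whose XGMI link makes them coherent with the CPU.
 */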
void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	if (ring && ring->funcs->emit_hdp_flush)
		amdgpu_ring_emit_hdp_flush(ring);
	else
		amdgpu_asic_flush_hdp(adev, ring);
}

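/**
 * amdgpu_device_invalidate_hdp - invalidate the HDP read cache
 * @adev: amdgpu_device pointer
 * @ring: ring to pass to the ASIC callback, may be NULL
 *
 * Invalidates the Host Data Path block so the GPU does not read stale
 * data after the CPU has updated memory. Skipped in the same bare-metal
 * APU and XGMI-coherent cases as amdgpu_device_flush_hdp().
 */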
void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	amdgpu_asic_invalidate_hdp(adev, ring);
}

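/**
 * amdgpu_in_reset - check whether the device is being reset
 * @adev: amdgpu_device pointer
 *
 * Return: non-zero while the device's reset domain is in a GPU reset.
 */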
int amdgpu_in_reset(struct amdgpu_device *adev)
{
	return atomic_read(&adev->reset_domain->in_gpu_reset);
}

/**
 * amdgpu_device_halt() - bring hardware to some kind of halt state
 *
 * @adev: amdgpu_device pointer
 *
 * Bring hardware to some kind of halt state so that no one can touch it
 * any more. This helps preserve the error context when an error occurs.
 * Compared to a simple hang, the system will stay stable at least for SSH
 * access. Then it should be trivial to inspect the hardware state and
 * see what's going on. Implemented as follows:
 *
 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs,
 *    etc.), clears all CPU mappings to the device and disallows remappings
 *    through page faults
 * 2. amdgpu_irq_disable_all() disables all interrupts
 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
 * 4. set adev->no_hw_access to avoid potential crashes after step 5
 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
 *    flush any in-flight DMA operations
 */
void amdgpu_device_halt(struct amdgpu_device *adev)
{
	struct pci_dev *pdev = adev->pdev;
	struct drm_device *ddev = adev_to_drm(adev);

	amdgpu_xcp_dev_unplug(adev);
	drm_dev_unplug(ddev);

	amdgpu_irq_disable_all(adev);

	amdgpu_fence_driver_hw_fini(adev);

	adev->no_hw_access = true;

	amdgpu_device_unmap_mmio(adev);

	pci_disable_device(pdev);
	pci_wait_for_pending_transaction(pdev);
}

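/**
 * amdgpu_device_pcie_port_rreg - read a PCIe port register
 * @adev: amdgpu_device pointer
 * @reg: dword-indexed register offset
 *
 * PCIe port registers are accessed indirectly through an index/data
 * register pair: the byte offset (@reg * 4) is written to the index
 * register and the value is then read from the data register, all under
 * pcie_idx_lock so that concurrent accesses cannot interleave.
 *
 * Return: the 32-bit value read.
 */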
u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
				u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

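/**
 * amdgpu_device_pcie_port_wreg - write a PCIe port register
 * @adev: amdgpu_device pointer
 * @reg: dword-indexed register offset
 * @v: value to write
 *
 * Mirror of amdgpu_device_pcie_port_rreg(); the reads back of the index
 * and data registers act as posting flushes for the writes.
 */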
void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
				u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_switch_gang - switch to a new gang
 * @adev: amdgpu_device pointer
 * @gang: the gang to switch to
 *
 * Try to switch to a new gang.
 * Returns: NULL if we switched to the new gang or a reference to the current
 * gang leader.
 */
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
					    struct dma_fence *gang)
{
	struct dma_fence *old = NULL;

	do {
		dma_fence_put(old);
		rcu_read_lock();
		old = dma_fence_get_rcu_safe(&adev->gang_submit);
		rcu_read_unlock();

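		/* Already the current gang leader, nothing to do */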
		if (old == gang)
			break;

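		/* The active gang leader has not signaled yet, hand it back */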
		if (!dma_fence_is_signaled(old))
			return old;

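	/*
	 * Publish @gang only if the gang leader is still the fence we
	 * sampled above; otherwise another thread raced us, so retry.
	 */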
	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
			 old, gang) != old);

	dma_fence_put(old);
	return NULL;
}

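/**
 * amdgpu_device_has_display_hardware - check for display hardware
 * @adev: amdgpu_device pointer
 *
 * Return: true if the ASIC has usable (non-harvested) display hardware.
 */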
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
	case CHIP_TOPAZ:
		/* chips with no display hardware */
		return false;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* chips with display hardware */
		return true;
	default:
		/* IP discovery */
		if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
			return false;
		return true;
	}
}

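/**
 * amdgpu_device_wait_on_rreg - poll a register until it reaches a value
 * @adev: amdgpu_device pointer
 * @inst: instance number, used only in the timeout warning
 * @reg_addr: register offset to poll
 * @reg_name: register name, used only in the timeout warning
 * @expected_value: value the masked register is expected to reach
 * @mask: bits of the register to compare
 *
 * Polls @reg_addr until (value & @mask) == @expected_value. The timeout
 * window restarts whenever the read value changes, so only a register
 * that has stopped changing can time out.
 *
 * Return: 0 on success, -ETIMEDOUT if the value was not reached in time.
 */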
uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
		uint32_t inst, uint32_t reg_addr, char reg_name[],
		uint32_t expected_value, uint32_t mask)
{
	uint32_t ret = 0;
	uint32_t old_ = 0;
	uint32_t tmp_ = RREG32(reg_addr);
	uint32_t loop = adev->usec_timeout;

	while ((tmp_ & (mask)) != (expected_value)) {
		if (old_ != tmp_) {
			loop = adev->usec_timeout;
			old_ = tmp_;
		} else {
			udelay(1);
		}
		tmp_ = RREG32(reg_addr);
		loop--;
		if (!loop) {
			DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
				  inst, reg_name, (uint32_t)expected_value,
				  (uint32_t)(tmp_ & (mask)));
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}