/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/aperture.h>
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#include <linux/apple-gmux.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/device.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
#include "amdgpu_virt.h"
#include "amdgpu_dev_coredump.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
#endif

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000
#define AMDGPU_MAX_RETRY_LIMIT		2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
#define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2)
#define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
#define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)

static const struct drm_driver amdgpu_kms_driver;

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
	"NAVI10",
	"CYAN_SKILLFISH",
	"NAVI14",
	"NAVI12",
	"SIENNA_CICHLID",
	"NAVY_FLOUNDER",
	"VANGOGH",
	"DIMGREY_CAVEFISH",
	"BEIGE_GOBY",
	"YELLOW_CARP",
	"IP DISCOVERY",
	"LAST",
};

#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)
/*
 * Default init level where all blocks are expected to be initialized. This is
 * the level of initialization expected by default and also after a full reset
 * of the device.
 */
struct amdgpu_init_level amdgpu_init_default = {
	.level = AMDGPU_INIT_LEVEL_DEFAULT,
	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
};

struct amdgpu_init_level amdgpu_init_recovery = {
	.level = AMDGPU_INIT_LEVEL_RESET_RECOVERY,
	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
};

/*
 * Minimal blocks needed to be initialized before an XGMI hive can be reset.
 * This is used for cases like reset on initialization where the entire hive
 * needs to be reset before first use.
 */
struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
	.level = AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
	.hwini_ip_block_mask =
		BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) |
		BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) |
		BIT(AMD_IP_BLOCK_TYPE_PSP)
};

static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
					     enum amd_ip_block_type block)
{
	return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0;
}

void amdgpu_set_init_level(struct amdgpu_device *adev,
			   enum amdgpu_init_lvl_id lvl)
{
	switch (lvl) {
	case AMDGPU_INIT_LEVEL_MINIMAL_XGMI:
		adev->init_lvl = &amdgpu_init_minimal_xgmi;
		break;
	case AMDGPU_INIT_LEVEL_RESET_RECOVERY:
		adev->init_lvl = &amdgpu_init_recovery;
		break;
	case AMDGPU_INIT_LEVEL_DEFAULT:
		fallthrough;
	default:
		adev->init_lvl = &amdgpu_init_default;
		break;
	}
}

static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs). The file pcie_replay_count is used for this and
 * returns the total number of replays as the sum of the NAKs generated
 * and the NAKs received.
 */
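/*
 * Illustrative shell usage (the exact sysfs path depends on the PCI address
 * of the device; the address below is an example):
 *
 *   cat /sys/bus/pci/devices/0000:03:00.0/pcie_replay_count
 */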

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, 0444,
		amdgpu_device_get_pcie_replay_count, NULL);

static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
					  struct bin_attribute *attr, char *buf,
					  loff_t ppos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t bytes_read;

	switch (ppos) {
	case AMDGPU_SYS_REG_STATE_XGMI:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_WAFL:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_PCIE:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_USR:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_USR_1:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
		break;
	default:
		return -EINVAL;
	}

	return bytes_read;
}

BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
	 AMDGPU_SYS_REG_STATE_END);

int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (!amdgpu_asic_get_reg_state_supported(adev))
		return 0;

	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);

	return ret;
}

void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_asic_get_reg_state_supported(adev))
		return;
	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
}
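/*
 * Note: the reg_state binary attribute multiplexes on the file offset; a read
 * starting at offset AMDGPU_SYS_REG_STATE_XGMI returns XGMI state, and so on
 * for the other AMDGPU_SYS_REG_STATE_* values. Illustrative shell usage (PCI
 * address and offset are examples, not fixed values):
 *
 *   dd if=/sys/bus/pci/devices/0000:03:00.0/reg_state bs=1 skip=<offset>
 */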

int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;

	if (ip_block->version->funcs->suspend) {
		r = ip_block->version->funcs->suspend(ip_block);
		if (r) {
			dev_err(ip_block->adev->dev,
				"suspend of IP block <%s> failed %d\n",
				ip_block->version->funcs->name, r);
			return r;
		}
	}

	ip_block->status.hw = false;
	return 0;
}

int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	if (ip_block->version->funcs->resume) {
		r = ip_block->version->funcs->resume(ip_block);
		if (r) {
			dev_err(ip_block->adev->dev,
				"resume of IP block <%s> failed %d\n",
				ip_block->version->funcs->name, r);
			return r;
		}
	}

	ip_block->status.hw = true;
	return 0;
}

/**
 * DOC: board_info
 *
 * The amdgpu driver provides a sysfs API for giving board related information.
 * It provides the form factor information in the format
 *
 *   type : form factor
 *
 * Possible form factor values:
 *
 * - "cem"		- PCIe CEM card
 * - "oam"		- Open Compute Accelerator Module
 * - "unknown"	- Not known
 */
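/*
 * Illustrative shell usage (sysfs path and output are examples):
 *
 *   $ cat /sys/bus/pci/devices/0000:03:00.0/board_info
 *   type : oam
 */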

static ssize_t amdgpu_device_get_board_info(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
	const char *pkg;

	if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
		pkg_type = adev->smuio.funcs->get_pkg_type(adev);

	switch (pkg_type) {
	case AMDGPU_PKG_TYPE_CEM:
		pkg = "cem";
		break;
	case AMDGPU_PKG_TYPE_OAM:
		pkg = "oam";
		break;
	default:
		pkg = "unknown";
		break;
	}

	return sysfs_emit(buf, "%s : %s\n", "type", pkg);
}

static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);

static struct attribute *amdgpu_board_attrs[] = {
	&dev_attr_board_info.attr,
	NULL,
};

static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
					     struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (adev->flags & AMD_IS_APU)
		return 0;

	return attr->mode;
}

static const struct attribute_group amdgpu_board_attrs_group = {
	.attrs = amdgpu_board_attrs,
	.is_visible = amdgpu_board_attrs_is_visible
};

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
		return false;

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Return:
 * 1 if the device supports BACO;
 * 3 if the device supports MACO (which only works if BACO is supported);
 * otherwise return 0.
 */
int amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_asic_supports_baco(adev);
}
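/*
 * The return value is a bit mask, so callers can test the BACO_SUPPORT and
 * MACO_SUPPORT flags individually, as amdgpu_device_detect_runtime_pm_mode()
 * below does. Illustrative check:
 *
 *	if (amdgpu_device_supports_baco(dev) & MACO_SUPPORT)
 *		... prefer BAMACO ...
 */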

void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
{
	struct drm_device *dev;
	int bamaco_support;

	dev = adev_to_drm(adev);

	adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
	bamaco_support = amdgpu_device_supports_baco(dev);

	switch (amdgpu_runtime_pm) {
	case 2:
		if (bamaco_support & MACO_SUPPORT) {
			adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
			dev_info(adev->dev, "Forcing BAMACO for runtime pm\n");
		} else if (bamaco_support == BACO_SUPPORT) {
			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
			dev_info(adev->dev, "Requested mode BAMACO not available, falling back to BACO\n");
		}
		break;
	case 1:
		if (bamaco_support & BACO_SUPPORT) {
			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
			dev_info(adev->dev, "Forcing BACO for runtime pm\n");
		}
		break;
	case -1:
	case -2:
		if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */
			adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
			dev_info(adev->dev, "Using ATPX for runtime pm\n");
		} else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */
			adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
			dev_info(adev->dev, "Using BOCO for runtime pm\n");
		} else {
			if (!bamaco_support)
				goto no_runtime_pm;

			switch (adev->asic_type) {
			case CHIP_VEGA20:
			case CHIP_ARCTURUS:
				/* BACO is not supported on vega20 and arcturus */
				break;
			case CHIP_VEGA10:
				/* enable BACO as runpm mode if noretry=0 */
				if (!adev->gmc.noretry)
					adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
				break;
			default:
				/* enable BACO as runpm mode on CI+ */
				adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
				break;
			}

			if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
				if (bamaco_support & MACO_SUPPORT) {
					adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
					dev_info(adev->dev, "Using BAMACO for runtime pm\n");
				} else {
					dev_info(adev->dev, "Using BACO for runtime pm\n");
				}
			}
		}
		break;
	case 0:
		dev_info(adev->dev, "runtime pm is manually disabled\n");
		break;
	default:
		break;
	}

no_runtime_pm:
	if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
		dev_info(adev->dev, "Runtime PM not available\n");
}
/**
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * smart shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
	return (amdgpu_device_supports_boco(dev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; sizeof(@buf) must be >= @size
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		tmp = pos >> 31;

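		/* bit 31 of MM_INDEX appears to select the VRAM aperture for
		 * MM_DATA accesses, with the address bits above bit 30 going
		 * through MM_INDEX_HI (descriptive note inferred from the
		 * usage below)
		 */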
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}

	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}

/**
 * amdgpu_device_aper_access - access vram via the vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; sizeof(@buf) must be >= @size
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes that have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			/* Make sure HDP write cache flush happens without any reordering
			 * after the system memory contents are sent over PCIe device
			 */
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			/* Make sure HDP read cache is invalidated before issuing a read
			 * to the PCIe device
			 */
			mb();
			memcpy_fromio(buf, addr, count);
		}
	}

	return count;
#else
	return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; sizeof(@buf) must be >= @size
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try using the vram aperture to access vram first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM to access the rest of vram */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}

/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore; if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_domain->sem))
			up_read(&adev->reset_domain->sem);
		else
			lockdep_assert_held(&adev->reset_domain->sem);
	}
#endif
	return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/**
 * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
				uint32_t reg, uint32_t acc_flags,
				uint32_t xcc_id)
{
	uint32_t ret, rlcg_flag;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    adev->gfx.rlc.rlcg_reg_access_supported &&
		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
							 GC_HWIP, false,
							 &rlcg_flag)) {
			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, GET_INST(GC, xcc_id));
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	return ret;
}

/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to write to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

/**
 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 * @xcc_id: xcc accelerated compute core id
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v,
			     uint32_t xcc_id)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
	} else if ((reg * 4) >= adev->rmmio_size) {
		adev->pcie_wreg(adev, reg * 4, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}

/**
 * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags, uint32_t xcc_id)
{
	uint32_t rlcg_flag;

	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    adev->gfx.rlc.rlcg_reg_access_supported &&
		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
							 GC_HWIP, true,
							 &rlcg_flag)) {
			amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, GET_INST(GC, xcc_id));
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v, xcc_id);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}
}

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u32 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
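	/* the readl below is a posting read: it ensures the index write has
	 * reached the device before the data register is accessed; the same
	 * pattern is used by all the indirect accessors that follow
	 */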
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
				    u64 reg_addr)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	if (unlikely(!adev->nbio.funcs)) {
		pcie_index = AMDGPU_PCIE_INDEX_FALLBACK;
		pcie_data = AMDGPU_PCIE_DATA_FALLBACK;
	} else {
		pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
		pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	}

	if (reg_addr >> 32) {
		if (unlikely(!adev->nbio.funcs))
			pcie_index_hi = AMDGPU_PCIE_INDEX_HI_FALLBACK;
		else
			pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
	} else {
		pcie_index_hi = 0;
	}

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r = readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u64 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
				  u64 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	unsigned long pcie_index_hi = 0;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;
	u64 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
			pcie_index_hi * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r |= ((u64)readl(pcie_data_offset) << 32);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register address
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
				     u64 reg_addr, u32 reg_data)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
	else
		pcie_index_hi = 0;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
				   u64 reg_addr, u64 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	unsigned long pcie_index_hi = 0;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_get_rev_id - query device rev_id
 *
 * @adev: amdgpu_device pointer
 *
 * Return device rev_id
 */
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	int ret;

	amdgpu_asic_pre_asic_init(adev);

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		amdgpu_psp_wait_for_bootloader(adev);
		ret = amdgpu_atomfirmware_asic_init(adev, true);
		return ret;
	} else {
		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
	}
}

/**
 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &adev->mem_scratch.robj,
				       &adev->mem_scratch.gpu_addr,
				       (void **)&adev->mem_scratch.ptr);
}

/**
 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND/OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
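/*
 * The register array is consumed as {reg, and_mask, or_mask} triples.
 * Illustrative golden-setting usage (mmFOO_CNTL is a hypothetical register
 * name, not a real define):
 *
 *	static const u32 golden_settings[] = {
 *		mmFOO_CNTL, 0x0000ff00, 0x00000400,
 *	};
 *	amdgpu_device_program_register_sequence(adev, golden_settings,
 *						ARRAY_SIZE(golden_settings));
 */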

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */
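/*
 * Illustrative writeback usage (a sketch, mirroring how rings use their
 * fence/rptr slots elsewhere in the driver; not a verbatim excerpt):
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *		u32 value = adev->wb.wb[wb];	// CPU-side view of the slot
 *		...
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */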

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long flags, offset;

	spin_lock_irqsave(&adev->wb.lock, flags);
	offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		spin_unlock_irqrestore(&adev->wb.lock, flags);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		spin_unlock_irqrestore(&adev->wb.lock, flags);
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	unsigned long flags;

	wb >>= 3;
	spin_lock_irqsave(&adev->wb.lock, flags);
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
	spin_unlock_irqrestore(&adev->wb.lock, flags);
}

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned int i;
	u16 cmd;
	int r;

	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		return 0;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* resizing on Dell G5 SE platforms causes problems with runtime pm */
	if ((amdgpu_runtime_pm != 0) &&
	    adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
	    adev->pdev->device == 0x731f &&
	    adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
		return 0;

	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
		DRM_WARN("System can't access extended configuration space, please check!!\n");

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
{
	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
		return false;

	return true;
}

/*
 * GPU helpers function.
 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if post is needed because a hw reset was performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (!amdgpu_device_read_bios(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still need the driver to do vPost, otherwise the gpu hangs;
		 * smc fw versions above 22.15 don't have this flaw, so we force
		 * vPost to be executed for smc versions below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			release_firmware(adev->pm.fw);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on DCE 3.0 or later.
 * If users report that it works on older ASICS as well, we may
 * loosen this.
 */
bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
{
	switch (amdgpu_seamless) {
	case -1:
		break;
	case 1:
		return true;
	case 0:
		return false;
	default:
		DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
			  amdgpu_seamless);
		return false;
	}

	if (!(adev->flags & AMD_IS_APU))
		return false;

	if (adev->mman.keep_stolen_vga_memory)
		return false;

	return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
}

/*
 * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
 * don't support dynamic speed switching. Until we have confirmation from Intel
 * that a specific host supports it, it's safer that we keep it disabled for all.
 *
 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
 */
static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* eGPUs change speeds based on USB4 fabric conditions */
	if (dev_is_removable(adev->dev))
		return true;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		return false;
#endif
	return true;
}

1833/**
1834 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1835 *
1836 * @adev: amdgpu_device pointer
1837 *
1838 * Confirm whether the module parameter and pcie bridge agree that ASPM should
1839 * be set for this device.
1840 *
1841 * Returns true if it should be used or false if not.
1842 */
1843bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1844{
1845	switch (amdgpu_aspm) {
1846	case -1:
1847		break;
1848	case 0:
1849		return false;
1850	case 1:
1851		return true;
1852	default:
1853		return false;
1854	}
1855	if (adev->flags & AMD_IS_APU)
1856		return false;
1857	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
1858		return false;
1859	return pcie_aspm_enabled(adev->pdev);
1860}
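/*
 * Hypothetical usage sketch (example_program_aspm is illustrative, not a
 * real function): IP-specific PCIe code would gate ASPM programming on
 * this helper, e.g.:
 *
 *	static void example_program_aspm(struct amdgpu_device *adev)
 *	{
 *		if (!amdgpu_device_should_use_aspm(adev))
 *			return;
 *		... program L0s/L1 entry registers ...
 *	}
 */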
1861
1862/* if we get transitioned to only one device, take VGA back */
1863/**
1864 * amdgpu_device_vga_set_decode - enable/disable vga decode
1865 *
1866 * @pdev: PCI device pointer
1867 * @state: enable/disable vga decode
1868 *
1869 * Enable/disable vga decode (all asics).
1870 * Returns VGA resource flags.
1871 */
1872static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1873		bool state)
1874{
1875	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1876
1877	amdgpu_asic_set_vga_state(adev, state);
1878	if (state)
1879		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1880		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1881	else
1882		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1883}
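/*
 * Registration sketch (assumed, the actual call lives elsewhere in the
 * driver): this callback is handed to the VGA arbiter roughly as:
 *
 *	vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
 */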
1884
1885/**
1886 * amdgpu_device_check_block_size - validate the vm block size
1887 *
1888 * @adev: amdgpu_device pointer
1889 *
1890 * Validates the vm block size specified via module parameter.
1891 * The vm block size defines number of bits in page table versus page directory,
1892 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1893 * page table and the remaining bits are in the page directory.
1894 */
1895static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1896{
1897	/* defines number of bits in page table versus page directory,
1898	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1899	 * page table and the remaining bits are in the page directory
1900	 */
1901	if (amdgpu_vm_block_size == -1)
1902		return;
1903
1904	if (amdgpu_vm_block_size < 9) {
1905		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1906			 amdgpu_vm_block_size);
1907		amdgpu_vm_block_size = -1;
1908	}
1909}
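/*
 * Worked example: with 4KB pages (12 offset bits) and
 * amdgpu_vm_block_size = 9, each page table spans 2^(9 + 12) bytes = 2MB
 * of GPU virtual address space; all remaining address bits index the
 * page directory.
 */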
1910
1911/**
1912 * amdgpu_device_check_vm_size - validate the vm size
1913 *
1914 * @adev: amdgpu_device pointer
1915 *
1916 * Validates the vm size in GB specified via module parameter.
1917 * The VM size is the size of the GPU virtual memory space in GB.
1918 */
1919static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1920{
1921	/* no need to check the default value */
1922	if (amdgpu_vm_size == -1)
1923		return;
1924
1925	if (amdgpu_vm_size < 1) {
1926		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1927			 amdgpu_vm_size);
1928		amdgpu_vm_size = -1;
1929	}
1930}
1931
1932static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1933{
1934	struct sysinfo si;
1935	bool is_os_64 = (sizeof(void *) == 8);
1936	uint64_t total_memory;
1937	uint64_t dram_size_seven_GB = 0x1B8000000;
1938	uint64_t dram_size_three_GB = 0xB8000000;
1939
1940	if (amdgpu_smu_memory_pool_size == 0)
1941		return;
1942
1943	if (!is_os_64) {
1944		DRM_WARN("Not 64-bit OS, feature not supported\n");
1945		goto def_value;
1946	}
1947	si_meminfo(&si);
1948	total_memory = (uint64_t)si.totalram * si.mem_unit;
1949
1950	if ((amdgpu_smu_memory_pool_size == 1) ||
1951		(amdgpu_smu_memory_pool_size == 2)) {
1952		if (total_memory < dram_size_three_GB)
1953			goto def_value1;
1954	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1955		(amdgpu_smu_memory_pool_size == 8)) {
1956		if (total_memory < dram_size_seven_GB)
1957			goto def_value1;
1958	} else {
1959		DRM_WARN("Smu memory pool size not supported\n");
1960		goto def_value;
1961	}
1962	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1963
1964	return;
1965
1966def_value1:
1967	DRM_WARN("Not enough system memory\n");
1968def_value:
1969	adev->pm.smu_prv_buffer_size = 0;
1970}
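/*
 * Worked example: the shift by 28 converts the module parameter into
 * 256MB units, so amdgpu_smu_memory_pool_size = 1/2/4/8 reserves a
 * 256MB/512MB/1GB/2GB pool, and the checks above require roughly 3GB of
 * system RAM for the small sizes and 7GB for the large ones.
 */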
1971
1972static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1973{
1974	if (!(adev->flags & AMD_IS_APU) ||
1975	    adev->asic_type < CHIP_RAVEN)
1976		return 0;
1977
1978	switch (adev->asic_type) {
1979	case CHIP_RAVEN:
1980		if (adev->pdev->device == 0x15dd)
1981			adev->apu_flags |= AMD_APU_IS_RAVEN;
1982		if (adev->pdev->device == 0x15d8)
1983			adev->apu_flags |= AMD_APU_IS_PICASSO;
1984		break;
1985	case CHIP_RENOIR:
1986		if ((adev->pdev->device == 0x1636) ||
1987		    (adev->pdev->device == 0x164c))
1988			adev->apu_flags |= AMD_APU_IS_RENOIR;
1989		else
1990			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1991		break;
1992	case CHIP_VANGOGH:
1993		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1994		break;
1995	case CHIP_YELLOW_CARP:
1996		break;
1997	case CHIP_CYAN_SKILLFISH:
1998		if ((adev->pdev->device == 0x13FE) ||
1999		    (adev->pdev->device == 0x143F))
2000			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
2001		break;
2002	default:
2003		break;
2004	}
2005
2006	return 0;
2007}
2008
2009/**
2010 * amdgpu_device_check_arguments - validate module params
2011 *
2012 * @adev: amdgpu_device pointer
2013 *
2014 * Validates certain module parameters and updates
2015 * the associated values used by the driver (all asics).
2016 */
2017static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
2018{
2019	int i;
2020
2021	if (amdgpu_sched_jobs < 4) {
2022		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
2023			 amdgpu_sched_jobs);
2024		amdgpu_sched_jobs = 4;
2025	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
2026		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
2027			 amdgpu_sched_jobs);
2028		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
2029	}
2030
2031	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
2032		/* gart size must be greater or equal to 32M */
2033		dev_warn(adev->dev, "gart size (%d) too small\n",
2034			 amdgpu_gart_size);
2035		amdgpu_gart_size = -1;
2036	}
2037
2038	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
2039		/* gtt size must be greater or equal to 32M */
2040		dev_warn(adev->dev, "gtt size (%d) too small\n",
2041				 amdgpu_gtt_size);
2042		amdgpu_gtt_size = -1;
2043	}
2044
2045	/* valid range is between 4 and 9 inclusive */
2046	if (amdgpu_vm_fragment_size != -1 &&
2047	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
2048		dev_warn(adev->dev, "valid range is between 4 and 9\n");
2049		amdgpu_vm_fragment_size = -1;
2050	}
2051
2052	if (amdgpu_sched_hw_submission < 2) {
2053		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
2054			 amdgpu_sched_hw_submission);
2055		amdgpu_sched_hw_submission = 2;
2056	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
2057		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
2058			 amdgpu_sched_hw_submission);
2059		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
2060	}
2061
2062	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
2063		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
2064		amdgpu_reset_method = -1;
2065	}
2066
2067	amdgpu_device_check_smu_prv_buffer_size(adev);
2068
2069	amdgpu_device_check_vm_size(adev);
2070
2071	amdgpu_device_check_block_size(adev);
2072
2073	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
2074
2075	for (i = 0; i < MAX_XCP; i++)
2076		adev->enforce_isolation[i] = !!enforce_isolation;
2077
2078	return 0;
2079}
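/*
 * Example of the fixups above: booting with amdgpu.sched_jobs=5 takes the
 * roundup_pow_of_two() path, so the effective value becomes 8; a value of
 * 3 would instead be clamped up to the minimum of 4.
 */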
2080
2081/**
2082 * amdgpu_switcheroo_set_state - set switcheroo state
2083 *
2084 * @pdev: pci dev pointer
2085 * @state: vga_switcheroo state
2086 *
2087 * Callback for the switcheroo driver.  Suspends or resumes
2088 * the asics before or after it is powered up using ACPI methods.
2089 */
2090static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
2091					enum vga_switcheroo_state state)
2092{
2093	struct drm_device *dev = pci_get_drvdata(pdev);
2094	int r;
2095
2096	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
2097		return;
2098
2099	if (state == VGA_SWITCHEROO_ON) {
2100		pr_info("switched on\n");
2101		/* don't suspend or resume card normally */
2102		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
2103
2104		pci_set_power_state(pdev, PCI_D0);
2105		amdgpu_device_load_pci_state(pdev);
2106		r = pci_enable_device(pdev);
2107		if (r)
2108			DRM_WARN("pci_enable_device failed (%d)\n", r);
2109		amdgpu_device_resume(dev, true);
2110
2111		dev->switch_power_state = DRM_SWITCH_POWER_ON;
2112	} else {
2113		pr_info("switched off\n");
2114		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
2115		amdgpu_device_prepare(dev);
2116		amdgpu_device_suspend(dev, true);
2117		amdgpu_device_cache_pci_state(pdev);
2118		/* Shut down the device */
2119		pci_disable_device(pdev);
2120		pci_set_power_state(pdev, PCI_D3cold);
2121		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
2122	}
2123}
2124
2125/**
2126 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
2127 *
2128 * @pdev: pci dev pointer
2129 *
2130 * Callback for the switcheroo driver.  Checks if the switcheroo
2131 * state can be changed.
2132 * Returns true if the state can be changed, false if not.
2133 */
2134static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
2135{
2136	struct drm_device *dev = pci_get_drvdata(pdev);
2137
2138	/*
2139	* FIXME: open_count is protected by drm_global_mutex but that would lead to
2140	* locking inversion with the driver load path. And the access here is
2141	* completely racy anyway. So don't bother with locking for now.
2142	*/
2143	return atomic_read(&dev->open_count) == 0;
2144}
2145
2146static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
2147	.set_gpu_state = amdgpu_switcheroo_set_state,
2148	.reprobe = NULL,
2149	.can_switch = amdgpu_switcheroo_can_switch,
2150};
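/*
 * Registration sketch (assumed, performed elsewhere in the driver):
 *
 *	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, px);
 *
 * where the bool px tells the switcheroo core whether the driver handles
 * dGPU power control itself.
 */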
2151
2152/**
2153 * amdgpu_device_ip_set_clockgating_state - set the CG state
2154 *
2155 * @dev: amdgpu_device pointer
2156 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2157 * @state: clockgating state (gate or ungate)
2158 *
2159 * Sets the requested clockgating state for all instances of
2160 * the hardware IP specified.
2161 * Returns the error code from the last instance.
2162 */
2163int amdgpu_device_ip_set_clockgating_state(void *dev,
2164					   enum amd_ip_block_type block_type,
2165					   enum amd_clockgating_state state)
2166{
2167	struct amdgpu_device *adev = dev;
2168	int i, r = 0;
2169
2170	for (i = 0; i < adev->num_ip_blocks; i++) {
2171		if (!adev->ip_blocks[i].status.valid)
2172			continue;
2173		if (adev->ip_blocks[i].version->type != block_type)
2174			continue;
2175		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
2176			continue;
2177		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
2178			(void *)adev, state);
2179		if (r)
2180			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
2181				  adev->ip_blocks[i].version->funcs->name, r);
2182	}
2183	return r;
2184}
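/*
 * Hypothetical usage sketch:
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev,
 *						   AMD_IP_BLOCK_TYPE_GFX,
 *						   AMD_CG_STATE_GATE);
 *
 * This gates clocks on every valid GFX IP instance; note that r only
 * reflects the last instance that implemented the callback.
 */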
2185
2186/**
2187 * amdgpu_device_ip_set_powergating_state - set the PG state
2188 *
2189 * @dev: amdgpu_device pointer
2190 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2191 * @state: powergating state (gate or ungate)
2192 *
2193 * Sets the requested powergating state for all instances of
2194 * the hardware IP specified.
2195 * Returns the error code from the last instance.
2196 */
2197int amdgpu_device_ip_set_powergating_state(void *dev,
2198					   enum amd_ip_block_type block_type,
2199					   enum amd_powergating_state state)
2200{
2201	struct amdgpu_device *adev = dev;
2202	int i, r = 0;
2203
2204	for (i = 0; i < adev->num_ip_blocks; i++) {
2205		if (!adev->ip_blocks[i].status.valid)
2206			continue;
2207		if (adev->ip_blocks[i].version->type != block_type)
2208			continue;
2209		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
2210			continue;
2211		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
2212			(void *)adev, state);
2213		if (r)
2214			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
2215				  adev->ip_blocks[i].version->funcs->name, r);
2216	}
2217	return r;
2218}
2219
2220/**
2221 * amdgpu_device_ip_get_clockgating_state - get the CG state
2222 *
2223 * @adev: amdgpu_device pointer
2224 * @flags: clockgating feature flags
2225 *
2226 * Walks the list of IPs on the device and updates the clockgating
2227 * flags for each IP.
2228 * Updates @flags with the feature flags for each hardware IP where
2229 * clockgating is enabled.
2230 */
2231void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
2232					    u64 *flags)
2233{
2234	int i;
2235
2236	for (i = 0; i < adev->num_ip_blocks; i++) {
2237		if (!adev->ip_blocks[i].status.valid)
2238			continue;
2239		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
2240			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
2241	}
2242}
2243
2244/**
2245 * amdgpu_device_ip_wait_for_idle - wait for idle
2246 *
2247 * @adev: amdgpu_device pointer
2248 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2249 *
2250 * Waits for the requested hardware IP to be idle.
2251 * Returns 0 for success or a negative error code on failure.
2252 */
2253int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
2254				   enum amd_ip_block_type block_type)
2255{
2256	int i, r;
2257
2258	for (i = 0; i < adev->num_ip_blocks; i++) {
2259		if (!adev->ip_blocks[i].status.valid)
2260			continue;
2261		if (adev->ip_blocks[i].version->type == block_type) {
2262			if (adev->ip_blocks[i].version->funcs->wait_for_idle) {
2263				r = adev->ip_blocks[i].version->funcs->wait_for_idle(
2264								&adev->ip_blocks[i]);
2265				if (r)
2266					return r;
2267			}
2268			break;
2269		}
2270	}
2271	return 0;
2273}
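/*
 * Hypothetical usage sketch:
 *
 *	r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
 *	if (r)
 *		return r;
 *
 * Only the first matching IP block is waited on; IPs without a
 * wait_for_idle callback report success.
 */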
2274
2275/**
2276 * amdgpu_device_ip_is_valid - is the hardware IP enabled
2277 *
2278 * @adev: amdgpu_device pointer
2279 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2280 *
2281 * Check if the hardware IP is enabled or not.
2282 * Returns true if the IP is enabled, false if not.
2283 */
2284bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
2285			       enum amd_ip_block_type block_type)
2286{
2287	int i;
2288
2289	for (i = 0; i < adev->num_ip_blocks; i++) {
2290		if (adev->ip_blocks[i].version->type == block_type)
2291			return adev->ip_blocks[i].status.valid;
2292	}
2293	return false;
2295}
2296
2297/**
2298 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
2299 *
2300 * @adev: amdgpu_device pointer
2301 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
2302 *
2303 * Returns a pointer to the hardware IP block structure
2304 * if it exists for the asic, otherwise NULL.
2305 */
2306struct amdgpu_ip_block *
2307amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
2308			      enum amd_ip_block_type type)
2309{
2310	int i;
2311
2312	for (i = 0; i < adev->num_ip_blocks; i++)
2313		if (adev->ip_blocks[i].version->type == type)
2314			return &adev->ip_blocks[i];
2315
2316	return NULL;
2317}
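/*
 * Hypothetical usage sketch, callers must handle the NULL case:
 *
 *	struct amdgpu_ip_block *ip_block =
 *		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
 *
 *	if (!ip_block)
 *		return -ENOENT;
 */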
2318
2319/**
2320 * amdgpu_device_ip_block_version_cmp
2321 *
2322 * @adev: amdgpu_device pointer
2323 * @type: enum amd_ip_block_type
2324 * @major: major version
2325 * @minor: minor version
2326 *
2327 * return 0 if equal or greater
2328 * return 1 if smaller or the ip_block doesn't exist
2329 */
2330int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
2331				       enum amd_ip_block_type type,
2332				       u32 major, u32 minor)
2333{
2334	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
2335
2336	if (ip_block && ((ip_block->version->major > major) ||
2337			((ip_block->version->major == major) &&
2338			(ip_block->version->minor >= minor))))
2339		return 0;
2340
2341	return 1;
2342}
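/*
 * Note the inverted sense of the return value: 0 means "at least this
 * version". A hypothetical caller requiring SMC 7.1+ would write:
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
 *						7, 1)) {
 *		... SMC IP block is >= 7.1 ...
 *	}
 */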
2343
2344/**
2345 * amdgpu_device_ip_block_add
2346 *
2347 * @adev: amdgpu_device pointer
2348 * @ip_block_version: pointer to the IP to add
2349 *
2350 * Adds the IP block driver information to the collection of IPs
2351 * on the asic.
2352 */
2353int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
2354			       const struct amdgpu_ip_block_version *ip_block_version)
2355{
2356	if (!ip_block_version)
2357		return -EINVAL;
2358
2359	switch (ip_block_version->type) {
2360	case AMD_IP_BLOCK_TYPE_VCN:
2361		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
2362			return 0;
2363		break;
2364	case AMD_IP_BLOCK_TYPE_JPEG:
2365		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
2366			return 0;
2367		break;
2368	default:
2369		break;
2370	}
2371
2372	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
2373		  ip_block_version->funcs->name);
2374
2375	adev->ip_blocks[adev->num_ip_blocks].adev = adev;
2376
2377	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
2378
2379	return 0;
2380}
2381
2382/**
2383 * amdgpu_device_enable_virtual_display - enable virtual display feature
2384 *
2385 * @adev: amdgpu_device pointer
2386 *
2387 * Enables the virtual display feature if the user has enabled it via
2388 * the module parameter virtual_display.  This feature provides a virtual
2389 * display hardware on headless boards or in virtualized environments.
2390 * This function parses and validates the configuration string specified by
2391 * the user and configures the virtual display configuration (number of
2392 * virtual connectors, crtcs, etc.) specified.
2393 */
2394static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
2395{
2396	adev->enable_virtual_display = false;
2397
2398	if (amdgpu_virtual_display) {
2399		const char *pci_address_name = pci_name(adev->pdev);
2400		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
2401
2402		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
2403		pciaddstr_tmp = pciaddstr;
2404		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
2405			pciaddname = strsep(&pciaddname_tmp, ",");
2406			if (!strcmp("all", pciaddname)
2407			    || !strcmp(pci_address_name, pciaddname)) {
2408				long num_crtc;
2409				int res = -1;
2410
2411				adev->enable_virtual_display = true;
2412
2413				if (pciaddname_tmp)
2414					res = kstrtol(pciaddname_tmp, 10,
2415						      &num_crtc);
2416
2417				if (!res) {
2418					if (num_crtc < 1)
2419						num_crtc = 1;
2420					if (num_crtc > 6)
2421						num_crtc = 6;
2422					adev->mode_info.num_crtc = num_crtc;
2423				} else {
2424					adev->mode_info.num_crtc = 1;
2425				}
2426				break;
2427			}
2428		}
2429
2430		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
2431			 amdgpu_virtual_display, pci_address_name,
2432			 adev->enable_virtual_display, adev->mode_info.num_crtc);
2433
2434		kfree(pciaddstr);
2435	}
2436}
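/*
 * Parameter format, as parsed above: semicolon-separated entries of
 * "<pci address>[,<num crtcs>]", where "all" matches any device and
 * num_crtc is clamped to 1..6. Illustrative (not real) addresses:
 *
 *	amdgpu.virtual_display=0000:01:00.0,2;0000:02:00.0,1
 *	amdgpu.virtual_display=all,1
 */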
2437
2438void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
2439{
2440	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
2441		adev->mode_info.num_crtc = 1;
2442		adev->enable_virtual_display = true;
2443		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
2444			 adev->enable_virtual_display, adev->mode_info.num_crtc);
2445	}
2446}
2447
2448/**
2449 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2450 *
2451 * @adev: amdgpu_device pointer
2452 *
2453 * Parses the asic configuration parameters specified in the gpu info
2454 * firmware and makes them available to the driver for use in configuring
2455 * the asic.
2456 * Returns 0 on success, -EINVAL on failure.
2457 */
2458static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
2459{
2460	const char *chip_name;
2461	int err;
2462	const struct gpu_info_firmware_header_v1_0 *hdr;
2463
2464	adev->firmware.gpu_info_fw = NULL;
2465
2466	if (adev->mman.discovery_bin)
2467		return 0;
2468
2469	switch (adev->asic_type) {
2470	default:
2471		return 0;
2472	case CHIP_VEGA10:
2473		chip_name = "vega10";
2474		break;
2475	case CHIP_VEGA12:
2476		chip_name = "vega12";
2477		break;
2478	case CHIP_RAVEN:
2479		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2480			chip_name = "raven2";
2481		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
2482			chip_name = "picasso";
2483		else
2484			chip_name = "raven";
2485		break;
2486	case CHIP_ARCTURUS:
2487		chip_name = "arcturus";
2488		break;
2489	case CHIP_NAVI12:
2490		chip_name = "navi12";
2491		break;
2492	}
2493
2494	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
2495				   "amdgpu/%s_gpu_info.bin", chip_name);
2496	if (err) {
2497		dev_err(adev->dev,
2498			"Failed to get gpu_info firmware \"%s_gpu_info.bin\"\n",
2499			chip_name);
2500		goto out;
2501	}
2502
2503	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2504	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2505
2506	switch (hdr->version_major) {
2507	case 1:
2508	{
2509		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2510			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2511								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2512
2513		/*
2514		 * Should be dropped when DAL no longer needs it.
2515		 */
2516		if (adev->asic_type == CHIP_NAVI12)
2517			goto parse_soc_bounding_box;
2518
2519		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2520		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2521		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2522		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2523		adev->gfx.config.max_texture_channel_caches =
2524			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2525		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2526		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2527		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2528		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2529		adev->gfx.config.double_offchip_lds_buf =
2530			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2531		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2532		adev->gfx.cu_info.max_waves_per_simd =
2533			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2534		adev->gfx.cu_info.max_scratch_slots_per_cu =
2535			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2536		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2537		if (hdr->version_minor >= 1) {
2538			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2539				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2540									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2541			adev->gfx.config.num_sc_per_sh =
2542				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2543			adev->gfx.config.num_packer_per_sc =
2544				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2545		}
2546
2547parse_soc_bounding_box:
2548		/*
2549		 * soc bounding box info is not integrated into the discovery table,
2550		 * so we always need to parse it from the gpu info firmware when needed.
2551		 */
2552		if (hdr->version_minor == 2) {
2553			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2554				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2555									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2556			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2557		}
2558		break;
2559	}
2560	default:
2561		dev_err(adev->dev,
2562			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2563		err = -EINVAL;
2564		goto out;
2565	}
2566out:
2567	return err;
2568}
2569
2570/**
2571 * amdgpu_device_ip_early_init - run early init for hardware IPs
2572 *
2573 * @adev: amdgpu_device pointer
2574 *
2575 * Early initialization pass for hardware IPs.  The hardware IPs that make
2576 * up each asic are discovered and each IP's early_init callback is run.  This
2577 * is the first stage in initializing the asic.
2578 * Returns 0 on success, negative error code on failure.
2579 */
2580static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2581{
2582	struct amdgpu_ip_block *ip_block;
2583	struct pci_dev *parent;
2584	int i, r;
2585	bool total;
2586
2587	amdgpu_device_enable_virtual_display(adev);
2588
2589	if (amdgpu_sriov_vf(adev)) {
2590		r = amdgpu_virt_request_full_gpu(adev, true);
2591		if (r)
2592			return r;
2593	}
2594
2595	switch (adev->asic_type) {
2596#ifdef CONFIG_DRM_AMDGPU_SI
2597	case CHIP_VERDE:
2598	case CHIP_TAHITI:
2599	case CHIP_PITCAIRN:
2600	case CHIP_OLAND:
2601	case CHIP_HAINAN:
2602		adev->family = AMDGPU_FAMILY_SI;
2603		r = si_set_ip_blocks(adev);
2604		if (r)
2605			return r;
2606		break;
2607#endif
2608#ifdef CONFIG_DRM_AMDGPU_CIK
2609	case CHIP_BONAIRE:
2610	case CHIP_HAWAII:
2611	case CHIP_KAVERI:
2612	case CHIP_KABINI:
2613	case CHIP_MULLINS:
2614		if (adev->flags & AMD_IS_APU)
2615			adev->family = AMDGPU_FAMILY_KV;
2616		else
2617			adev->family = AMDGPU_FAMILY_CI;
2618
2619		r = cik_set_ip_blocks(adev);
2620		if (r)
2621			return r;
2622		break;
2623#endif
2624	case CHIP_TOPAZ:
2625	case CHIP_TONGA:
2626	case CHIP_FIJI:
2627	case CHIP_POLARIS10:
2628	case CHIP_POLARIS11:
2629	case CHIP_POLARIS12:
2630	case CHIP_VEGAM:
2631	case CHIP_CARRIZO:
2632	case CHIP_STONEY:
2633		if (adev->flags & AMD_IS_APU)
2634			adev->family = AMDGPU_FAMILY_CZ;
2635		else
2636			adev->family = AMDGPU_FAMILY_VI;
2637
2638		r = vi_set_ip_blocks(adev);
2639		if (r)
2640			return r;
2641		break;
2642	default:
2643		r = amdgpu_discovery_set_ip_blocks(adev);
2644		if (r)
2645			return r;
2646		break;
2647	}
2648
2649	if (amdgpu_has_atpx() &&
2650	    (amdgpu_is_atpx_hybrid() ||
2651	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2652	    ((adev->flags & AMD_IS_APU) == 0) &&
2653	    !dev_is_removable(&adev->pdev->dev))
2654		adev->flags |= AMD_IS_PX;
2655
2656	if (!(adev->flags & AMD_IS_APU)) {
2657		parent = pcie_find_root_port(adev->pdev);
2658		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2659	}
2660
2661
2662	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2663	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2664		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2665	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2666		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2667	if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
2668		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2669
2670	total = true;
2671	for (i = 0; i < adev->num_ip_blocks; i++) {
2672		ip_block = &adev->ip_blocks[i];
2673
2674		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2675			DRM_WARN("disabled ip block: %d <%s>\n",
2676				  i, adev->ip_blocks[i].version->funcs->name);
2677			adev->ip_blocks[i].status.valid = false;
2678		} else if (ip_block->version->funcs->early_init) {
2679			r = ip_block->version->funcs->early_init(ip_block);
2680			if (r == -ENOENT) {
2681				adev->ip_blocks[i].status.valid = false;
2682			} else if (r) {
2683				DRM_ERROR("early_init of IP block <%s> failed %d\n",
2684					  adev->ip_blocks[i].version->funcs->name, r);
2685				total = false;
2686			} else {
2687				adev->ip_blocks[i].status.valid = true;
2688			}
2689		} else {
2690			adev->ip_blocks[i].status.valid = true;
2691		}
2692		/* get the vbios after the asic_funcs are set up */
2693		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2694			r = amdgpu_device_parse_gpu_info_fw(adev);
2695			if (r)
2696				return r;
2697
2698			/* Read BIOS */
2699			if (amdgpu_device_read_bios(adev)) {
2700				if (!amdgpu_get_bios(adev))
2701					return -EINVAL;
2702
2703				r = amdgpu_atombios_init(adev);
2704				if (r) {
2705					dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2706					amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2707					return r;
2708				}
2709			}
2710
2711			/* get pf2vf msg info at its earliest time */
2712			if (amdgpu_sriov_vf(adev))
2713				amdgpu_virt_init_data_exchange(adev);
2714
2715		}
2716	}
2717	if (!total)
2718		return -ENODEV;
2719
2720	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
2721	if (ip_block->status.valid)
2722		amdgpu_amdkfd_device_probe(adev);
2723
2724	adev->cg_flags &= amdgpu_cg_mask;
2725	adev->pg_flags &= amdgpu_pg_mask;
2726
2727	return 0;
2728}
2729
2730static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2731{
2732	int i, r;
2733
2734	for (i = 0; i < adev->num_ip_blocks; i++) {
2735		if (!adev->ip_blocks[i].status.sw)
2736			continue;
2737		if (adev->ip_blocks[i].status.hw)
2738			continue;
2739		if (!amdgpu_ip_member_of_hwini(
2740			    adev, adev->ip_blocks[i].version->type))
2741			continue;
2742		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2743		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2744		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2745			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2746			if (r) {
2747				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2748					  adev->ip_blocks[i].version->funcs->name, r);
2749				return r;
2750			}
2751			adev->ip_blocks[i].status.hw = true;
2752		}
2753	}
2754
2755	return 0;
2756}
2757
2758static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2759{
2760	int i, r;
2761
2762	for (i = 0; i < adev->num_ip_blocks; i++) {
2763		if (!adev->ip_blocks[i].status.sw)
2764			continue;
2765		if (adev->ip_blocks[i].status.hw)
2766			continue;
2767		if (!amdgpu_ip_member_of_hwini(
2768			    adev, adev->ip_blocks[i].version->type))
2769			continue;
2770		r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2771		if (r) {
2772			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2773				  adev->ip_blocks[i].version->funcs->name, r);
2774			return r;
2775		}
2776		adev->ip_blocks[i].status.hw = true;
2777	}
2778
2779	return 0;
2780}
2781
2782static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2783{
2784	int r = 0;
2785	int i;
2786	uint32_t smu_version;
2787
2788	if (adev->asic_type >= CHIP_VEGA10) {
2789		for (i = 0; i < adev->num_ip_blocks; i++) {
2790			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2791				continue;
2792
2793			if (!amdgpu_ip_member_of_hwini(adev,
2794						       AMD_IP_BLOCK_TYPE_PSP))
2795				break;
2796
2797			if (!adev->ip_blocks[i].status.sw)
2798				continue;
2799
2800			/* no need to do the fw loading again if already done */
2801			if (adev->ip_blocks[i].status.hw == true)
2802				break;
2803
2804			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2805				r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
2806				if (r)
2807					return r;
2808			} else {
2809				r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2810				if (r) {
2811					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2812							  adev->ip_blocks[i].version->funcs->name, r);
2813					return r;
2814				}
2815				adev->ip_blocks[i].status.hw = true;
2816			}
2817			break;
2818		}
2819	}
2820
2821	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2822		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2823
2824	return r;
2825}
2826
2827static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2828{
2829	long timeout;
2830	int r, i;
2831
2832	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2833		struct amdgpu_ring *ring = adev->rings[i];
2834
2835		/* No need to setup the GPU scheduler for rings that don't need it */
2836		if (!ring || ring->no_scheduler)
2837			continue;
2838
2839		switch (ring->funcs->type) {
2840		case AMDGPU_RING_TYPE_GFX:
2841			timeout = adev->gfx_timeout;
2842			break;
2843		case AMDGPU_RING_TYPE_COMPUTE:
2844			timeout = adev->compute_timeout;
2845			break;
2846		case AMDGPU_RING_TYPE_SDMA:
2847			timeout = adev->sdma_timeout;
2848			break;
2849		default:
2850			timeout = adev->video_timeout;
2851			break;
2852		}
2853
2854		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
2855				   DRM_SCHED_PRIORITY_COUNT,
2856				   ring->num_hw_submission, 0,
2857				   timeout, adev->reset_domain->wq,
2858				   ring->sched_score, ring->name,
2859				   adev->dev);
2860		if (r) {
2861			DRM_ERROR("Failed to create scheduler on ring %s.\n",
2862				  ring->name);
2863			return r;
2864		}
2865		r = amdgpu_uvd_entity_init(adev, ring);
2866		if (r) {
2867			DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
2868				  ring->name);
2869			return r;
2870		}
2871		r = amdgpu_vce_entity_init(adev, ring);
2872		if (r) {
2873			DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
2874				  ring->name);
2875			return r;
2876		}
2877	}
2878
2879	amdgpu_xcp_update_partition_sched_list(adev);
2880
2881	return 0;
2882}
2883
2884
2885/**
2886 * amdgpu_device_ip_init - run init for hardware IPs
2887 *
2888 * @adev: amdgpu_device pointer
2889 *
2890 * Main initialization pass for hardware IPs.  The list of all the hardware
2891 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2892 * are run.  sw_init initializes the software state associated with each IP
2893 * and hw_init initializes the hardware associated with each IP.
2894 * Returns 0 on success, negative error code on failure.
2895 */
2896static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2897{
2898	bool init_badpage;
2899	int i, r;
2900
2901	r = amdgpu_ras_init(adev);
2902	if (r)
2903		return r;
2904
2905	for (i = 0; i < adev->num_ip_blocks; i++) {
2906		if (!adev->ip_blocks[i].status.valid)
2907			continue;
2908		if (adev->ip_blocks[i].version->funcs->sw_init) {
2909			r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
2910			if (r) {
2911				DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2912					  adev->ip_blocks[i].version->funcs->name, r);
2913				goto init_failed;
2914			}
2915		}
2916		adev->ip_blocks[i].status.sw = true;
2917
2918		if (!amdgpu_ip_member_of_hwini(
2919			    adev, adev->ip_blocks[i].version->type))
2920			continue;
2921
2922		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2923			/* need to do common hw init early so everything is set up for gmc */
2924			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2925			if (r) {
2926				DRM_ERROR("hw_init %d failed %d\n", i, r);
2927				goto init_failed;
2928			}
2929			adev->ip_blocks[i].status.hw = true;
2930		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2931			/* need to do gmc hw init early so we can allocate gpu mem */
2932			/* Try to reserve bad pages early */
2933			if (amdgpu_sriov_vf(adev))
2934				amdgpu_virt_exchange_data(adev);
2935
2936			r = amdgpu_device_mem_scratch_init(adev);
2937			if (r) {
2938				DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
2939				goto init_failed;
2940			}
2941			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2942			if (r) {
2943				DRM_ERROR("hw_init %d failed %d\n", i, r);
2944				goto init_failed;
2945			}
2946			r = amdgpu_device_wb_init(adev);
2947			if (r) {
2948				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2949				goto init_failed;
2950			}
2951			adev->ip_blocks[i].status.hw = true;
2952
2953			/* right after GMC hw init, we create CSA */
2954			if (adev->gfx.mcbp) {
2955				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2956							       AMDGPU_GEM_DOMAIN_VRAM |
2957							       AMDGPU_GEM_DOMAIN_GTT,
2958							       AMDGPU_CSA_SIZE);
2959				if (r) {
2960					DRM_ERROR("allocate CSA failed %d\n", r);
2961					goto init_failed;
2962				}
2963			}
2964
2965			r = amdgpu_seq64_init(adev);
2966			if (r) {
2967				DRM_ERROR("allocate seq64 failed %d\n", r);
2968				goto init_failed;
2969			}
2970		}
2971	}
2972
2973	if (amdgpu_sriov_vf(adev))
2974		amdgpu_virt_init_data_exchange(adev);
2975
2976	r = amdgpu_ib_pool_init(adev);
2977	if (r) {
2978		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2979		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2980		goto init_failed;
2981	}
2982
2983	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2984	if (r)
2985		goto init_failed;
2986
2987	r = amdgpu_device_ip_hw_init_phase1(adev);
2988	if (r)
2989		goto init_failed;
2990
2991	r = amdgpu_device_fw_loading(adev);
2992	if (r)
2993		goto init_failed;
2994
2995	r = amdgpu_device_ip_hw_init_phase2(adev);
2996	if (r)
2997		goto init_failed;
2998
2999	/*
3000	 * Retired pages will be loaded from eeprom and reserved here;
3001	 * this should be called after amdgpu_device_ip_hw_init_phase2 since
3002	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
3003	 * functional for I2C communication, which is only true at this point.
3004	 *
3005	 * amdgpu_ras_recovery_init may fail, but the upper level only cares
3006	 * about failures from a bad gpu situation and stops the amdgpu init
3007	 * process accordingly. For other failures, it still releases all
3008	 * the resources and prints an error message, rather than returning a
3009	 * negative value to the upper level.
3010	 *
3011	 * Note: theoretically, this should be called before all vram allocations
3012	 * to protect retired pages from being abused.
3013	 */
3014	init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
3015	r = amdgpu_ras_recovery_init(adev, init_badpage);
3016	if (r)
3017		goto init_failed;
3018
3019	/*
3020	 * In case of XGMI, grab an extra reference on the reset domain for this device
3021	 */
3022	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3023		if (amdgpu_xgmi_add_device(adev) == 0) {
3024			if (!amdgpu_sriov_vf(adev)) {
3025				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3026
3027				if (WARN_ON(!hive)) {
3028					r = -ENOENT;
3029					goto init_failed;
3030				}
3031
3032				if (!hive->reset_domain ||
3033				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
3034					r = -ENOENT;
3035					amdgpu_put_xgmi_hive(hive);
3036					goto init_failed;
3037				}
3038
3039				/* Drop the early temporary reset domain we created for device */
3040				amdgpu_reset_put_reset_domain(adev->reset_domain);
3041				adev->reset_domain = hive->reset_domain;
3042				amdgpu_put_xgmi_hive(hive);
3043			}
3044		}
3045	}
3046
3047	r = amdgpu_device_init_schedulers(adev);
3048	if (r)
3049		goto init_failed;
3050
3051	if (adev->mman.buffer_funcs_ring->sched.ready)
3052		amdgpu_ttm_set_buffer_funcs_status(adev, true);
3053
3054	/* Don't init kfd if the whole hive needs to be reset during init */
3055	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
3056		kgd2kfd_init_zone_device(adev);
3057		amdgpu_amdkfd_device_init(adev);
3058	}
3059
3060	amdgpu_fru_get_product_info(adev);
3061
3062init_failed:
3063
3064	return r;
3065}
3066
3067/**
3068 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
3069 *
3070 * @adev: amdgpu_device pointer
3071 *
3072 * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
3073 * this function before a GPU reset.  If the value is retained after a
3074 * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
3075 */
3076static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
3077{
3078	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
3079}
3080
3081/**
3082 * amdgpu_device_check_vram_lost - check if vram is valid
3083 *
3084 * @adev: amdgpu_device pointer
3085 *
3086 * Checks the reset magic value written to the gart pointer in VRAM.
3087 * The driver calls this after a GPU reset to see if the contents of
3088 * VRAM were lost or not.
3089 * Returns true if vram is lost, false if not.
3090 */
3091static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
3092{
3093	if (memcmp(adev->gart.ptr, adev->reset_magic,
3094			AMDGPU_RESET_MAGIC_NUM))
3095		return true;
3096
3097	if (!amdgpu_in_reset(adev))
3098		return false;
3099
3100	/*
3101	 * For all ASICs with baco/mode1 reset, the VRAM is
3102	 * always assumed to be lost.
3103	 */
3104	switch (amdgpu_asic_reset_method(adev)) {
3105	case AMD_RESET_METHOD_BACO:
3106	case AMD_RESET_METHOD_MODE1:
3107		return true;
3108	default:
3109		return false;
3110	}
3111}
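/*
 * Together, the two helpers above implement a snapshot-and-compare check:
 * the first AMDGPU_RESET_MAGIC_NUM bytes behind the GART pointer are saved
 * before a reset and compared afterwards, and any mismatch (or a
 * baco/mode1 reset method) is treated as VRAM loss.
 */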
3112
3113/**
3114 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
3115 *
3116 * @adev: amdgpu_device pointer
3117 * @state: clockgating state (gate or ungate)
3118 *
3119 * The list of all the hardware IPs that make up the asic is walked and the
3120 * set_clockgating_state callbacks are run.
3121 * The late initialization pass enables clockgating for hardware IPs.
3122 * The fini or suspend pass disables clockgating for hardware IPs.
3123 * Returns 0 on success, negative error code on failure.
3124 */
3125
3126int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
3127			       enum amd_clockgating_state state)
3128{
3129	int i, j, r;
3130
3131	if (amdgpu_emu_mode == 1)
3132		return 0;
3133
3134	for (j = 0; j < adev->num_ip_blocks; j++) {
3135		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
3136		if (!adev->ip_blocks[i].status.late_initialized)
3137			continue;
3138		/* skip CG for GFX, SDMA on S0ix */
3139		if (adev->in_s0ix &&
3140		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3141		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
3142			continue;
3143		/* skip CG for VCE/UVD, it's handled specially */
3144		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
3145		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
3146		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
3147		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
3148		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
3149			/* enable clockgating to save power */
3150			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
3151										     state);
3152			if (r) {
3153				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
3154					  adev->ip_blocks[i].version->funcs->name, r);
3155				return r;
3156			}
3157		}
3158	}
3159
3160	return 0;
3161}
3162
3163int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
3164			       enum amd_powergating_state state)
3165{
3166	int i, j, r;
3167
3168	if (amdgpu_emu_mode == 1)
3169		return 0;
3170
3171	for (j = 0; j < adev->num_ip_blocks; j++) {
3172		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
3173		if (!adev->ip_blocks[i].status.late_initialized)
3174			continue;
3175		/* skip PG for GFX, SDMA on S0ix */
3176		if (adev->in_s0ix &&
3177		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3178		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
3179			continue;
3180		/* skip PG for VCE/UVD, it's handled specially */
3181		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
3182		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
3183		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
3184		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
3185		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
3186			/* enable powergating to save power */
3187			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
3188											state);
3189			if (r) {
3190				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
3191					  adev->ip_blocks[i].version->funcs->name, r);
3192				return r;
3193			}
3194		}
3195	}
3196	return 0;
3197}
3198
3199static int amdgpu_device_enable_mgpu_fan_boost(void)
3200{
3201	struct amdgpu_gpu_instance *gpu_ins;
3202	struct amdgpu_device *adev;
3203	int i, ret = 0;
3204
3205	mutex_lock(&mgpu_info.mutex);
3206
3207	/*
3208	 * MGPU fan boost feature should be enabled
3209	 * only when there are two or more dGPUs in
3210	 * the system
3211	 */
3212	if (mgpu_info.num_dgpu < 2)
3213		goto out;
3214
3215	for (i = 0; i < mgpu_info.num_dgpu; i++) {
3216		gpu_ins = &(mgpu_info.gpu_ins[i]);
3217		adev = gpu_ins->adev;
3218		if (!(adev->flags & AMD_IS_APU) &&
3219		    !gpu_ins->mgpu_fan_enabled) {
3220			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
3221			if (ret)
3222				break;
3223
3224			gpu_ins->mgpu_fan_enabled = 1;
3225		}
3226	}
3227
3228out:
3229	mutex_unlock(&mgpu_info.mutex);
3230
3231	return ret;
3232}
3233
3234/**
3235 * amdgpu_device_ip_late_init - run late init for hardware IPs
3236 *
3237 * @adev: amdgpu_device pointer
3238 *
3239 * Late initialization pass for hardware IPs.  The list of all the hardware
3240 * IPs that make up the asic is walked and the late_init callbacks are run.
3241 * late_init covers any special initialization that an IP requires
3242 * after all of the IPs have been initialized, or something that needs to happen
3243 * late in the init process.
3244 * Returns 0 on success, negative error code on failure.
3245 */
3246static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
3247{
3248	struct amdgpu_gpu_instance *gpu_instance;
3249	int i = 0, r;
3250
3251	for (i = 0; i < adev->num_ip_blocks; i++) {
3252		if (!adev->ip_blocks[i].status.hw)
3253			continue;
3254		if (adev->ip_blocks[i].version->funcs->late_init) {
3255			r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
3256			if (r) {
3257				DRM_ERROR("late_init of IP block <%s> failed %d\n",
3258					  adev->ip_blocks[i].version->funcs->name, r);
3259				return r;
3260			}
3261		}
3262		adev->ip_blocks[i].status.late_initialized = true;
3263	}
3264
3265	r = amdgpu_ras_late_init(adev);
3266	if (r) {
3267		DRM_ERROR("amdgpu_ras_late_init failed %d", r);
3268		return r;
3269	}
3270
3271	if (!amdgpu_reset_in_recovery(adev))
3272		amdgpu_ras_set_error_query_ready(adev, true);
3273
3274	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3275	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
3276
3277	amdgpu_device_fill_reset_magic(adev);
3278
3279	r = amdgpu_device_enable_mgpu_fan_boost();
3280	if (r)
3281		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
3282
3283	/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
3284	if (amdgpu_passthrough(adev) &&
3285	    ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
3286	     adev->asic_type == CHIP_ALDEBARAN))
3287		amdgpu_dpm_handle_passthrough_sbr(adev, true);
3288
3289	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3290		mutex_lock(&mgpu_info.mutex);
3291
3292		/*
3293		 * Reset device p-state to low as this was booted with high.
3294		 *
3295		 * This should be performed only after all devices from the same
3296		 * hive get initialized.
3297		 *
3298		 * However, it's unknown in advance how many devices are in the hive,
3299		 * as they are counted one by one during device initialization.
3300		 *
3301		 * So, we wait for all XGMI interlinked devices initialized.
3302		 * This may bring some delays as those devices may come from
3303		 * different hives. But that should be OK.
3304		 */
3305		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
3306			for (i = 0; i < mgpu_info.num_gpu; i++) {
3307				gpu_instance = &(mgpu_info.gpu_ins[i]);
3308				if (gpu_instance->adev->flags & AMD_IS_APU)
3309					continue;
3310
3311				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
3312						AMDGPU_XGMI_PSTATE_MIN);
3313				if (r) {
3314					DRM_ERROR("pstate setting failed (%d).\n", r);
3315					break;
3316				}
3317			}
3318		}
3319
3320		mutex_unlock(&mgpu_info.mutex);
3321	}
3322
3323	return 0;
3324}
3325
3326static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
3327{
3328	int r;
3329
3330	if (!ip_block->version->funcs->hw_fini) {
3331		DRM_ERROR("hw_fini of IP block <%s> not defined\n",
3332			  ip_block->version->funcs->name);
3333	} else {
3334		r = ip_block->version->funcs->hw_fini(ip_block);
3335		/* XXX handle errors */
3336		if (r) {
3337			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3338				  ip_block->version->funcs->name, r);
3339		}
3340	}
3341
3342	ip_block->status.hw = false;
3343}
3344
3345/**
3346 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
3347 *
3348 * @adev: amdgpu_device pointer
3349 *
3350 * For ASICs that need to disable the SMC first
3351 */
3352static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
3353{
3354	int i;
3355
3356	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
3357		return;
3358
3359	for (i = 0; i < adev->num_ip_blocks; i++) {
3360		if (!adev->ip_blocks[i].status.hw)
3361			continue;
3362		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3363			amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
3364			break;
3365		}
3366	}
3367}
3368
3369static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
3370{
3371	int i, r;
3372
3373	for (i = 0; i < adev->num_ip_blocks; i++) {
3374		if (!adev->ip_blocks[i].version->funcs->early_fini)
3375			continue;
3376
3377		r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
3378		if (r) {
3379			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
3380				  adev->ip_blocks[i].version->funcs->name, r);
3381		}
3382	}
3383
3384	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3385	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3386
3387	amdgpu_amdkfd_suspend(adev, false);
3388
3389	/* Workaround for ASICs that need to disable the SMC first */
3390	amdgpu_device_smu_fini_early(adev);
3391
3392	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3393		if (!adev->ip_blocks[i].status.hw)
3394			continue;
3395
3396		amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
3397	}
3398
3399	if (amdgpu_sriov_vf(adev)) {
3400		if (amdgpu_virt_release_full_gpu(adev, false))
3401			DRM_ERROR("failed to release exclusive mode on fini\n");
3402	}
3403
3404	return 0;
3405}
3406
3407/**
3408 * amdgpu_device_ip_fini - run fini for hardware IPs
3409 *
3410 * @adev: amdgpu_device pointer
3411 *
3412 * Main teardown pass for hardware IPs.  The list of all the hardware
3413 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
3414 * are run.  hw_fini tears down the hardware associated with each IP
3415 * and sw_fini tears down any software state associated with each IP.
3416 * Returns 0 on success, negative error code on failure.
3417 */
3418static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
3419{
3420	int i, r;
3421
3422	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
3423		amdgpu_virt_release_ras_err_handler_data(adev);
3424
3425	if (adev->gmc.xgmi.num_physical_nodes > 1)
3426		amdgpu_xgmi_remove_device(adev);
3427
3428	amdgpu_amdkfd_device_fini_sw(adev);
3429
3430	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3431		if (!adev->ip_blocks[i].status.sw)
3432			continue;
3433
3434		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
3435			amdgpu_ucode_free_bo(adev);
3436			amdgpu_free_static_csa(&adev->virt.csa_obj);
3437			amdgpu_device_wb_fini(adev);
3438			amdgpu_device_mem_scratch_fini(adev);
3439			amdgpu_ib_pool_fini(adev);
3440			amdgpu_seq64_fini(adev);
3441		}
3442		if (adev->ip_blocks[i].version->funcs->sw_fini) {
3443			r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
3444			/* XXX handle errors */
3445			if (r) {
3446				DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
3447					  adev->ip_blocks[i].version->funcs->name, r);
3448			}
3449		}
3450		adev->ip_blocks[i].status.sw = false;
3451		adev->ip_blocks[i].status.valid = false;
3452	}
3453
3454	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3455		if (!adev->ip_blocks[i].status.late_initialized)
3456			continue;
3457		if (adev->ip_blocks[i].version->funcs->late_fini)
3458			adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
3459		adev->ip_blocks[i].status.late_initialized = false;
3460	}
3461
3462	amdgpu_ras_fini(adev);
3463
3464	return 0;
3465}
3466
3467/**
3468 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3469 *
3470 * @work: work_struct.
3471 */
3472static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
3473{
3474	struct amdgpu_device *adev =
3475		container_of(work, struct amdgpu_device, delayed_init_work.work);
3476	int r;
3477
3478	r = amdgpu_ib_ring_tests(adev);
3479	if (r)
3480		DRM_ERROR("ib ring test failed (%d).\n", r);
3481}
3482
3483static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
3484{
3485	struct amdgpu_device *adev =
3486		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
3487
3488	WARN_ON_ONCE(adev->gfx.gfx_off_state);
3489	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3490
3491	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
3492		adev->gfx.gfx_off_state = true;
3493}
3494
3495/**
3496 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3497 *
3498 * @adev: amdgpu_device pointer
3499 *
3500 * Main suspend function for hardware IPs.  The list of all the hardware
3501 * IPs that make up the asic is walked, clockgating is disabled and the
3502 * suspend callbacks are run.  suspend puts the hardware and software state
3503 * in each IP into a state suitable for suspend.
3504 * Returns 0 on success, negative error code on failure.
3505 */
3506static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3507{
3508	int i, r;
3509
3510	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3511	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3512
3513	/*
3514	 * Per PMFW team's suggestion, driver needs to handle gfxoff
3515	 * and df cstate features disablement for gpu reset(e.g. Mode1Reset)
3516	 * scenario. Add the missing df cstate disablement here.
3517	 */
3518	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3519		dev_warn(adev->dev, "Failed to disallow df cstate");
3520
3521	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3522		if (!adev->ip_blocks[i].status.valid)
3523			continue;
3524
3525		/* displays are handled separately */
3526		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3527			continue;
3528
3529		/* XXX handle errors */
3530		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
3531		if (r)
3532			return r;
3533	}
3534
3535	return 0;
3536}
3537
3538/**
3539 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3540 *
3541 * @adev: amdgpu_device pointer
3542 *
3543 * Main suspend function for hardware IPs.  The list of all the hardware
3544 * IPs that make up the asic is walked, clockgating is disabled and the
3545 * suspend callbacks are run.  suspend puts the hardware and software state
3546 * in each IP into a state suitable for suspend.
3547 * Returns 0 on success, negative error code on failure.
3548 */
3549static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3550{
3551	int i, r;
3552
3553	if (adev->in_s0ix)
3554		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3555
3556	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3557		if (!adev->ip_blocks[i].status.valid)
3558			continue;
3559		/* displays are handled in phase1 */
3560		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3561			continue;
3562		/* PSP lost connection when err_event_athub occurs */
3563		if (amdgpu_ras_intr_triggered() &&
3564		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3565			adev->ip_blocks[i].status.hw = false;
3566			continue;
3567		}
3568
3569		/* skip unnecessary suspend if we have not initialized them yet */
3570		if (!amdgpu_ip_member_of_hwini(
3571			    adev, adev->ip_blocks[i].version->type))
3572			continue;
3573
3574		/* skip suspend of gfx/mes and psp for S0ix
3575		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3576		 * like at runtime. PSP is also part of the always on hardware
3577		 * so no need to suspend it.
3578		 */
3579		if (adev->in_s0ix &&
3580		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3581		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3582		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3583			continue;
3584
3585		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3586		if (adev->in_s0ix &&
3587		    (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3588		     IP_VERSION(5, 0, 0)) &&
3589		    (adev->ip_blocks[i].version->type ==
3590		     AMD_IP_BLOCK_TYPE_SDMA))
3591			continue;
3592
3593		/* During cold boot, swPSP provides the IMU and RLC FW binaries to
3594		 * TOS. These are in TMR, hence PSP-TOS is expected to reload the
3595		 * FWs from this location, and RLC Autoload also gets loaded from
3596		 * here based on the PMFW -> PSP message during the re-init
3597		 * sequence. Therefore, psp suspend & resume should be skipped to
3598		 * avoid destroying the TMR and reloading FWs for IMU enabled APUs.
3599		 */
3600		if (amdgpu_in_reset(adev) &&
3601		    (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3602		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3603			continue;
3604
3605		/* XXX handle errors */
3606		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
3607		adev->ip_blocks[i].status.hw = false;
3608
3609		/* handle putting the SMC in the appropriate state */
3610		if (!amdgpu_sriov_vf(adev)) {
3611			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3612				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3613				if (r) {
3614					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3615							adev->mp1_state, r);
3616					return r;
3617				}
3618			}
3619		}
3620	}
3621
3622	return 0;
3623}
3624
3625/**
3626 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3627 *
3628 * @adev: amdgpu_device pointer
3629 *
3630 * Main suspend function for hardware IPs.  The list of all the hardware
3631 * IPs that make up the asic is walked, clockgating is disabled and the
3632 * suspend callbacks are run.  suspend puts the hardware and software state
3633 * in each IP into a state suitable for suspend.
3634 * Returns 0 on success, negative error code on failure.
3635 */
3636int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3637{
3638	int r;
3639
3640	if (amdgpu_sriov_vf(adev)) {
3641		amdgpu_virt_fini_data_exchange(adev);
3642		amdgpu_virt_request_full_gpu(adev, false);
3643	}
3644
3645	amdgpu_ttm_set_buffer_funcs_status(adev, false);
3646
3647	r = amdgpu_device_ip_suspend_phase1(adev);
3648	if (r)
3649		return r;
3650	r = amdgpu_device_ip_suspend_phase2(adev);
3651
3652	if (amdgpu_sriov_vf(adev))
3653		amdgpu_virt_release_full_gpu(adev, false);
3654
3655	return r;
3656}
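
/*
 * An illustrative sketch (not driver code) of how the two suspend phases
 * above are ordered by amdgpu_device_suspend() later in this file; error
 * handling and the SR-IOV/s0ix special cases are omitted:
 *
 *	amdgpu_device_ip_suspend_phase1(adev);	// displays (DCE) only
 *	amdgpu_amdkfd_suspend(adev, adev->in_runpm);
 *	amdgpu_device_evict_resources(adev);	// move BOs out of VRAM
 *	amdgpu_fence_driver_hw_fini(adev);
 *	amdgpu_device_ip_suspend_phase2(adev);	// all remaining IP blocks
 *
 * amdgpu_device_ip_suspend() itself simply runs the two phases back to
 * back, which is what the SR-IOV and GPU-reset paths need.
 */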
3657
3658static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3659{
3660	int i, r;
3661
3662	static enum amd_ip_block_type ip_order[] = {
3663		AMD_IP_BLOCK_TYPE_COMMON,
3664		AMD_IP_BLOCK_TYPE_GMC,
3665		AMD_IP_BLOCK_TYPE_PSP,
3666		AMD_IP_BLOCK_TYPE_IH,
3667	};
3668
3669	for (i = 0; i < adev->num_ip_blocks; i++) {
3670		int j;
3671		struct amdgpu_ip_block *block;
3672
3673		block = &adev->ip_blocks[i];
3674		block->status.hw = false;
3675
3676		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3677
3678			if (block->version->type != ip_order[j] ||
3679				!block->status.valid)
3680				continue;
3681
3682			r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
3683			if (r) {
3684				dev_err(adev->dev, "RE-INIT-early: %s failed\n",
3685					 block->version->funcs->name);
3686				return r;
3687			}
3688			block->status.hw = true;
3689		}
3690	}
3691
3692	return 0;
3693}
3694
3695static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3696{
3697	struct amdgpu_ip_block *block;
3698	int i, r = 0;
3699
3700	static enum amd_ip_block_type ip_order[] = {
3701		AMD_IP_BLOCK_TYPE_SMC,
3702		AMD_IP_BLOCK_TYPE_DCE,
3703		AMD_IP_BLOCK_TYPE_GFX,
3704		AMD_IP_BLOCK_TYPE_SDMA,
3705		AMD_IP_BLOCK_TYPE_MES,
3706		AMD_IP_BLOCK_TYPE_UVD,
3707		AMD_IP_BLOCK_TYPE_VCE,
3708		AMD_IP_BLOCK_TYPE_VCN,
3709		AMD_IP_BLOCK_TYPE_JPEG
3710	};
3711
3712	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3713		block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]);
3714
3715		if (!block)
3716			continue;
3717
3718		if (block->status.valid && !block->status.hw) {
3719			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) {
3720				r = amdgpu_ip_block_resume(block);
3721			} else {
3722				r = block->version->funcs->hw_init(block);
3723			}
3724
3725			if (r) {
3726				dev_err(adev->dev, "RE-INIT-late: %s failed\n",
3727					 block->version->funcs->name);
3728				break;
3729			}
3730			block->status.hw = true;
3731		}
3732	}
3733
3734	return r;
3735}
3736
3737/**
3738 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3739 *
3740 * @adev: amdgpu_device pointer
3741 *
3742 * First resume function for hardware IPs.  The list of all the hardware
3743 * IPs that make up the asic is walked and the resume callbacks are run for
3744 * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3745 * after a suspend and updates the software state as necessary.  This
3746 * function is also used for restoring the GPU after a GPU reset.
3747 * Returns 0 on success, negative error code on failure.
3748 */
3749static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3750{
3751	int i, r;
3752
3753	for (i = 0; i < adev->num_ip_blocks; i++) {
3754		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3755			continue;
3756		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3757		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3758		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3759		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3760
3761			r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3762			if (r)
3763				return r;
3764		}
3765	}
3766
3767	return 0;
3768}
3769
3770/**
3771 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3772 *
3773 * @adev: amdgpu_device pointer
3774 *
3775 * Second resume function for hardware IPs.  The list of all the hardware
3776 * IPs that make up the asic is walked and the resume callbacks are run for
3777 * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3778 * functional state after a suspend and updates the software state as
3779 * necessary.  This function is also used for restoring the GPU after a GPU
3780 * reset.
3781 * Returns 0 on success, negative error code on failure.
3782 */
3783static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3784{
3785	int i, r;
3786
3787	for (i = 0; i < adev->num_ip_blocks; i++) {
3788		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3789			continue;
3790		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3791		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3792		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3793		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
3794		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3795			continue;
3796		r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3797		if (r)
3798			return r;
3799	}
3800
3801	return 0;
3802}
3803
3804/**
3805 * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
3806 *
3807 * @adev: amdgpu_device pointer
3808 *
3809 * Third resume function for hardware IPs.  The list of all the hardware
3810 * IPs that make up the asic is walked and the resume callbacks are run for
3811 * all DCE blocks.  resume puts the hardware into a functional state after a suspend
3812 * and updates the software state as necessary.  This function is also used
3813 * for restoring the GPU after a GPU reset.
3814 *
3815 * Returns 0 on success, negative error code on failure.
3816 */
3817static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
3818{
3819	int i, r;
3820
3821	for (i = 0; i < adev->num_ip_blocks; i++) {
3822		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3823			continue;
3824		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
3825			r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3826			if (r)
3827				return r;
3828		}
3829	}
3830
3831	return 0;
3832}
3833
3834/**
3835 * amdgpu_device_ip_resume - run resume for hardware IPs
3836 *
3837 * @adev: amdgpu_device pointer
3838 *
3839 * Main resume function for hardware IPs.  The hardware IPs
3840 * are split into three resume phases because they are also
3841 * used in recovering from a GPU reset, and some additional
3842 * steps need to be taken between them.  In this case (S3/S4) they are
3843 * run sequentially.
3844 * Returns 0 on success, negative error code on failure.
3845 */
3846static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3847{
3848	int r;
3849
3850	r = amdgpu_device_ip_resume_phase1(adev);
3851	if (r)
3852		return r;
3853
3854	r = amdgpu_device_fw_loading(adev);
3855	if (r)
3856		return r;
3857
3858	r = amdgpu_device_ip_resume_phase2(adev);
3859
3860	if (adev->mman.buffer_funcs_ring->sched.ready)
3861		amdgpu_ttm_set_buffer_funcs_status(adev, true);
3862
3863	if (r)
3864		return r;
3865
3866	amdgpu_fence_driver_hw_init(adev);
3867
3868	r = amdgpu_device_ip_resume_phase3(adev);
3869
3870	return r;
3871}
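
/*
 * An illustrative summary, not additional driver code: the resume order
 * implemented by amdgpu_device_ip_resume() above is
 *
 *	phase1   -> COMMON, GMC, IH (plus PSP when running as an SR-IOV VF)
 *	firmware -> amdgpu_device_fw_loading()
 *	phase2   -> every remaining block except DCE and PSP
 *	fences   -> amdgpu_fence_driver_hw_init()
 *	phase3   -> DCE, so displays come up only once the rest is ready
 */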
3872
3873/**
3874 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3875 *
3876 * @adev: amdgpu_device pointer
3877 *
3878 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3879 */
3880static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3881{
3882	if (amdgpu_sriov_vf(adev)) {
3883		if (adev->is_atom_fw) {
3884			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3885				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3886		} else {
3887			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3888				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3889		}
3890
3891		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3892			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3893	}
3894}
3895
3896/**
3897 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3898 *
3899 * @asic_type: AMD asic type
3900 *
3901 * Check if there is DC (new modesetting infrastructure) support for an asic.
3902 * Returns true if DC has support, false if not.
3903 */
3904bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3905{
3906	switch (asic_type) {
3907#ifdef CONFIG_DRM_AMDGPU_SI
3908	case CHIP_HAINAN:
3909#endif
3910	case CHIP_TOPAZ:
3911		/* chips with no display hardware */
3912		return false;
3913#if defined(CONFIG_DRM_AMD_DC)
3914	case CHIP_TAHITI:
3915	case CHIP_PITCAIRN:
3916	case CHIP_VERDE:
3917	case CHIP_OLAND:
3918		/*
3919		 * We have systems in the wild with these ASICs that require
3920		 * LVDS and VGA support which is not supported with DC.
3921		 *
3922		 * Fallback to the non-DC driver here by default so as not to
3923		 * cause regressions.
3924		 */
3925#if defined(CONFIG_DRM_AMD_DC_SI)
3926		return amdgpu_dc > 0;
3927#else
3928		return false;
3929#endif
3930	case CHIP_BONAIRE:
3931	case CHIP_KAVERI:
3932	case CHIP_KABINI:
3933	case CHIP_MULLINS:
3934		/*
3935		 * We have systems in the wild with these ASICs that require
3936		 * VGA support which is not supported with DC.
3937		 *
3938		 * Fallback to the non-DC driver here by default so as not to
3939		 * cause regressions.
3940		 */
3941		return amdgpu_dc > 0;
3942	default:
3943		return amdgpu_dc != 0;
3944#else
3945	default:
3946		if (amdgpu_dc > 0)
3947			DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
3948		return false;
3949#endif
3950	}
3951}
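
/*
 * A usage sketch, not driver code: the checks above are driven by the
 * amdgpu.dc module parameter (amdgpu_dc, default -1/auto). On the listed
 * SI and CIK parts DC stays off unless explicitly requested:
 *
 *	modprobe amdgpu dc=1	# opt in to DC on e.g. CHIP_BONAIRE
 *	modprobe amdgpu dc=0	# force the legacy display path anywhere
 *
 * while the auto default enables DC on all other DC-capable ASICs.
 */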
3952
3953/**
3954 * amdgpu_device_has_dc_support - check if dc is supported
3955 *
3956 * @adev: amdgpu_device pointer
3957 *
3958 * Returns true for supported, false for not supported
3959 */
3960bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3961{
3962	if (adev->enable_virtual_display ||
3963	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3964		return false;
3965
3966	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3967}
3968
3969static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3970{
3971	struct amdgpu_device *adev =
3972		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3973	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3974
3975	/* It's a bug to not have a hive within this function */
3976	if (WARN_ON(!hive))
3977		return;
3978
3979	/*
3980	 * Use task barrier to synchronize all xgmi reset works across the
3981	 * hive. task_barrier_enter and task_barrier_exit will block
3982	 * until all the threads running the xgmi reset works reach
3983	 * those points. task_barrier_full will do both blocks.
3984	 */
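	/*
	 * As a sketch (not driver code), the resulting timeline for a
	 * two-node hive taking the BACO path below is:
	 *
	 *	GPU0: --enter--| baco_enter |--exit--| baco_exit |-->
	 *	GPU1: --enter--| baco_enter |--exit--| baco_exit |-->
	 *	              ^barrier              ^barrier
	 *
	 * i.e. every node starts BACO entry only after all nodes have
	 * reached the first barrier, and all leave BACO together after the
	 * second, so the whole hive stays in reset at the same time.
	 */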
3985	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3986
3987		task_barrier_enter(&hive->tb);
3988		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3989
3990		if (adev->asic_reset_res)
3991			goto fail;
3992
3993		task_barrier_exit(&hive->tb);
3994		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3995
3996		if (adev->asic_reset_res)
3997			goto fail;
3998
3999		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
4000	} else {
4001
4002		task_barrier_full(&hive->tb);
4003		adev->asic_reset_res =  amdgpu_asic_reset(adev);
4004	}
4005
4006fail:
4007	if (adev->asic_reset_res)
4008		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
4009			 adev->asic_reset_res, adev_to_drm(adev)->unique);
4010	amdgpu_put_xgmi_hive(hive);
4011}
4012
4013static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
4014{
4015	char *input = amdgpu_lockup_timeout;
4016	char *timeout_setting = NULL;
4017	int index = 0;
4018	long timeout;
4019	int ret = 0;
4020
4021	/*
4022	 * By default the timeout for non-compute jobs is 10000 ms
4023	 * and 60000 ms for compute jobs.
4024	 * In SR-IOV or passthrough mode, the timeout for compute
4025	 * jobs is 60000 ms by default.
4026	 */
4027	adev->gfx_timeout = msecs_to_jiffies(10000);
4028	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
4029	if (amdgpu_sriov_vf(adev))
4030		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
4031					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
4032	else
4033		adev->compute_timeout =  msecs_to_jiffies(60000);
4034
4035	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
4036		while ((timeout_setting = strsep(&input, ",")) &&
4037				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
4038			ret = kstrtol(timeout_setting, 0, &timeout);
4039			if (ret)
4040				return ret;
4041
4042			if (timeout == 0) {
4043				index++;
4044				continue;
4045			} else if (timeout < 0) {
4046				timeout = MAX_SCHEDULE_TIMEOUT;
4047				dev_warn(adev->dev, "lockup timeout disabled");
4048				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
4049			} else {
4050				timeout = msecs_to_jiffies(timeout);
4051			}
4052
4053			switch (index++) {
4054			case 0:
4055				adev->gfx_timeout = timeout;
4056				break;
4057			case 1:
4058				adev->compute_timeout = timeout;
4059				break;
4060			case 2:
4061				adev->sdma_timeout = timeout;
4062				break;
4063			case 3:
4064				adev->video_timeout = timeout;
4065				break;
4066			default:
4067				break;
4068			}
4069		}
4070		/*
4071		 * There is only one value specified and
4072		 * it should apply to all non-compute jobs.
4073		 */
4074		if (index == 1) {
4075			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
4076			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
4077				adev->compute_timeout = adev->gfx_timeout;
4078		}
4079	}
4080
4081	return ret;
4082}
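
/*
 * An illustrative mapping (not driver code) of the comma-separated
 * amdgpu.lockup_timeout module parameter onto the fields parsed above.
 * Values are in ms; 0 keeps the default, a negative value disables the
 * timeout:
 *
 *	amdgpu.lockup_timeout=10000,60000,-1,5000
 *		gfx_timeout     -> 10 s
 *		compute_timeout -> 60 s
 *		sdma_timeout    -> disabled (MAX_SCHEDULE_TIMEOUT)
 *		video_timeout   -> 5 s
 *
 * A single value, e.g. lockup_timeout=10000, applies to all non-compute
 * queues, and also to compute under SR-IOV or passthrough.
 */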
4083
4084/**
4085 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
4086 *
4087 * @adev: amdgpu_device pointer
4088 *
4089 * RAM is direct mapped to the GPU if the IOMMU is disabled or in passthrough mode
4090 */
4091static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
4092{
4093	struct iommu_domain *domain;
4094
4095	domain = iommu_get_domain_for_dev(adev->dev);
4096	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
4097		adev->ram_is_direct_mapped = true;
4098}
4099
4100#if defined(CONFIG_HSA_AMD_P2P)
4101/**
4102 * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
4103 *
4104 * @adev: amdgpu_device pointer
4105 *
4106 * Returns true if the IOMMU is remapping the BAR address.
4107 */
4108static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
4109{
4110	struct iommu_domain *domain;
4111
4112	domain = iommu_get_domain_for_dev(adev->dev);
4113	if (domain && (domain->type == IOMMU_DOMAIN_DMA ||
4114		domain->type ==	IOMMU_DOMAIN_DMA_FQ))
4115		return true;
4116
4117	return false;
4118}
4119#endif
4120
4121static const struct attribute *amdgpu_dev_attributes[] = {
4122	&dev_attr_pcie_replay_count.attr,
4123	NULL
4124};
4125
4126static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
4127{
4128	if (amdgpu_mcbp == 1)
4129		adev->gfx.mcbp = true;
4130	else if (amdgpu_mcbp == 0)
4131		adev->gfx.mcbp = false;
4132
4133	if (amdgpu_sriov_vf(adev))
4134		adev->gfx.mcbp = true;
4135
4136	if (adev->gfx.mcbp)
4137		DRM_INFO("MCBP is enabled\n");
4138}
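
/*
 * A usage sketch, not driver code: mid command buffer preemption (MCBP)
 * follows the amdgpu.mcbp module parameter handled above:
 *
 *	modprobe amdgpu mcbp=1	# force MCBP on
 *	modprobe amdgpu mcbp=0	# force MCBP off
 *
 * Any other value keeps the previously selected default, and SR-IOV VFs
 * always run with MCBP enabled regardless of the parameter.
 */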
4139
4140/**
4141 * amdgpu_device_init - initialize the driver
4142 *
4143 * @adev: amdgpu_device pointer
4144 * @flags: driver flags
4145 *
4146 * Initializes the driver info and hw (all asics).
4147 * Returns 0 for success or an error on failure.
4148 * Called at driver startup.
4149 */
4150int amdgpu_device_init(struct amdgpu_device *adev,
4151		       uint32_t flags)
4152{
4153	struct drm_device *ddev = adev_to_drm(adev);
4154	struct pci_dev *pdev = adev->pdev;
4155	int r, i;
4156	bool px = false;
4157	u32 max_MBps;
4158	int tmp;
4159
4160	adev->shutdown = false;
4161	adev->flags = flags;
4162
4163	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
4164		adev->asic_type = amdgpu_force_asic_type;
4165	else
4166		adev->asic_type = flags & AMD_ASIC_MASK;
4167
4168	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
4169	if (amdgpu_emu_mode == 1)
4170		adev->usec_timeout *= 10;
4171	adev->gmc.gart_size = 512 * 1024 * 1024;
4172	adev->accel_working = false;
4173	adev->num_rings = 0;
4174	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
4175	adev->mman.buffer_funcs = NULL;
4176	adev->mman.buffer_funcs_ring = NULL;
4177	adev->vm_manager.vm_pte_funcs = NULL;
4178	adev->vm_manager.vm_pte_num_scheds = 0;
4179	adev->gmc.gmc_funcs = NULL;
4180	adev->harvest_ip_mask = 0x0;
4181	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
4182	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
4183
4184	adev->smc_rreg = &amdgpu_invalid_rreg;
4185	adev->smc_wreg = &amdgpu_invalid_wreg;
4186	adev->pcie_rreg = &amdgpu_invalid_rreg;
4187	adev->pcie_wreg = &amdgpu_invalid_wreg;
4188	adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
4189	adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
4190	adev->pciep_rreg = &amdgpu_invalid_rreg;
4191	adev->pciep_wreg = &amdgpu_invalid_wreg;
4192	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
4193	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
4194	adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
4195	adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
4196	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
4197	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
4198	adev->didt_rreg = &amdgpu_invalid_rreg;
4199	adev->didt_wreg = &amdgpu_invalid_wreg;
4200	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
4201	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
4202	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
4203	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
4204
4205	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
4206		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
4207		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
4208
4209	/* all mutex initialization is done here so the
4210	 * functions can be called again without locking issues
4211	 */
4212	mutex_init(&adev->firmware.mutex);
4213	mutex_init(&adev->pm.mutex);
4214	mutex_init(&adev->gfx.gpu_clock_mutex);
4215	mutex_init(&adev->srbm_mutex);
4216	mutex_init(&adev->gfx.pipe_reserve_mutex);
4217	mutex_init(&adev->gfx.gfx_off_mutex);
4218	mutex_init(&adev->gfx.partition_mutex);
4219	mutex_init(&adev->grbm_idx_mutex);
4220	mutex_init(&adev->mn_lock);
4221	mutex_init(&adev->virt.vf_errors.lock);
4222	mutex_init(&adev->virt.rlcg_reg_lock);
4223	hash_init(adev->mn_hash);
4224	mutex_init(&adev->psp.mutex);
4225	mutex_init(&adev->notifier_lock);
4226	mutex_init(&adev->pm.stable_pstate_ctx_lock);
4227	mutex_init(&adev->benchmark_mutex);
4228	mutex_init(&adev->gfx.reset_sem_mutex);
4229	/* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
4230	mutex_init(&adev->enforce_isolation_mutex);
4231	mutex_init(&adev->gfx.kfd_sch_mutex);
4232
4233	amdgpu_device_init_apu_flags(adev);
4234
4235	r = amdgpu_device_check_arguments(adev);
4236	if (r)
4237		return r;
4238
4239	spin_lock_init(&adev->mmio_idx_lock);
4240	spin_lock_init(&adev->smc_idx_lock);
4241	spin_lock_init(&adev->pcie_idx_lock);
4242	spin_lock_init(&adev->uvd_ctx_idx_lock);
4243	spin_lock_init(&adev->didt_idx_lock);
4244	spin_lock_init(&adev->gc_cac_idx_lock);
4245	spin_lock_init(&adev->se_cac_idx_lock);
4246	spin_lock_init(&adev->audio_endpt_idx_lock);
4247	spin_lock_init(&adev->mm_stats.lock);
4248	spin_lock_init(&adev->wb.lock);
4249
4250	INIT_LIST_HEAD(&adev->reset_list);
4251
4252	INIT_LIST_HEAD(&adev->ras_list);
4253
4254	INIT_LIST_HEAD(&adev->pm.od_kobj_list);
4255
4256	INIT_DELAYED_WORK(&adev->delayed_init_work,
4257			  amdgpu_device_delayed_init_work_handler);
4258	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
4259			  amdgpu_device_delay_enable_gfx_off);
4260	/*
4261	 * Initialize the enforce_isolation work structures for each XCP
4262	 * partition.  This work handler is responsible for enforcing shader
4263	 * isolation on AMD GPUs.  It counts the number of emitted fences for
4264	 * each GFX and compute ring.  If there are any fences, it schedules
4265	 * the `enforce_isolation_work` to be run after a delay.  If there are
4266	 * no fences, it signals the Kernel Fusion Driver (KFD) to resume the
4267	 * runqueue.
4268	 */
4269	for (i = 0; i < MAX_XCP; i++) {
4270		INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work,
4271				  amdgpu_gfx_enforce_isolation_handler);
4272		adev->gfx.enforce_isolation[i].adev = adev;
4273		adev->gfx.enforce_isolation[i].xcp_id = i;
4274	}
4275
4276	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
4277
4278	adev->gfx.gfx_off_req_count = 1;
4279	adev->gfx.gfx_off_residency = 0;
4280	adev->gfx.gfx_off_entrycount = 0;
4281	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
4282
4283	atomic_set(&adev->throttling_logging_enabled, 1);
4284	/*
4285	 * If throttling continues, logging will be performed every minute
4286	 * to avoid log flooding. "-1" is subtracted since the thermal
4287	 * throttling interrupt comes every second. Thus, the total logging
4288	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
4289	 * for the throttling interrupt) = 60 seconds.
4290	 */
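	/*
	 * In ratelimit terms, the init below uses interval = (60 - 1) * HZ
	 * with a burst of 1: at most one throttling message per 59 s window,
	 * which combined with the 1 s interrupt cadence yields the one
	 * message per minute described above.
	 */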
4291	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
4292	ratelimit_state_init(&adev->virt.ras_telemetry_rs, 5 * HZ, 1);
4293
4294	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
4295	ratelimit_set_flags(&adev->virt.ras_telemetry_rs, RATELIMIT_MSG_ON_RELEASE);
4296
4297	/* Registers mapping */
4298	/* TODO: block userspace mapping of io register */
4299	if (adev->asic_type >= CHIP_BONAIRE) {
4300		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
4301		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
4302	} else {
4303		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
4304		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
4305	}
4306
4307	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
4308		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
4309
4310	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
4311	if (!adev->rmmio)
4312		return -ENOMEM;
4313
4314	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
4315	DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
4316
4317	/*
4318	 * The reset domain needs to be present early, before the XGMI hive is
4319	 * discovered (if any) and initialized, to use the reset sem and
4320	 * in_gpu_reset flag early on during init and before calling RREG32.
4321	 */
4322	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
4323	if (!adev->reset_domain)
4324		return -ENOMEM;
4325
4326	/* detect hw virtualization here */
4327	amdgpu_detect_virtualization(adev);
4328
4329	amdgpu_device_get_pcie_info(adev);
4330
4331	r = amdgpu_device_get_job_timeout_settings(adev);
4332	if (r) {
4333		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
4334		return r;
4335	}
4336
4337	amdgpu_device_set_mcbp(adev);
4338
4339	/*
4340	 * By default, use default mode where all blocks are expected to be
4341	 * initialized. At present a 'swinit' of blocks is required to be
4342	 * completed before the need for a different level is detected.
4343	 */
4344	amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
4345	/* early init functions */
4346	r = amdgpu_device_ip_early_init(adev);
4347	if (r)
4348		return r;
4349
4350	/* Get rid of things like offb */
4351	r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
4352	if (r)
4353		return r;
4354
4355	/* Enable TMZ based on IP_VERSION */
4356	amdgpu_gmc_tmz_set(adev);
4357
4358	if (amdgpu_sriov_vf(adev) &&
4359	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
4360		/* VF MMIO access (except mailbox range) from CPU
4361		 * will be blocked during sriov runtime
4362		 */
4363		adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
4364
4365	amdgpu_gmc_noretry_set(adev);
4366	/* Need to get xgmi info early to decide the reset behavior */
4367	if (adev->gmc.xgmi.supported) {
4368		r = adev->gfxhub.funcs->get_xgmi_info(adev);
4369		if (r)
4370			return r;
4371	}
4372
4373	/* enable PCIE atomic ops */
4374	if (amdgpu_sriov_vf(adev)) {
4375		if (adev->virt.fw_reserve.p_pf2vf)
4376			adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
4377						      adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
4378				(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4379	/* APUs with gfx9 onwards don't rely on PCIe atomics; their internal
4380	 * path natively supports atomics, so set have_atomics_support to true.
4381	 */
4382	} else if ((adev->flags & AMD_IS_APU) &&
4383		   (amdgpu_ip_version(adev, GC_HWIP, 0) >
4384		    IP_VERSION(9, 0, 0))) {
4385		adev->have_atomics_support = true;
4386	} else {
4387		adev->have_atomics_support =
4388			!pci_enable_atomic_ops_to_root(adev->pdev,
4389					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
4390					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4391	}
4392
4393	if (!adev->have_atomics_support)
4394		dev_info(adev->dev, "PCIE atomic ops are not supported\n");
4395
4396	/* doorbell bar mapping and doorbell index init */
4397	amdgpu_doorbell_init(adev);
4398
4399	if (amdgpu_emu_mode == 1) {
4400		/* post the asic on emulation mode */
4401		emu_soc_asic_init(adev);
4402		goto fence_driver_init;
4403	}
4404
4405	amdgpu_reset_init(adev);
4406
4407	/* detect if we are with an SRIOV vbios */
4408	if (adev->bios)
4409		amdgpu_device_detect_sriov_bios(adev);
4410
4411	/* check if we need to reset the asic
4412	 *  E.g., driver was not cleanly unloaded previously, etc.
4413	 */
4414	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
4415		if (adev->gmc.xgmi.num_physical_nodes) {
4416			dev_info(adev->dev, "Pending hive reset.\n");
4417			amdgpu_set_init_level(adev,
4418					      AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
4419		} else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
4420				   !amdgpu_device_has_display_hardware(adev)) {
4421					r = psp_gpu_reset(adev);
4422		} else {
4423			tmp = amdgpu_reset_method;
4424			/* It should do a default reset when loading or reloading the driver,
4425			 * regardless of the module parameter reset_method.
4426			 */
4427			amdgpu_reset_method = AMD_RESET_METHOD_NONE;
4428			r = amdgpu_asic_reset(adev);
4429			amdgpu_reset_method = tmp;
4430		}
4431
4432		if (r) {
4433			dev_err(adev->dev, "asic reset on init failed\n");
4434			goto failed;
4435		}
4436	}
4437
4438	/* Post card if necessary */
4439	if (amdgpu_device_need_post(adev)) {
4440		if (!adev->bios) {
4441			dev_err(adev->dev, "no vBIOS found\n");
4442			r = -EINVAL;
4443			goto failed;
4444		}
4445		DRM_INFO("GPU posting now...\n");
4446		r = amdgpu_device_asic_init(adev);
4447		if (r) {
4448			dev_err(adev->dev, "gpu post error!\n");
4449			goto failed;
4450		}
4451	}
4452
4453	if (adev->bios) {
4454		if (adev->is_atom_fw) {
4455			/* Initialize clocks */
4456			r = amdgpu_atomfirmware_get_clock_info(adev);
4457			if (r) {
4458				dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
4459				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4460				goto failed;
4461			}
4462		} else {
4463			/* Initialize clocks */
4464			r = amdgpu_atombios_get_clock_info(adev);
4465			if (r) {
4466				dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
4467				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4468				goto failed;
4469			}
4470			/* init i2c buses */
4471			if (!amdgpu_device_has_dc_support(adev))
4472				amdgpu_atombios_i2c_init(adev);
4473		}
4474	}
4475
4476fence_driver_init:
4477	/* Fence driver */
4478	r = amdgpu_fence_driver_sw_init(adev);
4479	if (r) {
4480		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
4481		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
4482		goto failed;
4483	}
4484
4485	/* init the mode config */
4486	drm_mode_config_init(adev_to_drm(adev));
4487
4488	r = amdgpu_device_ip_init(adev);
4489	if (r) {
4490		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
4491		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
4492		goto release_ras_con;
4493	}
4494
4495	amdgpu_fence_driver_hw_init(adev);
4496
4497	dev_info(adev->dev,
4498		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
4499			adev->gfx.config.max_shader_engines,
4500			adev->gfx.config.max_sh_per_se,
4501			adev->gfx.config.max_cu_per_sh,
4502			adev->gfx.cu_info.number);
4503
4504	adev->accel_working = true;
4505
4506	amdgpu_vm_check_compute_bug(adev);
4507
4508	/* Initialize the buffer migration limit. */
4509	if (amdgpu_moverate >= 0)
4510		max_MBps = amdgpu_moverate;
4511	else
4512		max_MBps = 8; /* Allow 8 MB/s. */
4513	/* Get a log2 for easy divisions. */
4514	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
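	/* For example, the default max_MBps = 8 yields log2_max_MBps = 3,
	 * letting later consumers divide by the rate with a cheap shift,
	 * roughly bytes >> log2_max_MBps, instead of a 64-bit division.
	 */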
4515
4516	/*
4517	 * Register the gpu instance before amdgpu_device_enable_mgpu_fan_boost.
4518	 * Otherwise the mgpu fan boost feature will be skipped because the
4519	 * gpu instance count is too low.
4520	 */
4521	amdgpu_register_gpu_instance(adev);
4522
4523	/* enable clockgating, etc. after ib tests, etc. since some blocks require
4524	 * explicit gating rather than handling it automatically.
4525	 */
4526	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
4527		r = amdgpu_device_ip_late_init(adev);
4528		if (r) {
4529			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4530			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4531			goto release_ras_con;
4532		}
4533		/* must succeed. */
4534		amdgpu_ras_resume(adev);
4535		queue_delayed_work(system_wq, &adev->delayed_init_work,
4536				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4537	}
4538
4539	if (amdgpu_sriov_vf(adev)) {
4540		amdgpu_virt_release_full_gpu(adev, true);
4541		flush_delayed_work(&adev->delayed_init_work);
4542	}
4543
4544	/*
4545	 * Register these sysfs interfaces after `late_init`, since some of
4546	 * the operations performed in `late_init` might affect how the
4547	 * sysfs interfaces are created.
4548	 */
4549	r = amdgpu_atombios_sysfs_init(adev);
4550	if (r)
4551		drm_err(&adev->ddev,
4552			"registering atombios sysfs failed (%d).\n", r);
4553
4554	r = amdgpu_pm_sysfs_init(adev);
4555	if (r)
4556		DRM_ERROR("registering pm sysfs failed (%d).\n", r);
4557
4558	r = amdgpu_ucode_sysfs_init(adev);
4559	if (r) {
4560		adev->ucode_sysfs_en = false;
4561		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
4562	} else
4563		adev->ucode_sysfs_en = true;
4564
4565	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
4566	if (r)
4567		dev_err(adev->dev, "Could not create amdgpu device attr\n");
4568
4569	r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
4570	if (r)
4571		dev_err(adev->dev,
4572			"Could not create amdgpu board attributes\n");
4573
4574	amdgpu_fru_sysfs_init(adev);
4575	amdgpu_reg_state_sysfs_init(adev);
4576	amdgpu_xcp_cfg_sysfs_init(adev);
4577
4578	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4579		r = amdgpu_pmu_init(adev);
4580	if (r)
4581		dev_err(adev->dev, "amdgpu_pmu_init failed\n");
4582
4583	/* Keep stored pci config space at hand for restore after a sudden PCI error */
4584	if (amdgpu_device_cache_pci_state(adev->pdev))
4585		pci_restore_state(pdev);
4586
4587	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
4588	/* this will fail for cards that aren't VGA class devices, just
4589	 * ignore it
4590	 */
4591	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4592		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4593
4594	px = amdgpu_device_supports_px(ddev);
4595
4596	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4597				apple_gmux_detect(NULL, NULL)))
4598		vga_switcheroo_register_client(adev->pdev,
4599					       &amdgpu_switcheroo_ops, px);
4600
4601	if (px)
4602		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4603
4604	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
4605		amdgpu_xgmi_reset_on_init(adev);
4606
4607	amdgpu_device_check_iommu_direct_map(adev);
4608
4609	return 0;
4610
4611release_ras_con:
4612	if (amdgpu_sriov_vf(adev))
4613		amdgpu_virt_release_full_gpu(adev, true);
4614
4615	/* failed in exclusive mode due to timeout */
4616	if (amdgpu_sriov_vf(adev) &&
4617		!amdgpu_sriov_runtime(adev) &&
4618		amdgpu_virt_mmio_blocked(adev) &&
4619		!amdgpu_virt_wait_reset(adev)) {
4620		dev_err(adev->dev, "VF exclusive mode timeout\n");
4621		/* Don't send request since VF is inactive. */
4622		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4623		adev->virt.ops = NULL;
4624		r = -EAGAIN;
4625	}
4626	amdgpu_release_ras_context(adev);
4627
4628failed:
4629	amdgpu_vf_error_trans_all(adev);
4630
4631	return r;
4632}
4633
4634static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4635{
4636
4637	/* Clear all CPU mappings pointing to this device */
4638	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4639
4640	/* Unmap all mapped bars - Doorbell, registers and VRAM */
4641	amdgpu_doorbell_fini(adev);
4642
4643	iounmap(adev->rmmio);
4644	adev->rmmio = NULL;
4645	if (adev->mman.aper_base_kaddr)
4646		iounmap(adev->mman.aper_base_kaddr);
4647	adev->mman.aper_base_kaddr = NULL;
4648
4649	/* Memory manager related */
4650	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4651		arch_phys_wc_del(adev->gmc.vram_mtrr);
4652		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4653	}
4654}
4655
4656/**
4657 * amdgpu_device_fini_hw - tear down the driver
4658 *
4659 * @adev: amdgpu_device pointer
4660 *
4661 * Tear down the driver info (all asics).
4662 * Called at driver shutdown.
4663 */
4664void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4665{
4666	dev_info(adev->dev, "amdgpu: finishing device.\n");
4667	flush_delayed_work(&adev->delayed_init_work);
4668
4669	if (adev->mman.initialized)
4670		drain_workqueue(adev->mman.bdev.wq);
4671	adev->shutdown = true;
4672
4673	/* make sure IB test finished before entering exclusive mode
4674	 * to avoid preemption on IB test
4675	 */
4676	if (amdgpu_sriov_vf(adev)) {
4677		amdgpu_virt_request_full_gpu(adev, false);
4678		amdgpu_virt_fini_data_exchange(adev);
4679	}
4680
4681	/* disable all interrupts */
4682	amdgpu_irq_disable_all(adev);
4683	if (adev->mode_info.mode_config_initialized) {
4684		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4685			drm_helper_force_disable_all(adev_to_drm(adev));
4686		else
4687			drm_atomic_helper_shutdown(adev_to_drm(adev));
4688	}
4689	amdgpu_fence_driver_hw_fini(adev);
4690
4691	if (adev->pm.sysfs_initialized)
4692		amdgpu_pm_sysfs_fini(adev);
4693	if (adev->ucode_sysfs_en)
4694		amdgpu_ucode_sysfs_fini(adev);
4695	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4696	amdgpu_fru_sysfs_fini(adev);
4697
4698	amdgpu_reg_state_sysfs_fini(adev);
4699	amdgpu_xcp_cfg_sysfs_fini(adev);
4700
4701	/* ras features must be disabled before hw fini */
4702	amdgpu_ras_pre_fini(adev);
4703
4704	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4705
4706	amdgpu_device_ip_fini_early(adev);
4707
4708	amdgpu_irq_fini_hw(adev);
4709
4710	if (adev->mman.initialized)
4711		ttm_device_clear_dma_mappings(&adev->mman.bdev);
4712
4713	amdgpu_gart_dummy_page_fini(adev);
4714
4715	if (drm_dev_is_unplugged(adev_to_drm(adev)))
4716		amdgpu_device_unmap_mmio(adev);
4717
4718}
4719
4720void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4721{
4722	int idx;
4723	bool px;
4724
4725	amdgpu_device_ip_fini(adev);
4726	amdgpu_fence_driver_sw_fini(adev);
4727	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4728	adev->accel_working = false;
4729	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4730
4731	amdgpu_reset_fini(adev);
4732
4733	/* free i2c buses */
4734	if (!amdgpu_device_has_dc_support(adev))
4735		amdgpu_i2c_fini(adev);
4736
4737	if (amdgpu_emu_mode != 1)
4738		amdgpu_atombios_fini(adev);
4739
4740	kfree(adev->bios);
4741	adev->bios = NULL;
4742
4743	kfree(adev->fru_info);
4744	adev->fru_info = NULL;
4745
4746	px = amdgpu_device_supports_px(adev_to_drm(adev));
4747
4748	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4749				apple_gmux_detect(NULL, NULL)))
4750		vga_switcheroo_unregister_client(adev->pdev);
4751
4752	if (px)
4753		vga_switcheroo_fini_domain_pm_ops(adev->dev);
4754
4755	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4756		vga_client_unregister(adev->pdev);
4757
4758	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4759
4760		iounmap(adev->rmmio);
4761		adev->rmmio = NULL;
4762		amdgpu_doorbell_fini(adev);
4763		drm_dev_exit(idx);
4764	}
4765
4766	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4767		amdgpu_pmu_fini(adev);
4768	if (adev->mman.discovery_bin)
4769		amdgpu_discovery_fini(adev);
4770
4771	amdgpu_reset_put_reset_domain(adev->reset_domain);
4772	adev->reset_domain = NULL;
4773
4774	kfree(adev->pci_state);
4775
4776}
4777
4778/**
4779 * amdgpu_device_evict_resources - evict device resources
4780 * @adev: amdgpu device object
4781 *
4782 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4783 * of the vram memory type. Mainly used for evicting device resources
4784 * at suspend time.
4785 *
4786 */
4787static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4788{
4789	int ret;
4790
4791	/* No need to evict vram on APUs for suspend to ram or s2idle */
4792	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4793		return 0;
4794
4795	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4796	if (ret)
4797		DRM_WARN("evicting device resources failed\n");
4798	return ret;
4799}
4800
4801/*
4802 * Suspend & resume.
4803 */
4804/**
4805 * amdgpu_device_prepare - prepare for device suspend
4806 *
4807 * @dev: drm dev pointer
4808 *
4809 * Prepare to put the hw in the suspend state (all asics).
4810 * Returns 0 for success or an error on failure.
4811 * Called at driver suspend.
4812 */
4813int amdgpu_device_prepare(struct drm_device *dev)
4814{
4815	struct amdgpu_device *adev = drm_to_adev(dev);
4816	int i, r;
4817
4818	amdgpu_choose_low_power_state(adev);
4819
4820	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4821		return 0;
4822
4823	/* Evict the majority of BOs before starting suspend sequence */
4824	r = amdgpu_device_evict_resources(adev);
4825	if (r)
4826		goto unprepare;
4827
4828	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
4829
4830	for (i = 0; i < adev->num_ip_blocks; i++) {
4831		if (!adev->ip_blocks[i].status.valid)
4832			continue;
4833		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
4834			continue;
4835		r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
4836		if (r)
4837			goto unprepare;
4838	}
4839
4840	return 0;
4841
4842unprepare:
4843	adev->in_s0ix = adev->in_s3 = false;
4844
4845	return r;
4846}
4847
4848/**
4849 * amdgpu_device_suspend - initiate device suspend
4850 *
4851 * @dev: drm dev pointer
4852 * @notify_clients: notify in-kernel DRM clients
4853 *
4854 * Puts the hw in the suspend state (all asics).
4855 * Returns 0 for success or an error on failure.
4856 * Called at driver suspend.
4857 */
4858int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
4859{
4860	struct amdgpu_device *adev = drm_to_adev(dev);
4861	int r = 0;
4862
4863	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4864		return 0;
4865
4866	adev->in_suspend = true;
4867
4868	if (amdgpu_sriov_vf(adev)) {
4869		amdgpu_virt_fini_data_exchange(adev);
4870		r = amdgpu_virt_request_full_gpu(adev, false);
4871		if (r)
4872			return r;
4873	}
4874
4875	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4876		DRM_WARN("smart shift update failed\n");
4877
4878	if (notify_clients)
4879		drm_client_dev_suspend(adev_to_drm(adev), false);
4880
4881	cancel_delayed_work_sync(&adev->delayed_init_work);
4882
4883	amdgpu_ras_suspend(adev);
4884
4885	amdgpu_device_ip_suspend_phase1(adev);
4886
4887	if (!adev->in_s0ix)
4888		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4889
4890	r = amdgpu_device_evict_resources(adev);
4891	if (r)
4892		return r;
4893
4894	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4895
4896	amdgpu_fence_driver_hw_fini(adev);
4897
4898	amdgpu_device_ip_suspend_phase2(adev);
4899
4900	if (amdgpu_sriov_vf(adev))
4901		amdgpu_virt_release_full_gpu(adev, false);
4902
4903	r = amdgpu_dpm_notify_rlc_state(adev, false);
4904	if (r)
4905		return r;
4906
4907	return 0;
4908}
4909
4910/**
4911 * amdgpu_device_resume - initiate device resume
4912 *
4913 * @dev: drm dev pointer
4914 * @notify_clients: notify in-kernel DRM clients
4915 *
4916 * Bring the hw back to operating state (all asics).
4917 * Returns 0 for success or an error on failure.
4918 * Called at driver resume.
4919 */
4920int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
4921{
4922	struct amdgpu_device *adev = drm_to_adev(dev);
4923	int r = 0;
4924
4925	if (amdgpu_sriov_vf(adev)) {
4926		r = amdgpu_virt_request_full_gpu(adev, true);
4927		if (r)
4928			return r;
4929	}
4930
4931	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4932		return 0;
4933
4934	if (adev->in_s0ix)
4935		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4936
4937	/* post card */
4938	if (amdgpu_device_need_post(adev)) {
4939		r = amdgpu_device_asic_init(adev);
4940		if (r)
4941			dev_err(adev->dev, "amdgpu asic init failed\n");
4942	}
4943
4944	r = amdgpu_device_ip_resume(adev);
4945
4946	if (r) {
4947		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4948		goto exit;
4949	}
4950
4951	if (!adev->in_s0ix) {
4952		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4953		if (r)
4954			goto exit;
4955	}
4956
4957	r = amdgpu_device_ip_late_init(adev);
4958	if (r)
4959		goto exit;
4960
4961	queue_delayed_work(system_wq, &adev->delayed_init_work,
4962			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4963exit:
4964	if (amdgpu_sriov_vf(adev)) {
4965		amdgpu_virt_init_data_exchange(adev);
4966		amdgpu_virt_release_full_gpu(adev, true);
4967	}
4968
4969	if (r)
4970		return r;
4971
4972	/* Make sure IB tests flushed */
4973	flush_delayed_work(&adev->delayed_init_work);
4974
4975	if (notify_clients)
4976		drm_client_dev_resume(adev_to_drm(adev), false);
4977
4978	amdgpu_ras_resume(adev);
4979
4980	if (adev->mode_info.num_crtc) {
4981		/*
4982		 * Most of the connector probing functions try to acquire runtime pm
4983		 * refs to ensure that the GPU is powered on when connector polling is
4984		 * performed. Since we're calling this from a runtime PM callback,
4985		 * trying to acquire rpm refs will cause us to deadlock.
4986		 *
4987		 * Since we're guaranteed to be holding the rpm lock, it's safe to
4988		 * temporarily disable the rpm helpers so this doesn't deadlock us.
4989		 */
4990#ifdef CONFIG_PM
4991		dev->dev->power.disable_depth++;
4992#endif
4993		if (!adev->dc_enabled)
4994			drm_helper_hpd_irq_event(dev);
4995		else
4996			drm_kms_helper_hotplug_event(dev);
4997#ifdef CONFIG_PM
4998		dev->dev->power.disable_depth--;
4999#endif
5000	}
5001	adev->in_suspend = false;
5002
5003	if (adev->enable_mes)
5004		amdgpu_mes_self_test(adev);
5005
5006	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
5007		DRM_WARN("smart shift update failed\n");
5008
5009	return 0;
5010}
5011
5012/**
5013 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
5014 *
5015 * @adev: amdgpu_device pointer
5016 *
5017 * The list of all the hardware IPs that make up the asic is walked and
5018 * the check_soft_reset callbacks are run.  check_soft_reset determines
5019 * if the asic is still hung or not.
5020 * Returns true if any of the IPs are still in a hung state, false if not.
5021 */
5022static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
5023{
5024	int i;
5025	bool asic_hang = false;
5026
5027	if (amdgpu_sriov_vf(adev))
5028		return true;
5029
5030	if (amdgpu_asic_need_full_reset(adev))
5031		return true;
5032
5033	for (i = 0; i < adev->num_ip_blocks; i++) {
5034		if (!adev->ip_blocks[i].status.valid)
5035			continue;
5036		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
5037			adev->ip_blocks[i].status.hang =
5038				adev->ip_blocks[i].version->funcs->check_soft_reset(
5039					&adev->ip_blocks[i]);
5040		if (adev->ip_blocks[i].status.hang) {
5041			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
5042			asic_hang = true;
5043		}
5044	}
5045	return asic_hang;
5046}
5047
5048/**
5049 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
5050 *
5051 * @adev: amdgpu_device pointer
5052 *
5053 * The list of all the hardware IPs that make up the asic is walked and the
5054 * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
5055 * handles any IP specific hardware or software state changes that are
5056 * necessary for a soft reset to succeed.
5057 * Returns 0 on success, negative error code on failure.
5058 */
5059static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
5060{
5061	int i, r = 0;
5062
5063	for (i = 0; i < adev->num_ip_blocks; i++) {
5064		if (!adev->ip_blocks[i].status.valid)
5065			continue;
5066		if (adev->ip_blocks[i].status.hang &&
5067		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
5068			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]);
5069			if (r)
5070				return r;
5071		}
5072	}
5073
5074	return 0;
5075}
5076
5077/**
5078 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
5079 *
5080 * @adev: amdgpu_device pointer
5081 *
5082 * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
5083 * reset is necessary to recover.
5084 * Returns true if a full asic reset is required, false if not.
5085 */
5086static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
5087{
5088	int i;
5089
5090	if (amdgpu_asic_need_full_reset(adev))
5091		return true;
5092
5093	for (i = 0; i < adev->num_ip_blocks; i++) {
5094		if (!adev->ip_blocks[i].status.valid)
5095			continue;
5096		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
5097		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
5098		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
5099		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
5100		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
5101			if (adev->ip_blocks[i].status.hang) {
5102				dev_info(adev->dev, "Some blocks need a full reset!\n");
5103				return true;
5104			}
5105		}
5106	}
5107	return false;
5108}
5109
5110/**
5111 * amdgpu_device_ip_soft_reset - do a soft reset
5112 *
5113 * @adev: amdgpu_device pointer
5114 *
5115 * The list of all the hardware IPs that make up the asic is walked and the
5116 * soft_reset callbacks are run if the block is hung.  soft_reset handles any
5117 * IP specific hardware or software state changes that are necessary to soft
5118 * reset the IP.
5119 * Returns 0 on success, negative error code on failure.
5120 */
5121static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
5122{
5123	int i, r = 0;
5124
5125	for (i = 0; i < adev->num_ip_blocks; i++) {
5126		if (!adev->ip_blocks[i].status.valid)
5127			continue;
5128		if (adev->ip_blocks[i].status.hang &&
5129		    adev->ip_blocks[i].version->funcs->soft_reset) {
5130			r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]);
5131			if (r)
5132				return r;
5133		}
5134	}
5135
5136	return 0;
5137}
5138
5139/**
5140 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
5141 *
5142 * @adev: amdgpu_device pointer
5143 *
5144 * The list of all the hardware IPs that make up the asic is walked and the
5145 * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
5146 * handles any IP specific hardware or software state changes that are
5147 * necessary after the IP has been soft reset.
5148 * Returns 0 on success, negative error code on failure.
5149 */
5150static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
5151{
5152	int i, r = 0;
5153
5154	for (i = 0; i < adev->num_ip_blocks; i++) {
5155		if (!adev->ip_blocks[i].status.valid)
5156			continue;
5157		if (adev->ip_blocks[i].status.hang &&
5158		    adev->ip_blocks[i].version->funcs->post_soft_reset)
5159			r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]);
5160		if (r)
5161			return r;
5162	}
5163
5164	return 0;
5165}
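
/*
 * Taken together, the four helpers above form the soft-reset path used by
 * amdgpu_device_pre_asic_reset() further below; as an illustrative sketch
 * (the real caller also honours amdgpu_device_ip_need_full_reset() and the
 * amdgpu_gpu_recovery knob):
 *
 *	if (amdgpu_device_ip_check_soft_reset(adev)) {
 *		amdgpu_device_ip_pre_soft_reset(adev);
 *		amdgpu_device_ip_soft_reset(adev);
 *		amdgpu_device_ip_post_soft_reset(adev);
 *		if (amdgpu_device_ip_check_soft_reset(adev))
 *			need_full_reset = true;	// still hung, do full reset
 *	}
 */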
5166
5167/**
5168 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5169 *
5170 * @adev: amdgpu_device pointer
5171 * @reset_context: amdgpu reset context pointer
5172 *
5173 * Do VF FLR and reinitialize the asic.
5174 * Returns 0 on success, negative error code on failure.
5175 */
5176static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
5177				     struct amdgpu_reset_context *reset_context)
5178{
5179	int r;
5180	struct amdgpu_hive_info *hive = NULL;
5181
5182	if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) {
5183		if (!amdgpu_ras_get_fed_status(adev))
5184			amdgpu_virt_ready_to_reset(adev);
5185		amdgpu_virt_wait_reset(adev);
5186		clear_bit(AMDGPU_HOST_FLR, &reset_context->flags);
5187		r = amdgpu_virt_request_full_gpu(adev, true);
5188	} else {
5189		r = amdgpu_virt_reset_gpu(adev);
5190	}
5191	if (r)
5192		return r;
5193
5194	amdgpu_ras_set_fed(adev, false);
5195	amdgpu_irq_gpu_reset_resume_helper(adev);
5196
5197	/* some sw cleanup the VF needs to do before recovery */
5198	amdgpu_virt_post_reset(adev);
5199
5200	/* Resume IP prior to SMC */
5201	r = amdgpu_device_ip_reinit_early_sriov(adev);
5202	if (r)
5203		return r;
5204
5205	amdgpu_virt_init_data_exchange(adev);
5206
5207	r = amdgpu_device_fw_loading(adev);
5208	if (r)
5209		return r;
5210
5211	/* now we are okay to resume SMC/CP/SDMA */
5212	r = amdgpu_device_ip_reinit_late_sriov(adev);
5213	if (r)
5214		return r;
5215
5216	hive = amdgpu_get_xgmi_hive(adev);
5217	/* Update PSP FW topology after reset */
5218	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
5219		r = amdgpu_xgmi_update_topology(hive, adev);
5220	if (hive)
5221		amdgpu_put_xgmi_hive(hive);
5222	if (r)
5223		return r;
5224
5225	r = amdgpu_ib_ring_tests(adev);
5226	if (r)
5227		return r;
5228
5229	if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST)
5230		amdgpu_inc_vram_lost(adev);
5231
5232	/* this needs to be called during full access, so we can't do it
5233	 * later like bare-metal does.
5234	 */
5235	amdgpu_amdkfd_post_reset(adev);
5236	amdgpu_virt_release_full_gpu(adev, true);
5237
5238	/* Aldebaran and gfx_11_0_3 support ras in SRIOV, so need resume ras during reset */
5239	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
5240	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
5241	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
5242	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
5243		amdgpu_ras_resume(adev);
5244
5245	amdgpu_virt_ras_telemetry_post_reset(adev);
5246
5247	return 0;
5248}
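
/*
 * A condensed, illustrative view of the SR-IOV recovery above: request
 * full GPU access (or wait out a host-initiated FLR), reinit the early
 * blocks (COMMON/GMC/PSP/IH), reload firmware, reinit the late blocks
 * (SMC/DCE/GFX/SDMA/...), run IB ring tests, then release full access
 * back to the host.
 */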
5249
5250/**
5251 * amdgpu_device_has_job_running - check if there is any job in the pending list
5252 *
5253 * @adev: amdgpu_device pointer
5254 *
5255 * check if there is any job in the pending list
5256 */
5257bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
5258{
5259	int i;
5260	struct drm_sched_job *job;
5261
5262	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5263		struct amdgpu_ring *ring = adev->rings[i];
5264
5265		if (!amdgpu_ring_sched_ready(ring))
5266			continue;
5267
5268		spin_lock(&ring->sched.job_list_lock);
5269		job = list_first_entry_or_null(&ring->sched.pending_list,
5270					       struct drm_sched_job, list);
5271		spin_unlock(&ring->sched.job_list_lock);
5272		if (job)
5273			return true;
5274	}
5275	return false;
5276}
5277
5278/**
5279 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
5280 *
5281 * @adev: amdgpu_device pointer
5282 *
5283 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
5284 * a hung GPU.
5285 */
5286bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
5287{
5288
5289	if (amdgpu_gpu_recovery == 0)
5290		goto disabled;
5291
5292	/* Skip soft reset check in fatal error mode */
5293	if (!amdgpu_ras_is_poison_mode_supported(adev))
5294		return true;
5295
5296	if (amdgpu_sriov_vf(adev))
5297		return true;
5298
5299	if (amdgpu_gpu_recovery == -1) {
5300		switch (adev->asic_type) {
5301#ifdef CONFIG_DRM_AMDGPU_SI
5302		case CHIP_VERDE:
5303		case CHIP_TAHITI:
5304		case CHIP_PITCAIRN:
5305		case CHIP_OLAND:
5306		case CHIP_HAINAN:
5307#endif
5308#ifdef CONFIG_DRM_AMDGPU_CIK
5309		case CHIP_KAVERI:
5310		case CHIP_KABINI:
5311		case CHIP_MULLINS:
5312#endif
5313		case CHIP_CARRIZO:
5314		case CHIP_STONEY:
5315		case CHIP_CYAN_SKILLFISH:
5316			goto disabled;
5317		default:
5318			break;
5319		}
5320	}
5321
5322	return true;
5323
5324disabled:
5325	dev_info(adev->dev, "GPU recovery disabled.\n");
5326	return false;
5327}
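
/*
 * Module parameter semantics implemented above, as a usage sketch:
 *
 *	amdgpu.gpu_recovery=0	# never attempt recovery
 *	amdgpu.gpu_recovery=1	# always attempt recovery
 *	amdgpu.gpu_recovery=-1	# auto: disabled on the legacy ASICs
 *				# listed above, enabled everywhere else
 *
 * SR-IOV VFs always attempt recovery, as do devices without RAS poison
 * mode support (the fatal error case).
 */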
5328
5329int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
5330{
5331	u32 i;
5332	int ret = 0;
5333
5334	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
5335
5336	dev_info(adev->dev, "GPU mode1 reset\n");
5337
5338	/* Cache the state before bus master disable. The saved config space
5339	 * values are used in other cases like restore after mode-2 reset.
5340	 */
5341	amdgpu_device_cache_pci_state(adev->pdev);
5342
5343	/* disable BM */
5344	pci_clear_master(adev->pdev);
5345
5346	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
5347		dev_info(adev->dev, "GPU smu mode1 reset\n");
5348		ret = amdgpu_dpm_mode1_reset(adev);
5349	} else {
5350		dev_info(adev->dev, "GPU psp mode1 reset\n");
5351		ret = psp_gpu_reset(adev);
5352	}
5353
5354	if (ret)
5355		goto mode1_reset_failed;
5356
5357	amdgpu_device_load_pci_state(adev->pdev);
5358	ret = amdgpu_psp_wait_for_bootloader(adev);
5359	if (ret)
5360		goto mode1_reset_failed;
5361
5362	/* wait for asic to come out of reset */
5363	for (i = 0; i < adev->usec_timeout; i++) {
5364		u32 memsize = adev->nbio.funcs->get_memsize(adev);
5365
5366		if (memsize != 0xffffffff)
5367			break;
5368		udelay(1);
5369	}
5370
5371	if (i >= adev->usec_timeout) {
5372		ret = -ETIMEDOUT;
5373		goto mode1_reset_failed;
5374	}
5375
5376	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
5377
5378	return 0;
5379
5380mode1_reset_failed:
5381	dev_err(adev->dev, "GPU mode1 reset failed\n");
5382	return ret;
5383}
5384
5385int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
5386				 struct amdgpu_reset_context *reset_context)
5387{
5388	int i, r = 0;
5389	struct amdgpu_job *job = NULL;
5390	struct amdgpu_device *tmp_adev = reset_context->reset_req_dev;
5391	bool need_full_reset =
5392		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5393
5394	if (reset_context->reset_req_dev == adev)
5395		job = reset_context->job;
5396
5397	if (amdgpu_sriov_vf(adev))
5398		amdgpu_virt_pre_reset(adev);
5399
5400	amdgpu_fence_driver_isr_toggle(adev, true);
5401
5402	/* block all schedulers and reset given job's ring */
5403	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5404		struct amdgpu_ring *ring = adev->rings[i];
5405
5406		if (!amdgpu_ring_sched_ready(ring))
5407			continue;
5408
5409		/* Clear the job fences from the fence driver so that
5410		 * force_completion doesn't leave NULL or VM flush fences behind
5411		 */
5412		amdgpu_fence_driver_clear_job_fences(ring);
5413
5414		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
5415		amdgpu_fence_driver_force_completion(ring);
5416	}
5417
5418	amdgpu_fence_driver_isr_toggle(adev, false);
5419
5420	if (job && job->vm)
5421		drm_sched_increase_karma(&job->base);
5422
5423	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
5424	/* If reset handler not implemented, continue; otherwise return */
5425	if (r == -EOPNOTSUPP)
5426		r = 0;
5427	else
5428		return r;
5429
5430	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
5431	if (!amdgpu_sriov_vf(adev)) {
5432
5433		if (!need_full_reset)
5434			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
5435
5436		if (!need_full_reset && amdgpu_gpu_recovery &&
5437		    amdgpu_device_ip_check_soft_reset(adev)) {
5438			amdgpu_device_ip_pre_soft_reset(adev);
5439			r = amdgpu_device_ip_soft_reset(adev);
5440			amdgpu_device_ip_post_soft_reset(adev);
5441			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5442				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
5443				need_full_reset = true;
5444			}
5445		}
5446
5447		if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
5448			dev_info(tmp_adev->dev, "Dumping IP State\n");
5449			/* Trigger ip dump before we reset the asic */
5450			for (i = 0; i < tmp_adev->num_ip_blocks; i++)
5451				if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
5452					tmp_adev->ip_blocks[i].version->funcs
5453						->dump_ip_state((void *)&tmp_adev->ip_blocks[i]);
5454			dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
5455		}
5456
5457		if (need_full_reset) {
5458			r = amdgpu_device_ip_suspend(adev);
5459			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5460		} else {
5461			clear_bit(AMDGPU_NEED_FULL_RESET,
5462				  &reset_context->flags);
5463		}
5464	}
5465
5466	return r;
5467}
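/*
 * Context sketch (mirroring amdgpu_pci_slot_reset later in this file):
 * callers seed reset_context->flags and this function refines them, e.g.:
 *
 *	struct amdgpu_reset_context reset_context;
 *
 *	memset(&reset_context, 0, sizeof(reset_context));
 *	reset_context.method = AMD_RESET_METHOD_NONE;
 *	reset_context.reset_req_dev = adev;
 *	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
 *	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
 */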
5468
5469int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
5470{
5471	struct list_head *device_list_handle;
5472	bool full_reset, vram_lost = false;
5473	struct amdgpu_device *tmp_adev;
5474	int r, init_level;
5475
5476	device_list_handle = reset_context->reset_device_list;
5477
5478	if (!device_list_handle)
5479		return -EINVAL;
5480
5481	full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5482
5483	/*
5484	 * If it's a reset on init, use the default init level; otherwise
5485	 * keep the level at recovery level.
5486	 */
5487	if (reset_context->method == AMD_RESET_METHOD_ON_INIT)
5488		init_level = AMDGPU_INIT_LEVEL_DEFAULT;
5489	else
5490		init_level = AMDGPU_INIT_LEVEL_RESET_RECOVERY;
5491
5492	r = 0;
5493	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5494		amdgpu_set_init_level(tmp_adev, init_level);
5495		if (full_reset) {
5496			/* post card */
5497			amdgpu_ras_set_fed(tmp_adev, false);
5498			r = amdgpu_device_asic_init(tmp_adev);
5499			if (r) {
5500				dev_warn(tmp_adev->dev, "asic atom init failed!");
5501			} else {
5502				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5503
5504				r = amdgpu_device_ip_resume_phase1(tmp_adev);
5505				if (r)
5506					goto out;
5507
5508				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5509
5510				if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
5511					amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
5512
5513				if (vram_lost) {
5514					DRM_INFO("VRAM is lost due to GPU reset!\n");
5515					amdgpu_inc_vram_lost(tmp_adev);
5516				}
5517
5518				r = amdgpu_device_fw_loading(tmp_adev);
5519				if (r)
5520					return r;
5521
5522				r = amdgpu_xcp_restore_partition_mode(
5523					tmp_adev->xcp_mgr);
5524				if (r)
5525					goto out;
5526
5527				r = amdgpu_device_ip_resume_phase2(tmp_adev);
5528				if (r)
5529					goto out;
5530
5531				if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
5532					amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
5533
5534				r = amdgpu_device_ip_resume_phase3(tmp_adev);
5535				if (r)
5536					goto out;
5537
5538				if (vram_lost)
5539					amdgpu_device_fill_reset_magic(tmp_adev);
5540
5541				/*
5542				 * Add this ASIC back as tracked since the
5543				 * reset already completed successfully.
5544				 */
5545				amdgpu_register_gpu_instance(tmp_adev);
5546
5547				if (!reset_context->hive &&
5548				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5549					amdgpu_xgmi_add_device(tmp_adev);
5550
5551				r = amdgpu_device_ip_late_init(tmp_adev);
5552				if (r)
5553					goto out;
5554
5555				drm_client_dev_resume(adev_to_drm(tmp_adev), false);
5556
5557				/*
5558				 * The GPU enters a bad state once the number
5559				 * of faulty pages flagged by ECC reaches the
5560				 * threshold, and RAS recovery is scheduled
5561				 * next. So check here whether the bad page
5562				 * threshold has been exceeded and, if so,
5563				 * abort recovery to remind the user to retire
5564				 * this GPU or set a bigger bad_page_threshold
5565				 * value when probing the driver again.
5566				 */
5567				if (!amdgpu_ras_is_rma(tmp_adev)) {
5568					/* must succeed. */
5569					amdgpu_ras_resume(tmp_adev);
5570				} else {
5571					r = -EINVAL;
5572					goto out;
5573				}
5574
5575				/* Update PSP FW topology after reset */
5576				if (reset_context->hive &&
5577				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5578					r = amdgpu_xgmi_update_topology(
5579						reset_context->hive, tmp_adev);
5580			}
5581		}
5582
5583out:
5584		if (!r) {
5585			/* IP init is complete now, set level as default */
5586			amdgpu_set_init_level(tmp_adev,
5587					      AMDGPU_INIT_LEVEL_DEFAULT);
5588			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5589			r = amdgpu_ib_ring_tests(tmp_adev);
5590			if (r) {
5591				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5592				r = -EAGAIN;
5593				goto end;
5594			}
5595		}
5596
5597		if (r)
5598			tmp_adev->asic_reset_res = r;
5599	}
5600
5601end:
5602	return r;
5603}
5604
5605int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5606			 struct amdgpu_reset_context *reset_context)
5607{
5608	struct amdgpu_device *tmp_adev = NULL;
5609	bool need_full_reset, skip_hw_reset;
5610	int r = 0;
5611
5612	/* Try reset handler method first */
5613	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5614				    reset_list);
5615
5616	reset_context->reset_device_list = device_list_handle;
5617	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5618	/* If reset handler not implemented, continue; otherwise return */
5619	if (r == -EOPNOTSUPP)
5620		r = 0;
5621	else
5622		return r;
5623
5624	/* Reset handler not implemented, use the default method */
5625	need_full_reset =
5626		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5627	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5628
5629	/*
5630	 * ASIC reset has to be done on all XGMI hive nodes ASAP
5631	 * to allow proper link negotiation in FW (within 1 sec)
5632	 */
5633	if (!skip_hw_reset && need_full_reset) {
5634		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5635			/* For XGMI run all resets in parallel to speed up the process */
5636			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5637				if (!queue_work(system_unbound_wq,
5638						&tmp_adev->xgmi_reset_work))
5639					r = -EALREADY;
5640			} else
5641				r = amdgpu_asic_reset(tmp_adev);
5642
5643			if (r) {
5644				dev_err(tmp_adev->dev,
5645					"ASIC reset failed with error, %d for drm dev, %s",
5646					r, adev_to_drm(tmp_adev)->unique);
5647				goto out;
5648			}
5649		}
5650
5651		/* For XGMI wait for all resets to complete before proceed */
5652		if (!r) {
5653			list_for_each_entry(tmp_adev, device_list_handle,
5654					    reset_list) {
5655				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5656					flush_work(&tmp_adev->xgmi_reset_work);
5657					r = tmp_adev->asic_reset_res;
5658					if (r)
5659						break;
5660				}
5661			}
5662		}
5663	}
5664
5665	if (!r && amdgpu_ras_intr_triggered()) {
5666		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5667			amdgpu_ras_reset_error_count(tmp_adev,
5668						     AMDGPU_RAS_BLOCK__MMHUB);
5669		}
5670
5671		amdgpu_ras_intr_cleared();
5672	}
5673
5674	r = amdgpu_device_reinit_after_reset(reset_context);
5675	if (r == -EAGAIN)
5676		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5677	else
5678		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5679
5680out:
5681	return r;
5682}
5683
5684static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5685{
5686
5687	switch (amdgpu_asic_reset_method(adev)) {
5688	case AMD_RESET_METHOD_MODE1:
5689		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5690		break;
5691	case AMD_RESET_METHOD_MODE2:
5692		adev->mp1_state = PP_MP1_STATE_RESET;
5693		break;
5694	default:
5695		adev->mp1_state = PP_MP1_STATE_NONE;
5696		break;
5697	}
5698}
5699
5700static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5701{
5702	amdgpu_vf_error_trans_all(adev);
5703	adev->mp1_state = PP_MP1_STATE_NONE;
5704}
5705
5706static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5707{
5708	struct pci_dev *p = NULL;
5709
5710	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5711			adev->pdev->bus->number, 1);
5712	if (p) {
5713		pm_runtime_enable(&(p->dev));
5714		pm_runtime_resume(&(p->dev));
5715	}
5716
5717	pci_dev_put(p);
5718}
5719
5720static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5721{
5722	enum amd_reset_method reset_method;
5723	struct pci_dev *p = NULL;
5724	u64 expires;
5725
5726	/*
5727	 * For now, only BACO and mode1 reset are confirmed
5728	 * to suffer from the audio issue without proper suspension.
5729	 */
5730	reset_method = amdgpu_asic_reset_method(adev);
5731	if ((reset_method != AMD_RESET_METHOD_BACO) &&
5732	     (reset_method != AMD_RESET_METHOD_MODE1))
5733		return -EINVAL;
5734
5735	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5736			adev->pdev->bus->number, 1);
5737	if (!p)
5738		return -ENODEV;
5739
5740	expires = pm_runtime_autosuspend_expiration(&(p->dev));
5741	if (!expires)
5742		/*
5743		 * If we cannot get the audio device autosuspend delay,
5744		 * a fixed 4s interval will be used. Since 3s is the
5745		 * audio controller's default autosuspend delay setting,
5746		 * the 4s used here is guaranteed to cover that.
5747		 */
5748		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5749
5750	while (!pm_runtime_status_suspended(&(p->dev))) {
5751		if (!pm_runtime_suspend(&(p->dev)))
5752			break;
5753
5754		if (expires < ktime_get_mono_fast_ns()) {
5755			dev_warn(adev->dev, "failed to suspend display audio\n");
5756			pci_dev_put(p);
5757			/* TODO: abort the succeeding gpu reset? */
5758			return -ETIMEDOUT;
5759		}
5760	}
5761
5762	pm_runtime_disable(&(p->dev));
5763
5764	pci_dev_put(p);
5765	return 0;
5766}
5767
5768static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5769{
5770	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5771
5772#if defined(CONFIG_DEBUG_FS)
5773	if (!amdgpu_sriov_vf(adev))
5774		cancel_work(&adev->reset_work);
5775#endif
5776
5777	if (adev->kfd.dev)
5778		cancel_work(&adev->kfd.reset_work);
5779
5780	if (amdgpu_sriov_vf(adev))
5781		cancel_work(&adev->virt.flr_work);
5782
5783	if (con && adev->ras_enabled)
5784		cancel_work(&con->recovery_work);
5785
5786}
5787
5788static int amdgpu_device_health_check(struct list_head *device_list_handle)
5789{
5790	struct amdgpu_device *tmp_adev;
5791	int ret = 0;
5792	u32 status;
5793
5794	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5795		pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status);
5796		if (PCI_POSSIBLE_ERROR(status)) {
5797			dev_err(tmp_adev->dev, "device lost from bus!");
5798			ret = -ENODEV;
5799		}
5800	}
5801
5802	return ret;
5803}
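/*
 * The probe above relies on a PCI convention: config reads from a device
 * that has dropped off the bus return all ones. A minimal single-device
 * sketch of the same test:
 *
 *	u32 cmd;
 *
 *	pci_read_config_dword(adev->pdev, PCI_COMMAND, &cmd);
 *	if (PCI_POSSIBLE_ERROR(cmd))
 *		return -ENODEV;
 */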
5804
5805/**
5806 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5807 *
5808 * @adev: amdgpu_device pointer
5809 * @job: the job that triggered the hang
5810 * @reset_context: amdgpu reset context pointer
5811 *
5812 * Attempt to reset the GPU if it has hung (all asics).
5813 * Attempt to do a soft reset or full reset and reinitialize the ASIC.
5814 * Returns 0 for success or an error on failure.
5815 */
5816
5817int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5818			      struct amdgpu_job *job,
5819			      struct amdgpu_reset_context *reset_context)
5820{
5821	struct list_head device_list, *device_list_handle =  NULL;
5822	bool job_signaled = false;
5823	struct amdgpu_hive_info *hive = NULL;
5824	struct amdgpu_device *tmp_adev = NULL;
5825	int i, r = 0;
5826	bool need_emergency_restart = false;
5827	bool audio_suspended = false;
5828	int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
5829
5830	/*
5831	 * Special case: RAS triggered and full reset isn't supported
5832	 */
5833	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5834
5835	/*
5836	 * Flush RAM to disk so that after reboot
5837	 * the user can read log and see why the system rebooted.
5838	 */
5839	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
5840		amdgpu_ras_get_context(adev)->reboot) {
5841		DRM_WARN("Emergency reboot.");
5842
5843		ksys_sync_helper();
5844		emergency_restart();
5845	}
5846
5847	dev_info(adev->dev, "GPU %s begin!\n",
5848		need_emergency_restart ? "jobs stop":"reset");
5849
5850	if (!amdgpu_sriov_vf(adev))
5851		hive = amdgpu_get_xgmi_hive(adev);
5852	if (hive)
5853		mutex_lock(&hive->hive_lock);
5854
5855	reset_context->job = job;
5856	reset_context->hive = hive;
5857	/*
5858	 * Build list of devices to reset.
5859	 * In case we are in XGMI hive mode, resort the device list
5860	 * to put adev in the 1st position.
5861	 */
5862	INIT_LIST_HEAD(&device_list);
5863	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
5864		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5865			list_add_tail(&tmp_adev->reset_list, &device_list);
5866			if (adev->shutdown)
5867				tmp_adev->shutdown = true;
5868		}
5869		if (!list_is_first(&adev->reset_list, &device_list))
5870			list_rotate_to_front(&adev->reset_list, &device_list);
5871		device_list_handle = &device_list;
5872	} else {
5873		list_add_tail(&adev->reset_list, &device_list);
5874		device_list_handle = &device_list;
5875	}
5876
5877	if (!amdgpu_sriov_vf(adev)) {
5878		r = amdgpu_device_health_check(device_list_handle);
5879		if (r)
5880			goto end_reset;
5881	}
5882
5883	/* We need to lock reset domain only once both for XGMI and single device */
5884	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5885				    reset_list);
5886	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5887
5888	/* block all schedulers and reset given job's ring */
5889	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5890
5891		amdgpu_device_set_mp1_state(tmp_adev);
5892
5893		/*
5894		 * Try to put the audio codec into suspend state
5895		 * before the gpu reset starts.
5896		 *
5897		 * The power domain of the graphics device is
5898		 * shared with the AZ power domain. Without this,
5899		 * we may change the audio hardware behind the
5900		 * audio driver's back, which will trigger
5901		 * audio codec errors.
5902		 */
5903		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5904			audio_suspended = true;
5905
5906		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5907
5908		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5909
5910		amdgpu_amdkfd_pre_reset(tmp_adev, reset_context);
5911
5912		/*
5913		 * Mark these ASICs to be reset as untracked first,
5914		 * and add them back after the reset completes.
5915		 */
5916		amdgpu_unregister_gpu_instance(tmp_adev);
5917
5918		drm_client_dev_suspend(adev_to_drm(tmp_adev), false);
5919
5920		/* disable ras on ALL IPs */
5921		if (!need_emergency_restart &&
5922		      amdgpu_device_ip_need_full_reset(tmp_adev))
5923			amdgpu_ras_suspend(tmp_adev);
5924
5925		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5926			struct amdgpu_ring *ring = tmp_adev->rings[i];
5927
5928			if (!amdgpu_ring_sched_ready(ring))
5929				continue;
5930
5931			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5932
5933			if (need_emergency_restart)
5934				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5935		}
5936		atomic_inc(&tmp_adev->gpu_reset_counter);
5937	}
5938
5939	if (need_emergency_restart)
5940		goto skip_sched_resume;
5941
5942	/*
5943	 * Must check guilty signal here since after this point all old
5944	 * HW fences are force signaled.
5945	 *
5946	 * job->base holds a reference to parent fence
5947	 */
5948	if (job && dma_fence_is_signaled(&job->hw_fence)) {
5949		job_signaled = true;
5950		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5951		goto skip_hw_reset;
5952	}
5953
5954retry:	/* Rest of adevs pre asic reset from XGMI hive. */
5955	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5956		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5957		/* TODO: Should we stop? */
5958		if (r) {
5959			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5960				  r, adev_to_drm(tmp_adev)->unique);
5961			tmp_adev->asic_reset_res = r;
5962		}
5963	}
5964
5965	/* Actual ASIC resets if needed. */
5966	/* Host driver will handle XGMI hive reset for SRIOV */
5967	if (amdgpu_sriov_vf(adev)) {
5968		if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) {
5969			dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n");
5970			amdgpu_ras_set_fed(adev, true);
5971			set_bit(AMDGPU_HOST_FLR, &reset_context->flags);
5972		}
5973
5974		r = amdgpu_device_reset_sriov(adev, reset_context);
5975		if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) {
5976			amdgpu_virt_release_full_gpu(adev, true);
5977			goto retry;
5978		}
5979		if (r)
5980			adev->asic_reset_res = r;
5981	} else {
5982		r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5983		if (r == -EAGAIN)
5984			goto retry;
5985	}
5986
5987	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5988		/*
5989		 * Drop any pending non scheduler resets queued before reset is done.
5990		 * Any reset scheduled after this point would be valid. Scheduler resets
5991		 * were already dropped during drm_sched_stop and no new ones can come
5992		 * in before drm_sched_start.
5993		 */
5994		amdgpu_device_stop_pending_resets(tmp_adev);
5995	}
5996
5997skip_hw_reset:
5998
5999	/* Post ASIC reset for all devs. */
6000	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
6001
6002		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6003			struct amdgpu_ring *ring = tmp_adev->rings[i];
6004
6005			if (!amdgpu_ring_sched_ready(ring))
6006				continue;
6007
6008			drm_sched_start(&ring->sched, 0);
6009		}
6010
6011		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
6012			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
6013
6014		if (tmp_adev->asic_reset_res)
6015			r = tmp_adev->asic_reset_res;
6016
6017		tmp_adev->asic_reset_res = 0;
6018
6019		if (r) {
6020			/* Bad news, how do we tell it to userspace?
6021			 * For a RAS error we should report a GPU bad status
6022			 * instead of a reset failure
6023			 */
6024			if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
6025			    !amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
6026				dev_info(tmp_adev->dev, "GPU reset(%d) failed\n",
6027					atomic_read(&tmp_adev->gpu_reset_counter));
6028			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
6029		} else {
6030			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
6031			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
6032				DRM_WARN("smart shift update failed\n");
6033		}
6034	}
6035
6036skip_sched_resume:
6037	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
6038		/* unlock kfd: SRIOV would do it separately */
6039		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
6040			amdgpu_amdkfd_post_reset(tmp_adev);
6041
6042		/* kfd_post_reset will do nothing if the kfd device is not
6043		 * initialized, so bring up kfd here if it wasn't initialized before
6044		 */
6045		if (!adev->kfd.init_complete)
6046			amdgpu_amdkfd_device_init(adev);
6047
6048		if (audio_suspended)
6049			amdgpu_device_resume_display_audio(tmp_adev);
6050
6051		amdgpu_device_unset_mp1_state(tmp_adev);
6052
6053		amdgpu_ras_set_error_query_ready(tmp_adev, true);
6054	}
6055
6056	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
6057					    reset_list);
6058	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
6059
6060end_reset:
6061	if (hive) {
6062		mutex_unlock(&hive->hive_lock);
6063		amdgpu_put_xgmi_hive(hive);
6064	}
6065
6066	if (r)
6067		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
6068
6069	atomic_set(&adev->reset_domain->reset_res, r);
6070	return r;
6071}
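/*
 * Caller sketch (illustrative, modeled on the job-timeout path; the exact
 * source value is an assumption): build a reset context and hand the hung
 * job to the recovery routine:
 *
 *	struct amdgpu_reset_context reset_context;
 *
 *	memset(&reset_context, 0, sizeof(reset_context));
 *	reset_context.method = AMD_RESET_METHOD_NONE;
 *	reset_context.reset_req_dev = adev;
 *	reset_context.src = AMDGPU_RESET_SRC_JOB;
 *	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
 *	r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
 */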
6072
6073/**
6074 * amdgpu_device_partner_bandwidth - find the bandwidth of the appropriate partner
6075 *
6076 * @adev: amdgpu_device pointer
6077 * @speed: pointer to the speed of the link
6078 * @width: pointer to the width of the link
6079 *
6080 * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
6081 * first physical partner to an AMD dGPU.
6082 * This will exclude any virtual switches and links.
6083 */
6084static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
6085					    enum pci_bus_speed *speed,
6086					    enum pcie_link_width *width)
6087{
6088	struct pci_dev *parent = adev->pdev;
6089
6090	if (!speed || !width)
6091		return;
6092
6093	*speed = PCI_SPEED_UNKNOWN;
6094	*width = PCIE_LNK_WIDTH_UNKNOWN;
6095
6096	if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
6097		while ((parent = pci_upstream_bridge(parent))) {
6098			/* skip upstream/downstream switches internal to dGPU*/
6099			if (parent->vendor == PCI_VENDOR_ID_ATI)
6100				continue;
6101			*speed = pcie_get_speed_cap(parent);
6102			*width = pcie_get_width_cap(parent);
6103			break;
6104		}
6105	} else {
6106		/* use the current speeds rather than max if switching is not supported */
6107		pcie_bandwidth_available(adev->pdev, NULL, speed, width);
6108	}
6109}
6110
6111/**
6112 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
6113 *
6114 * @adev: amdgpu_device pointer
6115 *
6116 * Fetches and stores in the driver the PCIE capabilities (gen speed
6117 * and lanes) of the slot the device is in. Handles APUs and
6118 * virtualized environments where PCIE config space may not be available.
6119 */
6120static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
6121{
6122	struct pci_dev *pdev;
6123	enum pci_bus_speed speed_cap, platform_speed_cap;
6124	enum pcie_link_width platform_link_width;
6125
6126	if (amdgpu_pcie_gen_cap)
6127		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
6128
6129	if (amdgpu_pcie_lane_cap)
6130		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
6131
6132	/* covers APUs as well */
6133	if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
6134		if (adev->pm.pcie_gen_mask == 0)
6135			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
6136		if (adev->pm.pcie_mlw_mask == 0)
6137			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
6138		return;
6139	}
6140
6141	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
6142		return;
6143
6144	amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
6145					&platform_link_width);
6146
6147	if (adev->pm.pcie_gen_mask == 0) {
6148		/* asic caps */
6149		pdev = adev->pdev;
6150		speed_cap = pcie_get_speed_cap(pdev);
6151		if (speed_cap == PCI_SPEED_UNKNOWN) {
6152			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6153						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6154						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
6155		} else {
6156			if (speed_cap == PCIE_SPEED_32_0GT)
6157				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6158							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6159							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6160							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
6161							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
6162			else if (speed_cap == PCIE_SPEED_16_0GT)
6163				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6164							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6165							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6166							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
6167			else if (speed_cap == PCIE_SPEED_8_0GT)
6168				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6169							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6170							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
6171			else if (speed_cap == PCIE_SPEED_5_0GT)
6172				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6173							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
6174			else
6175				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
6176		}
6177		/* platform caps */
6178		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
6179			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6180						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
6181		} else {
6182			if (platform_speed_cap == PCIE_SPEED_32_0GT)
6183				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6184							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6185							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6186							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
6187							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
6188			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
6189				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6190							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6191							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6192							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
6193			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
6194				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6195							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6196							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
6197			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
6198				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6199							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
6200			else
6201				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
6202
6203		}
6204	}
6205	if (adev->pm.pcie_mlw_mask == 0) {
6206		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
6207			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
6208		} else {
6209			switch (platform_link_width) {
6210			case PCIE_LNK_X32:
6211				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
6212							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
6213							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6214							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6215							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6216							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6217							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6218				break;
6219			case PCIE_LNK_X16:
6220				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
6221							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6222							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6223							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6224							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6225							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6226				break;
6227			case PCIE_LNK_X12:
6228				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6229							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6230							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6231							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6232							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6233				break;
6234			case PCIE_LNK_X8:
6235				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6236							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6237							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6238							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6239				break;
6240			case PCIE_LNK_X4:
6241				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6242							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6243							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6244				break;
6245			case PCIE_LNK_X2:
6246				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6247							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6248				break;
6249			case PCIE_LNK_X1:
6250				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
6251				break;
6252			default:
6253				break;
6254			}
6255		}
6256	}
6257}
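/*
 * Consumption sketch (illustrative; max_gen is a hypothetical local): the
 * power code tests the masks built above against the CAIL_* capability
 * bits, e.g. to check whether the platform allows gen3:
 *
 *	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 *		max_gen = 3;
 */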
6258
6259/**
6260 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
6261 *
6262 * @adev: amdgpu_device pointer
6263 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
6264 *
6265 * Return true if @peer_adev can access (DMA) @adev through the PCIe
6266 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
6267 * @peer_adev.
6268 */
6269bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
6270				      struct amdgpu_device *peer_adev)
6271{
6272#ifdef CONFIG_HSA_AMD_P2P
6273	bool p2p_access =
6274		!adev->gmc.xgmi.connected_to_cpu &&
6275		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
6276	if (!p2p_access)
6277		dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n",
6278			pci_name(peer_adev->pdev));
6279
6280	bool is_large_bar = adev->gmc.visible_vram_size &&
6281		adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
6282	bool p2p_addressable = amdgpu_device_check_iommu_remap(peer_adev);
6283
6284	if (!p2p_addressable) {
6285		uint64_t address_mask = peer_adev->dev->dma_mask ?
6286			~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
6287		resource_size_t aper_limit =
6288			adev->gmc.aper_base + adev->gmc.aper_size - 1;
6289
6290		p2p_addressable = !(adev->gmc.aper_base & address_mask ||
6291				     aper_limit & address_mask);
6292	}
6293	return pcie_p2p && is_large_bar && p2p_access && p2p_addressable;
6294#else
6295	return false;
6296#endif
6297}
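/*
 * Usage sketch (illustrative): a DMA-buf attach path could use this test
 * to choose between mapping peer VRAM directly over PCIe and bouncing
 * through system memory:
 *
 *	attach->peer2peer = amdgpu_device_is_peer_accessible(adev,
 *							     peer_adev);
 */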
6298
6299int amdgpu_device_baco_enter(struct drm_device *dev)
6300{
6301	struct amdgpu_device *adev = drm_to_adev(dev);
6302	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6303
6304	if (!amdgpu_device_supports_baco(dev))
6305		return -ENOTSUPP;
6306
6307	if (ras && adev->ras_enabled &&
6308	    adev->nbio.funcs->enable_doorbell_interrupt)
6309		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
6310
6311	return amdgpu_dpm_baco_enter(adev);
6312}
6313
6314int amdgpu_device_baco_exit(struct drm_device *dev)
6315{
6316	struct amdgpu_device *adev = drm_to_adev(dev);
6317	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6318	int ret = 0;
6319
6320	if (!amdgpu_device_supports_baco(dev))
6321		return -ENOTSUPP;
6322
6323	ret = amdgpu_dpm_baco_exit(adev);
6324	if (ret)
6325		return ret;
6326
6327	if (ras && adev->ras_enabled &&
6328	    adev->nbio.funcs->enable_doorbell_interrupt)
6329		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
6330
6331	if (amdgpu_passthrough(adev) && adev->nbio.funcs &&
6332	    adev->nbio.funcs->clear_doorbell_interrupt)
6333		adev->nbio.funcs->clear_doorbell_interrupt(adev);
6334
6335	return 0;
6336}
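/*
 * Pairing sketch (illustrative): runtime-PM style callers bracket the
 * low-power window with the two helpers and must tolerate failure:
 *
 *	r = amdgpu_device_baco_enter(dev);
 *	if (r)
 *		return r;
 *	... device sits in BACO with rails mostly off ...
 *	r = amdgpu_device_baco_exit(dev);
 */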
6337
6338/**
6339 * amdgpu_pci_error_detected - Called when a PCI error is detected.
6340 * @pdev: PCI device struct
6341 * @state: PCI channel state
6342 *
6343 * Description: Called when a PCI error is detected.
6344 *
6345 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
6346 */
6347pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6348{
6349	struct drm_device *dev = pci_get_drvdata(pdev);
6350	struct amdgpu_device *adev = drm_to_adev(dev);
6351	int i;
6352
6353	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
6354
6355	if (adev->gmc.xgmi.num_physical_nodes > 1) {
6356		DRM_WARN("No support for XGMI hive yet...");
6357		return PCI_ERS_RESULT_DISCONNECT;
6358	}
6359
6360	adev->pci_channel_state = state;
6361
6362	switch (state) {
6363	case pci_channel_io_normal:
6364		return PCI_ERS_RESULT_CAN_RECOVER;
6365	/* Fatal error, prepare for slot reset */
6366	case pci_channel_io_frozen:
6367		/*
6368		 * Locking adev->reset_domain->sem will prevent any external access
6369		 * to GPU during PCI error recovery
6370		 */
6371		amdgpu_device_lock_reset_domain(adev->reset_domain);
6372		amdgpu_device_set_mp1_state(adev);
6373
6374		/*
6375		 * Block any work scheduling as we do for regular GPU reset
6376		 * for the duration of the recovery
6377		 */
6378		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6379			struct amdgpu_ring *ring = adev->rings[i];
6380
6381			if (!amdgpu_ring_sched_ready(ring))
6382				continue;
6383
6384			drm_sched_stop(&ring->sched, NULL);
6385		}
6386		atomic_inc(&adev->gpu_reset_counter);
6387		return PCI_ERS_RESULT_NEED_RESET;
6388	case pci_channel_io_perm_failure:
6389		/* Permanent error, prepare for device removal */
6390		return PCI_ERS_RESULT_DISCONNECT;
6391	}
6392
6393	return PCI_ERS_RESULT_NEED_RESET;
6394}
6395
6396/**
6397 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
6398 * @pdev: pointer to PCI device
6399 */
6400pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
6401{
6402
6403	DRM_INFO("PCI error: mmio enabled callback!!\n");
6404
6405	/* TODO - dump whatever for debugging purposes */
6406
6407	/* This called only if amdgpu_pci_error_detected returns
6408	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
6409	 * works, no need to reset slot.
6410	 */
6411
6412	return PCI_ERS_RESULT_RECOVERED;
6413}
6414
6415/**
6416 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
6417 * @pdev: PCI device struct
6418 *
6419 * Description: This routine is called by the pci error recovery
6420 * code after the PCI slot has been reset, just before we
6421 * should resume normal operations.
6422 */
6423pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
6424{
6425	struct drm_device *dev = pci_get_drvdata(pdev);
6426	struct amdgpu_device *adev = drm_to_adev(dev);
6427	int r, i;
6428	struct amdgpu_reset_context reset_context;
6429	u32 memsize;
6430	struct list_head device_list;
6431
6432	/* PCI error slot reset should be skipped during RAS recovery */
6433	if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
6434	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
6435	    amdgpu_ras_in_recovery(adev))
6436		return PCI_ERS_RESULT_RECOVERED;
6437
6438	DRM_INFO("PCI error: slot reset callback!!\n");
6439
6440	memset(&reset_context, 0, sizeof(reset_context));
6441
6442	INIT_LIST_HEAD(&device_list);
6443	list_add_tail(&adev->reset_list, &device_list);
6444
6445	/* wait for asic to come out of reset */
6446	msleep(500);
6447
6448	/* Restore PCI confspace */
6449	amdgpu_device_load_pci_state(pdev);
6450
6451	/* confirm ASIC came out of reset */
6452	for (i = 0; i < adev->usec_timeout; i++) {
6453		memsize = amdgpu_asic_get_config_memsize(adev);
6454
6455		if (memsize != 0xffffffff)
6456			break;
6457		udelay(1);
6458	}
6459	if (memsize == 0xffffffff) {
6460		r = -ETIME;
6461		goto out;
6462	}
6463
6464	reset_context.method = AMD_RESET_METHOD_NONE;
6465	reset_context.reset_req_dev = adev;
6466	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
6467	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
6468
6469	adev->no_hw_access = true;
6470	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
6471	adev->no_hw_access = false;
6472	if (r)
6473		goto out;
6474
6475	r = amdgpu_do_asic_reset(&device_list, &reset_context);
6476
6477out:
6478	if (!r) {
6479		if (amdgpu_device_cache_pci_state(adev->pdev))
6480			pci_restore_state(adev->pdev);
6481
6482		DRM_INFO("PCIe error recovery succeeded\n");
6483	} else {
6484		DRM_ERROR("PCIe error recovery failed, err:%d", r);
6485		amdgpu_device_unset_mp1_state(adev);
6486		amdgpu_device_unlock_reset_domain(adev->reset_domain);
6487	}
6488
6489	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
6490}
6491
6492/**
6493 * amdgpu_pci_resume() - resume normal ops after PCI reset
6494 * @pdev: pointer to PCI device
6495 *
6496 * Called when the error recovery driver tells us that it's
6497 * OK to resume normal operation.
6498 */
6499void amdgpu_pci_resume(struct pci_dev *pdev)
6500{
6501	struct drm_device *dev = pci_get_drvdata(pdev);
6502	struct amdgpu_device *adev = drm_to_adev(dev);
6503	int i;
6504
6505
6506	DRM_INFO("PCI error: resume callback!!\n");
6507
6508	/* Only continue execution for the case of pci_channel_io_frozen */
6509	if (adev->pci_channel_state != pci_channel_io_frozen)
6510		return;
6511
6512	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6513		struct amdgpu_ring *ring = adev->rings[i];
6514
6515		if (!amdgpu_ring_sched_ready(ring))
6516			continue;
6517
6518		drm_sched_start(&ring->sched, 0);
6519	}
6520
6521	amdgpu_device_unset_mp1_state(adev);
6522	amdgpu_device_unlock_reset_domain(adev->reset_domain);
6523}
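/*
 * Wiring sketch: the four callbacks above plug into the PCI AER core
 * through a struct pci_error_handlers supplied at driver registration
 * (the instance name here is an assumption):
 *
 *	static struct pci_error_handlers amdgpu_pci_err_handler = {
 *		.error_detected	= amdgpu_pci_error_detected,
 *		.mmio_enabled	= amdgpu_pci_mmio_enabled,
 *		.slot_reset	= amdgpu_pci_slot_reset,
 *		.resume		= amdgpu_pci_resume,
 *	};
 */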
6524
6525bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
6526{
6527	struct drm_device *dev = pci_get_drvdata(pdev);
6528	struct amdgpu_device *adev = drm_to_adev(dev);
6529	int r;
6530
6531	if (amdgpu_sriov_vf(adev))
6532		return false;
6533
6534	r = pci_save_state(pdev);
6535	if (!r) {
6536		kfree(adev->pci_state);
6537
6538		adev->pci_state = pci_store_saved_state(pdev);
6539
6540		if (!adev->pci_state) {
6541			DRM_ERROR("Failed to store PCI saved state");
6542			return false;
6543		}
6544	} else {
6545		DRM_WARN("Failed to save PCI state, err:%d\n", r);
6546		return false;
6547	}
6548
6549	return true;
6550}
6551
6552bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
6553{
6554	struct drm_device *dev = pci_get_drvdata(pdev);
6555	struct amdgpu_device *adev = drm_to_adev(dev);
6556	int r;
6557
6558	if (!adev->pci_state)
6559		return false;
6560
6561	r = pci_load_saved_state(pdev, adev->pci_state);
6562
6563	if (!r) {
6564		pci_restore_state(pdev);
6565	} else {
6566		DRM_WARN("Failed to load PCI state, err:%d\n", r);
6567		return false;
6568	}
6569
6570	return true;
6571}
6572
6573void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6574		struct amdgpu_ring *ring)
6575{
6576#ifdef CONFIG_X86_64
6577	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6578		return;
6579#endif
6580	if (adev->gmc.xgmi.connected_to_cpu)
6581		return;
6582
6583	if (ring && ring->funcs->emit_hdp_flush)
6584		amdgpu_ring_emit_hdp_flush(ring);
6585	else
6586		amdgpu_asic_flush_hdp(adev, ring);
6587}
6588
6589void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6590		struct amdgpu_ring *ring)
6591{
6592#ifdef CONFIG_X86_64
6593	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6594		return;
6595#endif
6596	if (adev->gmc.xgmi.connected_to_cpu)
6597		return;
6598
6599	amdgpu_asic_invalidate_hdp(adev, ring);
6600}
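/*
 * Ordering sketch (illustrative): CPU access to VRAM through the BAR is
 * bracketed by these helpers. A CPU write followed by GPU consumption
 * looks roughly like:
 *
 *	memcpy_toio(addr, buf, count);
 *	mb();
 *	amdgpu_device_flush_hdp(adev, NULL);
 *
 * and a CPU read of GPU-written data does the invalidate first:
 *
 *	amdgpu_device_invalidate_hdp(adev, NULL);
 *	mb();
 *	memcpy_fromio(buf, addr, count);
 */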
6601
6602int amdgpu_in_reset(struct amdgpu_device *adev)
6603{
6604	return atomic_read(&adev->reset_domain->in_gpu_reset);
6605}
6606
6607/**
6608 * amdgpu_device_halt() - bring hardware to some kind of halt state
6609 *
6610 * @adev: amdgpu_device pointer
6611 *
6612 * Bring hardware to some kind of halt state so that no one can touch it
6613 * any more. It helps to maintain the error context when an error occurs.
6614 * Compared to a simple hang, the system will stay stable at least for SSH
6615 * access. Then it should be trivial to inspect the hardware state and
6616 * see what's going on. Implemented as follows:
6617 *
6618 * 1. drm_dev_unplug() makes device inaccessible to user space(IOCTLs, etc),
6619 *    clears all CPU mappings to device, disallows remappings through page faults
6620 * 2. amdgpu_irq_disable_all() disables all interrupts
6621 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
6622 * 4. set adev->no_hw_access to avoid potential crashes after step 5
6623 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
6624 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
6625 *    flush any in flight DMA operations
6626 */
6627void amdgpu_device_halt(struct amdgpu_device *adev)
6628{
6629	struct pci_dev *pdev = adev->pdev;
6630	struct drm_device *ddev = adev_to_drm(adev);
6631
6632	amdgpu_xcp_dev_unplug(adev);
6633	drm_dev_unplug(ddev);
6634
6635	amdgpu_irq_disable_all(adev);
6636
6637	amdgpu_fence_driver_hw_fini(adev);
6638
6639	adev->no_hw_access = true;
6640
6641	amdgpu_device_unmap_mmio(adev);
6642
6643	pci_disable_device(pdev);
6644	pci_wait_for_pending_transaction(pdev);
6645}
6646
6647u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
6648				u32 reg)
6649{
6650	unsigned long flags, address, data;
6651	u32 r;
6652
6653	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6654	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6655
6656	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6657	WREG32(address, reg * 4);
6658	(void)RREG32(address);
6659	r = RREG32(data);
6660	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6661	return r;
6662}
6663
6664void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6665				u32 reg, u32 v)
6666{
6667	unsigned long flags, address, data;
6668
6669	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6670	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6671
6672	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6673	WREG32(address, reg * 4);
6674	(void)RREG32(address);
6675	WREG32(data, v);
6676	(void)RREG32(data);
6677	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6678}
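/*
 * Read-modify-write sketch on top of the indexed port accessors; REG,
 * FIELD_MASK and FIELD_VAL are hypothetical:
 *
 *	u32 v = amdgpu_device_pcie_port_rreg(adev, REG);
 *
 *	v &= ~FIELD_MASK;
 *	v |= FIELD_VAL;
 *	amdgpu_device_pcie_port_wreg(adev, REG, v);
 */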
6679
6680/**
6681 * amdgpu_device_get_gang - return a reference to the current gang
6682 * @adev: amdgpu_device pointer
6683 *
6684 * Returns: A new reference to the current gang leader.
6685 */
6686struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev)
6687{
6688	struct dma_fence *fence;
6689
6690	rcu_read_lock();
6691	fence = dma_fence_get_rcu_safe(&adev->gang_submit);
6692	rcu_read_unlock();
6693	return fence;
6694}
6695
6696/**
6697 * amdgpu_device_switch_gang - switch to a new gang
6698 * @adev: amdgpu_device pointer
6699 * @gang: the gang to switch to
6700 *
6701 * Try to switch to a new gang.
6702 * Returns: NULL if we switched to the new gang or a reference to the current
6703 * gang leader.
6704 */
6705struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6706					    struct dma_fence *gang)
6707{
6708	struct dma_fence *old = NULL;
6709
6710	do {
6711		dma_fence_put(old);
6712		old = amdgpu_device_get_gang(adev);
6713		if (old == gang)
6714			break;
6715
6716		if (!dma_fence_is_signaled(old))
6717			return old;
6718
6719	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6720			 old, gang) != old);
6721
6722	dma_fence_put(old);
6723	return NULL;
6724}
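/*
 * Caller sketch (illustrative): submission code retries the switch until
 * the previous gang leader has signaled, waiting on the returned fence:
 *
 *	struct dma_fence *old;
 *
 *	while ((old = amdgpu_device_switch_gang(adev, gang))) {
 *		dma_fence_wait(old, false);
 *		dma_fence_put(old);
 *	}
 */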
6725
6726bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6727{
6728	switch (adev->asic_type) {
6729#ifdef CONFIG_DRM_AMDGPU_SI
6730	case CHIP_HAINAN:
6731#endif
6732	case CHIP_TOPAZ:
6733		/* chips with no display hardware */
6734		return false;
6735#ifdef CONFIG_DRM_AMDGPU_SI
6736	case CHIP_TAHITI:
6737	case CHIP_PITCAIRN:
6738	case CHIP_VERDE:
6739	case CHIP_OLAND:
6740#endif
6741#ifdef CONFIG_DRM_AMDGPU_CIK
6742	case CHIP_BONAIRE:
6743	case CHIP_HAWAII:
6744	case CHIP_KAVERI:
6745	case CHIP_KABINI:
6746	case CHIP_MULLINS:
6747#endif
6748	case CHIP_TONGA:
6749	case CHIP_FIJI:
6750	case CHIP_POLARIS10:
6751	case CHIP_POLARIS11:
6752	case CHIP_POLARIS12:
6753	case CHIP_VEGAM:
6754	case CHIP_CARRIZO:
6755	case CHIP_STONEY:
6756		/* chips with display hardware */
6757		return true;
6758	default:
6759		/* IP discovery */
6760		if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
6761		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6762			return false;
6763		return true;
6764	}
6765}
6766
6767uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
6768		uint32_t inst, uint32_t reg_addr, char reg_name[],
6769		uint32_t expected_value, uint32_t mask)
6770{
6771	uint32_t ret = 0;
6772	uint32_t old_ = 0;
6773	uint32_t tmp_ = RREG32(reg_addr);
6774	uint32_t loop = adev->usec_timeout;
6775
6776	while ((tmp_ & (mask)) != (expected_value)) {
6777		if (old_ != tmp_) {
6778			loop = adev->usec_timeout;
6779			old_ = tmp_;
6780		} else
6781			udelay(1);
6782		tmp_ = RREG32(reg_addr);
6783		loop--;
6784		if (!loop) {
6785			DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
6786				  inst, reg_name, (uint32_t)expected_value,
6787				  (uint32_t)(tmp_ & (mask)));
6788			ret = -ETIMEDOUT;
6789			break;
6790		}
6791	}
6792	return ret;
6793}
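/*
 * Usage sketch with hypothetical register names; the last two arguments
 * are the expected value and the mask applied to the read:
 *
 *	r = amdgpu_device_wait_on_rreg(adev, 0, regSTATUS, "STATUS",
 *				       STATUS__READY_MASK,
 *				       STATUS__READY_MASK);
 */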
6794
6795ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring)
6796{
6797	ssize_t size = 0;
6798
6799	if (!ring || !ring->adev)
6800		return size;
6801
6802	if (amdgpu_device_should_recover_gpu(ring->adev))
6803		size |= AMDGPU_RESET_TYPE_FULL;
6804
6805	if (unlikely(!ring->adev->debug_disable_soft_recovery) &&
6806	    !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery)
6807		size |= AMDGPU_RESET_TYPE_SOFT_RESET;
6808
6809	return size;
6810}
6811
6812ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
6813{
6814	ssize_t size = 0;
6815
6816	if (supported_reset == 0) {
6817		size += sysfs_emit_at(buf, size, "unsupported");
6818		size += sysfs_emit_at(buf, size, "\n");
6819		return size;
6820
6821	}
6822
6823	if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET)
6824		size += sysfs_emit_at(buf, size, "soft ");
6825
6826	if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
6827		size += sysfs_emit_at(buf, size, "queue ");
6828
6829	if (supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)
6830		size += sysfs_emit_at(buf, size, "pipe ");
6831
6832	if (supported_reset & AMDGPU_RESET_TYPE_FULL)
6833		size += sysfs_emit_at(buf, size, "full ");
6834
6835	size += sysfs_emit_at(buf, size, "\n");
6836	return size;
6837}
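/*
 * Sysfs wiring sketch (illustrative; attribute and field names are
 * assumptions): an IP block exposes its supported reset types by
 * combining the two helpers above in a show callback:
 *
 *	static ssize_t gfx_reset_mask_show(struct device *dev,
 *					   struct device_attribute *attr,
 *					   char *buf)
 *	{
 *		struct drm_device *ddev = dev_get_drvdata(dev);
 *		struct amdgpu_device *adev = drm_to_adev(ddev);
 *
 *		return amdgpu_show_reset_mask(buf,
 *					      adev->gfx.gfx_supported_reset);
 *	}
 */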
v5.14.15
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
 
 
  28#include <linux/power_supply.h>
  29#include <linux/kthread.h>
  30#include <linux/module.h>
  31#include <linux/console.h>
  32#include <linux/slab.h>
 
 
 
 
  33
  34#include <drm/drm_atomic_helper.h>
 
 
  35#include <drm/drm_probe_helper.h>
  36#include <drm/amdgpu_drm.h>
 
  37#include <linux/vgaarb.h>
  38#include <linux/vga_switcheroo.h>
  39#include <linux/efi.h>
  40#include "amdgpu.h"
  41#include "amdgpu_trace.h"
  42#include "amdgpu_i2c.h"
  43#include "atom.h"
  44#include "amdgpu_atombios.h"
  45#include "amdgpu_atomfirmware.h"
  46#include "amd_pcie.h"
  47#ifdef CONFIG_DRM_AMDGPU_SI
  48#include "si.h"
  49#endif
  50#ifdef CONFIG_DRM_AMDGPU_CIK
  51#include "cik.h"
  52#endif
  53#include "vi.h"
  54#include "soc15.h"
  55#include "nv.h"
  56#include "bif/bif_4_1_d.h"
  57#include <linux/pci.h>
  58#include <linux/firmware.h>
  59#include "amdgpu_vf_error.h"
  60
  61#include "amdgpu_amdkfd.h"
  62#include "amdgpu_pm.h"
  63
  64#include "amdgpu_xgmi.h"
  65#include "amdgpu_ras.h"
  66#include "amdgpu_pmu.h"
  67#include "amdgpu_fru_eeprom.h"
  68#include "amdgpu_reset.h"
 
 
  69
  70#include <linux/suspend.h>
  71#include <drm/task_barrier.h>
  72#include <linux/pm_runtime.h>
  73
  74#include <drm/drm_drv.h>
  75
 
 
 
 
  76MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
  77MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
  78MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
  79MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
  80MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
  81MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
  82MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
  83MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
  84MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
  85MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
  86MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
  87MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
  88
  89#define AMDGPU_RESUME_MS		2000
 
 
 
 
 
 
 
  90
  91const char *amdgpu_asic_name[] = {
  92	"TAHITI",
  93	"PITCAIRN",
  94	"VERDE",
  95	"OLAND",
  96	"HAINAN",
  97	"BONAIRE",
  98	"KAVERI",
  99	"KABINI",
 100	"HAWAII",
 101	"MULLINS",
 102	"TOPAZ",
 103	"TONGA",
 104	"FIJI",
 105	"CARRIZO",
 106	"STONEY",
 107	"POLARIS10",
 108	"POLARIS11",
 109	"POLARIS12",
 110	"VEGAM",
 111	"VEGA10",
 112	"VEGA12",
 113	"VEGA20",
 114	"RAVEN",
 115	"ARCTURUS",
 116	"RENOIR",
 117	"ALDEBARAN",
 118	"NAVI10",
 
 119	"NAVI14",
 120	"NAVI12",
 121	"SIENNA_CICHLID",
 122	"NAVY_FLOUNDER",
 123	"VANGOGH",
 124	"DIMGREY_CAVEFISH",
 125	"BEIGE_GOBY",
 126	"YELLOW_CARP",
 
 127	"LAST",
 128};
 129
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 130/**
 131 * DOC: pcie_replay_count
 132 *
 133 * The amdgpu driver provides a sysfs API for reporting the total number
 134 * of PCIe replays (NAKs)
 135 * The file pcie_replay_count is used for this and returns the total
 136 * number of replays as a sum of the NAKs generated and NAKs received
 137 */
 138
 139static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
 140		struct device_attribute *attr, char *buf)
 141{
 142	struct drm_device *ddev = dev_get_drvdata(dev);
 143	struct amdgpu_device *adev = drm_to_adev(ddev);
 144	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
 145
 146	return sysfs_emit(buf, "%llu\n", cnt);
 147}
 148
 149static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
 150		amdgpu_device_get_pcie_replay_count, NULL);
 151
 152static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 153
 154/**
 155 * DOC: product_name
 156 *
 157 * The amdgpu driver provides a sysfs API for reporting the product name
 158 * for the device
 159 * The file serial_number is used for this and returns the product name
 160 * as returned from the FRU.
 161 * NOTE: This is only available for certain server cards
 162 */
 163
 164static ssize_t amdgpu_device_get_product_name(struct device *dev,
 165		struct device_attribute *attr, char *buf)
 166{
 167	struct drm_device *ddev = dev_get_drvdata(dev);
 168	struct amdgpu_device *adev = drm_to_adev(ddev);
 
 
 
 
 
 
 
 
 
 169
 170	return sysfs_emit(buf, "%s\n", adev->product_name);
 
 171}
 172
 173static DEVICE_ATTR(product_name, S_IRUGO,
 174		amdgpu_device_get_product_name, NULL);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 175
 176/**
 177 * DOC: product_number
 
 
 
 
 
 
 
 
 
 
 
 178 *
 179 * The amdgpu driver provides a sysfs API for reporting the part number
 180 * for the device
 181 * The file serial_number is used for this and returns the part number
 182 * as returned from the FRU.
 183 * NOTE: This is only available for certain server cards
 184 */
 185
 186static ssize_t amdgpu_device_get_product_number(struct device *dev,
 187		struct device_attribute *attr, char *buf)
 
 188{
 189	struct drm_device *ddev = dev_get_drvdata(dev);
 190	struct amdgpu_device *adev = drm_to_adev(ddev);
 
 
 191
 192	return sysfs_emit(buf, "%s\n", adev->product_number);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 193}
 194
 195static DEVICE_ATTR(product_number, S_IRUGO,
 196		amdgpu_device_get_product_number, NULL);
 197
 198/**
 199 * DOC: serial_number
 200 *
 201 * The amdgpu driver provides a sysfs API for reporting the serial number
 202 * for the device
 203 * The file serial_number is used for this and returns the serial number
 204 * as returned from the FRU.
 205 * NOTE: This is only available for certain server cards
 206 */
 207
 208static ssize_t amdgpu_device_get_serial_number(struct device *dev,
 209		struct device_attribute *attr, char *buf)
 210{
 
 211	struct drm_device *ddev = dev_get_drvdata(dev);
 212	struct amdgpu_device *adev = drm_to_adev(ddev);
 213
 214	return sysfs_emit(buf, "%s\n", adev->serial);
 
 
 
 215}
 216
 217static DEVICE_ATTR(serial_number, S_IRUGO,
 218		amdgpu_device_get_serial_number, NULL);
 
 
 
 
 
 219
 220/**
 221 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 222 *
 223 * @dev: drm_device pointer
 224 *
 225 * Returns true if the device is a dGPU with ATPX power control,
 226 * otherwise return false.
 227 */
 228bool amdgpu_device_supports_px(struct drm_device *dev)
 229{
 230	struct amdgpu_device *adev = drm_to_adev(dev);
 231
 232	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
 233		return true;
 234	return false;
 235}
 236
 237/**
 238 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 239 *
 240 * @dev: drm_device pointer
 241 *
 242 * Returns true if the device is a dGPU with ACPI power control,
 243 * otherwise return false.
 244 */
 245bool amdgpu_device_supports_boco(struct drm_device *dev)
 246{
 247	struct amdgpu_device *adev = drm_to_adev(dev);
 248
 
 
 
 249	if (adev->has_pr3 ||
 250	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
 251		return true;
 252	return false;
 253}
 254
 255/**
 256 * amdgpu_device_supports_baco - Does the device support BACO
 257 *
 258 * @dev: drm_device pointer
 259 *
 260 * Returns true if the device supporte BACO,
 261 * otherwise return false.
 
 
 262 */
 263bool amdgpu_device_supports_baco(struct drm_device *dev)
 264{
 265	struct amdgpu_device *adev = drm_to_adev(dev);
 266
 267	return amdgpu_asic_supports_baco(adev);
 268}
 269
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 270/**
 271 * amdgpu_device_supports_smart_shift - Is the device dGPU with
 272 * smart shift support
 273 *
 274 * @dev: drm_device pointer
 275 *
 276 * Returns true if the device is a dGPU with Smart Shift support,
 277 * otherwise returns false.
 278 */
 279bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
 280{
 281	return (amdgpu_device_supports_boco(dev) &&
 282		amdgpu_acpi_is_power_shift_control_supported());
 283}
 284
 285/*
 286 * VRAM access helper functions
 287 */
 288
 289/**
 290 * amdgpu_device_vram_access - read/write a buffer in vram
 291 *
 292 * @adev: amdgpu_device pointer
 293 * @pos: offset of the buffer in vram
 294 * @buf: virtual address of the buffer in system memory
 295 * @size: read/write size, sizeof(@buf) must > @size
 296 * @write: true - write to vram, otherwise - read from vram
 297 */
 298void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 299			       uint32_t *buf, size_t size, bool write)
 300{
 301	unsigned long flags;
 302	uint32_t hi = ~0;
 
 303	uint64_t last;
 304	int idx;
 305
 306	if (!drm_dev_enter(&adev->ddev, &idx))
 307		return;
 308
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 309#ifdef CONFIG_64BIT
 
 
 
 
 
 
 
 310	last = min(pos + size, adev->gmc.visible_vram_size);
 311	if (last > pos) {
 312		void __iomem *addr = adev->mman.aper_base_kaddr + pos;
 313		size_t count = last - pos;
 314
 315		if (write) {
 316			memcpy_toio(addr, buf, count);
 317			mb();
 318			amdgpu_device_flush_hdp(adev, NULL);
 319		} else {
 320			amdgpu_device_invalidate_hdp(adev, NULL);
 321			mb();
 322			memcpy_fromio(buf, addr, count);
 323		}
 324
 325		if (count == size)
 326			goto exit;
 327
 328		pos += count;
 329		buf += count / 4;
 330		size -= count;
 331	}
 332#endif
 333
 334	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 335	for (last = pos + size; pos < last; pos += 4) {
 336		uint32_t tmp = pos >> 31;
 337
 338		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
 339		if (tmp != hi) {
 340			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
 341			hi = tmp;
 342		}
 343		if (write)
 344			WREG32_NO_KIQ(mmMM_DATA, *buf++);
 345		else
 346			*buf++ = RREG32_NO_KIQ(mmMM_DATA);
 347	}
 348	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 349
 350#ifdef CONFIG_64BIT
 351exit:
 352#endif
 353	drm_dev_exit(idx);
 354}
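/*
 * Usage sketch (illustrative values): copy the first 16 bytes of VRAM into
 * a local buffer, then write them back.
 *
 *	uint32_t data[4];
 *
 *	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
 *	amdgpu_device_vram_access(adev, 0, data, sizeof(data), true);
 */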
 355
 356/*
 357 * register access helper functions.
 358 */
 359
 360/* Check if hw access should be skipped because of hotplug or device error */
 361bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
 362{
 363	if (adev->no_hw_access)
 364		return true;
 365
 366#ifdef CONFIG_LOCKDEP
 367	/*
 368	 * This is a bit complicated to understand, so worth a comment. What we assert
 369	 * here is that the GPU reset is not running on another thread in parallel.
 370	 *
 371	 * For this we trylock the read side of the reset semaphore, if that succeeds
 372 * we know that the reset is not running in parallel.
 373	 *
 374	 * If the trylock fails we assert that we are either already holding the read
 375	 * side of the lock or are the reset thread itself and hold the write side of
 376	 * the lock.
 377	 */
 378	if (in_task()) {
 379		if (down_read_trylock(&adev->reset_sem))
 380			up_read(&adev->reset_sem);
 381		else
 382			lockdep_assert_held(&adev->reset_sem);
 383	}
 384#endif
 385	return false;
 386}
 387
 388/**
 389 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 390 *
 391 * @adev: amdgpu_device pointer
 392 * @reg: dword aligned register offset
 393 * @acc_flags: access flags which require special behavior
 394 *
 395 * Returns the 32 bit value from the offset specified.
 396 */
 397uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
 398			    uint32_t reg, uint32_t acc_flags)
 399{
 400	uint32_t ret;
 401
 402	if (amdgpu_device_skip_hw_access(adev))
 403		return 0;
 404
 405	if ((reg * 4) < adev->rmmio_size) {
 406		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 407		    amdgpu_sriov_runtime(adev) &&
 408		    down_read_trylock(&adev->reset_sem)) {
 409			ret = amdgpu_kiq_rreg(adev, reg);
 410			up_read(&adev->reset_sem);
 411		} else {
 412			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 413		}
 414	} else {
 415		ret = adev->pcie_rreg(adev, reg * 4);
 416	}
 417
 418	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
 419
 420	return ret;
 421}
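/*
 * Hedged note: callers normally go through wrapper macros rather than
 * calling this helper directly; RREG32() in amdgpu.h is expected to be
 * roughly
 *
 *	#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
 *
 * The exact macro definition is an assumption based on the call sites in
 * this file.
 */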
 422
 423/*
 424 * MMIO register read with byte access helper functions
 425 * @offset: byte offset from MMIO start
 426 *
 427 */
 428
 429/**
 430 * amdgpu_mm_rreg8 - read a memory mapped IO register
 431 *
 432 * @adev: amdgpu_device pointer
 433 * @offset: byte aligned register offset
 434 *
 435 * Returns the 8 bit value from the offset specified.
 436 */
 437uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
 438{
 439	if (amdgpu_device_skip_hw_access(adev))
 440		return 0;
 441
 442	if (offset < adev->rmmio_size)
 443		return (readb(adev->rmmio + offset));
 444	BUG();
 445}
 446
 447/*
 448 * MMIO register write with byte access helper functions
 449 * @offset: byte offset from MMIO start
 450 * @value: the value to be written to the register
 451 *
 452 */
 453/**
 454 * amdgpu_mm_wreg8 - write a memory mapped IO register
 455 *
 456 * @adev: amdgpu_device pointer
 457 * @offset: byte aligned register offset
 458 * @value: 8 bit value to write
 459 *
 460 * Writes the value specified to the offset specified.
 461 */
 462void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
 463{
 464	if (amdgpu_device_skip_hw_access(adev))
 465		return;
 466
 467	if (offset < adev->rmmio_size)
 468		writeb(value, adev->rmmio + offset);
 469	else
 470		BUG();
 471}
 472
 473/**
 474 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 475 *
 476 * @adev: amdgpu_device pointer
 477 * @reg: dword aligned register offset
 478 * @v: 32 bit value to write to the register
 479 * @acc_flags: access flags which require special behavior
 480 *
 481 * Writes the value specified to the offset specified.
 482 */
 483void amdgpu_device_wreg(struct amdgpu_device *adev,
 484			uint32_t reg, uint32_t v,
 485			uint32_t acc_flags)
 486{
 487	if (amdgpu_device_skip_hw_access(adev))
 488		return;
 489
 490	if ((reg * 4) < adev->rmmio_size) {
 491		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 492		    amdgpu_sriov_runtime(adev) &&
 493		    down_read_trylock(&adev->reset_sem)) {
 494			amdgpu_kiq_wreg(adev, reg, v);
 495			up_read(&adev->reset_sem);
 496		} else {
 497			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 498		}
 499	} else {
 500		adev->pcie_wreg(adev, reg * 4, v);
 501	}
 502
 503	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
 504}
 505
 506/*
 507 * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
 508 *
 509 * this function is invoked only for debugfs register access
 510 */
 511void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
 512			     uint32_t reg, uint32_t v)
 513{
 514	if (amdgpu_device_skip_hw_access(adev))
 515		return;
 516
 517	if (amdgpu_sriov_fullaccess(adev) &&
 518	    adev->gfx.rlc.funcs &&
 519	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
 520		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
 521			return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0, 0);
 522	} else {
 523		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 524	}
 525}
 526
 527/**
 528 * amdgpu_mm_rdoorbell - read a doorbell dword
 529 *
 530 * @adev: amdgpu_device pointer
 531 * @index: doorbell index
 532 *
 533 * Returns the value in the doorbell aperture at the
 534 * requested doorbell index (CIK).
 535 */
 536u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
 537{
 538	if (amdgpu_device_skip_hw_access(adev))
 539		return 0;
 540
 541	if (index < adev->doorbell.num_doorbells) {
 542		return readl(adev->doorbell.ptr + index);
 543	} else {
 544		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 545		return 0;
 546	}
 547}
 548
 549/**
 550 * amdgpu_mm_wdoorbell - write a doorbell dword
 551 *
 552 * @adev: amdgpu_device pointer
 553 * @index: doorbell index
 554 * @v: value to write
 555 *
 556 * Writes @v to the doorbell aperture at the
 557 * requested doorbell index (CIK).
 558 */
 559void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
 560{
 561	if (amdgpu_device_skip_hw_access(adev))
 562		return;
 563
 564	if (index < adev->doorbell.num_doorbells) {
 565		writel(v, adev->doorbell.ptr + index);
 566	} else {
 567		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 568	}
 569}
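/*
 * Illustrative usage (wrapper shape assumed): ring code typically kicks a
 * doorbell through a macro such as
 *
 *	#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
 *
 * for example:
 *
 *	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 */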
 570
 571/**
 572 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 573 *
 574 * @adev: amdgpu_device pointer
 575 * @index: doorbell index
 576 *
 577 * Returns the value in the doorbell aperture at the
 578 * requested doorbell index (VEGA10+).
 579 */
 580u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
 581{
 582	if (amdgpu_device_skip_hw_access(adev))
 583		return 0;
 584
 585	if (index < adev->doorbell.num_doorbells) {
 586		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
 587	} else {
 588		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 589		return 0;
 590	}
 591}
 592
 593/**
 594 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 595 *
 596 * @adev: amdgpu_device pointer
 597 * @index: doorbell index
 598 * @v: value to write
 599 *
 600 * Writes @v to the doorbell aperture at the
 601 * requested doorbell index (VEGA10+).
 602 */
 603void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
 604{
 605	if (amdgpu_device_skip_hw_access(adev))
 606		return;
 607
 608	if (index < adev->doorbell.num_doorbells) {
 609		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
 610	} else {
 611		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 612	}
 613}
 614
 615/**
 616 * amdgpu_device_indirect_rreg - read an indirect register
 617 *
 618 * @adev: amdgpu_device pointer
 619 * @pcie_index: mmio register offset
 620 * @pcie_data: mmio register offset
 621 * @reg_addr: indirect register address to read from
 622 *
 623 * Returns the value of indirect register @reg_addr
 624 */
 625u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
 626				u32 pcie_index, u32 pcie_data,
 627				u32 reg_addr)
 628{
 629	unsigned long flags;
 630	u32 r;
 631	void __iomem *pcie_index_offset;
 632	void __iomem *pcie_data_offset;
 633
 634	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 635	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 636	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 637
 638	writel(reg_addr, pcie_index_offset);
 639	readl(pcie_index_offset);
 640	r = readl(pcie_data_offset);
 641	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 642
 643	return r;
 644}
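/*
 * Sketch of how an ASIC backend might wire this helper up as its pcie_rreg
 * callback (function and register names here are hypothetical):
 *
 *	static u32 soc_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		return amdgpu_device_indirect_rreg(adev, mmPCIE_INDEX2,
 *						   mmPCIE_DATA2, reg);
 *	}
 */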
 645
 646/**
 647 * amdgpu_device_indirect_rreg64 - read a 64-bit indirect register
 648 *
 649 * @adev: amdgpu_device pointer
 650 * @pcie_index: mmio register offset
 651 * @pcie_data: mmio register offset
 652 * @reg_addr: indirect register address to read from
 653 *
 654 * Returns the value of indirect register @reg_addr
 655 */
 656u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
 657				  u32 pcie_index, u32 pcie_data,
 658				  u32 reg_addr)
 659{
 660	unsigned long flags;
 661	u64 r;
 662	void __iomem *pcie_index_offset;
 663	void __iomem *pcie_data_offset;
 664
 665	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 666	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 667	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 668
 669	/* read low 32 bits */
 670	writel(reg_addr, pcie_index_offset);
 671	readl(pcie_index_offset);
 672	r = readl(pcie_data_offset);
 673	/* read high 32 bits */
 674	writel(reg_addr + 4, pcie_index_offset);
 675	readl(pcie_index_offset);
 676	r |= ((u64)readl(pcie_data_offset) << 32);
 677	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 678
 679	return r;
 680}
 681
 682/**
 683 * amdgpu_device_indirect_wreg - write an indirect register
 684 *
 685 * @adev: amdgpu_device pointer
 686 * @pcie_index: mmio register offset
 687 * @pcie_data: mmio register offset
 688 * @reg_addr: indirect register offset
 689 * @reg_data: indirect register data
 690 *
 691 */
 692void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
 693				 u32 pcie_index, u32 pcie_data,
 694				 u32 reg_addr, u32 reg_data)
 695{
 696	unsigned long flags;
 697	void __iomem *pcie_index_offset;
 698	void __iomem *pcie_data_offset;
 699
 700	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 701	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 702	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 703
 704	writel(reg_addr, pcie_index_offset);
 705	readl(pcie_index_offset);
 706	writel(reg_data, pcie_data_offset);
 707	readl(pcie_data_offset);
 708	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 709}
 710
 711/**
 712 * amdgpu_device_indirect_wreg64 - write a 64-bit indirect register
 713 *
 714 * @adev: amdgpu_device pointer
 715 * @pcie_index: mmio register offset
 716 * @pcie_data: mmio register offset
 717 * @reg_addr: indirect register offset
 718 * @reg_data: indirect register data
 719 *
 720 */
 721void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
 722				   u32 pcie_index, u32 pcie_data,
 723				   u32 reg_addr, u64 reg_data)
 724{
 725	unsigned long flags;
 726	void __iomem *pcie_index_offset;
 727	void __iomem *pcie_data_offset;
 728
 729	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 730	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 731	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 732
 733	/* write low 32 bits */
 734	writel(reg_addr, pcie_index_offset);
 735	readl(pcie_index_offset);
 736	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
 737	readl(pcie_data_offset);
 738	/* write high 32 bits */
 739	writel(reg_addr + 4, pcie_index_offset);
 740	readl(pcie_index_offset);
 741	writel((u32)(reg_data >> 32), pcie_data_offset);
 742	readl(pcie_data_offset);
 743	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 744}
 745
 746/**
 747 * amdgpu_invalid_rreg - dummy reg read function
 748 *
 749 * @adev: amdgpu_device pointer
 750 * @reg: offset of register
 751 *
 752 * Dummy register read function.  Used for register blocks
 753 * that certain asics don't have (all asics).
 754 * Returns the value in the register.
 755 */
 756static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
 757{
 758	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
 759	BUG();
 760	return 0;
 761}
 762
 763/**
 764 * amdgpu_invalid_wreg - dummy reg write function
 765 *
 766 * @adev: amdgpu_device pointer
 767 * @reg: offset of register
 768 * @v: value to write to the register
 769 *
 770 * Dummy register write function.  Used for register blocks
 771 * that certain asics don't have (all asics).
 772 */
 773static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 774{
 775	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
 776		  reg, v);
 777	BUG();
 778}
 779
 780/**
 781 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 782 *
 783 * @adev: amdgpu_device pointer
 784 * @reg: offset of register
 785 *
 786 * Dummy register read function.  Used for register blocks
 787 * that certain asics don't have (all asics).
 788 * Returns the value in the register.
 789 */
 790static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
 791{
 792	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
 793	BUG();
 794	return 0;
 795}
 796
 797/**
 798 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 799 *
 800 * @adev: amdgpu_device pointer
 801 * @reg: offset of register
 802 * @v: value to write to the register
 803 *
 804 * Dummy register write function.  Used for register blocks
 805 * that certain asics don't have (all asics).
 806 */
 807static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
 808{
 809	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
 810		  reg, v);
 811	BUG();
 812}
 813
 814/**
 815 * amdgpu_block_invalid_rreg - dummy reg read function
 816 *
 817 * @adev: amdgpu_device pointer
 818 * @block: offset of instance
 819 * @reg: offset of register
 820 *
 821 * Dummy register read function.  Used for register blocks
 822 * that certain asics don't have (all asics).
 823 * Returns the value in the register.
 824 */
 825static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
 826					  uint32_t block, uint32_t reg)
 827{
 828	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
 829		  reg, block);
 830	BUG();
 831	return 0;
 832}
 833
 834/**
 835 * amdgpu_block_invalid_wreg - dummy reg write function
 836 *
 837 * @adev: amdgpu_device pointer
 838 * @block: offset of instance
 839 * @reg: offset of register
 840 * @v: value to write to the register
 841 *
 842 * Dummy register write function.  Used for register blocks
 843 * that certain asics don't have (all asics).
 844 */
 845static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
 846				      uint32_t block,
 847				      uint32_t reg, uint32_t v)
 848{
 849	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
 850		  reg, block, v);
 851	BUG();
 852}
 853
 854/**
 855 * amdgpu_device_asic_init - Wrapper for atom asic_init
 856 *
 857 * @adev: amdgpu_device pointer
 858 *
 859 * Does any asic specific work and then calls atom asic init.
 860 */
 861static int amdgpu_device_asic_init(struct amdgpu_device *adev)
 862{
 863	amdgpu_asic_pre_asic_init(adev);
 864
 865	return amdgpu_atom_asic_init(adev->mode_info.atom_context);
 866}
 867
 868/**
 869 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 870 *
 871 * @adev: amdgpu_device pointer
 872 *
 873 * Allocates a scratch page of VRAM for use by various things in the
 874 * driver.
 875 */
 876static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
 877{
 878	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
 879				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
 880				       &adev->vram_scratch.robj,
 881				       &adev->vram_scratch.gpu_addr,
 882				       (void **)&adev->vram_scratch.ptr);
 883}
 884
 885/**
 886 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 887 *
 888 * @adev: amdgpu_device pointer
 889 *
 890 * Frees the VRAM scratch page.
 891 */
 892static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
 893{
 894	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
 895}
 896
 897/**
 898 * amdgpu_device_program_register_sequence - program an array of registers.
 899 *
 900 * @adev: amdgpu_device pointer
 901 * @registers: pointer to the register array
 902 * @array_size: size of the register array
 903 *
 904 * Programs an array of registers with AND and OR masks.
 905 * This is a helper for setting golden registers.
 906 */
 907void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
 908					     const u32 *registers,
 909					     const u32 array_size)
 910{
 911	u32 tmp, reg, and_mask, or_mask;
 912	int i;
 913
 914	if (array_size % 3)
 915		return;
 916
 917	for (i = 0; i < array_size; i += 3) {
 918		reg = registers[i + 0];
 919		and_mask = registers[i + 1];
 920		or_mask = registers[i + 2];
 921
 922		if (and_mask == 0xffffffff) {
 923			tmp = or_mask;
 924		} else {
 925			tmp = RREG32(reg);
 926			tmp &= ~and_mask;
 927			if (adev->family >= AMDGPU_FAMILY_AI)
 928				tmp |= (or_mask & and_mask);
 929			else
 930				tmp |= or_mask;
 931		}
 932		WREG32(reg, tmp);
 933	}
 934}
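/*
 * Illustrative golden-register array (register name and masks are
 * hypothetical): entries come in {reg, and_mask, or_mask} triples.
 *
 *	static const u32 golden_settings[] = {
 *		mmFOO_CNTL, 0x0000000f, 0x00000002,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings,
 *						ARRAY_SIZE(golden_settings));
 */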
 935
 936/**
 937 * amdgpu_device_pci_config_reset - reset the GPU
 938 *
 939 * @adev: amdgpu_device pointer
 940 *
 941 * Resets the GPU using the pci config reset sequence.
 942 * Only applicable to asics prior to vega10.
 943 */
 944void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
 945{
 946	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
 947}
 948
 949/**
 950 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 951 *
 952 * @adev: amdgpu_device pointer
 953 *
 954 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 955 */
 956int amdgpu_device_pci_reset(struct amdgpu_device *adev)
 957{
 958	return pci_reset_function(adev->pdev);
 959}
 960
 961/*
 962 * GPU doorbell aperture helpers function.
 963 */
 964/**
 965 * amdgpu_device_doorbell_init - Init doorbell driver information.
 966 *
 967 * @adev: amdgpu_device pointer
 968 *
 969 * Init doorbell driver information (CIK)
 970 * Returns 0 on success, error on failure.
 971 */
 972static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
 973{
 974
 975	/* No doorbell on SI hardware generation */
 976	if (adev->asic_type < CHIP_BONAIRE) {
 977		adev->doorbell.base = 0;
 978		adev->doorbell.size = 0;
 979		adev->doorbell.num_doorbells = 0;
 980		adev->doorbell.ptr = NULL;
 981		return 0;
 982	}
 983
 984	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
 985		return -EINVAL;
 986
 987	amdgpu_asic_init_doorbell_index(adev);
 988
 989	/* doorbell bar mapping */
 990	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
 991	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
 992
 993	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
 994					     adev->doorbell_index.max_assignment+1);
 995	if (adev->doorbell.num_doorbells == 0)
 996		return -EINVAL;
 997
 998	/* For Vega, reserve and map two pages on the doorbell BAR since the
 999	 * SDMA paging queue doorbell uses the second page. The
1000	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1001	 * doorbells are in the first page. So with the paging queue enabled,
1002	 * the max num_doorbells needs one extra page (0x400 in dwords).
1003	 */
1004	if (adev->asic_type >= CHIP_VEGA10)
1005		adev->doorbell.num_doorbells += 0x400;
1006
1007	adev->doorbell.ptr = ioremap(adev->doorbell.base,
1008				     adev->doorbell.num_doorbells *
1009				     sizeof(u32));
1010	if (adev->doorbell.ptr == NULL)
1011		return -ENOMEM;
1012
1013	return 0;
1014}
1015
1016/**
1017 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1018 *
1019 * @adev: amdgpu_device pointer
1020 *
1021 * Tear down doorbell driver information (CIK)
1022 */
1023static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1024{
1025	iounmap(adev->doorbell.ptr);
1026	adev->doorbell.ptr = NULL;
1027}
1028
1029
1030
1031/*
1032 * amdgpu_device_wb_*()
1033 * Writeback is the method by which the GPU updates special pages in memory
1034 * with the status of certain GPU events (fences, ring pointers, etc.).
1035 */
1036
1037/**
1038 * amdgpu_device_wb_fini - Disable Writeback and free memory
1039 *
1040 * @adev: amdgpu_device pointer
1041 *
1042 * Disables Writeback and frees the Writeback memory (all asics).
1043 * Used at driver shutdown.
1044 */
1045static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1046{
1047	if (adev->wb.wb_obj) {
1048		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1049				      &adev->wb.gpu_addr,
1050				      (void **)&adev->wb.wb);
1051		adev->wb.wb_obj = NULL;
1052	}
1053}
1054
1055/**
1056 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
1057 *
1058 * @adev: amdgpu_device pointer
1059 *
1060 * Initializes writeback and allocates writeback memory (all asics).
1061 * Used at driver startup.
1062 * Returns 0 on success or a negative error code on failure.
1063 */
1064static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1065{
1066	int r;
1067
1068	if (adev->wb.wb_obj == NULL) {
1069		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1070		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1071					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1072					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1073					    (void **)&adev->wb.wb);
1074		if (r) {
1075			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1076			return r;
1077		}
1078
1079		adev->wb.num_wb = AMDGPU_MAX_WB;
1080		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1081
1082		/* clear wb memory */
1083		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1084	}
1085
1086	return 0;
1087}
1088
1089/**
1090 * amdgpu_device_wb_get - Allocate a wb entry
1091 *
1092 * @adev: amdgpu_device pointer
1093 * @wb: wb index
1094 *
1095 * Allocate a wb slot for use by the driver (all asics).
1096 * Returns 0 on success or -EINVAL on failure.
1097 */
1098int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1099{
1100	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1101
1102	if (offset < adev->wb.num_wb) {
1103		__set_bit(offset, adev->wb.used);
1104		*wb = offset << 3; /* convert to dw offset */
1105		return 0;
1106	} else {
1107		return -EINVAL;
1108	}
1109}
1110
1111/**
1112 * amdgpu_device_wb_free - Free a wb entry
1113 *
1114 * @adev: amdgpu_device pointer
1115 * @wb: wb index
1116 *
1117 * Free a wb slot allocated for use by the driver (all asics).
1118 */
1119void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1120{
1121	wb >>= 3;
1122	if (wb < adev->wb.num_wb)
1123		__clear_bit(wb, adev->wb.used);
1124}
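/*
 * Typical allocate/use/free pattern (illustrative): the index returned by
 * amdgpu_device_wb_get() is already converted to a dword offset into
 * adev->wb.wb.
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		adev->wb.wb[wb] = 0;
 *		// ... let the GPU write status into the slot ...
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */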
1125
1126/**
1127 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1128 *
1129 * @adev: amdgpu_device pointer
1130 *
1131 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1132 * to fail, but if any of the BARs is not accessible after the resize we abort
1133 * driver loading by returning -ENODEV.
1134 */
1135int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1136{
1137	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1138	struct pci_bus *root;
1139	struct resource *res;
1140	unsigned i;
1141	u16 cmd;
1142	int r;
1143
1144	/* Bypass for VF */
1145	if (amdgpu_sriov_vf(adev))
1146		return 0;
1147
1148	/* skip if the bios has already enabled large BAR */
1149	if (adev->gmc.real_vram_size &&
1150	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1151		return 0;
1152
1153	/* Check if the root BUS has 64bit memory resources */
1154	root = adev->pdev->bus;
1155	while (root->parent)
1156		root = root->parent;
1157
1158	pci_bus_for_each_resource(root, res, i) {
1159		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1160		    res->start > 0x100000000ull)
1161			break;
1162	}
1163
1164	/* Trying to resize is pointless without a root hub window above 4GB */
1165	if (!res)
1166		return 0;
1167
1168	/* Limit the BAR size to what is available */
1169	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1170			rbar_size);
1171
1172	/* Disable memory decoding while we change the BAR addresses and size */
1173	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1174	pci_write_config_word(adev->pdev, PCI_COMMAND,
1175			      cmd & ~PCI_COMMAND_MEMORY);
1176
1177	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1178	amdgpu_device_doorbell_fini(adev);
1179	if (adev->asic_type >= CHIP_BONAIRE)
1180		pci_release_resource(adev->pdev, 2);
1181
1182	pci_release_resource(adev->pdev, 0);
1183
1184	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1185	if (r == -ENOSPC)
1186		DRM_INFO("Not enough PCI address space for a large BAR.");
1187	else if (r && r != -ENOTSUPP)
1188		DRM_ERROR("Problem resizing BAR0 (%d).", r);
1189
1190	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1191
1192	/* When the doorbell or fb BAR isn't available we have no chance of
1193	 * using the device.
1194	 */
1195	r = amdgpu_device_doorbell_init(adev);
1196	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1197		return -ENODEV;
1198
1199	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1200
1201	return 0;
1202}
1203
1204/*
1205 * GPU helpers function.
1206 */
1207/**
1208 * amdgpu_device_need_post - check if the hw needs post or not
1209 *
1210 * @adev: amdgpu_device pointer
1211 *
1212 * Check if the asic has been initialized (all asics) at driver startup
1213 * or post is needed if a hw reset is performed.
1214 * Returns true if post is needed, false if not.
1215 */
1216bool amdgpu_device_need_post(struct amdgpu_device *adev)
1217{
1218	uint32_t reg;
1219
1220	if (amdgpu_sriov_vf(adev))
1221		return false;
1222
1223	if (amdgpu_passthrough(adev)) {
1224		/* for FIJI: In the whole GPU pass-through virtualization case, after a
1225		 * VM reboot some old smc fw still needs the driver to do vPost,
1226		 * otherwise the gpu hangs. smc fw versions above 22.15 don't have this
1227		 * flaw, so we force vPost to be executed for smc versions below 22.15.
1228		 */
1229		if (adev->asic_type == CHIP_FIJI) {
1230			int err;
1231			uint32_t fw_ver;
1232			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1233			/* force vPost if error occurred */
1234			if (err)
1235				return true;
1236
1237			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1238			if (fw_ver < 0x00160e00)
1239				return true;
1240		}
1241	}
1242
1243	/* Don't post if we need to reset whole hive on init */
1244	if (adev->gmc.xgmi.pending_reset)
1245		return false;
1246
1247	if (adev->has_hw_reset) {
1248		adev->has_hw_reset = false;
1249		return true;
1250	}
1251
1252	/* bios scratch used on CIK+ */
1253	if (adev->asic_type >= CHIP_BONAIRE)
1254		return amdgpu_atombios_scratch_need_asic_init(adev);
1255
1256	/* check MEM_SIZE for older asics */
1257	reg = amdgpu_asic_get_config_memsize(adev);
1258
1259	if ((reg != 0) && (reg != 0xffffffff))
1260		return false;
1261
1262	return true;
1263}
1264
1265/* if we get transitioned to only one device, take VGA back */
1266/**
1267 * amdgpu_device_vga_set_decode - enable/disable vga decode
1268 *
1269 * @cookie: amdgpu_device pointer
1270 * @state: enable/disable vga decode
1271 *
1272 * Enable/disable vga decode (all asics).
1273 * Returns VGA resource flags.
1274 */
1275static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
1276{
1277	struct amdgpu_device *adev = cookie;
1278	amdgpu_asic_set_vga_state(adev, state);
1279	if (state)
1280		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1281		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1282	else
1283		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1284}
1285
1286/**
1287 * amdgpu_device_check_block_size - validate the vm block size
1288 *
1289 * @adev: amdgpu_device pointer
1290 *
1291 * Validates the vm block size specified via module parameter.
1292 * The vm block size defines number of bits in page table versus page directory,
1293 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1294 * page table and the remaining bits are in the page directory.
1295 */
1296static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1297{
1298	/* defines number of bits in page table versus page directory,
1299	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1300	 * page table and the remaining bits are in the page directory */
1301	if (amdgpu_vm_block_size == -1)
1302		return;
1303
1304	if (amdgpu_vm_block_size < 9) {
1305		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1306			 amdgpu_vm_block_size);
1307		amdgpu_vm_block_size = -1;
1308	}
1309}
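/*
 * Worked example (parameters assumed): with amdgpu_vm_block_size = 9 and
 * 4KB pages, one page table covers 2^9 * 4KB = 2MB of address space; the
 * remaining address bits are resolved by the page directory levels.
 */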
1310
1311/**
1312 * amdgpu_device_check_vm_size - validate the vm size
1313 *
1314 * @adev: amdgpu_device pointer
1315 *
1316 * Validates the vm size in GB specified via module parameter.
1317 * The VM size is the size of the GPU virtual memory space in GB.
1318 */
1319static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1320{
1321	/* no need to check the default value */
1322	if (amdgpu_vm_size == -1)
1323		return;
1324
1325	if (amdgpu_vm_size < 1) {
1326		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1327			 amdgpu_vm_size);
1328		amdgpu_vm_size = -1;
1329	}
1330}
1331
1332static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1333{
1334	struct sysinfo si;
1335	bool is_os_64 = (sizeof(void *) == 8);
1336	uint64_t total_memory;
1337	uint64_t dram_size_seven_GB = 0x1B8000000;
1338	uint64_t dram_size_three_GB = 0xB8000000;
1339
1340	if (amdgpu_smu_memory_pool_size == 0)
1341		return;
1342
1343	if (!is_os_64) {
1344		DRM_WARN("Not 64-bit OS, feature not supported\n");
1345		goto def_value;
1346	}
1347	si_meminfo(&si);
1348	total_memory = (uint64_t)si.totalram * si.mem_unit;
1349
1350	if ((amdgpu_smu_memory_pool_size == 1) ||
1351		(amdgpu_smu_memory_pool_size == 2)) {
1352		if (total_memory < dram_size_three_GB)
1353			goto def_value1;
1354	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1355		(amdgpu_smu_memory_pool_size == 8)) {
1356		if (total_memory < dram_size_seven_GB)
1357			goto def_value1;
1358	} else {
1359		DRM_WARN("Smu memory pool size not supported\n");
1360		goto def_value;
1361	}
1362	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1363
1364	return;
1365
1366def_value1:
1367	DRM_WARN("Not enough system memory\n");
1368def_value:
1369	adev->pm.smu_prv_buffer_size = 0;
1370}
1371
1372static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1373{
1374	if (!(adev->flags & AMD_IS_APU) ||
1375	    adev->asic_type < CHIP_RAVEN)
1376		return 0;
1377
1378	switch (adev->asic_type) {
1379	case CHIP_RAVEN:
1380		if (adev->pdev->device == 0x15dd)
1381			adev->apu_flags |= AMD_APU_IS_RAVEN;
1382		if (adev->pdev->device == 0x15d8)
1383			adev->apu_flags |= AMD_APU_IS_PICASSO;
1384		break;
1385	case CHIP_RENOIR:
1386		if ((adev->pdev->device == 0x1636) ||
1387		    (adev->pdev->device == 0x164c))
1388			adev->apu_flags |= AMD_APU_IS_RENOIR;
1389		else
1390			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1391		break;
1392	case CHIP_VANGOGH:
1393		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1394		break;
1395	case CHIP_YELLOW_CARP:
1396		break;
1397	default:
1398		return -EINVAL;
1399	}
1400
1401	return 0;
1402}
1403
1404/**
1405 * amdgpu_device_check_arguments - validate module params
1406 *
1407 * @adev: amdgpu_device pointer
1408 *
1409 * Validates certain module parameters and updates
1410 * the associated values used by the driver (all asics).
1411 */
1412static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1413{
1414	if (amdgpu_sched_jobs < 4) {
1415		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1416			 amdgpu_sched_jobs);
1417		amdgpu_sched_jobs = 4;
1418	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
1419		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1420			 amdgpu_sched_jobs);
1421		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1422	}
1423
1424	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1425		/* gart size must be greater or equal to 32M */
1426		dev_warn(adev->dev, "gart size (%d) too small\n",
1427			 amdgpu_gart_size);
1428		amdgpu_gart_size = -1;
1429	}
1430
1431	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1432		/* gtt size must be greater or equal to 32M */
1433		dev_warn(adev->dev, "gtt size (%d) too small\n",
1434				 amdgpu_gtt_size);
1435		amdgpu_gtt_size = -1;
1436	}
1437
1438	/* valid range is between 4 and 9 inclusive */
1439	if (amdgpu_vm_fragment_size != -1 &&
1440	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1441		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1442		amdgpu_vm_fragment_size = -1;
1443	}
1444
1445	if (amdgpu_sched_hw_submission < 2) {
1446		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1447			 amdgpu_sched_hw_submission);
1448		amdgpu_sched_hw_submission = 2;
1449	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1450		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1451			 amdgpu_sched_hw_submission);
1452		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1453	}
1454
1455	amdgpu_device_check_smu_prv_buffer_size(adev);
1456
1457	amdgpu_device_check_vm_size(adev);
1458
1459	amdgpu_device_check_block_size(adev);
1460
1461	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1462
1463	amdgpu_gmc_tmz_set(adev);
1464
1465	amdgpu_gmc_noretry_set(adev);
1466
1467	return 0;
1468}
1469
1470/**
1471 * amdgpu_switcheroo_set_state - set switcheroo state
1472 *
1473 * @pdev: pci dev pointer
1474 * @state: vga_switcheroo state
1475 *
1476 * Callback for the switcheroo driver.  Suspends or resumes
1477 * the asics before or after it is powered up using ACPI methods.
1478 */
1479static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1480					enum vga_switcheroo_state state)
1481{
1482	struct drm_device *dev = pci_get_drvdata(pdev);
1483	int r;
1484
1485	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1486		return;
1487
1488	if (state == VGA_SWITCHEROO_ON) {
1489		pr_info("switched on\n");
1490		/* don't suspend or resume card normally */
1491		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1492
1493		pci_set_power_state(pdev, PCI_D0);
1494		amdgpu_device_load_pci_state(pdev);
1495		r = pci_enable_device(pdev);
1496		if (r)
1497			DRM_WARN("pci_enable_device failed (%d)\n", r);
1498		amdgpu_device_resume(dev, true);
1499
1500		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1501	} else {
1502		pr_info("switched off\n");
1503		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1504		amdgpu_device_suspend(dev, true);
1505		amdgpu_device_cache_pci_state(pdev);
1506		/* Shut down the device */
1507		pci_disable_device(pdev);
1508		pci_set_power_state(pdev, PCI_D3cold);
1509		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1510	}
1511}
1512
1513/**
1514 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1515 *
1516 * @pdev: pci dev pointer
1517 *
1518 * Callback for the switcheroo driver.  Check if the switcheroo
1519 * state can be changed.
1520 * Returns true if the state can be changed, false if not.
1521 */
1522static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1523{
1524	struct drm_device *dev = pci_get_drvdata(pdev);
1525
1526	/*
1527	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1528	* locking inversion with the driver load path. And the access here is
1529	* completely racy anyway. So don't bother with locking for now.
1530	*/
1531	return atomic_read(&dev->open_count) == 0;
1532}
1533
1534static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1535	.set_gpu_state = amdgpu_switcheroo_set_state,
1536	.reprobe = NULL,
1537	.can_switch = amdgpu_switcheroo_can_switch,
1538};
1539
1540/**
1541 * amdgpu_device_ip_set_clockgating_state - set the CG state
1542 *
1543 * @dev: amdgpu_device pointer
1544 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1545 * @state: clockgating state (gate or ungate)
1546 *
1547 * Sets the requested clockgating state for all instances of
1548 * the hardware IP specified.
1549 * Returns the error code from the last instance.
1550 */
1551int amdgpu_device_ip_set_clockgating_state(void *dev,
1552					   enum amd_ip_block_type block_type,
1553					   enum amd_clockgating_state state)
1554{
1555	struct amdgpu_device *adev = dev;
1556	int i, r = 0;
1557
1558	for (i = 0; i < adev->num_ip_blocks; i++) {
1559		if (!adev->ip_blocks[i].status.valid)
1560			continue;
1561		if (adev->ip_blocks[i].version->type != block_type)
1562			continue;
1563		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1564			continue;
1565		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1566			(void *)adev, state);
1567		if (r)
1568			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1569				  adev->ip_blocks[i].version->funcs->name, r);
1570	}
1571	return r;
1572}
1573
1574/**
1575 * amdgpu_device_ip_set_powergating_state - set the PG state
1576 *
1577 * @dev: amdgpu_device pointer
1578 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1579 * @state: powergating state (gate or ungate)
1580 *
1581 * Sets the requested powergating state for all instances of
1582 * the hardware IP specified.
1583 * Returns the error code from the last instance.
1584 */
1585int amdgpu_device_ip_set_powergating_state(void *dev,
1586					   enum amd_ip_block_type block_type,
1587					   enum amd_powergating_state state)
1588{
1589	struct amdgpu_device *adev = dev;
1590	int i, r = 0;
1591
1592	for (i = 0; i < adev->num_ip_blocks; i++) {
1593		if (!adev->ip_blocks[i].status.valid)
1594			continue;
1595		if (adev->ip_blocks[i].version->type != block_type)
1596			continue;
1597		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1598			continue;
1599		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1600			(void *)adev, state);
1601		if (r)
1602			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1603				  adev->ip_blocks[i].version->funcs->name, r);
1604	}
1605	return r;
1606}
1607
1608/**
1609 * amdgpu_device_ip_get_clockgating_state - get the CG state
1610 *
1611 * @adev: amdgpu_device pointer
1612 * @flags: clockgating feature flags
1613 *
1614 * Walks the list of IPs on the device and updates the clockgating
1615 * flags for each IP.
1616 * Updates @flags with the feature flags for each hardware IP where
1617 * clockgating is enabled.
1618 */
1619void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1620					    u32 *flags)
1621{
1622	int i;
1623
1624	for (i = 0; i < adev->num_ip_blocks; i++) {
1625		if (!adev->ip_blocks[i].status.valid)
1626			continue;
1627		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1628			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1629	}
1630}
1631
1632/**
1633 * amdgpu_device_ip_wait_for_idle - wait for idle
1634 *
1635 * @adev: amdgpu_device pointer
1636 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1637 *
1638 * Waits for the requested hardware IP to be idle.
1639 * Returns 0 for success or a negative error code on failure.
1640 */
1641int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1642				   enum amd_ip_block_type block_type)
1643{
1644	int i, r;
1645
1646	for (i = 0; i < adev->num_ip_blocks; i++) {
1647		if (!adev->ip_blocks[i].status.valid)
1648			continue;
1649		if (adev->ip_blocks[i].version->type == block_type) {
1650			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1651			if (r)
1652				return r;
1653			break;
1654		}
1655	}
1656	return 0;
1657
1658}
1659
1660/**
1661 * amdgpu_device_ip_is_idle - is the hardware IP idle
1662 *
1663 * @adev: amdgpu_device pointer
1664 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1665 *
1666 * Check if the hardware IP is idle or not.
1667 * Returns true if the IP is idle, false if not.
1668 */
1669bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1670			      enum amd_ip_block_type block_type)
1671{
1672	int i;
1673
1674	for (i = 0; i < adev->num_ip_blocks; i++) {
1675		if (!adev->ip_blocks[i].status.valid)
1676			continue;
1677		if (adev->ip_blocks[i].version->type == block_type)
1678			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1679	}
1680	return true;
1681
1682}
1683
1684/**
1685 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1686 *
1687 * @adev: amdgpu_device pointer
1688 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1689 *
1690 * Returns a pointer to the hardware IP block structure
1691 * if it exists for the asic, otherwise NULL.
1692 */
1693struct amdgpu_ip_block *
1694amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1695			      enum amd_ip_block_type type)
1696{
1697	int i;
1698
1699	for (i = 0; i < adev->num_ip_blocks; i++)
1700		if (adev->ip_blocks[i].version->type == type)
1701			return &adev->ip_blocks[i];
1702
1703	return NULL;
1704}
1705
1706/**
1707 * amdgpu_device_ip_block_version_cmp
1708 *
1709 * @adev: amdgpu_device pointer
1710 * @type: enum amd_ip_block_type
1711 * @major: major version
1712 * @minor: minor version
1713 *
1714 * return 0 if equal or greater
1715 * return 1 if smaller or the ip_block doesn't exist
1716 */
1717int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1718				       enum amd_ip_block_type type,
1719				       u32 major, u32 minor)
1720{
1721	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1722
1723	if (ip_block && ((ip_block->version->major > major) ||
1724			((ip_block->version->major == major) &&
1725			(ip_block->version->minor >= minor))))
1726		return 0;
1727
1728	return 1;
1729}
1730
1731/**
1732 * amdgpu_device_ip_block_add
1733 *
1734 * @adev: amdgpu_device pointer
1735 * @ip_block_version: pointer to the IP to add
1736 *
1737 * Adds the IP block driver information to the collection of IPs
1738 * on the asic.
1739 */
1740int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1741			       const struct amdgpu_ip_block_version *ip_block_version)
1742{
1743	if (!ip_block_version)
1744		return -EINVAL;
1745
1746	switch (ip_block_version->type) {
1747	case AMD_IP_BLOCK_TYPE_VCN:
1748		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1749			return 0;
1750		break;
1751	case AMD_IP_BLOCK_TYPE_JPEG:
1752		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1753			return 0;
1754		break;
1755	default:
1756		break;
1757	}
1758
1759	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1760		  ip_block_version->funcs->name);
1761
1762	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1763
1764	return 0;
1765}
1766
1767/**
1768 * amdgpu_device_enable_virtual_display - enable virtual display feature
1769 *
1770 * @adev: amdgpu_device pointer
1771 *
1772 * Enables the virtual display feature if the user has enabled it via
1773 * the module parameter virtual_display.  This feature provides virtual
1774 * display hardware on headless boards or in virtualized environments.
1775 * This function parses and validates the configuration string specified by
1776 * the user and configures the virtual display configuration (number of
1777 * virtual connectors, crtcs, etc.) specified.
1778 */
1779static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1780{
1781	adev->enable_virtual_display = false;
1782
1783	if (amdgpu_virtual_display) {
1784		const char *pci_address_name = pci_name(adev->pdev);
1785		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1786
1787		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1788		pciaddstr_tmp = pciaddstr;
1789		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1790			pciaddname = strsep(&pciaddname_tmp, ",");
1791			if (!strcmp("all", pciaddname)
1792			    || !strcmp(pci_address_name, pciaddname)) {
1793				long num_crtc;
1794				int res = -1;
1795
1796				adev->enable_virtual_display = true;
1797
1798				if (pciaddname_tmp)
1799					res = kstrtol(pciaddname_tmp, 10,
1800						      &num_crtc);
1801
1802				if (!res) {
1803					if (num_crtc < 1)
1804						num_crtc = 1;
1805					if (num_crtc > 6)
1806						num_crtc = 6;
1807					adev->mode_info.num_crtc = num_crtc;
1808				} else {
1809					adev->mode_info.num_crtc = 1;
1810				}
1811				break;
1812			}
1813		}
1814
1815		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1816			 amdgpu_virtual_display, pci_address_name,
1817			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1818
1819		kfree(pciaddstr);
1820	}
1821}
1822
1823/**
1824 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1825 *
1826 * @adev: amdgpu_device pointer
1827 *
1828 * Parses the asic configuration parameters specified in the gpu info
1829 * firmware and makes them available to the driver for use in configuring
1830 * the asic.
1831 * Returns 0 on success, -EINVAL on failure.
1832 */
1833static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1834{
1835	const char *chip_name;
1836	char fw_name[40];
1837	int err;
1838	const struct gpu_info_firmware_header_v1_0 *hdr;
1839
1840	adev->firmware.gpu_info_fw = NULL;
1841
1842	if (adev->mman.discovery_bin) {
1843		amdgpu_discovery_get_gfx_info(adev);
1844
1845		/*
1846		 * FIXME: The bounding box is still needed by Navi12, so
1847		 * temporarily read it from gpu_info firmware. Should be dropped
1848		 * when DAL no longer needs it.
1849		 */
1850		if (adev->asic_type != CHIP_NAVI12)
1851			return 0;
1852	}
1853
1854	switch (adev->asic_type) {
1855#ifdef CONFIG_DRM_AMDGPU_SI
1856	case CHIP_VERDE:
1857	case CHIP_TAHITI:
1858	case CHIP_PITCAIRN:
1859	case CHIP_OLAND:
1860	case CHIP_HAINAN:
1861#endif
1862#ifdef CONFIG_DRM_AMDGPU_CIK
1863	case CHIP_BONAIRE:
1864	case CHIP_HAWAII:
1865	case CHIP_KAVERI:
1866	case CHIP_KABINI:
1867	case CHIP_MULLINS:
1868#endif
1869	case CHIP_TOPAZ:
1870	case CHIP_TONGA:
1871	case CHIP_FIJI:
1872	case CHIP_POLARIS10:
1873	case CHIP_POLARIS11:
1874	case CHIP_POLARIS12:
1875	case CHIP_VEGAM:
1876	case CHIP_CARRIZO:
1877	case CHIP_STONEY:
1878	case CHIP_VEGA20:
1879	case CHIP_ALDEBARAN:
1880	case CHIP_SIENNA_CICHLID:
1881	case CHIP_NAVY_FLOUNDER:
1882	case CHIP_DIMGREY_CAVEFISH:
1883	case CHIP_BEIGE_GOBY:
1884	default:
1885		return 0;
1886	case CHIP_VEGA10:
1887		chip_name = "vega10";
1888		break;
1889	case CHIP_VEGA12:
1890		chip_name = "vega12";
1891		break;
1892	case CHIP_RAVEN:
1893		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1894			chip_name = "raven2";
1895		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1896			chip_name = "picasso";
1897		else
1898			chip_name = "raven";
1899		break;
1900	case CHIP_ARCTURUS:
1901		chip_name = "arcturus";
1902		break;
1903	case CHIP_RENOIR:
1904		if (adev->apu_flags & AMD_APU_IS_RENOIR)
1905			chip_name = "renoir";
1906		else
1907			chip_name = "green_sardine";
1908		break;
1909	case CHIP_NAVI10:
1910		chip_name = "navi10";
1911		break;
1912	case CHIP_NAVI14:
1913		chip_name = "navi14";
1914		break;
1915	case CHIP_NAVI12:
1916		chip_name = "navi12";
1917		break;
1918	case CHIP_VANGOGH:
1919		chip_name = "vangogh";
1920		break;
1921	case CHIP_YELLOW_CARP:
1922		chip_name = "yellow_carp";
1923		break;
1924	}
1925
1926	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1927	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1928	if (err) {
1929		dev_err(adev->dev,
1930			"Failed to load gpu_info firmware \"%s\"\n",
1931			fw_name);
1932		goto out;
1933	}
1934	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1935	if (err) {
1936		dev_err(adev->dev,
1937			"Failed to validate gpu_info firmware \"%s\"\n",
1938			fw_name);
1939		goto out;
1940	}
1941
1942	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1943	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1944
1945	switch (hdr->version_major) {
1946	case 1:
1947	{
1948		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1949			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1950								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1951
1952		/*
1953		 * Should be dropped when DAL no longer needs it.
1954		 */
1955		if (adev->asic_type == CHIP_NAVI12)
1956			goto parse_soc_bounding_box;
1957
1958		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1959		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1960		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1961		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1962		adev->gfx.config.max_texture_channel_caches =
1963			le32_to_cpu(gpu_info_fw->gc_num_tccs);
1964		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1965		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1966		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1967		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1968		adev->gfx.config.double_offchip_lds_buf =
1969			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1970		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1971		adev->gfx.cu_info.max_waves_per_simd =
1972			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1973		adev->gfx.cu_info.max_scratch_slots_per_cu =
1974			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1975		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1976		if (hdr->version_minor >= 1) {
1977			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1978				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1979									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1980			adev->gfx.config.num_sc_per_sh =
1981				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1982			adev->gfx.config.num_packer_per_sc =
1983				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1984		}
1985
1986parse_soc_bounding_box:
1987		/*
1988		 * soc bounding box info is not integrated in the discovery table,
1989		 * so we always need to parse it from the gpu info firmware.
1990		 */
1991		if (hdr->version_minor == 2) {
1992			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1993				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1994									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1995			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1996		}
1997		break;
1998	}
1999	default:
2000		dev_err(adev->dev,
2001			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2002		err = -EINVAL;
2003		goto out;
2004	}
2005out:
2006	return err;
2007}
2008
2009/**
2010 * amdgpu_device_ip_early_init - run early init for hardware IPs
2011 *
2012 * @adev: amdgpu_device pointer
2013 *
2014 * Early initialization pass for hardware IPs.  The hardware IPs that make
2015 * up each asic are discovered and each IP's early_init callback is run.  This
2016 * is the first stage in initializing the asic.
2017 * Returns 0 on success, negative error code on failure.
2018 */
2019static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2020{
2021	int i, r;
2022
2023	amdgpu_device_enable_virtual_display(adev);
2024
2025	if (amdgpu_sriov_vf(adev)) {
2026		r = amdgpu_virt_request_full_gpu(adev, true);
2027		if (r)
2028			return r;
2029	}
2030
2031	switch (adev->asic_type) {
2032#ifdef CONFIG_DRM_AMDGPU_SI
2033	case CHIP_VERDE:
2034	case CHIP_TAHITI:
2035	case CHIP_PITCAIRN:
2036	case CHIP_OLAND:
2037	case CHIP_HAINAN:
2038		adev->family = AMDGPU_FAMILY_SI;
2039		r = si_set_ip_blocks(adev);
2040		if (r)
2041			return r;
2042		break;
2043#endif
2044#ifdef CONFIG_DRM_AMDGPU_CIK
2045	case CHIP_BONAIRE:
2046	case CHIP_HAWAII:
2047	case CHIP_KAVERI:
2048	case CHIP_KABINI:
2049	case CHIP_MULLINS:
2050		if (adev->flags & AMD_IS_APU)
2051			adev->family = AMDGPU_FAMILY_KV;
2052		else
2053			adev->family = AMDGPU_FAMILY_CI;
2054
2055		r = cik_set_ip_blocks(adev);
2056		if (r)
2057			return r;
2058		break;
2059#endif
2060	case CHIP_TOPAZ:
2061	case CHIP_TONGA:
2062	case CHIP_FIJI:
2063	case CHIP_POLARIS10:
2064	case CHIP_POLARIS11:
2065	case CHIP_POLARIS12:
2066	case CHIP_VEGAM:
2067	case CHIP_CARRIZO:
2068	case CHIP_STONEY:
2069		if (adev->flags & AMD_IS_APU)
2070			adev->family = AMDGPU_FAMILY_CZ;
2071		else
2072			adev->family = AMDGPU_FAMILY_VI;
2073
2074		r = vi_set_ip_blocks(adev);
2075		if (r)
2076			return r;
2077		break;
2078	case CHIP_VEGA10:
2079	case CHIP_VEGA12:
2080	case CHIP_VEGA20:
2081	case CHIP_RAVEN:
2082	case CHIP_ARCTURUS:
2083	case CHIP_RENOIR:
2084	case CHIP_ALDEBARAN:
2085		if (adev->flags & AMD_IS_APU)
2086			adev->family = AMDGPU_FAMILY_RV;
2087		else
2088			adev->family = AMDGPU_FAMILY_AI;
2089
2090		r = soc15_set_ip_blocks(adev);
2091		if (r)
2092			return r;
2093		break;
2094	case  CHIP_NAVI10:
2095	case  CHIP_NAVI14:
2096	case  CHIP_NAVI12:
2097	case  CHIP_SIENNA_CICHLID:
2098	case  CHIP_NAVY_FLOUNDER:
2099	case  CHIP_DIMGREY_CAVEFISH:
2100	case  CHIP_BEIGE_GOBY:
2101	case CHIP_VANGOGH:
2102	case CHIP_YELLOW_CARP:
2103		if (adev->asic_type == CHIP_VANGOGH)
2104			adev->family = AMDGPU_FAMILY_VGH;
2105		else if (adev->asic_type == CHIP_YELLOW_CARP)
2106			adev->family = AMDGPU_FAMILY_YC;
2107		else
2108			adev->family = AMDGPU_FAMILY_NV;
2109
2110		r = nv_set_ip_blocks(adev);
2111		if (r)
2112			return r;
2113		break;
2114	default:
2115		/* FIXME: not supported yet */
2116		return -EINVAL;
2117	}
2118
2119	amdgpu_amdkfd_device_probe(adev);
2120
2121	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2122	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2123		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2124	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2125		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2126
2127	for (i = 0; i < adev->num_ip_blocks; i++) {
2128		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2129			DRM_ERROR("disabled ip block: %d <%s>\n",
2130				  i, adev->ip_blocks[i].version->funcs->name);
2131			adev->ip_blocks[i].status.valid = false;
2132		} else {
2133			if (adev->ip_blocks[i].version->funcs->early_init) {
2134				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2135				if (r == -ENOENT) {
2136					adev->ip_blocks[i].status.valid = false;
2137				} else if (r) {
2138					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2139						  adev->ip_blocks[i].version->funcs->name, r);
2140					return r;
2141				} else {
2142					adev->ip_blocks[i].status.valid = true;
2143				}
2144			} else {
2145				adev->ip_blocks[i].status.valid = true;
2146			}
2147		}
2148		/* get the vbios after the asic_funcs are set up */
2149		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2150			r = amdgpu_device_parse_gpu_info_fw(adev);
2151			if (r)
2152				return r;
2153
2154			/* Read BIOS */
2155			if (!amdgpu_get_bios(adev))
2156				return -EINVAL;
2157
2158			r = amdgpu_atombios_init(adev);
2159			if (r) {
2160				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2161				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2162				return r;
2163			}
2164
2165			/* get pf2vf msg info at its earliest time */
2166			if (amdgpu_sriov_vf(adev))
2167				amdgpu_virt_init_data_exchange(adev);
2168
2169		}
2170	}
2171
2172	adev->cg_flags &= amdgpu_cg_mask;
2173	adev->pg_flags &= amdgpu_pg_mask;
2174
2175	return 0;
2176}
2177
2178static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2179{
2180	int i, r;
2181
2182	for (i = 0; i < adev->num_ip_blocks; i++) {
2183		if (!adev->ip_blocks[i].status.sw)
2184			continue;
2185		if (adev->ip_blocks[i].status.hw)
2186			continue;
2187		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2188		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2189		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2190			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2191			if (r) {
2192				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2193					  adev->ip_blocks[i].version->funcs->name, r);
2194				return r;
2195			}
2196			adev->ip_blocks[i].status.hw = true;
2197		}
2198	}
2199
2200	return 0;
2201}
2202
2203static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2204{
2205	int i, r;
2206
2207	for (i = 0; i < adev->num_ip_blocks; i++) {
2208		if (!adev->ip_blocks[i].status.sw)
2209			continue;
2210		if (adev->ip_blocks[i].status.hw)
2211			continue;
2212		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2213		if (r) {
2214			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2215				  adev->ip_blocks[i].version->funcs->name, r);
2216			return r;
2217		}
2218		adev->ip_blocks[i].status.hw = true;
2219	}
2220
2221	return 0;
2222}
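/*
 * Editor's note: a minimal sketch of the hw-init ordering as wired up in
 * amdgpu_device_ip_init() further below; illustrative only, not driver code:
 *
 *   amdgpu_device_ip_hw_init_phase1(adev);  // COMMON and IH (plus PSP on SR-IOV)
 *   amdgpu_device_fw_loading(adev);         // bring PSP/SMU firmware up
 *   amdgpu_device_ip_hw_init_phase2(adev);  // all remaining IP blocks
 *
 * Phase 1 brings up the blocks the firmware loader depends on; phase 2
 * handles the rest once firmware is resident.
 */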
2223
2224static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2225{
2226	int r = 0;
2227	int i;
2228	uint32_t smu_version;
2229
2230	if (adev->asic_type >= CHIP_VEGA10) {
2231		for (i = 0; i < adev->num_ip_blocks; i++) {
2232			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2233				continue;
2234
2235			if (!adev->ip_blocks[i].status.sw)
2236				continue;
2237
2238			/* no need to do the fw loading again if already done */
2239			if (adev->ip_blocks[i].status.hw)
2240				break;
2241
2242			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2243				r = adev->ip_blocks[i].version->funcs->resume(adev);
2244				if (r) {
2245					DRM_ERROR("resume of IP block <%s> failed %d\n",
2246							  adev->ip_blocks[i].version->funcs->name, r);
2247					return r;
2248				}
2249			} else {
2250				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2251				if (r) {
2252					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2253							  adev->ip_blocks[i].version->funcs->name, r);
2254					return r;
2255				}
2256			}
2257
2258			adev->ip_blocks[i].status.hw = true;
2259			break;
2260		}
2261	}
2262
2263	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2264		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2265
2266	return r;
2267}
2268
2269/**
2270 * amdgpu_device_ip_init - run init for hardware IPs
2271 *
2272 * @adev: amdgpu_device pointer
2273 *
2274 * Main initialization pass for hardware IPs.  The list of all the hardware
2275 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2276 * are run.  sw_init initializes the software state associated with each IP
2277 * and hw_init initializes the hardware associated with each IP.
2278 * Returns 0 on success, negative error code on failure.
2279 */
2280static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2281{
2282	int i, r;
2283
2284	r = amdgpu_ras_init(adev);
2285	if (r)
2286		return r;
2287
2288	for (i = 0; i < adev->num_ip_blocks; i++) {
2289		if (!adev->ip_blocks[i].status.valid)
2290			continue;
2291		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2292		if (r) {
2293			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2294				  adev->ip_blocks[i].version->funcs->name, r);
2295			goto init_failed;
2296		}
2297		adev->ip_blocks[i].status.sw = true;
2298
2299		/* need to do gmc hw init early so we can allocate gpu mem */
2300		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2301			r = amdgpu_device_vram_scratch_init(adev);
2302			if (r) {
2303				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2304				goto init_failed;
2305			}
2306			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2307			if (r) {
2308				DRM_ERROR("hw_init %d failed %d\n", i, r);
2309				goto init_failed;
2310			}
2311			r = amdgpu_device_wb_init(adev);
2312			if (r) {
2313				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2314				goto init_failed;
2315			}
2316			adev->ip_blocks[i].status.hw = true;
2317
2318			/* right after GMC hw init, we create CSA */
2319			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2320				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2321								AMDGPU_GEM_DOMAIN_VRAM,
2322								AMDGPU_CSA_SIZE);
2323				if (r) {
2324					DRM_ERROR("allocate CSA failed %d\n", r);
2325					goto init_failed;
2326				}
2327			}
2328		}
2329	}
2330
2331	if (amdgpu_sriov_vf(adev))
2332		amdgpu_virt_init_data_exchange(adev);
2333
2334	r = amdgpu_ib_pool_init(adev);
2335	if (r) {
2336		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2337		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2338		goto init_failed;
2339	}
2340
2341	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2342	if (r)
2343		goto init_failed;
2344
2345	r = amdgpu_device_ip_hw_init_phase1(adev);
2346	if (r)
2347		goto init_failed;
2348
2349	r = amdgpu_device_fw_loading(adev);
2350	if (r)
2351		goto init_failed;
2352
2353	r = amdgpu_device_ip_hw_init_phase2(adev);
2354	if (r)
2355		goto init_failed;
2356
2357	/*
2358	 * Retired pages are loaded from the EEPROM and reserved here.
2359	 * This must be called after amdgpu_device_ip_hw_init_phase2(), since
2360	 * on some ASICs the RAS EEPROM code relies on the SMU being fully
2361	 * functional for I2C communication, which is only true at this point.
2362	 *
2363	 * amdgpu_ras_recovery_init() may fail, but the caller only treats the
2364	 * failure caused by a bad GPU as fatal and stops the amdgpu init
2365	 * process accordingly. For other failures it still releases all
2366	 * resources and prints an error message rather than returning a
2367	 * negative value to the upper level.
2368	 *
2369	 * Note: in theory this should be called before any VRAM allocation
2370	 * to protect retired pages from being reused.
2371	 */
2372	r = amdgpu_ras_recovery_init(adev);
2373	if (r)
2374		goto init_failed;
2375
2376	if (adev->gmc.xgmi.num_physical_nodes > 1)
2377		amdgpu_xgmi_add_device(adev);
2378
2379	/* Don't init kfd if whole hive need to be reset during init */
2380	if (!adev->gmc.xgmi.pending_reset)
2381		amdgpu_amdkfd_device_init(adev);
2382
2383	r = amdgpu_amdkfd_resume_iommu(adev);
2384	if (r)
2385		goto init_failed;
2386
2387	amdgpu_fru_get_product_info(adev);
2388
2389init_failed:
2390	if (amdgpu_sriov_vf(adev))
2391		amdgpu_virt_release_full_gpu(adev, true);
2392
2393	return r;
2394}
2395
2396/**
2397 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2398 *
2399 * @adev: amdgpu_device pointer
2400 *
2401 * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2402 * this function before a GPU reset.  If the value is retained after a
2403 * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2404 */
2405static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2406{
2407	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2408}
2409
2410/**
2411 * amdgpu_device_check_vram_lost - check if vram is valid
2412 *
2413 * @adev: amdgpu_device pointer
2414 *
2415 * Checks the reset magic value written to the gart pointer in VRAM.
2416 * The driver calls this after a GPU reset to see if the contents of
2417 * VRAM were lost or not.
2418 * Returns true if vram is lost, false if not.
2419 */
2420static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2421{
2422	if (memcmp(adev->gart.ptr, adev->reset_magic,
2423			AMDGPU_RESET_MAGIC_NUM))
2424		return true;
2425
2426	if (!amdgpu_in_reset(adev))
2427		return false;
2428
2429	/*
2430	 * For all ASICs with baco/mode1 reset, the VRAM is
2431	 * always assumed to be lost.
2432	 */
2433	switch (amdgpu_asic_reset_method(adev)) {
2434	case AMD_RESET_METHOD_BACO:
2435	case AMD_RESET_METHOD_MODE1:
2436		return true;
2437	default:
2438		return false;
2439	}
2440}
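/*
 * Editor's note: how the two magic helpers above pair up around a reset
 * (illustrative sketch, not driver code):
 *
 *   amdgpu_device_fill_reset_magic(adev);        // before the GPU reset
 *   ...perform the reset...
 *   if (amdgpu_device_check_vram_lost(adev))
 *           ...reinitialize/restore VRAM contents...
 *
 * For BACO and mode1 resets the check reports VRAM as lost
 * unconditionally, without consulting the magic value.
 */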
2441
2442/**
2443 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2444 *
2445 * @adev: amdgpu_device pointer
2446 * @state: clockgating state (gate or ungate)
2447 *
2448 * The list of all the hardware IPs that make up the asic is walked and the
2449 * set_clockgating_state callbacks are run.  The late initialization pass
2450 * enables clockgating for hardware IPs; the fini and suspend passes
2451 * disable it.
2452 * Returns 0 on success, negative error code on failure.
2453 */
2454
2455int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2456			       enum amd_clockgating_state state)
2457{
2458	int i, j, r;
2459
2460	if (amdgpu_emu_mode == 1)
2461		return 0;
2462
2463	for (j = 0; j < adev->num_ip_blocks; j++) {
2464		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2465		if (!adev->ip_blocks[i].status.late_initialized)
2466			continue;
2467		/* skip CG for GFX on S0ix */
2468		if (adev->in_s0ix &&
2469		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2470			continue;
2471		/* skip CG for UVD/VCE/VCN/JPEG; they're handled specially */
2472		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2473		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2474		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2475		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2476		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2477			/* enable clockgating to save power */
2478			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2479										     state);
2480			if (r) {
2481				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2482					  adev->ip_blocks[i].version->funcs->name, r);
2483				return r;
2484			}
2485		}
2486	}
2487
2488	return 0;
2489}
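/*
 * Editor's note: worked example of the index math above. With
 * num_ip_blocks == 4, gating (AMD_CG_STATE_GATE) visits blocks 0,1,2,3
 * while ungating visits 3,2,1,0 (i = num_ip_blocks - j - 1), so clocks
 * are ungated in the reverse of the gating order.
 */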
2490
2491int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2492			       enum amd_powergating_state state)
2493{
2494	int i, j, r;
2495
2496	if (amdgpu_emu_mode == 1)
2497		return 0;
2498
2499	for (j = 0; j < adev->num_ip_blocks; j++) {
2500		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2501		if (!adev->ip_blocks[i].status.late_initialized)
2502			continue;
2503		/* skip PG for GFX on S0ix */
2504		if (adev->in_s0ix &&
2505		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2506			continue;
2507		/* skip PG for UVD/VCE/VCN/JPEG; they're handled specially */
2508		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2509		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2510		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2511		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2512		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2513			/* enable powergating to save power */
2514			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2515											state);
2516			if (r) {
2517				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2518					  adev->ip_blocks[i].version->funcs->name, r);
2519				return r;
2520			}
2521		}
2522	}
2523	return 0;
2524}
2525
2526static int amdgpu_device_enable_mgpu_fan_boost(void)
2527{
2528	struct amdgpu_gpu_instance *gpu_ins;
2529	struct amdgpu_device *adev;
2530	int i, ret = 0;
2531
2532	mutex_lock(&mgpu_info.mutex);
2533
2534	/*
2535	 * MGPU fan boost feature should be enabled
2536	 * only when there are two or more dGPUs in
2537	 * the system
2538	 */
2539	if (mgpu_info.num_dgpu < 2)
2540		goto out;
2541
2542	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2543		gpu_ins = &(mgpu_info.gpu_ins[i]);
2544		adev = gpu_ins->adev;
2545		if (!(adev->flags & AMD_IS_APU) &&
2546		    !gpu_ins->mgpu_fan_enabled) {
2547			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2548			if (ret)
2549				break;
2550
2551			gpu_ins->mgpu_fan_enabled = 1;
2552		}
2553	}
2554
2555out:
2556	mutex_unlock(&mgpu_info.mutex);
2557
2558	return ret;
2559}
2560
2561/**
2562 * amdgpu_device_ip_late_init - run late init for hardware IPs
2563 *
2564 * @adev: amdgpu_device pointer
2565 *
2566 * Late initialization pass for hardware IPs.  The list of all the hardware
2567 * IPs that make up the asic is walked and the late_init callbacks are run.
2568 * late_init covers any special initialization that an IP requires
2569 * after all of the IPs have been initialized or something that needs to happen
2570 * late in the init process.
2571 * Returns 0 on success, negative error code on failure.
2572 */
2573static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2574{
2575	struct amdgpu_gpu_instance *gpu_instance;
2576	int i = 0, r;
2577
2578	for (i = 0; i < adev->num_ip_blocks; i++) {
2579		if (!adev->ip_blocks[i].status.hw)
2580			continue;
2581		if (adev->ip_blocks[i].version->funcs->late_init) {
2582			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2583			if (r) {
2584				DRM_ERROR("late_init of IP block <%s> failed %d\n",
2585					  adev->ip_blocks[i].version->funcs->name, r);
2586				return r;
2587			}
2588		}
2589		adev->ip_blocks[i].status.late_initialized = true;
2590	}
2591
2592	amdgpu_ras_set_error_query_ready(adev, true);
2593
2594	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2595	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2596
2597	amdgpu_device_fill_reset_magic(adev);
2598
2599	r = amdgpu_device_enable_mgpu_fan_boost();
2600	if (r)
2601		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2602
2603	/* For XGMI + passthrough configuration on arcturus, enable light SBR */
2604	if (adev->asic_type == CHIP_ARCTURUS &&
2605	    amdgpu_passthrough(adev) &&
2606	    adev->gmc.xgmi.num_physical_nodes > 1)
2607		smu_set_light_sbr(&adev->smu, true);
2608
2609	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2610		mutex_lock(&mgpu_info.mutex);
2611
2612		/*
2613		 * Reset the device p-state to low, as it was booted high.
2614		 *
2615		 * This should be performed only after all devices from the same
2616		 * hive have been initialized.
2617		 *
2618		 * However, the number of devices in the hive is not known in
2619		 * advance, as it is counted up one by one as devices initialize.
2620		 *
2621		 * So we wait until all XGMI-interlinked devices have initialized.
2622		 * This may add some delay, as those devices may come from
2623		 * different hives. But that should be OK.
2624		 */
2625		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2626			for (i = 0; i < mgpu_info.num_gpu; i++) {
2627				gpu_instance = &(mgpu_info.gpu_ins[i]);
2628				if (gpu_instance->adev->flags & AMD_IS_APU)
2629					continue;
2630
2631				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2632						AMDGPU_XGMI_PSTATE_MIN);
2633				if (r) {
2634					DRM_ERROR("pstate setting failed (%d).\n", r);
2635					break;
2636				}
2637			}
2638		}
2639
2640		mutex_unlock(&mgpu_info.mutex);
2641	}
2642
2643	return 0;
2644}
2645
2646static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2647{
2648	int i, r;
2649
2650	for (i = 0; i < adev->num_ip_blocks; i++) {
2651		if (!adev->ip_blocks[i].version->funcs->early_fini)
2652			continue;
2653
2654		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2655		if (r) {
2656			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2657				  adev->ip_blocks[i].version->funcs->name, r);
2658		}
2659	}
2660
2661	amdgpu_amdkfd_suspend(adev, false);
2662
2663	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2664	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2665
2666	/* need to disable SMC first */
2667	for (i = 0; i < adev->num_ip_blocks; i++) {
2668		if (!adev->ip_blocks[i].status.hw)
2669			continue;
2670		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2671			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2672			/* XXX handle errors */
2673			if (r) {
2674				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2675					  adev->ip_blocks[i].version->funcs->name, r);
2676			}
2677			adev->ip_blocks[i].status.hw = false;
2678			break;
2679		}
2680	}
2681
2682	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2683		if (!adev->ip_blocks[i].status.hw)
2684			continue;
2685
2686		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2687		/* XXX handle errors */
2688		if (r) {
2689			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2690				  adev->ip_blocks[i].version->funcs->name, r);
2691		}
2692
2693		adev->ip_blocks[i].status.hw = false;
2694	}
2695
2696	return 0;
2697}
2698
2699/**
2700 * amdgpu_device_ip_fini - run fini for hardware IPs
2701 *
2702 * @adev: amdgpu_device pointer
2703 *
2704 * Main teardown pass for hardware IPs.  The list of all the hardware
2705 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2706 * are run.  hw_fini tears down the hardware associated with each IP
2707 * and sw_fini tears down any software state associated with each IP.
2708 * Returns 0 on success, negative error code on failure.
2709 */
2710static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2711{
2712	int i, r;
2713
2714	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2715		amdgpu_virt_release_ras_err_handler_data(adev);
2716
2717	amdgpu_ras_pre_fini(adev);
2718
2719	if (adev->gmc.xgmi.num_physical_nodes > 1)
2720		amdgpu_xgmi_remove_device(adev);
2721
2722	amdgpu_amdkfd_device_fini_sw(adev);
2723
2724	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2725		if (!adev->ip_blocks[i].status.sw)
2726			continue;
2727
2728		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2729			amdgpu_ucode_free_bo(adev);
2730			amdgpu_free_static_csa(&adev->virt.csa_obj);
2731			amdgpu_device_wb_fini(adev);
2732			amdgpu_device_vram_scratch_fini(adev);
2733			amdgpu_ib_pool_fini(adev);
2734		}
2735
2736		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2737		/* XXX handle errors */
2738		if (r) {
2739			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2740				  adev->ip_blocks[i].version->funcs->name, r);
2741		}
2742		adev->ip_blocks[i].status.sw = false;
2743		adev->ip_blocks[i].status.valid = false;
2744	}
2745
2746	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2747		if (!adev->ip_blocks[i].status.late_initialized)
2748			continue;
2749		if (adev->ip_blocks[i].version->funcs->late_fini)
2750			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2751		adev->ip_blocks[i].status.late_initialized = false;
2752	}
2753
2754	amdgpu_ras_fini(adev);
2755
2756	if (amdgpu_sriov_vf(adev))
2757		if (amdgpu_virt_release_full_gpu(adev, false))
2758			DRM_ERROR("failed to release exclusive mode on fini\n");
2759
2760	return 0;
2761}
2762
2763/**
2764 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2765 *
2766 * @work: work_struct.
2767 */
2768static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2769{
2770	struct amdgpu_device *adev =
2771		container_of(work, struct amdgpu_device, delayed_init_work.work);
2772	int r;
2773
2774	r = amdgpu_ib_ring_tests(adev);
2775	if (r)
2776		DRM_ERROR("ib ring test failed (%d).\n", r);
2777}
2778
2779static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2780{
2781	struct amdgpu_device *adev =
2782		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2783
2784	WARN_ON_ONCE(adev->gfx.gfx_off_state);
2785	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2786
2787	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2788		adev->gfx.gfx_off_state = true;
2789}
2790
2791/**
2792 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2793 *
2794 * @adev: amdgpu_device pointer
2795 *
2796 * Main suspend function for hardware IPs.  The list of all the hardware
2797 * IPs that make up the asic is walked, clockgating is disabled and the
2798 * suspend callbacks are run.  suspend puts the hardware and software state
2799 * in each IP into a state suitable for suspend.
2800 * Returns 0 on success, negative error code on failure.
2801 */
2802static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2803{
2804	int i, r;
2805
2806	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2807	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2808
2809	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2810		if (!adev->ip_blocks[i].status.valid)
2811			continue;
2812
2813		/* displays are handled separately */
2814		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2815			continue;
2816
2818		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2819		/* XXX handle errors */
2820		if (r) {
2821			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2822				  adev->ip_blocks[i].version->funcs->name, r);
2823			return r;
2824		}
2825
2826		adev->ip_blocks[i].status.hw = false;
2827	}
2828
2829	return 0;
2830}
2831
2832/**
2833 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2834 *
2835 * @adev: amdgpu_device pointer
2836 *
2837 * Main suspend function for hardware IPs.  The list of all the hardware
2838 * IPs that make up the asic is walked, clockgating is disabled and the
2839 * suspend callbacks are run.  suspend puts the hardware and software state
2840 * in each IP into a state suitable for suspend.
2841 * Returns 0 on success, negative error code on failure.
2842 */
2843static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2844{
2845	int i, r;
2846
2847	if (adev->in_s0ix)
2848		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
2849
2850	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2851		if (!adev->ip_blocks[i].status.valid)
2852			continue;
2853		/* displays are handled in phase1 */
2854		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2855			continue;
2856		/* PSP lost connection when err_event_athub occurs */
2857		if (amdgpu_ras_intr_triggered() &&
2858		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2859			adev->ip_blocks[i].status.hw = false;
2860			continue;
2861		}
2862
2863		/* skip unnecessary suspend if we have not initialized them yet */
2864		if (adev->gmc.xgmi.pending_reset &&
2865		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2866		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2867		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2868		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2869			adev->ip_blocks[i].status.hw = false;
2870			continue;
2871		}
2872
2873		/* skip suspend of gfx and psp for S0ix
2874		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
2875		 * like at runtime. PSP is also part of the always on hardware
2876		 * so no need to suspend it.
2877		 */
2878		if (adev->in_s0ix &&
2879		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2880		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2881			continue;
2882
2884		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2885		/* XXX handle errors */
2886		if (r) {
2887			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2888				  adev->ip_blocks[i].version->funcs->name, r);
2889		}
2890		adev->ip_blocks[i].status.hw = false;
2891		/* handle putting the SMC in the appropriate state */
2892		if (!amdgpu_sriov_vf(adev)) {
2893			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2894				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2895				if (r) {
2896					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2897							adev->mp1_state, r);
2898					return r;
2899				}
2900			}
2901		}
2902	}
2903
2904	return 0;
2905}
2906
2907/**
2908 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2909 *
2910 * @adev: amdgpu_device pointer
2911 *
2912 * Main suspend function for hardware IPs.  The list of all the hardware
2913 * IPs that make up the asic is walked, clockgating is disabled and the
2914 * suspend callbacks are run.  suspend puts the hardware and software state
2915 * in each IP into a state suitable for suspend.
2916 * Returns 0 on success, negative error code on failure.
2917 */
2918int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2919{
2920	int r;
2921
2922	if (amdgpu_sriov_vf(adev)) {
2923		amdgpu_virt_fini_data_exchange(adev);
2924		amdgpu_virt_request_full_gpu(adev, false);
2925	}
2926
2927	r = amdgpu_device_ip_suspend_phase1(adev);
2928	if (r)
2929		return r;
2930	r = amdgpu_device_ip_suspend_phase2(adev);
2931
2932	if (amdgpu_sriov_vf(adev))
2933		amdgpu_virt_release_full_gpu(adev, false);
2934
2935	return r;
2936}
2937
2938static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2939{
2940	int i, r;
2941
2942	static enum amd_ip_block_type ip_order[] = {
2943		AMD_IP_BLOCK_TYPE_GMC,
2944		AMD_IP_BLOCK_TYPE_COMMON,
2945		AMD_IP_BLOCK_TYPE_PSP,
2946		AMD_IP_BLOCK_TYPE_IH,
2947	};
2948
2949	for (i = 0; i < adev->num_ip_blocks; i++) {
2950		int j;
2951		struct amdgpu_ip_block *block;
2952
2953		block = &adev->ip_blocks[i];
2954		block->status.hw = false;
2955
2956		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2957
2958			if (block->version->type != ip_order[j] ||
2959				!block->status.valid)
2960				continue;
2961
2962			r = block->version->funcs->hw_init(adev);
2963			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2964			if (r)
2965				return r;
2966			block->status.hw = true;
2967		}
2968	}
2969
2970	return 0;
2971}
2972
2973static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2974{
2975	int i, r;
2976
2977	static enum amd_ip_block_type ip_order[] = {
2978		AMD_IP_BLOCK_TYPE_SMC,
2979		AMD_IP_BLOCK_TYPE_DCE,
2980		AMD_IP_BLOCK_TYPE_GFX,
2981		AMD_IP_BLOCK_TYPE_SDMA,
2982		AMD_IP_BLOCK_TYPE_UVD,
2983		AMD_IP_BLOCK_TYPE_VCE,
2984		AMD_IP_BLOCK_TYPE_VCN
2985	};
2986
2987	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2988		int j;
2989		struct amdgpu_ip_block *block;
2990
2991		for (j = 0; j < adev->num_ip_blocks; j++) {
2992			block = &adev->ip_blocks[j];
2993
2994			if (block->version->type != ip_order[i] ||
2995				!block->status.valid ||
2996				block->status.hw)
2997				continue;
2998
2999			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3000				r = block->version->funcs->resume(adev);
3001			else
3002				r = block->version->funcs->hw_init(adev);
3003
3004			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3005			if (r)
3006				return r;
3007			block->status.hw = true;
3008		}
3009	}
3010
3011	return 0;
3012}
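/*
 * Editor's note: the SR-IOV re-init ordering encoded by the two tables
 * above, at a glance (summary, not driver code):
 *
 *   early: GMC -> COMMON -> PSP -> IH                      (hw_init)
 *   late:  SMC -> DCE -> GFX -> SDMA -> UVD -> VCE -> VCN
 *
 * SMC is the one block brought back via resume() rather than hw_init()
 * in the late pass.
 */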
3013
3014/**
3015 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3016 *
3017 * @adev: amdgpu_device pointer
3018 *
3019 * First resume function for hardware IPs.  The list of all the hardware
3020 * IPs that make up the asic is walked and the resume callbacks are run for
3021 * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3022 * after a suspend and updates the software state as necessary.  This
3023 * function is also used for restoring the GPU after a GPU reset.
3024 * Returns 0 on success, negative error code on failure.
3025 */
3026static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3027{
3028	int i, r;
3029
3030	for (i = 0; i < adev->num_ip_blocks; i++) {
3031		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3032			continue;
3033		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3034		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3035		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3036
3037			r = adev->ip_blocks[i].version->funcs->resume(adev);
3038			if (r) {
3039				DRM_ERROR("resume of IP block <%s> failed %d\n",
3040					  adev->ip_blocks[i].version->funcs->name, r);
3041				return r;
3042			}
3043			adev->ip_blocks[i].status.hw = true;
3044		}
3045	}
3046
3047	return 0;
3048}
3049
3050/**
3051 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3052 *
3053 * @adev: amdgpu_device pointer
3054 *
3055 * Second resume function for hardware IPs.  The list of all the hardware
3056 * IPs that make up the asic is walked and the resume callbacks are run for
3057 * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3058 * functional state after a suspend and updates the software state as
3059 * necessary.  This function is also used for restoring the GPU after a GPU
3060 * reset.
3061 * Returns 0 on success, negative error code on failure.
3062 */
3063static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3064{
3065	int i, r;
3066
3067	for (i = 0; i < adev->num_ip_blocks; i++) {
3068		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3069			continue;
3070		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3071		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3072		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3073		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3074			continue;
3075		r = adev->ip_blocks[i].version->funcs->resume(adev);
3076		if (r) {
3077			DRM_ERROR("resume of IP block <%s> failed %d\n",
3078				  adev->ip_blocks[i].version->funcs->name, r);
3079			return r;
3080		}
3081		adev->ip_blocks[i].status.hw = true;
3082	}
3083
3084	return 0;
3085}
3086
3087/**
3088 * amdgpu_device_ip_resume - run resume for hardware IPs
3089 *
3090 * @adev: amdgpu_device pointer
3091 *
3092 * Main resume function for hardware IPs.  The hardware IPs
3093 * are split into two resume functions because they are
3094 * also used in recovering from a GPU reset and some additional
3095 * steps need to be taken between them.  In this case (S3/S4) they are
3096 * run sequentially.
3097 * Returns 0 on success, negative error code on failure.
3098 */
3099static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3100{
3101	int r;
3102
3103	r = amdgpu_amdkfd_resume_iommu(adev);
3104	if (r)
3105		return r;
3106
3107	r = amdgpu_device_ip_resume_phase1(adev);
3108	if (r)
3109		return r;
3110
3111	r = amdgpu_device_fw_loading(adev);
3112	if (r)
3113		return r;
3114
3115	r = amdgpu_device_ip_resume_phase2(adev);
3116
3117	return r;
3118}
3119
3120/**
3121 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3122 *
3123 * @adev: amdgpu_device pointer
3124 *
3125 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3126 */
3127static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3128{
3129	if (amdgpu_sriov_vf(adev)) {
3130		if (adev->is_atom_fw) {
3131			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3132				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3133		} else {
3134			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3135				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3136		}
3137
3138		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3139			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3140	}
3141}
3142
3143/**
3144 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3145 *
3146 * @asic_type: AMD asic type
3147 *
3148 * Check if there is DC (new modesetting infrastructure) support for an asic.
3149 * returns true if DC has support, false if not.
3150 */
3151bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3152{
3153	switch (asic_type) {
3154#if defined(CONFIG_DRM_AMD_DC)
3155#if defined(CONFIG_DRM_AMD_DC_SI)
3156	case CHIP_TAHITI:
3157	case CHIP_PITCAIRN:
3158	case CHIP_VERDE:
3159	case CHIP_OLAND:
3160#endif
3161	case CHIP_BONAIRE:
3162	case CHIP_KAVERI:
3163	case CHIP_KABINI:
3164	case CHIP_MULLINS:
3165		/*
3166		 * We have systems in the wild with these ASICs that require
3167		 * LVDS and VGA support which is not supported with DC.
3168		 *
3169		 * Fallback to the non-DC driver here by default so as not to
3170		 * cause regressions.
3171		 */
3172		return amdgpu_dc > 0;
3173	case CHIP_HAWAII:
3174	case CHIP_CARRIZO:
3175	case CHIP_STONEY:
3176	case CHIP_POLARIS10:
3177	case CHIP_POLARIS11:
3178	case CHIP_POLARIS12:
3179	case CHIP_VEGAM:
3180	case CHIP_TONGA:
3181	case CHIP_FIJI:
3182	case CHIP_VEGA10:
3183	case CHIP_VEGA12:
3184	case CHIP_VEGA20:
3185#if defined(CONFIG_DRM_AMD_DC_DCN)
3186	case CHIP_RAVEN:
3187	case CHIP_NAVI10:
3188	case CHIP_NAVI14:
3189	case CHIP_NAVI12:
3190	case CHIP_RENOIR:
3191	case CHIP_SIENNA_CICHLID:
3192	case CHIP_NAVY_FLOUNDER:
3193	case CHIP_DIMGREY_CAVEFISH:
3194	case CHIP_BEIGE_GOBY:
3195	case CHIP_VANGOGH:
3196	case CHIP_YELLOW_CARP:
3197#endif
3198		return amdgpu_dc != 0;
3199#endif
3200	default:
3201		if (amdgpu_dc > 0)
3202			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3203					 "but isn't supported by ASIC, ignoring\n");
3204		return false;
3205	}
3206}
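/*
 * Editor's note: effect of the amdgpu.dc module parameter given the
 * switch above (derived from the return expressions; that the parameter
 * defaults to -1 is an assumption from its declaration elsewhere):
 *
 *   dc=1        opt in to DC everywhere it exists, including the
 *               LVDS/VGA-era ASICs that default to the legacy path
 *   dc=0        force the legacy display path on every ASIC
 *   dc=-1       (default) DC where it is the default, legacy otherwise
 */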
3207
3208/**
3209 * amdgpu_device_has_dc_support - check if dc is supported
3210 *
3211 * @adev: amdgpu_device pointer
3212 *
3213 * Returns true for supported, false for not supported
3214 */
3215bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3216{
3217	if (amdgpu_sriov_vf(adev) ||
3218	    adev->enable_virtual_display ||
3219	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3220		return false;
3221
3222	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3223}
3224
3225static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3226{
3227	struct amdgpu_device *adev =
3228		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3229	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3230
3231	/* It's a bug to not have a hive within this function */
3232	if (WARN_ON(!hive))
3233		return;
3234
3235	/*
3236	 * Use task barrier to synchronize all xgmi reset works across the
3237	 * hive. task_barrier_enter and task_barrier_exit will block
3238	 * until all the threads running the xgmi reset works reach
3239	 * those points. task_barrier_full will do both blocks.
3240	 */
3241	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3242
3243		task_barrier_enter(&hive->tb);
3244		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3245
3246		if (adev->asic_reset_res)
3247			goto fail;
3248
3249		task_barrier_exit(&hive->tb);
3250		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3251
3252		if (adev->asic_reset_res)
3253			goto fail;
3254
3255		if (adev->mmhub.ras_funcs &&
3256		    adev->mmhub.ras_funcs->reset_ras_error_count)
3257			adev->mmhub.ras_funcs->reset_ras_error_count(adev);
3258	} else {
3259
3260		task_barrier_full(&hive->tb);
3261		adev->asic_reset_res = amdgpu_asic_reset(adev);
3262	}
3263
3264fail:
3265	if (adev->asic_reset_res)
3266		DRM_WARN("ASIC reset failed with error %d for drm dev %s",
3267			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3268	amdgpu_put_xgmi_hive(hive);
3269}
3270
3271static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3272{
3273	char *input = amdgpu_lockup_timeout;
3274	char *timeout_setting = NULL;
3275	int index = 0;
3276	long timeout;
3277	int ret = 0;
3278
3279	/*
3280	 * By default the timeout for non-compute jobs is 10000 ms
3281	 * and 60000 ms for compute jobs.
3282	 * In SR-IOV mode the compute timeout is 60000 ms for pp-one-vf
3283	 * configurations and 10000 ms otherwise.
3284	 */
3285	adev->gfx_timeout = msecs_to_jiffies(10000);
3286	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3287	if (amdgpu_sriov_vf(adev))
3288		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3289					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3290	else
3291		adev->compute_timeout = msecs_to_jiffies(60000);
3292
3293	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3294		while ((timeout_setting = strsep(&input, ",")) &&
3295				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3296			ret = kstrtol(timeout_setting, 0, &timeout);
3297			if (ret)
3298				return ret;
3299
3300			if (timeout == 0) {
3301				index++;
3302				continue;
3303			} else if (timeout < 0) {
3304				timeout = MAX_SCHEDULE_TIMEOUT;
3305			} else {
3306				timeout = msecs_to_jiffies(timeout);
3307			}
3308
3309			switch (index++) {
3310			case 0:
3311				adev->gfx_timeout = timeout;
3312				break;
3313			case 1:
3314				adev->compute_timeout = timeout;
3315				break;
3316			case 2:
3317				adev->sdma_timeout = timeout;
3318				break;
3319			case 3:
3320				adev->video_timeout = timeout;
3321				break;
3322			default:
3323				break;
3324			}
3325		}
3326		/*
3327		 * There is only one value specified and
3328		 * it should apply to all non-compute jobs.
3329		 */
3330		if (index == 1) {
3331			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3332			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3333				adev->compute_timeout = adev->gfx_timeout;
3334		}
3335	}
3336
3337	return ret;
3338}
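/*
 * Editor's note: example lockup_timeout strings accepted by the parser
 * above (illustrative values, in milliseconds):
 *
 *   amdgpu.lockup_timeout=10000
 *       one value: applies to gfx/sdma/video; compute follows it only
 *       under SR-IOV or passthrough (see the index == 1 handling)
 *   amdgpu.lockup_timeout=10000,60000,10000,10000
 *       per-queue order is gfx,compute,sdma,video
 *
 * A value of 0 keeps the default for that slot; a negative value maps
 * to MAX_SCHEDULE_TIMEOUT (never time out).
 */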
3339
3340static const struct attribute *amdgpu_dev_attributes[] = {
3341	&dev_attr_product_name.attr,
3342	&dev_attr_product_number.attr,
3343	&dev_attr_serial_number.attr,
3344	&dev_attr_pcie_replay_count.attr,
3345	NULL
3346};
3347
3348/**
3349 * amdgpu_device_init - initialize the driver
3350 *
3351 * @adev: amdgpu_device pointer
3352 * @flags: driver flags
3353 *
3354 * Initializes the driver info and hw (all asics).
3355 * Returns 0 for success or an error on failure.
3356 * Called at driver startup.
3357 */
3358int amdgpu_device_init(struct amdgpu_device *adev,
3359		       uint32_t flags)
3360{
3361	struct drm_device *ddev = adev_to_drm(adev);
3362	struct pci_dev *pdev = adev->pdev;
3363	int r, i;
3364	bool px = false;
3365	u32 max_MBps;
3366
3367	adev->shutdown = false;
3368	adev->flags = flags;
3369
3370	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3371		adev->asic_type = amdgpu_force_asic_type;
3372	else
3373		adev->asic_type = flags & AMD_ASIC_MASK;
3374
3375	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3376	if (amdgpu_emu_mode == 1)
3377		adev->usec_timeout *= 10;
3378	adev->gmc.gart_size = 512 * 1024 * 1024;
3379	adev->accel_working = false;
3380	adev->num_rings = 0;
3381	adev->mman.buffer_funcs = NULL;
3382	adev->mman.buffer_funcs_ring = NULL;
3383	adev->vm_manager.vm_pte_funcs = NULL;
3384	adev->vm_manager.vm_pte_num_scheds = 0;
3385	adev->gmc.gmc_funcs = NULL;
3386	adev->harvest_ip_mask = 0x0;
3387	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3388	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3389
3390	adev->smc_rreg = &amdgpu_invalid_rreg;
3391	adev->smc_wreg = &amdgpu_invalid_wreg;
3392	adev->pcie_rreg = &amdgpu_invalid_rreg;
3393	adev->pcie_wreg = &amdgpu_invalid_wreg;
3394	adev->pciep_rreg = &amdgpu_invalid_rreg;
3395	adev->pciep_wreg = &amdgpu_invalid_wreg;
3396	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3397	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3398	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3399	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3400	adev->didt_rreg = &amdgpu_invalid_rreg;
3401	adev->didt_wreg = &amdgpu_invalid_wreg;
3402	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3403	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3404	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3405	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3406
3407	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3408		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3409		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3410
3411	/* mutex initializations are all done here so we
3412	 * can call functions without locking issues */
3413	mutex_init(&adev->firmware.mutex);
3414	mutex_init(&adev->pm.mutex);
3415	mutex_init(&adev->gfx.gpu_clock_mutex);
3416	mutex_init(&adev->srbm_mutex);
3417	mutex_init(&adev->gfx.pipe_reserve_mutex);
3418	mutex_init(&adev->gfx.gfx_off_mutex);
3419	mutex_init(&adev->grbm_idx_mutex);
3420	mutex_init(&adev->mn_lock);
3421	mutex_init(&adev->virt.vf_errors.lock);
3422	hash_init(adev->mn_hash);
3423	atomic_set(&adev->in_gpu_reset, 0);
3424	init_rwsem(&adev->reset_sem);
3425	mutex_init(&adev->psp.mutex);
3426	mutex_init(&adev->notifier_lock);
3427
3428	r = amdgpu_device_init_apu_flags(adev);
3429	if (r)
3430		return r;
3431
3432	r = amdgpu_device_check_arguments(adev);
3433	if (r)
3434		return r;
3435
3436	spin_lock_init(&adev->mmio_idx_lock);
3437	spin_lock_init(&adev->smc_idx_lock);
3438	spin_lock_init(&adev->pcie_idx_lock);
3439	spin_lock_init(&adev->uvd_ctx_idx_lock);
3440	spin_lock_init(&adev->didt_idx_lock);
3441	spin_lock_init(&adev->gc_cac_idx_lock);
3442	spin_lock_init(&adev->se_cac_idx_lock);
3443	spin_lock_init(&adev->audio_endpt_idx_lock);
3444	spin_lock_init(&adev->mm_stats.lock);
3445
3446	INIT_LIST_HEAD(&adev->shadow_list);
3447	mutex_init(&adev->shadow_list_lock);
3448
3449	INIT_LIST_HEAD(&adev->reset_list);
3450
3451	INIT_DELAYED_WORK(&adev->delayed_init_work,
3452			  amdgpu_device_delayed_init_work_handler);
3453	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3454			  amdgpu_device_delay_enable_gfx_off);
3455
3456	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3457
3458	adev->gfx.gfx_off_req_count = 1;
3459	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3460
3461	atomic_set(&adev->throttling_logging_enabled, 1);
3462	/*
3463	 * If throttling continues, logging will be performed every minute
3464	 * to avoid log flooding. "-1" is subtracted since the thermal
3465	 * throttling interrupt comes every second. Thus, the total logging
3466	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3467	 * for throttling interrupt) = 60 seconds.
3468	 */
3469	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3470	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3471
3472	/* Registers mapping */
3473	/* TODO: block userspace mapping of io register */
3474	if (adev->asic_type >= CHIP_BONAIRE) {
3475		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3476		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3477	} else {
3478		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3479		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3480	}
3481
3482	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3483	if (!adev->rmmio)
3484		return -ENOMEM;
3486	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3487	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3488
3489	/* enable PCIE atomic ops */
3490	r = pci_enable_atomic_ops_to_root(adev->pdev,
3491					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3492					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3493	if (r) {
3494		adev->have_atomics_support = false;
3495		DRM_INFO("PCIE atomic ops are not supported\n");
3496	} else {
3497		adev->have_atomics_support = true;
3498	}
3499
3500	amdgpu_device_get_pcie_info(adev);
3501
3502	if (amdgpu_mcbp)
3503		DRM_INFO("MCBP is enabled\n");
3504
3505	if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3506		adev->enable_mes = true;
3507
3508	/* detect hw virtualization here */
3509	amdgpu_detect_virtualization(adev);
3510
3511	r = amdgpu_device_get_job_timeout_settings(adev);
3512	if (r) {
3513		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3514		return r;
3515	}
3516
3517	/* early init functions */
3518	r = amdgpu_device_ip_early_init(adev);
3519	if (r)
3520		return r;
3521
3522	/* doorbell bar mapping and doorbell index init*/
3523	amdgpu_device_doorbell_init(adev);
3524
3525	if (amdgpu_emu_mode == 1) {
3526		/* post the asic on emulation mode */
3527		emu_soc_asic_init(adev);
3528		goto fence_driver_init;
3529	}
3530
3531	amdgpu_reset_init(adev);
3532
3533	/* detect whether we are running with an SR-IOV vBIOS */
3534	amdgpu_device_detect_sriov_bios(adev);
3535
3536	/* check if we need to reset the asic
3537	 *  E.g., driver was not cleanly unloaded previously, etc.
3538	 */
3539	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3540		if (adev->gmc.xgmi.num_physical_nodes) {
3541			dev_info(adev->dev, "Pending hive reset.\n");
3542			adev->gmc.xgmi.pending_reset = true;
3543			/* Only need to init necessary block for SMU to handle the reset */
3544			for (i = 0; i < adev->num_ip_blocks; i++) {
3545				if (!adev->ip_blocks[i].status.valid)
3546					continue;
3547				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3548				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3549				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3550				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3551					DRM_DEBUG("IP %s disabled for hw_init.\n",
3552						adev->ip_blocks[i].version->funcs->name);
3553					adev->ip_blocks[i].status.hw = true;
3554				}
3555			}
3556		} else {
3557			r = amdgpu_asic_reset(adev);
3558			if (r) {
3559				dev_err(adev->dev, "asic reset on init failed\n");
3560				goto failed;
3561			}
3562		}
3563	}
3564
3565	pci_enable_pcie_error_reporting(adev->pdev);
3566
3567	/* Post card if necessary */
3568	if (amdgpu_device_need_post(adev)) {
3569		if (!adev->bios) {
3570			dev_err(adev->dev, "no vBIOS found\n");
3571			r = -EINVAL;
3572			goto failed;
3573		}
3574		DRM_INFO("GPU posting now...\n");
3575		r = amdgpu_device_asic_init(adev);
3576		if (r) {
3577			dev_err(adev->dev, "gpu post error!\n");
3578			goto failed;
3579		}
3580	}
3581
3582	if (adev->is_atom_fw) {
3583		/* Initialize clocks */
3584		r = amdgpu_atomfirmware_get_clock_info(adev);
3585		if (r) {
3586			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3587			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3588			goto failed;
3589		}
3590	} else {
3591		/* Initialize clocks */
3592		r = amdgpu_atombios_get_clock_info(adev);
3593		if (r) {
3594			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3595			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3596			goto failed;
3597		}
3598		/* init i2c buses */
3599		if (!amdgpu_device_has_dc_support(adev))
3600			amdgpu_atombios_i2c_init(adev);
3601	}
3602
3603fence_driver_init:
3604	/* Fence driver */
3605	r = amdgpu_fence_driver_sw_init(adev);
3606	if (r) {
3607		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3608		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3609		goto failed;
3610	}
3611
3612	/* init the mode config */
3613	drm_mode_config_init(adev_to_drm(adev));
3614
3615	r = amdgpu_device_ip_init(adev);
3616	if (r) {
3617		/* failed in exclusive mode due to timeout */
3618		if (amdgpu_sriov_vf(adev) &&
3619		    !amdgpu_sriov_runtime(adev) &&
3620		    amdgpu_virt_mmio_blocked(adev) &&
3621		    !amdgpu_virt_wait_reset(adev)) {
3622			dev_err(adev->dev, "VF exclusive mode timeout\n");
3623			/* Don't send request since VF is inactive. */
3624			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3625			adev->virt.ops = NULL;
3626			r = -EAGAIN;
3627			goto release_ras_con;
3628		}
3629		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3630		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3631		goto release_ras_con;
3632	}
3633
3634	amdgpu_fence_driver_hw_init(adev);
3635
3636	dev_info(adev->dev,
3637		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3638			adev->gfx.config.max_shader_engines,
3639			adev->gfx.config.max_sh_per_se,
3640			adev->gfx.config.max_cu_per_sh,
3641			adev->gfx.cu_info.number);
3642
3643	adev->accel_working = true;
3644
3645	amdgpu_vm_check_compute_bug(adev);
3646
3647	/* Initialize the buffer migration limit. */
3648	if (amdgpu_moverate >= 0)
3649		max_MBps = amdgpu_moverate;
3650	else
3651		max_MBps = 8; /* Allow 8 MB/s. */
3652	/* Get a log2 for easy divisions. */
3653	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
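	/*
	 * Editor's note: worked example of the log2 trick above. With the
	 * default max_MBps = 8, log2_max_MBps = ilog2(8) = 3, so later
	 * accounting can divide a byte count by the configured rate with a
	 * right shift instead of a division.
	 */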
3654
3655	amdgpu_fbdev_init(adev);
3656
3657	r = amdgpu_pm_sysfs_init(adev);
3658	if (r) {
3659		adev->pm_sysfs_en = false;
3660		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3661	} else
3662		adev->pm_sysfs_en = true;
3663
3664	r = amdgpu_ucode_sysfs_init(adev);
3665	if (r) {
3666		adev->ucode_sysfs_en = false;
3667		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3668	} else
3669		adev->ucode_sysfs_en = true;
3670
3671	if ((amdgpu_testing & 1)) {
3672		if (adev->accel_working)
3673			amdgpu_test_moves(adev);
3674		else
3675			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3676	}
3677	if (amdgpu_benchmarking) {
3678		if (adev->accel_working)
3679			amdgpu_benchmark(adev, amdgpu_benchmarking);
3680		else
3681			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3682	}
3683
3684	/*
3685	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3686	 * Otherwise the mgpu fan boost feature will be skipped because the
3687	 * gpu instance count will be too low.
3688	 */
3689	amdgpu_register_gpu_instance(adev);
3690
3691	/* enable clockgating, etc. after ib tests, etc. since some blocks require
3692	 * explicit gating rather than handling it automatically.
3693	 */
3694	if (!adev->gmc.xgmi.pending_reset) {
3695		r = amdgpu_device_ip_late_init(adev);
3696		if (r) {
3697			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3698			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3699			goto release_ras_con;
3700		}
3701		/* must succeed. */
3702		amdgpu_ras_resume(adev);
3703		queue_delayed_work(system_wq, &adev->delayed_init_work,
3704				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3705	}
3706
3707	if (amdgpu_sriov_vf(adev))
3708		flush_delayed_work(&adev->delayed_init_work);
3709
3710	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3711	if (r)
3712		dev_err(adev->dev, "Could not create amdgpu device attr\n");
3713
3714	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3715		r = amdgpu_pmu_init(adev);
3716		if (r)
3717			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}
3718
3719	/* Have stored pci confspace at hand for restore in sudden PCI error */
3720	if (amdgpu_device_cache_pci_state(adev->pdev))
3721		pci_restore_state(pdev);
3722
3723	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3724	/* this will fail for cards that aren't VGA class devices, just
3725	 * ignore it */
3726	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3727		vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3728
3729	if (amdgpu_device_supports_px(ddev)) {
3730		px = true;
3731		vga_switcheroo_register_client(adev->pdev,
3732					       &amdgpu_switcheroo_ops, px);
3733		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3734	}
3735
3736	if (adev->gmc.xgmi.pending_reset)
3737		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3738				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3739
3740	return 0;
3741
3742release_ras_con:
3743	amdgpu_release_ras_context(adev);
3744
3745failed:
3746	amdgpu_vf_error_trans_all(adev);
3747
3748	return r;
3749}
3750
3751static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3752{
3753	/* Clear all CPU mappings pointing to this device */
3754	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3755
3756	/* Unmap all mapped bars - Doorbell, registers and VRAM */
3757	amdgpu_device_doorbell_fini(adev);
3758
3759	iounmap(adev->rmmio);
3760	adev->rmmio = NULL;
3761	if (adev->mman.aper_base_kaddr)
3762		iounmap(adev->mman.aper_base_kaddr);
3763	adev->mman.aper_base_kaddr = NULL;
3764
3765	/* Memory manager related */
3766	if (!adev->gmc.xgmi.connected_to_cpu) {
3767		arch_phys_wc_del(adev->gmc.vram_mtrr);
3768		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3769	}
3770}
3771
3772/**
3773 * amdgpu_device_fini_hw - tear down the driver
3774 *
3775 * @adev: amdgpu_device pointer
3776 *
3777 * Tear down the driver info (all asics).
3778 * Called at driver shutdown.
3779 */
3780void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3781{
3782	dev_info(adev->dev, "amdgpu: finishing device.\n");
3783	flush_delayed_work(&adev->delayed_init_work);
3784	ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3785	adev->shutdown = true;
3786
3787	/* make sure IB tests have finished before entering exclusive mode
3788	 * to avoid preemption during the IB tests
3789	 */
3790	if (amdgpu_sriov_vf(adev)) {
3791		amdgpu_virt_request_full_gpu(adev, false);
3792		amdgpu_virt_fini_data_exchange(adev);
3793	}
3794
3795	/* disable all interrupts */
3796	amdgpu_irq_disable_all(adev);
3797	if (adev->mode_info.mode_config_initialized) {
3798		if (!amdgpu_device_has_dc_support(adev))
3799			drm_helper_force_disable_all(adev_to_drm(adev));
3800		else
3801			drm_atomic_helper_shutdown(adev_to_drm(adev));
3802	}
3803	amdgpu_fence_driver_hw_fini(adev);
3804
3805	if (adev->pm_sysfs_en)
3806		amdgpu_pm_sysfs_fini(adev);
3807	if (adev->ucode_sysfs_en)
3808		amdgpu_ucode_sysfs_fini(adev);
3809	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3810
3811	amdgpu_fbdev_fini(adev);
3812
3813	amdgpu_irq_fini_hw(adev);
3814
3815	amdgpu_device_ip_fini_early(adev);
3816
3817	amdgpu_gart_dummy_page_fini(adev);
3818
3819	amdgpu_device_unmap_mmio(adev);
3820}
3821
3822void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3823{
3824	amdgpu_device_ip_fini(adev);
3825	amdgpu_fence_driver_sw_fini(adev);
3826	release_firmware(adev->firmware.gpu_info_fw);
3827	adev->firmware.gpu_info_fw = NULL;
3828	adev->accel_working = false;
3829
3830	amdgpu_reset_fini(adev);
3831
3832	/* free i2c buses */
3833	if (!amdgpu_device_has_dc_support(adev))
3834		amdgpu_i2c_fini(adev);
3835
3836	if (amdgpu_emu_mode != 1)
3837		amdgpu_atombios_fini(adev);
3838
3839	kfree(adev->bios);
3840	adev->bios = NULL;
3841	if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3842		vga_switcheroo_unregister_client(adev->pdev);
3843		vga_switcheroo_fini_domain_pm_ops(adev->dev);
3844	}
3845	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3846		vga_client_register(adev->pdev, NULL, NULL, NULL);
3847
3848	if (IS_ENABLED(CONFIG_PERF_EVENTS))
3849		amdgpu_pmu_fini(adev);
3850	if (adev->mman.discovery_bin)
3851		amdgpu_discovery_fini(adev);
3852
3853	kfree(adev->pci_state);
3854
3855}
3856
3857
3858/*
3859 * Suspend & resume.
3860 */
3861/**
3862 * amdgpu_device_suspend - initiate device suspend
3863 *
3864 * @dev: drm dev pointer
3865 * @fbcon: notify the fbdev of suspend
3866 *
3867 * Puts the hw in the suspend state (all asics).
3868 * Returns 0 for success or an error on failure.
3869 * Called at driver suspend.
3870 */
3871int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3872{
3873	struct amdgpu_device *adev = drm_to_adev(dev);
3874
3875	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3876		return 0;
3877
3878	adev->in_suspend = true;
3879
3880	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
3881		DRM_WARN("smart shift update failed\n");
3882
3883	drm_kms_helper_poll_disable(dev);
3884
3885	if (fbcon)
3886		amdgpu_fbdev_set_suspend(adev, 1);
3887
3888	cancel_delayed_work_sync(&adev->delayed_init_work);
3889
3890	amdgpu_ras_suspend(adev);
3891
3892	amdgpu_device_ip_suspend_phase1(adev);
3893
3894	if (!adev->in_s0ix)
3895		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
3896
3897	/* evict vram memory */
3898	amdgpu_bo_evict_vram(adev);
 
 
 
3899
3900	amdgpu_fence_driver_hw_fini(adev);
3901
3902	amdgpu_device_ip_suspend_phase2(adev);
3903	/* evict remaining vram memory
3904	 * This second call to evict vram is to evict the gart page table
3905	 * using the CPU.
3906	 */
3907	amdgpu_bo_evict_vram(adev);
 
 
3908
3909	return 0;
3910}
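/*
 * A hedged sketch of how a system-sleep PM callback would drive the
 * suspend/resume pair implemented here; the example_* wrappers are
 * assumptions, only the two exported helpers are from this file:
 *
 *	static int example_pmops_suspend(struct device *dev)
 *	{
 *		struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *		return amdgpu_device_suspend(drm_dev, true);
 *	}
 *
 *	static int example_pmops_resume(struct device *dev)
 *	{
 *		struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *		return amdgpu_device_resume(drm_dev, true);
 *	}
 */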
3911
3912/**
3913 * amdgpu_device_resume - initiate device resume
3914 *
3915 * @dev: drm dev pointer
3916 * @fbcon: notify the fbdev of resume
3917 *
3918 * Bring the hw back to operating state (all asics).
3919 * Returns 0 for success or an error on failure.
3920 * Called at driver resume.
3921 */
3922int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3923{
3924	struct amdgpu_device *adev = drm_to_adev(dev);
3925	int r = 0;
3926
3927	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3928		return 0;
3929
3930	if (adev->in_s0ix)
3931		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3932
3933	/* post card */
3934	if (amdgpu_device_need_post(adev)) {
3935		r = amdgpu_device_asic_init(adev);
3936		if (r)
3937			dev_err(adev->dev, "amdgpu asic init failed\n");
3938	}
3939
3940	r = amdgpu_device_ip_resume(adev);
 
3941	if (r) {
3942		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3943		return r;
3944	}
3945	amdgpu_fence_driver_hw_init(adev);
3946
3947	r = amdgpu_device_ip_late_init(adev);
3948	if (r)
3949		return r;
3950
3951	queue_delayed_work(system_wq, &adev->delayed_init_work,
3952			   msecs_to_jiffies(AMDGPU_RESUME_MS));
3953
3954	if (!adev->in_s0ix) {
3955		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
3956		if (r)
3957			return r;
3958	}
3959
3960	/* Make sure IB tests flushed */
3961	flush_delayed_work(&adev->delayed_init_work);
3962
3963	if (fbcon)
3964		amdgpu_fbdev_set_suspend(adev, 0);
3965
3966	drm_kms_helper_poll_enable(dev);
3967
3968	amdgpu_ras_resume(adev);
3969
3970	/*
3971	 * Most of the connector probing functions try to acquire runtime pm
3972	 * refs to ensure that the GPU is powered on when connector polling is
3973	 * performed. Since we're calling this from a runtime PM callback,
3974	 * trying to acquire rpm refs will cause us to deadlock.
3975	 *
3976	 * Since we're guaranteed to be holding the rpm lock, it's safe to
3977	 * temporarily disable the rpm helpers so this doesn't deadlock us.
3978	 */
 
3979#ifdef CONFIG_PM
3980	dev->dev->power.disable_depth++;
3981#endif
3982	if (!amdgpu_device_has_dc_support(adev))
3983		drm_helper_hpd_irq_event(dev);
3984	else
3985		drm_kms_helper_hotplug_event(dev);
3986#ifdef CONFIG_PM
3987	dev->dev->power.disable_depth--;
3988#endif
 
3989	adev->in_suspend = false;
3990
 
 
 
3991	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
3992		DRM_WARN("smart shift update failed\n");
3993
3994	return 0;
3995}
3996
3997/**
3998 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3999 *
4000 * @adev: amdgpu_device pointer
4001 *
4002 * The list of all the hardware IPs that make up the asic is walked and
4003 * the check_soft_reset callbacks are run.  check_soft_reset determines
4004 * if the asic is still hung or not.
4005 * Returns true if any of the IPs are still in a hung state, false if not.
4006 */
4007static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4008{
4009	int i;
4010	bool asic_hang = false;
4011
4012	if (amdgpu_sriov_vf(adev))
4013		return true;
4014
4015	if (amdgpu_asic_need_full_reset(adev))
4016		return true;
4017
4018	for (i = 0; i < adev->num_ip_blocks; i++) {
4019		if (!adev->ip_blocks[i].status.valid)
4020			continue;
4021		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4022			adev->ip_blocks[i].status.hang =
4023				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
 
4024		if (adev->ip_blocks[i].status.hang) {
4025			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4026			asic_hang = true;
4027		}
4028	}
4029	return asic_hang;
4030}
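/*
 * A minimal sketch of the callback contract used by
 * amdgpu_device_ip_check_soft_reset() and the pre/soft/post reset
 * walkers that follow: an IP block exposes check_soft_reset() through
 * its amd_ip_funcs table and reports whether it is hung. The example_*
 * names and the busy-bit helper are hypothetical:
 *
 *	static bool example_ip_check_soft_reset(void *handle)
 *	{
 *		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 *
 *		// read the block's status registers and report a hang
 *		return example_ip_read_busy_bit(adev) != 0;
 *	}
 */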
4031
4032/**
4033 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4034 *
4035 * @adev: amdgpu_device pointer
4036 *
4037 * The list of all the hardware IPs that make up the asic is walked and the
4038 * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4039 * handles any IP specific hardware or software state changes that are
4040 * necessary for a soft reset to succeed.
4041 * Returns 0 on success, negative error code on failure.
4042 */
4043static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4044{
4045	int i, r = 0;
4046
4047	for (i = 0; i < adev->num_ip_blocks; i++) {
4048		if (!adev->ip_blocks[i].status.valid)
4049			continue;
4050		if (adev->ip_blocks[i].status.hang &&
4051		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4052			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4053			if (r)
4054				return r;
4055		}
4056	}
4057
4058	return 0;
4059}
4060
4061/**
4062 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4063 *
4064 * @adev: amdgpu_device pointer
4065 *
4066 * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4067 * reset is necessary to recover.
4068 * Returns true if a full asic reset is required, false if not.
4069 */
4070static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4071{
4072	int i;
4073
4074	if (amdgpu_asic_need_full_reset(adev))
4075		return true;
4076
4077	for (i = 0; i < adev->num_ip_blocks; i++) {
4078		if (!adev->ip_blocks[i].status.valid)
4079			continue;
4080		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4081		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4082		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4083		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4084		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4085			if (adev->ip_blocks[i].status.hang) {
4086				dev_info(adev->dev, "Some block need full reset!\n");
4087				return true;
4088			}
4089		}
4090	}
4091	return false;
4092}
4093
4094/**
4095 * amdgpu_device_ip_soft_reset - do a soft reset
4096 *
4097 * @adev: amdgpu_device pointer
4098 *
4099 * The list of all the hardware IPs that make up the asic is walked and the
4100 * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4101 * IP specific hardware or software state changes that are necessary to soft
4102 * reset the IP.
4103 * Returns 0 on success, negative error code on failure.
4104 */
4105static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4106{
4107	int i, r = 0;
4108
4109	for (i = 0; i < adev->num_ip_blocks; i++) {
4110		if (!adev->ip_blocks[i].status.valid)
4111			continue;
4112		if (adev->ip_blocks[i].status.hang &&
4113		    adev->ip_blocks[i].version->funcs->soft_reset) {
4114			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4115			if (r)
4116				return r;
4117		}
4118	}
4119
4120	return 0;
4121}
4122
4123/**
4124 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4125 *
4126 * @adev: amdgpu_device pointer
4127 *
4128 * The list of all the hardware IPs that make up the asic is walked and the
4129 * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4130 * handles any IP specific hardware or software state changes that are
4131 * necessary after the IP has been soft reset.
4132 * Returns 0 on success, negative error code on failure.
4133 */
4134static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4135{
4136	int i, r = 0;
4137
4138	for (i = 0; i < adev->num_ip_blocks; i++) {
4139		if (!adev->ip_blocks[i].status.valid)
4140			continue;
4141		if (adev->ip_blocks[i].status.hang &&
4142		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4143			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4144		if (r)
4145			return r;
4146	}
4147
4148	return 0;
4149}
4150
4151/**
4152 * amdgpu_device_recover_vram - Recover some VRAM contents
4153 *
4154 * @adev: amdgpu_device pointer
4155 *
4156 * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4157 * restore things like GPUVM page tables after a GPU reset where
4158 * the contents of VRAM might be lost.
4159 *
4160 * Returns:
4161 * 0 on success, negative error code on failure.
4162 */
4163static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4164{
4165	struct dma_fence *fence = NULL, *next = NULL;
4166	struct amdgpu_bo *shadow;
4167	struct amdgpu_bo_vm *vmbo;
4168	long r = 1, tmo;
4169
4170	if (amdgpu_sriov_runtime(adev))
4171		tmo = msecs_to_jiffies(8000);
4172	else
4173		tmo = msecs_to_jiffies(100);
4174
4175	dev_info(adev->dev, "recover vram bo from shadow start\n");
4176	mutex_lock(&adev->shadow_list_lock);
4177	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4178		shadow = &vmbo->bo;
4179		/* No need to recover an evicted BO */
4180		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4181		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4182		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4183			continue;
4184
4185		r = amdgpu_bo_restore_shadow(shadow, &next);
4186		if (r)
4187			break;
4188
4189		if (fence) {
4190			tmo = dma_fence_wait_timeout(fence, false, tmo);
4191			dma_fence_put(fence);
4192			fence = next;
4193			if (tmo == 0) {
4194				r = -ETIMEDOUT;
4195				break;
4196			} else if (tmo < 0) {
4197				r = tmo;
4198				break;
4199			}
4200		} else {
4201			fence = next;
4202		}
4203	}
4204	mutex_unlock(&adev->shadow_list_lock);
4205
4206	if (fence)
4207		tmo = dma_fence_wait_timeout(fence, false, tmo);
4208	dma_fence_put(fence);
4209
4210	if (r < 0 || tmo <= 0) {
4211		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4212		return -EIO;
4213	}
4214
4215	dev_info(adev->dev, "recover vram bo from shadow done\n");
4216	return 0;
4217}
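/*
 * The shadows walked above exist because page-table BOs can be created
 * with a GTT shadow copy. A hedged sketch of the creation side (field
 * values are illustrative only):
 *
 *	struct amdgpu_bo_param bp = {
 *		.size	= size,
 *		.domain	= AMDGPU_GEM_DOMAIN_VRAM,
 *		.flags	= AMDGPU_GEM_CREATE_SHADOW |
 *			  AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 *	};
 *
 * After a reset that clobbers VRAM, amdgpu_bo_restore_shadow() copies
 * each shadow back, and the resulting fences are awaited as in the
 * loop above.
 */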
4218
4219
4220/**
4221 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4222 *
4223 * @adev: amdgpu_device pointer
4224 * @from_hypervisor: request from hypervisor
4225 *
4226 * Do a VF FLR and reinitialize the ASIC.
4227 * Returns 0 on success, an error code otherwise.
4228 */
4229static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4230				     bool from_hypervisor)
4231{
4232	int r;
 
4233
4234	if (from_hypervisor)
4235		r = amdgpu_virt_request_full_gpu(adev, true);
4236	else
4237		r = amdgpu_virt_reset_gpu(adev);
 
4238	if (r)
4239		return r;
4240
4241	amdgpu_amdkfd_pre_reset(adev);
4242
4243	/* Resume IP prior to SMC */
4244	r = amdgpu_device_ip_reinit_early_sriov(adev);
4245	if (r)
4246		goto error;
4247
4248	amdgpu_virt_init_data_exchange(adev);
4249	/* we need to recover the GART prior to running SMC/CP/SDMA resume */
4250	amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4251
4252	r = amdgpu_device_fw_loading(adev);
4253	if (r)
4254		return r;
4255
4256	/* now we are okay to resume SMC/CP/SDMA */
4257	r = amdgpu_device_ip_reinit_late_sriov(adev);
4258	if (r)
4259		goto error;
4260
4261	amdgpu_irq_gpu_reset_resume_helper(adev);
4262	r = amdgpu_ib_ring_tests(adev);
4263	amdgpu_amdkfd_post_reset(adev);
 
4264
4265error:
4266	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4267		amdgpu_inc_vram_lost(adev);
4268		r = amdgpu_device_recover_vram(adev);
4269	}
 
 
 
4270	amdgpu_virt_release_full_gpu(adev, true);
4271
4272	return r;
4273}
4274
4275/**
4276 * amdgpu_device_has_job_running - check if there is any job in mirror list
4277 *
4278 * @adev: amdgpu_device pointer
4279 *
4280 * Check if there is any job in the mirror list.
4281 */
4282bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4283{
4284	int i;
4285	struct drm_sched_job *job;
4286
4287	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4288		struct amdgpu_ring *ring = adev->rings[i];
4289
4290		if (!ring || !ring->sched.thread)
4291			continue;
4292
4293		spin_lock(&ring->sched.job_list_lock);
4294		job = list_first_entry_or_null(&ring->sched.pending_list,
4295					       struct drm_sched_job, list);
4296		spin_unlock(&ring->sched.job_list_lock);
4297		if (job)
4298			return true;
4299	}
4300	return false;
4301}
4302
4303/**
4304 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4305 *
4306 * @adev: amdgpu_device pointer
4307 *
4308 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4309 * a hung GPU.
4310 */
4311bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4312{
4313	if (!amdgpu_device_ip_check_soft_reset(adev)) {
4314		dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4315		return false;
4316	}
4317
4318	if (amdgpu_gpu_recovery == 0)
4319		goto disabled;
4320
4321	if (amdgpu_sriov_vf(adev))
4322		return true;
4323
4324	if (amdgpu_gpu_recovery == -1) {
4325		switch (adev->asic_type) {
4326		case CHIP_BONAIRE:
4327		case CHIP_HAWAII:
4328		case CHIP_TOPAZ:
4329		case CHIP_TONGA:
4330		case CHIP_FIJI:
4331		case CHIP_POLARIS10:
4332		case CHIP_POLARIS11:
4333		case CHIP_POLARIS12:
4334		case CHIP_VEGAM:
4335		case CHIP_VEGA20:
4336		case CHIP_VEGA10:
4337		case CHIP_VEGA12:
4338		case CHIP_RAVEN:
4339		case CHIP_ARCTURUS:
4340		case CHIP_RENOIR:
4341		case CHIP_NAVI10:
4342		case CHIP_NAVI14:
4343		case CHIP_NAVI12:
4344		case CHIP_SIENNA_CICHLID:
4345		case CHIP_NAVY_FLOUNDER:
4346		case CHIP_DIMGREY_CAVEFISH:
4347		case CHIP_BEIGE_GOBY:
4348		case CHIP_VANGOGH:
4349		case CHIP_ALDEBARAN:
4350			break;
4351		default:
4352			goto disabled;
4353		}
4354	}
4355
4356	return true;
4357
4358disabled:
4359	dev_info(adev->dev, "GPU recovery disabled.\n");
4360	return false;
4361}
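/*
 * Note: amdgpu_gpu_recovery is a module parameter; the values consumed
 * above are -1 (auto, per-ASIC allow list), 0 (disabled) and any other
 * value (enabled; 2 additionally enables the guilty-job recheck later
 * in this file). A hypothetical way to force it on from the kernel
 * command line:
 *
 *	amdgpu.gpu_recovery=2
 */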
4362
4363int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4364{
4365	u32 i;
4366	int ret = 0;
4367
4368	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4369
4370	dev_info(adev->dev, "GPU mode1 reset\n");
4371
4372	/* disable BM */
4373	pci_clear_master(adev->pdev);
4374
4375	amdgpu_device_cache_pci_state(adev->pdev);
4376
4377	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4378		dev_info(adev->dev, "GPU smu mode1 reset\n");
4379		ret = amdgpu_dpm_mode1_reset(adev);
4380	} else {
4381		dev_info(adev->dev, "GPU psp mode1 reset\n");
4382		ret = psp_gpu_reset(adev);
4383	}
4384
4385	if (ret)
4386		dev_err(adev->dev, "GPU mode1 reset failed\n");
4387
4388	amdgpu_device_load_pci_state(adev->pdev);
4389
4390	/* wait for asic to come out of reset */
4391	for (i = 0; i < adev->usec_timeout; i++) {
4392		u32 memsize = adev->nbio.funcs->get_memsize(adev);
4393
4394		if (memsize != 0xffffffff)
4395			break;
4396		udelay(1);
4397	}
4398
4399	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4400	return ret;
4401}
4402
4403int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4404				 struct amdgpu_reset_context *reset_context)
4405{
4406	int i, r = 0;
4407	struct amdgpu_job *job = NULL;
 
4408	bool need_full_reset =
4409		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4410
4411	if (reset_context->reset_req_dev == adev)
4412		job = reset_context->job;
4413
4414	/* no need to dump if device is not in good state during probe period */
4415	if (!adev->gmc.xgmi.pending_reset)
4416		amdgpu_debugfs_wait_dump(adev);
4417
4418	if (amdgpu_sriov_vf(adev)) {
4419		/* stop the data exchange thread */
4420		amdgpu_virt_fini_data_exchange(adev);
4421	}
4422
4423	/* block all schedulers and reset given job's ring */
4424	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4425		struct amdgpu_ring *ring = adev->rings[i];
4426
4427		if (!ring || !ring->sched.thread)
4428			continue;
4429
4430		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4431		amdgpu_fence_driver_force_completion(ring);
4432	}
4433
4434	if (job)
4435		drm_sched_increase_karma(&job->base);
4436
4437	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4438	/* If reset handler not implemented, continue; otherwise return */
4439	if (r == -ENOSYS)
4440		r = 0;
4441	else
4442		return r;
4443
4444	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4445	if (!amdgpu_sriov_vf(adev)) {
4446
4447		if (!need_full_reset)
4448			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4449
4450		if (!need_full_reset) {
 
4451			amdgpu_device_ip_pre_soft_reset(adev);
4452			r = amdgpu_device_ip_soft_reset(adev);
4453			amdgpu_device_ip_post_soft_reset(adev);
4454			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4455				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4456				need_full_reset = true;
4457			}
4458		}
4459
4460		if (need_full_reset)
4461			r = amdgpu_device_ip_suspend(adev);
4462		if (need_full_reset)
4463			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4464		else
4465			clear_bit(AMDGPU_NEED_FULL_RESET,
4466				  &reset_context->flags);
4467	}
4468
4469	return r;
4470}
4471
4472int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4473			 struct amdgpu_reset_context *reset_context)
4474{
4475	struct amdgpu_device *tmp_adev = NULL;
4476	bool need_full_reset, skip_hw_reset, vram_lost = false;
4477	int r = 0;
 
 
 
4478
4479	/* Try reset handler method first */
4480	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4481				    reset_list);
4482	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4483	/* If reset handler not implemented, continue; otherwise return */
4484	if (r == -ENOSYS)
4485		r = 0;
4486	else
4487		return r;
4488
4489	/* Reset handler not implemented, use the default method */
4490	need_full_reset =
4491		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4492	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4493
4494	/*
4495	 * ASIC reset has to be done on all XGMI hive nodes ASAP
4496	 * to allow proper links negotiation in FW (within 1 sec)
4497	 */
4498	if (!skip_hw_reset && need_full_reset) {
4499		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4500			/* For XGMI run all resets in parallel to speed up the process */
4501			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4502				tmp_adev->gmc.xgmi.pending_reset = false;
4503				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4504					r = -EALREADY;
4505			} else
4506				r = amdgpu_asic_reset(tmp_adev);
4507
4508			if (r) {
4509				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4510					 r, adev_to_drm(tmp_adev)->unique);
4511				break;
4512			}
4513		}
4514
4515		/* For XGMI wait for all resets to complete before proceed */
4516		if (!r) {
4517			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4518				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4519					flush_work(&tmp_adev->xgmi_reset_work);
4520					r = tmp_adev->asic_reset_res;
4521					if (r)
4522						break;
4523				}
4524			}
4525		}
4526	}
4527
4528	if (!r && amdgpu_ras_intr_triggered()) {
4529		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4530			if (tmp_adev->mmhub.ras_funcs &&
4531			    tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
4532				tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
4533		}
4534
4535		amdgpu_ras_intr_cleared();
4536	}
4537
 
4538	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4539		if (need_full_reset) {
 
4540			/* post card */
 
4541			r = amdgpu_device_asic_init(tmp_adev);
4542			if (r) {
4543				dev_warn(tmp_adev->dev, "asic atom init failed!");
4544			} else {
4545				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4546				r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4547				if (r)
4548					goto out;
4549
4550				r = amdgpu_device_ip_resume_phase1(tmp_adev);
4551				if (r)
4552					goto out;
4553
4554				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4555				if (vram_lost) {
4556					DRM_INFO("VRAM is lost due to GPU reset!\n");
4557					amdgpu_inc_vram_lost(tmp_adev);
4558				}
4559
4560				r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4561				if (r)
4562					goto out;
4563
4564				r = amdgpu_device_fw_loading(tmp_adev);
4565				if (r)
4566					return r;
 
 
 
4567
4568				r = amdgpu_device_ip_resume_phase2(tmp_adev);
4569				if (r)
4570					goto out;
4571
4572				if (vram_lost)
4573					amdgpu_device_fill_reset_magic(tmp_adev);
4574
4575				/*
4576				 * Add this ASIC back as tracked, as the reset
4577				 * already completed successfully.
4578				 */
4579				amdgpu_register_gpu_instance(tmp_adev);
4580
4581				if (!reset_context->hive &&
4582				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4583					amdgpu_xgmi_add_device(tmp_adev);
4584
4585				r = amdgpu_device_ip_late_init(tmp_adev);
4586				if (r)
4587					goto out;
4588
4589				amdgpu_fbdev_set_suspend(tmp_adev, 0);
4590
4591				/*
4592				 * The GPU enters a bad state once the number
4593				 * of faulty pages caught by ECC reaches the
4594				 * threshold, after which RAS recovery is
4595				 * scheduled. So check here whether the bad page
4596				 * threshold is indeed exceeded and, if so, break
4597				 * recovery and remind the user to retire this
4598				 * GPU or to set a bigger bad_page_threshold
4599				 * value when probing the driver again.
4600				 */
4601				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4602					/* must succeed. */
4603					amdgpu_ras_resume(tmp_adev);
4604				} else {
4605					r = -EINVAL;
4606					goto out;
4607				}
4608
4609				/* Update PSP FW topology after reset */
4610				if (reset_context->hive &&
4611				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4612					r = amdgpu_xgmi_update_topology(
4613						reset_context->hive, tmp_adev);
4614			}
4615		}
4616
4617out:
4618		if (!r) {
 
 
 
4619			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4620			r = amdgpu_ib_ring_tests(tmp_adev);
4621			if (r) {
4622				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4623				need_full_reset = true;
4624				r = -EAGAIN;
4625				goto end;
4626			}
4627		}
4628
4629		if (!r)
4630			r = amdgpu_device_recover_vram(tmp_adev);
4631		else
4632			tmp_adev->asic_reset_res = r;
4633	}
4634
4635end:
4636	if (need_full_reset)
4637		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4638	else
4639		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
 
 
4640	return r;
4641}
4642
4643static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4644				struct amdgpu_hive_info *hive)
4645{
4646	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4647		return false;
4648
4649	if (hive) {
4650		down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4651	} else {
4652		down_write(&adev->reset_sem);
4653	}
4654
4655	switch (amdgpu_asic_reset_method(adev)) {
4656	case AMD_RESET_METHOD_MODE1:
4657		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4658		break;
4659	case AMD_RESET_METHOD_MODE2:
4660		adev->mp1_state = PP_MP1_STATE_RESET;
4661		break;
4662	default:
4663		adev->mp1_state = PP_MP1_STATE_NONE;
4664		break;
4665	}
4666
4667	return true;
4668}
4669
4670static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4671{
4672	amdgpu_vf_error_trans_all(adev);
4673	adev->mp1_state = PP_MP1_STATE_NONE;
4674	atomic_set(&adev->in_gpu_reset, 0);
4675	up_write(&adev->reset_sem);
4676}
4677
4678/*
4679 * Lock a list of amdgpu devices in a hive safely. If this is not a
4680 * hive with multiple nodes, it behaves like amdgpu_device_lock_adev.
4681 *
4682 * Unlock won't require a roll back.
4683 */
4684static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4685{
4686	struct amdgpu_device *tmp_adev = NULL;
4687
4688	if (adev->gmc.xgmi.num_physical_nodes > 1) {
4689		if (!hive) {
4690			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4691			return -ENODEV;
4692		}
4693		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4694			if (!amdgpu_device_lock_adev(tmp_adev, hive))
4695				goto roll_back;
4696		}
4697	} else if (!amdgpu_device_lock_adev(adev, hive))
4698		return -EAGAIN;
4699
4700	return 0;
4701roll_back:
4702	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
4703		/*
4704		 * If the locking iteration breaks in the middle of a hive,
4705		 * it may mean there is a race issue, or that a hive device
4706		 * locked up independently. We may or may not be in trouble,
4707		 * so roll back the locks taken so far and give out a
4708		 * warning.
4709		 */
4710		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
4711		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4712			amdgpu_device_unlock_adev(tmp_adev);
4713		}
4714	}
4715	return -EAGAIN;
4716}
4717
4718static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4719{
4720	struct pci_dev *p = NULL;
4721
4722	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4723			adev->pdev->bus->number, 1);
4724	if (p) {
4725		pm_runtime_enable(&(p->dev));
4726		pm_runtime_resume(&(p->dev));
4727	}
 
 
4728}
4729
4730static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4731{
4732	enum amd_reset_method reset_method;
4733	struct pci_dev *p = NULL;
4734	u64 expires;
4735
4736	/*
4737	 * For now, only BACO and mode1 reset are confirmed to
4738	 * suffer from the audio issue if not properly suspended.
4739	 */
4740	reset_method = amdgpu_asic_reset_method(adev);
4741	if ((reset_method != AMD_RESET_METHOD_BACO) &&
4742	     (reset_method != AMD_RESET_METHOD_MODE1))
4743		return -EINVAL;
4744
4745	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4746			adev->pdev->bus->number, 1);
4747	if (!p)
4748		return -ENODEV;
4749
4750	expires = pm_runtime_autosuspend_expiration(&(p->dev));
4751	if (!expires)
4752		/*
4753		 * If we cannot get the audio device autosuspend delay,
4754		 * a fixed 4s interval will be used. Since 3s is the
4755		 * audio controller's default autosuspend delay setting,
4756		 * the 4s used here is guaranteed to cover it.
4757		 */
4758		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4759
4760	while (!pm_runtime_status_suspended(&(p->dev))) {
4761		if (!pm_runtime_suspend(&(p->dev)))
4762			break;
4763
4764		if (expires < ktime_get_mono_fast_ns()) {
4765			dev_warn(adev->dev, "failed to suspend display audio\n");
 
4766			/* TODO: abort the succeeding gpu reset? */
4767			return -ETIMEDOUT;
4768		}
4769	}
4770
4771	pm_runtime_disable(&(p->dev));
4772
 
4773	return 0;
4774}
4775
4776static void amdgpu_device_recheck_guilty_jobs(
4777	struct amdgpu_device *adev, struct list_head *device_list_handle,
4778	struct amdgpu_reset_context *reset_context)
4779{
4780	int i, r = 0;
4781
4782	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4783		struct amdgpu_ring *ring = adev->rings[i];
4784		int ret = 0;
4785		struct drm_sched_job *s_job;
4786
4787		if (!ring || !ring->sched.thread)
4788			continue;
4789
4790		s_job = list_first_entry_or_null(&ring->sched.pending_list,
4791				struct drm_sched_job, list);
4792		if (s_job == NULL)
4793			continue;
4794
4795		/* clear the job's guilty flag and let the following step decide the real one */
4796		drm_sched_reset_karma(s_job);
4797		drm_sched_resubmit_jobs_ext(&ring->sched, 1);
4798
4799		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
4800		if (ret == 0) { /* timeout */
4801			DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
4802						ring->sched.name, s_job->id);
4803
4804			/* set guilty */
4805			drm_sched_increase_karma(s_job);
4806retry:
4807			/* do hw reset */
4808			if (amdgpu_sriov_vf(adev)) {
4809				amdgpu_virt_fini_data_exchange(adev);
4810				r = amdgpu_device_reset_sriov(adev, false);
4811				if (r)
4812					adev->asic_reset_res = r;
4813			} else {
4814				clear_bit(AMDGPU_SKIP_HW_RESET,
4815					  &reset_context->flags);
4816				r = amdgpu_do_asic_reset(device_list_handle,
4817							 reset_context);
4818				if (r && r == -EAGAIN)
4819					goto retry;
4820			}
4821
4822			/*
4823			 * add reset counter so that the following
4824			 * resubmitted job could flush vmid
4825			 */
4826			atomic_inc(&adev->gpu_reset_counter);
4827			continue;
4828		}
 
4829
4830		/* got the hw fence, signal finished fence */
4831		atomic_dec(ring->sched.score);
4832		dma_fence_get(&s_job->s_fence->finished);
4833		dma_fence_signal(&s_job->s_fence->finished);
4834		dma_fence_put(&s_job->s_fence->finished);
4835
4836		/* remove node from list and free the job */
4837		spin_lock(&ring->sched.job_list_lock);
4838		list_del_init(&s_job->list);
4839		spin_unlock(&ring->sched.job_list_lock);
4840		ring->sched.ops->free_job(s_job);
4841	}
4842}
4843
4844/**
4845 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4846 *
4847 * @adev: amdgpu_device pointer
4848 * @job: which job triggered the hang
4849 *
4850 * Attempt to reset the GPU if it has hung (all asics).
4851 * Attempt to do a soft reset or a full reset and reinitialize the ASIC.
4852 * Returns 0 for success or an error on failure.
4853 */
4854
4855int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4856			      struct amdgpu_job *job)
4857{
4858	struct list_head device_list, *device_list_handle =  NULL;
4859	bool job_signaled = false;
4860	struct amdgpu_hive_info *hive = NULL;
4861	struct amdgpu_device *tmp_adev = NULL;
4862	int i, r = 0;
4863	bool need_emergency_restart = false;
4864	bool audio_suspended = false;
4865	int tmp_vram_lost_counter;
4866	struct amdgpu_reset_context reset_context;
4867
4868	memset(&reset_context, 0, sizeof(reset_context));
4869
4870	/*
4871	 * Special case: RAS triggered and full reset isn't supported
4872	 */
4873	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4874
4875	/*
4876	 * Flush RAM to disk so that after reboot
4877	 * the user can read log and see why the system rebooted.
4878	 */
4879	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
 
4880		DRM_WARN("Emergency reboot.");
4881
4882		ksys_sync_helper();
4883		emergency_restart();
4884	}
4885
4886	dev_info(adev->dev, "GPU %s begin!\n",
4887		need_emergency_restart ? "jobs stop" : "reset");
4888
4889	/*
4890	 * Here we trylock to avoid chain of resets executing from
4891	 * either trigger by jobs on different adevs in XGMI hive or jobs on
4892	 * different schedulers for same device while this TO handler is running.
4893	 * We always reset all schedulers for device and all devices for XGMI
4894	 * hive so that should take care of them too.
4895	 */
4896	hive = amdgpu_get_xgmi_hive(adev);
4897	if (hive) {
4898		if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4899			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4900				job ? job->base.id : -1, hive->hive_id);
4901			amdgpu_put_xgmi_hive(hive);
4902			if (job)
4903				drm_sched_increase_karma(&job->base);
4904			return 0;
4905		}
4906		mutex_lock(&hive->hive_lock);
4907	}
4908
4909	reset_context.method = AMD_RESET_METHOD_NONE;
4910	reset_context.reset_req_dev = adev;
4911	reset_context.job = job;
4912	reset_context.hive = hive;
4913	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
4914
4915	/*
4916	 * Lock the device before we try to operate on the linked list.
4917	 * If we didn't get the device lock, don't touch the linked list
4918	 * since others may be iterating it.
4919	 */
4920	r = amdgpu_device_lock_hive_adev(adev, hive);
4921	if (r) {
4922		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4923					job ? job->base.id : -1);
4924
4925		/* even though we skipped this reset, we still need to set the job as guilty */
4926		if (job)
4927			drm_sched_increase_karma(&job->base);
4928		goto skip_recovery;
4929	}
4930
 
 
4931	/*
4932	 * Build list of devices to reset.
4933	 * In case we are in XGMI hive mode, resort the device list
4934	 * to put adev in the 1st position.
4935	 */
4936	INIT_LIST_HEAD(&device_list);
4937	if (adev->gmc.xgmi.num_physical_nodes > 1) {
4938		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
4939			list_add_tail(&tmp_adev->reset_list, &device_list);
 
 
 
4940		if (!list_is_first(&adev->reset_list, &device_list))
4941			list_rotate_to_front(&adev->reset_list, &device_list);
4942		device_list_handle = &device_list;
4943	} else {
4944		list_add_tail(&adev->reset_list, &device_list);
4945		device_list_handle = &device_list;
4946	}
4947
4948	/* block all schedulers and reset given job's ring */
4949	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
 
 
 
4950		/*
4951		 * Try to put the audio codec into suspend state
4952		 * before gpu reset started.
4953		 *
4954		 * Due to the power domain of the graphics device
4955		 * is shared with AZ power domain. Without this,
4956		 * we may change the audio hardware from behind
4957		 * the audio driver's back. That will trigger
4958		 * some audio codec errors.
4959		 */
4960		if (!amdgpu_device_suspend_display_audio(tmp_adev))
4961			audio_suspended = true;
4962
4963		amdgpu_ras_set_error_query_ready(tmp_adev, false);
4964
4965		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
4966
4967		if (!amdgpu_sriov_vf(tmp_adev))
4968			amdgpu_amdkfd_pre_reset(tmp_adev);
4969
4970		/*
4971		 * Mark these ASICs to be reset as untracked first,
4972		 * and add them back after the reset has completed.
4973		 */
4974		amdgpu_unregister_gpu_instance(tmp_adev);
4975
4976		amdgpu_fbdev_set_suspend(tmp_adev, 1);
4977
4978		/* disable ras on ALL IPs */
4979		if (!need_emergency_restart &&
4980		      amdgpu_device_ip_need_full_reset(tmp_adev))
4981			amdgpu_ras_suspend(tmp_adev);
4982
4983		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4984			struct amdgpu_ring *ring = tmp_adev->rings[i];
4985
4986			if (!ring || !ring->sched.thread)
4987				continue;
4988
4989			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
4990
4991			if (need_emergency_restart)
4992				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
4993		}
4994		atomic_inc(&tmp_adev->gpu_reset_counter);
4995	}
4996
4997	if (need_emergency_restart)
4998		goto skip_sched_resume;
4999
5000	/*
5001	 * Must check guilty signal here since after this point all old
5002	 * HW fences are force signaled.
5003	 *
5004	 * job->base holds a reference to parent fence
5005	 */
5006	if (job && job->base.s_fence->parent &&
5007	    dma_fence_is_signaled(job->base.s_fence->parent)) {
5008		job_signaled = true;
5009		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5010		goto skip_hw_reset;
5011	}
5012
5013retry:	/* Rest of adevs pre asic reset from XGMI hive. */
5014	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5015		r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
5016		/* TODO: Should we stop? */
5017		if (r) {
5018			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5019				  r, adev_to_drm(tmp_adev)->unique);
5020			tmp_adev->asic_reset_res = r;
5021		}
5022	}
5023
5024	tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5025	/* Actual ASIC resets if needed.*/
5026	/* TODO Implement XGMI hive reset logic for SRIOV */
5027	if (amdgpu_sriov_vf(adev)) {
5028		r = amdgpu_device_reset_sriov(adev, job ? false : true);
5029		if (r)
5030			adev->asic_reset_res = r;
5031	} else {
5032		r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
5033		if (r && r == -EAGAIN)
5034			goto retry;
5035	}
5036
5037skip_hw_reset:
5038
5039	/* Post ASIC reset for all devs. */
5040	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5041
5042		/*
5043		 * Sometimes a later bad compute job can block a good gfx job, as the
5044		 * gfx and compute rings share internal GC HW. We add an additional
5045		 * guilty-job recheck step to find the real guilty job: it synchronously
5046		 * resubmits and waits for the first job to signal. If that times out,
5047		 * we identify it as the real guilty job.
5048		 */
5049		if (amdgpu_gpu_recovery == 2 &&
5050			!(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5051			amdgpu_device_recheck_guilty_jobs(
5052				tmp_adev, device_list_handle, &reset_context);
5053
5054		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5055			struct amdgpu_ring *ring = tmp_adev->rings[i];
5056
5057			if (!ring || !ring->sched.thread)
5058				continue;
5059
5060			/* No point in resubmitting jobs if we didn't HW reset */
5061			if (!tmp_adev->asic_reset_res && !job_signaled)
5062				drm_sched_resubmit_jobs(&ring->sched);
5063
5064			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5065		}
5066
5067		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
5068			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5069		}
 
 
5070
5071		tmp_adev->asic_reset_res = 0;
5072
5073		if (r) {
5074			/* bad news, how do we tell it to userspace? */
5075			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5076			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5077		} else {
5078			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5079			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5080				DRM_WARN("smart shift update failed\n");
5081		}
5082	}
5083
5084skip_sched_resume:
5085	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5086		/* unlock kfd: SRIOV would do it separately */
5087		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5088			amdgpu_amdkfd_post_reset(tmp_adev);
5089
5090		/* kfd_post_reset will do nothing if the kfd device is not initialized,
5091		 * so bring up kfd here if it wasn't initialized before
5092		 */
5093		if (!tmp_adev->kfd.init_complete)
5094			amdgpu_amdkfd_device_init(tmp_adev);
5095
5096		if (audio_suspended)
5097			amdgpu_device_resume_display_audio(tmp_adev);
5098		amdgpu_device_unlock_adev(tmp_adev);
 
 
 
5099	}
5100
5101skip_recovery:
5102	if (hive) {
5103		atomic_set(&hive->in_reset, 0);
5104		mutex_unlock(&hive->hive_lock);
5105		amdgpu_put_xgmi_hive(hive);
5106	}
5107
5108	if (r && r != -EAGAIN)
5109		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
 
 
5110	return r;
5111}
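/*
 * A hedged sketch of the usual entry point into
 * amdgpu_device_gpu_recover(): the DRM scheduler's timeout handler
 * (the real one lives in amdgpu_job.c; the example_ name is an
 * assumption):
 *
 *	static enum drm_gpu_sched_stat
 *	example_job_timedout(struct drm_sched_job *s_job)
 *	{
 *		struct amdgpu_job *job = to_amdgpu_job(s_job);
 *		struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 *
 *		if (amdgpu_device_should_recover_gpu(ring->adev)) {
 *			amdgpu_device_gpu_recover(ring->adev, job);
 *			return DRM_GPU_SCHED_STAT_NOMINAL;
 *		}
 *		return DRM_GPU_SCHED_STAT_ENODEV;
 *	}
 */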
5112
5113/**
5114 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5115 *
5116 * @adev: amdgpu_device pointer
5117 *
5118 * Fetches and stores in the driver the PCIE capabilities (gen speed
5119 * and lanes) of the slot the device is in. Handles APUs and
5120 * virtualized environments where PCIE config space may not be available.
5121 */
5122static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5123{
5124	struct pci_dev *pdev;
5125	enum pci_bus_speed speed_cap, platform_speed_cap;
5126	enum pcie_link_width platform_link_width;
5127
5128	if (amdgpu_pcie_gen_cap)
5129		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5130
5131	if (amdgpu_pcie_lane_cap)
5132		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5133
5134	/* covers APUs as well */
5135	if (pci_is_root_bus(adev->pdev->bus)) {
5136		if (adev->pm.pcie_gen_mask == 0)
5137			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5138		if (adev->pm.pcie_mlw_mask == 0)
5139			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5140		return;
5141	}
5142
5143	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5144		return;
5145
5146	pcie_bandwidth_available(adev->pdev, NULL,
5147				 &platform_speed_cap, &platform_link_width);
5148
5149	if (adev->pm.pcie_gen_mask == 0) {
5150		/* asic caps */
5151		pdev = adev->pdev;
5152		speed_cap = pcie_get_speed_cap(pdev);
5153		if (speed_cap == PCI_SPEED_UNKNOWN) {
5154			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5155						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5156						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5157		} else {
5158			if (speed_cap == PCIE_SPEED_32_0GT)
5159				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5160							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5161							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5162							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5163							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5164			else if (speed_cap == PCIE_SPEED_16_0GT)
5165				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5166							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5167							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5168							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5169			else if (speed_cap == PCIE_SPEED_8_0GT)
5170				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5171							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5172							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5173			else if (speed_cap == PCIE_SPEED_5_0GT)
5174				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5175							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5176			else
5177				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5178		}
5179		/* platform caps */
5180		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5181			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5182						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5183		} else {
5184			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5185				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5186							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5187							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5188							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5189							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5190			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5191				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5192							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5193							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5194							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5195			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5196				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5197							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5198							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5199			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5200				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5201							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5202			else
5203				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5204
5205		}
5206	}
5207	if (adev->pm.pcie_mlw_mask == 0) {
5208		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5209			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5210		} else {
5211			switch (platform_link_width) {
5212			case PCIE_LNK_X32:
5213				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5214							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5215							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5216							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5217							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5218							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5219							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5220				break;
5221			case PCIE_LNK_X16:
5222				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5223							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5224							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5225							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5226							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5227							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5228				break;
5229			case PCIE_LNK_X12:
5230				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5231							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5232							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5233							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5234							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5235				break;
5236			case PCIE_LNK_X8:
5237				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5238							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5239							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5240							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5241				break;
5242			case PCIE_LNK_X4:
5243				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5244							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5245							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5246				break;
5247			case PCIE_LNK_X2:
5248				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5249							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5250				break;
5251			case PCIE_LNK_X1:
5252				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5253				break;
5254			default:
5255				break;
5256			}
5257		}
5258	}
5259}
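/*
 * Note: the masks computed above can be overridden with the
 * pcie_gen_cap/pcie_lane_cap module parameters checked at the top of
 * this function. A hypothetical invocation (the mask values are CAIL_*
 * bit combinations and are shown purely for illustration):
 *
 *	modprobe amdgpu pcie_gen_cap=0x00010001 pcie_lane_cap=0x00100010
 */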
5260
5261int amdgpu_device_baco_enter(struct drm_device *dev)
5262{
5263	struct amdgpu_device *adev = drm_to_adev(dev);
5264	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5265
5266	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5267		return -ENOTSUPP;
5268
5269	if (ras && adev->ras_enabled &&
5270	    adev->nbio.funcs->enable_doorbell_interrupt)
5271		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5272
5273	return amdgpu_dpm_baco_enter(adev);
5274}
5275
5276int amdgpu_device_baco_exit(struct drm_device *dev)
5277{
5278	struct amdgpu_device *adev = drm_to_adev(dev);
5279	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5280	int ret = 0;
5281
5282	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5283		return -ENOTSUPP;
5284
5285	ret = amdgpu_dpm_baco_exit(adev);
5286	if (ret)
5287		return ret;
5288
5289	if (ras && adev->ras_enabled &&
5290	    adev->nbio.funcs->enable_doorbell_interrupt)
5291		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5292
5293	return 0;
5294}
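/*
 * A hedged sketch of how the BACO enter/exit pair above is typically
 * consumed by runtime PM; the example_* wrappers are assumptions:
 *
 *	static int example_runtime_suspend(struct drm_device *drm_dev)
 *	{
 *		if (amdgpu_device_supports_baco(drm_dev))
 *			return amdgpu_device_baco_enter(drm_dev);
 *		return -ENOTSUPP;
 *	}
 *
 *	static int example_runtime_resume(struct drm_device *drm_dev)
 *	{
 *		return amdgpu_device_baco_exit(drm_dev);
 *	}
 */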
5295
5296static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
5297{
5298	int i;
5299
5300	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5301		struct amdgpu_ring *ring = adev->rings[i];
5302
5303		if (!ring || !ring->sched.thread)
5304			continue;
5305
5306		cancel_delayed_work_sync(&ring->sched.work_tdr);
5307	}
5308}
5309
5310/**
5311 * amdgpu_pci_error_detected - Called when a PCI error is detected.
5312 * @pdev: PCI device struct
5313 * @state: PCI channel state
5314 *
5315 * Description: Called when a PCI error is detected.
5316 *
5317 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5318 */
5319pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5320{
5321	struct drm_device *dev = pci_get_drvdata(pdev);
5322	struct amdgpu_device *adev = drm_to_adev(dev);
5323	int i;
5324
5325	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5326
5327	if (adev->gmc.xgmi.num_physical_nodes > 1) {
5328		DRM_WARN("No support for XGMI hive yet...");
5329		return PCI_ERS_RESULT_DISCONNECT;
5330	}
5331
5332	adev->pci_channel_state = state;
5333
5334	switch (state) {
5335	case pci_channel_io_normal:
5336		return PCI_ERS_RESULT_CAN_RECOVER;
5337	/* Fatal error, prepare for slot reset */
5338	case pci_channel_io_frozen:
5339		/*
5340		 * Cancel and wait for all TDRs in progress if failing to
5341		 * set adev->in_gpu_reset in amdgpu_device_lock_adev
5342		 *
5343		 * Locking adev->reset_sem will prevent any external access
5344		 * to GPU during PCI error recovery
5345		 */
5346		while (!amdgpu_device_lock_adev(adev, NULL))
5347			amdgpu_cancel_all_tdr(adev);
5348
5349		/*
5350		 * Block any work scheduling as we do for regular GPU reset
5351		 * for the duration of the recovery
5352		 */
5353		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5354			struct amdgpu_ring *ring = adev->rings[i];
5355
5356			if (!ring || !ring->sched.thread)
5357				continue;
5358
5359			drm_sched_stop(&ring->sched, NULL);
5360		}
5361		atomic_inc(&adev->gpu_reset_counter);
5362		return PCI_ERS_RESULT_NEED_RESET;
5363	case pci_channel_io_perm_failure:
5364		/* Permanent error, prepare for device removal */
5365		return PCI_ERS_RESULT_DISCONNECT;
5366	}
5367
5368	return PCI_ERS_RESULT_NEED_RESET;
5369}
5370
5371/**
5372 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5373 * @pdev: pointer to PCI device
5374 */
5375pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5376{
5377
5378	DRM_INFO("PCI error: mmio enabled callback!!\n");
5379
5380	/* TODO - dump whatever for debugging purposes */
5381
5382	/* This is called only if amdgpu_pci_error_detected returns
5383	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5384	 * works, no need to reset slot.
5385	 */
5386
5387	return PCI_ERS_RESULT_RECOVERED;
5388}
5389
5390/**
5391 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5392 * @pdev: PCI device struct
5393 *
5394 * Description: This routine is called by the pci error recovery
5395 * code after the PCI slot has been reset, just before we
5396 * should resume normal operations.
5397 */
5398pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5399{
5400	struct drm_device *dev = pci_get_drvdata(pdev);
5401	struct amdgpu_device *adev = drm_to_adev(dev);
5402	int r, i;
5403	struct amdgpu_reset_context reset_context;
5404	u32 memsize;
5405	struct list_head device_list;
5406
5407	DRM_INFO("PCI error: slot reset callback!!\n");
5408
5409	memset(&reset_context, 0, sizeof(reset_context));
5410
5411	INIT_LIST_HEAD(&device_list);
5412	list_add_tail(&adev->reset_list, &device_list);
5413
5414	/* wait for asic to come out of reset */
5415	msleep(500);
5416
5417	/* Restore PCI config space */
5418	amdgpu_device_load_pci_state(pdev);
5419
5420	/* confirm ASIC came out of reset */
5421	for (i = 0; i < adev->usec_timeout; i++) {
5422		memsize = amdgpu_asic_get_config_memsize(adev);
5423
5424		if (memsize != 0xffffffff)
5425			break;
5426		udelay(1);
5427	}
5428	if (memsize == 0xffffffff) {
5429		r = -ETIME;
5430		goto out;
5431	}
5432
5433	reset_context.method = AMD_RESET_METHOD_NONE;
5434	reset_context.reset_req_dev = adev;
5435	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5436	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5437
5438	adev->no_hw_access = true;
5439	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5440	adev->no_hw_access = false;
5441	if (r)
5442		goto out;
5443
5444	r = amdgpu_do_asic_reset(&device_list, &reset_context);
5445
5446out:
5447	if (!r) {
5448		if (amdgpu_device_cache_pci_state(adev->pdev))
5449			pci_restore_state(adev->pdev);
5450
5451		DRM_INFO("PCIe error recovery succeeded\n");
5452	} else {
5453		DRM_ERROR("PCIe error recovery failed, err:%d", r);
5454		amdgpu_device_unlock_adev(adev);
 
5455	}
5456
5457	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5458}
5459
5460/**
5461 * amdgpu_pci_resume() - resume normal ops after PCI reset
5462 * @pdev: pointer to PCI device
5463 *
5464 * Called when the error recovery driver tells us that it's
5465 * OK to resume normal operation.
5466 */
5467void amdgpu_pci_resume(struct pci_dev *pdev)
5468{
5469	struct drm_device *dev = pci_get_drvdata(pdev);
5470	struct amdgpu_device *adev = drm_to_adev(dev);
5471	int i;
5472
5473
5474	DRM_INFO("PCI error: resume callback!!\n");
5475
5476	/* Only continue execution for the case of pci_channel_io_frozen */
5477	if (adev->pci_channel_state != pci_channel_io_frozen)
5478		return;
5479
5480	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5481		struct amdgpu_ring *ring = adev->rings[i];
5482
5483		if (!ring || !ring->sched.thread)
5484			continue;
5485
5486
5487		drm_sched_resubmit_jobs(&ring->sched);
5488		drm_sched_start(&ring->sched, true);
5489	}
5490
5491	amdgpu_device_unlock_adev(adev);
 
5492}
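/*
 * The four callbacks above are wired into a struct pci_error_handlers
 * in the PCI driver; a sketch of the registration (the variable name
 * is illustrative):
 *
 *	static const struct pci_error_handlers example_pci_err_handler = {
 *		.error_detected	= amdgpu_pci_error_detected,
 *		.mmio_enabled	= amdgpu_pci_mmio_enabled,
 *		.slot_reset	= amdgpu_pci_slot_reset,
 *		.resume		= amdgpu_pci_resume,
 *	};
 */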
5493
5494bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5495{
5496	struct drm_device *dev = pci_get_drvdata(pdev);
5497	struct amdgpu_device *adev = drm_to_adev(dev);
5498	int r;
5499
 
 
 
5500	r = pci_save_state(pdev);
5501	if (!r) {
5502		kfree(adev->pci_state);
5503
5504		adev->pci_state = pci_store_saved_state(pdev);
5505
5506		if (!adev->pci_state) {
5507			DRM_ERROR("Failed to store PCI saved state");
5508			return false;
5509		}
5510	} else {
5511		DRM_WARN("Failed to save PCI state, err:%d\n", r);
5512		return false;
5513	}
5514
5515	return true;
5516}
5517
5518bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5519{
5520	struct drm_device *dev = pci_get_drvdata(pdev);
5521	struct amdgpu_device *adev = drm_to_adev(dev);
5522	int r;
5523
5524	if (!adev->pci_state)
5525		return false;
5526
5527	r = pci_load_saved_state(pdev, adev->pci_state);
5528
5529	if (!r) {
5530		pci_restore_state(pdev);
5531	} else {
5532		DRM_WARN("Failed to load PCI state, err:%d\n", r);
5533		return false;
5534	}
5535
5536	return true;
5537}
5538
5539void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5540		struct amdgpu_ring *ring)
5541{
5542#ifdef CONFIG_X86_64
5543	if (adev->flags & AMD_IS_APU)
5544		return;
5545#endif
5546	if (adev->gmc.xgmi.connected_to_cpu)
5547		return;
5548
5549	if (ring && ring->funcs->emit_hdp_flush)
5550		amdgpu_ring_emit_hdp_flush(ring);
5551	else
5552		amdgpu_asic_flush_hdp(adev, ring);
5553}
5554
5555void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5556		struct amdgpu_ring *ring)
5557{
5558#ifdef CONFIG_X86_64
5559	if (adev->flags & AMD_IS_APU)
5560		return;
5561#endif
5562	if (adev->gmc.xgmi.connected_to_cpu)
5563		return;
5564
5565	amdgpu_asic_invalidate_hdp(adev, ring);
5566}
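/*
 * A hedged usage note for the two HDP helpers above: flush after the
 * CPU writes data the GPU will read through the HDP aperture, and
 * invalidate before the CPU reads data the GPU wrote. Callers pass a
 * ring to emit the operation on that ring, or NULL to do it via MMIO:
 *
 *	amdgpu_device_flush_hdp(adev, NULL);
 *	amdgpu_device_invalidate_hdp(adev, NULL);
 */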