   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
  28#include <linux/power_supply.h>
  29#include <linux/kthread.h>
  30#include <linux/module.h>
  31#include <linux/console.h>
  32#include <linux/slab.h>
  33#include <linux/iommu.h>
  34#include <linux/pci.h>
  35#include <linux/pci-p2pdma.h>
  36#include <linux/apple-gmux.h>
  37
  38#include <drm/drm_aperture.h>
  39#include <drm/drm_atomic_helper.h>
  40#include <drm/drm_crtc_helper.h>
  41#include <drm/drm_fb_helper.h>
  42#include <drm/drm_probe_helper.h>
  43#include <drm/amdgpu_drm.h>
  44#include <linux/device.h>
  45#include <linux/vgaarb.h>
  46#include <linux/vga_switcheroo.h>
  47#include <linux/efi.h>
  48#include "amdgpu.h"
  49#include "amdgpu_trace.h"
  50#include "amdgpu_i2c.h"
  51#include "atom.h"
  52#include "amdgpu_atombios.h"
  53#include "amdgpu_atomfirmware.h"
  54#include "amd_pcie.h"
  55#ifdef CONFIG_DRM_AMDGPU_SI
  56#include "si.h"
  57#endif
  58#ifdef CONFIG_DRM_AMDGPU_CIK
  59#include "cik.h"
  60#endif
  61#include "vi.h"
  62#include "soc15.h"
  63#include "nv.h"
  64#include "bif/bif_4_1_d.h"
  65#include <linux/firmware.h>
  66#include "amdgpu_vf_error.h"
  67
  68#include "amdgpu_amdkfd.h"
  69#include "amdgpu_pm.h"
  70
  71#include "amdgpu_xgmi.h"
  72#include "amdgpu_ras.h"
  73#include "amdgpu_pmu.h"
  74#include "amdgpu_fru_eeprom.h"
  75#include "amdgpu_reset.h"
  76#include "amdgpu_virt.h"
  77
  78#include <linux/suspend.h>
  79#include <drm/task_barrier.h>
  80#include <linux/pm_runtime.h>
  81
  82#include <drm/drm_drv.h>
  83
  84#if IS_ENABLED(CONFIG_X86)
  85#include <asm/intel-family.h>
  86#endif
  87
  88MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
  89MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
  90MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
  91MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
  92MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
  93MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
  94MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
  95
  96#define AMDGPU_RESUME_MS		2000
  97#define AMDGPU_MAX_RETRY_LIMIT		2
  98#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
  99
 100static const struct drm_driver amdgpu_kms_driver;
 101
 102const char *amdgpu_asic_name[] = {
 103	"TAHITI",
 104	"PITCAIRN",
 105	"VERDE",
 106	"OLAND",
 107	"HAINAN",
 108	"BONAIRE",
 109	"KAVERI",
 110	"KABINI",
 111	"HAWAII",
 112	"MULLINS",
 113	"TOPAZ",
 114	"TONGA",
 115	"FIJI",
 116	"CARRIZO",
 117	"STONEY",
 118	"POLARIS10",
 119	"POLARIS11",
 120	"POLARIS12",
 121	"VEGAM",
 122	"VEGA10",
 123	"VEGA12",
 124	"VEGA20",
 125	"RAVEN",
 126	"ARCTURUS",
 127	"RENOIR",
 128	"ALDEBARAN",
 129	"NAVI10",
 130	"CYAN_SKILLFISH",
 131	"NAVI14",
 132	"NAVI12",
 133	"SIENNA_CICHLID",
 134	"NAVY_FLOUNDER",
 135	"VANGOGH",
 136	"DIMGREY_CAVEFISH",
 137	"BEIGE_GOBY",
 138	"YELLOW_CARP",
 139	"IP DISCOVERY",
 140	"LAST",
 141};
 142
 143/**
 144 * DOC: pcie_replay_count
 145 *
 146 * The amdgpu driver provides a sysfs API for reporting the total number
 147 * of PCIe replays (NAKs).
 148 * The file pcie_replay_count is used for this and returns the total
 149 * number of replays as the sum of the NAKs generated and NAKs received.
 150 */
 151
 152static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
 153		struct device_attribute *attr, char *buf)
 154{
 155	struct drm_device *ddev = dev_get_drvdata(dev);
 156	struct amdgpu_device *adev = drm_to_adev(ddev);
 157	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
 158
 159	return sysfs_emit(buf, "%llu\n", cnt);
 160}
 161
 162static DEVICE_ATTR(pcie_replay_count, 0444,
 163		amdgpu_device_get_pcie_replay_count, NULL);
 164
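/*
 * Editor's illustration (not part of the driver): since pcie_replay_count is
 * a plain text sysfs file, userspace can read it without any ioctl. A minimal
 * sketch, assuming the GPU is card0:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long long cnt;
 *		FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
 *
 *		if (!f)
 *			return 1;
 *		if (fscanf(f, "%llu", &cnt) == 1)
 *			printf("PCIe replays: %llu\n", cnt);
 *		fclose(f);
 *		return 0;
 *	}
 */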
 165static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
 166					  struct bin_attribute *attr, char *buf,
 167					  loff_t ppos, size_t count)
 168{
 169	struct device *dev = kobj_to_dev(kobj);
 170	struct drm_device *ddev = dev_get_drvdata(dev);
 171	struct amdgpu_device *adev = drm_to_adev(ddev);
 172	ssize_t bytes_read;
 173
 174	switch (ppos) {
 175	case AMDGPU_SYS_REG_STATE_XGMI:
 176		bytes_read = amdgpu_asic_get_reg_state(
 177			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
 178		break;
 179	case AMDGPU_SYS_REG_STATE_WAFL:
 180		bytes_read = amdgpu_asic_get_reg_state(
 181			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
 182		break;
 183	case AMDGPU_SYS_REG_STATE_PCIE:
 184		bytes_read = amdgpu_asic_get_reg_state(
 185			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
 186		break;
 187	case AMDGPU_SYS_REG_STATE_USR:
 188		bytes_read = amdgpu_asic_get_reg_state(
 189			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
 190		break;
 191	case AMDGPU_SYS_REG_STATE_USR_1:
 192		bytes_read = amdgpu_asic_get_reg_state(
 193			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
 194		break;
 195	default:
 196		return -EINVAL;
 197	}
 198
 199	return bytes_read;
 200}
 201
 202BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
 203	 AMDGPU_SYS_REG_STATE_END);
 204
 205int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
 206{
 207	int ret;
 208
 209	if (!amdgpu_asic_get_reg_state_supported(adev))
 210		return 0;
 211
 212	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
 213
 214	return ret;
 215}
 216
 217void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
 218{
 219	if (!amdgpu_asic_get_reg_state_supported(adev))
 220		return;
 221	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
 222}
 223
 224/**
 225 * DOC: board_info
 226 *
 227 * The amdgpu driver provides a sysfs API for reporting board related information.
 228 * It provides the form factor information in the format
 229 *
 230 *   type : form factor
 231 *
 232 * Possible form factor values
 233 *
 234 * - "cem"		- PCIe CEM card
 235 * - "oam"		- Open Compute Accelerator Module
 236 * - "unknown"	- Not known
 237 *
 238 */
 239
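/*
 * Editor's note: concretely, /sys/class/drm/card0/device/board_info contains
 * the single line "type : oam" on an OAM part and "type : cem" on a regular
 * add-in card (assuming card0 is the amdgpu device).
 */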
 240static ssize_t amdgpu_device_get_board_info(struct device *dev,
 241					    struct device_attribute *attr,
 242					    char *buf)
 243{
 244	struct drm_device *ddev = dev_get_drvdata(dev);
 245	struct amdgpu_device *adev = drm_to_adev(ddev);
 246	enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
 247	const char *pkg;
 248
 249	if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
 250		pkg_type = adev->smuio.funcs->get_pkg_type(adev);
 251
 252	switch (pkg_type) {
 253	case AMDGPU_PKG_TYPE_CEM:
 254		pkg = "cem";
 255		break;
 256	case AMDGPU_PKG_TYPE_OAM:
 257		pkg = "oam";
 258		break;
 259	default:
 260		pkg = "unknown";
 261		break;
 262	}
 263
 264	return sysfs_emit(buf, "%s : %s\n", "type", pkg);
 265}
 266
 267static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);
 268
 269static struct attribute *amdgpu_board_attrs[] = {
 270	&dev_attr_board_info.attr,
 271	NULL,
 272};
 273
 274static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
 275					     struct attribute *attr, int n)
 276{
 277	struct device *dev = kobj_to_dev(kobj);
 278	struct drm_device *ddev = dev_get_drvdata(dev);
 279	struct amdgpu_device *adev = drm_to_adev(ddev);
 280
 281	if (adev->flags & AMD_IS_APU)
 282		return 0;
 283
 284	return attr->mode;
 285}
 286
 287static const struct attribute_group amdgpu_board_attrs_group = {
 288	.attrs = amdgpu_board_attrs,
 289	.is_visible = amdgpu_board_attrs_is_visible
 290};
 291
 292static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 293
 294
 295/**
 296 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 297 *
 298 * @dev: drm_device pointer
 299 *
 300 * Returns true if the device is a dGPU with ATPX power control,
 301 * otherwise return false.
 302 */
 303bool amdgpu_device_supports_px(struct drm_device *dev)
 304{
 305	struct amdgpu_device *adev = drm_to_adev(dev);
 306
 307	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
 308		return true;
 309	return false;
 310}
 311
 312/**
 313 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 314 *
 315 * @dev: drm_device pointer
 316 *
 317 * Returns true if the device is a dGPU with ACPI power control,
 318 * otherwise return false.
 319 */
 320bool amdgpu_device_supports_boco(struct drm_device *dev)
 321{
 322	struct amdgpu_device *adev = drm_to_adev(dev);
 323
 324	if (adev->has_pr3 ||
 325	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
 326		return true;
 327	return false;
 328}
 329
 330/**
 331 * amdgpu_device_supports_baco - Does the device support BACO
 332 *
 333 * @dev: drm_device pointer
 334 *
 335 * Returns true if the device supports BACO,
 336 * otherwise return false.
 337 */
 338bool amdgpu_device_supports_baco(struct drm_device *dev)
 339{
 340	struct amdgpu_device *adev = drm_to_adev(dev);
 341
 342	return amdgpu_asic_supports_baco(adev);
 343}
 344
 345/**
 346 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 347 * Smart Shift support
 348 *
 349 * @dev: drm_device pointer
 350 *
 351 * Returns true if the device is a dGPU with Smart Shift support,
 352 * otherwise returns false.
 353 */
 354bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
 355{
 356	return (amdgpu_device_supports_boco(dev) &&
 357		amdgpu_acpi_is_power_shift_control_supported());
 358}
 359
 360/*
 361 * VRAM access helper functions
 362 */
 363
 364/**
 365 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 366 *
 367 * @adev: amdgpu_device pointer
 368 * @pos: offset of the buffer in vram
 369 * @buf: virtual address of the buffer in system memory
 370 * @size: read/write size; the buffer at @buf must be at least @size bytes
 371 * @write: true - write to vram, otherwise - read from vram
 372 */
 373void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
 374			     void *buf, size_t size, bool write)
 375{
 376	unsigned long flags;
 377	uint32_t hi = ~0, tmp = 0;
 378	uint32_t *data = buf;
 379	uint64_t last;
 380	int idx;
 381
 382	if (!drm_dev_enter(adev_to_drm(adev), &idx))
 383		return;
 384
 385	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
 386
 387	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 388	for (last = pos + size; pos < last; pos += 4) {
 389		tmp = pos >> 31;
 390
 391		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
 392		if (tmp != hi) {
 393			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
 394			hi = tmp;
 395		}
 396		if (write)
 397			WREG32_NO_KIQ(mmMM_DATA, *data++);
 398		else
 399			*data++ = RREG32_NO_KIQ(mmMM_DATA);
 400	}
 401
 402	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 403	drm_dev_exit(idx);
 404}
 405
 406/**
 407 * amdgpu_device_aper_access - access vram by the vram aperture
 408 *
 409 * @adev: amdgpu_device pointer
 410 * @pos: offset of the buffer in vram
 411 * @buf: virtual address of the buffer in system memory
 412 * @size: read/write size; the buffer at @buf must be at least @size bytes
 413 * @write: true - write to vram, otherwise - read from vram
 414 *
 415 * Returns the number of bytes transferred.
 416 */
 417size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
 418				 void *buf, size_t size, bool write)
 419{
 420#ifdef CONFIG_64BIT
 421	void __iomem *addr;
 422	size_t count = 0;
 423	uint64_t last;
 424
 425	if (!adev->mman.aper_base_kaddr)
 426		return 0;
 427
 428	last = min(pos + size, adev->gmc.visible_vram_size);
 429	if (last > pos) {
 430		addr = adev->mman.aper_base_kaddr + pos;
 431		count = last - pos;
 432
 433		if (write) {
 434			memcpy_toio(addr, buf, count);
 435			/* Make sure HDP write cache flush happens without any reordering
 436			 * after the system memory contents are sent over PCIe device
 437			 */
 438			mb();
 439			amdgpu_device_flush_hdp(adev, NULL);
 440		} else {
 441			amdgpu_device_invalidate_hdp(adev, NULL);
 442			/* Make sure HDP read cache is invalidated before issuing a read
 443			 * to the PCIe device
 444			 */
 445			mb();
 446			memcpy_fromio(buf, addr, count);
 447		}
 448
 449	}
 450
 451	return count;
 452#else
 453	return 0;
 454#endif
 455}
 456
 457/**
 458 * amdgpu_device_vram_access - read/write a buffer in vram
 459 *
 460 * @adev: amdgpu_device pointer
 461 * @pos: offset of the buffer in vram
 462 * @buf: virtual address of the buffer in system memory
 463 * @size: read/write size; the buffer at @buf must be at least @size bytes
 464 * @write: true - write to vram, otherwise - read from vram
 465 */
 466void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 467			       void *buf, size_t size, bool write)
 468{
 469	size_t count;
 470
 471	/* try using the vram aperture to access vram first */
 472	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
 473	size -= count;
 474	if (size) {
 475		/* use MM_INDEX/MM_DATA to access the rest of vram */
 476		pos += count;
 477		buf += count;
 478		amdgpu_device_mm_access(adev, pos, buf, size, write);
 479	}
 480}
 481
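/*
 * Usage sketch (editor's illustration): write one dword into VRAM and read it
 * back. @pos and @size must stay dword aligned, since the MM_INDEX/MM_DATA
 * fallback BUG()s on unaligned accesses:
 *
 *	uint32_t probe = 0xdeadbeef, readback = 0;
 *
 *	amdgpu_device_vram_access(adev, offset, &probe, sizeof(probe), true);
 *	amdgpu_device_vram_access(adev, offset, &readback, sizeof(readback), false);
 *	WARN_ON(readback != probe);
 */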
 482/*
 483 * register access helper functions.
 484 */
 485
 486/* Check if hw access should be skipped because of hotplug or device error */
 487bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
 488{
 489	if (adev->no_hw_access)
 490		return true;
 491
 492#ifdef CONFIG_LOCKDEP
 493	/*
 494	 * This is a bit complicated to understand, so worth a comment. What we assert
 495	 * here is that the GPU reset is not running on another thread in parallel.
 496	 *
 497	 * For this we trylock the read side of the reset semaphore; if that succeeds
 498	 * we know that the reset is not running in parallel.
 499	 *
 500	 * If the trylock fails we assert that we are either already holding the read
 501	 * side of the lock or are the reset thread itself and hold the write side of
 502	 * the lock.
 503	 */
 504	if (in_task()) {
 505		if (down_read_trylock(&adev->reset_domain->sem))
 506			up_read(&adev->reset_domain->sem);
 507		else
 508			lockdep_assert_held(&adev->reset_domain->sem);
 509	}
 510#endif
 511	return false;
 512}
 513
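/*
 * Editor's note: the lockdep check above encodes the expected calling
 * convention. A task that touches the hardware while a reset may be pending
 * either holds the read side of the reset semaphore or is the reset thread
 * itself holding the write side. A sketch of the former:
 *
 *	if (down_read_trylock(&adev->reset_domain->sem)) {
 *		val = RREG32(reg);
 *		up_read(&adev->reset_domain->sem);
 *	}
 */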
 514/**
 515 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 516 *
 517 * @adev: amdgpu_device pointer
 518 * @reg: dword aligned register offset
 519 * @acc_flags: access flags which require special behavior
 520 *
 521 * Returns the 32 bit value from the offset specified.
 522 */
 523uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
 524			    uint32_t reg, uint32_t acc_flags)
 525{
 526	uint32_t ret;
 527
 528	if (amdgpu_device_skip_hw_access(adev))
 529		return 0;
 530
 531	if ((reg * 4) < adev->rmmio_size) {
 532		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 533		    amdgpu_sriov_runtime(adev) &&
 534		    down_read_trylock(&adev->reset_domain->sem)) {
 535			ret = amdgpu_kiq_rreg(adev, reg, 0);
 536			up_read(&adev->reset_domain->sem);
 537		} else {
 538			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 539		}
 540	} else {
 541		ret = adev->pcie_rreg(adev, reg * 4);
 542	}
 543
 544	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
 545
 546	return ret;
 547}
 548
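/*
 * Editor's note: driver code rarely calls amdgpu_device_rreg() directly; it
 * normally goes through the register macros from amdgpu.h, which (as of this
 * version) expand roughly to:
 *
 *	#define RREG32(reg)        amdgpu_device_rreg(adev, (reg), 0)
 *	#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
 */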
 549/*
 550 * MMIO register read with byte access helper functions
 551 * @offset: byte offset from MMIO start
 552 */
 553
 554/**
 555 * amdgpu_mm_rreg8 - read a memory mapped IO register
 556 *
 557 * @adev: amdgpu_device pointer
 558 * @offset: byte aligned register offset
 559 *
 560 * Returns the 8 bit value from the offset specified.
 561 */
 562uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
 563{
 564	if (amdgpu_device_skip_hw_access(adev))
 565		return 0;
 566
 567	if (offset < adev->rmmio_size)
 568		return (readb(adev->rmmio + offset));
 569	BUG();
 570}
 571
 572
 573/**
 574 * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
 575 *
 576 * @adev: amdgpu_device pointer
 577 * @reg: dword aligned register offset
 578 * @acc_flags: access flags which require special behavior
 579 * @xcc_id: xcc accelerated compute core id
 580 *
 581 * Returns the 32 bit value from the offset specified.
 582 */
 583uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
 584				uint32_t reg, uint32_t acc_flags,
 585				uint32_t xcc_id)
 586{
 587	uint32_t ret, rlcg_flag;
 588
 589	if (amdgpu_device_skip_hw_access(adev))
 590		return 0;
 591
 592	if ((reg * 4) < adev->rmmio_size) {
 593		if (amdgpu_sriov_vf(adev) &&
 594		    !amdgpu_sriov_runtime(adev) &&
 595		    adev->gfx.rlc.rlcg_reg_access_supported &&
 596		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
 597							 GC_HWIP, false,
 598							 &rlcg_flag)) {
 599			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id);
 600		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 601		    amdgpu_sriov_runtime(adev) &&
 602		    down_read_trylock(&adev->reset_domain->sem)) {
 603			ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
 604			up_read(&adev->reset_domain->sem);
 605		} else {
 606			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 607		}
 608	} else {
 609		ret = adev->pcie_rreg(adev, reg * 4);
 610	}
 611
 612	return ret;
 613}
 614
 615/*
 616 * MMIO register write with byte access helper functions
 617 * @offset: byte offset from MMIO start
 618 * @value: the value to be written to the register
 619 */
 620
 621/**
 622 * amdgpu_mm_wreg8 - write a memory mapped IO register
 623 *
 624 * @adev: amdgpu_device pointer
 625 * @offset: byte aligned register offset
 626 * @value: 8 bit value to write
 627 *
 628 * Writes the value specified to the offset specified.
 629 */
 630void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
 631{
 632	if (amdgpu_device_skip_hw_access(adev))
 633		return;
 634
 635	if (offset < adev->rmmio_size)
 636		writeb(value, adev->rmmio + offset);
 637	else
 638		BUG();
 639}
 640
 641/**
 642 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 643 *
 644 * @adev: amdgpu_device pointer
 645 * @reg: dword aligned register offset
 646 * @v: 32 bit value to write to the register
 647 * @acc_flags: access flags which require special behavior
 648 *
 649 * Writes the value specified to the offset specified.
 650 */
 651void amdgpu_device_wreg(struct amdgpu_device *adev,
 652			uint32_t reg, uint32_t v,
 653			uint32_t acc_flags)
 654{
 655	if (amdgpu_device_skip_hw_access(adev))
 656		return;
 657
 658	if ((reg * 4) < adev->rmmio_size) {
 659		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 660		    amdgpu_sriov_runtime(adev) &&
 661		    down_read_trylock(&adev->reset_domain->sem)) {
 662			amdgpu_kiq_wreg(adev, reg, v, 0);
 663			up_read(&adev->reset_domain->sem);
 664		} else {
 665			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 666		}
 667	} else {
 668		adev->pcie_wreg(adev, reg * 4, v);
 669	}
 670
 671	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
 672}
 673
 674/**
 675 * amdgpu_mm_wreg_mmio_rlc - write a register either with direct/indirect mmio or with the RLC path if in range
 676 *
 677 * @adev: amdgpu_device pointer
 678 * @reg: mmio/rlc register
 679 * @v: value to write
 680 * @xcc_id: xcc accelerated compute core id
 681 *
 682 * This function is invoked only for debugfs register access.
 683 */
 684void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
 685			     uint32_t reg, uint32_t v,
 686			     uint32_t xcc_id)
 687{
 688	if (amdgpu_device_skip_hw_access(adev))
 689		return;
 690
 691	if (amdgpu_sriov_fullaccess(adev) &&
 692	    adev->gfx.rlc.funcs &&
 693	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
 694		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
 695			return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
 696	} else if ((reg * 4) >= adev->rmmio_size) {
 697		adev->pcie_wreg(adev, reg * 4, v);
 698	} else {
 699		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 700	}
 701}
 702
 703/**
 704 * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
 705 *
 706 * @adev: amdgpu_device pointer
 707 * @reg: dword aligned register offset
 708 * @v: 32 bit value to write to the register
 709 * @acc_flags: access flags which require special behavior
 710 * @xcc_id: xcc accelerated compute core id
 711 *
 712 * Writes the value specified to the offset specified.
 713 */
 714void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
 715			uint32_t reg, uint32_t v,
 716			uint32_t acc_flags, uint32_t xcc_id)
 717{
 718	uint32_t rlcg_flag;
 719
 720	if (amdgpu_device_skip_hw_access(adev))
 721		return;
 722
 723	if ((reg * 4) < adev->rmmio_size) {
 724		if (amdgpu_sriov_vf(adev) &&
 725		    !amdgpu_sriov_runtime(adev) &&
 726		    adev->gfx.rlc.rlcg_reg_access_supported &&
 727		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
 728							 GC_HWIP, true,
 729							 &rlcg_flag)) {
 730			amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id);
 731		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 732		    amdgpu_sriov_runtime(adev) &&
 733		    down_read_trylock(&adev->reset_domain->sem)) {
 734			amdgpu_kiq_wreg(adev, reg, v, xcc_id);
 735			up_read(&adev->reset_domain->sem);
 736		} else {
 737			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 738		}
 739	} else {
 740		adev->pcie_wreg(adev, reg * 4, v);
 741	}
 742}
 743
 744/**
 745 * amdgpu_device_indirect_rreg - read an indirect register
 746 *
 747 * @adev: amdgpu_device pointer
 748 * @reg_addr: indirect register address to read from
 749 *
 750 * Returns the value of indirect register @reg_addr
 751 */
 752u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
 753				u32 reg_addr)
 754{
 755	unsigned long flags, pcie_index, pcie_data;
 756	void __iomem *pcie_index_offset;
 757	void __iomem *pcie_data_offset;
 758	u32 r;
 759
 760	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 761	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 762
 763	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 764	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 765	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 766
 767	writel(reg_addr, pcie_index_offset);
 768	readl(pcie_index_offset);
 769	r = readl(pcie_data_offset);
 770	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 771
 772	return r;
 773}
 774
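/*
 * Editor's note: this is the classic index/data pair pattern. The target
 * address is written to the PCIE_INDEX register, PCIE_INDEX is read back to
 * flush the posted write, and the value is then accessed through PCIE_DATA,
 * all under pcie_idx_lock. The _ext and 64 bit variants below perform the
 * same sequence with an extra high index register and/or a second data
 * access.
 */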
 775u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
 776				    u64 reg_addr)
 777{
 778	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
 779	u32 r;
 780	void __iomem *pcie_index_offset;
 781	void __iomem *pcie_index_hi_offset;
 782	void __iomem *pcie_data_offset;
 783
 784	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 785	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 786	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
 787		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
 788	else
 789		pcie_index_hi = 0;
 790
 791	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 792	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 793	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 794	if (pcie_index_hi != 0)
 795		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
 796				pcie_index_hi * 4;
 797
 798	writel(reg_addr, pcie_index_offset);
 799	readl(pcie_index_offset);
 800	if (pcie_index_hi != 0) {
 801		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 802		readl(pcie_index_hi_offset);
 803	}
 804	r = readl(pcie_data_offset);
 805
 806	/* clear the high bits */
 807	if (pcie_index_hi != 0) {
 808		writel(0, pcie_index_hi_offset);
 809		readl(pcie_index_hi_offset);
 810	}
 811
 812	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 813
 814	return r;
 815}
 816
 817/**
 818 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
 819 *
 820 * @adev: amdgpu_device pointer
 821 * @reg_addr: indirect register address to read from
 822 *
 823 * Returns the value of indirect register @reg_addr
 824 */
 825u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
 826				  u32 reg_addr)
 827{
 828	unsigned long flags, pcie_index, pcie_data;
 829	void __iomem *pcie_index_offset;
 830	void __iomem *pcie_data_offset;
 831	u64 r;
 832
 833	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 834	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 835
 836	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 837	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 838	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 839
 840	/* read low 32 bits */
 841	writel(reg_addr, pcie_index_offset);
 842	readl(pcie_index_offset);
 843	r = readl(pcie_data_offset);
 844	/* read high 32 bits */
 845	writel(reg_addr + 4, pcie_index_offset);
 846	readl(pcie_index_offset);
 847	r |= ((u64)readl(pcie_data_offset) << 32);
 848	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 849
 850	return r;
 851}
 852
 853u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
 854				  u64 reg_addr)
 855{
 856	unsigned long flags, pcie_index, pcie_data;
 857	unsigned long pcie_index_hi = 0;
 858	void __iomem *pcie_index_offset;
 859	void __iomem *pcie_index_hi_offset;
 860	void __iomem *pcie_data_offset;
 861	u64 r;
 862
 863	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 864	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 865	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
 866		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
 867
 868	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 869	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 870	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 871	if (pcie_index_hi != 0)
 872		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
 873			pcie_index_hi * 4;
 874
 875	/* read low 32 bits */
 876	writel(reg_addr, pcie_index_offset);
 877	readl(pcie_index_offset);
 878	if (pcie_index_hi != 0) {
 879		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 880		readl(pcie_index_hi_offset);
 881	}
 882	r = readl(pcie_data_offset);
 883	/* read high 32 bits */
 884	writel(reg_addr + 4, pcie_index_offset);
 885	readl(pcie_index_offset);
 886	if (pcie_index_hi != 0) {
 887		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 888		readl(pcie_index_hi_offset);
 889	}
 890	r |= ((u64)readl(pcie_data_offset) << 32);
 891
 892	/* clear the high bits */
 893	if (pcie_index_hi != 0) {
 894		writel(0, pcie_index_hi_offset);
 895		readl(pcie_index_hi_offset);
 896	}
 897
 898	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 899
 900	return r;
 901}
 902
 903/**
 904 * amdgpu_device_indirect_wreg - write to an indirect register
 905 *
 906 * @adev: amdgpu_device pointer
 907 * @reg_addr: indirect register offset
 908 * @reg_data: indirect register data
 909 *
 910 */
 911void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
 912				 u32 reg_addr, u32 reg_data)
 913{
 914	unsigned long flags, pcie_index, pcie_data;
 915	void __iomem *pcie_index_offset;
 916	void __iomem *pcie_data_offset;
 917
 918	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 919	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 920
 921	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 922	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 923	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 924
 925	writel(reg_addr, pcie_index_offset);
 926	readl(pcie_index_offset);
 927	writel(reg_data, pcie_data_offset);
 928	readl(pcie_data_offset);
 929	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 930}
 931
 932void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
 933				     u64 reg_addr, u32 reg_data)
 934{
 935	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
 936	void __iomem *pcie_index_offset;
 937	void __iomem *pcie_index_hi_offset;
 938	void __iomem *pcie_data_offset;
 939
 940	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 941	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 942	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
 943		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
 944	else
 945		pcie_index_hi = 0;
 946
 947	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 948	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 949	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 950	if (pcie_index_hi != 0)
 951		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
 952				pcie_index_hi * 4;
 953
 954	writel(reg_addr, pcie_index_offset);
 955	readl(pcie_index_offset);
 956	if (pcie_index_hi != 0) {
 957		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 958		readl(pcie_index_hi_offset);
 959	}
 960	writel(reg_data, pcie_data_offset);
 961	readl(pcie_data_offset);
 962
 963	/* clear the high bits */
 964	if (pcie_index_hi != 0) {
 965		writel(0, pcie_index_hi_offset);
 966		readl(pcie_index_hi_offset);
 967	}
 968
 969	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 970}
 971
 972/**
 973 * amdgpu_device_indirect_wreg64 - write to a 64 bit indirect register
 974 *
 975 * @adev: amdgpu_device pointer
 976 * @reg_addr: indirect register offset
 977 * @reg_data: indirect register data
 978 *
 979 */
 980void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
 981				   u32 reg_addr, u64 reg_data)
 982{
 983	unsigned long flags, pcie_index, pcie_data;
 984	void __iomem *pcie_index_offset;
 985	void __iomem *pcie_data_offset;
 986
 987	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 988	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 989
 990	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 991	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 992	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 993
 994	/* write low 32 bits */
 995	writel(reg_addr, pcie_index_offset);
 996	readl(pcie_index_offset);
 997	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
 998	readl(pcie_data_offset);
 999	/* write high 32 bits */
1000	writel(reg_addr + 4, pcie_index_offset);
1001	readl(pcie_index_offset);
1002	writel((u32)(reg_data >> 32), pcie_data_offset);
1003	readl(pcie_data_offset);
1004	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1005}
1006
1007void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
1008				   u64 reg_addr, u64 reg_data)
1009{
1010	unsigned long flags, pcie_index, pcie_data;
1011	unsigned long pcie_index_hi = 0;
1012	void __iomem *pcie_index_offset;
1013	void __iomem *pcie_index_hi_offset;
1014	void __iomem *pcie_data_offset;
1015
1016	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1017	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1018	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
1019		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
1020
1021	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1022	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1023	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1024	if (pcie_index_hi != 0)
1025		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
1026				pcie_index_hi * 4;
1027
1028	/* write low 32 bits */
1029	writel(reg_addr, pcie_index_offset);
1030	readl(pcie_index_offset);
1031	if (pcie_index_hi != 0) {
1032		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1033		readl(pcie_index_hi_offset);
1034	}
1035	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
1036	readl(pcie_data_offset);
1037	/* write high 32 bits */
1038	writel(reg_addr + 4, pcie_index_offset);
1039	readl(pcie_index_offset);
1040	if (pcie_index_hi != 0) {
1041		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1042		readl(pcie_index_hi_offset);
1043	}
1044	writel((u32)(reg_data >> 32), pcie_data_offset);
1045	readl(pcie_data_offset);
1046
1047	/* clear the high bits */
1048	if (pcie_index_hi != 0) {
1049		writel(0, pcie_index_hi_offset);
1050		readl(pcie_index_hi_offset);
1051	}
1052
1053	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1054}
1055
1056/**
1057 * amdgpu_device_get_rev_id - query device rev_id
1058 *
1059 * @adev: amdgpu_device pointer
1060 *
1061 * Returns the device rev_id.
1062 */
1063u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
1064{
1065	return adev->nbio.funcs->get_rev_id(adev);
1066}
1067
1068/**
1069 * amdgpu_invalid_rreg - dummy reg read function
1070 *
1071 * @adev: amdgpu_device pointer
1072 * @reg: offset of register
1073 *
1074 * Dummy register read function.  Used for register blocks
1075 * that certain asics don't have (all asics).
1076 * Returns the value in the register.
1077 */
1078static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
1079{
1080	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
1081	BUG();
1082	return 0;
1083}
1084
1085static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
1086{
1087	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
1088	BUG();
1089	return 0;
1090}
1091
1092/**
1093 * amdgpu_invalid_wreg - dummy reg write function
1094 *
1095 * @adev: amdgpu_device pointer
1096 * @reg: offset of register
1097 * @v: value to write to the register
1098 *
1099 * Dummy register write function.  Used for register blocks
1100 * that certain asics don't have (all asics).
1101 */
1102static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
1103{
1104	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
1105		  reg, v);
1106	BUG();
1107}
1108
1109static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
1110{
1111	DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
1112		  reg, v);
1113	BUG();
1114}
1115
1116/**
1117 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
1118 *
1119 * @adev: amdgpu_device pointer
1120 * @reg: offset of register
1121 *
1122 * Dummy register read function.  Used for register blocks
1123 * that certain asics don't have (all asics).
1124 * Returns the value in the register.
1125 */
1126static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
1127{
1128	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
1129	BUG();
1130	return 0;
1131}
1132
1133static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
1134{
1135	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
1136	BUG();
1137	return 0;
1138}
1139
1140/**
1141 * amdgpu_invalid_wreg64 - dummy reg write function
1142 *
1143 * @adev: amdgpu_device pointer
1144 * @reg: offset of register
1145 * @v: value to write to the register
1146 *
1147 * Dummy register write function.  Used for register blocks
1148 * that certain asics don't have (all asics).
1149 */
1150static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
1151{
1152	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
1153		  reg, v);
1154	BUG();
1155}
1156
1157static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
1158{
1159	DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
1160		  reg, v);
1161	BUG();
1162}
1163
1164/**
1165 * amdgpu_block_invalid_rreg - dummy reg read function
1166 *
1167 * @adev: amdgpu_device pointer
1168 * @block: offset of instance
1169 * @reg: offset of register
1170 *
1171 * Dummy register read function.  Used for register blocks
1172 * that certain asics don't have (all asics).
1173 * Returns the value in the register.
1174 */
1175static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
1176					  uint32_t block, uint32_t reg)
1177{
1178	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
1179		  reg, block);
1180	BUG();
1181	return 0;
1182}
1183
1184/**
1185 * amdgpu_block_invalid_wreg - dummy reg write function
1186 *
1187 * @adev: amdgpu_device pointer
1188 * @block: offset of instance
1189 * @reg: offset of register
1190 * @v: value to write to the register
1191 *
1192 * Dummy register write function.  Used for register blocks
1193 * that certain asics don't have (all asics).
1194 */
1195static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
1196				      uint32_t block,
1197				      uint32_t reg, uint32_t v)
1198{
1199	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
1200		  reg, block, v);
1201	BUG();
1202}
1203
1204/**
1205 * amdgpu_device_asic_init - Wrapper for atom asic_init
1206 *
1207 * @adev: amdgpu_device pointer
1208 *
1209 * Does any asic specific work and then calls atom asic init.
1210 */
1211static int amdgpu_device_asic_init(struct amdgpu_device *adev)
1212{
1213	int ret;
1214
1215	amdgpu_asic_pre_asic_init(adev);
1216
1217	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1218	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
1219		amdgpu_psp_wait_for_bootloader(adev);
1220		ret = amdgpu_atomfirmware_asic_init(adev, true);
1221		/* TODO: check the return val and stop device initialization if boot fails */
1222		amdgpu_psp_query_boot_status(adev);
1223		return ret;
1224	} else {
1225		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
1226	}
1227
1228	return 0;
1229}
1230
1231/**
1232 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
1233 *
1234 * @adev: amdgpu_device pointer
1235 *
1236 * Allocates a scratch page of VRAM for use by various things in the
1237 * driver.
1238 */
1239static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
1240{
1241	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
1242				       AMDGPU_GEM_DOMAIN_VRAM |
1243				       AMDGPU_GEM_DOMAIN_GTT,
1244				       &adev->mem_scratch.robj,
1245				       &adev->mem_scratch.gpu_addr,
1246				       (void **)&adev->mem_scratch.ptr);
1247}
1248
1249/**
1250 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
1251 *
1252 * @adev: amdgpu_device pointer
1253 *
1254 * Frees the VRAM scratch page.
1255 */
1256static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
1257{
1258	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
1259}
1260
1261/**
1262 * amdgpu_device_program_register_sequence - program an array of registers.
1263 *
1264 * @adev: amdgpu_device pointer
1265 * @registers: pointer to the register array
1266 * @array_size: size of the register array
1267 *
1268 * Programs an array of registers with AND/OR masks.
1269 * This is a helper for setting golden registers.
1270 */
1271void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
1272					     const u32 *registers,
1273					     const u32 array_size)
1274{
1275	u32 tmp, reg, and_mask, or_mask;
1276	int i;
1277
1278	if (array_size % 3)
1279		return;
1280
1281	for (i = 0; i < array_size; i += 3) {
1282		reg = registers[i + 0];
1283		and_mask = registers[i + 1];
1284		or_mask = registers[i + 2];
1285
1286		if (and_mask == 0xffffffff) {
1287			tmp = or_mask;
1288		} else {
1289			tmp = RREG32(reg);
1290			tmp &= ~and_mask;
1291			if (adev->family >= AMDGPU_FAMILY_AI)
1292				tmp |= (or_mask & and_mask);
1293			else
1294				tmp |= or_mask;
1295		}
1296		WREG32(reg, tmp);
1297	}
1298}
1299
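/*
 * Editor's illustration: the register array is consumed as {offset, and_mask,
 * or_mask} triples. An and_mask of 0xffffffff writes or_mask directly;
 * anything else is a read-modify-write. A hypothetical table (mmFOO and mmBAR
 * are placeholders, not real registers):
 *
 *	static const u32 example_golden_regs[] = {
 *		mmFOO, 0xffffffff, 0x00000001,	// direct write
 *		mmBAR, 0x0000000f, 0x00000002,	// RMW of the low nibble
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_regs,
 *						ARRAY_SIZE(example_golden_regs));
 */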
1300/**
1301 * amdgpu_device_pci_config_reset - reset the GPU
1302 *
1303 * @adev: amdgpu_device pointer
1304 *
1305 * Resets the GPU using the pci config reset sequence.
1306 * Only applicable to asics prior to vega10.
1307 */
1308void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1309{
1310	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1311}
1312
1313/**
1314 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1315 *
1316 * @adev: amdgpu_device pointer
1317 *
1318 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1319 */
1320int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1321{
1322	return pci_reset_function(adev->pdev);
1323}
1324
1325/*
1326 * amdgpu_device_wb_*()
1327 * Writeback is the method by which the GPU updates special pages in memory
1328 * with the status of certain GPU events (fences, ring pointers, etc.).
1329 */
1330
1331/**
1332 * amdgpu_device_wb_fini - Disable Writeback and free memory
1333 *
1334 * @adev: amdgpu_device pointer
1335 *
1336 * Disables Writeback and frees the Writeback memory (all asics).
1337 * Used at driver shutdown.
1338 */
1339static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1340{
1341	if (adev->wb.wb_obj) {
1342		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1343				      &adev->wb.gpu_addr,
1344				      (void **)&adev->wb.wb);
1345		adev->wb.wb_obj = NULL;
1346	}
1347}
1348
1349/**
1350 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1351 *
1352 * @adev: amdgpu_device pointer
1353 *
1354 * Initializes writeback and allocates writeback memory (all asics).
1355 * Used at driver startup.
1356 * Returns 0 on success or a negative error code on failure.
1357 */
1358static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1359{
1360	int r;
1361
1362	if (adev->wb.wb_obj == NULL) {
1363		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1364		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1365					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1366					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1367					    (void **)&adev->wb.wb);
1368		if (r) {
1369			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1370			return r;
1371		}
1372
1373		adev->wb.num_wb = AMDGPU_MAX_WB;
1374		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1375
1376		/* clear wb memory */
1377		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1378	}
1379
1380	return 0;
1381}
1382
1383/**
1384 * amdgpu_device_wb_get - Allocate a wb entry
1385 *
1386 * @adev: amdgpu_device pointer
1387 * @wb: wb index
1388 *
1389 * Allocate a wb slot for use by the driver (all asics).
1390 * Returns 0 on success or -EINVAL on failure.
1391 */
1392int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1393{
1394	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1395
1396	if (offset < adev->wb.num_wb) {
1397		__set_bit(offset, adev->wb.used);
1398		*wb = offset << 3; /* convert to dw offset */
1399		return 0;
1400	} else {
1401		return -EINVAL;
1402	}
1403}
1404
1405/**
1406 * amdgpu_device_wb_free - Free a wb entry
1407 *
1408 * @adev: amdgpu_device pointer
1409 * @wb: wb index
1410 *
1411 * Free a wb slot allocated for use by the driver (all asics)
1412 */
1413void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1414{
1415	wb >>= 3;
1416	if (wb < adev->wb.num_wb)
1417		__clear_bit(wb, adev->wb.used);
1418}
1419
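/*
 * Usage sketch (editor's illustration): rings reserve writeback slots for
 * their read pointers and fences. The returned value is a dword offset, so
 * the CPU view is adev->wb.wb[wb] and the matching GPU address is
 * adev->wb.gpu_addr + wb * 4:
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (u64)wb * 4;
 *		u32 cpu_val = adev->wb.wb[wb];
 *
 *		// ... point a ring rptr or fence at gpu_addr ...
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */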
1420/**
1421 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1422 *
1423 * @adev: amdgpu_device pointer
1424 *
1425 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1426 * to fail, but if any of the BARs is not accessible after the resize we abort
1427 * driver loading by returning -ENODEV.
1428 */
1429int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1430{
1431	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1432	struct pci_bus *root;
1433	struct resource *res;
1434	unsigned int i;
1435	u16 cmd;
1436	int r;
1437
1438	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
1439		return 0;
1440
1441	/* Bypass for VF */
1442	if (amdgpu_sriov_vf(adev))
1443		return 0;
1444
1445	/* skip if the bios has already enabled large BAR */
1446	if (adev->gmc.real_vram_size &&
1447	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1448		return 0;
1449
1450	/* Check if the root BUS has 64bit memory resources */
1451	root = adev->pdev->bus;
1452	while (root->parent)
1453		root = root->parent;
1454
1455	pci_bus_for_each_resource(root, res, i) {
1456		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1457		    res->start > 0x100000000ull)
1458			break;
1459	}
1460
1461	/* Trying to resize is pointless without a root hub window above 4GB */
1462	if (!res)
1463		return 0;
1464
1465	/* Limit the BAR size to what is available */
1466	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1467			rbar_size);
1468
1469	/* Disable memory decoding while we change the BAR addresses and size */
1470	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1471	pci_write_config_word(adev->pdev, PCI_COMMAND,
1472			      cmd & ~PCI_COMMAND_MEMORY);
1473
1474	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1475	amdgpu_doorbell_fini(adev);
1476	if (adev->asic_type >= CHIP_BONAIRE)
1477		pci_release_resource(adev->pdev, 2);
1478
1479	pci_release_resource(adev->pdev, 0);
1480
1481	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1482	if (r == -ENOSPC)
1483		DRM_INFO("Not enough PCI address space for a large BAR.");
1484	else if (r && r != -ENOTSUPP)
1485		DRM_ERROR("Problem resizing BAR0 (%d).", r);
1486
1487	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1488
1489	/* When the doorbell or fb BAR isn't available we have no chance of
1490	 * using the device.
1491	 */
1492	r = amdgpu_doorbell_init(adev);
1493	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1494		return -ENODEV;
1495
1496	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1497
1498	return 0;
1499}
1500
1501static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
1502{
1503	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
1504		return false;
1505
1506	return true;
1507}
1508
1509/*
1510 * GPU helpers function.
1511 */
1512/**
1513 * amdgpu_device_need_post - check if the hw need post or not
1514 *
1515 * @adev: amdgpu_device pointer
1516 *
1517 * Check if the asic has been initialized (all asics) at driver startup,
1518 * or if post is needed because a hw reset was performed.
1519 * Returns true if post is needed or false if not.
1520 */
1521bool amdgpu_device_need_post(struct amdgpu_device *adev)
1522{
1523	uint32_t reg;
1524
1525	if (amdgpu_sriov_vf(adev))
1526		return false;
1527
1528	if (!amdgpu_device_read_bios(adev))
1529		return false;
1530
1531	if (amdgpu_passthrough(adev)) {
1532		/* for FIJI: In the whole GPU pass-through virtualization case, after a VM
1533		 * reboot some old SMC firmware still needs the driver to perform vPost,
1534		 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
1535		 * this flaw, so we force vPost for SMC versions below 22.15.
1536		 */
1537		if (adev->asic_type == CHIP_FIJI) {
1538			int err;
1539			uint32_t fw_ver;
1540
1541			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1542			/* force vPost if error occurred */
1543			if (err)
1544				return true;
1545
1546			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1547			release_firmware(adev->pm.fw);
1548			if (fw_ver < 0x00160e00)
1549				return true;
1550		}
1551	}
1552
1553	/* Don't post if we need to reset whole hive on init */
1554	if (adev->gmc.xgmi.pending_reset)
1555		return false;
1556
1557	if (adev->has_hw_reset) {
1558		adev->has_hw_reset = false;
1559		return true;
1560	}
1561
1562	/* bios scratch used on CIK+ */
1563	if (adev->asic_type >= CHIP_BONAIRE)
1564		return amdgpu_atombios_scratch_need_asic_init(adev);
1565
1566	/* check MEM_SIZE for older asics */
1567	reg = amdgpu_asic_get_config_memsize(adev);
1568
1569	if ((reg != 0) && (reg != 0xffffffff))
1570		return false;
1571
1572	return true;
1573}
1574
1575/*
1576 * Check whether seamless boot is supported.
1577 *
1578 * So far we only support seamless boot on DCE 3.0 or later.
1579 * If users report that it works on older ASICs as well, we may
1580 * loosen this.
1581 */
1582bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
1583{
1584	switch (amdgpu_seamless) {
1585	case -1:
1586		break;
1587	case 1:
1588		return true;
1589	case 0:
1590		return false;
1591	default:
1592		DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
1593			  amdgpu_seamless);
1594		return false;
1595	}
1596
1597	if (!(adev->flags & AMD_IS_APU))
1598		return false;
1599
1600	if (adev->mman.keep_stolen_vga_memory)
1601		return false;
1602
1603	return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
1604}
1605
1606/*
1607 * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
1608 * don't support dynamic speed switching. Until we have confirmation from Intel
1609 * that a specific host supports it, it's safer that we keep it disabled for all.
1610 *
1611 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
1612 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
1613 */
1614static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
1615{
1616#if IS_ENABLED(CONFIG_X86)
1617	struct cpuinfo_x86 *c = &cpu_data(0);
1618
1619	/* eGPU change speeds based on USB4 fabric conditions */
1620	if (dev_is_removable(adev->dev))
1621		return true;
1622
1623	if (c->x86_vendor == X86_VENDOR_INTEL)
1624		return false;
1625#endif
1626	return true;
1627}
1628
1629/**
1630 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1631 *
1632 * @adev: amdgpu_device pointer
1633 *
1634 * Confirm whether the module parameter and pcie bridge agree that ASPM should
1635 * be set for this device.
1636 *
1637 * Returns true if it should be used or false if not.
1638 */
1639bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1640{
1641	switch (amdgpu_aspm) {
1642	case -1:
1643		break;
1644	case 0:
1645		return false;
1646	case 1:
1647		return true;
1648	default:
1649		return false;
1650	}
1651	if (adev->flags & AMD_IS_APU)
1652		return false;
1653	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
1654		return false;
1655	return pcie_aspm_enabled(adev->pdev);
1656}
1657
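/*
 * Editor's note: the amdgpu.aspm module parameter maps onto the switch above:
 * 0 forces ASPM off, 1 forces the check to report true, and the default of -1
 * falls through to the APU, PCIe DPM and bridge checks below it.
 */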
1658/* if we get transitioned to only one device, take VGA back */
1659/**
1660 * amdgpu_device_vga_set_decode - enable/disable vga decode
1661 *
1662 * @pdev: PCI device pointer
1663 * @state: enable/disable vga decode
1664 *
1665 * Enable/disable vga decode (all asics).
1666 * Returns VGA resource flags.
1667 */
1668static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1669		bool state)
1670{
1671	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1672
1673	amdgpu_asic_set_vga_state(adev, state);
1674	if (state)
1675		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1676		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1677	else
1678		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1679}
1680
1681/**
1682 * amdgpu_device_check_block_size - validate the vm block size
1683 *
1684 * @adev: amdgpu_device pointer
1685 *
1686 * Validates the vm block size specified via module parameter.
1687 * The vm block size defines the number of bits in page table versus page directory,
1688 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1689 * page table and the remaining bits are in the page directory.
1690 */
1691static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1692{
1693	/* defines number of bits in page table versus page directory,
1694	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1695	 * page table and the remaining bits are in the page directory
1696	 */
1697	if (amdgpu_vm_block_size == -1)
1698		return;
1699
1700	if (amdgpu_vm_block_size < 9) {
1701		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1702			 amdgpu_vm_block_size);
1703		amdgpu_vm_block_size = -1;
1704	}
1705}
1706
1707/**
1708 * amdgpu_device_check_vm_size - validate the vm size
1709 *
1710 * @adev: amdgpu_device pointer
1711 *
1712 * Validates the vm size in GB specified via module parameter.
1713 * The VM size is the size of the GPU virtual memory space in GB.
1714 */
1715static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1716{
1717	/* no need to check the default value */
1718	if (amdgpu_vm_size == -1)
1719		return;
1720
1721	if (amdgpu_vm_size < 1) {
1722		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1723			 amdgpu_vm_size);
1724		amdgpu_vm_size = -1;
1725	}
1726}
1727
1728static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1729{
1730	struct sysinfo si;
1731	bool is_os_64 = (sizeof(void *) == 8);
1732	uint64_t total_memory;
1733	uint64_t dram_size_seven_GB = 0x1B8000000;
1734	uint64_t dram_size_three_GB = 0xB8000000;
1735
1736	if (amdgpu_smu_memory_pool_size == 0)
1737		return;
1738
1739	if (!is_os_64) {
1740		DRM_WARN("Not 64-bit OS, feature not supported\n");
1741		goto def_value;
1742	}
1743	si_meminfo(&si);
1744	total_memory = (uint64_t)si.totalram * si.mem_unit;
1745
1746	if ((amdgpu_smu_memory_pool_size == 1) ||
1747		(amdgpu_smu_memory_pool_size == 2)) {
1748		if (total_memory < dram_size_three_GB)
1749			goto def_value1;
1750	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1751		(amdgpu_smu_memory_pool_size == 8)) {
1752		if (total_memory < dram_size_seven_GB)
1753			goto def_value1;
1754	} else {
1755		DRM_WARN("Smu memory pool size not supported\n");
1756		goto def_value;
1757	}
1758	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1759
1760	return;
1761
1762def_value1:
1763	DRM_WARN("Not enough system memory\n");
1764def_value:
1765	adev->pm.smu_prv_buffer_size = 0;
1766}
1767
1768static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1769{
1770	if (!(adev->flags & AMD_IS_APU) ||
1771	    adev->asic_type < CHIP_RAVEN)
1772		return 0;
1773
1774	switch (adev->asic_type) {
1775	case CHIP_RAVEN:
1776		if (adev->pdev->device == 0x15dd)
1777			adev->apu_flags |= AMD_APU_IS_RAVEN;
1778		if (adev->pdev->device == 0x15d8)
1779			adev->apu_flags |= AMD_APU_IS_PICASSO;
1780		break;
1781	case CHIP_RENOIR:
1782		if ((adev->pdev->device == 0x1636) ||
1783		    (adev->pdev->device == 0x164c))
1784			adev->apu_flags |= AMD_APU_IS_RENOIR;
1785		else
1786			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1787		break;
1788	case CHIP_VANGOGH:
1789		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1790		break;
1791	case CHIP_YELLOW_CARP:
1792		break;
1793	case CHIP_CYAN_SKILLFISH:
1794		if ((adev->pdev->device == 0x13FE) ||
1795		    (adev->pdev->device == 0x143F))
1796			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1797		break;
1798	default:
1799		break;
1800	}
1801
1802	return 0;
1803}
1804
1805/**
1806 * amdgpu_device_check_arguments - validate module params
1807 *
1808 * @adev: amdgpu_device pointer
1809 *
1810 * Validates certain module parameters and updates
1811 * the associated values used by the driver (all asics).
1812 */
1813static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1814{
1815	if (amdgpu_sched_jobs < 4) {
1816		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1817			 amdgpu_sched_jobs);
1818		amdgpu_sched_jobs = 4;
1819	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
1820		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1821			 amdgpu_sched_jobs);
1822		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1823	}
1824
1825	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1826		/* gart size must be greater than or equal to 32M */
1827		dev_warn(adev->dev, "gart size (%d) too small\n",
1828			 amdgpu_gart_size);
1829		amdgpu_gart_size = -1;
1830	}
1831
1832	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1833		/* gtt size must be greater than or equal to 32M */
1834		dev_warn(adev->dev, "gtt size (%d) too small\n",
1835				 amdgpu_gtt_size);
1836		amdgpu_gtt_size = -1;
1837	}
1838
1839	/* valid range is between 4 and 9 inclusive */
1840	if (amdgpu_vm_fragment_size != -1 &&
1841	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1842		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1843		amdgpu_vm_fragment_size = -1;
1844	}
1845
1846	if (amdgpu_sched_hw_submission < 2) {
1847		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1848			 amdgpu_sched_hw_submission);
1849		amdgpu_sched_hw_submission = 2;
1850	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1851		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1852			 amdgpu_sched_hw_submission);
1853		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1854	}
1855
1856	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1857		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1858		amdgpu_reset_method = -1;
1859	}
1860
1861	amdgpu_device_check_smu_prv_buffer_size(adev);
1862
1863	amdgpu_device_check_vm_size(adev);
1864
1865	amdgpu_device_check_block_size(adev);
1866
1867	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1868
1869	return 0;
1870}
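
/*
 * Illustrative examples of the clamping above, assuming the values are
 * passed as amdgpu module parameters on the kernel command line:
 *
 *	amdgpu.sched_jobs=6        -> rounded up to 8 (next power of 2)
 *	amdgpu.gart_size=16        -> rejected, falls back to -1 (auto)
 *	amdgpu.vm_fragment_size=12 -> rejected, falls back to -1 (default)
 */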
1871
1872/**
1873 * amdgpu_switcheroo_set_state - set switcheroo state
1874 *
1875 * @pdev: pci dev pointer
1876 * @state: vga_switcheroo state
1877 *
1878 * Callback for the switcheroo driver.  Suspends or resumes
1879 * the asics before or after it is powered up using ACPI methods.
1880 */
1881static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1882					enum vga_switcheroo_state state)
1883{
1884	struct drm_device *dev = pci_get_drvdata(pdev);
1885	int r;
1886
1887	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1888		return;
1889
1890	if (state == VGA_SWITCHEROO_ON) {
1891		pr_info("switched on\n");
1892		/* don't suspend or resume card normally */
1893		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1894
1895		pci_set_power_state(pdev, PCI_D0);
1896		amdgpu_device_load_pci_state(pdev);
1897		r = pci_enable_device(pdev);
1898		if (r)
1899			DRM_WARN("pci_enable_device failed (%d)\n", r);
1900		amdgpu_device_resume(dev, true);
1901
1902		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1903	} else {
1904		pr_info("switched off\n");
1905		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1906		amdgpu_device_prepare(dev);
1907		amdgpu_device_suspend(dev, true);
1908		amdgpu_device_cache_pci_state(pdev);
1909		/* Shut down the device */
1910		pci_disable_device(pdev);
1911		pci_set_power_state(pdev, PCI_D3cold);
1912		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1913	}
1914}
1915
1916/**
1917 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1918 *
1919 * @pdev: pci dev pointer
1920 *
1921 * Callback for the switcheroo driver.  Checks if the switcheroo
1922 * state can be changed.
1923 * Returns true if the state can be changed, false if not.
1924 */
1925static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1926{
1927	struct drm_device *dev = pci_get_drvdata(pdev);
1928
1929	/*
1930	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1931	 * locking inversion with the driver load path. And the access here is
1932	 * completely racy anyway. So don't bother with locking for now.
1933	 */
1934	return atomic_read(&dev->open_count) == 0;
1935}
1936
1937static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1938	.set_gpu_state = amdgpu_switcheroo_set_state,
1939	.reprobe = NULL,
1940	.can_switch = amdgpu_switcheroo_can_switch,
1941};
1942
1943/**
1944 * amdgpu_device_ip_set_clockgating_state - set the CG state
1945 *
1946 * @dev: amdgpu_device pointer
1947 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1948 * @state: clockgating state (gate or ungate)
1949 *
1950 * Sets the requested clockgating state for all instances of
1951 * the hardware IP specified.
1952 * Returns the error code from the last instance.
1953 */
1954int amdgpu_device_ip_set_clockgating_state(void *dev,
1955					   enum amd_ip_block_type block_type,
1956					   enum amd_clockgating_state state)
1957{
1958	struct amdgpu_device *adev = dev;
1959	int i, r = 0;
1960
1961	for (i = 0; i < adev->num_ip_blocks; i++) {
1962		if (!adev->ip_blocks[i].status.valid)
1963			continue;
1964		if (adev->ip_blocks[i].version->type != block_type)
1965			continue;
1966		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1967			continue;
1968		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1969			(void *)adev, state);
1970		if (r)
1971			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1972				  adev->ip_blocks[i].version->funcs->name, r);
1973	}
1974	return r;
1975}
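
/*
 * A minimal usage sketch (hypothetical caller): request clockgating for
 * all GFX instances on the device:
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev,
 *						   AMD_IP_BLOCK_TYPE_GFX,
 *						   AMD_CG_STATE_GATE);
 *
 * Since the loop above keeps iterating on failure, only the error code
 * of the last matching instance is returned.
 */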
1976
1977/**
1978 * amdgpu_device_ip_set_powergating_state - set the PG state
1979 *
1980 * @dev: amdgpu_device pointer
1981 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1982 * @state: powergating state (gate or ungate)
1983 *
1984 * Sets the requested powergating state for all instances of
1985 * the hardware IP specified.
1986 * Returns the error code from the last instance.
1987 */
1988int amdgpu_device_ip_set_powergating_state(void *dev,
1989					   enum amd_ip_block_type block_type,
1990					   enum amd_powergating_state state)
1991{
1992	struct amdgpu_device *adev = dev;
1993	int i, r = 0;
1994
1995	for (i = 0; i < adev->num_ip_blocks; i++) {
1996		if (!adev->ip_blocks[i].status.valid)
1997			continue;
1998		if (adev->ip_blocks[i].version->type != block_type)
1999			continue;
2000		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
2001			continue;
2002		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
2003			(void *)adev, state);
2004		if (r)
2005			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
2006				  adev->ip_blocks[i].version->funcs->name, r);
2007	}
2008	return r;
2009}
2010
2011/**
2012 * amdgpu_device_ip_get_clockgating_state - get the CG state
2013 *
2014 * @adev: amdgpu_device pointer
2015 * @flags: clockgating feature flags
2016 *
2017 * Walks the list of IPs on the device and updates the clockgating
2018 * flags for each IP.
2019 * Updates @flags with the feature flags for each hardware IP where
2020 * clockgating is enabled.
2021 */
2022void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
2023					    u64 *flags)
2024{
2025	int i;
2026
2027	for (i = 0; i < adev->num_ip_blocks; i++) {
2028		if (!adev->ip_blocks[i].status.valid)
2029			continue;
2030		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
2031			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
2032	}
2033}
2034
2035/**
2036 * amdgpu_device_ip_wait_for_idle - wait for idle
2037 *
2038 * @adev: amdgpu_device pointer
2039 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2040 *
2041 * Waits for the requested hardware IP to be idle.
2042 * Returns 0 for success or a negative error code on failure.
2043 */
2044int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
2045				   enum amd_ip_block_type block_type)
2046{
2047	int i, r;
2048
2049	for (i = 0; i < adev->num_ip_blocks; i++) {
2050		if (!adev->ip_blocks[i].status.valid)
2051			continue;
2052		if (adev->ip_blocks[i].version->type == block_type) {
2053			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
2054			if (r)
2055				return r;
2056			break;
2057		}
2058	}
2059	return 0;
2060
2061}
2062
2063/**
2064 * amdgpu_device_ip_is_idle - is the hardware IP idle
2065 *
2066 * @adev: amdgpu_device pointer
2067 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2068 *
2069 * Check if the hardware IP is idle or not.
2070 * Returns true if the IP is idle, false if not.
2071 */
2072bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
2073			      enum amd_ip_block_type block_type)
2074{
2075	int i;
2076
2077	for (i = 0; i < adev->num_ip_blocks; i++) {
2078		if (!adev->ip_blocks[i].status.valid)
2079			continue;
2080		if (adev->ip_blocks[i].version->type == block_type)
2081			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
2082	}
2083	return true;
2084
2085}
2086
2087/**
2088 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
2089 *
2090 * @adev: amdgpu_device pointer
2091 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
2092 *
2093 * Returns a pointer to the hardware IP block structure
2094 * if it exists for the asic, otherwise NULL.
2095 */
2096struct amdgpu_ip_block *
2097amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
2098			      enum amd_ip_block_type type)
2099{
2100	int i;
2101
2102	for (i = 0; i < adev->num_ip_blocks; i++)
2103		if (adev->ip_blocks[i].version->type == type)
2104			return &adev->ip_blocks[i];
2105
2106	return NULL;
2107}
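
/*
 * Typical lookup (illustrative): fetch the GMC block before touching
 * per-IP state:
 *
 *	struct amdgpu_ip_block *gmc =
 *		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
 *	if (gmc)
 *		DRM_INFO("GMC v%u.%u\n", gmc->version->major,
 *			 gmc->version->minor);
 */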
2108
2109/**
2110 * amdgpu_device_ip_block_version_cmp
2111 *
2112 * @adev: amdgpu_device pointer
2113 * @type: enum amd_ip_block_type
2114 * @major: major version
2115 * @minor: minor version
2116 *
2117 * return 0 if equal or greater
2118 * return 1 if smaller or the ip_block doesn't exist
2119 */
2120int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
2121				       enum amd_ip_block_type type,
2122				       u32 major, u32 minor)
2123{
2124	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
2125
2126	if (ip_block && ((ip_block->version->major > major) ||
2127			((ip_block->version->major == major) &&
2128			(ip_block->version->minor >= minor))))
2129		return 0;
2130
2131	return 1;
2132}
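
/*
 * Illustrative use, with a hypothetical do_feature() helper: only enable
 * a feature when the GFX block is at least version 8.1:
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						8, 1))
 *		do_feature(adev);
 *
 * Note the inverted sense of the return value: 0 means "at least this
 * version", 1 means older or missing.
 */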
2133
2134/**
2135 * amdgpu_device_ip_block_add
2136 *
2137 * @adev: amdgpu_device pointer
2138 * @ip_block_version: pointer to the IP to add
2139 *
2140 * Adds the IP block driver information to the collection of IPs
2141 * on the asic.
2142 */
2143int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
2144			       const struct amdgpu_ip_block_version *ip_block_version)
2145{
2146	if (!ip_block_version)
2147		return -EINVAL;
2148
2149	switch (ip_block_version->type) {
2150	case AMD_IP_BLOCK_TYPE_VCN:
2151		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
2152			return 0;
2153		break;
2154	case AMD_IP_BLOCK_TYPE_JPEG:
2155		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
2156			return 0;
2157		break;
2158	default:
2159		break;
2160	}
2161
2162	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
2163		  ip_block_version->funcs->name);
2164
2165	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
2166
2167	return 0;
2168}
2169
2170/**
2171 * amdgpu_device_enable_virtual_display - enable virtual display feature
2172 *
2173 * @adev: amdgpu_device pointer
2174 *
2175 * Enables the virtual display feature if the user has enabled it via
2176 * the module parameter virtual_display.  This feature provides virtual
2177 * display hardware on headless boards or in virtualized environments.
2178 * This function parses and validates the configuration string specified by
2179 * the user and configures the virtual display configuration (number of
2180 * virtual connectors, crtcs, etc.) specified.
2181 */
2182static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
2183{
2184	adev->enable_virtual_display = false;
2185
2186	if (amdgpu_virtual_display) {
2187		const char *pci_address_name = pci_name(adev->pdev);
2188		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
2189
2190		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
2191		pciaddstr_tmp = pciaddstr;
2192		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
2193			pciaddname = strsep(&pciaddname_tmp, ",");
2194			if (!strcmp("all", pciaddname) ||
2195			    !strcmp(pci_address_name, pciaddname)) {
2196				long num_crtc;
2197				int res = -1;
2198
2199				adev->enable_virtual_display = true;
2200
2201				if (pciaddname_tmp)
2202					res = kstrtol(pciaddname_tmp, 10,
2203						      &num_crtc);
2204
2205				if (!res) {
2206					if (num_crtc < 1)
2207						num_crtc = 1;
2208					if (num_crtc > 6)
2209						num_crtc = 6;
2210					adev->mode_info.num_crtc = num_crtc;
2211				} else {
2212					adev->mode_info.num_crtc = 1;
2213				}
2214				break;
2215			}
2216		}
2217
2218		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
2219			 amdgpu_virtual_display, pci_address_name,
2220			 adev->enable_virtual_display, adev->mode_info.num_crtc);
2221
2222		kfree(pciaddstr);
2223	}
2224}
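
/*
 * The string parsed above is a semicolon-separated list of
 * <pci address>[,<num_crtc>] entries.  For example, on a hypothetical
 * system:
 *
 *	amdgpu.virtual_display=0000:03:00.0,2
 *
 * enables two virtual CRTCs on the device at 0000:03:00.0, while
 *
 *	amdgpu.virtual_display=all,1
 *
 * enables one virtual CRTC on every amdgpu device.  num_crtc is clamped
 * to the 1..6 range and defaults to 1 when omitted or unparsable.
 */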
2225
2226void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
2227{
2228	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
2229		adev->mode_info.num_crtc = 1;
2230		adev->enable_virtual_display = true;
2231		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
2232			 adev->enable_virtual_display, adev->mode_info.num_crtc);
2233	}
2234}
2235
2236/**
2237 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2238 *
2239 * @adev: amdgpu_device pointer
2240 *
2241 * Parses the asic configuration parameters specified in the gpu info
2242 * firmware and makes them available to the driver for use in configuring
2243 * the asic.
2244 * Returns 0 on success, -EINVAL on failure.
2245 */
2246static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
2247{
2248	const char *chip_name;
2249	char fw_name[40];
2250	int err;
2251	const struct gpu_info_firmware_header_v1_0 *hdr;
2252
2253	adev->firmware.gpu_info_fw = NULL;
2254
2255	if (adev->mman.discovery_bin)
2256		return 0;
2257
2258	switch (adev->asic_type) {
2259	default:
2260		return 0;
2261	case CHIP_VEGA10:
2262		chip_name = "vega10";
2263		break;
2264	case CHIP_VEGA12:
2265		chip_name = "vega12";
2266		break;
2267	case CHIP_RAVEN:
2268		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2269			chip_name = "raven2";
2270		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
2271			chip_name = "picasso";
2272		else
2273			chip_name = "raven";
2274		break;
2275	case CHIP_ARCTURUS:
2276		chip_name = "arcturus";
2277		break;
2278	case CHIP_NAVI12:
2279		chip_name = "navi12";
2280		break;
2281	}
2282
2283	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
2284	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
2285	if (err) {
2286		dev_err(adev->dev,
2287			"Failed to get gpu_info firmware \"%s\"\n",
2288			fw_name);
2289		goto out;
2290	}
2291
2292	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2293	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2294
2295	switch (hdr->version_major) {
2296	case 1:
2297	{
2298		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2299			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2300								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2301
2302		/*
2303		 * Should be dropped when DAL no longer needs it.
2304		 */
2305		if (adev->asic_type == CHIP_NAVI12)
2306			goto parse_soc_bounding_box;
2307
2308		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2309		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2310		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2311		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2312		adev->gfx.config.max_texture_channel_caches =
2313			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2314		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2315		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2316		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2317		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2318		adev->gfx.config.double_offchip_lds_buf =
2319			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2320		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2321		adev->gfx.cu_info.max_waves_per_simd =
2322			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2323		adev->gfx.cu_info.max_scratch_slots_per_cu =
2324			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2325		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2326		if (hdr->version_minor >= 1) {
2327			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2328				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2329									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2330			adev->gfx.config.num_sc_per_sh =
2331				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2332			adev->gfx.config.num_packer_per_sc =
2333				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2334		}
2335
2336parse_soc_bounding_box:
2337		/*
2338		 * soc bounding box info is not integrated in discovery table,
2339		 * we always need to parse it from gpu info firmware if needed.
2340		 */
2341		if (hdr->version_minor == 2) {
2342			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2343				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2344									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2345			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2346		}
2347		break;
2348	}
2349	default:
2350		dev_err(adev->dev,
2351			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2352		err = -EINVAL;
2353		goto out;
2354	}
2355out:
2356	return err;
2357}
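
/*
 * The request above resolves, assuming the default firmware search path,
 * to a file such as /lib/firmware/amdgpu/navi12_gpu_info.bin.
 */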
2358
2359/**
2360 * amdgpu_device_ip_early_init - run early init for hardware IPs
2361 *
2362 * @adev: amdgpu_device pointer
2363 *
2364 * Early initialization pass for hardware IPs.  The hardware IPs that make
2365 * up each asic are discovered and each IP's early_init callback is run.  This
2366 * is the first stage in initializing the asic.
2367 * Returns 0 on success, negative error code on failure.
2368 */
2369static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2370{
2371	struct pci_dev *parent;
2372	int i, r;
2373	bool total;
2374
2375	amdgpu_device_enable_virtual_display(adev);
2376
2377	if (amdgpu_sriov_vf(adev)) {
2378		r = amdgpu_virt_request_full_gpu(adev, true);
2379		if (r)
2380			return r;
2381	}
2382
2383	switch (adev->asic_type) {
2384#ifdef CONFIG_DRM_AMDGPU_SI
2385	case CHIP_VERDE:
2386	case CHIP_TAHITI:
2387	case CHIP_PITCAIRN:
2388	case CHIP_OLAND:
2389	case CHIP_HAINAN:
2390		adev->family = AMDGPU_FAMILY_SI;
2391		r = si_set_ip_blocks(adev);
2392		if (r)
2393			return r;
2394		break;
2395#endif
2396#ifdef CONFIG_DRM_AMDGPU_CIK
2397	case CHIP_BONAIRE:
2398	case CHIP_HAWAII:
2399	case CHIP_KAVERI:
2400	case CHIP_KABINI:
2401	case CHIP_MULLINS:
2402		if (adev->flags & AMD_IS_APU)
2403			adev->family = AMDGPU_FAMILY_KV;
2404		else
2405			adev->family = AMDGPU_FAMILY_CI;
2406
2407		r = cik_set_ip_blocks(adev);
2408		if (r)
2409			return r;
2410		break;
2411#endif
2412	case CHIP_TOPAZ:
2413	case CHIP_TONGA:
2414	case CHIP_FIJI:
2415	case CHIP_POLARIS10:
2416	case CHIP_POLARIS11:
2417	case CHIP_POLARIS12:
2418	case CHIP_VEGAM:
2419	case CHIP_CARRIZO:
2420	case CHIP_STONEY:
2421		if (adev->flags & AMD_IS_APU)
2422			adev->family = AMDGPU_FAMILY_CZ;
2423		else
2424			adev->family = AMDGPU_FAMILY_VI;
2425
2426		r = vi_set_ip_blocks(adev);
2427		if (r)
2428			return r;
2429		break;
2430	default:
2431		r = amdgpu_discovery_set_ip_blocks(adev);
2432		if (r)
2433			return r;
2434		break;
2435	}
2436
2437	if (amdgpu_has_atpx() &&
2438	    (amdgpu_is_atpx_hybrid() ||
2439	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2440	    ((adev->flags & AMD_IS_APU) == 0) &&
2441	    !dev_is_removable(&adev->pdev->dev))
2442		adev->flags |= AMD_IS_PX;
2443
2444	if (!(adev->flags & AMD_IS_APU)) {
2445		parent = pcie_find_root_port(adev->pdev);
2446		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2447	}
2448
2449
2450	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2451	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2452		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2453	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2454		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2455	if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
2456		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2457
2458	total = true;
2459	for (i = 0; i < adev->num_ip_blocks; i++) {
2460		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2461			DRM_WARN("disabled ip block: %d <%s>\n",
2462				  i, adev->ip_blocks[i].version->funcs->name);
2463			adev->ip_blocks[i].status.valid = false;
2464		} else {
2465			if (adev->ip_blocks[i].version->funcs->early_init) {
2466				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2467				if (r == -ENOENT) {
2468					adev->ip_blocks[i].status.valid = false;
2469				} else if (r) {
2470					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2471						  adev->ip_blocks[i].version->funcs->name, r);
2472					total = false;
2473				} else {
2474					adev->ip_blocks[i].status.valid = true;
2475				}
2476			} else {
2477				adev->ip_blocks[i].status.valid = true;
2478			}
2479		}
2480		/* get the vbios after the asic_funcs are set up */
2481		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2482			r = amdgpu_device_parse_gpu_info_fw(adev);
2483			if (r)
2484				return r;
2485
2486			/* Read BIOS */
2487			if (amdgpu_device_read_bios(adev)) {
2488				if (!amdgpu_get_bios(adev))
2489					return -EINVAL;
2490
2491				r = amdgpu_atombios_init(adev);
2492				if (r) {
2493					dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2494					amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2495					return r;
2496				}
2497			}
2498
2499			/* get pf2vf msg info at its earliest time */
2500			if (amdgpu_sriov_vf(adev))
2501				amdgpu_virt_init_data_exchange(adev);
2502
2503		}
2504	}
2505	if (!total)
2506		return -ENODEV;
2507
2508	amdgpu_amdkfd_device_probe(adev);
2509	adev->cg_flags &= amdgpu_cg_mask;
2510	adev->pg_flags &= amdgpu_pg_mask;
2511
2512	return 0;
2513}
2514
2515static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2516{
2517	int i, r;
2518
2519	for (i = 0; i < adev->num_ip_blocks; i++) {
2520		if (!adev->ip_blocks[i].status.sw)
2521			continue;
2522		if (adev->ip_blocks[i].status.hw)
2523			continue;
2524		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2525		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2526		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2527			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2528			if (r) {
2529				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2530					  adev->ip_blocks[i].version->funcs->name, r);
2531				return r;
2532			}
2533			adev->ip_blocks[i].status.hw = true;
2534		}
2535	}
2536
2537	return 0;
2538}
2539
2540static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2541{
2542	int i, r;
2543
2544	for (i = 0; i < adev->num_ip_blocks; i++) {
2545		if (!adev->ip_blocks[i].status.sw)
2546			continue;
2547		if (adev->ip_blocks[i].status.hw)
2548			continue;
2549		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2550		if (r) {
2551			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2552				  adev->ip_blocks[i].version->funcs->name, r);
2553			return r;
2554		}
2555		adev->ip_blocks[i].status.hw = true;
2556	}
2557
2558	return 0;
2559}
2560
2561static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2562{
2563	int r = 0;
2564	int i;
2565	uint32_t smu_version;
2566
2567	if (adev->asic_type >= CHIP_VEGA10) {
2568		for (i = 0; i < adev->num_ip_blocks; i++) {
2569			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2570				continue;
2571
2572			if (!adev->ip_blocks[i].status.sw)
2573				continue;
2574
2575			/* no need to do the fw loading again if already done */
2576			if (adev->ip_blocks[i].status.hw)
2577				break;
2578
2579			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2580				r = adev->ip_blocks[i].version->funcs->resume(adev);
2581				if (r) {
2582					DRM_ERROR("resume of IP block <%s> failed %d\n",
2583							  adev->ip_blocks[i].version->funcs->name, r);
2584					return r;
2585				}
2586			} else {
2587				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2588				if (r) {
2589					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2590							  adev->ip_blocks[i].version->funcs->name, r);
2591					return r;
2592				}
2593			}
2594
2595			adev->ip_blocks[i].status.hw = true;
2596			break;
2597		}
2598	}
2599
2600	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2601		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2602
2603	return r;
2604}
2605
2606static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2607{
2608	long timeout;
2609	int r, i;
2610
2611	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2612		struct amdgpu_ring *ring = adev->rings[i];
2613
2614		/* No need to set up the GPU scheduler for rings that don't need it */
2615		if (!ring || ring->no_scheduler)
2616			continue;
2617
2618		switch (ring->funcs->type) {
2619		case AMDGPU_RING_TYPE_GFX:
2620			timeout = adev->gfx_timeout;
2621			break;
2622		case AMDGPU_RING_TYPE_COMPUTE:
2623			timeout = adev->compute_timeout;
2624			break;
2625		case AMDGPU_RING_TYPE_SDMA:
2626			timeout = adev->sdma_timeout;
2627			break;
2628		default:
2629			timeout = adev->video_timeout;
2630			break;
2631		}
2632
2633		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
2634				   DRM_SCHED_PRIORITY_COUNT,
2635				   ring->num_hw_submission, 0,
2636				   timeout, adev->reset_domain->wq,
2637				   ring->sched_score, ring->name,
2638				   adev->dev);
2639		if (r) {
2640			DRM_ERROR("Failed to create scheduler on ring %s.\n",
2641				  ring->name);
2642			return r;
2643		}
2644		r = amdgpu_uvd_entity_init(adev, ring);
2645		if (r) {
2646			DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
2647				  ring->name);
2648			return r;
2649		}
2650		r = amdgpu_vce_entity_init(adev, ring);
2651		if (r) {
2652			DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
2653				  ring->name);
2654			return r;
2655		}
2656	}
2657
2658	amdgpu_xcp_update_partition_sched_list(adev);
2659
2660	return 0;
2661}
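
/*
 * The per-ring timeouts chosen above are derived from the
 * amdgpu.lockup_timeout module parameter.  As a rough sketch, assuming
 * the single-value form applies to non-compute rings:
 *
 *	amdgpu.lockup_timeout=10000
 *
 * would feed a 10 second job timeout into the GFX ring's scheduler
 * created here.
 */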
2662
2663
2664/**
2665 * amdgpu_device_ip_init - run init for hardware IPs
2666 *
2667 * @adev: amdgpu_device pointer
2668 *
2669 * Main initialization pass for hardware IPs.  The list of all the hardware
2670 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2671 * are run.  sw_init initializes the software state associated with each IP
2672 * and hw_init initializes the hardware associated with each IP.
2673 * Returns 0 on success, negative error code on failure.
2674 */
2675static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2676{
2677	int i, r;
2678
2679	r = amdgpu_ras_init(adev);
2680	if (r)
2681		return r;
2682
2683	for (i = 0; i < adev->num_ip_blocks; i++) {
2684		if (!adev->ip_blocks[i].status.valid)
2685			continue;
2686		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2687		if (r) {
2688			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2689				  adev->ip_blocks[i].version->funcs->name, r);
2690			goto init_failed;
2691		}
2692		adev->ip_blocks[i].status.sw = true;
2693
2694		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2695			/* need to do common hw init early so everything is set up for gmc */
2696			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2697			if (r) {
2698				DRM_ERROR("hw_init %d failed %d\n", i, r);
2699				goto init_failed;
2700			}
2701			adev->ip_blocks[i].status.hw = true;
2702		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2703			/* need to do gmc hw init early so we can allocate gpu mem */
2704			/* Try to reserve bad pages early */
2705			if (amdgpu_sriov_vf(adev))
2706				amdgpu_virt_exchange_data(adev);
2707
2708			r = amdgpu_device_mem_scratch_init(adev);
2709			if (r) {
2710				DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
2711				goto init_failed;
2712			}
2713			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2714			if (r) {
2715				DRM_ERROR("hw_init %d failed %d\n", i, r);
2716				goto init_failed;
2717			}
2718			r = amdgpu_device_wb_init(adev);
2719			if (r) {
2720				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2721				goto init_failed;
2722			}
2723			adev->ip_blocks[i].status.hw = true;
2724
2725			/* right after GMC hw init, we create CSA */
2726			if (adev->gfx.mcbp) {
2727				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2728							       AMDGPU_GEM_DOMAIN_VRAM |
2729							       AMDGPU_GEM_DOMAIN_GTT,
2730							       AMDGPU_CSA_SIZE);
2731				if (r) {
2732					DRM_ERROR("allocate CSA failed %d\n", r);
2733					goto init_failed;
2734				}
2735			}
2736
2737			r = amdgpu_seq64_init(adev);
2738			if (r) {
2739				DRM_ERROR("allocate seq64 failed %d\n", r);
2740				goto init_failed;
2741			}
2742		}
2743	}
2744
2745	if (amdgpu_sriov_vf(adev))
2746		amdgpu_virt_init_data_exchange(adev);
2747
2748	r = amdgpu_ib_pool_init(adev);
2749	if (r) {
2750		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2751		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2752		goto init_failed;
2753	}
2754
2755	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2756	if (r)
2757		goto init_failed;
2758
2759	r = amdgpu_device_ip_hw_init_phase1(adev);
2760	if (r)
2761		goto init_failed;
2762
2763	r = amdgpu_device_fw_loading(adev);
2764	if (r)
2765		goto init_failed;
2766
2767	r = amdgpu_device_ip_hw_init_phase2(adev);
2768	if (r)
2769		goto init_failed;
2770
2771	/*
2772	 * Retired pages will be loaded from eeprom and reserved here.
2773	 * This should be called after amdgpu_device_ip_hw_init_phase2, since
2774	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2775	 * functional for I2C communication, which is only true at this point.
2776	 *
2777	 * amdgpu_ras_recovery_init may fail, but the upper layers only care
2778	 * about the failure from a bad gpu situation and stop the amdgpu init
2779	 * process accordingly. For other failed cases, it will still release
2780	 * all the resources and print an error message, rather than returning
2781	 * a negative value to the upper level.
2782	 *
2783	 * Note: theoretically, this should be called before all vram allocations
2784	 * to protect retired pages from abuse.
2785	 */
2786	r = amdgpu_ras_recovery_init(adev);
2787	if (r)
2788		goto init_failed;
2789
2790	/*
2791	 * In case of XGMI, grab an extra reference on the reset domain for this device.
2792	 */
2793	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2794		if (amdgpu_xgmi_add_device(adev) == 0) {
2795			if (!amdgpu_sriov_vf(adev)) {
2796				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2797
2798				if (WARN_ON(!hive)) {
2799					r = -ENOENT;
2800					goto init_failed;
2801				}
2802
2803				if (!hive->reset_domain ||
2804				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2805					r = -ENOENT;
2806					amdgpu_put_xgmi_hive(hive);
2807					goto init_failed;
2808				}
2809
2810				/* Drop the early temporary reset domain we created for device */
2811				amdgpu_reset_put_reset_domain(adev->reset_domain);
2812				adev->reset_domain = hive->reset_domain;
2813				amdgpu_put_xgmi_hive(hive);
2814			}
2815		}
2816	}
2817
2818	r = amdgpu_device_init_schedulers(adev);
2819	if (r)
2820		goto init_failed;
2821
2822	if (adev->mman.buffer_funcs_ring->sched.ready)
2823		amdgpu_ttm_set_buffer_funcs_status(adev, true);
2824
2825	/* Don't init kfd if the whole hive needs to be reset during init */
2826	if (!adev->gmc.xgmi.pending_reset) {
2827		kgd2kfd_init_zone_device(adev);
2828		amdgpu_amdkfd_device_init(adev);
2829	}
2830
2831	amdgpu_fru_get_product_info(adev);
2832
2833init_failed:
2834
2835	return r;
2836}
2837
2838/**
2839 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2840 *
2841 * @adev: amdgpu_device pointer
2842 *
2843 * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2844 * this function before a GPU reset.  If the value is retained after a
2845 * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2846 */
2847static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2848{
2849	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2850}
2851
2852/**
2853 * amdgpu_device_check_vram_lost - check if vram is valid
2854 *
2855 * @adev: amdgpu_device pointer
2856 *
2857 * Checks the reset magic value written to the gart pointer in VRAM.
2858 * The driver calls this after a GPU reset to see if the contents of
2859 * VRAM are lost or not.
2860 * returns true if vram is lost, false if not.
2861 */
2862static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2863{
2864	if (memcmp(adev->gart.ptr, adev->reset_magic,
2865			AMDGPU_RESET_MAGIC_NUM))
2866		return true;
2867
2868	if (!amdgpu_in_reset(adev))
2869		return false;
2870
2871	/*
2872	 * For all ASICs with baco/mode1 reset, the VRAM is
2873	 * always assumed to be lost.
2874	 */
2875	switch (amdgpu_asic_reset_method(adev)) {
2876	case AMD_RESET_METHOD_BACO:
2877	case AMD_RESET_METHOD_MODE1:
2878		return true;
2879	default:
2880		return false;
2881	}
2882}
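
/*
 * Usage sketch: the magic is written at late-init time while the GART is
 * mapped; after a reset a caller can then check it, illustratively:
 *
 *	if (amdgpu_device_check_vram_lost(adev))
 *		DRM_INFO("VRAM contents lost, buffers must be revalidated\n");
 */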
2883
2884/**
2885 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2886 *
2887 * @adev: amdgpu_device pointer
2888 * @state: clockgating state (gate or ungate)
2889 *
2890 * The list of all the hardware IPs that make up the asic is walked and the
2891 * set_clockgating_state callbacks are run.
2892 * During the late init pass this enables clockgating for the hardware IPs;
2893 * during fini or suspend it disables clockgating.
2894 * Returns 0 on success, negative error code on failure.
2895 */
2896
2897int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2898			       enum amd_clockgating_state state)
2899{
2900	int i, j, r;
2901
2902	if (amdgpu_emu_mode == 1)
2903		return 0;
2904
2905	for (j = 0; j < adev->num_ip_blocks; j++) {
2906		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2907		if (!adev->ip_blocks[i].status.late_initialized)
2908			continue;
2909		/* skip CG for GFX, SDMA on S0ix */
2910		if (adev->in_s0ix &&
2911		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2912		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2913			continue;
2914		/* skip CG for VCE/UVD, it's handled specially */
2915		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2916		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2917		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2918		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2919		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2920			/* enable clockgating to save power */
2921			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2922										     state);
2923			if (r) {
2924				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2925					  adev->ip_blocks[i].version->funcs->name, r);
2926				return r;
2927			}
2928		}
2929	}
2930
2931	return 0;
2932}
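
/*
 * Note the index flip above: gating (AMD_CG_STATE_GATE) walks the IP list
 * front to back, while ungating walks it back to front, mirroring the
 * usual init/fini ordering between dependent IP blocks.
 */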
2933
2934int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2935			       enum amd_powergating_state state)
2936{
2937	int i, j, r;
2938
2939	if (amdgpu_emu_mode == 1)
2940		return 0;
2941
2942	for (j = 0; j < adev->num_ip_blocks; j++) {
2943		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2944		if (!adev->ip_blocks[i].status.late_initialized)
2945			continue;
2946		/* skip PG for GFX, SDMA on S0ix */
2947		if (adev->in_s0ix &&
2948		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2949		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2950			continue;
2951		/* skip PG for VCE/UVD, it's handled specially */
2952		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2953		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2954		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2955		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2956		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2957			/* enable powergating to save power */
2958			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2959											state);
2960			if (r) {
2961				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2962					  adev->ip_blocks[i].version->funcs->name, r);
2963				return r;
2964			}
2965		}
2966	}
2967	return 0;
2968}
2969
2970static int amdgpu_device_enable_mgpu_fan_boost(void)
2971{
2972	struct amdgpu_gpu_instance *gpu_ins;
2973	struct amdgpu_device *adev;
2974	int i, ret = 0;
2975
2976	mutex_lock(&mgpu_info.mutex);
2977
2978	/*
2979	 * MGPU fan boost feature should be enabled
2980	 * only when there are two or more dGPUs in
2981	 * the system
2982	 */
2983	if (mgpu_info.num_dgpu < 2)
2984		goto out;
2985
2986	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2987		gpu_ins = &(mgpu_info.gpu_ins[i]);
2988		adev = gpu_ins->adev;
2989		if (!(adev->flags & AMD_IS_APU) &&
2990		    !gpu_ins->mgpu_fan_enabled) {
2991			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2992			if (ret)
2993				break;
2994
2995			gpu_ins->mgpu_fan_enabled = 1;
2996		}
2997	}
2998
2999out:
3000	mutex_unlock(&mgpu_info.mutex);
3001
3002	return ret;
3003}
3004
3005/**
3006 * amdgpu_device_ip_late_init - run late init for hardware IPs
3007 *
3008 * @adev: amdgpu_device pointer
3009 *
3010 * Late initialization pass for hardware IPs.  The list of all the hardware
3011 * IPs that make up the asic is walked and the late_init callbacks are run.
3012 * late_init covers any special initialization that an IP requires
3013 * after all of the IPs have been initialized or something that needs to happen
3014 * late in the init process.
3015 * Returns 0 on success, negative error code on failure.
3016 */
3017static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
3018{
3019	struct amdgpu_gpu_instance *gpu_instance;
3020	int i = 0, r;
3021
3022	for (i = 0; i < adev->num_ip_blocks; i++) {
3023		if (!adev->ip_blocks[i].status.hw)
3024			continue;
3025		if (adev->ip_blocks[i].version->funcs->late_init) {
3026			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
3027			if (r) {
3028				DRM_ERROR("late_init of IP block <%s> failed %d\n",
3029					  adev->ip_blocks[i].version->funcs->name, r);
3030				return r;
3031			}
3032		}
3033		adev->ip_blocks[i].status.late_initialized = true;
3034	}
3035
3036	r = amdgpu_ras_late_init(adev);
3037	if (r) {
3038		DRM_ERROR("amdgpu_ras_late_init failed %d", r);
3039		return r;
3040	}
3041
3042	amdgpu_ras_set_error_query_ready(adev, true);
3043
3044	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3045	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
3046
3047	amdgpu_device_fill_reset_magic(adev);
3048
3049	r = amdgpu_device_enable_mgpu_fan_boost();
3050	if (r)
3051		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
3052
3053	/* For passthrough configuration on arcturus and aldebaran, enable special SBR handling */
3054	if (amdgpu_passthrough(adev) &&
3055	    ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
3056	     adev->asic_type == CHIP_ALDEBARAN))
3057		amdgpu_dpm_handle_passthrough_sbr(adev, true);
3058
3059	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3060		mutex_lock(&mgpu_info.mutex);
3061
3062		/*
3063		 * Reset device p-state to low as this was booted with high.
3064		 *
3065		 * This should be performed only after all devices from the same
3066		 * hive get initialized.
3067		 *
3068		 * However, the number of devices in the hive is not known in
3069		 * advance, as it is counted one by one as devices initialize.
3070		 *
3071		 * So, we wait until all XGMI interlinked devices are initialized.
3072		 * This may bring some delays as those devices may come from
3073		 * different hives. But that should be OK.
3074		 */
3075		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
3076			for (i = 0; i < mgpu_info.num_gpu; i++) {
3077				gpu_instance = &(mgpu_info.gpu_ins[i]);
3078				if (gpu_instance->adev->flags & AMD_IS_APU)
3079					continue;
3080
3081				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
3082						AMDGPU_XGMI_PSTATE_MIN);
3083				if (r) {
3084					DRM_ERROR("pstate setting failed (%d).\n", r);
3085					break;
3086				}
3087			}
3088		}
3089
3090		mutex_unlock(&mgpu_info.mutex);
3091	}
3092
3093	return 0;
3094}
3095
3096/**
3097 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
3098 *
3099 * @adev: amdgpu_device pointer
3100 *
3101 * For ASICs that need to disable the SMC first.
3102 */
3103static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
3104{
3105	int i, r;
3106
3107	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
3108		return;
3109
3110	for (i = 0; i < adev->num_ip_blocks; i++) {
3111		if (!adev->ip_blocks[i].status.hw)
3112			continue;
3113		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3114			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3115			/* XXX handle errors */
3116			if (r) {
3117				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3118					  adev->ip_blocks[i].version->funcs->name, r);
3119			}
3120			adev->ip_blocks[i].status.hw = false;
3121			break;
3122		}
3123	}
3124}
3125
3126static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
3127{
3128	int i, r;
3129
3130	for (i = 0; i < adev->num_ip_blocks; i++) {
3131		if (!adev->ip_blocks[i].version->funcs->early_fini)
3132			continue;
3133
3134		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
3135		if (r) {
3136			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
3137				  adev->ip_blocks[i].version->funcs->name, r);
3138		}
3139	}
3140
3141	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3142	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3143
3144	amdgpu_amdkfd_suspend(adev, false);
3145
3146	/* Workaround for ASICs that need to disable the SMC first */
3147	amdgpu_device_smu_fini_early(adev);
3148
3149	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3150		if (!adev->ip_blocks[i].status.hw)
3151			continue;
3152
3153		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3154		/* XXX handle errors */
3155		if (r) {
3156			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3157				  adev->ip_blocks[i].version->funcs->name, r);
3158		}
3159
3160		adev->ip_blocks[i].status.hw = false;
3161	}
3162
3163	if (amdgpu_sriov_vf(adev)) {
3164		if (amdgpu_virt_release_full_gpu(adev, false))
3165			DRM_ERROR("failed to release exclusive mode on fini\n");
3166	}
3167
3168	return 0;
3169}
3170
3171/**
3172 * amdgpu_device_ip_fini - run fini for hardware IPs
3173 *
3174 * @adev: amdgpu_device pointer
3175 *
3176 * Main teardown pass for hardware IPs.  The list of all the hardware
3177 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
3178 * are run.  hw_fini tears down the hardware associated with each IP
3179 * and sw_fini tears down any software state associated with each IP.
3180 * Returns 0 on success, negative error code on failure.
3181 */
3182static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
3183{
3184	int i, r;
3185
3186	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
3187		amdgpu_virt_release_ras_err_handler_data(adev);
3188
3189	if (adev->gmc.xgmi.num_physical_nodes > 1)
3190		amdgpu_xgmi_remove_device(adev);
3191
3192	amdgpu_amdkfd_device_fini_sw(adev);
3193
3194	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3195		if (!adev->ip_blocks[i].status.sw)
3196			continue;
3197
3198		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
3199			amdgpu_ucode_free_bo(adev);
3200			amdgpu_free_static_csa(&adev->virt.csa_obj);
3201			amdgpu_device_wb_fini(adev);
3202			amdgpu_device_mem_scratch_fini(adev);
3203			amdgpu_ib_pool_fini(adev);
3204			amdgpu_seq64_fini(adev);
3205		}
3206
3207		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
3208		/* XXX handle errors */
3209		if (r) {
3210			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
3211				  adev->ip_blocks[i].version->funcs->name, r);
3212		}
3213		adev->ip_blocks[i].status.sw = false;
3214		adev->ip_blocks[i].status.valid = false;
3215	}
3216
3217	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3218		if (!adev->ip_blocks[i].status.late_initialized)
3219			continue;
3220		if (adev->ip_blocks[i].version->funcs->late_fini)
3221			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
3222		adev->ip_blocks[i].status.late_initialized = false;
3223	}
3224
3225	amdgpu_ras_fini(adev);
3226
3227	return 0;
3228}
3229
3230/**
3231 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3232 *
3233 * @work: work_struct.
3234 */
3235static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
3236{
3237	struct amdgpu_device *adev =
3238		container_of(work, struct amdgpu_device, delayed_init_work.work);
3239	int r;
3240
3241	r = amdgpu_ib_ring_tests(adev);
3242	if (r)
3243		DRM_ERROR("ib ring test failed (%d).\n", r);
3244}
3245
3246static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
3247{
3248	struct amdgpu_device *adev =
3249		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
3250
3251	WARN_ON_ONCE(adev->gfx.gfx_off_state);
3252	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3253
3254	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
3255		adev->gfx.gfx_off_state = true;
3256}
3257
3258/**
3259 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3260 *
3261 * @adev: amdgpu_device pointer
3262 *
3263 * Main suspend function for hardware IPs.  The list of all the hardware
3264 * IPs that make up the asic is walked, clockgating is disabled and the
3265 * suspend callbacks are run.  suspend puts the hardware and software state
3266 * in each IP into a state suitable for suspend.
3267 * Returns 0 on success, negative error code on failure.
3268 */
3269static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3270{
3271	int i, r;
3272
3273	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3274	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3275
3276	/*
3277	 * Per the PMFW team's suggestion, the driver needs to handle disabling
3278	 * the gfxoff and df cstate features for gpu reset (e.g. Mode1Reset)
3279	 * scenarios. Add the missing df cstate disablement here.
3280	 */
3281	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3282		dev_warn(adev->dev, "Failed to disallow df cstate");
3283
3284	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3285		if (!adev->ip_blocks[i].status.valid)
3286			continue;
3287
3288		/* displays are handled separately */
3289		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3290			continue;
3291
3293		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3294		/* XXX handle errors */
3295		if (r) {
3296			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3297				  adev->ip_blocks[i].version->funcs->name, r);
3298			return r;
3299		}
3300
3301		adev->ip_blocks[i].status.hw = false;
3302	}
3303
3304	return 0;
3305}
3306
3307/**
3308 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3309 *
3310 * @adev: amdgpu_device pointer
3311 *
3312 * Main suspend function for hardware IPs.  The list of all the hardware
3313 * IPs that make up the asic is walked, clockgating is disabled and the
3314 * suspend callbacks are run.  suspend puts the hardware and software state
3315 * in each IP into a state suitable for suspend.
3316 * Returns 0 on success, negative error code on failure.
3317 */
3318static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3319{
3320	int i, r;
3321
3322	if (adev->in_s0ix)
3323		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3324
3325	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3326		if (!adev->ip_blocks[i].status.valid)
3327			continue;
3328		/* displays are handled in phase1 */
3329		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3330			continue;
3331		/* PSP lost connection when err_event_athub occurs */
3332		if (amdgpu_ras_intr_triggered() &&
3333		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3334			adev->ip_blocks[i].status.hw = false;
3335			continue;
3336		}
3337
3338		/* skip unnecessary suspend if we have not initialized them yet */
3339		if (adev->gmc.xgmi.pending_reset &&
3340		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3341		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3342		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3343		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3344			adev->ip_blocks[i].status.hw = false;
3345			continue;
3346		}
3347
3348		/* skip suspend of gfx/mes and psp for S0ix
3349		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3350		 * like at runtime. PSP is also part of the always on hardware
3351		 * so no need to suspend it.
3352		 */
3353		if (adev->in_s0ix &&
3354		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3355		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3356		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3357			continue;
3358
3359		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3360		if (adev->in_s0ix &&
3361		    (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3362		     IP_VERSION(5, 0, 0)) &&
3363		    (adev->ip_blocks[i].version->type ==
3364		     AMD_IP_BLOCK_TYPE_SDMA))
3365			continue;
3366
3367		/* During cold boot, swPSP provides the IMU and RLC FW binaries to TOS.
3368		 * These live in TMR, hence are expected to be reused by PSP-TOS to reload
3369		 * from this location, and RLC Autoload also gets loaded automatically
3370		 * from here based on the PMFW -> PSP message during the re-init sequence.
3371		 * Therefore, PSP suspend & resume should be skipped to avoid destroying
3372		 * the TMR and reloading FWs again for IMU-enabled APU ASICs.
3373		 */
3374		if (amdgpu_in_reset(adev) &&
3375		    (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3376		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3377			continue;
3378
3380		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3381		/* XXX handle errors */
3382		if (r) {
3383			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3384				  adev->ip_blocks[i].version->funcs->name, r);
3385		}
3386		adev->ip_blocks[i].status.hw = false;
3387		/* handle putting the SMC in the appropriate state */
3388		if (!amdgpu_sriov_vf(adev)) {
3389			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3390				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3391				if (r) {
3392					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3393							adev->mp1_state, r);
3394					return r;
3395				}
3396			}
3397		}
3398	}
3399
3400	return 0;
3401}
3402
3403/**
3404 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3405 *
3406 * @adev: amdgpu_device pointer
3407 *
3408 * Main suspend function for hardware IPs.  The list of all the hardware
3409 * IPs that make up the asic is walked, clockgating is disabled and the
3410 * suspend callbacks are run.  suspend puts the hardware and software state
3411 * in each IP into a state suitable for suspend.
3412 * Returns 0 on success, negative error code on failure.
3413 */
3414int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3415{
3416	int r;
3417
3418	if (amdgpu_sriov_vf(adev)) {
3419		amdgpu_virt_fini_data_exchange(adev);
3420		amdgpu_virt_request_full_gpu(adev, false);
3421	}
3422
3423	amdgpu_ttm_set_buffer_funcs_status(adev, false);
3424
3425	r = amdgpu_device_ip_suspend_phase1(adev);
3426	if (r)
3427		return r;
3428	r = amdgpu_device_ip_suspend_phase2(adev);
3429
3430	if (amdgpu_sriov_vf(adev))
3431		amdgpu_virt_release_full_gpu(adev, false);
3432
3433	return r;
3434}
3435
3436static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3437{
3438	int i, r;
3439
3440	static enum amd_ip_block_type ip_order[] = {
3441		AMD_IP_BLOCK_TYPE_COMMON,
3442		AMD_IP_BLOCK_TYPE_GMC,
3443		AMD_IP_BLOCK_TYPE_PSP,
3444		AMD_IP_BLOCK_TYPE_IH,
3445	};
3446
3447	for (i = 0; i < adev->num_ip_blocks; i++) {
3448		int j;
3449		struct amdgpu_ip_block *block;
3450
3451		block = &adev->ip_blocks[i];
3452		block->status.hw = false;
3453
3454		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3455
3456			if (block->version->type != ip_order[j] ||
3457				!block->status.valid)
3458				continue;
3459
3460			r = block->version->funcs->hw_init(adev);
3461			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3462			if (r)
3463				return r;
3464			block->status.hw = true;
3465		}
3466	}
3467
3468	return 0;
3469}
3470
3471static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3472{
3473	int i, r;
3474
3475	static enum amd_ip_block_type ip_order[] = {
3476		AMD_IP_BLOCK_TYPE_SMC,
3477		AMD_IP_BLOCK_TYPE_DCE,
3478		AMD_IP_BLOCK_TYPE_GFX,
3479		AMD_IP_BLOCK_TYPE_SDMA,
3480		AMD_IP_BLOCK_TYPE_MES,
3481		AMD_IP_BLOCK_TYPE_UVD,
3482		AMD_IP_BLOCK_TYPE_VCE,
3483		AMD_IP_BLOCK_TYPE_VCN,
3484		AMD_IP_BLOCK_TYPE_JPEG
3485	};
3486
3487	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3488		int j;
3489		struct amdgpu_ip_block *block;
3490
3491		for (j = 0; j < adev->num_ip_blocks; j++) {
3492			block = &adev->ip_blocks[j];
3493
3494			if (block->version->type != ip_order[i] ||
3495				!block->status.valid ||
3496				block->status.hw)
3497				continue;
3498
3499			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3500				r = block->version->funcs->resume(adev);
3501			else
3502				r = block->version->funcs->hw_init(adev);
3503
3504			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3505			if (r)
3506				return r;
3507			block->status.hw = true;
3508		}
3509	}
3510
3511	return 0;
3512}
3513
3514/**
3515 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3516 *
3517 * @adev: amdgpu_device pointer
3518 *
3519 * First resume function for hardware IPs.  The list of all the hardware
3520 * IPs that make up the asic is walked and the resume callbacks are run for
3521 * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3522 * after a suspend and updates the software state as necessary.  This
3523 * function is also used for restoring the GPU after a GPU reset.
3524 * Returns 0 on success, negative error code on failure.
3525 */
3526static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3527{
3528	int i, r;
3529
3530	for (i = 0; i < adev->num_ip_blocks; i++) {
3531		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3532			continue;
3533		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3534		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3535		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3536		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3537
3538			r = adev->ip_blocks[i].version->funcs->resume(adev);
3539			if (r) {
3540				DRM_ERROR("resume of IP block <%s> failed %d\n",
3541					  adev->ip_blocks[i].version->funcs->name, r);
3542				return r;
3543			}
3544			adev->ip_blocks[i].status.hw = true;
3545		}
3546	}
3547
3548	return 0;
3549}
3550
3551/**
3552 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3553 *
3554 * @adev: amdgpu_device pointer
3555 *
3556 * Second resume function for hardware IPs.  The list of all the hardware
3557 * IPs that make up the asic is walked and the resume callbacks are run for
3558 * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3559 * functional state after a suspend and updates the software state as
3560 * necessary.  This function is also used for restoring the GPU after a GPU
3561 * reset.
3562 * Returns 0 on success, negative error code on failure.
3563 */
3564static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3565{
3566	int i, r;
3567
3568	for (i = 0; i < adev->num_ip_blocks; i++) {
3569		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3570			continue;
3571		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3572		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3573		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3574		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3575			continue;
3576		r = adev->ip_blocks[i].version->funcs->resume(adev);
3577		if (r) {
3578			DRM_ERROR("resume of IP block <%s> failed %d\n",
3579				  adev->ip_blocks[i].version->funcs->name, r);
3580			return r;
3581		}
3582		adev->ip_blocks[i].status.hw = true;
3583	}
3584
3585	return 0;
3586}
3587
3588/**
3589 * amdgpu_device_ip_resume - run resume for hardware IPs
3590 *
3591 * @adev: amdgpu_device pointer
3592 *
3593 * Main resume function for hardware IPs.  The hardware IPs
3594 * are split into two resume functions because they are
3595 * also used in recovering from a GPU reset and some additional
3596 * steps need to be taken between them.  In this case (S3/S4) they are
3597 * run sequentially.
3598 * Returns 0 on success, negative error code on failure.
3599 */
3600static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3601{
3602	int r;
3603
3604	r = amdgpu_device_ip_resume_phase1(adev);
3605	if (r)
3606		return r;
3607
3608	r = amdgpu_device_fw_loading(adev);
3609	if (r)
3610		return r;
3611
3612	r = amdgpu_device_ip_resume_phase2(adev);
3613
3614	if (adev->mman.buffer_funcs_ring->sched.ready)
3615		amdgpu_ttm_set_buffer_funcs_status(adev, true);
3616
3617	return r;
3618}
3619
3620/**
3621 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3622 *
3623 * @adev: amdgpu_device pointer
3624 *
3625 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3626 */
3627static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3628{
3629	if (amdgpu_sriov_vf(adev)) {
3630		if (adev->is_atom_fw) {
3631			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3632				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3633		} else {
3634			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3635				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3636		}
3637
3638		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3639			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3640	}
3641}
3642
3643/**
3644 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3645 *
3646 * @asic_type: AMD asic type
3647 *
3648 * Check if there is DC (new modesetting infrastructure) support for an asic.
3649 * returns true if DC has support, false if not.
3650 */
3651bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3652{
3653	switch (asic_type) {
3654#ifdef CONFIG_DRM_AMDGPU_SI
3655	case CHIP_HAINAN:
3656#endif
3657	case CHIP_TOPAZ:
3658		/* chips with no display hardware */
3659		return false;
3660#if defined(CONFIG_DRM_AMD_DC)
3661	case CHIP_TAHITI:
3662	case CHIP_PITCAIRN:
3663	case CHIP_VERDE:
3664	case CHIP_OLAND:
3665		/*
3666		 * We have systems in the wild with these ASICs that require
3667		 * LVDS and VGA support which is not supported with DC.
3668		 *
3669		 * Fall back to the non-DC driver here by default so as not to
3670		 * cause regressions.
3671		 */
3672#if defined(CONFIG_DRM_AMD_DC_SI)
3673		return amdgpu_dc > 0;
3674#else
3675		return false;
3676#endif
3677	case CHIP_BONAIRE:
3678	case CHIP_KAVERI:
3679	case CHIP_KABINI:
3680	case CHIP_MULLINS:
3681		/*
3682		 * We have systems in the wild with these ASICs that require
3683		 * VGA support which is not supported with DC.
3684		 *
3685		 * Fall back to the non-DC driver here by default so as not to
3686		 * cause regressions.
3687		 */
3688		return amdgpu_dc > 0;
3689	default:
3690		return amdgpu_dc != 0;
3691#else
3692	default:
3693		if (amdgpu_dc > 0)
3694			DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
3695		return false;
3696#endif
3697	}
3698}
3699
3700/**
3701 * amdgpu_device_has_dc_support - check if dc is supported
3702 *
3703 * @adev: amdgpu_device pointer
3704 *
3705 * Returns true for supported, false for not supported
3706 */
3707bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3708{
3709	if (adev->enable_virtual_display ||
3710	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3711		return false;
3712
3713	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3714}
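
/*
 * A minimal usage sketch for amdgpu_device_has_dc_support(): callers pick
 * between the DC and legacy display paths based on this helper (the
 * function names below are illustrative, not the actual init path):
 *
 *	if (amdgpu_device_has_dc_support(adev))
 *		r = example_dc_display_init(adev);	// hypothetical DC path
 *	else
 *		r = example_legacy_display_init(adev);	// hypothetical non-DC path
 */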
3715
3716static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3717{
3718	struct amdgpu_device *adev =
3719		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3720	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3721
3722	/* It's a bug to not have a hive within this function */
3723	if (WARN_ON(!hive))
3724		return;
3725
3726	/*
3727	 * Use task barrier to synchronize all xgmi reset works across the
3728	 * hive. task_barrier_enter and task_barrier_exit will block
3729	 * until all the threads running the xgmi reset works reach
3730	 * those points. task_barrier_full will do both blocks.
3731	 */
3732	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3733
3734		task_barrier_enter(&hive->tb);
3735		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3736
3737		if (adev->asic_reset_res)
3738			goto fail;
3739
3740		task_barrier_exit(&hive->tb);
3741		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3742
3743		if (adev->asic_reset_res)
3744			goto fail;
3745
3746		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
3747	} else {
3748
3749		task_barrier_full(&hive->tb);
3750		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3751	}
3752
3753fail:
3754	if (adev->asic_reset_res)
3755		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3756			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3757	amdgpu_put_xgmi_hive(hive);
3758}
3759
3760static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3761{
3762	char *input = amdgpu_lockup_timeout;
3763	char *timeout_setting = NULL;
3764	int index = 0;
3765	long timeout;
3766	int ret = 0;
3767
3768	/*
3769	 * By default the timeout is 10000 ms for non-compute jobs
3770	 * and 60000 ms for compute jobs.
3771	 * Under SR-IOV, compute jobs keep the 60000 ms default only in
3772	 * pp_one_vf mode; otherwise they fall back to 10000 ms.
3773	 */
3774	adev->gfx_timeout = msecs_to_jiffies(10000);
3775	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3776	if (amdgpu_sriov_vf(adev))
3777		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3778					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3779	else
3780		adev->compute_timeout = msecs_to_jiffies(60000);
3781
3782	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3783		while ((timeout_setting = strsep(&input, ",")) &&
3784				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3785			ret = kstrtol(timeout_setting, 0, &timeout);
3786			if (ret)
3787				return ret;
3788
3789			if (timeout == 0) {
3790				index++;
3791				continue;
3792			} else if (timeout < 0) {
3793				timeout = MAX_SCHEDULE_TIMEOUT;
3794				dev_warn(adev->dev, "lockup timeout disabled");
3795				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3796			} else {
3797				timeout = msecs_to_jiffies(timeout);
3798			}
3799
3800			switch (index++) {
3801			case 0:
3802				adev->gfx_timeout = timeout;
3803				break;
3804			case 1:
3805				adev->compute_timeout = timeout;
3806				break;
3807			case 2:
3808				adev->sdma_timeout = timeout;
3809				break;
3810			case 3:
3811				adev->video_timeout = timeout;
3812				break;
3813			default:
3814				break;
3815			}
3816		}
3817		/*
3818		 * There is only one value specified and
3819		 * it should apply to all non-compute jobs.
3820		 */
3821		if (index == 1) {
3822			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3823			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3824				adev->compute_timeout = adev->gfx_timeout;
3825		}
3826	}
3827
3828	return ret;
3829}
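
/*
 * A worked example of the parsing above (values are milliseconds and are
 * consumed in the order gfx, compute, sdma, video):
 *
 *	amdgpu.lockup_timeout=5000,120000,0,-1
 *
 * sets the gfx timeout to 5 s and the compute timeout to 120 s, keeps the
 * sdma default (0 skips a slot), and disables the video timeout (negative
 * values map to MAX_SCHEDULE_TIMEOUT).  A single value such as
 * amdgpu.lockup_timeout=7000 applies to all non-compute jobs, and also to
 * compute jobs under SR-IOV or passthrough.
 */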
3830
3831/**
3832 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3833 *
3834 * @adev: amdgpu_device pointer
3835 *
3836 * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3837 */
3838static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3839{
3840	struct iommu_domain *domain;
3841
3842	domain = iommu_get_domain_for_dev(adev->dev);
3843	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3844		adev->ram_is_direct_mapped = true;
3845}
3846
3847static const struct attribute *amdgpu_dev_attributes[] = {
3848	&dev_attr_pcie_replay_count.attr,
3849	NULL
3850};
3851
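/*
 * Decide whether Mid-Command-Buffer Preemption (MCBP) should be used:
 * the amdgpu.mcbp module parameter forces it on (1) or off (0), and
 * SR-IOV VFs always enable it regardless of the parameter.
 */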
3852static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3853{
3854	if (amdgpu_mcbp == 1)
3855		adev->gfx.mcbp = true;
3856	else if (amdgpu_mcbp == 0)
3857		adev->gfx.mcbp = false;
3858
3859	if (amdgpu_sriov_vf(adev))
3860		adev->gfx.mcbp = true;
3861
3862	if (adev->gfx.mcbp)
3863		DRM_INFO("MCBP is enabled\n");
3864}
3865
3866/**
3867 * amdgpu_device_init - initialize the driver
3868 *
3869 * @adev: amdgpu_device pointer
3870 * @flags: driver flags
3871 *
3872 * Initializes the driver info and hw (all asics).
3873 * Returns 0 for success or an error on failure.
3874 * Called at driver startup.
3875 */
3876int amdgpu_device_init(struct amdgpu_device *adev,
3877		       uint32_t flags)
3878{
3879	struct drm_device *ddev = adev_to_drm(adev);
3880	struct pci_dev *pdev = adev->pdev;
3881	int r, i;
3882	bool px = false;
3883	u32 max_MBps;
3884	int tmp;
3885
3886	adev->shutdown = false;
3887	adev->flags = flags;
3888
3889	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3890		adev->asic_type = amdgpu_force_asic_type;
3891	else
3892		adev->asic_type = flags & AMD_ASIC_MASK;
3893
3894	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3895	if (amdgpu_emu_mode == 1)
3896		adev->usec_timeout *= 10;
3897	adev->gmc.gart_size = 512 * 1024 * 1024;
3898	adev->accel_working = false;
3899	adev->num_rings = 0;
3900	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3901	adev->mman.buffer_funcs = NULL;
3902	adev->mman.buffer_funcs_ring = NULL;
3903	adev->vm_manager.vm_pte_funcs = NULL;
3904	adev->vm_manager.vm_pte_num_scheds = 0;
3905	adev->gmc.gmc_funcs = NULL;
3906	adev->harvest_ip_mask = 0x0;
3907	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3908	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3909
3910	adev->smc_rreg = &amdgpu_invalid_rreg;
3911	adev->smc_wreg = &amdgpu_invalid_wreg;
3912	adev->pcie_rreg = &amdgpu_invalid_rreg;
3913	adev->pcie_wreg = &amdgpu_invalid_wreg;
3914	adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
3915	adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
3916	adev->pciep_rreg = &amdgpu_invalid_rreg;
3917	adev->pciep_wreg = &amdgpu_invalid_wreg;
3918	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3919	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3920	adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
3921	adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
3922	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3923	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3924	adev->didt_rreg = &amdgpu_invalid_rreg;
3925	adev->didt_wreg = &amdgpu_invalid_wreg;
3926	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3927	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3928	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3929	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3930
3931	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3932		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3933		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3934
3935	/* mutex initialization is all done here so we
3936	 * can call these functions again without locking issues
3937	 */
3938	mutex_init(&adev->firmware.mutex);
3939	mutex_init(&adev->pm.mutex);
3940	mutex_init(&adev->gfx.gpu_clock_mutex);
3941	mutex_init(&adev->srbm_mutex);
3942	mutex_init(&adev->gfx.pipe_reserve_mutex);
3943	mutex_init(&adev->gfx.gfx_off_mutex);
3944	mutex_init(&adev->gfx.partition_mutex);
3945	mutex_init(&adev->grbm_idx_mutex);
3946	mutex_init(&adev->mn_lock);
3947	mutex_init(&adev->virt.vf_errors.lock);
3948	hash_init(adev->mn_hash);
3949	mutex_init(&adev->psp.mutex);
3950	mutex_init(&adev->notifier_lock);
3951	mutex_init(&adev->pm.stable_pstate_ctx_lock);
3952	mutex_init(&adev->benchmark_mutex);
3953
3954	amdgpu_device_init_apu_flags(adev);
3955
3956	r = amdgpu_device_check_arguments(adev);
3957	if (r)
3958		return r;
3959
3960	spin_lock_init(&adev->mmio_idx_lock);
3961	spin_lock_init(&adev->smc_idx_lock);
3962	spin_lock_init(&adev->pcie_idx_lock);
3963	spin_lock_init(&adev->uvd_ctx_idx_lock);
3964	spin_lock_init(&adev->didt_idx_lock);
3965	spin_lock_init(&adev->gc_cac_idx_lock);
3966	spin_lock_init(&adev->se_cac_idx_lock);
3967	spin_lock_init(&adev->audio_endpt_idx_lock);
3968	spin_lock_init(&adev->mm_stats.lock);
3969
3970	INIT_LIST_HEAD(&adev->shadow_list);
3971	mutex_init(&adev->shadow_list_lock);
3972
3973	INIT_LIST_HEAD(&adev->reset_list);
3974
3975	INIT_LIST_HEAD(&adev->ras_list);
3976
3977	INIT_LIST_HEAD(&adev->pm.od_kobj_list);
3978
3979	INIT_DELAYED_WORK(&adev->delayed_init_work,
3980			  amdgpu_device_delayed_init_work_handler);
3981	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3982			  amdgpu_device_delay_enable_gfx_off);
3983
3984	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3985
3986	adev->gfx.gfx_off_req_count = 1;
3987	adev->gfx.gfx_off_residency = 0;
3988	adev->gfx.gfx_off_entrycount = 0;
3989	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3990
3991	atomic_set(&adev->throttling_logging_enabled, 1);
3992	/*
3993	 * If throttling continues, logging will be performed every minute
3994	 * to avoid log flooding. "-1" is subtracted since the thermal
3995	 * throttling interrupt comes every second. Thus, the total logging
3996	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3997	 * for throttling interrupt) = 60 seconds.
3998	 */
3999	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
4000	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
4001
4002	/* Registers mapping */
4003	/* TODO: block userspace mapping of io register */
4004	if (adev->asic_type >= CHIP_BONAIRE) {
4005		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
4006		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
4007	} else {
4008		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
4009		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
4010	}
4011
4012	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
4013		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
4014
4015	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
4016	if (!adev->rmmio)
4017		return -ENOMEM;
4018
4019	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
4020	DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
4021
4022	/*
4023	 * The reset domain needs to be present early, before the XGMI hive is
4024	 * discovered (if any) and initialized, so the reset sem and in_gpu_reset
4025	 * flag can be used early on during init and before calling RREG32.
4026	 */
4027	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
4028	if (!adev->reset_domain)
4029		return -ENOMEM;
4030
4031	/* detect hw virtualization here */
4032	amdgpu_detect_virtualization(adev);
4033
4034	amdgpu_device_get_pcie_info(adev);
4035
4036	r = amdgpu_device_get_job_timeout_settings(adev);
4037	if (r) {
4038		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
4039		return r;
4040	}
4041
4042	/* early init functions */
4043	r = amdgpu_device_ip_early_init(adev);
4044	if (r)
4045		return r;
4046
4047	amdgpu_device_set_mcbp(adev);
4048
4049	/* Get rid of things like offb */
4050	r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
4051	if (r)
4052		return r;
4053
4054	/* Enable TMZ based on IP_VERSION */
4055	amdgpu_gmc_tmz_set(adev);
4056
4057	amdgpu_gmc_noretry_set(adev);
4058	/* Need to get xgmi info early to decide the reset behavior*/
4059	if (adev->gmc.xgmi.supported) {
4060		r = adev->gfxhub.funcs->get_xgmi_info(adev);
4061		if (r)
4062			return r;
4063	}
4064
4065	/* enable PCIE atomic ops */
4066	if (amdgpu_sriov_vf(adev)) {
4067		if (adev->virt.fw_reserve.p_pf2vf)
4068			adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
4069						      adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
4070				(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4071	/* APUs with gfx9 onwards don't rely on PCIe atomics; rather, an
4072	 * internal path natively supports atomics, so set have_atomics_support to true.
4073	 */
4074	} else if ((adev->flags & AMD_IS_APU) &&
4075		   (amdgpu_ip_version(adev, GC_HWIP, 0) >
4076		    IP_VERSION(9, 0, 0))) {
4077		adev->have_atomics_support = true;
4078	} else {
4079		adev->have_atomics_support =
4080			!pci_enable_atomic_ops_to_root(adev->pdev,
4081					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
4082					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4083	}
4084
4085	if (!adev->have_atomics_support)
4086		dev_info(adev->dev, "PCIE atomic ops is not supported\n");
4087
4088	/* doorbell bar mapping and doorbell index init*/
4089	amdgpu_doorbell_init(adev);
4090
4091	if (amdgpu_emu_mode == 1) {
4092		/* post the asic on emulation mode */
4093		emu_soc_asic_init(adev);
4094		goto fence_driver_init;
4095	}
4096
4097	amdgpu_reset_init(adev);
4098
4099	/* detect if we are with an SRIOV vbios */
4100	if (adev->bios)
4101		amdgpu_device_detect_sriov_bios(adev);
4102
4103	/* check if we need to reset the asic
4104	 * e.g., the driver was not cleanly unloaded previously, etc.
4105	 */
4106	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
4107		if (adev->gmc.xgmi.num_physical_nodes) {
4108			dev_info(adev->dev, "Pending hive reset.\n");
4109			adev->gmc.xgmi.pending_reset = true;
4110			/* Only need to init necessary block for SMU to handle the reset */
4111			for (i = 0; i < adev->num_ip_blocks; i++) {
4112				if (!adev->ip_blocks[i].status.valid)
4113					continue;
4114				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
4115				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
4116				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
4117				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
4118					DRM_DEBUG("IP %s disabled for hw_init.\n",
4119						adev->ip_blocks[i].version->funcs->name);
4120					adev->ip_blocks[i].status.hw = true;
4121				}
4122			}
4123		} else {
4124			tmp = amdgpu_reset_method;
4125			/* It should do a default reset when loading or reloading the driver,
4126			 * regardless of the module parameter reset_method.
4127			 */
4128			amdgpu_reset_method = AMD_RESET_METHOD_NONE;
4129			r = amdgpu_asic_reset(adev);
4130			amdgpu_reset_method = tmp;
4131			if (r) {
4132				dev_err(adev->dev, "asic reset on init failed\n");
4133				goto failed;
4134			}
4135		}
4136	}
4137
4138	/* Post card if necessary */
4139	if (amdgpu_device_need_post(adev)) {
4140		if (!adev->bios) {
4141			dev_err(adev->dev, "no vBIOS found\n");
4142			r = -EINVAL;
4143			goto failed;
4144		}
4145		DRM_INFO("GPU posting now...\n");
4146		r = amdgpu_device_asic_init(adev);
4147		if (r) {
4148			dev_err(adev->dev, "gpu post error!\n");
4149			goto failed;
4150		}
4151	}
4152
4153	if (adev->bios) {
4154		if (adev->is_atom_fw) {
4155			/* Initialize clocks */
4156			r = amdgpu_atomfirmware_get_clock_info(adev);
4157			if (r) {
4158				dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
4159				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4160				goto failed;
4161			}
4162		} else {
4163			/* Initialize clocks */
4164			r = amdgpu_atombios_get_clock_info(adev);
4165			if (r) {
4166				dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
4167				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4168				goto failed;
4169			}
4170			/* init i2c buses */
4171			if (!amdgpu_device_has_dc_support(adev))
4172				amdgpu_atombios_i2c_init(adev);
4173		}
4174	}
4175
4176fence_driver_init:
4177	/* Fence driver */
4178	r = amdgpu_fence_driver_sw_init(adev);
4179	if (r) {
4180		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
4181		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
4182		goto failed;
4183	}
4184
4185	/* init the mode config */
4186	drm_mode_config_init(adev_to_drm(adev));
4187
4188	r = amdgpu_device_ip_init(adev);
4189	if (r) {
4190		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
4191		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
4192		goto release_ras_con;
4193	}
4194
4195	amdgpu_fence_driver_hw_init(adev);
4196
4197	dev_info(adev->dev,
4198		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
4199			adev->gfx.config.max_shader_engines,
4200			adev->gfx.config.max_sh_per_se,
4201			adev->gfx.config.max_cu_per_sh,
4202			adev->gfx.cu_info.number);
4203
4204	adev->accel_working = true;
4205
4206	amdgpu_vm_check_compute_bug(adev);
4207
4208	/* Initialize the buffer migration limit. */
4209	if (amdgpu_moverate >= 0)
4210		max_MBps = amdgpu_moverate;
4211	else
4212		max_MBps = 8; /* Allow 8 MB/s. */
4213	/* Get a log2 for easy divisions. */
4214	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
4215
4216	/*
4217	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
4218	 * Otherwise the mgpu fan boost feature will be skipped because the
4219	 * gpu instance count comes up short.
4220	 */
4221	amdgpu_register_gpu_instance(adev);
4222
4223	/* enable clockgating, etc. after ib tests, etc. since some blocks require
4224	 * explicit gating rather than handling it automatically.
4225	 */
4226	if (!adev->gmc.xgmi.pending_reset) {
4227		r = amdgpu_device_ip_late_init(adev);
4228		if (r) {
4229			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4230			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4231			goto release_ras_con;
4232		}
4233		/* must succeed. */
4234		amdgpu_ras_resume(adev);
4235		queue_delayed_work(system_wq, &adev->delayed_init_work,
4236				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4237	}
4238
4239	if (amdgpu_sriov_vf(adev)) {
4240		amdgpu_virt_release_full_gpu(adev, true);
4241		flush_delayed_work(&adev->delayed_init_work);
4242	}
4243
4244	/*
4245	 * Place the sysfs registration after `late_init`, as some of the
4246	 * operations performed in `late_init` might affect how the sysfs
4247	 * interfaces are created.
4248	 */
4249	r = amdgpu_atombios_sysfs_init(adev);
4250	if (r)
4251		drm_err(&adev->ddev,
4252			"registering atombios sysfs failed (%d).\n", r);
4253
4254	r = amdgpu_pm_sysfs_init(adev);
4255	if (r)
4256		DRM_ERROR("registering pm sysfs failed (%d).\n", r);
4257
4258	r = amdgpu_ucode_sysfs_init(adev);
4259	if (r) {
4260		adev->ucode_sysfs_en = false;
4261		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
4262	} else
4263		adev->ucode_sysfs_en = true;
4264
4265	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
4266	if (r)
4267		dev_err(adev->dev, "Could not create amdgpu device attr\n");
4268
4269	r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
4270	if (r)
4271		dev_err(adev->dev,
4272			"Could not create amdgpu board attributes\n");
4273
4274	amdgpu_fru_sysfs_init(adev);
4275	amdgpu_reg_state_sysfs_init(adev);
4276
4277	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
4278		r = amdgpu_pmu_init(adev);
4279		if (r)
4280			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
4281	}

4282	/* Have stored pci confspace at hand for restore in sudden PCI error */
4283	if (amdgpu_device_cache_pci_state(adev->pdev))
4284		pci_restore_state(pdev);
4285
4286	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
4287	/* this will fail for cards that aren't VGA class devices, just
4288	 * ignore it
4289	 */
4290	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4291		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4292
4293	px = amdgpu_device_supports_px(ddev);
4294
4295	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4296				apple_gmux_detect(NULL, NULL)))
4297		vga_switcheroo_register_client(adev->pdev,
4298					       &amdgpu_switcheroo_ops, px);
4299
4300	if (px)
4301		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4302
4303	if (adev->gmc.xgmi.pending_reset)
4304		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
4305				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4306
4307	amdgpu_device_check_iommu_direct_map(adev);
4308
4309	return 0;
4310
4311release_ras_con:
4312	if (amdgpu_sriov_vf(adev))
4313		amdgpu_virt_release_full_gpu(adev, true);
4314
4315	/* failed in exclusive mode due to timeout */
4316	if (amdgpu_sriov_vf(adev) &&
4317		!amdgpu_sriov_runtime(adev) &&
4318		amdgpu_virt_mmio_blocked(adev) &&
4319		!amdgpu_virt_wait_reset(adev)) {
4320		dev_err(adev->dev, "VF exclusive mode timeout\n");
4321		/* Don't send request since VF is inactive. */
4322		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4323		adev->virt.ops = NULL;
4324		r = -EAGAIN;
4325	}
4326	amdgpu_release_ras_context(adev);
4327
4328failed:
4329	amdgpu_vf_error_trans_all(adev);
4330
4331	return r;
4332}
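
/*
 * A simplified sketch of how amdgpu_device_init() is driven at probe time
 * (hypothetical caller; the real PCI probe path performs more setup and
 * error handling before and after this call):
 *
 *	static int example_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *ent)
 *	{
 *		struct amdgpu_device *adev = ...;	// allocated with the drm_device
 *		int r;
 *
 *		adev->pdev = pdev;
 *		r = amdgpu_device_init(adev, ent->driver_data);
 *		if (r)
 *			return r;
 *		return 0;
 *	}
 */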
4333
4334static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4335{
4336
4337	/* Clear all CPU mappings pointing to this device */
4338	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4339
4340	/* Unmap all mapped bars - Doorbell, registers and VRAM */
4341	amdgpu_doorbell_fini(adev);
4342
4343	iounmap(adev->rmmio);
4344	adev->rmmio = NULL;
4345	if (adev->mman.aper_base_kaddr)
4346		iounmap(adev->mman.aper_base_kaddr);
4347	adev->mman.aper_base_kaddr = NULL;
4348
4349	/* Memory manager related */
4350	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4351		arch_phys_wc_del(adev->gmc.vram_mtrr);
4352		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4353	}
4354}
4355
4356/**
4357 * amdgpu_device_fini_hw - tear down the driver
4358 *
4359 * @adev: amdgpu_device pointer
4360 *
4361 * Tear down the driver info (all asics).
4362 * Called at driver shutdown.
4363 */
4364void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4365{
4366	dev_info(adev->dev, "amdgpu: finishing device.\n");
4367	flush_delayed_work(&adev->delayed_init_work);
4368	adev->shutdown = true;
4369
4370	/* make sure IB test finished before entering exclusive mode
4371	 * to avoid preemption on IB test
4372	 */
4373	if (amdgpu_sriov_vf(adev)) {
4374		amdgpu_virt_request_full_gpu(adev, false);
4375		amdgpu_virt_fini_data_exchange(adev);
4376	}
4377
4378	/* disable all interrupts */
4379	amdgpu_irq_disable_all(adev);
4380	if (adev->mode_info.mode_config_initialized) {
4381		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4382			drm_helper_force_disable_all(adev_to_drm(adev));
4383		else
4384			drm_atomic_helper_shutdown(adev_to_drm(adev));
4385	}
4386	amdgpu_fence_driver_hw_fini(adev);
4387
4388	if (adev->mman.initialized)
4389		drain_workqueue(adev->mman.bdev.wq);
4390
4391	if (adev->pm.sysfs_initialized)
4392		amdgpu_pm_sysfs_fini(adev);
4393	if (adev->ucode_sysfs_en)
4394		amdgpu_ucode_sysfs_fini(adev);
4395	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4396	amdgpu_fru_sysfs_fini(adev);
4397
4398	amdgpu_reg_state_sysfs_fini(adev);
4399
4400	/* disable ras feature must before hw fini */
4401	amdgpu_ras_pre_fini(adev);
4402
4403	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4404
4405	amdgpu_device_ip_fini_early(adev);
4406
4407	amdgpu_irq_fini_hw(adev);
4408
4409	if (adev->mman.initialized)
4410		ttm_device_clear_dma_mappings(&adev->mman.bdev);
4411
4412	amdgpu_gart_dummy_page_fini(adev);
4413
4414	if (drm_dev_is_unplugged(adev_to_drm(adev)))
4415		amdgpu_device_unmap_mmio(adev);
4416
4417}
4418
4419void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4420{
4421	int idx;
4422	bool px;
4423
4424	amdgpu_fence_driver_sw_fini(adev);
4425	amdgpu_device_ip_fini(adev);
4426	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4427	adev->accel_working = false;
4428	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4429
4430	amdgpu_reset_fini(adev);
4431
4432	/* free i2c buses */
4433	if (!amdgpu_device_has_dc_support(adev))
4434		amdgpu_i2c_fini(adev);
4435
4436	if (amdgpu_emu_mode != 1)
4437		amdgpu_atombios_fini(adev);
4438
4439	kfree(adev->bios);
4440	adev->bios = NULL;
4441
4442	kfree(adev->fru_info);
4443	adev->fru_info = NULL;
4444
4445	px = amdgpu_device_supports_px(adev_to_drm(adev));
4446
4447	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4448				apple_gmux_detect(NULL, NULL)))
4449		vga_switcheroo_unregister_client(adev->pdev);
4450
4451	if (px)
4452		vga_switcheroo_fini_domain_pm_ops(adev->dev);
4453
4454	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4455		vga_client_unregister(adev->pdev);
4456
4457	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4458
4459		iounmap(adev->rmmio);
4460		adev->rmmio = NULL;
4461		amdgpu_doorbell_fini(adev);
4462		drm_dev_exit(idx);
4463	}
4464
4465	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4466		amdgpu_pmu_fini(adev);
4467	if (adev->mman.discovery_bin)
4468		amdgpu_discovery_fini(adev);
4469
4470	amdgpu_reset_put_reset_domain(adev->reset_domain);
4471	adev->reset_domain = NULL;
4472
4473	kfree(adev->pci_state);
4474
4475}
4476
4477/**
4478 * amdgpu_device_evict_resources - evict device resources
4479 * @adev: amdgpu device object
4480 *
4481 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4482 * of the vram memory type. Mainly used for evicting device resources
4483 * at suspend time.
4484 *
4485 */
4486static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4487{
4488	int ret;
4489
4490	/* No need to evict vram on APUs for suspend to ram or s2idle */
4491	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4492		return 0;
4493
4494	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4495	if (ret)
4496		DRM_WARN("evicting device resources failed\n");
4497	return ret;
4498}
4499
4500/*
4501 * Suspend & resume.
4502 */
4503/**
4504 * amdgpu_device_prepare - prepare for device suspend
4505 *
4506 * @dev: drm dev pointer
4507 *
4508 * Prepare to put the hw in the suspend state (all asics).
4509 * Returns 0 for success or an error on failure.
4510 * Called at driver suspend.
4511 */
4512int amdgpu_device_prepare(struct drm_device *dev)
4513{
4514	struct amdgpu_device *adev = drm_to_adev(dev);
4515	int i, r;
4516
4517	amdgpu_choose_low_power_state(adev);
4518
4519	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4520		return 0;
4521
4522	/* Evict the majority of BOs before starting suspend sequence */
4523	r = amdgpu_device_evict_resources(adev);
4524	if (r)
4525		goto unprepare;
4526
4527	for (i = 0; i < adev->num_ip_blocks; i++) {
4528		if (!adev->ip_blocks[i].status.valid)
4529			continue;
4530		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
4531			continue;
4532		r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
4533		if (r)
4534			goto unprepare;
4535	}
4536
4537	return 0;
4538
4539unprepare:
4540	adev->in_s0ix = adev->in_s3 = false;
4541
4542	return r;
4543}
4544
4545/**
4546 * amdgpu_device_suspend - initiate device suspend
4547 *
4548 * @dev: drm dev pointer
4549 * @fbcon: notify the fbdev of suspend
4550 *
4551 * Puts the hw in the suspend state (all asics).
4552 * Returns 0 for success or an error on failure.
4553 * Called at driver suspend.
4554 */
4555int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4556{
4557	struct amdgpu_device *adev = drm_to_adev(dev);
4558	int r = 0;
4559
4560	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4561		return 0;
4562
4563	adev->in_suspend = true;
4564
4565	if (amdgpu_sriov_vf(adev)) {
4566		amdgpu_virt_fini_data_exchange(adev);
4567		r = amdgpu_virt_request_full_gpu(adev, false);
4568		if (r)
4569			return r;
4570	}
4571
4572	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4573		DRM_WARN("smart shift update failed\n");
4574
4575	if (fbcon)
4576		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4577
4578	cancel_delayed_work_sync(&adev->delayed_init_work);
4579
4580	amdgpu_ras_suspend(adev);
4581
4582	amdgpu_device_ip_suspend_phase1(adev);
4583
4584	if (!adev->in_s0ix)
4585		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4586
4587	r = amdgpu_device_evict_resources(adev);
4588	if (r)
4589		return r;
4590
4591	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4592
4593	amdgpu_fence_driver_hw_fini(adev);
4594
4595	amdgpu_device_ip_suspend_phase2(adev);
4596
4597	if (amdgpu_sriov_vf(adev))
4598		amdgpu_virt_release_full_gpu(adev, false);
4599
4600	r = amdgpu_dpm_notify_rlc_state(adev, false);
4601	if (r)
4602		return r;
4603
4604	return 0;
4605}
4606
4607/**
4608 * amdgpu_device_resume - initiate device resume
4609 *
4610 * @dev: drm dev pointer
4611 * @fbcon: notify the fbdev of resume
4612 *
4613 * Bring the hw back to operating state (all asics).
4614 * Returns 0 for success or an error on failure.
4615 * Called at driver resume.
4616 */
4617int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4618{
4619	struct amdgpu_device *adev = drm_to_adev(dev);
4620	int r = 0;
4621
4622	if (amdgpu_sriov_vf(adev)) {
4623		r = amdgpu_virt_request_full_gpu(adev, true);
4624		if (r)
4625			return r;
4626	}
4627
4628	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4629		return 0;
4630
4631	if (adev->in_s0ix)
4632		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4633
4634	/* post card */
4635	if (amdgpu_device_need_post(adev)) {
4636		r = amdgpu_device_asic_init(adev);
4637		if (r)
4638			dev_err(adev->dev, "amdgpu asic init failed\n");
4639	}
4640
4641	r = amdgpu_device_ip_resume(adev);
4642
4643	if (r) {
4644		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4645		goto exit;
4646	}
4647	amdgpu_fence_driver_hw_init(adev);
4648
4649	if (!adev->in_s0ix) {
4650		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4651		if (r)
4652			goto exit;
4653	}
4654
4655	r = amdgpu_device_ip_late_init(adev);
4656	if (r)
4657		goto exit;
4658
4659	queue_delayed_work(system_wq, &adev->delayed_init_work,
4660			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4661exit:
4662	if (amdgpu_sriov_vf(adev)) {
4663		amdgpu_virt_init_data_exchange(adev);
4664		amdgpu_virt_release_full_gpu(adev, true);
4665	}
4666
4667	if (r)
4668		return r;
4669
4670	/* Make sure IB tests flushed */
4671	flush_delayed_work(&adev->delayed_init_work);
4672
4673	if (fbcon)
4674		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4675
4676	amdgpu_ras_resume(adev);
4677
4678	if (adev->mode_info.num_crtc) {
4679		/*
4680		 * Most of the connector probing functions try to acquire runtime pm
4681		 * refs to ensure that the GPU is powered on when connector polling is
4682		 * performed. Since we're calling this from a runtime PM callback,
4683		 * trying to acquire rpm refs will cause us to deadlock.
4684		 *
4685		 * Since we're guaranteed to be holding the rpm lock, it's safe to
4686		 * temporarily disable the rpm helpers so this doesn't deadlock us.
4687		 */
4688#ifdef CONFIG_PM
4689		dev->dev->power.disable_depth++;
4690#endif
4691		if (!adev->dc_enabled)
4692			drm_helper_hpd_irq_event(dev);
4693		else
4694			drm_kms_helper_hotplug_event(dev);
4695#ifdef CONFIG_PM
4696		dev->dev->power.disable_depth--;
4697#endif
4698	}
4699	adev->in_suspend = false;
4700
4701	if (adev->enable_mes)
4702		amdgpu_mes_self_test(adev);
4703
4704	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4705		DRM_WARN("smart shift update failed\n");
4706
4707	return 0;
4708}
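
/*
 * A minimal sketch of how the suspend/resume pair above is typically wired
 * into dev_pm_ops (hypothetical callback names; the real callbacks also
 * track states such as adev->in_s3 before delegating here):
 *
 *	static int example_pmops_suspend(struct device *dev)
 *	{
 *		struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *		return amdgpu_device_suspend(drm_dev, true);
 *	}
 *
 *	static int example_pmops_resume(struct device *dev)
 *	{
 *		struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *		return amdgpu_device_resume(drm_dev, true);
 *	}
 */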
4709
4710/**
4711 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4712 *
4713 * @adev: amdgpu_device pointer
4714 *
4715 * The list of all the hardware IPs that make up the asic is walked and
4716 * the check_soft_reset callbacks are run.  check_soft_reset determines
4717 * if the asic is still hung or not.
4718 * Returns true if any of the IPs are still in a hung state, false if not.
4719 */
4720static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4721{
4722	int i;
4723	bool asic_hang = false;
4724
4725	if (amdgpu_sriov_vf(adev))
4726		return true;
4727
4728	if (amdgpu_asic_need_full_reset(adev))
4729		return true;
4730
4731	for (i = 0; i < adev->num_ip_blocks; i++) {
4732		if (!adev->ip_blocks[i].status.valid)
4733			continue;
4734		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4735			adev->ip_blocks[i].status.hang =
4736				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4737		if (adev->ip_blocks[i].status.hang) {
4738			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4739			asic_hang = true;
4740		}
4741	}
4742	return asic_hang;
4743}
4744
4745/**
4746 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4747 *
4748 * @adev: amdgpu_device pointer
4749 *
4750 * The list of all the hardware IPs that make up the asic is walked and the
4751 * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4752 * handles any IP specific hardware or software state changes that are
4753 * necessary for a soft reset to succeed.
4754 * Returns 0 on success, negative error code on failure.
4755 */
4756static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4757{
4758	int i, r = 0;
4759
4760	for (i = 0; i < adev->num_ip_blocks; i++) {
4761		if (!adev->ip_blocks[i].status.valid)
4762			continue;
4763		if (adev->ip_blocks[i].status.hang &&
4764		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4765			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4766			if (r)
4767				return r;
4768		}
4769	}
4770
4771	return 0;
4772}
4773
4774/**
4775 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4776 *
4777 * @adev: amdgpu_device pointer
4778 *
4779 * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4780 * reset is necessary to recover.
4781 * Returns true if a full asic reset is required, false if not.
4782 */
4783static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4784{
4785	int i;
4786
4787	if (amdgpu_asic_need_full_reset(adev))
4788		return true;
4789
4790	for (i = 0; i < adev->num_ip_blocks; i++) {
4791		if (!adev->ip_blocks[i].status.valid)
4792			continue;
4793		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4794		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4795		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4796		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4797		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4798			if (adev->ip_blocks[i].status.hang) {
4799				dev_info(adev->dev, "Some block need full reset!\n");
4800				return true;
4801			}
4802		}
4803	}
4804	return false;
4805}
4806
4807/**
4808 * amdgpu_device_ip_soft_reset - do a soft reset
4809 *
4810 * @adev: amdgpu_device pointer
4811 *
4812 * The list of all the hardware IPs that make up the asic is walked and the
4813 * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4814 * IP specific hardware or software state changes that are necessary to soft
4815 * reset the IP.
4816 * Returns 0 on success, negative error code on failure.
4817 */
4818static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4819{
4820	int i, r = 0;
4821
4822	for (i = 0; i < adev->num_ip_blocks; i++) {
4823		if (!adev->ip_blocks[i].status.valid)
4824			continue;
4825		if (adev->ip_blocks[i].status.hang &&
4826		    adev->ip_blocks[i].version->funcs->soft_reset) {
4827			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4828			if (r)
4829				return r;
4830		}
4831	}
4832
4833	return 0;
4834}
4835
4836/**
4837 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4838 *
4839 * @adev: amdgpu_device pointer
4840 *
4841 * The list of all the hardware IPs that make up the asic is walked and the
4842 * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4843 * handles any IP specific hardware or software state changes that are
4844 * necessary after the IP has been soft reset.
4845 * Returns 0 on success, negative error code on failure.
4846 */
4847static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4848{
4849	int i, r = 0;
4850
4851	for (i = 0; i < adev->num_ip_blocks; i++) {
4852		if (!adev->ip_blocks[i].status.valid)
4853			continue;
4854		if (adev->ip_blocks[i].status.hang &&
4855		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4856			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4857		if (r)
4858			return r;
4859	}
4860
4861	return 0;
4862}
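
/*
 * Taken together, the four soft reset helpers above implement the flow
 * used by amdgpu_device_pre_asic_reset() further down; a condensed sketch:
 *
 *	if (amdgpu_device_ip_check_soft_reset(adev)) {		// anything hung?
 *		amdgpu_device_ip_pre_soft_reset(adev);		// quiesce hung IPs
 *		r = amdgpu_device_ip_soft_reset(adev);		// reset them
 *		amdgpu_device_ip_post_soft_reset(adev);		// restore state
 *		if (r || amdgpu_device_ip_check_soft_reset(adev))
 *			need_full_reset = true;			// fall back to full reset
 *	}
 */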
4863
4864/**
4865 * amdgpu_device_recover_vram - Recover some VRAM contents
4866 *
4867 * @adev: amdgpu_device pointer
4868 *
4869 * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4870 * restore things like GPUVM page tables after a GPU reset where
4871 * the contents of VRAM might be lost.
4872 *
4873 * Returns:
4874 * 0 on success, negative error code on failure.
4875 */
4876static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4877{
4878	struct dma_fence *fence = NULL, *next = NULL;
4879	struct amdgpu_bo *shadow;
4880	struct amdgpu_bo_vm *vmbo;
4881	long r = 1, tmo;
4882
4883	if (amdgpu_sriov_runtime(adev))
4884		tmo = msecs_to_jiffies(8000);
4885	else
4886		tmo = msecs_to_jiffies(100);
4887
4888	dev_info(adev->dev, "recover vram bo from shadow start\n");
4889	mutex_lock(&adev->shadow_list_lock);
4890	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4891		/* If vm is compute context or adev is APU, shadow will be NULL */
4892		if (!vmbo->shadow)
4893			continue;
4894		shadow = vmbo->shadow;
4895
4896		/* No need to recover an evicted BO */
4897		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4898		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4899		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4900			continue;
4901
4902		r = amdgpu_bo_restore_shadow(shadow, &next);
4903		if (r)
4904			break;
4905
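		/*
		 * Pipeline the recovery: the copy for this shadow was just
		 * queued above, so only wait on the previous BO's fence here
		 * and let the two transfers overlap.
		 */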
4906		if (fence) {
4907			tmo = dma_fence_wait_timeout(fence, false, tmo);
4908			dma_fence_put(fence);
4909			fence = next;
4910			if (tmo == 0) {
4911				r = -ETIMEDOUT;
4912				break;
4913			} else if (tmo < 0) {
4914				r = tmo;
4915				break;
4916			}
4917		} else {
4918			fence = next;
4919		}
4920	}
4921	mutex_unlock(&adev->shadow_list_lock);
4922
4923	if (fence)
4924		tmo = dma_fence_wait_timeout(fence, false, tmo);
4925	dma_fence_put(fence);
4926
4927	if (r < 0 || tmo <= 0) {
4928		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4929		return -EIO;
4930	}
4931
4932	dev_info(adev->dev, "recover vram bo from shadow done\n");
4933	return 0;
4934}
4935
4936
4937/**
4938 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4939 *
4940 * @adev: amdgpu_device pointer
4941 * @from_hypervisor: request from hypervisor
4942 *
4943 * Do VF FLR and reinitialize the ASIC.
4944 * Returns 0 on success, otherwise an error.
4945 */
4946static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4947				     bool from_hypervisor)
4948{
4949	int r;
4950	struct amdgpu_hive_info *hive = NULL;
4951	int retry_limit = 0;
4952
4953retry:
4954	amdgpu_amdkfd_pre_reset(adev);
4955
4956	if (from_hypervisor)
4957		r = amdgpu_virt_request_full_gpu(adev, true);
4958	else
4959		r = amdgpu_virt_reset_gpu(adev);
4960	if (r)
4961		return r;
4962	amdgpu_irq_gpu_reset_resume_helper(adev);
4963
4964	/* some SW cleanup the VF needs to do before recovery */
4965	amdgpu_virt_post_reset(adev);
4966
4967	/* Resume IP prior to SMC */
4968	r = amdgpu_device_ip_reinit_early_sriov(adev);
4969	if (r)
4970		goto error;
4971
4972	amdgpu_virt_init_data_exchange(adev);
4973
4974	r = amdgpu_device_fw_loading(adev);
4975	if (r)
4976		return r;
4977
4978	/* now we are okay to resume SMC/CP/SDMA */
4979	r = amdgpu_device_ip_reinit_late_sriov(adev);
4980	if (r)
4981		goto error;
4982
4983	hive = amdgpu_get_xgmi_hive(adev);
4984	/* Update PSP FW topology after reset */
4985	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4986		r = amdgpu_xgmi_update_topology(hive, adev);
4987
4988	if (hive)
4989		amdgpu_put_xgmi_hive(hive);
4990
4991	if (!r) {
4992		r = amdgpu_ib_ring_tests(adev);
4993
4994		amdgpu_amdkfd_post_reset(adev);
4995	}
4996
4997error:
4998	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4999		amdgpu_inc_vram_lost(adev);
5000		r = amdgpu_device_recover_vram(adev);
5001	}
5002	amdgpu_virt_release_full_gpu(adev, true);
5003
5004	if (AMDGPU_RETRY_SRIOV_RESET(r)) {
5005		if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
5006			retry_limit++;
5007			goto retry;
5008		} else
5009			DRM_ERROR("GPU reset retry is beyond the retry limit\n");
5010	}
5011
5012	return r;
5013}
5014
5015/**
5016 * amdgpu_device_has_job_running - check if there is any job in the pending list
5017 *
5018 * @adev: amdgpu_device pointer
5019 *
5020 * check if there is any job in the pending list
5021 */
5022bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
5023{
5024	int i;
5025	struct drm_sched_job *job;
5026
5027	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5028		struct amdgpu_ring *ring = adev->rings[i];
5029
5030		if (!amdgpu_ring_sched_ready(ring))
5031			continue;
5032
5033		spin_lock(&ring->sched.job_list_lock);
5034		job = list_first_entry_or_null(&ring->sched.pending_list,
5035					       struct drm_sched_job, list);
5036		spin_unlock(&ring->sched.job_list_lock);
5037		if (job)
5038			return true;
5039	}
5040	return false;
5041}
5042
5043/**
5044 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
5045 *
5046 * @adev: amdgpu_device pointer
5047 *
5048 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
5049 * a hung GPU.
5050 */
5051bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
5052{
5053
5054	if (amdgpu_gpu_recovery == 0)
5055		goto disabled;
5056
5057	/* Skip soft reset check in fatal error mode */
5058	if (!amdgpu_ras_is_poison_mode_supported(adev))
5059		return true;
5060
5061	if (amdgpu_sriov_vf(adev))
5062		return true;
5063
5064	if (amdgpu_gpu_recovery == -1) {
5065		switch (adev->asic_type) {
5066#ifdef CONFIG_DRM_AMDGPU_SI
5067		case CHIP_VERDE:
5068		case CHIP_TAHITI:
5069		case CHIP_PITCAIRN:
5070		case CHIP_OLAND:
5071		case CHIP_HAINAN:
5072#endif
5073#ifdef CONFIG_DRM_AMDGPU_CIK
5074		case CHIP_KAVERI:
5075		case CHIP_KABINI:
5076		case CHIP_MULLINS:
5077#endif
5078		case CHIP_CARRIZO:
5079		case CHIP_STONEY:
5080		case CHIP_CYAN_SKILLFISH:
5081			goto disabled;
5082		default:
5083			break;
5084		}
5085	}
5086
5087	return true;
5088
5089disabled:
5090	dev_info(adev->dev, "GPU recovery disabled.\n");
5091	return false;
5092}
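
/*
 * Summary of the amdgpu.gpu_recovery handling above: 0 disables recovery
 * outright, a positive value forces it on, and the default of -1 enables
 * it everywhere except the legacy ASICs listed in the switch, where reset
 * support is unreliable.  Booting with, e.g.,
 *
 *	amdgpu.gpu_recovery=1
 *
 * therefore makes this helper return true even on those legacy parts.
 */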
5093
5094int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
5095{
5096	u32 i;
5097	int ret = 0;
5098
5099	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
5100
5101	dev_info(adev->dev, "GPU mode1 reset\n");
5102
5103	/* disable BM */
5104	pci_clear_master(adev->pdev);
5105
5106	amdgpu_device_cache_pci_state(adev->pdev);
5107
5108	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
5109		dev_info(adev->dev, "GPU smu mode1 reset\n");
5110		ret = amdgpu_dpm_mode1_reset(adev);
5111	} else {
5112		dev_info(adev->dev, "GPU psp mode1 reset\n");
5113		ret = psp_gpu_reset(adev);
5114	}
5115
5116	if (ret)
5117		goto mode1_reset_failed;
5118
5119	amdgpu_device_load_pci_state(adev->pdev);
5120	ret = amdgpu_psp_wait_for_bootloader(adev);
5121	if (ret)
5122		goto mode1_reset_failed;
5123
5124	/* wait for asic to come out of reset */
5125	for (i = 0; i < adev->usec_timeout; i++) {
5126		u32 memsize = adev->nbio.funcs->get_memsize(adev);
5127
5128		if (memsize != 0xffffffff)
5129			break;
5130		udelay(1);
5131	}
5132
5133	if (i >= adev->usec_timeout) {
5134		ret = -ETIMEDOUT;
5135		goto mode1_reset_failed;
5136	}
5137
5138	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
5139
5140	return 0;
5141
5142mode1_reset_failed:
5143	dev_err(adev->dev, "GPU mode1 reset failed\n");
5144	return ret;
5145}
5146
5147int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
5148				 struct amdgpu_reset_context *reset_context)
5149{
5150	int i, r = 0;
5151	struct amdgpu_job *job = NULL;
5152	bool need_full_reset =
5153		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5154
5155	if (reset_context->reset_req_dev == adev)
5156		job = reset_context->job;
5157
5158	if (amdgpu_sriov_vf(adev)) {
5159		/* stop the data exchange thread */
5160		amdgpu_virt_fini_data_exchange(adev);
5161	}
5162
5163	amdgpu_fence_driver_isr_toggle(adev, true);
5164
5165	/* block all schedulers and reset given job's ring */
5166	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5167		struct amdgpu_ring *ring = adev->rings[i];
5168
5169		if (!amdgpu_ring_sched_ready(ring))
5170			continue;
5171
5172		/* Clear job fences from the fence drv to avoid force_completion
5173		 * leaving NULL and vm flush fences in the fence drv
5174		 */
5175		amdgpu_fence_driver_clear_job_fences(ring);
5176
5177		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
5178		amdgpu_fence_driver_force_completion(ring);
5179	}
5180
5181	amdgpu_fence_driver_isr_toggle(adev, false);
5182
5183	if (job && job->vm)
5184		drm_sched_increase_karma(&job->base);
5185
5186	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
5187	/* If reset handler not implemented, continue; otherwise return */
5188	if (r == -EOPNOTSUPP)
5189		r = 0;
5190	else
5191		return r;
5192
5193	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
5194	if (!amdgpu_sriov_vf(adev)) {
5195
5196		if (!need_full_reset)
5197			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
5198
5199		if (!need_full_reset && amdgpu_gpu_recovery &&
5200		    amdgpu_device_ip_check_soft_reset(adev)) {
5201			amdgpu_device_ip_pre_soft_reset(adev);
5202			r = amdgpu_device_ip_soft_reset(adev);
5203			amdgpu_device_ip_post_soft_reset(adev);
5204			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5205				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
5206				need_full_reset = true;
5207			}
5208		}
5209
5210		if (need_full_reset) {
5211			r = amdgpu_device_ip_suspend(adev);
5212			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5213		} else {
5214			clear_bit(AMDGPU_NEED_FULL_RESET,
5215				  &reset_context->flags);
5216		}
5217	}
5218
5219	return r;
5220}
5221
5222static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
5223{
5224	int i;
5225
5226	lockdep_assert_held(&adev->reset_domain->sem);
5227
5228	for (i = 0; i < adev->reset_info.num_regs; i++) {
5229		adev->reset_info.reset_dump_reg_value[i] =
5230			RREG32(adev->reset_info.reset_dump_reg_list[i]);
5231
5232		trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i],
5233					     adev->reset_info.reset_dump_reg_value[i]);
5234	}
5235
5236	return 0;
5237}
5238
5239int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5240			 struct amdgpu_reset_context *reset_context)
5241{
5242	struct amdgpu_device *tmp_adev = NULL;
5243	bool need_full_reset, skip_hw_reset, vram_lost = false;
5244	int r = 0;
5245
5246	/* Try reset handler method first */
5247	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5248				    reset_list);
5249	amdgpu_reset_reg_dumps(tmp_adev);
5250
5251	reset_context->reset_device_list = device_list_handle;
5252	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5253	/* If reset handler not implemented, continue; otherwise return */
5254	if (r == -EOPNOTSUPP)
5255		r = 0;
5256	else
5257		return r;
5258
5259	/* Reset handler not implemented, use the default method */
5260	need_full_reset =
5261		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5262	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5263
5264	/*
5265	 * ASIC reset has to be done on all XGMI hive nodes ASAP
5266	 * to allow proper link negotiation in FW (within 1 sec)
5267	 */
5268	if (!skip_hw_reset && need_full_reset) {
5269		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5270			/* For XGMI run all resets in parallel to speed up the process */
5271			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5272				tmp_adev->gmc.xgmi.pending_reset = false;
5273				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
5274					r = -EALREADY;
5275			} else
5276				r = amdgpu_asic_reset(tmp_adev);
5277
5278			if (r) {
5279				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
5280					 r, adev_to_drm(tmp_adev)->unique);
5281				goto out;
5282			}
5283		}
5284
5285		/* For XGMI wait for all resets to complete before proceed */
5286		if (!r) {
5287			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5288				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5289					flush_work(&tmp_adev->xgmi_reset_work);
5290					r = tmp_adev->asic_reset_res;
5291					if (r)
5292						break;
5293				}
5294			}
5295		}
5296	}
5297
5298	if (!r && amdgpu_ras_intr_triggered()) {
5299		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5300			amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB);
5301		}
5302
5303		amdgpu_ras_intr_cleared();
5304	}
5305
5306	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5307		if (need_full_reset) {
5308			/* post card */
5309			r = amdgpu_device_asic_init(tmp_adev);
5310			if (r) {
5311				dev_warn(tmp_adev->dev, "asic atom init failed!");
5312			} else {
5313				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5314
5315				r = amdgpu_device_ip_resume_phase1(tmp_adev);
5316				if (r)
5317					goto out;
5318
5319				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5320
5321				amdgpu_coredump(tmp_adev, vram_lost, reset_context);
5322
5323				if (vram_lost) {
5324					DRM_INFO("VRAM is lost due to GPU reset!\n");
5325					amdgpu_inc_vram_lost(tmp_adev);
5326				}
5327
5328				r = amdgpu_device_fw_loading(tmp_adev);
5329				if (r)
5330					return r;
5331
5332				r = amdgpu_xcp_restore_partition_mode(
5333					tmp_adev->xcp_mgr);
5334				if (r)
5335					goto out;
5336
5337				r = amdgpu_device_ip_resume_phase2(tmp_adev);
5338				if (r)
5339					goto out;
5340
5341				if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
5342					amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
5343
5344				if (vram_lost)
5345					amdgpu_device_fill_reset_magic(tmp_adev);
5346
5347				/*
5348				 * Add this ASIC as tracked, as the reset already
5349				 * completed successfully.
5350				 */
5351				amdgpu_register_gpu_instance(tmp_adev);
5352
5353				if (!reset_context->hive &&
5354				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5355					amdgpu_xgmi_add_device(tmp_adev);
5356
5357				r = amdgpu_device_ip_late_init(tmp_adev);
5358				if (r)
5359					goto out;
5360
5361				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
5362
5363				/*
5364				 * The GPU enters a bad state once the number of
5365				 * faulty pages flagged by ECC reaches the
5366				 * threshold, and RAS recovery is scheduled next.
5367				 * So add a check here to break recovery if it
5368				 * indeed exceeds the bad page threshold, and
5369				 * remind the user to retire this GPU or set a
5370				 * bigger bad_page_threshold value to fix this
5371				 * when probing the driver again.
5372				 */
5373				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
5374					/* must succeed. */
5375					amdgpu_ras_resume(tmp_adev);
5376				} else {
5377					r = -EINVAL;
5378					goto out;
5379				}
5380
5381				/* Update PSP FW topology after reset */
5382				if (reset_context->hive &&
5383				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5384					r = amdgpu_xgmi_update_topology(
5385						reset_context->hive, tmp_adev);
5386			}
5387		}
5388
5389out:
5390		if (!r) {
5391			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5392			r = amdgpu_ib_ring_tests(tmp_adev);
5393			if (r) {
5394				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5395				need_full_reset = true;
5396				r = -EAGAIN;
5397				goto end;
5398			}
5399		}
5400
5401		if (!r)
5402			r = amdgpu_device_recover_vram(tmp_adev);
5403		else
5404			tmp_adev->asic_reset_res = r;
5405	}
5406
5407end:
5408	if (need_full_reset)
5409		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5410	else
5411		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5412	return r;
5413}
5414
5415static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5416{
5417
5418	switch (amdgpu_asic_reset_method(adev)) {
5419	case AMD_RESET_METHOD_MODE1:
5420		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5421		break;
5422	case AMD_RESET_METHOD_MODE2:
5423		adev->mp1_state = PP_MP1_STATE_RESET;
5424		break;
5425	default:
5426		adev->mp1_state = PP_MP1_STATE_NONE;
5427		break;
5428	}
5429}
5430
5431static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5432{
5433	amdgpu_vf_error_trans_all(adev);
5434	adev->mp1_state = PP_MP1_STATE_NONE;
5435}
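
/*
 * Illustrative sketch (not part of the source): the two MP1 helpers
 * above are meant to bracket a reset window, so a hypothetical caller
 * would pair them as:
 *
 *	amdgpu_device_set_mp1_state(adev);
 *	r = perform_reset(adev);		// hypothetical reset step
 *	amdgpu_device_unset_mp1_state(adev);
 *
 * amdgpu_device_gpu_recover() below follows exactly this pattern for
 * every device in the reset list.
 */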
5436
5437static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5438{
5439	struct pci_dev *p = NULL;
5440
5441	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5442			adev->pdev->bus->number, 1);
5443	if (p) {
5444		pm_runtime_enable(&(p->dev));
5445		pm_runtime_resume(&(p->dev));
5446	}
5447
5448	pci_dev_put(p);
5449}
5450
5451static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5452{
5453	enum amd_reset_method reset_method;
5454	struct pci_dev *p = NULL;
5455	u64 expires;
5456
5457	/*
5458	 * For now, only BACO and mode1 reset are confirmed to suffer
5459	 * the audio issue if the audio device is not properly suspended.
5460	 */
5461	reset_method = amdgpu_asic_reset_method(adev);
5462	if ((reset_method != AMD_RESET_METHOD_BACO) &&
5463	     (reset_method != AMD_RESET_METHOD_MODE1))
5464		return -EINVAL;
5465
5466	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5467			adev->pdev->bus->number, 1);
5468	if (!p)
5469		return -ENODEV;
5470
5471	expires = pm_runtime_autosuspend_expiration(&(p->dev));
5472	if (!expires)
5473		/*
5474		 * If we cannot get the audio device autosuspend delay,
5475		 * use a fixed 4s interval. Since the audio controller's
5476		 * default autosuspend delay is 3s, the 4s used here is
5477		 * guaranteed to cover it.
5478		 */
5479		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5480
5481	while (!pm_runtime_status_suspended(&(p->dev))) {
5482		if (!pm_runtime_suspend(&(p->dev)))
5483			break;
5484
5485		if (expires < ktime_get_mono_fast_ns()) {
5486			dev_warn(adev->dev, "failed to suspend display audio\n");
5487			pci_dev_put(p);
5488			/* TODO: abort the succeeding gpu reset? */
5489			return -ETIMEDOUT;
5490		}
5491	}
5492
5493	pm_runtime_disable(&(p->dev));
5494
5495	pci_dev_put(p);
5496	return 0;
5497}
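
/*
 * Illustrative sketch (not part of the source): the audio helpers are
 * used as a matched pair around a reset. The suspend is skipped for
 * reset methods other than BACO/mode1, so a flag records whether the
 * resume is actually needed:
 *
 *	bool audio_suspended = false;
 *
 *	if (!amdgpu_device_suspend_display_audio(adev))
 *		audio_suspended = true;
 *	// ... perform the reset ...
 *	if (audio_suspended)
 *		amdgpu_device_resume_display_audio(adev);
 */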
5498
5499static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5500{
5501	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5502
5503#if defined(CONFIG_DEBUG_FS)
5504	if (!amdgpu_sriov_vf(adev))
5505		cancel_work(&adev->reset_work);
5506#endif
5507
5508	if (adev->kfd.dev)
5509		cancel_work(&adev->kfd.reset_work);
5510
5511	if (amdgpu_sriov_vf(adev))
5512		cancel_work(&adev->virt.flr_work);
5513
5514	if (con && adev->ras_enabled)
5515		cancel_work(&con->recovery_work);
5516
5517}
5518
5519/**
5520 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5521 *
5522 * @adev: amdgpu_device pointer
5523 * @job: the job which triggered the hang
5524 * @reset_context: amdgpu reset context pointer
5525 *
5526 * Attempt to reset the GPU if it has hung (all ASICs).
5527 * Attempt to do a soft or full reset and reinitialize the ASIC.
5528 * Returns 0 for success or an error on failure.
5529 */
5530
5531int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5532			      struct amdgpu_job *job,
5533			      struct amdgpu_reset_context *reset_context)
5534{
5535	struct list_head device_list, *device_list_handle =  NULL;
5536	bool job_signaled = false;
5537	struct amdgpu_hive_info *hive = NULL;
5538	struct amdgpu_device *tmp_adev = NULL;
5539	int i, r = 0;
5540	bool need_emergency_restart = false;
5541	bool audio_suspended = false;
5542
5543	/*
5544	 * Special case: RAS triggered and full reset isn't supported
5545	 */
5546	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5547
5548	/*
5549	 * Flush RAM to disk so that after reboot
5550	 * the user can read the log and see why the system rebooted.
5551	 */
5552	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
5553		amdgpu_ras_get_context(adev)->reboot) {
5554		DRM_WARN("Emergency reboot.");
5555
5556		ksys_sync_helper();
5557		emergency_restart();
5558	}
5559
5560	dev_info(adev->dev, "GPU %s begin!\n",
5561		need_emergency_restart ? "jobs stop" : "reset");
5562
5563	if (!amdgpu_sriov_vf(adev))
5564		hive = amdgpu_get_xgmi_hive(adev);
5565	if (hive)
5566		mutex_lock(&hive->hive_lock);
5567
5568	reset_context->job = job;
5569	reset_context->hive = hive;
5570	/*
5571	 * Build the list of devices to reset.
5572	 * If we are in XGMI hive mode, re-sort the device list
5573	 * so that adev is in the first position.
5574	 */
5575	INIT_LIST_HEAD(&device_list);
5576	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5577		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5578			list_add_tail(&tmp_adev->reset_list, &device_list);
5579			if (adev->shutdown)
5580				tmp_adev->shutdown = true;
5581		}
5582		if (!list_is_first(&adev->reset_list, &device_list))
5583			list_rotate_to_front(&adev->reset_list, &device_list);
5584		device_list_handle = &device_list;
5585	} else {
5586		list_add_tail(&adev->reset_list, &device_list);
5587		device_list_handle = &device_list;
5588	}
5589
5590	/* We need to lock the reset domain only once, for both XGMI and single device */
5591	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5592				    reset_list);
5593	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5594
5595	/* block all schedulers and reset given job's ring */
5596	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5597
5598		amdgpu_device_set_mp1_state(tmp_adev);
5599
5600		/*
5601		 * Try to put the audio codec into suspend state
5602		 * before the gpu reset starts.
5603		 *
5604		 * The power domain of the graphics device is
5605		 * shared with the AZ power domain. Without this,
5606		 * we might change the audio hardware from behind
5607		 * the audio driver's back, which would trigger
5608		 * audio codec errors.
5609		 */
5610		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5611			audio_suspended = true;
5612
5613		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5614
5615		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5616
5617		if (!amdgpu_sriov_vf(tmp_adev))
5618			amdgpu_amdkfd_pre_reset(tmp_adev);
5619
5620		/*
5621		 * Mark the ASICs to be reset as untracked first,
5622		 * and add them back after the reset has completed.
5623		 */
5624		amdgpu_unregister_gpu_instance(tmp_adev);
5625
5626		drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5627
5628		/* disable ras on ALL IPs */
5629		if (!need_emergency_restart &&
5630		      amdgpu_device_ip_need_full_reset(tmp_adev))
5631			amdgpu_ras_suspend(tmp_adev);
5632
5633		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5634			struct amdgpu_ring *ring = tmp_adev->rings[i];
5635
5636			if (!amdgpu_ring_sched_ready(ring))
5637				continue;
5638
5639			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5640
5641			if (need_emergency_restart)
5642				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5643		}
5644		atomic_inc(&tmp_adev->gpu_reset_counter);
5645	}
5646
5647	if (need_emergency_restart)
5648		goto skip_sched_resume;
5649
5650	/*
5651	 * Must check the guilty signal here, since after this point all old
5652	 * HW fences are force-signaled.
5653	 *
5654	 * job->base holds a reference to the parent fence.
5655	 */
5656	if (job && dma_fence_is_signaled(&job->hw_fence)) {
5657		job_signaled = true;
5658		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset\n");
5659		goto skip_hw_reset;
5660	}
5661
5662retry:	/* Pre-ASIC-reset the rest of the adevs from the XGMI hive. */
5663	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5664		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5665		/* TODO: should we stop here? */
5666		if (r) {
5667			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s\n",
5668				r, adev_to_drm(tmp_adev)->unique);
5669			tmp_adev->asic_reset_res = r;
5670		}
5671
5672		/*
5673		 * Drop all pending non-scheduler resets. Scheduler resets
5674		 * were already dropped during drm_sched_stop.
5675		 */
5676		amdgpu_device_stop_pending_resets(tmp_adev);
5677	}
5678
5679	/* Actual ASIC resets if needed.*/
5680	/* Host driver will handle XGMI hive reset for SRIOV */
5681	if (amdgpu_sriov_vf(adev)) {
5682		r = amdgpu_device_reset_sriov(adev, !job);
5683		if (r)
5684			adev->asic_reset_res = r;
5685
5686		/* Aldebaran and gfx_11_0_3 support RAS in SRIOV, so RAS needs to be resumed during reset */
5687		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
5688			    IP_VERSION(9, 4, 2) ||
5689		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
5690			amdgpu_ras_resume(adev);
5691	} else {
5692		r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5693		if (r == -EAGAIN)
5694			goto retry;
5695	}
5696
5697skip_hw_reset:
5698
5699	/* Post ASIC reset for all devs. */
5700	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5701
5702		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5703			struct amdgpu_ring *ring = tmp_adev->rings[i];
5704
5705			if (!amdgpu_ring_sched_ready(ring))
5706				continue;
5707
5708			drm_sched_start(&ring->sched, true);
5709		}
5710
5711		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
5712			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5713
5714		if (tmp_adev->asic_reset_res)
5715			r = tmp_adev->asic_reset_res;
5716
5717		tmp_adev->asic_reset_res = 0;
5718
5719		if (r) {
5720			/* bad news, how do we tell userspace? */
5721			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5722			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5723		} else {
5724			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5725			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5726				DRM_WARN("smart shift update failed\n");
5727		}
5728	}
5729
5730skip_sched_resume:
5731	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5732		/* unlock kfd: SRIOV would do it separately */
5733		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5734			amdgpu_amdkfd_post_reset(tmp_adev);
5735
5736		/* kfd_post_reset will do nothing if the kfd device is not
5737		 * initialized, so bring up kfd here if it wasn't initialized before
5738		 */
5739		if (!adev->kfd.init_complete)
5740			amdgpu_amdkfd_device_init(adev);
5741
5742		if (audio_suspended)
5743			amdgpu_device_resume_display_audio(tmp_adev);
5744
5745		amdgpu_device_unset_mp1_state(tmp_adev);
5746
5747		amdgpu_ras_set_error_query_ready(tmp_adev, true);
5748	}
5749
5750	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5751					    reset_list);
5752	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5753
5754	if (hive) {
5755		mutex_unlock(&hive->hive_lock);
5756		amdgpu_put_xgmi_hive(hive);
5757	}
5758
5759	if (r)
5760		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5761
5762	atomic_set(&adev->reset_domain->reset_res, r);
5763	return r;
5764}
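
/*
 * Illustrative caller sketch (an assumption about the job timeout
 * path, which lives elsewhere in the driver): a timeout handler
 * zero-initializes a reset context, tags the requesting device and,
 * typically, requests a full reset:
 *
 *	struct amdgpu_reset_context reset_context;
 *
 *	memset(&reset_context, 0, sizeof(reset_context));
 *	reset_context.method = AMD_RESET_METHOD_NONE;
 *	reset_context.reset_req_dev = adev;
 *	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
 *
 *	r = amdgpu_device_gpu_recover(adev, job, &reset_context);
 */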
5765
5766/**
5767 * amdgpu_device_partner_bandwidth - find the bandwidth of appropriate partner
5768 *
5769 * @adev: amdgpu_device pointer
5770 * @speed: pointer to the speed of the link
5771 * @width: pointer to the width of the link
5772 *
5773 * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
5774 * first physical partner to an AMD dGPU.
5775 * This will exclude any virtual switches and links.
5776 */
5777static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
5778					    enum pci_bus_speed *speed,
5779					    enum pcie_link_width *width)
5780{
5781	struct pci_dev *parent = adev->pdev;
5782
5783	if (!speed || !width)
5784		return;
5785
5786	*speed = PCI_SPEED_UNKNOWN;
5787	*width = PCIE_LNK_WIDTH_UNKNOWN;
5788
5789	while ((parent = pci_upstream_bridge(parent))) {
5790		/* skip upstream/downstream switches internal to the dGPU */
5791		if (parent->vendor == PCI_VENDOR_ID_ATI)
5792			continue;
5793		*speed = pcie_get_speed_cap(parent);
5794		*width = pcie_get_width_cap(parent);
5795		break;
5796	}
5797}
5798
5799/**
5800 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5801 *
5802 * @adev: amdgpu_device pointer
5803 *
5804 * Fetches and stores in the driver the PCIE capabilities (gen speed
5805 * and lanes) of the slot the device is in. Handles APUs and
5806 * virtualized environments where PCIE config space may not be available.
5807 */
5808static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5809{
5810	struct pci_dev *pdev;
5811	enum pci_bus_speed speed_cap, platform_speed_cap;
5812	enum pcie_link_width platform_link_width;
5813
5814	if (amdgpu_pcie_gen_cap)
5815		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5816
5817	if (amdgpu_pcie_lane_cap)
5818		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5819
5820	/* covers APUs as well */
5821	if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5822		if (adev->pm.pcie_gen_mask == 0)
5823			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5824		if (adev->pm.pcie_mlw_mask == 0)
5825			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5826		return;
5827	}
5828
5829	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5830		return;
5831
5832	amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
5833					&platform_link_width);
5834
5835	if (adev->pm.pcie_gen_mask == 0) {
5836		/* asic caps */
5837		pdev = adev->pdev;
5838		speed_cap = pcie_get_speed_cap(pdev);
5839		if (speed_cap == PCI_SPEED_UNKNOWN) {
5840			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5841						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5842						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5843		} else {
5844			if (speed_cap == PCIE_SPEED_32_0GT)
5845				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5846							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5847							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5848							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5849							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5850			else if (speed_cap == PCIE_SPEED_16_0GT)
5851				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5852							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5853							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5854							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5855			else if (speed_cap == PCIE_SPEED_8_0GT)
5856				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5857							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5858							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5859			else if (speed_cap == PCIE_SPEED_5_0GT)
5860				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5861							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5862			else
5863				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5864		}
5865		/* platform caps */
5866		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5867			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5868						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5869		} else {
5870			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5871				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5872							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5873							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5874							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5875							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5876			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5877				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5878							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5879							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5880							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5881			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5882				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5883							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5884							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5885			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5886				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5887							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5888			else
5889				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5890
5891		}
5892	}
5893	if (adev->pm.pcie_mlw_mask == 0) {
5894		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5895			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5896		} else {
5897			switch (platform_link_width) {
5898			case PCIE_LNK_X32:
5899				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5900							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5901							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5902							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5903							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5904							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5905							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5906				break;
5907			case PCIE_LNK_X16:
5908				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5909							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5910							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5911							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5912							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5913							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5914				break;
5915			case PCIE_LNK_X12:
5916				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5917							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5918							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5919							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5920							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5921				break;
5922			case PCIE_LNK_X8:
5923				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5924							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5925							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5926							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5927				break;
5928			case PCIE_LNK_X4:
5929				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5930							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5931							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5932				break;
5933			case PCIE_LNK_X2:
5934				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5935							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5936				break;
5937			case PCIE_LNK_X1:
5938				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5939				break;
5940			default:
5941				break;
5942			}
5943		}
5944	}
5945}
5946
5947/**
5948 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5949 *
5950 * @adev: amdgpu_device pointer
5951 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5952 *
5953 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5954 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5955 * @peer_adev.
5956 */
5957bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5958				      struct amdgpu_device *peer_adev)
5959{
5960#ifdef CONFIG_HSA_AMD_P2P
5961	uint64_t address_mask = peer_adev->dev->dma_mask ?
5962		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5963	resource_size_t aper_limit =
5964		adev->gmc.aper_base + adev->gmc.aper_size - 1;
5965	bool p2p_access =
5966		!adev->gmc.xgmi.connected_to_cpu &&
5967		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5968
5969	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5970		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5971		!(adev->gmc.aper_base & address_mask ||
5972		  aper_limit & address_mask));
5973#else
5974	return false;
5975#endif
5976}
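
/*
 * Illustrative sketch (not part of the source): accessibility is
 * checked per direction, so a hypothetical caller setting up P2P DMA
 * between two devices would test both directions:
 *
 *	if (amdgpu_device_is_peer_accessible(adev, peer_adev) &&
 *	    amdgpu_device_is_peer_accessible(peer_adev, adev)) {
 *		// each device can reach the other's VRAM over the BAR
 *	}
 */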
5977
5978int amdgpu_device_baco_enter(struct drm_device *dev)
5979{
5980	struct amdgpu_device *adev = drm_to_adev(dev);
5981	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5982
5983	if (!amdgpu_device_supports_baco(dev))
5984		return -ENOTSUPP;
5985
5986	if (ras && adev->ras_enabled &&
5987	    adev->nbio.funcs->enable_doorbell_interrupt)
5988		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5989
5990	return amdgpu_dpm_baco_enter(adev);
5991}
5992
5993int amdgpu_device_baco_exit(struct drm_device *dev)
5994{
5995	struct amdgpu_device *adev = drm_to_adev(dev);
5996	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5997	int ret = 0;
5998
5999	if (!amdgpu_device_supports_baco(dev))
6000		return -ENOTSUPP;
6001
6002	ret = amdgpu_dpm_baco_exit(adev);
6003	if (ret)
6004		return ret;
6005
6006	if (ras && adev->ras_enabled &&
6007	    adev->nbio.funcs->enable_doorbell_interrupt)
6008		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
6009
6010	if (amdgpu_passthrough(adev) &&
6011	    adev->nbio.funcs->clear_doorbell_interrupt)
6012		adev->nbio.funcs->clear_doorbell_interrupt(adev);
6013
6014	return 0;
6015}
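
/*
 * Illustrative sketch (a simplified assumption about the runtime-PM
 * path elsewhere in the driver): BACO enter/exit are used as a
 * matched pair around the low-power period:
 *
 *	r = amdgpu_device_baco_enter(dev);	// enter BACO, power mostly off
 *	if (r)
 *		return r;
 *	// ... device stays in BACO until wakeup ...
 *	r = amdgpu_device_baco_exit(dev);	// back to full power
 */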
6016
6017/**
6018 * amdgpu_pci_error_detected - Called when a PCI error is detected.
6019 * @pdev: PCI device struct
6020 * @state: PCI channel state
6021 *
6022 * Description: Called when a PCI error is detected.
6023 *
6024 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
6025 */
6026pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6027{
6028	struct drm_device *dev = pci_get_drvdata(pdev);
6029	struct amdgpu_device *adev = drm_to_adev(dev);
6030	int i;
6031
6032	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
6033
6034	if (adev->gmc.xgmi.num_physical_nodes > 1) {
6035		DRM_WARN("No support for XGMI hive yet...");
6036		return PCI_ERS_RESULT_DISCONNECT;
6037	}
6038
6039	adev->pci_channel_state = state;
6040
6041	switch (state) {
6042	case pci_channel_io_normal:
6043		return PCI_ERS_RESULT_CAN_RECOVER;
6044	/* Fatal error, prepare for slot reset */
6045	case pci_channel_io_frozen:
6046		/*
6047		 * Locking adev->reset_domain->sem will prevent any external access
6048		 * to GPU during PCI error recovery
6049		 */
6050		amdgpu_device_lock_reset_domain(adev->reset_domain);
6051		amdgpu_device_set_mp1_state(adev);
6052
6053		/*
6054		 * Block any work scheduling as we do for regular GPU reset
6055		 * for the duration of the recovery
6056		 */
6057		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6058			struct amdgpu_ring *ring = adev->rings[i];
6059
6060			if (!amdgpu_ring_sched_ready(ring))
6061				continue;
6062
6063			drm_sched_stop(&ring->sched, NULL);
6064		}
6065		atomic_inc(&adev->gpu_reset_counter);
6066		return PCI_ERS_RESULT_NEED_RESET;
6067	case pci_channel_io_perm_failure:
6068		/* Permanent error, prepare for device removal */
6069		return PCI_ERS_RESULT_DISCONNECT;
6070	}
6071
6072	return PCI_ERS_RESULT_NEED_RESET;
6073}
6074
6075/**
6076 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
6077 * @pdev: pointer to PCI device
6078 */
6079pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
6080{
6081
6082	DRM_INFO("PCI error: mmio enabled callback!!\n");
6083
6084	/* TODO - dump whatever for debugging purposes */
6085
6086	/* This is called only if amdgpu_pci_error_detected returns
6087	 * PCI_ERS_RESULT_CAN_RECOVER. Reads/writes to the device still
6088	 * work, so there is no need to reset the slot.
6089	 */
6090
6091	return PCI_ERS_RESULT_RECOVERED;
6092}
6093
6094/**
6095 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
6096 * @pdev: PCI device struct
6097 *
6098 * Description: This routine is called by the pci error recovery
6099 * code after the PCI slot has been reset, just before we
6100 * should resume normal operations.
6101 */
6102pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
6103{
6104	struct drm_device *dev = pci_get_drvdata(pdev);
6105	struct amdgpu_device *adev = drm_to_adev(dev);
6106	int r, i;
6107	struct amdgpu_reset_context reset_context;
6108	u32 memsize;
6109	struct list_head device_list;
6110
6111	DRM_INFO("PCI error: slot reset callback!!\n");
6112
6113	memset(&reset_context, 0, sizeof(reset_context));
6114
6115	INIT_LIST_HEAD(&device_list);
6116	list_add_tail(&adev->reset_list, &device_list);
6117
6118	/* wait for asic to come out of reset */
6119	msleep(500);
6120
6121	/* Restore the PCI config space */
6122	amdgpu_device_load_pci_state(pdev);
6123
6124	/* confirm the ASIC came out of reset */
6125	for (i = 0; i < adev->usec_timeout; i++) {
6126		memsize = amdgpu_asic_get_config_memsize(adev);
6127
6128		if (memsize != 0xffffffff)
6129			break;
6130		udelay(1);
6131	}
6132	if (memsize == 0xffffffff) {
6133		r = -ETIME;
6134		goto out;
6135	}
6136
6137	reset_context.method = AMD_RESET_METHOD_NONE;
6138	reset_context.reset_req_dev = adev;
6139	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
6140	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
6141
6142	adev->no_hw_access = true;
6143	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
6144	adev->no_hw_access = false;
6145	if (r)
6146		goto out;
6147
6148	r = amdgpu_do_asic_reset(&device_list, &reset_context);
6149
6150out:
6151	if (!r) {
6152		if (amdgpu_device_cache_pci_state(adev->pdev))
6153			pci_restore_state(adev->pdev);
6154
6155		DRM_INFO("PCIe error recovery succeeded\n");
6156	} else {
6157		DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
6158		amdgpu_device_unset_mp1_state(adev);
6159		amdgpu_device_unlock_reset_domain(adev->reset_domain);
6160	}
6161
6162	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
6163}
6164
6165/**
6166 * amdgpu_pci_resume() - resume normal ops after PCI reset
6167 * @pdev: pointer to PCI device
6168 *
6169 * Called when the error recovery driver tells us that it's
6170 * OK to resume normal operation.
6171 */
6172void amdgpu_pci_resume(struct pci_dev *pdev)
6173{
6174	struct drm_device *dev = pci_get_drvdata(pdev);
6175	struct amdgpu_device *adev = drm_to_adev(dev);
6176	int i;
6177
6178
6179	DRM_INFO("PCI error: resume callback!!\n");
6180
6181	/* Only continue execution for the case of pci_channel_io_frozen */
6182	if (adev->pci_channel_state != pci_channel_io_frozen)
6183		return;
6184
6185	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6186		struct amdgpu_ring *ring = adev->rings[i];
6187
6188		if (!amdgpu_ring_sched_ready(ring))
6189			continue;
6190
6191		drm_sched_start(&ring->sched, true);
6192	}
6193
6194	amdgpu_device_unset_mp1_state(adev);
6195	amdgpu_device_unlock_reset_domain(adev->reset_domain);
6196}
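
/*
 * Illustrative sketch (assumption: mirrors how the driver wires these
 * callbacks up; the actual table lives in the pci_driver definition):
 * the four handlers above plug into the PCI AER recovery flow through
 * a struct pci_error_handlers:
 *
 *	static const struct pci_error_handlers amdgpu_pci_err_handler = {
 *		.error_detected	= amdgpu_pci_error_detected,
 *		.mmio_enabled	= amdgpu_pci_mmio_enabled,
 *		.slot_reset	= amdgpu_pci_slot_reset,
 *		.resume		= amdgpu_pci_resume,
 *	};
 */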
6197
6198bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
6199{
6200	struct drm_device *dev = pci_get_drvdata(pdev);
6201	struct amdgpu_device *adev = drm_to_adev(dev);
6202	int r;
6203
6204	r = pci_save_state(pdev);
6205	if (!r) {
6206		kfree(adev->pci_state);
6207
6208		adev->pci_state = pci_store_saved_state(pdev);
6209
6210		if (!adev->pci_state) {
6211			DRM_ERROR("Failed to store PCI saved state\n");
6212			return false;
6213		}
6214	} else {
6215		DRM_WARN("Failed to save PCI state, err:%d\n", r);
6216		return false;
6217	}
6218
6219	return true;
6220}
6221
6222bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
6223{
6224	struct drm_device *dev = pci_get_drvdata(pdev);
6225	struct amdgpu_device *adev = drm_to_adev(dev);
6226	int r;
6227
6228	if (!adev->pci_state)
6229		return false;
6230
6231	r = pci_load_saved_state(pdev, adev->pci_state);
6232
6233	if (!r) {
6234		pci_restore_state(pdev);
6235	} else {
6236		DRM_WARN("Failed to load PCI state, err:%d\n", r);
6237		return false;
6238	}
6239
6240	return true;
6241}
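
/*
 * Illustrative sketch (not part of the source): the two helpers above
 * form a save/restore pair. The config space is cached while the
 * device is healthy and replayed after a slot reset, as done in
 * amdgpu_pci_slot_reset() above:
 *
 *	amdgpu_device_cache_pci_state(adev->pdev);	// while healthy
 *	// ... PCI slot reset happens ...
 *	amdgpu_device_load_pci_state(adev->pdev);	// restore config space
 */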
6242
6243void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6244		struct amdgpu_ring *ring)
6245{
6246#ifdef CONFIG_X86_64
6247	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6248		return;
6249#endif
6250	if (adev->gmc.xgmi.connected_to_cpu)
6251		return;
6252
6253	if (ring && ring->funcs->emit_hdp_flush)
6254		amdgpu_ring_emit_hdp_flush(ring);
6255	else
6256		amdgpu_asic_flush_hdp(adev, ring);
6257}
6258
6259void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6260		struct amdgpu_ring *ring)
6261{
6262#ifdef CONFIG_X86_64
6263	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6264		return;
6265#endif
6266	if (adev->gmc.xgmi.connected_to_cpu)
6267		return;
6268
6269	amdgpu_asic_invalidate_hdp(adev, ring);
6270}
6271
6272int amdgpu_in_reset(struct amdgpu_device *adev)
6273{
6274	return atomic_read(&adev->reset_domain->in_gpu_reset);
6275}
6276
6277/**
6278 * amdgpu_device_halt() - bring hardware to some kind of halt state
6279 *
6280 * @adev: amdgpu_device pointer
6281 *
6282 * Bring hardware to some kind of halt state so that no one can touch it
6283 * any more. This helps preserve the error context when an error occurs.
6284 * Compared to a simple hang, the system will stay stable at least for SSH
6285 * access, so it should be trivial to inspect the hardware state and
6286 * see what's going on. Implemented as follows:
6287 *
6288 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc),
6289 *    clears all CPU mappings to the device, disallows remappings through page faults
6290 * 2. amdgpu_irq_disable_all() disables all interrupts
6291 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
6292 * 4. set adev->no_hw_access to avoid potential crashes after step 5
6293 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
6294 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
6295 *    flush any in flight DMA operations
6296 */
6297void amdgpu_device_halt(struct amdgpu_device *adev)
6298{
6299	struct pci_dev *pdev = adev->pdev;
6300	struct drm_device *ddev = adev_to_drm(adev);
6301
6302	amdgpu_xcp_dev_unplug(adev);
6303	drm_dev_unplug(ddev);
6304
6305	amdgpu_irq_disable_all(adev);
6306
6307	amdgpu_fence_driver_hw_fini(adev);
6308
6309	adev->no_hw_access = true;
6310
6311	amdgpu_device_unmap_mmio(adev);
6312
6313	pci_disable_device(pdev);
6314	pci_wait_for_pending_transaction(pdev);
6315}
6316
6317u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
6318				u32 reg)
6319{
6320	unsigned long flags, address, data;
6321	u32 r;
6322
6323	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6324	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6325
6326	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6327	WREG32(address, reg * 4);
6328	(void)RREG32(address);
6329	r = RREG32(data);
6330	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6331	return r;
6332}
6333
6334void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6335				u32 reg, u32 v)
6336{
6337	unsigned long flags, address, data;
6338
6339	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6340	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6341
6342	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6343	WREG32(address, reg * 4);
6344	(void)RREG32(address);
6345	WREG32(data, v);
6346	(void)RREG32(data);
6347	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6348}
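
/*
 * Illustrative sketch (register and bit names are made up): the
 * accessors above implement the classic index/data pattern - write the
 * register index to one window, then read or write the data window -
 * under pcie_idx_lock. A read-modify-write would look like:
 *
 *	u32 v = amdgpu_device_pcie_port_rreg(adev, regSOME_PCIE_PORT_CNTL);
 *
 *	v |= SOME_ENABLE_BIT;	// hypothetical field
 *	amdgpu_device_pcie_port_wreg(adev, regSOME_PCIE_PORT_CNTL, v);
 */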
6349
6350/**
6351 * amdgpu_device_switch_gang - switch to a new gang
6352 * @adev: amdgpu_device pointer
6353 * @gang: the gang to switch to
6354 *
6355 * Try to switch to a new gang.
6356 * Returns: NULL if we switched to the new gang or a reference to the current
6357 * gang leader.
6358 */
6359struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6360					    struct dma_fence *gang)
6361{
6362	struct dma_fence *old = NULL;
6363
6364	do {
6365		dma_fence_put(old);
6366		rcu_read_lock();
6367		old = dma_fence_get_rcu_safe(&adev->gang_submit);
6368		rcu_read_unlock();
6369
6370		if (old == gang)
6371			break;
6372
6373		if (!dma_fence_is_signaled(old))
6374			return old;
6375
6376	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6377			 old, gang) != old);
6378
6379	dma_fence_put(old);
6380	return NULL;
6381}
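
/*
 * Illustrative caller sketch (an assumption about the submission
 * path): a non-NULL return means the previous gang leader is still
 * running, so the caller waits on it and retries until the switch
 * sticks:
 *
 *	struct dma_fence *old;
 *
 *	while ((old = amdgpu_device_switch_gang(adev, gang))) {
 *		dma_fence_wait(old, false);
 *		dma_fence_put(old);
 *	}
 */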
6382
6383bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6384{
6385	switch (adev->asic_type) {
6386#ifdef CONFIG_DRM_AMDGPU_SI
6387	case CHIP_HAINAN:
6388#endif
6389	case CHIP_TOPAZ:
6390		/* chips with no display hardware */
6391		return false;
6392#ifdef CONFIG_DRM_AMDGPU_SI
6393	case CHIP_TAHITI:
6394	case CHIP_PITCAIRN:
6395	case CHIP_VERDE:
6396	case CHIP_OLAND:
6397#endif
6398#ifdef CONFIG_DRM_AMDGPU_CIK
6399	case CHIP_BONAIRE:
6400	case CHIP_HAWAII:
6401	case CHIP_KAVERI:
6402	case CHIP_KABINI:
6403	case CHIP_MULLINS:
6404#endif
6405	case CHIP_TONGA:
6406	case CHIP_FIJI:
6407	case CHIP_POLARIS10:
6408	case CHIP_POLARIS11:
6409	case CHIP_POLARIS12:
6410	case CHIP_VEGAM:
6411	case CHIP_CARRIZO:
6412	case CHIP_STONEY:
6413		/* chips with display hardware */
6414		return true;
6415	default:
6416		/* IP discovery */
6417		if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
6418		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6419			return false;
6420		return true;
6421	}
6422}
6423
6424uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
6425		uint32_t inst, uint32_t reg_addr, char reg_name[],
6426		uint32_t expected_value, uint32_t mask)
6427{
6428	uint32_t ret = 0;
6429	uint32_t old_ = 0;
6430	uint32_t tmp_ = RREG32(reg_addr);
6431	uint32_t loop = adev->usec_timeout;
6432
6433	while ((tmp_ & (mask)) != (expected_value)) {
6434		if (old_ != tmp_) {
6435			loop = adev->usec_timeout;
6436			old_ = tmp_;
6437		} else
6438			udelay(1);
6439		tmp_ = RREG32(reg_addr);
6440		loop--;
6441		if (!loop) {
6442			DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
6443				  inst, reg_name, (uint32_t)expected_value,
6444				  (uint32_t)(tmp_ & (mask)));
6445			ret = -ETIMEDOUT;
6446			break;
6447		}
6448	}
6449	return ret;
6450}
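
/*
 * Illustrative sketch (register and mask names are made up): an IP
 * block would poll a status register until a ready bit appears, or
 * fail with -ETIMEDOUT once adev->usec_timeout iterations pass without
 * the value changing:
 *
 *	r = amdgpu_device_wait_on_rreg(adev, 0, regSOME_STATUS, "SOME_STATUS",
 *				       SOME_STATUS__READY_MASK,
 *				       SOME_STATUS__READY_MASK);
 *	if (r)
 *		return r;
 */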
v5.4
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
  28#include <linux/power_supply.h>
  29#include <linux/kthread.h>
  30#include <linux/module.h>
  31#include <linux/console.h>
  32#include <linux/slab.h>
 
 
 
 
  33
 
  34#include <drm/drm_atomic_helper.h>
 
 
  35#include <drm/drm_probe_helper.h>
  36#include <drm/amdgpu_drm.h>
 
  37#include <linux/vgaarb.h>
  38#include <linux/vga_switcheroo.h>
  39#include <linux/efi.h>
  40#include "amdgpu.h"
  41#include "amdgpu_trace.h"
  42#include "amdgpu_i2c.h"
  43#include "atom.h"
  44#include "amdgpu_atombios.h"
  45#include "amdgpu_atomfirmware.h"
  46#include "amd_pcie.h"
  47#ifdef CONFIG_DRM_AMDGPU_SI
  48#include "si.h"
  49#endif
  50#ifdef CONFIG_DRM_AMDGPU_CIK
  51#include "cik.h"
  52#endif
  53#include "vi.h"
  54#include "soc15.h"
  55#include "nv.h"
  56#include "bif/bif_4_1_d.h"
  57#include <linux/pci.h>
  58#include <linux/firmware.h>
  59#include "amdgpu_vf_error.h"
  60
  61#include "amdgpu_amdkfd.h"
  62#include "amdgpu_pm.h"
  63
  64#include "amdgpu_xgmi.h"
  65#include "amdgpu_ras.h"
  66#include "amdgpu_pmu.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
  67
  68MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
  69MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
  70MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
  71MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
  72MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
  73MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
  74MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
  75MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
  76MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
  77MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
  78
  79#define AMDGPU_RESUME_MS		2000
 
 
 
 
  80
  81static const char *amdgpu_asic_name[] = {
  82	"TAHITI",
  83	"PITCAIRN",
  84	"VERDE",
  85	"OLAND",
  86	"HAINAN",
  87	"BONAIRE",
  88	"KAVERI",
  89	"KABINI",
  90	"HAWAII",
  91	"MULLINS",
  92	"TOPAZ",
  93	"TONGA",
  94	"FIJI",
  95	"CARRIZO",
  96	"STONEY",
  97	"POLARIS10",
  98	"POLARIS11",
  99	"POLARIS12",
 100	"VEGAM",
 101	"VEGA10",
 102	"VEGA12",
 103	"VEGA20",
 104	"RAVEN",
 105	"ARCTURUS",
 106	"RENOIR",
 
 107	"NAVI10",
 
 108	"NAVI14",
 109	"NAVI12",
 
 
 
 
 
 
 
 110	"LAST",
 111};
 112
 113/**
 114 * DOC: pcie_replay_count
 115 *
 116 * The amdgpu driver provides a sysfs API for reporting the total number
 117 * of PCIe replays (NAKs)
 118 * The file pcie_replay_count is used for this and returns the total
 119 * number of replays as a sum of the NAKs generated and NAKs received
 120 */
 121
 122static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
 123		struct device_attribute *attr, char *buf)
 124{
 125	struct drm_device *ddev = dev_get_drvdata(dev);
 126	struct amdgpu_device *adev = ddev->dev_private;
 127	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
 128
 129	return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
 130}
 131
 132static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
 133		amdgpu_device_get_pcie_replay_count, NULL);
 134
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 135static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 136
 
 137/**
 138 * amdgpu_device_is_px - Is the device is a dGPU with HG/PX power control
 139 *
 140 * @dev: drm_device pointer
 141 *
 142 * Returns true if the device is a dGPU with HG/PX power control,
 143 * otherwise return false.
 144 */
 145bool amdgpu_device_is_px(struct drm_device *dev)
 146{
 147	struct amdgpu_device *adev = dev->dev_private;
 148
 149	if (adev->flags & AMD_IS_PX)
 150		return true;
 151	return false;
 152}
 153
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 154/*
 155 * MMIO register access helper functions.
 156 */
 
 157/**
 158 * amdgpu_mm_rreg - read a memory mapped IO register
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 159 *
 160 * @adev: amdgpu_device pointer
 161 * @reg: dword aligned register offset
 162 * @acc_flags: access flags which require special behavior
 163 *
 164 * Returns the 32 bit value from the offset specified.
 165 */
 166uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 167			uint32_t acc_flags)
 168{
 169	uint32_t ret;
 170
 171	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
 172		return amdgpu_virt_kiq_rreg(adev, reg);
 173
 174	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
 175		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 176	else {
 177		unsigned long flags;
 178
 179		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 180		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
 181		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 182		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 
 
 183	}
 184	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
 
 
 185	return ret;
 186}
 187
 188/*
 189 * MMIO register read with bytes helper functions
 190 * @offset:bytes offset from MMIO start
 191 *
 192*/
 193
 194/**
 195 * amdgpu_mm_rreg8 - read a memory mapped IO register
 196 *
 197 * @adev: amdgpu_device pointer
 198 * @offset: byte aligned register offset
 199 *
 200 * Returns the 8 bit value from the offset specified.
 201 */
 202uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
 
 
 
 
 203	if (offset < adev->rmmio_size)
 204		return (readb(adev->rmmio + offset));
 205	BUG();
 206}
 207
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 208/*
 209 * MMIO register write with bytes helper functions
 210 * @offset:bytes offset from MMIO start
 211 * @value: the value want to be written to the register
 212 *
 213*/
 214/**
 215 * amdgpu_mm_wreg8 - read a memory mapped IO register
 216 *
 217 * @adev: amdgpu_device pointer
 218 * @offset: byte aligned register offset
 219 * @value: 8 bit value to write
 220 *
 221 * Writes the value specified to the offset specified.
 222 */
 223void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
 
 
 
 
 224	if (offset < adev->rmmio_size)
 225		writeb(value, adev->rmmio + offset);
 226	else
 227		BUG();
 228}
 229
 230/**
 231 * amdgpu_mm_wreg - write to a memory mapped IO register
 232 *
 233 * @adev: amdgpu_device pointer
 234 * @reg: dword aligned register offset
 235 * @v: 32 bit value to write to the register
 236 * @acc_flags: access flags which require special behavior
 237 *
 238 * Writes the value specified to the offset specified.
 239 */
 240void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 241		    uint32_t acc_flags)
 
 242{
 243	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 
 244
 245	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
 246		adev->last_mm_index = v;
 
 
 
 
 
 
 
 
 
 247	}
 248
 249	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
 250		return amdgpu_virt_kiq_wreg(adev, reg, v);
 251
 252	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 253		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 254	else {
 255		unsigned long flags;
 256
 257		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 258		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
 259		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 260		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 261	}
 262
 263	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
 264		udelay(500);
 265	}
 266}
 267
 268/**
 269 * amdgpu_io_rreg - read an IO register
 270 *
 271 * @adev: amdgpu_device pointer
 272 * @reg: dword aligned register offset
 
 
 
 273 *
 274 * Returns the 32 bit value from the offset specified.
 275 */
 276u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
 
 
 277{
 278	if ((reg * 4) < adev->rio_mem_size)
 279		return ioread32(adev->rio_mem + (reg * 4));
 280	else {
 281		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
 282		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 283	}
 284}
 285
 286/**
 287 * amdgpu_io_wreg - write to an IO register
 288 *
 289 * @adev: amdgpu_device pointer
 290 * @reg: dword aligned register offset
 291 * @v: 32 bit value to write to the register
 292 *
 293 * Writes the value specified to the offset specified.
 294 */
 295void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 
 296{
 297	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
 298		adev->last_mm_index = v;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 299	}
 300
 301	if ((reg * 4) < adev->rio_mem_size)
 302		iowrite32(v, adev->rio_mem + (reg * 4));
 303	else {
 304		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
 305		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
 306	}
 307
 308	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
 309		udelay(500);
 310	}
 311}
 312
 313/**
 314 * amdgpu_mm_rdoorbell - read a doorbell dword
 315 *
 316 * @adev: amdgpu_device pointer
 317 * @index: doorbell index
 318 *
 319 * Returns the value in the doorbell aperture at the
 320 * requested doorbell index (CIK).
 321 */
 322u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 323{
 324	if (index < adev->doorbell.num_doorbells) {
 325		return readl(adev->doorbell.ptr + index);
 326	} else {
 327		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 328		return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 329	}
 
 
 
 
 330}
 331
 332/**
 333 * amdgpu_mm_wdoorbell - write a doorbell dword
 334 *
 335 * @adev: amdgpu_device pointer
 336 * @index: doorbell index
 337 * @v: value to write
 338 *
 339 * Writes @v to the doorbell aperture at the
 340 * requested doorbell index (CIK).
 341 */
 342void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 343{
 344	if (index < adev->doorbell.num_doorbells) {
 345		writel(v, adev->doorbell.ptr + index);
 346	} else {
 347		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 348	}
 
 
 349}
 350
 351/**
 352 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 353 *
 354 * @adev: amdgpu_device pointer
 355 * @index: doorbell index
 
 356 *
 357 * Returns the value in the doorbell aperture at the
 358 * requested doorbell index (VEGA10+).
 359 */
 360u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 361{
 362	if (index < adev->doorbell.num_doorbells) {
 363		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
 364	} else {
 365		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 366		return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 367	}
 
 
 368}
 369
 370/**
 371 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 372 *
 373 * @adev: amdgpu_device pointer
 374 * @index: doorbell index
 375 * @v: value to write
 376 *
 377 * Writes @v to the doorbell aperture at the
 378 * requested doorbell index (VEGA10+).
 379 */
 380void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
 381{
 382	if (index < adev->doorbell.num_doorbells) {
 383		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
 384	} else {
 385		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 386	}
 387}
 388
 389/**
 390 * amdgpu_invalid_rreg - dummy reg read function
 391 *
 392 * @adev: amdgpu device pointer
 393 * @reg: offset of register
 394 *
 395 * Dummy register read function.  Used for register blocks
 396 * that certain asics don't have (all asics).
 397 * Returns the value in the register.
 398 */
 399static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
 400{
 401	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
 402	BUG();
 403	return 0;
 404}
 405
 
 
 
 
 
 
 
 406/**
 407 * amdgpu_invalid_wreg - dummy reg write function
 408 *
 409 * @adev: amdgpu device pointer
 410 * @reg: offset of register
 411 * @v: value to write to the register
 412 *
 413 * Dummy register read function.  Used for register blocks
 414 * that certain asics don't have (all asics).
 415 */
 416static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 417{
 418	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
 419		  reg, v);
 420	BUG();
 421}
 422
 
 
 
 
 
 
 
 423/**
 424 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 425 *
 426 * @adev: amdgpu device pointer
 427 * @reg: offset of register
 428 *
 429 * Dummy register read function.  Used for register blocks
 430 * that certain asics don't have (all asics).
 431 * Returns the value in the register.
 432 */
 433static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
 434{
 435	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
 436	BUG();
 437	return 0;
 438}
 439
 
 
 
 
 
 
 
 440/**
 441 * amdgpu_invalid_wreg64 - dummy reg write function
 442 *
 443 * @adev: amdgpu device pointer
 444 * @reg: offset of register
 445 * @v: value to write to the register
 446 *
 447 * Dummy register read function.  Used for register blocks
 448 * that certain asics don't have (all asics).
 449 */
 450static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
 451{
 452	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
 453		  reg, v);
 454	BUG();
 455}
 456
 
 
 
 
 
 
 
 457/**
 458 * amdgpu_block_invalid_rreg - dummy reg read function
 459 *
 460 * @adev: amdgpu device pointer
 461 * @block: offset of instance
 462 * @reg: offset of register
 463 *
 464 * Dummy register read function.  Used for register blocks
 465 * that certain asics don't have (all asics).
 466 * Returns the value in the register.
 467 */
 468static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
 469					  uint32_t block, uint32_t reg)
 470{
 471	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
 472		  reg, block);
 473	BUG();
 474	return 0;
 475}
 476
 477/**
 478 * amdgpu_block_invalid_wreg - dummy reg write function
 479 *
 480 * @adev: amdgpu device pointer
 481 * @block: offset of instance
 482 * @reg: offset of register
 483 * @v: value to write to the register
 484 *
 485 * Dummy register read function.  Used for register blocks
 486 * that certain asics don't have (all asics).
 487 */
 488static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
 489				      uint32_t block,
 490				      uint32_t reg, uint32_t v)
 491{
 492	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
 493		  reg, block, v);
 494	BUG();
 495}
 496
 497/**
 498 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 499 *
 500 * @adev: amdgpu device pointer
 501 *
 502 * Allocates a scratch page of VRAM for use by various things in the
 503 * driver.
 504 */
 505static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
 506{
 507	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
 508				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
 509				       &adev->vram_scratch.robj,
 510				       &adev->vram_scratch.gpu_addr,
 511				       (void **)&adev->vram_scratch.ptr);
 
 512}
 513
 514/**
 515 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 516 *
 517 * @adev: amdgpu device pointer
 518 *
 519 * Frees the VRAM scratch page.
 520 */
 521static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
 522{
 523	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
 524}
 525
 526/**
 527 * amdgpu_device_program_register_sequence - program an array of registers.
 528 *
 529 * @adev: amdgpu_device pointer
 530 * @registers: pointer to the register array
 531 * @array_size: size of the register array
 532 *
 533 * Programs an array or registers with and and or masks.
 534 * This is a helper for setting golden registers.
 535 */
 536void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
 537					     const u32 *registers,
 538					     const u32 array_size)
 539{
 540	u32 tmp, reg, and_mask, or_mask;
 541	int i;
 542
 543	if (array_size % 3)
 544		return;
 545
 546	for (i = 0; i < array_size; i +=3) {
 547		reg = registers[i + 0];
 548		and_mask = registers[i + 1];
 549		or_mask = registers[i + 2];
 550
 551		if (and_mask == 0xffffffff) {
 552			tmp = or_mask;
 553		} else {
 554			tmp = RREG32(reg);
 555			tmp &= ~and_mask;
 556			if (adev->family >= AMDGPU_FAMILY_AI)
 557				tmp |= (or_mask & and_mask);
 558			else
 559				tmp |= or_mask;
 560		}
 561		WREG32(reg, tmp);
 562	}
 563}
 564
 565/**
 566 * amdgpu_device_pci_config_reset - reset the GPU
 567 *
 568 * @adev: amdgpu_device pointer
 569 *
 570 * Resets the GPU using the pci config reset sequence.
 571 * Only applicable to asics prior to vega10.
 572 */
 573void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
 574{
 575	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
 576}
 577
 578/*
 579 * GPU doorbell aperture helpers function.
 580 */
 581/**
 582 * amdgpu_device_doorbell_init - Init doorbell driver information.
 583 *
 584 * @adev: amdgpu_device pointer
 585 *
 586 * Init doorbell driver information (CIK)
 587 * Returns 0 on success, error on failure.
 588 */
 589static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
 590{
 591
 592	/* No doorbell on SI hardware generation */
 593	if (adev->asic_type < CHIP_BONAIRE) {
 594		adev->doorbell.base = 0;
 595		adev->doorbell.size = 0;
 596		adev->doorbell.num_doorbells = 0;
 597		adev->doorbell.ptr = NULL;
 598		return 0;
 599	}
 600
 601	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
 602		return -EINVAL;
 603
 604	amdgpu_asic_init_doorbell_index(adev);
 605
 606	/* doorbell bar mapping */
 607	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
 608	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
 609
 610	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
 611					     adev->doorbell_index.max_assignment+1);
 612	if (adev->doorbell.num_doorbells == 0)
 613		return -EINVAL;
 614
 615	/* For Vega, reserve and map two pages on doorbell BAR since SDMA
 616	 * paging queue doorbell use the second page. The
 617	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
 618	 * doorbells are in the first page. So with paging queue enabled,
 619	 * the max num_doorbells should + 1 page (0x400 in dword)
 620	 */
 621	if (adev->asic_type >= CHIP_VEGA10)
 622		adev->doorbell.num_doorbells += 0x400;
 623
 624	adev->doorbell.ptr = ioremap(adev->doorbell.base,
 625				     adev->doorbell.num_doorbells *
 626				     sizeof(u32));
 627	if (adev->doorbell.ptr == NULL)
 628		return -ENOMEM;
 629
 630	return 0;
 631}
 632
 633/**
 634 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 635 *
 636 * @adev: amdgpu_device pointer
 637 *
 638 * Tear down doorbell driver information (CIK)
 639 */
 640static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
 641{
 642	iounmap(adev->doorbell.ptr);
 643	adev->doorbell.ptr = NULL;
 644}
 645
 646
 647
 648/*
 649 * amdgpu_device_wb_*()
 650 * Writeback is the method by which the GPU updates special pages in memory
 651 * with the status of certain GPU events (fences, ring pointers,etc.).
 652 */
 653
 654/**
 655 * amdgpu_device_wb_fini - Disable Writeback and free memory
 656 *
 657 * @adev: amdgpu_device pointer
 658 *
 659 * Disables Writeback and frees the Writeback memory (all asics).
 660 * Used at driver shutdown.
 661 */
 662static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
 663{
 664	if (adev->wb.wb_obj) {
 665		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
 666				      &adev->wb.gpu_addr,
 667				      (void **)&adev->wb.wb);
 668		adev->wb.wb_obj = NULL;
 669	}
 670}
 671
 672/**
 673 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
 674 *
 675 * @adev: amdgpu_device pointer
 676 *
 677 * Initializes writeback and allocates writeback memory (all asics).
 678 * Used at driver startup.
 679 * Returns 0 on success or an -error on failure.
 680 */
 681static int amdgpu_device_wb_init(struct amdgpu_device *adev)
 682{
 683	int r;
 684
 685	if (adev->wb.wb_obj == NULL) {
 686		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
 687		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
 688					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
 689					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
 690					    (void **)&adev->wb.wb);
 691		if (r) {
 692			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
 693			return r;
 694		}
 695
 696		adev->wb.num_wb = AMDGPU_MAX_WB;
 697		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
 698
 699		/* clear wb memory */
 700		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
 701	}
 702
 703	return 0;
 704}
 705
 706/**
 707 * amdgpu_device_wb_get - Allocate a wb entry
 708 *
 709 * @adev: amdgpu_device pointer
 710 * @wb: wb index
 711 *
 712 * Allocate a wb slot for use by the driver (all asics).
 713 * Returns 0 on success or -EINVAL on failure.
 714 */
 715int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
 716{
 717	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
 718
 719	if (offset < adev->wb.num_wb) {
 720		__set_bit(offset, adev->wb.used);
 721		*wb = offset << 3; /* convert to dw offset */
 722		return 0;
 723	} else {
 724		return -EINVAL;
 725	}
 726}
 727
 728/**
 729 * amdgpu_device_wb_free - Free a wb entry
 730 *
 731 * @adev: amdgpu_device pointer
 732 * @wb: wb index
 733 *
 734 * Free a wb slot allocated for use by the driver (all asics)
 735 */
 736void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
 737{
 738	wb >>= 3;
 739	if (wb < adev->wb.num_wb)
 740		__clear_bit(wb, adev->wb.used);
 741}
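
    /*
     * Illustrative usage sketch (not from this file): the index handed out
     * by amdgpu_device_wb_get() is a dword offset into the writeback page
     * (each slot is 256 bits, i.e. 8 dwords), so a caller can derive both
     * the CPU view and the GPU address of its slot from it:
     *
     *	u32 wb;
     *
     *	if (amdgpu_device_wb_get(adev, &wb))
     *		return -EINVAL;
     *	cpu_ptr = &adev->wb.wb[wb];
     *	gpu_addr = adev->wb.gpu_addr + (wb * 4);
     *	...
     *	amdgpu_device_wb_free(adev, wb);
     */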
 742
 743/**
 744 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 745 *
 746 * @adev: amdgpu_device pointer
 747 *
 748 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 749 * to fail, but if any of the BARs is not accessible after the resize we abort
 750 * driver loading by returning -ENODEV.
 751 */
 752int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 753{
 754	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
 755	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
 756	struct pci_bus *root;
 757	struct resource *res;
 758	unsigned i;
 759	u16 cmd;
 760	int r;
 761
 762	/* Bypass for VF */
 763	if (amdgpu_sriov_vf(adev))
 764		return 0;
 765
 766	/* Check if the root BUS has 64-bit memory resources */
 767	root = adev->pdev->bus;
 768	while (root->parent)
 769		root = root->parent;
 770
 771	pci_bus_for_each_resource(root, res, i) {
 772		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
 773		    res->start > 0x100000000ull)
 774			break;
 775	}
 776
 777	/* Trying to resize is pointless without a root hub window above 4GB */
 778	if (!res)
 779		return 0;
 780
 781	/* Disable memory decoding while we change the BAR addresses and size */
 782	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
 783	pci_write_config_word(adev->pdev, PCI_COMMAND,
 784			      cmd & ~PCI_COMMAND_MEMORY);
 785
 786	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
 787	amdgpu_device_doorbell_fini(adev);
 788	if (adev->asic_type >= CHIP_BONAIRE)
 789		pci_release_resource(adev->pdev, 2);
 790
 791	pci_release_resource(adev->pdev, 0);
 792
 793	r = pci_resize_resource(adev->pdev, 0, rbar_size);
 794	if (r == -ENOSPC)
 795		DRM_INFO("Not enough PCI address space for a large BAR.");
 796	else if (r && r != -ENOTSUPP)
 797		DRM_ERROR("Problem resizing BAR0 (%d).", r);
 798
 799	pci_assign_unassigned_bus_resources(adev->pdev->bus);
 800
 801	/* When the doorbell or fb BAR isn't available we have no chance of
 802	 * using the device.
 803	 */
 804	r = amdgpu_device_doorbell_init(adev);
 805	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
 806		return -ENODEV;
 807
 808	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
 809
 810	return 0;
 811}
 812
 813/*
 814 * GPU helpers function.
 815 */
 816/**
 817 * amdgpu_device_need_post - check if the hw need post or not
 818 *
 819 * @adev: amdgpu_device pointer
 820 *
 821 * Check if the asic needs to be posted (all asics), e.g. at driver startup
 822 * or after a hw reset.
 823 * Returns true if post is needed, false if not.
 824 */
 825bool amdgpu_device_need_post(struct amdgpu_device *adev)
 826{
 827	uint32_t reg;
 828
 829	if (amdgpu_sriov_vf(adev))
 830		return false;
 831
 832	if (amdgpu_passthrough(adev)) {
 833		/* for FIJI: in the whole-GPU pass-through virtualization case, after
 834		 * a VM reboot some old SMC firmware still needs the driver to do a
 835		 * vPost, otherwise the GPU hangs; SMC firmware above version 22.15
 836		 * doesn't have this flaw, so force a vPost for versions below 22.15
 837		 */
 838		if (adev->asic_type == CHIP_FIJI) {
 839			int err;
 840			uint32_t fw_ver;
 841			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
 842			/* force vPost if an error occurred */
 843			if (err)
 844				return true;
 845
 846			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
 847			if (fw_ver < 0x00160e00)
 848				return true;
 849		}
 850	}
 851
 852	if (adev->has_hw_reset) {
 853		adev->has_hw_reset = false;
 854		return true;
 855	}
 856
 857	/* bios scratch used on CIK+ */
 858	if (adev->asic_type >= CHIP_BONAIRE)
 859		return amdgpu_atombios_scratch_need_asic_init(adev);
 860
 861	/* check MEM_SIZE for older asics */
 862	reg = amdgpu_asic_get_config_memsize(adev);
 863
 864	if ((reg != 0) && (reg != 0xffffffff))
 865		return false;
 866
 867	return true;
 868}
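
    /*
     * The typical caller (device init) posts the card via atombios when this
     * returns true; shown here only for illustration:
     *
     *	if (amdgpu_device_need_post(adev))
     *		amdgpu_atom_asic_init(adev->mode_info.atom_context);
     */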
 869
 870/* if we get transitioned to only one device, take VGA back */
 871/**
 872 * amdgpu_device_vga_set_decode - enable/disable vga decode
 873 *
 874 * @cookie: amdgpu_device pointer
 875 * @state: enable/disable vga decode
 876 *
 877 * Enable/disable vga decode (all asics).
 878 * Returns VGA resource flags.
 879 */
 880static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
 881{
 882	struct amdgpu_device *adev = cookie;
 883	amdgpu_asic_set_vga_state(adev, state);
 884	if (state)
 885		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
 886		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 887	else
 888		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 889}
 890
 891/**
 892 * amdgpu_device_check_block_size - validate the vm block size
 893 *
 894 * @adev: amdgpu_device pointer
 895 *
 896 * Validates the vm block size specified via module parameter.
 897 * The vm block size defines the number of bits in the page table versus the
 898 * page directory; a page is 4KB, so we have a 12-bit offset, a minimum of 9
 899 * bits in the page table, and the remaining bits in the page directory.
 900 */
 901static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
 902{
 903	/* defines number of bits in page table versus page directory,
 904	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 905	 * page table and the remaining bits are in the page directory */
 906	if (amdgpu_vm_block_size == -1)
 907		return;
 908
 909	if (amdgpu_vm_block_size < 9) {
 910		dev_warn(adev->dev, "VM page table size (%d) too small\n",
 911			 amdgpu_vm_block_size);
 912		amdgpu_vm_block_size = -1;
 913	}
 914}
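
    /*
     * Worked example: with the minimum block size of 9 bits, one page table
     * block covers 2^(9+12) bytes = 2MB of GPU virtual address space.
     */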
 915
 916/**
 917 * amdgpu_device_check_vm_size - validate the vm size
 918 *
 919 * @adev: amdgpu_device pointer
 920 *
 921 * Validates the vm size in GB specified via module parameter.
 922 * The VM size is the size of the GPU virtual memory space in GB.
 923 */
 924static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
 925{
 926	/* no need to check the default value */
 927	if (amdgpu_vm_size == -1)
 928		return;
 929
 930	if (amdgpu_vm_size < 1) {
 931		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
 932			 amdgpu_vm_size);
 933		amdgpu_vm_size = -1;
 934	}
 935}
 936
 937static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
 938{
 939	struct sysinfo si;
 940	bool is_os_64 = (sizeof(void *) == 8);
 941	uint64_t total_memory;
 942	uint64_t dram_size_seven_GB = 0x1B8000000;
 943	uint64_t dram_size_three_GB = 0xB8000000;
 944
 945	if (amdgpu_smu_memory_pool_size == 0)
 946		return;
 947
 948	if (!is_os_64) {
 949		DRM_WARN("Not 64-bit OS, feature not supported\n");
 950		goto def_value;
 951	}
 952	si_meminfo(&si);
 953	total_memory = (uint64_t)si.totalram * si.mem_unit;
 954
 955	if ((amdgpu_smu_memory_pool_size == 1) ||
 956		(amdgpu_smu_memory_pool_size == 2)) {
 957		if (total_memory < dram_size_three_GB)
 958			goto def_value1;
 959	} else if ((amdgpu_smu_memory_pool_size == 4) ||
 960		(amdgpu_smu_memory_pool_size == 8)) {
 961		if (total_memory < dram_size_seven_GB)
 962			goto def_value1;
 963	} else {
 964		DRM_WARN("Smu memory pool size not supported\n");
 965		goto def_value;
 966	}
 967	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
 968
 969	return;
 970
 971def_value1:
 972	DRM_WARN("Not enough system memory\n");
 973def_value:
 974	adev->pm.smu_prv_buffer_size = 0;
 975}
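
    /*
     * Derived example: the pool size parameter is in units of 256MB
     * (value << 28 bytes), so amdgpu_smu_memory_pool_size=1/2/4/8 requests
     * a 256MB/512MB/1GB/2GB pool, gated on roughly 3GB (for 1 and 2) or
     * 7GB (for 4 and 8) of system memory being present.
     */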
 976
 977/**
 978 * amdgpu_device_check_arguments - validate module params
 979 *
 980 * @adev: amdgpu_device pointer
 981 *
 982 * Validates certain module parameters and updates
 983 * the associated values used by the driver (all asics).
 984 */
 985static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 986{
 987	int ret = 0;
 988
 989	if (amdgpu_sched_jobs < 4) {
 990		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
 991			 amdgpu_sched_jobs);
 992		amdgpu_sched_jobs = 4;
 993	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
 994		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
 995			 amdgpu_sched_jobs);
 996		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
 997	}
 998
 999	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1000		/* gart size must be greater than or equal to 32M */
1001		dev_warn(adev->dev, "gart size (%d) too small\n",
1002			 amdgpu_gart_size);
1003		amdgpu_gart_size = -1;
1004	}
1005
1006	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1007		/* gtt size must be greater than or equal to 32M */
1008		dev_warn(adev->dev, "gtt size (%d) too small\n",
1009				 amdgpu_gtt_size);
1010		amdgpu_gtt_size = -1;
1011	}
1012
1013	/* valid range is between 4 and 9 inclusive */
1014	if (amdgpu_vm_fragment_size != -1 &&
1015	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1016		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1017		amdgpu_vm_fragment_size = -1;
1018	}
1019
1020	amdgpu_device_check_smu_prv_buffer_size(adev);
1021
1022	amdgpu_device_check_vm_size(adev);
1023
1024	amdgpu_device_check_block_size(adev);
1025
1026	ret = amdgpu_device_get_job_timeout_settings(adev);
1027	if (ret) {
1028		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
1029		return ret;
1030	}
1031
1032	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1033
1034	return ret;
1035}
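
    /*
     * Illustrative examples of the clamping above (derived from this code):
     * booting with amdgpu.sched_jobs=6 rounds the value up to 8, and
     * amdgpu.vm_fragment_size=12 falls outside the 4..9 range and is reset
     * to -1 (use the default).
     */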
1036
1037/**
1038 * amdgpu_switcheroo_set_state - set switcheroo state
1039 *
1040 * @pdev: pci dev pointer
1041 * @state: vga_switcheroo state
1042 *
1043 * Callback for the switcheroo driver.  Suspends or resumes
1044 * the asic before or after it is powered up using ACPI methods.
1045 */
1046static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1047{
1048	struct drm_device *dev = pci_get_drvdata(pdev);
1049
1050	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1051		return;
1052
1053	if (state == VGA_SWITCHEROO_ON) {
1054		pr_info("amdgpu: switched on\n");
1055		/* don't suspend or resume card normally */
1056		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1057
1058		amdgpu_device_resume(dev, true, true);
1059
1060		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1061		drm_kms_helper_poll_enable(dev);
1062	} else {
1063		pr_info("amdgpu: switched off\n");
1064		drm_kms_helper_poll_disable(dev);
1065		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1066		amdgpu_device_suspend(dev, true, true);
1067		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1068	}
1069}
1070
1071/**
1072 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1073 *
1074 * @pdev: pci dev pointer
1075 *
1076 * Callback for the switcheroo driver.  Checks if the switcheroo
1077 * state can be changed.
1078 * Returns true if the state can be changed, false if not.
1079 */
1080static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1081{
1082	struct drm_device *dev = pci_get_drvdata(pdev);
1083
1084	/*
1085	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1086	* locking inversion with the driver load path. And the access here is
1087	* completely racy anyway. So don't bother with locking for now.
1088	*/
1089	return dev->open_count == 0;
1090}
1091
1092static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1093	.set_gpu_state = amdgpu_switcheroo_set_state,
1094	.reprobe = NULL,
1095	.can_switch = amdgpu_switcheroo_can_switch,
1096};
1097
1098/**
1099 * amdgpu_device_ip_set_clockgating_state - set the CG state
1100 *
1101 * @dev: amdgpu_device pointer
1102 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1103 * @state: clockgating state (gate or ungate)
1104 *
1105 * Sets the requested clockgating state for all instances of
1106 * the hardware IP specified.
1107 * Returns the error code from the last instance.
1108 */
1109int amdgpu_device_ip_set_clockgating_state(void *dev,
1110					   enum amd_ip_block_type block_type,
1111					   enum amd_clockgating_state state)
1112{
1113	struct amdgpu_device *adev = dev;
1114	int i, r = 0;
1115
1116	for (i = 0; i < adev->num_ip_blocks; i++) {
1117		if (!adev->ip_blocks[i].status.valid)
1118			continue;
1119		if (adev->ip_blocks[i].version->type != block_type)
1120			continue;
1121		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1122			continue;
1123		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1124			(void *)adev, state);
1125		if (r)
1126			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1127				  adev->ip_blocks[i].version->funcs->name, r);
1128	}
1129	return r;
1130}
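
    /*
     * Hypothetical caller, shown only to illustrate the API above:
     *
     *	r = amdgpu_device_ip_set_clockgating_state(adev,
     *						   AMD_IP_BLOCK_TYPE_GFX,
     *						   AMD_CG_STATE_GATE);
     */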
1131
1132/**
1133 * amdgpu_device_ip_set_powergating_state - set the PG state
1134 *
1135 * @dev: amdgpu_device pointer
1136 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1137 * @state: powergating state (gate or ungate)
1138 *
1139 * Sets the requested powergating state for all instances of
1140 * the hardware IP specified.
1141 * Returns the error code from the last instance.
1142 */
1143int amdgpu_device_ip_set_powergating_state(void *dev,
1144					   enum amd_ip_block_type block_type,
1145					   enum amd_powergating_state state)
1146{
1147	struct amdgpu_device *adev = dev;
1148	int i, r = 0;
1149
1150	for (i = 0; i < adev->num_ip_blocks; i++) {
1151		if (!adev->ip_blocks[i].status.valid)
1152			continue;
1153		if (adev->ip_blocks[i].version->type != block_type)
1154			continue;
1155		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1156			continue;
1157		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1158			(void *)adev, state);
1159		if (r)
1160			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1161				  adev->ip_blocks[i].version->funcs->name, r);
1162	}
1163	return r;
1164}
1165
1166/**
1167 * amdgpu_device_ip_get_clockgating_state - get the CG state
1168 *
1169 * @adev: amdgpu_device pointer
1170 * @flags: clockgating feature flags
1171 *
1172 * Walks the list of IPs on the device and updates the clockgating
1173 * flags for each IP.
1174 * Updates @flags with the feature flags for each hardware IP where
1175 * clockgating is enabled.
1176 */
1177void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1178					    u32 *flags)
1179{
1180	int i;
1181
1182	for (i = 0; i < adev->num_ip_blocks; i++) {
1183		if (!adev->ip_blocks[i].status.valid)
1184			continue;
1185		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1186			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1187	}
1188}
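
    /*
     * Hypothetical caller (e.g. a debugfs dump) collecting the
     * AMD_CG_SUPPORT_* flags currently active on this device:
     *
     *	u32 flags = 0;
     *
     *	amdgpu_device_ip_get_clockgating_state(adev, &flags);
     */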
1189
1190/**
1191 * amdgpu_device_ip_wait_for_idle - wait for idle
1192 *
1193 * @adev: amdgpu_device pointer
1194 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1195 *
1196 * Waits for the requested hardware IP to be idle.
1197 * Returns 0 for success or a negative error code on failure.
1198 */
1199int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1200				   enum amd_ip_block_type block_type)
1201{
1202	int i, r;
1203
1204	for (i = 0; i < adev->num_ip_blocks; i++) {
1205		if (!adev->ip_blocks[i].status.valid)
1206			continue;
1207		if (adev->ip_blocks[i].version->type == block_type) {
1208			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1209			if (r)
1210				return r;
1211			break;
1212		}
1213	}
1214	return 0;
1215
1216}
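
    /*
     * Hypothetical caller: block until the GMC is idle before
     * reprogramming it:
     *
     *	r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
     *	if (r)
     *		return r;
     */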
1217
1218/**
1219 * amdgpu_device_ip_is_idle - is the hardware IP idle
1220 *
1221 * @adev: amdgpu_device pointer
1222 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1223 *
1224 * Check if the hardware IP is idle or not.
1225 * Returns true if the IP is idle, false if not.
1226 */
1227bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1228			      enum amd_ip_block_type block_type)
1229{
1230	int i;
1231
1232	for (i = 0; i < adev->num_ip_blocks; i++) {
1233		if (!adev->ip_blocks[i].status.valid)
1234			continue;
1235		if (adev->ip_blocks[i].version->type == block_type)
1236			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1237	}
1238	return true;
1239
1240}
1241
1242/**
1243 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1244 *
1245 * @adev: amdgpu_device pointer
1246 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1247 *
1248 * Returns a pointer to the hardware IP block structure
1249 * if it exists for the asic, otherwise NULL.
1250 */
1251struct amdgpu_ip_block *
1252amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1253			      enum amd_ip_block_type type)
1254{
1255	int i;
1256
1257	for (i = 0; i < adev->num_ip_blocks; i++)
1258		if (adev->ip_blocks[i].version->type == type)
1259			return &adev->ip_blocks[i];
1260
1261	return NULL;
1262}
1263
1264/**
1265 * amdgpu_device_ip_block_version_cmp
1266 *
1267 * @adev: amdgpu_device pointer
1268 * @type: enum amd_ip_block_type
1269 * @major: major version
1270 * @minor: minor version
1271 *
1272 * Returns 0 if the IP block's version is equal to or greater than the
1273 * requested version, 1 if it is smaller or the ip_block doesn't exist.
1274 */
1275int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1276				       enum amd_ip_block_type type,
1277				       u32 major, u32 minor)
1278{
1279	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1280
1281	if (ip_block && ((ip_block->version->major > major) ||
1282			((ip_block->version->major == major) &&
1283			(ip_block->version->minor >= minor))))
1284		return 0;
1285
1286	return 1;
1287}
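
    /*
     * Hypothetical caller; note the inverted return convention, 0 meaning
     * "the version is at least the one asked for":
     *
     *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
     *						7, 0)) {
     *		... SMC 7.0 or newer ...
     *	}
     */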
1288
1289/**
1290 * amdgpu_device_ip_block_add
1291 *
1292 * @adev: amdgpu_device pointer
1293 * @ip_block_version: pointer to the IP to add
1294 *
1295 * Adds the IP block driver information to the collection of IPs
1296 * on the asic.
1297 */
1298int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1299			       const struct amdgpu_ip_block_version *ip_block_version)
1300{
1301	if (!ip_block_version)
1302		return -EINVAL;
1303
1304	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1305		  ip_block_version->funcs->name);
1306
1307	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1308
1309	return 0;
1310}
1311
1312/**
1313 * amdgpu_device_enable_virtual_display - enable virtual display feature
1314 *
1315 * @adev: amdgpu_device pointer
1316 *
1317 * Enables the virtual display feature if the user has enabled it via
1318 * the module parameter virtual_display.  This feature provides virtual
1319 * display hardware on headless boards or in virtualized environments.
1320 * This function parses and validates the configuration string specified by
1321 * the user and configures the virtual display configuration (number of
1322 * virtual connectors, crtcs, etc.) specified.
1323 */
1324static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1325{
1326	adev->enable_virtual_display = false;
1327
1328	if (amdgpu_virtual_display) {
1329		struct drm_device *ddev = adev->ddev;
1330		const char *pci_address_name = pci_name(ddev->pdev);
1331		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1332
1333		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1334		pciaddstr_tmp = pciaddstr;
1335		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1336			pciaddname = strsep(&pciaddname_tmp, ",");
1337			if (!strcmp("all", pciaddname)
1338			    || !strcmp(pci_address_name, pciaddname)) {
1339				long num_crtc;
1340				int res = -1;
1341
1342				adev->enable_virtual_display = true;
1343
1344				if (pciaddname_tmp)
1345					res = kstrtol(pciaddname_tmp, 10,
1346						      &num_crtc);
1347
1348				if (!res) {
1349					if (num_crtc < 1)
1350						num_crtc = 1;
1351					if (num_crtc > 6)
1352						num_crtc = 6;
1353					adev->mode_info.num_crtc = num_crtc;
1354				} else {
1355					adev->mode_info.num_crtc = 1;
1356				}
1357				break;
1358			}
1359		}
1360
1361		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1362			 amdgpu_virtual_display, pci_address_name,
1363			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1364
1365		kfree(pciaddstr);
1366	}
1367}
1368
1369/**
1370 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1371 *
1372 * @adev: amdgpu_device pointer
1373 *
1374 * Parses the asic configuration parameters specified in the gpu info
1375 * firmware and makes them available to the driver for use in configuring
1376 * the asic.
1377 * Returns 0 on success, -EINVAL on failure.
1378 */
1379static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1380{
1381	const char *chip_name;
1382	char fw_name[30];
1383	int err;
1384	const struct gpu_info_firmware_header_v1_0 *hdr;
1385
1386	adev->firmware.gpu_info_fw = NULL;
1387
1388	switch (adev->asic_type) {
1389	case CHIP_TOPAZ:
1390	case CHIP_TONGA:
1391	case CHIP_FIJI:
1392	case CHIP_POLARIS10:
1393	case CHIP_POLARIS11:
1394	case CHIP_POLARIS12:
1395	case CHIP_VEGAM:
1396	case CHIP_CARRIZO:
1397	case CHIP_STONEY:
1398#ifdef CONFIG_DRM_AMDGPU_SI
1399	case CHIP_VERDE:
1400	case CHIP_TAHITI:
1401	case CHIP_PITCAIRN:
1402	case CHIP_OLAND:
1403	case CHIP_HAINAN:
1404#endif
1405#ifdef CONFIG_DRM_AMDGPU_CIK
1406	case CHIP_BONAIRE:
1407	case CHIP_HAWAII:
1408	case CHIP_KAVERI:
1409	case CHIP_KABINI:
1410	case CHIP_MULLINS:
1411#endif
1412	case CHIP_VEGA20:
1413	default:
1414		return 0;
1415	case CHIP_VEGA10:
1416		chip_name = "vega10";
1417		break;
1418	case CHIP_VEGA12:
1419		chip_name = "vega12";
1420		break;
1421	case CHIP_RAVEN:
1422		if (adev->rev_id >= 8)
1423			chip_name = "raven2";
1424		else if (adev->pdev->device == 0x15d8)
1425			chip_name = "picasso";
1426		else
1427			chip_name = "raven";
1428		break;
1429	case CHIP_ARCTURUS:
1430		chip_name = "arcturus";
1431		break;
1432	case CHIP_RENOIR:
1433		chip_name = "renoir";
1434		break;
1435	case CHIP_NAVI10:
1436		chip_name = "navi10";
1437		break;
1438	case CHIP_NAVI14:
1439		chip_name = "navi14";
1440		break;
1441	case CHIP_NAVI12:
1442		chip_name = "navi12";
1443		break;
1444	}
1445
1446	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1447	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1448	if (err) {
1449		dev_err(adev->dev,
1450			"Failed to load gpu_info firmware \"%s\"\n",
1451			fw_name);
1452		goto out;
1453	}
1454	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1455	if (err) {
1456		dev_err(adev->dev,
1457			"Failed to validate gpu_info firmware \"%s\"\n",
1458			fw_name);
1459		goto out;
1460	}
1461
1462	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1463	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1464
1465	switch (hdr->version_major) {
1466	case 1:
1467	{
1468		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1469			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1470								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1471
1472		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1473		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1474		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1475		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1476		adev->gfx.config.max_texture_channel_caches =
1477			le32_to_cpu(gpu_info_fw->gc_num_tccs);
1478		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1479		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1480		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1481		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1482		adev->gfx.config.double_offchip_lds_buf =
1483			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1484		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1485		adev->gfx.cu_info.max_waves_per_simd =
1486			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1487		adev->gfx.cu_info.max_scratch_slots_per_cu =
1488			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1489		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1490		if (hdr->version_minor >= 1) {
1491			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1492				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1493									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1494			adev->gfx.config.num_sc_per_sh =
1495				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1496			adev->gfx.config.num_packer_per_sc =
1497				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1498		}
1499#ifdef CONFIG_DRM_AMD_DC_DCN2_0
1500		if (hdr->version_minor == 2) {
1501			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1502				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1503									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1504			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1505		}
1506#endif
1507		break;
1508	}
1509	default:
1510		dev_err(adev->dev,
1511			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1512		err = -EINVAL;
1513		goto out;
1514	}
1515out:
1516	return err;
1517}
1518
1519/**
1520 * amdgpu_device_ip_early_init - run early init for hardware IPs
1521 *
1522 * @adev: amdgpu_device pointer
1523 *
1524 * Early initialization pass for hardware IPs.  The hardware IPs that make
1525 * up each asic are discovered and each IP's early_init callback is run.  This
1526 * is the first stage in initializing the asic.
1527 * Returns 0 on success, negative error code on failure.
1528 */
1529static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1530{
1531	int i, r;
1532
1533	amdgpu_device_enable_virtual_display(adev);
1534
1535	switch (adev->asic_type) {
1536	case CHIP_TOPAZ:
1537	case CHIP_TONGA:
1538	case CHIP_FIJI:
1539	case CHIP_POLARIS10:
1540	case CHIP_POLARIS11:
1541	case CHIP_POLARIS12:
1542	case CHIP_VEGAM:
1543	case CHIP_CARRIZO:
1544	case CHIP_STONEY:
1545		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1546			adev->family = AMDGPU_FAMILY_CZ;
1547		else
1548			adev->family = AMDGPU_FAMILY_VI;
1549
1550		r = vi_set_ip_blocks(adev);
1551		if (r)
1552			return r;
1553		break;
1554#ifdef CONFIG_DRM_AMDGPU_SI
1555	case CHIP_VERDE:
1556	case CHIP_TAHITI:
1557	case CHIP_PITCAIRN:
1558	case CHIP_OLAND:
1559	case CHIP_HAINAN:
1560		adev->family = AMDGPU_FAMILY_SI;
1561		r = si_set_ip_blocks(adev);
1562		if (r)
1563			return r;
1564		break;
1565#endif
1566#ifdef CONFIG_DRM_AMDGPU_CIK
1567	case CHIP_BONAIRE:
1568	case CHIP_HAWAII:
1569	case CHIP_KAVERI:
1570	case CHIP_KABINI:
1571	case CHIP_MULLINS:
1572		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1573			adev->family = AMDGPU_FAMILY_CI;
1574		else
1575			adev->family = AMDGPU_FAMILY_KV;
1576
1577		r = cik_set_ip_blocks(adev);
1578		if (r)
1579			return r;
1580		break;
1581#endif
1582	case CHIP_VEGA10:
1583	case CHIP_VEGA12:
1584	case CHIP_VEGA20:
1585	case CHIP_RAVEN:
1586	case CHIP_ARCTURUS:
1587	case CHIP_RENOIR:
1588		if (adev->asic_type == CHIP_RAVEN ||
1589		    adev->asic_type == CHIP_RENOIR)
1590			adev->family = AMDGPU_FAMILY_RV;
1591		else
1592			adev->family = AMDGPU_FAMILY_AI;
1593
1594		r = soc15_set_ip_blocks(adev);
1595		if (r)
1596			return r;
1597		break;
1598	case  CHIP_NAVI10:
1599	case  CHIP_NAVI14:
1600	case  CHIP_NAVI12:
1601		adev->family = AMDGPU_FAMILY_NV;
1602
1603		r = nv_set_ip_blocks(adev);
1604		if (r)
1605			return r;
1606		break;
1607	default:
1608		/* FIXME: not supported yet */
1609		return -EINVAL;
1610	}
1611
1612	r = amdgpu_device_parse_gpu_info_fw(adev);
1613	if (r)
1614		return r;
1615
1616	amdgpu_amdkfd_device_probe(adev);
1617
1618	if (amdgpu_sriov_vf(adev)) {
1619		r = amdgpu_virt_request_full_gpu(adev, true);
1620		if (r)
1621			return -EAGAIN;
1622	}
1623
1624	adev->pm.pp_feature = amdgpu_pp_feature_mask;
1625	if (amdgpu_sriov_vf(adev))
1626		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1627
1628	for (i = 0; i < adev->num_ip_blocks; i++) {
1629		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1630			DRM_ERROR("disabled ip block: %d <%s>\n",
1631				  i, adev->ip_blocks[i].version->funcs->name);
1632			adev->ip_blocks[i].status.valid = false;
1633		} else {
1634			if (adev->ip_blocks[i].version->funcs->early_init) {
1635				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1636				if (r == -ENOENT) {
1637					adev->ip_blocks[i].status.valid = false;
1638				} else if (r) {
1639					DRM_ERROR("early_init of IP block <%s> failed %d\n",
1640						  adev->ip_blocks[i].version->funcs->name, r);
1641					return r;
1642				} else {
1643					adev->ip_blocks[i].status.valid = true;
1644				}
1645			} else {
1646				adev->ip_blocks[i].status.valid = true;
1647			}
1648		}
1649		/* get the vbios after the asic_funcs are set up */
1650		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
1651			/* Read BIOS */
1652			if (!amdgpu_get_bios(adev))
1653				return -EINVAL;
1654
1655			r = amdgpu_atombios_init(adev);
1656			if (r) {
1657				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
1658				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
1659				return r;
1660			}
1661		}
1662	}
1663
1664	adev->cg_flags &= amdgpu_cg_mask;
1665	adev->pg_flags &= amdgpu_pg_mask;
1666
1667	return 0;
1668}
1669
1670static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
1671{
1672	int i, r;
1673
1674	for (i = 0; i < adev->num_ip_blocks; i++) {
1675		if (!adev->ip_blocks[i].status.sw)
1676			continue;
1677		if (adev->ip_blocks[i].status.hw)
1678			continue;
1679		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1680		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
1681		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
1682			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1683			if (r) {
1684				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1685					  adev->ip_blocks[i].version->funcs->name, r);
1686				return r;
1687			}
1688			adev->ip_blocks[i].status.hw = true;
1689		}
1690	}
1691
1692	return 0;
1693}
1694
1695static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
1696{
1697	int i, r;
1698
1699	for (i = 0; i < adev->num_ip_blocks; i++) {
1700		if (!adev->ip_blocks[i].status.sw)
1701			continue;
1702		if (adev->ip_blocks[i].status.hw)
1703			continue;
1704		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1705		if (r) {
1706			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1707				  adev->ip_blocks[i].version->funcs->name, r);
1708			return r;
1709		}
1710		adev->ip_blocks[i].status.hw = true;
1711	}
1712
1713	return 0;
1714}
1715
1716static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
1717{
1718	int r = 0;
1719	int i;
1720	uint32_t smu_version;
1721
1722	if (adev->asic_type >= CHIP_VEGA10) {
1723		for (i = 0; i < adev->num_ip_blocks; i++) {
1724			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
1725				continue;
1726
1727			/* no need to do the fw loading again if already done */
1728			if (adev->ip_blocks[i].status.hw == true)
1729				break;
1730
1731			if (adev->in_gpu_reset || adev->in_suspend) {
1732				r = adev->ip_blocks[i].version->funcs->resume(adev);
1733				if (r) {
1734					DRM_ERROR("resume of IP block <%s> failed %d\n",
1735							  adev->ip_blocks[i].version->funcs->name, r);
1736					return r;
1737				}
1738			} else {
1739				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1740				if (r) {
1741					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1742							  adev->ip_blocks[i].version->funcs->name, r);
1743					return r;
1744				}
1745			}
1746
1747			adev->ip_blocks[i].status.hw = true;
1748			break;
1749		}
1750	}
1751
1752	r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
1753
1754	return r;
1755}
1756
1757/**
1758 * amdgpu_device_ip_init - run init for hardware IPs
1759 *
1760 * @adev: amdgpu_device pointer
1761 *
1762 * Main initialization pass for hardware IPs.  The list of all the hardware
1763 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
1764 * are run.  sw_init initializes the software state associated with each IP
1765 * and hw_init initializes the hardware associated with each IP.
1766 * Returns 0 on success, negative error code on failure.
1767 */
1768static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1769{
1770	int i, r;
1771
1772	r = amdgpu_ras_init(adev);
1773	if (r)
1774		return r;
1775
1776	for (i = 0; i < adev->num_ip_blocks; i++) {
1777		if (!adev->ip_blocks[i].status.valid)
1778			continue;
1779		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1780		if (r) {
1781			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1782				  adev->ip_blocks[i].version->funcs->name, r);
1783			goto init_failed;
1784		}
1785		adev->ip_blocks[i].status.sw = true;
1786
1787		/* need to do gmc hw init early so we can allocate gpu mem */
1788		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1789			r = amdgpu_device_vram_scratch_init(adev);
1790			if (r) {
1791				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1792				goto init_failed;
1793			}
1794			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1795			if (r) {
1796				DRM_ERROR("hw_init %d failed %d\n", i, r);
1797				goto init_failed;
1798			}
1799			r = amdgpu_device_wb_init(adev);
1800			if (r) {
1801				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
1802				goto init_failed;
1803			}
1804			adev->ip_blocks[i].status.hw = true;
1805
1806			/* right after GMC hw init, we create CSA */
1807			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
1808				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
1809								AMDGPU_GEM_DOMAIN_VRAM,
1810								AMDGPU_CSA_SIZE);
1811				if (r) {
1812					DRM_ERROR("allocate CSA failed %d\n", r);
1813					goto init_failed;
1814				}
1815			}
1816		}
1817	}
1818
1819	r = amdgpu_ib_pool_init(adev);
1820	if (r) {
1821		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
1822		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
1823		goto init_failed;
1824	}
1825
1826	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
1827	if (r)
1828		goto init_failed;
1829
1830	r = amdgpu_device_ip_hw_init_phase1(adev);
1831	if (r)
1832		goto init_failed;
1833
1834	r = amdgpu_device_fw_loading(adev);
1835	if (r)
1836		goto init_failed;
1837
1838	r = amdgpu_device_ip_hw_init_phase2(adev);
1839	if (r)
1840		goto init_failed;
1841
1842	if (adev->gmc.xgmi.num_physical_nodes > 1)
1843		amdgpu_xgmi_add_device(adev);
1844	amdgpu_amdkfd_device_init(adev);
1845
1846init_failed:
1847	if (amdgpu_sriov_vf(adev)) {
1848		if (!r)
1849			amdgpu_virt_init_data_exchange(adev);
1850		amdgpu_virt_release_full_gpu(adev, true);
1851	}
1852
1853	return r;
1854}
1855
1856/**
1857 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
1858 *
1859 * @adev: amdgpu_device pointer
1860 *
1861 * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
1862 * this function before a GPU reset.  If the value is retained after a
1863 * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
1864 */
1865static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
1866{
1867	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1868}
1869
1870/**
1871 * amdgpu_device_check_vram_lost - check if vram is valid
1872 *
1873 * @adev: amdgpu_device pointer
1874 *
1875 * Checks the reset magic value written to the gart pointer in VRAM.
1876 * The driver calls this after a GPU reset to see if the contents of
1877 * VRAM is lost or not.
1878 * Returns true if vram is lost, false if not.
1879 */
1880static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
1881{
1882	return !!memcmp(adev->gart.ptr, adev->reset_magic,
1883			AMDGPU_RESET_MAGIC_NUM);
1884}
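
    /*
     * Sketch of how the two helpers above pair up across a reset
     * (simplified from the recovery path later in this file):
     *
     *	amdgpu_device_fill_reset_magic(adev);    (done at late init)
     *	... GPU reset ...
     *	if (amdgpu_device_check_vram_lost(adev))
     *		... VRAM contents must be restored ...
     */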
1885
1886/**
1887 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
1888 *
1889 * @adev: amdgpu_device pointer
     * @state: clockgating state (gate or ungate)
1890 *
1891 * The list of all the hardware IPs that make up the asic is walked and the
1892 * set_clockgating_state callbacks are run.
1893 * On late init this pass enables clockgating for the hardware IPs; on fini
1894 * or suspend it disables clockgating.
1895 * Returns 0 on success, negative error code on failure.
1896 */
1897
1898static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
1899						enum amd_clockgating_state state)
1900{
1901	int i, j, r;
1902
1903	if (amdgpu_emu_mode == 1)
1904		return 0;
1905
1906	for (j = 0; j < adev->num_ip_blocks; j++) {
1907		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
1908		if (!adev->ip_blocks[i].status.late_initialized)
1909			continue;
1910		/* skip CG for VCE/UVD, it's handled specially */
1911		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1912		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1913		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
1914		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1915			/* enable clockgating to save power */
1916			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1917										     state);
1918			if (r) {
1919				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1920					  adev->ip_blocks[i].version->funcs->name, r);
1921				return r;
1922			}
1923		}
1924	}
1925
1926	return 0;
1927}
1928
1929static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
1930{
1931	int i, j, r;
1932
1933	if (amdgpu_emu_mode == 1)
1934		return 0;
1935
1936	for (j = 0; j < adev->num_ip_blocks; j++) {
1937		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
1938		if (!adev->ip_blocks[i].status.late_initialized)
1939			continue;
1940		/* skip PG for VCE/UVD, it's handled specially */
1941		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1942		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1943		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
1944		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
1945			/* enable powergating to save power */
1946			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
1947											state);
1948			if (r) {
1949				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
1950					  adev->ip_blocks[i].version->funcs->name, r);
1951				return r;
1952			}
1953		}
1954	}
1955	return 0;
1956}
1957
1958static int amdgpu_device_enable_mgpu_fan_boost(void)
1959{
1960	struct amdgpu_gpu_instance *gpu_ins;
1961	struct amdgpu_device *adev;
1962	int i, ret = 0;
1963
1964	mutex_lock(&mgpu_info.mutex);
1965
1966	/*
1967	 * MGPU fan boost feature should be enabled
1968	 * only when there are two or more dGPUs in
1969	 * the system
1970	 */
1971	if (mgpu_info.num_dgpu < 2)
1972		goto out;
1973
1974	for (i = 0; i < mgpu_info.num_dgpu; i++) {
1975		gpu_ins = &(mgpu_info.gpu_ins[i]);
1976		adev = gpu_ins->adev;
1977		if (!(adev->flags & AMD_IS_APU) &&
1978		    !gpu_ins->mgpu_fan_enabled &&
1979		    adev->powerplay.pp_funcs &&
1980		    adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
1981			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
1982			if (ret)
1983				break;
1984
1985			gpu_ins->mgpu_fan_enabled = 1;
1986		}
1987	}
1988
1989out:
1990	mutex_unlock(&mgpu_info.mutex);
1991
1992	return ret;
1993}
1994
1995/**
1996 * amdgpu_device_ip_late_init - run late init for hardware IPs
1997 *
1998 * @adev: amdgpu_device pointer
1999 *
2000 * Late initialization pass for hardware IPs.  The list of all the hardware
2001 * IPs that make up the asic is walked and the late_init callbacks are run.
2002 * late_init covers any special initialization that an IP requires
2003 * after all of the IPs have been initialized or something that needs to happen
2004 * late in the init process.
2005 * Returns 0 on success, negative error code on failure.
2006 */
2007static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2008{
2009	int i = 0, r;
2010
2011	for (i = 0; i < adev->num_ip_blocks; i++) {
2012		if (!adev->ip_blocks[i].status.hw)
2013			continue;
2014		if (adev->ip_blocks[i].version->funcs->late_init) {
2015			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2016			if (r) {
2017				DRM_ERROR("late_init of IP block <%s> failed %d\n",
2018					  adev->ip_blocks[i].version->funcs->name, r);
2019				return r;
2020			}
2021		}
2022		adev->ip_blocks[i].status.late_initialized = true;
2023	}
2024
2025	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2026	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2027
2028	amdgpu_device_fill_reset_magic(adev);
2029
2030	r = amdgpu_device_enable_mgpu_fan_boost();
2031	if (r)
2032		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2033
2034	/* set to low pstate by default */
2035	amdgpu_xgmi_set_pstate(adev, 0);
2036
2037	return 0;
2038}
2039
2040/**
2041 * amdgpu_device_ip_fini - run fini for hardware IPs
2042 *
2043 * @adev: amdgpu_device pointer
2044 *
2045 * Main teardown pass for hardware IPs.  The list of all the hardware
2046 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2047 * are run.  hw_fini tears down the hardware associated with each IP
2048 * and sw_fini tears down any software state associated with each IP.
2049 * Returns 0 on success, negative error code on failure.
2050 */
2051static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2052{
2053	int i, r;
2054
2055	amdgpu_ras_pre_fini(adev);
2056
2057	if (adev->gmc.xgmi.num_physical_nodes > 1)
2058		amdgpu_xgmi_remove_device(adev);
2059
2060	amdgpu_amdkfd_device_fini(adev);
2061
2062	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2063	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2064
2065	/* need to disable SMC first */
2066	for (i = 0; i < adev->num_ip_blocks; i++) {
2067		if (!adev->ip_blocks[i].status.hw)
2068			continue;
2069		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2070			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2071			/* XXX handle errors */
2072			if (r) {
2073				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2074					  adev->ip_blocks[i].version->funcs->name, r);
2075			}
2076			adev->ip_blocks[i].status.hw = false;
2077			break;
2078		}
2079	}
2080
2081	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2082		if (!adev->ip_blocks[i].status.hw)
2083			continue;
2084
2085		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2086		/* XXX handle errors */
2087		if (r) {
2088			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2089				  adev->ip_blocks[i].version->funcs->name, r);
2090		}
2091
2092		adev->ip_blocks[i].status.hw = false;
2093	}
2094
2095
2096	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2097		if (!adev->ip_blocks[i].status.sw)
2098			continue;
2099
2100		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2101			amdgpu_ucode_free_bo(adev);
2102			amdgpu_free_static_csa(&adev->virt.csa_obj);
2103			amdgpu_device_wb_fini(adev);
2104			amdgpu_device_vram_scratch_fini(adev);
2105			amdgpu_ib_pool_fini(adev);
2106		}
2107
2108		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2109		/* XXX handle errors */
2110		if (r) {
2111			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2112				  adev->ip_blocks[i].version->funcs->name, r);
2113		}
2114		adev->ip_blocks[i].status.sw = false;
2115		adev->ip_blocks[i].status.valid = false;
2116	}
2117
2118	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2119		if (!adev->ip_blocks[i].status.late_initialized)
2120			continue;
2121		if (adev->ip_blocks[i].version->funcs->late_fini)
2122			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2123		adev->ip_blocks[i].status.late_initialized = false;
2124	}
2125
2126	amdgpu_ras_fini(adev);
2127
2128	if (amdgpu_sriov_vf(adev))
2129		if (amdgpu_virt_release_full_gpu(adev, false))
2130			DRM_ERROR("failed to release exclusive mode on fini\n");
2131
2132	return 0;
2133}
2134
2135/**
2136 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2137 *
2138 * @work: work_struct.
2139 */
2140static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2141{
2142	struct amdgpu_device *adev =
2143		container_of(work, struct amdgpu_device, delayed_init_work.work);
2144	int r;
2145
2146	r = amdgpu_ib_ring_tests(adev);
2147	if (r)
2148		DRM_ERROR("ib ring test failed (%d).\n", r);
2149}
2150
2151static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2152{
2153	struct amdgpu_device *adev =
2154		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2155
2156	mutex_lock(&adev->gfx.gfx_off_mutex);
2157	if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2158		if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2159			adev->gfx.gfx_off_state = true;
2160	}
2161	mutex_unlock(&adev->gfx.gfx_off_mutex);
2162}
2163
2164/**
2165 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2166 *
2167 * @adev: amdgpu_device pointer
2168 *
2169 * Main suspend function for hardware IPs.  The list of all the hardware
2170 * IPs that make up the asic is walked, clockgating is disabled and the
2171 * suspend callbacks are run.  suspend puts the hardware and software state
2172 * in each IP into a state suitable for suspend.
2173 * Returns 0 on success, negative error code on failure.
2174 */
2175static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2176{
2177	int i, r;
2178
2179	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2180	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2181
2182	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2183		if (!adev->ip_blocks[i].status.valid)
2184			continue;
2185		/* displays are handled separately */
2186		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
2187			/* XXX handle errors */
2188			r = adev->ip_blocks[i].version->funcs->suspend(adev);
2190			if (r) {
2191				DRM_ERROR("suspend of IP block <%s> failed %d\n",
2192					  adev->ip_blocks[i].version->funcs->name, r);
2193				return r;
2194			}
2195			adev->ip_blocks[i].status.hw = false;
2196		}
2197	}
2198
2199	return 0;
2200}
2201
2202/**
2203 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2204 *
2205 * @adev: amdgpu_device pointer
2206 *
2207 * Main suspend function for hardware IPs.  The list of all the hardware
2208 * IPs that make up the asic is walked, clockgating is disabled and the
2209 * suspend callbacks are run.  suspend puts the hardware and software state
2210 * in each IP into a state suitable for suspend.
2211 * Returns 0 on success, negative error code on failure.
2212 */
2213static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2214{
2215	int i, r;
2216
2217	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2218		if (!adev->ip_blocks[i].status.valid)
2219			continue;
2220		/* displays are handled in phase1 */
2221		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2222			continue;
2223		/* XXX handle errors */
2224		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2226		if (r) {
2227			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2228				  adev->ip_blocks[i].version->funcs->name, r);
2229		}
2230		adev->ip_blocks[i].status.hw = false;
2231		/* handle putting the SMC in the appropriate state */
2232		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2233			if (is_support_sw_smu(adev)) {
2234				/* todo */
2235			} else if (adev->powerplay.pp_funcs &&
2236					   adev->powerplay.pp_funcs->set_mp1_state) {
2237				r = adev->powerplay.pp_funcs->set_mp1_state(
2238					adev->powerplay.pp_handle,
2239					adev->mp1_state);
2240				if (r) {
2241					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2242						  adev->mp1_state, r);
2243					return r;
2244				}
2245			}
2246		}
2249	}
2250
2251	return 0;
2252}
2253
2254/**
2255 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2256 *
2257 * @adev: amdgpu_device pointer
2258 *
2259 * Main suspend function for hardware IPs.  The list of all the hardware
2260 * IPs that make up the asic is walked, clockgating is disabled and the
2261 * suspend callbacks are run.  suspend puts the hardware and software state
2262 * in each IP into a state suitable for suspend.
2263 * Returns 0 on success, negative error code on failure.
2264 */
2265int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2266{
2267	int r;
2268
2269	if (amdgpu_sriov_vf(adev))
2270		amdgpu_virt_request_full_gpu(adev, false);
2271
2272	r = amdgpu_device_ip_suspend_phase1(adev);
2273	if (r)
2274		return r;
2275	r = amdgpu_device_ip_suspend_phase2(adev);
2276
2277	if (amdgpu_sriov_vf(adev))
2278		amdgpu_virt_release_full_gpu(adev, false);
2279
2280	return r;
2281}
2282
2283static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2284{
2285	int i, r;
2286
2287	static enum amd_ip_block_type ip_order[] = {
2288		AMD_IP_BLOCK_TYPE_GMC,
2289		AMD_IP_BLOCK_TYPE_COMMON,
2290		AMD_IP_BLOCK_TYPE_PSP,
2291		AMD_IP_BLOCK_TYPE_IH,
2292	};
2293
2294	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2295		int j;
2296		struct amdgpu_ip_block *block;
2297
2298		for (j = 0; j < adev->num_ip_blocks; j++) {
2299			block = &adev->ip_blocks[j];
2300
2301			block->status.hw = false;
2302			if (block->version->type != ip_order[i] ||
2303				!block->status.valid)
2304				continue;
2305
2306			r = block->version->funcs->hw_init(adev);
2307			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2308			if (r)
2309				return r;
2310			block->status.hw = true;
2311		}
2312	}
2313
2314	return 0;
2315}
2316
2317static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2318{
2319	int i, r;
2320
2321	static enum amd_ip_block_type ip_order[] = {
2322		AMD_IP_BLOCK_TYPE_SMC,
2323		AMD_IP_BLOCK_TYPE_DCE,
2324		AMD_IP_BLOCK_TYPE_GFX,
2325		AMD_IP_BLOCK_TYPE_SDMA,
2326		AMD_IP_BLOCK_TYPE_UVD,
2327		AMD_IP_BLOCK_TYPE_VCE
2328	};
2329
2330	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2331		int j;
2332		struct amdgpu_ip_block *block;
2333
2334		for (j = 0; j < adev->num_ip_blocks; j++) {
2335			block = &adev->ip_blocks[j];
2336
2337			if (block->version->type != ip_order[i] ||
2338				!block->status.valid ||
2339				block->status.hw)
2340				continue;
2341
2342			r = block->version->funcs->hw_init(adev);
2343			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2344			if (r)
2345				return r;
2346			block->status.hw = true;
2347		}
2348	}
2349
2350	return 0;
2351}
2352
2353/**
2354 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2355 *
2356 * @adev: amdgpu_device pointer
2357 *
2358 * First resume function for hardware IPs.  The list of all the hardware
2359 * IPs that make up the asic is walked and the resume callbacks are run for
2360 * COMMON, GMC, and IH.  resume puts the hardware into a functional state
2361 * after a suspend and updates the software state as necessary.  This
2362 * function is also used for restoring the GPU after a GPU reset.
2363 * Returns 0 on success, negative error code on failure.
2364 */
2365static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2366{
2367	int i, r;
2368
2369	for (i = 0; i < adev->num_ip_blocks; i++) {
2370		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2371			continue;
2372		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2373		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2374		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2375
2376			r = adev->ip_blocks[i].version->funcs->resume(adev);
2377			if (r) {
2378				DRM_ERROR("resume of IP block <%s> failed %d\n",
2379					  adev->ip_blocks[i].version->funcs->name, r);
2380				return r;
2381			}
2382			adev->ip_blocks[i].status.hw = true;
2383		}
2384	}
2385
2386	return 0;
2387}
2388
2389/**
2390 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2391 *
2392 * @adev: amdgpu_device pointer
2393 *
2394 * Second resume function for hardware IPs.  The list of all the hardware
2395 * IPs that make up the asic is walked and the resume callbacks are run for
2396 * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
2397 * functional state after a suspend and updates the software state as
2398 * necessary.  This function is also used for restoring the GPU after a GPU
2399 * reset.
2400 * Returns 0 on success, negative error code on failure.
2401 */
2402static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2403{
2404	int i, r;
2405
2406	for (i = 0; i < adev->num_ip_blocks; i++) {
2407		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2408			continue;
2409		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2410		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2411		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2412		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2413			continue;
2414		r = adev->ip_blocks[i].version->funcs->resume(adev);
2415		if (r) {
2416			DRM_ERROR("resume of IP block <%s> failed %d\n",
2417				  adev->ip_blocks[i].version->funcs->name, r);
2418			return r;
2419		}
2420		adev->ip_blocks[i].status.hw = true;
2421	}
2422
2423	return 0;
2424}
2425
2426/**
2427 * amdgpu_device_ip_resume - run resume for hardware IPs
2428 *
2429 * @adev: amdgpu_device pointer
2430 *
2431 * Main resume function for hardware IPs.  The hardware IPs
2432 * are split into two resume functions because they are
2433 * also used in recovering from a GPU reset and some additional
2434 * steps need to be taken between them.  In this case (S3/S4) they are
2435 * run sequentially.
2436 * Returns 0 on success, negative error code on failure.
2437 */
2438static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2439{
2440	int r;
2441
2442	r = amdgpu_device_ip_resume_phase1(adev);
2443	if (r)
2444		return r;
2445
2446	r = amdgpu_device_fw_loading(adev);
2447	if (r)
2448		return r;
2449
2450	r = amdgpu_device_ip_resume_phase2(adev);
2451
2452	return r;
2453}
2454
2455/**
2456 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2457 *
2458 * @adev: amdgpu_device pointer
2459 *
2460 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2461 */
2462static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2463{
2464	if (amdgpu_sriov_vf(adev)) {
2465		if (adev->is_atom_fw) {
2466			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2467				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2468		} else {
2469			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2470				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2471		}
2472
2473		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2474			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2475	}
2476}
2477
2478/**
2479 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2480 *
2481 * @asic_type: AMD asic type
2482 *
2483 * Check if there is DC (new modesetting infrastructure) support for an asic.
2484 * returns true if DC has support, false if not.
2485 */
2486bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2487{
2488	switch (asic_type) {
2489#if defined(CONFIG_DRM_AMD_DC)
2490	case CHIP_BONAIRE:
2491	case CHIP_KAVERI:
2492	case CHIP_KABINI:
2493	case CHIP_MULLINS:
2494		/*
2495		 * We have systems in the wild with these ASICs that require
2496		 * LVDS and VGA support which is not supported with DC.
2497		 *
2498		 * Fallback to the non-DC driver here by default so as not to
2499		 * cause regressions.
2500		 */
2501		return amdgpu_dc > 0;
2502	case CHIP_HAWAII:
2503	case CHIP_CARRIZO:
2504	case CHIP_STONEY:
2505	case CHIP_POLARIS10:
2506	case CHIP_POLARIS11:
2507	case CHIP_POLARIS12:
2508	case CHIP_VEGAM:
2509	case CHIP_TONGA:
2510	case CHIP_FIJI:
2511	case CHIP_VEGA10:
2512	case CHIP_VEGA12:
2513	case CHIP_VEGA20:
2514#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2515	case CHIP_RAVEN:
2516#endif
2517#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2518	case CHIP_NAVI10:
2519	case CHIP_NAVI14:
2520	case CHIP_NAVI12:
2521#endif
2522#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2523	case CHIP_RENOIR:
2524#endif
2525		return amdgpu_dc != 0;
2526#endif
2527	default:
2528		return false;
2529	}
2530}
2531
2532/**
2533 * amdgpu_device_has_dc_support - check if dc is supported
2534 *
2535 * @adev: amdgpu_device pointer
2536 *
2537 * Returns true for supported, false for not supported
2538 */
2539bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2540{
2541	if (amdgpu_sriov_vf(adev))
2542		return false;
2543
2544	return amdgpu_device_asic_has_dc_support(adev->asic_type);
2545}
2546
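/*
 * Illustrative sketch (not part of the driver): callers typically gate
 * display-path decisions on the helper above.  The setup hooks named
 * below are hypothetical.
 *
 *	if (amdgpu_device_has_dc_support(adev))
 *		example_setup_dc_path(adev);		// hypothetical
 *	else
 *		example_setup_legacy_path(adev);	// hypothetical
 */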
2547
2548static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
2549{
2550	struct amdgpu_device *adev =
2551		container_of(__work, struct amdgpu_device, xgmi_reset_work);
2552
2553	adev->asic_reset_res = amdgpu_asic_reset(adev);
2554	if (adev->asic_reset_res)
2555		DRM_WARN("ASIC reset failed with error %d for drm dev %s",
2556			 adev->asic_reset_res, adev->ddev->unique);
2557}
2558
2559
2560/**
2561 * amdgpu_device_init - initialize the driver
2562 *
2563 * @adev: amdgpu_device pointer
2564 * @ddev: drm dev pointer
2565 * @pdev: pci dev pointer
2566 * @flags: driver flags
2567 *
2568 * Initializes the driver info and hw (all asics).
2569 * Returns 0 for success or an error on failure.
2570 * Called at driver startup.
2571 */
2572int amdgpu_device_init(struct amdgpu_device *adev,
2573		       struct drm_device *ddev,
2574		       struct pci_dev *pdev,
2575		       uint32_t flags)
2576{
2577	int r, i;
2578	bool runtime = false;
2579	u32 max_MBps;
2580
2581	adev->shutdown = false;
2582	adev->dev = &pdev->dev;
2583	adev->ddev = ddev;
2584	adev->pdev = pdev;
2585	adev->flags = flags;
2586	adev->asic_type = flags & AMD_ASIC_MASK;
2587	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2588	if (amdgpu_emu_mode == 1)
2589		adev->usec_timeout *= 2;
2590	adev->gmc.gart_size = 512 * 1024 * 1024;
2591	adev->accel_working = false;
2592	adev->num_rings = 0;
2593	adev->mman.buffer_funcs = NULL;
2594	adev->mman.buffer_funcs_ring = NULL;
2595	adev->vm_manager.vm_pte_funcs = NULL;
2596	adev->vm_manager.vm_pte_num_rqs = 0;
2597	adev->gmc.gmc_funcs = NULL;
2598	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2599	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2600
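	/*
	 * All register access callbacks start out pointing at stubs that
	 * complain loudly if they are ever called; the ASIC-specific
	 * early-init code is expected to install the real accessors.
	 */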
2601	adev->smc_rreg = &amdgpu_invalid_rreg;
2602	adev->smc_wreg = &amdgpu_invalid_wreg;
2603	adev->pcie_rreg = &amdgpu_invalid_rreg;
2604	adev->pcie_wreg = &amdgpu_invalid_wreg;
2605	adev->pciep_rreg = &amdgpu_invalid_rreg;
2606	adev->pciep_wreg = &amdgpu_invalid_wreg;
2607	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
2608	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
2609	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2610	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2611	adev->didt_rreg = &amdgpu_invalid_rreg;
2612	adev->didt_wreg = &amdgpu_invalid_wreg;
2613	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2614	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
2615	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2616	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2617
2618	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2619		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2620		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
2621
2622	/* mutex initialization is all done here so we
2623	 * can call these functions again later without locking issues */
2624	atomic_set(&adev->irq.ih.lock, 0);
2625	mutex_init(&adev->firmware.mutex);
2626	mutex_init(&adev->pm.mutex);
2627	mutex_init(&adev->gfx.gpu_clock_mutex);
2628	mutex_init(&adev->srbm_mutex);
2629	mutex_init(&adev->gfx.pipe_reserve_mutex);
2630	mutex_init(&adev->gfx.gfx_off_mutex);
2631	mutex_init(&adev->grbm_idx_mutex);
2632	mutex_init(&adev->mn_lock);
2633	mutex_init(&adev->virt.vf_errors.lock);
2634	hash_init(adev->mn_hash);
2635	mutex_init(&adev->lock_reset);
2636	mutex_init(&adev->virt.dpm_mutex);
2637	mutex_init(&adev->psp.mutex);
2638
2639	r = amdgpu_device_check_arguments(adev);
2640	if (r)
2641		return r;
2642
2643	spin_lock_init(&adev->mmio_idx_lock);
2644	spin_lock_init(&adev->smc_idx_lock);
2645	spin_lock_init(&adev->pcie_idx_lock);
2646	spin_lock_init(&adev->uvd_ctx_idx_lock);
2647	spin_lock_init(&adev->didt_idx_lock);
2648	spin_lock_init(&adev->gc_cac_idx_lock);
2649	spin_lock_init(&adev->se_cac_idx_lock);
2650	spin_lock_init(&adev->audio_endpt_idx_lock);
2651	spin_lock_init(&adev->mm_stats.lock);
2652
2653	INIT_LIST_HEAD(&adev->shadow_list);
2654	mutex_init(&adev->shadow_list_lock);
2655
2656	INIT_LIST_HEAD(&adev->ring_lru_list);
2657	spin_lock_init(&adev->ring_lru_list_lock);
2658
2659	INIT_DELAYED_WORK(&adev->delayed_init_work,
2660			  amdgpu_device_delayed_init_work_handler);
2661	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
2662			  amdgpu_device_delay_enable_gfx_off);
2663
2664	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
2665
2666	adev->gfx.gfx_off_req_count = 1;
2667	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
2668
2669	/* Registers mapping */
2670	/* TODO: block userspace mapping of io register */
2671	if (adev->asic_type >= CHIP_BONAIRE) {
2672		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2673		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2674	} else {
2675		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2676		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2677	}
2678
2679	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2680	if (adev->rmmio == NULL) {
2681		return -ENOMEM;
2682	}
2683	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2684	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2685
2686	/* io port mapping */
2687	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2688		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2689			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2690			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2691			break;
2692		}
2693	}
2694	if (adev->rio_mem == NULL)
2695		DRM_INFO("PCI I/O BAR not found.\n");
2696
2697	/* enable PCIE atomic ops */
2698	r = pci_enable_atomic_ops_to_root(adev->pdev,
2699					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
2700					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
2701	if (r) {
2702		adev->have_atomics_support = false;
2703		DRM_INFO("PCIE atomic ops are not supported\n");
2704	} else {
2705		adev->have_atomics_support = true;
2706	}
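	/* consumers such as KFD later check have_atomics_support to decide
	 * whether functionality that relies on PCIe atomics can be exposed
	 */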
2707
2708	amdgpu_device_get_pcie_info(adev);
2709
2710	if (amdgpu_mcbp)
2711		DRM_INFO("MCBP is enabled\n");
2712
2713	if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
2714		adev->enable_mes = true;
2715
2716	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
2717		r = amdgpu_discovery_init(adev);
2718		if (r) {
2719			dev_err(adev->dev, "amdgpu_discovery_init failed\n");
2720			return r;
2721		}
2722	}
2723
2724	/* early init functions */
2725	r = amdgpu_device_ip_early_init(adev);
2726	if (r)
2727		return r;
2728
2729	/* doorbell bar mapping and doorbell index init*/
2730	amdgpu_device_doorbell_init(adev);
2731
2732	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2733	/* this will fail for cards that aren't VGA class devices, just
2734	 * ignore it */
2735	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
2736
2737	if (amdgpu_device_is_px(ddev))
2738		runtime = true;
2739	if (!pci_is_thunderbolt_attached(adev->pdev))
2740		vga_switcheroo_register_client(adev->pdev,
2741					       &amdgpu_switcheroo_ops, runtime);
2742	if (runtime)
2743		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2744
2745	if (amdgpu_emu_mode == 1) {
2746		/* post the asic on emulation mode */
2747		emu_soc_asic_init(adev);
2748		goto fence_driver_init;
2749	}
2750
2751	/* detect if we are with an SRIOV vbios */
2752	amdgpu_device_detect_sriov_bios(adev);
2753
2754	/* check if we need to reset the asic
2755	 *  E.g., driver was not cleanly unloaded previously, etc.
2756	 */
2757	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
2758		r = amdgpu_asic_reset(adev);
2759		if (r) {
2760			dev_err(adev->dev, "asic reset on init failed\n");
2761			goto failed;
2762		}
2763	}
2764
2765	/* Post card if necessary */
2766	if (amdgpu_device_need_post(adev)) {
2767		if (!adev->bios) {
2768			dev_err(adev->dev, "no vBIOS found\n");
2769			r = -EINVAL;
2770			goto failed;
2771		}
2772		DRM_INFO("GPU posting now...\n");
2773		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2774		if (r) {
2775			dev_err(adev->dev, "gpu post error!\n");
2776			goto failed;
2777		}
2778	}
2779
2780	if (adev->is_atom_fw) {
2781		/* Initialize clocks */
2782		r = amdgpu_atomfirmware_get_clock_info(adev);
2783		if (r) {
2784			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
2785			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2786			goto failed;
2787		}
2788	} else {
2789		/* Initialize clocks */
2790		r = amdgpu_atombios_get_clock_info(adev);
2791		if (r) {
2792			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
2793			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2794			goto failed;
2795		}
2796		/* init i2c buses */
2797		if (!amdgpu_device_has_dc_support(adev))
2798			amdgpu_atombios_i2c_init(adev);
2799	}
2800
2801fence_driver_init:
2802	/* Fence driver */
2803	r = amdgpu_fence_driver_init(adev);
2804	if (r) {
2805		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
2806		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
2807		goto failed;
2808	}
2809
2810	/* init the mode config */
2811	drm_mode_config_init(adev->ddev);
2812
2813	r = amdgpu_device_ip_init(adev);
2814	if (r) {
2815		/* failed in exclusive mode due to timeout */
2816		if (amdgpu_sriov_vf(adev) &&
2817		    !amdgpu_sriov_runtime(adev) &&
2818		    amdgpu_virt_mmio_blocked(adev) &&
2819		    !amdgpu_virt_wait_reset(adev)) {
2820			dev_err(adev->dev, "VF exclusive mode timeout\n");
2821			/* Don't send request since VF is inactive. */
2822			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2823			adev->virt.ops = NULL;
2824			r = -EAGAIN;
2825			goto failed;
2826		}
2827		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
2828		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
2829		if (amdgpu_virt_request_full_gpu(adev, false))
2830			amdgpu_virt_release_full_gpu(adev, false);
2831		goto failed;
2832	}
2833
2834	adev->accel_working = true;
2835
2836	amdgpu_vm_check_compute_bug(adev);
2837
2838	/* Initialize the buffer migration limit. */
2839	if (amdgpu_moverate >= 0)
2840		max_MBps = amdgpu_moverate;
2841	else
2842		max_MBps = 8; /* Allow 8 MB/s. */
2843	/* Get a log2 for easy divisions. */
2844	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
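	/* e.g. the default 8 MB/s gives log2_max_MBps = 3, so the throttling
	 * code can divide by the rate with a simple shift
	 */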
2845
2846	amdgpu_fbdev_init(adev);
2847
2848	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
2849		amdgpu_pm_virt_sysfs_init(adev);
2850
2851	r = amdgpu_pm_sysfs_init(adev);
2852	if (r)
2853		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2854
2855	r = amdgpu_ucode_sysfs_init(adev);
2856	if (r)
2857		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
2858
2859	r = amdgpu_debugfs_gem_init(adev);
2860	if (r)
2861		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
2862
2863	r = amdgpu_debugfs_regs_init(adev);
2864	if (r)
2865		DRM_ERROR("registering register debugfs failed (%d).\n", r);
2866
2867	r = amdgpu_debugfs_firmware_init(adev);
2868	if (r)
2869		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
2870
2871	r = amdgpu_debugfs_init(adev);
2872	if (r)
2873		DRM_ERROR("Creating debugfs files failed (%d).\n", r);
2874
2875	if (amdgpu_testing & 1) {
2876		if (adev->accel_working)
2877			amdgpu_test_moves(adev);
2878		else
2879			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2880	}
2881	if (amdgpu_benchmarking) {
2882		if (adev->accel_working)
2883			amdgpu_benchmark(adev, amdgpu_benchmarking);
2884		else
2885			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2886	}
2887
2888	/*
2889	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
2890	 * Otherwise the mgpu fan boost feature will be skipped because
2891	 * the gpu instance count would be too low.
2892	 */
2893	amdgpu_register_gpu_instance(adev);
2894
2895	/* enable clockgating, etc. after ib tests, etc. since some blocks require
2896	 * explicit gating rather than handling it automatically.
2897	 */
2898	r = amdgpu_device_ip_late_init(adev);
2899	if (r) {
2900		dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
2901		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
2902		goto failed;
2903	}
2904
2905	/* must succeed. */
2906	amdgpu_ras_resume(adev);
2907
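	/* deferred init work (e.g. IB tests) runs AMDGPU_RESUME_MS (2s)
	 * later so driver load is not blocked on it
	 */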
2908	queue_delayed_work(system_wq, &adev->delayed_init_work,
2909			   msecs_to_jiffies(AMDGPU_RESUME_MS));
2910
2911	r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
2912	if (r) {
2913		dev_err(adev->dev, "Could not create pcie_replay_count\n");
2914		return r;
2915	}
2916
2917	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
2918		r = amdgpu_pmu_init(adev);
2919		if (r)
2920			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}
2921
2922	return 0;
2923
2924failed:
2925	amdgpu_vf_error_trans_all(adev);
2926	if (runtime)
2927		vga_switcheroo_fini_domain_pm_ops(adev->dev);
2928
2929	return r;
2930}
2931
2932/**
2933 * amdgpu_device_fini - tear down the driver
2934 *
2935 * @adev: amdgpu_device pointer
2936 *
2937 * Tear down the driver info (all asics).
2938 * Called at driver shutdown.
2939 */
2940void amdgpu_device_fini(struct amdgpu_device *adev)
2941{
2942	int r;
2943
2944	DRM_INFO("amdgpu: finishing device.\n");
2945	adev->shutdown = true;
2946	/* disable all interrupts */
2947	amdgpu_irq_disable_all(adev);
2948	if (adev->mode_info.mode_config_initialized) {
2949		if (!amdgpu_device_has_dc_support(adev))
2950			drm_helper_force_disable_all(adev->ddev);
2951		else
2952			drm_atomic_helper_shutdown(adev->ddev);
2953	}
2954	amdgpu_fence_driver_fini(adev);
2955	amdgpu_pm_sysfs_fini(adev);
2956	amdgpu_fbdev_fini(adev);
2957	r = amdgpu_device_ip_fini(adev);
2958	if (adev->firmware.gpu_info_fw) {
2959		release_firmware(adev->firmware.gpu_info_fw);
2960		adev->firmware.gpu_info_fw = NULL;
2961	}
2962	adev->accel_working = false;
2963	cancel_delayed_work_sync(&adev->delayed_init_work);
2964	/* free i2c buses */
2965	if (!amdgpu_device_has_dc_support(adev))
2966		amdgpu_i2c_fini(adev);
2967
2968	if (amdgpu_emu_mode != 1)
2969		amdgpu_atombios_fini(adev);
2970
2971	kfree(adev->bios);
2972	adev->bios = NULL;
2973	if (!pci_is_thunderbolt_attached(adev->pdev))
2974		vga_switcheroo_unregister_client(adev->pdev);
2975	if (adev->flags & AMD_IS_PX)
2976		vga_switcheroo_fini_domain_pm_ops(adev->dev);
2977	vga_client_register(adev->pdev, NULL, NULL, NULL);
2978	if (adev->rio_mem)
2979		pci_iounmap(adev->pdev, adev->rio_mem);
2980	adev->rio_mem = NULL;
2981	iounmap(adev->rmmio);
2982	adev->rmmio = NULL;
2983	amdgpu_device_doorbell_fini(adev);
2984	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
2985		amdgpu_pm_virt_sysfs_fini(adev);
2986
2987	amdgpu_debugfs_regs_cleanup(adev);
2988	device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
2989	amdgpu_ucode_sysfs_fini(adev);
2990	if (IS_ENABLED(CONFIG_PERF_EVENTS))
2991		amdgpu_pmu_fini(adev);
2992	amdgpu_debugfs_preempt_cleanup(adev);
2993	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
2994		amdgpu_discovery_fini(adev);
2995}
2996
2997
2998/*
2999 * Suspend & resume.
3000 */
3001/**
3002 * amdgpu_device_suspend - initiate device suspend
3003 *
3004 * @dev: drm dev pointer
3005 * @suspend: suspend state
3006 * @fbcon: notify the fbdev of suspend
3007 *
3008 * Puts the hw in the suspend state (all asics).
3009 * Returns 0 for success or an error on failure.
3010 * Called at driver suspend.
3011 */
3012int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
3013{
3014	struct amdgpu_device *adev;
3015	struct drm_crtc *crtc;
3016	struct drm_connector *connector;
3017	int r;
3018
3019	if (dev == NULL || dev->dev_private == NULL) {
3020		return -ENODEV;
3021	}
3022
3023	adev = dev->dev_private;
3024
3025	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3026		return 0;
3027
3028	adev->in_suspend = true;
3029	drm_kms_helper_poll_disable(dev);
3030
3031	if (fbcon)
3032		amdgpu_fbdev_set_suspend(adev, 1);
3033
3034	cancel_delayed_work_sync(&adev->delayed_init_work);
3035
3036	if (!amdgpu_device_has_dc_support(adev)) {
3037		/* turn off display hw */
3038		drm_modeset_lock_all(dev);
3039		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3040			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
3041		}
3042		drm_modeset_unlock_all(dev);
3043		/* unpin the front buffers and cursors */
3044		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3045			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3046			struct drm_framebuffer *fb = crtc->primary->fb;
3047			struct amdgpu_bo *robj;
3048
3049			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3050				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3051				r = amdgpu_bo_reserve(aobj, true);
3052				if (r == 0) {
3053					amdgpu_bo_unpin(aobj);
3054					amdgpu_bo_unreserve(aobj);
3055				}
3056			}
3057
3058			if (fb == NULL || fb->obj[0] == NULL) {
3059				continue;
3060			}
3061			robj = gem_to_amdgpu_bo(fb->obj[0]);
3062			/* don't unpin kernel fb objects */
3063			if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3064				r = amdgpu_bo_reserve(robj, true);
3065				if (r == 0) {
3066					amdgpu_bo_unpin(robj);
3067					amdgpu_bo_unreserve(robj);
3068				}
3069			}
3070		}
3071	}
3072
3073	amdgpu_amdkfd_suspend(adev);
3074
3075	amdgpu_ras_suspend(adev);
3076
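	/* phase1 takes the display hardware down first; phase2 below
	 * suspends the remaining IP blocks once VRAM has been evicted
	 */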
3077	r = amdgpu_device_ip_suspend_phase1(adev);
3078
3079	/* evict vram memory */
3080	amdgpu_bo_evict_vram(adev);
3081
3082	amdgpu_fence_driver_suspend(adev);
3083
3084	r = amdgpu_device_ip_suspend_phase2(adev);
3085
3086	/* evict remaining vram memory
3087	 * This second call to evict vram is to evict the gart page table
3088	 * using the CPU.
3089	 */
3090	amdgpu_bo_evict_vram(adev);
3091
3092	pci_save_state(dev->pdev);
3093	if (suspend) {
3094		/* Shut down the device */
3095		pci_disable_device(dev->pdev);
3096		pci_set_power_state(dev->pdev, PCI_D3hot);
3097	} else {
3098		r = amdgpu_asic_reset(adev);
3099		if (r)
3100			DRM_ERROR("amdgpu asic reset failed\n");
3101	}
3102
3103	return 0;
3104}
3105
3106/**
3107 * amdgpu_device_resume - initiate device resume
3108 *
3109 * @dev: drm dev pointer
3110 * @resume: resume state
3111 * @fbcon: notify the fbdev of resume
3112 *
3113 * Bring the hw back to operating state (all asics).
3114 * Returns 0 for success or an error on failure.
3115 * Called at driver resume.
3116 */
3117int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
3118{
3119	struct drm_connector *connector;
3120	struct amdgpu_device *adev = dev->dev_private;
3121	struct drm_crtc *crtc;
3122	int r = 0;
3123
3124	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3125		return 0;
3126
3127	if (resume) {
3128		pci_set_power_state(dev->pdev, PCI_D0);
3129		pci_restore_state(dev->pdev);
3130		r = pci_enable_device(dev->pdev);
3131		if (r)
3132			return r;
3133	}
3134
3135	/* post card */
3136	if (amdgpu_device_need_post(adev)) {
3137		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
3138		if (r)
3139			DRM_ERROR("amdgpu asic init failed\n");
3140	}
3141
3142	r = amdgpu_device_ip_resume(adev);
3143	if (r) {
3144		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
3145		return r;
3146	}
3147	amdgpu_fence_driver_resume(adev);
3148
3149
3150	r = amdgpu_device_ip_late_init(adev);
3151	if (r)
3152		return r;
3153
3154	queue_delayed_work(system_wq, &adev->delayed_init_work,
3155			   msecs_to_jiffies(AMDGPU_RESUME_MS));
3156
3157	if (!amdgpu_device_has_dc_support(adev)) {
3158		/* pin cursors */
3159		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3160			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3161
3162			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3163				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3164				r = amdgpu_bo_reserve(aobj, true);
3165				if (r == 0) {
3166					r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3167					if (r != 0)
3168						DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
3169					amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3170					amdgpu_bo_unreserve(aobj);
3171				}
3172			}
3173		}
3174	}
3175	r = amdgpu_amdkfd_resume(adev);
3176	if (r)
3177		return r;
3178
3179	/* Make sure IB tests flushed */
3180	flush_delayed_work(&adev->delayed_init_work);
3181
3182	/* blat the mode back in */
3183	if (fbcon) {
3184		if (!amdgpu_device_has_dc_support(adev)) {
3185			/* pre DCE11 */
3186			drm_helper_resume_force_mode(dev);
3187
3188			/* turn on display hw */
3189			drm_modeset_lock_all(dev);
3190			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3191				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
3192			}
3193			drm_modeset_unlock_all(dev);
3194		}
3195		amdgpu_fbdev_set_suspend(adev, 0);
3196	}
3197
3198	drm_kms_helper_poll_enable(dev);
3199
3200	amdgpu_ras_resume(adev);
3201
3202	/*
3203	 * Most of the connector probing functions try to acquire runtime pm
3204	 * refs to ensure that the GPU is powered on when connector polling is
3205	 * performed. Since we're calling this from a runtime PM callback,
3206	 * trying to acquire rpm refs will cause us to deadlock.
3207	 *
3208	 * Since we're guaranteed to be holding the rpm lock, it's safe to
3209	 * temporarily disable the rpm helpers so this doesn't deadlock us.
3210	 */
3211#ifdef CONFIG_PM
3212	dev->dev->power.disable_depth++;
3213#endif
3214	if (!amdgpu_device_has_dc_support(adev))
3215		drm_helper_hpd_irq_event(dev);
3216	else
3217		drm_kms_helper_hotplug_event(dev);
3218#ifdef CONFIG_PM
3219	dev->dev->power.disable_depth--;
3220#endif
3221	adev->in_suspend = false;
3222
3223	return 0;
3224}
3225
3226/**
3227 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3228 *
3229 * @adev: amdgpu_device pointer
3230 *
3231 * The list of all the hardware IPs that make up the asic is walked and
3232 * the check_soft_reset callbacks are run.  check_soft_reset determines
3233 * if the asic is still hung or not.
3234 * Returns true if any of the IPs are still in a hung state, false if not.
3235 */
3236static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3237{
3238	int i;
3239	bool asic_hang = false;
3240
3241	if (amdgpu_sriov_vf(adev))
3242		return true;
3243
3244	if (amdgpu_asic_need_full_reset(adev))
3245		return true;
3246
3247	for (i = 0; i < adev->num_ip_blocks; i++) {
3248		if (!adev->ip_blocks[i].status.valid)
3249			continue;
3250		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3251			adev->ip_blocks[i].status.hang =
3252				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3253		if (adev->ip_blocks[i].status.hang) {
3254			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3255			asic_hang = true;
3256		}
3257	}
3258	return asic_hang;
3259}
3260
3261/**
3262 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3263 *
3264 * @adev: amdgpu_device pointer
3265 *
3266 * The list of all the hardware IPs that make up the asic is walked and the
3267 * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
3268 * handles any IP specific hardware or software state changes that are
3269 * necessary for a soft reset to succeed.
3270 * Returns 0 on success, negative error code on failure.
3271 */
3272static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3273{
3274	int i, r = 0;
3275
3276	for (i = 0; i < adev->num_ip_blocks; i++) {
3277		if (!adev->ip_blocks[i].status.valid)
3278			continue;
3279		if (adev->ip_blocks[i].status.hang &&
3280		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3281			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3282			if (r)
3283				return r;
3284		}
3285	}
3286
3287	return 0;
3288}
3289
3290/**
3291 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3292 *
3293 * @adev: amdgpu_device pointer
3294 *
3295 * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
3296 * reset is necessary to recover.
3297 * Returns true if a full asic reset is required, false if not.
3298 */
3299static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3300{
3301	int i;
3302
3303	if (amdgpu_asic_need_full_reset(adev))
3304		return true;
3305
3306	for (i = 0; i < adev->num_ip_blocks; i++) {
3307		if (!adev->ip_blocks[i].status.valid)
3308			continue;
3309		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3310		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3311		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3312		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3313		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3314			if (adev->ip_blocks[i].status.hang) {
3315				DRM_INFO("Some blocks need a full reset!\n");
3316				return true;
3317			}
3318		}
3319	}
3320	return false;
3321}
3322
3323/**
3324 * amdgpu_device_ip_soft_reset - do a soft reset
3325 *
3326 * @adev: amdgpu_device pointer
3327 *
3328 * The list of all the hardware IPs that make up the asic is walked and the
3329 * soft_reset callbacks are run if the block is hung.  soft_reset handles any
3330 * IP specific hardware or software state changes that are necessary to soft
3331 * reset the IP.
3332 * Returns 0 on success, negative error code on failure.
3333 */
3334static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3335{
3336	int i, r = 0;
3337
3338	for (i = 0; i < adev->num_ip_blocks; i++) {
3339		if (!adev->ip_blocks[i].status.valid)
3340			continue;
3341		if (adev->ip_blocks[i].status.hang &&
3342		    adev->ip_blocks[i].version->funcs->soft_reset) {
3343			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3344			if (r)
3345				return r;
3346		}
3347	}
3348
3349	return 0;
3350}
3351
3352/**
3353 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3354 *
3355 * @adev: amdgpu_device pointer
3356 *
3357 * The list of all the hardware IPs that make up the asic is walked and the
3358 * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
3359 * handles any IP specific hardware or software state changes that are
3360 * necessary after the IP has been soft reset.
3361 * Returns 0 on success, negative error code on failure.
3362 */
3363static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
3364{
3365	int i, r = 0;
3366
3367	for (i = 0; i < adev->num_ip_blocks; i++) {
3368		if (!adev->ip_blocks[i].status.valid)
3369			continue;
3370		if (adev->ip_blocks[i].status.hang &&
3371		    adev->ip_blocks[i].version->funcs->post_soft_reset)
3372			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
3373		if (r)
3374			return r;
3375	}
3376
3377	return 0;
3378}
3379
3380/**
3381 * amdgpu_device_recover_vram - Recover some VRAM contents
3382 *
3383 * @adev: amdgpu_device pointer
3384 *
3385 * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
3386 * restore things like GPUVM page tables after a GPU reset where
3387 * the contents of VRAM might be lost.
3388 *
3389 * Returns:
3390 * 0 on success, negative error code on failure.
3391 */
3392static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
3393{
3394	struct dma_fence *fence = NULL, *next = NULL;
3395	struct amdgpu_bo *shadow;
3396	long r = 1, tmo;
3397
3398	if (amdgpu_sriov_runtime(adev))
3399		tmo = msecs_to_jiffies(8000);
3400	else
3401		tmo = msecs_to_jiffies(100);
3402
3403	DRM_INFO("recover vram bo from shadow start\n");
3404	mutex_lock(&adev->shadow_list_lock);
3405	list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
3406
3407		/* No need to recover an evicted BO */
3408		if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
3409		    shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
3410		    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
3411			continue;
3412
3413		r = amdgpu_bo_restore_shadow(shadow, &next);
3414		if (r)
3415			break;
3416
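		/* Wait on the previously issued copy while the restore just
		 * started above runs in the background (pipelined recovery).
		 */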
3417		if (fence) {
3418			tmo = dma_fence_wait_timeout(fence, false, tmo);
3419			dma_fence_put(fence);
3420			fence = next;
3421			if (tmo == 0) {
3422				r = -ETIMEDOUT;
3423				break;
3424			} else if (tmo < 0) {
3425				r = tmo;
3426				break;
3427			}
3428		} else {
3429			fence = next;
3430		}
3431	}
3432	mutex_unlock(&adev->shadow_list_lock);
3433
3434	if (fence)
3435		tmo = dma_fence_wait_timeout(fence, false, tmo);
3436	dma_fence_put(fence);
3437
3438	if (r < 0 || tmo <= 0) {
3439		DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
3440		return -EIO;
3441	}
3442
3443	DRM_INFO("recover vram bo from shadow done\n");
3444	return 0;
3445}
3446
3447
3448/**
3449 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
3450 *
3451 * @adev: amdgpu device pointer
3452 * @from_hypervisor: request from hypervisor
3453 *
3454 * Do a VF FLR and reinitialize the ASIC.
3455 * Returns 0 on success, negative error code on failure.
3456 */
3457static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3458				     bool from_hypervisor)
3459{
3460	int r;
3461
3462	if (from_hypervisor)
3463		r = amdgpu_virt_request_full_gpu(adev, true);
3464	else
3465		r = amdgpu_virt_reset_gpu(adev);
3466	if (r)
3467		return r;
3468
3469	amdgpu_amdkfd_pre_reset(adev);
3470
3471	/* Resume IP prior to SMC */
3472	r = amdgpu_device_ip_reinit_early_sriov(adev);
3473	if (r)
3474		goto error;
3475
3476	/* we need to recover the GART prior to running SMC/CP/SDMA resume */
3477	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
3478
3479	r = amdgpu_device_fw_loading(adev);
3480	if (r)
3481		goto error;
3482
3483	/* now we are okay to resume SMC/CP/SDMA */
3484	r = amdgpu_device_ip_reinit_late_sriov(adev);
3485	if (r)
3486		goto error;
3487
3488	amdgpu_irq_gpu_reset_resume_helper(adev);
3489	r = amdgpu_ib_ring_tests(adev);
3490	amdgpu_amdkfd_post_reset(adev);
3491
3492error:
3493	amdgpu_virt_init_data_exchange(adev);
3494	amdgpu_virt_release_full_gpu(adev, true);
3495	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3496		amdgpu_inc_vram_lost(adev);
3497		r = amdgpu_device_recover_vram(adev);
3498	}
3499
3500	return r;
3501}
3502
3503/**
3504 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
3505 *
3506 * @adev: amdgpu device pointer
3507 *
3508 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
3509 * a hung GPU.
3510 */
3511bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
3512{
3513	if (!amdgpu_device_ip_check_soft_reset(adev)) {
3514		DRM_INFO("Timeout, but no hardware hang detected.\n");
3515		return false;
3516	}
3517
3518	if (amdgpu_gpu_recovery == 0)
3519		goto disabled;
3520
3521	if (amdgpu_sriov_vf(adev))
3522		return true;
3523
3524	if (amdgpu_gpu_recovery == -1) {
3525		switch (adev->asic_type) {
3526		case CHIP_BONAIRE:
3527		case CHIP_HAWAII:
3528		case CHIP_TOPAZ:
3529		case CHIP_TONGA:
3530		case CHIP_FIJI:
3531		case CHIP_POLARIS10:
3532		case CHIP_POLARIS11:
3533		case CHIP_POLARIS12:
3534		case CHIP_VEGAM:
3535		case CHIP_VEGA20:
3536		case CHIP_VEGA10:
3537		case CHIP_VEGA12:
3538		case CHIP_RAVEN:
3539			break;
3540		default:
3541			goto disabled;
3542		}
3543	}
3544
3545	return true;
3546
3547disabled:
3548	DRM_INFO("GPU recovery disabled.\n");
3549	return false;
3550}
3551
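/*
 * Illustrative sketch (not part of this file): the job timeout handler in
 * amdgpu_job.c consults the helper above before kicking off recovery,
 * roughly like this (field access simplified):
 *
 *	if (amdgpu_device_should_recover_gpu(ring->adev))
 *		amdgpu_device_gpu_recover(ring->adev, job);
 */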
3552
3553static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
3554					struct amdgpu_job *job,
3555					bool *need_full_reset_arg)
3556{
3557	int i, r = 0;
3558	bool need_full_reset = *need_full_reset_arg;
3559
3560	/* block all schedulers and reset given job's ring */
3561	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3562		struct amdgpu_ring *ring = adev->rings[i];
3563
3564		if (!ring || !ring->sched.thread)
3565			continue;
3566
3567		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3568		amdgpu_fence_driver_force_completion(ring);
3569	}
3570
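	/* bump the offending job's karma; repeat offenders are eventually
	 * marked guilty by the scheduler and not resubmitted
	 */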
3571	if (job)
3572		drm_sched_increase_karma(&job->base);
3573
3574	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
3575	if (!amdgpu_sriov_vf(adev)) {
3576
3577		if (!need_full_reset)
3578			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
3579
3580		if (!need_full_reset) {
3581			amdgpu_device_ip_pre_soft_reset(adev);
3582			r = amdgpu_device_ip_soft_reset(adev);
3583			amdgpu_device_ip_post_soft_reset(adev);
3584			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
3585				DRM_INFO("soft reset failed, will fallback to full reset!\n");
3586				need_full_reset = true;
3587			}
3588		}
3589
3590		if (need_full_reset)
3591			r = amdgpu_device_ip_suspend(adev);
3592
3593		*need_full_reset_arg = need_full_reset;
3594	}
3595
3596	return r;
3597}
3598
3599static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
3600			       struct list_head *device_list_handle,
3601			       bool *need_full_reset_arg)
3602{
3603	struct amdgpu_device *tmp_adev = NULL;
3604	bool need_full_reset = *need_full_reset_arg, vram_lost = false;
3605	int r = 0;
3606
3607	/*
3608	 * ASIC reset has to be done on all XGMI hive nodes ASAP
3609	 * to allow proper link negotiation in FW (within 1 sec)
3610	 */
3611	if (need_full_reset) {
3612		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3613			/* For XGMI run all resets in parallel to speed up the process */
3614			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
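				/* queue_work() only fails if this device's reset
				 * work is already pending, hence -EALREADY below
				 */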
3615				if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
3616					r = -EALREADY;
3617			} else
3618				r = amdgpu_asic_reset(tmp_adev);
3619
3620			if (r) {
3621				DRM_ERROR("ASIC reset failed with error %d for drm dev %s",
3622					 r, tmp_adev->ddev->unique);
3623				break;
3624			}
3625		}
3626
3627		/* For XGMI wait for all PSP resets to complete before proceed */
3628		if (!r) {
3629			list_for_each_entry(tmp_adev, device_list_handle,
3630					    gmc.xgmi.head) {
3631				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
3632					flush_work(&tmp_adev->xgmi_reset_work);
3633					r = tmp_adev->asic_reset_res;
3634					if (r)
3635						break;
3636				}
3637			}
3638
3639			list_for_each_entry(tmp_adev, device_list_handle,
3640					gmc.xgmi.head) {
3641				amdgpu_ras_reserve_bad_pages(tmp_adev);
3642			}
3643		}
3644	}
3645
3646
3647	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3648		if (need_full_reset) {
3649			/* post card */
3650			if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
3651				DRM_WARN("asic atom init failed!");
3652
3653			if (!r) {
3654				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
3655				r = amdgpu_device_ip_resume_phase1(tmp_adev);
3656				if (r)
3657					goto out;
3658
3659				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
3660				if (vram_lost) {
3661					DRM_INFO("VRAM is lost due to GPU reset!\n");
3662					amdgpu_inc_vram_lost(tmp_adev);
3663				}
3664
3665				r = amdgpu_gtt_mgr_recover(
3666					&tmp_adev->mman.bdev.man[TTM_PL_TT]);
3667				if (r)
3668					goto out;
3669
3670				r = amdgpu_device_fw_loading(tmp_adev);
3671				if (r)
3672					return r;
3673
3674				r = amdgpu_device_ip_resume_phase2(tmp_adev);
3675				if (r)
3676					goto out;
3677
3678				if (vram_lost)
3679					amdgpu_device_fill_reset_magic(tmp_adev);
3680
3681				/*
3682				 * Add this ASIC as tracked as reset was already
3683				 * complete successfully.
3684				 */
3685				amdgpu_register_gpu_instance(tmp_adev);
3686
3687				r = amdgpu_device_ip_late_init(tmp_adev);
3688				if (r)
3689					goto out;
3690
3691				/* must succeed. */
3692				amdgpu_ras_resume(tmp_adev);
3693
3694				/* Update PSP FW topology after reset */
3695				if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
3696					r = amdgpu_xgmi_update_topology(hive, tmp_adev);
3697			}
3698		}
3699
3700
3701out:
3702		if (!r) {
3703			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
3704			r = amdgpu_ib_ring_tests(tmp_adev);
3705			if (r) {
3706				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
3707				r = amdgpu_device_ip_suspend(tmp_adev);
3708				need_full_reset = true;
3709				r = -EAGAIN;
3710				goto end;
3711			}
3712		}
3713
3714		if (!r)
3715			r = amdgpu_device_recover_vram(tmp_adev);
3716		else
3717			tmp_adev->asic_reset_res = r;
3718	}
3719
3720end:
3721	*need_full_reset_arg = need_full_reset;
3722	return r;
3723}
3724
3725static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
3726{
3727	if (trylock) {
3728		if (!mutex_trylock(&adev->lock_reset))
3729			return false;
3730	} else
3731		mutex_lock(&adev->lock_reset);
3732
3733	atomic_inc(&adev->gpu_reset_counter);
3734	adev->in_gpu_reset = 1;
3735	switch (amdgpu_asic_reset_method(adev)) {
3736	case AMD_RESET_METHOD_MODE1:
3737		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
3738		break;
3739	case AMD_RESET_METHOD_MODE2:
3740		adev->mp1_state = PP_MP1_STATE_RESET;
3741		break;
3742	default:
3743		adev->mp1_state = PP_MP1_STATE_NONE;
3744		break;
3745	}
3746	/* Block kfd: SRIOV would do it separately */
3747	if (!amdgpu_sriov_vf(adev))
3748		amdgpu_amdkfd_pre_reset(adev);
3749
3750	return true;
3751}
3752
3753static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
3754{
3755	/* unlock kfd: SRIOV would do it separately */
3756	if (!amdgpu_sriov_vf(adev))
3757		amdgpu_amdkfd_post_reset(adev);
3758	amdgpu_vf_error_trans_all(adev);
3759	adev->mp1_state = PP_MP1_STATE_NONE;
3760	adev->in_gpu_reset = 0;
3761	mutex_unlock(&adev->lock_reset);
3762}
3763
3764
3765/**
3766 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
3767 *
3768 * @adev: amdgpu device pointer
3769 * @job: which job triggered the hang
3770 *
3771 * Attempt to reset the GPU if it has hung (all asics).
3772 * Attempt a soft reset or a full reset and reinitialize the ASIC.
3773 * Returns 0 for success or an error on failure.
3774 */
3775
3776int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
3777			      struct amdgpu_job *job)
3778{
3779	struct list_head device_list, *device_list_handle =  NULL;
3780	bool need_full_reset, job_signaled;
3781	struct amdgpu_hive_info *hive = NULL;
3782	struct amdgpu_device *tmp_adev = NULL;
3783	int i, r = 0;
3784
3785	need_full_reset = job_signaled = false;
3786	INIT_LIST_HEAD(&device_list);
3787
3788	dev_info(adev->dev, "GPU reset begin!\n");
3789
3790	cancel_delayed_work_sync(&adev->delayed_init_work);
3791
3792	hive = amdgpu_get_xgmi_hive(adev, false);
3793
3794	/*
3795	 * Here we trylock to avoid a chain of resets executing from
3796	 * either a trigger by jobs on different adevs in an XGMI hive or by jobs
3797	 * on different schedulers for the same device while this TO handler runs.
3798	 * We always reset all schedulers for a device and all devices in an XGMI
3799	 * hive, so that should take care of them too.
3800	 */
3801
3802	if (hive && !mutex_trylock(&hive->reset_lock)) {
3803		DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
3804			  job ? job->base.id : -1, hive->hive_id);
3805		return 0;
3806	}
3807
3808	/* Start with adev pre asic reset first for soft reset check.*/
3809	if (!amdgpu_device_lock_adev(adev, !hive)) {
3810		DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
3811			  job ? job->base.id : -1);
3812		return 0;
3813	}
3814
3815	/* Build list of devices to reset */
3816	if  (adev->gmc.xgmi.num_physical_nodes > 1) {
3817		if (!hive) {
3818			amdgpu_device_unlock_adev(adev);
3819			return -ENODEV;
3820		}
3821
3822		/*
3823		 * In XGMI hive mode the device reset is done for all nodes
3824		 * in the hive to retrain all XGMI links, hence the reset
3825		 * sequence is executed in a loop on all nodes.
3826		 */
3827		device_list_handle = &hive->device_list;
3828	} else {
3829		list_add_tail(&adev->gmc.xgmi.head, &device_list);
3830		device_list_handle = &device_list;
3831	}
3832
3833	/*
3834	 * Mark these ASICs to be reset as untracked first,
3835	 * and add them back after the reset completes.
3836	 */
3837	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
3838		amdgpu_unregister_gpu_instance(tmp_adev);
3839
3840	/* block all schedulers and reset given job's ring */
3841	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3842		/* disable ras on ALL IPs */
3843		if (amdgpu_device_ip_need_full_reset(tmp_adev))
3844			amdgpu_ras_suspend(tmp_adev);
3845
3846		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3847			struct amdgpu_ring *ring = tmp_adev->rings[i];
3848
3849			if (!ring || !ring->sched.thread)
3850				continue;
3851
3852			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
3853		}
3854	}
3855
3856
3857	/*
3858	 * Must check guilty signal here since after this point all old
3859	 * HW fences are force signaled.
3860	 *
3861	 * job->base holds a reference to parent fence
3862	 */
3863	if (job && job->base.s_fence->parent &&
3864	    dma_fence_is_signaled(job->base.s_fence->parent))
3865		job_signaled = true;
3866
3867	if (!amdgpu_device_ip_need_full_reset(adev))
3868		device_list_handle = &device_list;
3869
3870	if (job_signaled) {
3871		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
3872		goto skip_hw_reset;
3873	}
3874
3875
3876	/* Guilty job will be freed after this */
3877	r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
3878	if (r) {
3879		/* TODO: should we stop? */
3880		DRM_ERROR("GPU pre asic reset failed with err %d for drm dev %s",
3881			  r, adev->ddev->unique);
3882		adev->asic_reset_res = r;
3883	}
3884
3885retry:	/* Rest of adevs pre asic reset from XGMI hive. */
3886	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3887
3888		if (tmp_adev == adev)
3889			continue;
3890
3891		amdgpu_device_lock_adev(tmp_adev, false);
3892		r = amdgpu_device_pre_asic_reset(tmp_adev,
3893						 NULL,
3894						 &need_full_reset);
3895		/* TODO: should we stop? */
3896		if (r) {
3897			DRM_ERROR("GPU pre asic reset failed with err %d for drm dev %s",
3898				  r, tmp_adev->ddev->unique);
3899			tmp_adev->asic_reset_res = r;
3900		}
3901	}
3902
3903	/* Actual ASIC resets if needed. */
3904	/* TODO Implement XGMI hive reset logic for SRIOV */
3905	if (amdgpu_sriov_vf(adev)) {
3906		r = amdgpu_device_reset_sriov(adev, !job);
3907		if (r)
3908			adev->asic_reset_res = r;
3909	} else {
3910		r  = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
3911		if (r == -EAGAIN)
3912			goto retry;
3913	}
3914
3915skip_hw_reset:
3916
3917	/* Post ASIC reset for all devs. */
3918	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3919		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3920			struct amdgpu_ring *ring = tmp_adev->rings[i];
3921
3922			if (!ring || !ring->sched.thread)
3923				continue;
3924
3925			/* No point in resubmitting jobs if we didn't HW reset */
3926			if (!tmp_adev->asic_reset_res && !job_signaled)
3927				drm_sched_resubmit_jobs(&ring->sched);
3928
3929			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
3930		}
3931
3932		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
3933			drm_helper_resume_force_mode(tmp_adev->ddev);
3934		}
3935
3936		tmp_adev->asic_reset_res = 0;
3937
3938		if (r) {
3939			/* bad news, how to tell it to userspace ? */
3940			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
3941			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
3942		} else {
3943			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
3944		}
3945
3946		amdgpu_device_unlock_adev(tmp_adev);
3947	}
3948
3949	if (hive)
3950		mutex_unlock(&hive->reset_lock);
3951
3952	if (r)
3953		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
3954	return r;
3955}
3956
3957/**
3958 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
3959 *
3960 * @adev: amdgpu_device pointer
3961 *
3962 * Fetches and stores in the driver the PCIE capabilities (gen speed
3963 * and lanes) of the slot the device is in. Handles APUs and
3964 * virtualized environments where PCIE config space may not be available.
3965 */
3966static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
3967{
3968	struct pci_dev *pdev;
3969	enum pci_bus_speed speed_cap, platform_speed_cap;
3970	enum pcie_link_width platform_link_width;
3971
3972	if (amdgpu_pcie_gen_cap)
3973		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
3974
3975	if (amdgpu_pcie_lane_cap)
3976		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
3977
3978	/* covers APUs as well */
3979	if (pci_is_root_bus(adev->pdev->bus)) {
3980		if (adev->pm.pcie_gen_mask == 0)
3981			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3982		if (adev->pm.pcie_mlw_mask == 0)
3983			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3984		return;
3985	}
3986
3987	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
3988		return;
3989
3990	pcie_bandwidth_available(adev->pdev, NULL,
3991				 &platform_speed_cap, &platform_link_width);
3992
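	/* The gen masks built below are cumulative: a GenN-capable link
	 * advertises every generation up to and including N.
	 */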
3993	if (adev->pm.pcie_gen_mask == 0) {
3994		/* asic caps */
3995		pdev = adev->pdev;
3996		speed_cap = pcie_get_speed_cap(pdev);
3997		if (speed_cap == PCI_SPEED_UNKNOWN) {
3998			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3999						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4000						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4001		} else {
4002			if (speed_cap == PCIE_SPEED_16_0GT)
4003				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4004							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4005							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4006							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4007			else if (speed_cap == PCIE_SPEED_8_0GT)
4008				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4009							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4010							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4011			else if (speed_cap == PCIE_SPEED_5_0GT)
4012				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4013							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4014			else
4015				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4016		}
4017		/* platform caps */
4018		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
4019			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4020						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4021		} else {
4022			if (platform_speed_cap == PCIE_SPEED_16_0GT)
4023				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4024							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4025							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4026							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
4027			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
4028				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4029							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4030							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
4031			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
4032				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4033							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4034			else
4035				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4036
4037		}
4038	}
4039	if (adev->pm.pcie_mlw_mask == 0) {
4040		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
4041			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4042		} else {
4043			switch (platform_link_width) {
4044			case PCIE_LNK_X32:
4045				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4046							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4047							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4048							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4049							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4050							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4051							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4052				break;
4053			case PCIE_LNK_X16:
4054				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4055							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4056							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4057							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4058							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4059							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4060				break;
4061			case PCIE_LNK_X12:
4062				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4063							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4064							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4065							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4066							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4067				break;
4068			case PCIE_LNK_X8:
4069				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4070							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4071							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4072							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4073				break;
4074			case PCIE_LNK_X4:
4075				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4076							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4077							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4078				break;
4079			case PCIE_LNK_X2:
4080				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4081							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4082				break;
4083			case PCIE_LNK_X1:
4084				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
4085				break;
4086			default:
4087				break;
4088			}
4089		}
4090	}
4091}
4092