   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
  28#include <linux/power_supply.h>
  29#include <linux/kthread.h>
  30#include <linux/module.h>
  31#include <linux/console.h>
  32#include <linux/slab.h>
  33#include <linux/iommu.h>
  34#include <linux/pci.h>
  35#include <linux/pci-p2pdma.h>
  36#include <linux/apple-gmux.h>
  37
  38#include <drm/drm_aperture.h>
  39#include <drm/drm_atomic_helper.h>
  40#include <drm/drm_crtc_helper.h>
  41#include <drm/drm_fb_helper.h>
  42#include <drm/drm_probe_helper.h>
  43#include <drm/amdgpu_drm.h>
  44#include <linux/device.h>
  45#include <linux/vgaarb.h>
  46#include <linux/vga_switcheroo.h>
  47#include <linux/efi.h>
  48#include "amdgpu.h"
  49#include "amdgpu_trace.h"
  50#include "amdgpu_i2c.h"
  51#include "atom.h"
  52#include "amdgpu_atombios.h"
  53#include "amdgpu_atomfirmware.h"
  54#include "amd_pcie.h"
  55#ifdef CONFIG_DRM_AMDGPU_SI
  56#include "si.h"
  57#endif
  58#ifdef CONFIG_DRM_AMDGPU_CIK
  59#include "cik.h"
  60#endif
  61#include "vi.h"
  62#include "soc15.h"
  63#include "nv.h"
  64#include "bif/bif_4_1_d.h"
  65#include <linux/firmware.h>
  66#include "amdgpu_vf_error.h"
  67
  68#include "amdgpu_amdkfd.h"
  69#include "amdgpu_pm.h"
  70
  71#include "amdgpu_xgmi.h"
  72#include "amdgpu_ras.h"
  73#include "amdgpu_pmu.h"
  74#include "amdgpu_fru_eeprom.h"
  75#include "amdgpu_reset.h"
  76#include "amdgpu_virt.h"
  77
  78#include <linux/suspend.h>
  79#include <drm/task_barrier.h>
  80#include <linux/pm_runtime.h>
  81
  82#include <drm/drm_drv.h>
  83
  84#if IS_ENABLED(CONFIG_X86)
  85#include <asm/intel-family.h>
  86#endif
  87
  88MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
  89MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
  90MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
  91MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
  92MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
  93MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
  94MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
  95
  96#define AMDGPU_RESUME_MS		2000
  97#define AMDGPU_MAX_RETRY_LIMIT		2
  98#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
  99
 100static const struct drm_driver amdgpu_kms_driver;
 101
 102const char *amdgpu_asic_name[] = {
 103	"TAHITI",
 104	"PITCAIRN",
 105	"VERDE",
 106	"OLAND",
 107	"HAINAN",
 108	"BONAIRE",
 109	"KAVERI",
 110	"KABINI",
 111	"HAWAII",
 112	"MULLINS",
 113	"TOPAZ",
 114	"TONGA",
 115	"FIJI",
 116	"CARRIZO",
 117	"STONEY",
 118	"POLARIS10",
 119	"POLARIS11",
 120	"POLARIS12",
 121	"VEGAM",
 122	"VEGA10",
 123	"VEGA12",
 124	"VEGA20",
 125	"RAVEN",
 126	"ARCTURUS",
 127	"RENOIR",
 128	"ALDEBARAN",
 129	"NAVI10",
 130	"CYAN_SKILLFISH",
 131	"NAVI14",
 132	"NAVI12",
 133	"SIENNA_CICHLID",
 134	"NAVY_FLOUNDER",
 135	"VANGOGH",
 136	"DIMGREY_CAVEFISH",
 137	"BEIGE_GOBY",
 138	"YELLOW_CARP",
 139	"IP DISCOVERY",
 140	"LAST",
 141};
 142
 143/**
 144 * DOC: pcie_replay_count
 145 *
 146 * The amdgpu driver provides a sysfs API for reporting the total number
 147 * of PCIe replays (NAKs).
 148 * The file pcie_replay_count is used for this and returns the total
 149 * number of replays as the sum of the NAKs generated and the NAKs received.
 150 */
 151
 152static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
 153		struct device_attribute *attr, char *buf)
 154{
 155	struct drm_device *ddev = dev_get_drvdata(dev);
 156	struct amdgpu_device *adev = drm_to_adev(ddev);
 157	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
 158
 159	return sysfs_emit(buf, "%llu\n", cnt);
 160}
 161
 162static DEVICE_ATTR(pcie_replay_count, 0444,
 163		amdgpu_device_get_pcie_replay_count, NULL);
 164
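/*
 * Example (illustrative only; the card index depends on the system): from
 * userspace the counter is read as a plain text file, e.g.
 *
 *	cat /sys/class/drm/card0/device/pcie_replay_count
 */
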
 165static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
 166					  struct bin_attribute *attr, char *buf,
 167					  loff_t ppos, size_t count)
 168{
 169	struct device *dev = kobj_to_dev(kobj);
 170	struct drm_device *ddev = dev_get_drvdata(dev);
 171	struct amdgpu_device *adev = drm_to_adev(ddev);
 172	ssize_t bytes_read;
 173
 174	switch (ppos) {
 175	case AMDGPU_SYS_REG_STATE_XGMI:
 176		bytes_read = amdgpu_asic_get_reg_state(
 177			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
 178		break;
 179	case AMDGPU_SYS_REG_STATE_WAFL:
 180		bytes_read = amdgpu_asic_get_reg_state(
 181			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
 182		break;
 183	case AMDGPU_SYS_REG_STATE_PCIE:
 184		bytes_read = amdgpu_asic_get_reg_state(
 185			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
 186		break;
 187	case AMDGPU_SYS_REG_STATE_USR:
 188		bytes_read = amdgpu_asic_get_reg_state(
 189			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
 190		break;
 191	case AMDGPU_SYS_REG_STATE_USR_1:
 192		bytes_read = amdgpu_asic_get_reg_state(
 193			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
 194		break;
 195	default:
 196		return -EINVAL;
 197	}
 198
 199	return bytes_read;
 200}
 201
 202BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
 203	 AMDGPU_SYS_REG_STATE_END);
 204
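/*
 * Illustrative sketch (not part of the driver): the reg_state blob is
 * selected by the file offset, so a userspace reader seeks to one of the
 * AMDGPU_SYS_REG_STATE_* offsets before reading, e.g.
 *
 *	int fd = open("/sys/bus/pci/devices/<bdf>/reg_state", O_RDONLY);
 *	ssize_t n = pread(fd, buf, sizeof(buf), AMDGPU_SYS_REG_STATE_XGMI);
 *
 * <bdf> stands for the device's PCI address; the exact path is an assumption.
 */
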
 205int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
 206{
 207	int ret;
 208
 209	if (!amdgpu_asic_get_reg_state_supported(adev))
 210		return 0;
 211
 212	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
 213
 214	return ret;
 215}
 216
 217void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
 218{
 219	if (!amdgpu_asic_get_reg_state_supported(adev))
 220		return;
 221	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
 222}
 223
 224/**
 225 * DOC: board_info
 226 *
 227 * The amdgpu driver provides a sysfs API for giving board related information.
 228 * It provides the form factor information in the format
 229 *
 230 *   type : form factor
 231 *
 232 * Possible form factor values
 233 *
 234 * - "cem"		- PCIe CEM card
 235 * - "oam"		- Open Compute Accelerator Module
 236 * - "unknown"	- Not known
 237 *
 238 */
 239
 240static ssize_t amdgpu_device_get_board_info(struct device *dev,
 241					    struct device_attribute *attr,
 242					    char *buf)
 243{
 244	struct drm_device *ddev = dev_get_drvdata(dev);
 245	struct amdgpu_device *adev = drm_to_adev(ddev);
 246	enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
 247	const char *pkg;
 248
 249	if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
 250		pkg_type = adev->smuio.funcs->get_pkg_type(adev);
 251
 252	switch (pkg_type) {
 253	case AMDGPU_PKG_TYPE_CEM:
 254		pkg = "cem";
 255		break;
 256	case AMDGPU_PKG_TYPE_OAM:
 257		pkg = "oam";
 258		break;
 259	default:
 260		pkg = "unknown";
 261		break;
 262	}
 263
 264	return sysfs_emit(buf, "%s : %s\n", "type", pkg);
 265}
 266
 267static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);
 268
 269static struct attribute *amdgpu_board_attrs[] = {
 270	&dev_attr_board_info.attr,
 271	NULL,
 272};
 273
 274static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
 275					     struct attribute *attr, int n)
 276{
 277	struct device *dev = kobj_to_dev(kobj);
 278	struct drm_device *ddev = dev_get_drvdata(dev);
 279	struct amdgpu_device *adev = drm_to_adev(ddev);
 280
 281	if (adev->flags & AMD_IS_APU)
 282		return 0;
 283
 284	return attr->mode;
 285}
 286
 287static const struct attribute_group amdgpu_board_attrs_group = {
 288	.attrs = amdgpu_board_attrs,
 289	.is_visible = amdgpu_board_attrs_is_visible
 290};
 291
 292static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 293
 294
 295/**
 296 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 297 *
 298 * @dev: drm_device pointer
 299 *
 300 * Returns true if the device is a dGPU with ATPX power control,
 301 * otherwise return false.
 302 */
 303bool amdgpu_device_supports_px(struct drm_device *dev)
 304{
 305	struct amdgpu_device *adev = drm_to_adev(dev);
 306
 307	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
 308		return true;
 309	return false;
 310}
 311
 312/**
 313 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 314 *
 315 * @dev: drm_device pointer
 316 *
 317 * Returns true if the device is a dGPU with ACPI power control,
 318 * otherwise return false.
 319 */
 320bool amdgpu_device_supports_boco(struct drm_device *dev)
 321{
 322	struct amdgpu_device *adev = drm_to_adev(dev);
 323
 324	if (adev->has_pr3 ||
 325	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
 326		return true;
 327	return false;
 328}
 329
 330/**
 331 * amdgpu_device_supports_baco - Does the device support BACO
 332 *
 333 * @dev: drm_device pointer
 334 *
 335 * Returns true if the device supports BACO,
 336 * otherwise return false.
 337 */
 338bool amdgpu_device_supports_baco(struct drm_device *dev)
 339{
 340	struct amdgpu_device *adev = drm_to_adev(dev);
 341
 342	return amdgpu_asic_supports_baco(adev);
 343}
 344
 345/**
 346 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 347 * Smart Shift support
 348 *
 349 * @dev: drm_device pointer
 350 *
 351 * Returns true if the device is a dGPU with Smart Shift support,
 352 * otherwise returns false.
 353 */
 354bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
 355{
 356	return (amdgpu_device_supports_boco(dev) &&
 357		amdgpu_acpi_is_power_shift_control_supported());
 358}
 359
 360/*
 361 * VRAM access helper functions
 362 */
 363
 364/**
 365 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 366 *
 367 * @adev: amdgpu_device pointer
 368 * @pos: offset of the buffer in vram
 369 * @buf: virtual address of the buffer in system memory
 370 * @size: read/write size, the size of @buf must be >= @size
 371 * @write: true - write to vram, otherwise - read from vram
 372 */
 373void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
 374			     void *buf, size_t size, bool write)
 375{
 376	unsigned long flags;
 377	uint32_t hi = ~0, tmp = 0;
 378	uint32_t *data = buf;
 379	uint64_t last;
 380	int idx;
 381
 382	if (!drm_dev_enter(adev_to_drm(adev), &idx))
 383		return;
 384
 385	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
 386
 387	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 388	for (last = pos + size; pos < last; pos += 4) {
 389		tmp = pos >> 31;
 390
 391		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
 392		if (tmp != hi) {
 393			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
 394			hi = tmp;
 395		}
 396		if (write)
 397			WREG32_NO_KIQ(mmMM_DATA, *data++);
 398		else
 399			*data++ = RREG32_NO_KIQ(mmMM_DATA);
 400	}
 401
 402	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 403	drm_dev_exit(idx);
 404}
 405
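/*
 * A short sketch of the addressing used above (illustrative only): bits
 * 0-30 of the VRAM offset go into MM_INDEX with bit 31 set to select the
 * aperture, the remaining high bits go into MM_INDEX_HI, and each 32-bit
 * word is transferred through MM_DATA. A caller reading one dword at
 * offset 0x1000 would simply do:
 *
 *	u32 word;
 *	amdgpu_device_mm_access(adev, 0x1000, &word, sizeof(word), false);
 *
 * Offset and size must both be dword aligned, per the BUG_ON() above.
 */
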
 406/**
 407 * amdgpu_device_aper_access - access vram by vram aperture
 408 *
 409 * @adev: amdgpu_device pointer
 410 * @pos: offset of the buffer in vram
 411 * @buf: virtual address of the buffer in system memory
 412 * @size: read/write size, the size of @buf must be >= @size
 413 * @write: true - write to vram, otherwise - read from vram
 414 *
 415 * The return value means how many bytes have been transferred.
 416 */
 417size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
 418				 void *buf, size_t size, bool write)
 419{
 420#ifdef CONFIG_64BIT
 421	void __iomem *addr;
 422	size_t count = 0;
 423	uint64_t last;
 424
 425	if (!adev->mman.aper_base_kaddr)
 426		return 0;
 427
 428	last = min(pos + size, adev->gmc.visible_vram_size);
 429	if (last > pos) {
 430		addr = adev->mman.aper_base_kaddr + pos;
 431		count = last - pos;
 432
 433		if (write) {
 434			memcpy_toio(addr, buf, count);
 435			/* Make sure HDP write cache flush happens without any reordering
 436			 * after the system memory contents are sent over PCIe device
 437			 */
 438			mb();
 439			amdgpu_device_flush_hdp(adev, NULL);
 440		} else {
 441			amdgpu_device_invalidate_hdp(adev, NULL);
 442			/* Make sure HDP read cache is invalidated before issuing a read
 443			 * to the PCIe device
 444			 */
 445			mb();
 446			memcpy_fromio(buf, addr, count);
 447		}
 448
 449	}
 450
 451	return count;
 452#else
 453	return 0;
 454#endif
 455}
 456
 457/**
 458 * amdgpu_device_vram_access - read/write a buffer in vram
 459 *
 460 * @adev: amdgpu_device pointer
 461 * @pos: offset of the buffer in vram
 462 * @buf: virtual address of the buffer in system memory
 463 * @size: read/write size, the size of @buf must be >= @size
 464 * @write: true - write to vram, otherwise - read from vram
 465 */
 466void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 467			       void *buf, size_t size, bool write)
 468{
 469	size_t count;
 470
 471	/* try using the VRAM aperture to access VRAM first */
 472	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
 473	size -= count;
 474	if (size) {
 475		/* use MM access for the rest of VRAM */
 476		pos += count;
 477		buf += count;
 478		amdgpu_device_mm_access(adev, pos, buf, size, write);
 479	}
 480}
 481
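/*
 * Usage sketch (illustrative only): callers can move arbitrary buffers in
 * and out of VRAM without caring which path is taken; the BAR aperture
 * covers the CPU-visible range and the MM window covers the remainder:
 *
 *	u8 data[256];
 *	amdgpu_device_vram_access(adev, vram_offset, data, sizeof(data), true);
 *
 * vram_offset is a placeholder for a dword-aligned VRAM offset.
 */
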
 482/*
 483 * register access helper functions.
 484 */
 485
 486/* Check if hw access should be skipped because of hotplug or device error */
 487bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
 488{
 489	if (adev->no_hw_access)
 490		return true;
 491
 492#ifdef CONFIG_LOCKDEP
 493	/*
 494	 * This is a bit complicated to understand, so worth a comment. What we assert
 495	 * here is that the GPU reset is not running on another thread in parallel.
 496	 *
 497	 * For this we trylock the read side of the reset semaphore, if that succeeds
 498 * we know that the reset is not running in parallel.
 499	 *
 500	 * If the trylock fails we assert that we are either already holding the read
 501	 * side of the lock or are the reset thread itself and hold the write side of
 502	 * the lock.
 503	 */
 504	if (in_task()) {
 505		if (down_read_trylock(&adev->reset_domain->sem))
 506			up_read(&adev->reset_domain->sem);
 507		else
 508			lockdep_assert_held(&adev->reset_domain->sem);
 509	}
 510#endif
 511	return false;
 512}
 513
 514/**
 515 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 516 *
 517 * @adev: amdgpu_device pointer
 518 * @reg: dword aligned register offset
 519 * @acc_flags: access flags which require special behavior
 520 *
 521 * Returns the 32 bit value from the offset specified.
 522 */
 523uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
 524			    uint32_t reg, uint32_t acc_flags)
 525{
 526	uint32_t ret;
 527
 528	if (amdgpu_device_skip_hw_access(adev))
 529		return 0;
 530
 531	if ((reg * 4) < adev->rmmio_size) {
 532		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 533		    amdgpu_sriov_runtime(adev) &&
 534		    down_read_trylock(&adev->reset_domain->sem)) {
 535			ret = amdgpu_kiq_rreg(adev, reg, 0);
 536			up_read(&adev->reset_domain->sem);
 537		} else {
 538			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 539		}
 540	} else {
 541		ret = adev->pcie_rreg(adev, reg * 4);
 542	}
 543
 544	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
 545
 546	return ret;
 547}
 548
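/*
 * Illustrative note: most of the driver does not call this helper directly
 * but goes through the RREG32()/WREG32() family of macros, which expand to
 * amdgpu_device_rreg()/amdgpu_device_wreg() with default access flags, e.g.
 *
 *	u32 val = RREG32(reg_offset);
 *
 * reg_offset stands for a dword-aligned register offset.
 */
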
 549/*
 550 * MMIO register read helper functions for byte access
 551 * @offset: byte offset from MMIO start
 552 */
 553
 554/**
 555 * amdgpu_mm_rreg8 - read a memory mapped IO register
 556 *
 557 * @adev: amdgpu_device pointer
 558 * @offset: byte aligned register offset
 559 *
 560 * Returns the 8 bit value from the offset specified.
 561 */
 562uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
 563{
 564	if (amdgpu_device_skip_hw_access(adev))
 565		return 0;
 566
 567	if (offset < adev->rmmio_size)
 568		return (readb(adev->rmmio + offset));
 569	BUG();
 570}
 571
 572
 573/**
 574 * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
 575 *
 576 * @adev: amdgpu_device pointer
 577 * @reg: dword aligned register offset
 578 * @acc_flags: access flags which require special behavior
 579 * @xcc_id: xcc accelerated compute core id
 580 *
 581 * Returns the 32 bit value from the offset specified.
 582 */
 583uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
 584				uint32_t reg, uint32_t acc_flags,
 585				uint32_t xcc_id)
 586{
 587	uint32_t ret, rlcg_flag;
 588
 589	if (amdgpu_device_skip_hw_access(adev))
 590		return 0;
 591
 592	if ((reg * 4) < adev->rmmio_size) {
 593		if (amdgpu_sriov_vf(adev) &&
 594		    !amdgpu_sriov_runtime(adev) &&
 595		    adev->gfx.rlc.rlcg_reg_access_supported &&
 596		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
 597							 GC_HWIP, false,
 598							 &rlcg_flag)) {
 599			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id);
 600		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 601		    amdgpu_sriov_runtime(adev) &&
 602		    down_read_trylock(&adev->reset_domain->sem)) {
 603			ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
 604			up_read(&adev->reset_domain->sem);
 605		} else {
 606			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 607		}
 608	} else {
 609		ret = adev->pcie_rreg(adev, reg * 4);
 610	}
 611
 612	return ret;
 613}
 614
 615/*
 616 * MMIO register write helper functions for byte access
 617 * @offset: byte offset from MMIO start
 618 * @value: the value to be written to the register
 619 */
 620
 621/**
 622 * amdgpu_mm_wreg8 - write a memory mapped IO register
 623 *
 624 * @adev: amdgpu_device pointer
 625 * @offset: byte aligned register offset
 626 * @value: 8 bit value to write
 627 *
 628 * Writes the value specified to the offset specified.
 629 */
 630void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
 631{
 632	if (amdgpu_device_skip_hw_access(adev))
 633		return;
 634
 635	if (offset < adev->rmmio_size)
 636		writeb(value, adev->rmmio + offset);
 637	else
 638		BUG();
 639}
 640
 641/**
 642 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 643 *
 644 * @adev: amdgpu_device pointer
 645 * @reg: dword aligned register offset
 646 * @v: 32 bit value to write to the register
 647 * @acc_flags: access flags which require special behavior
 648 *
 649 * Writes the value specified to the offset specified.
 650 */
 651void amdgpu_device_wreg(struct amdgpu_device *adev,
 652			uint32_t reg, uint32_t v,
 653			uint32_t acc_flags)
 654{
 655	if (amdgpu_device_skip_hw_access(adev))
 656		return;
 657
 658	if ((reg * 4) < adev->rmmio_size) {
 659		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 660		    amdgpu_sriov_runtime(adev) &&
 661		    down_read_trylock(&adev->reset_domain->sem)) {
 662			amdgpu_kiq_wreg(adev, reg, v, 0);
 663			up_read(&adev->reset_domain->sem);
 664		} else {
 665			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 666		}
 667	} else {
 668		adev->pcie_wreg(adev, reg * 4, v);
 669	}
 670
 671	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
 672}
 673
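/*
 * Illustrative sketch of a typical read-modify-write sequence built on the
 * read/write helpers (via the usual RREG32()/WREG32() macros); the names
 * below are placeholders:
 *
 *	u32 tmp = RREG32(reg_offset);
 *	tmp &= ~field_mask;
 *	tmp |= field_value;
 *	WREG32(reg_offset, tmp);
 */
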
 674/**
 675 * amdgpu_mm_wreg_mmio_rlc -  write register either with direct/indirect mmio or with RLC path if in range
 676 *
 677 * @adev: amdgpu_device pointer
 678 * @reg: mmio/rlc register
 679 * @v: value to write
 680 * @xcc_id: xcc accelerated compute core id
 681 *
 682 * This function is invoked only for debugfs register access.
 683 */
 684void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
 685			     uint32_t reg, uint32_t v,
 686			     uint32_t xcc_id)
 687{
 688	if (amdgpu_device_skip_hw_access(adev))
 689		return;
 690
 691	if (amdgpu_sriov_fullaccess(adev) &&
 692	    adev->gfx.rlc.funcs &&
 693	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
 694		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
 695			return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
 696	} else if ((reg * 4) >= adev->rmmio_size) {
 697		adev->pcie_wreg(adev, reg * 4, v);
 698	} else {
 699		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 700	}
 701}
 702
 703/**
 704 * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
 705 *
 706 * @adev: amdgpu_device pointer
 707 * @reg: dword aligned register offset
 708 * @v: 32 bit value to write to the register
 709 * @acc_flags: access flags which require special behavior
 710 * @xcc_id: xcc accelerated compute core id
 711 *
 712 * Writes the value specified to the offset specified.
 713 */
 714void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
 715			uint32_t reg, uint32_t v,
 716			uint32_t acc_flags, uint32_t xcc_id)
 717{
 718	uint32_t rlcg_flag;
 719
 720	if (amdgpu_device_skip_hw_access(adev))
 721		return;
 722
 723	if ((reg * 4) < adev->rmmio_size) {
 724		if (amdgpu_sriov_vf(adev) &&
 725		    !amdgpu_sriov_runtime(adev) &&
 726		    adev->gfx.rlc.rlcg_reg_access_supported &&
 727		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
 728							 GC_HWIP, true,
 729							 &rlcg_flag)) {
 730			amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id);
 731		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 732		    amdgpu_sriov_runtime(adev) &&
 733		    down_read_trylock(&adev->reset_domain->sem)) {
 734			amdgpu_kiq_wreg(adev, reg, v, xcc_id);
 735			up_read(&adev->reset_domain->sem);
 736		} else {
 737			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 738		}
 739	} else {
 740		adev->pcie_wreg(adev, reg * 4, v);
 741	}
 742}
 743
 744/**
 745 * amdgpu_device_indirect_rreg - read an indirect register
 746 *
 747 * @adev: amdgpu_device pointer
 748 * @reg_addr: indirect register address to read from
 749 *
 750 * Returns the value of indirect register @reg_addr
 751 */
 752u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
 753				u32 reg_addr)
 754{
 755	unsigned long flags, pcie_index, pcie_data;
 756	void __iomem *pcie_index_offset;
 757	void __iomem *pcie_data_offset;
 758	u32 r;
 759
 760	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 761	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 762
 763	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 764	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 765	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 766
 767	writel(reg_addr, pcie_index_offset);
 768	readl(pcie_index_offset);
 769	r = readl(pcie_data_offset);
 770	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 771
 772	return r;
 773}
 774
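/*
 * Sketch of the index/data protocol used above (illustrative only): the
 * target address is written to the PCIE index register, the write is posted
 * with a dummy readl(), and the value is then fetched through the PCIE data
 * register, all under pcie_idx_lock. A caller just does:
 *
 *	u32 val = amdgpu_device_indirect_rreg(adev, ind_addr);
 *
 * ind_addr is a placeholder for an indirect register address.
 */
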
 775u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
 776				    u64 reg_addr)
 777{
 778	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
 779	u32 r;
 780	void __iomem *pcie_index_offset;
 781	void __iomem *pcie_index_hi_offset;
 782	void __iomem *pcie_data_offset;
 783
 784	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 785	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 786	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
 787		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
 788	else
 789		pcie_index_hi = 0;
 790
 791	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 792	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 793	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 794	if (pcie_index_hi != 0)
 795		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
 796				pcie_index_hi * 4;
 797
 798	writel(reg_addr, pcie_index_offset);
 799	readl(pcie_index_offset);
 800	if (pcie_index_hi != 0) {
 801		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 802		readl(pcie_index_hi_offset);
 803	}
 804	r = readl(pcie_data_offset);
 805
 806	/* clear the high bits */
 807	if (pcie_index_hi != 0) {
 808		writel(0, pcie_index_hi_offset);
 809		readl(pcie_index_hi_offset);
 810	}
 811
 812	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 813
 814	return r;
 815}
 816
 817/**
 818 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
 819 *
 820 * @adev: amdgpu_device pointer
 821 * @reg_addr: indirect register address to read from
 822 *
 823 * Returns the value of indirect register @reg_addr
 824 */
 825u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
 826				  u32 reg_addr)
 827{
 828	unsigned long flags, pcie_index, pcie_data;
 829	void __iomem *pcie_index_offset;
 830	void __iomem *pcie_data_offset;
 831	u64 r;
 832
 833	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 834	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 835
 836	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 837	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 838	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 839
 840	/* read low 32 bits */
 841	writel(reg_addr, pcie_index_offset);
 842	readl(pcie_index_offset);
 843	r = readl(pcie_data_offset);
 844	/* read high 32 bits */
 845	writel(reg_addr + 4, pcie_index_offset);
 846	readl(pcie_index_offset);
 847	r |= ((u64)readl(pcie_data_offset) << 32);
 848	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 849
 850	return r;
 851}
 852
 853u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
 854				  u64 reg_addr)
 855{
 856	unsigned long flags, pcie_index, pcie_data;
 857	unsigned long pcie_index_hi = 0;
 858	void __iomem *pcie_index_offset;
 859	void __iomem *pcie_index_hi_offset;
 860	void __iomem *pcie_data_offset;
 861	u64 r;
 862
 863	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 864	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 865	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
 866		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
 867
 868	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 869	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 870	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 871	if (pcie_index_hi != 0)
 872		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
 873			pcie_index_hi * 4;
 874
 875	/* read low 32 bits */
 876	writel(reg_addr, pcie_index_offset);
 877	readl(pcie_index_offset);
 878	if (pcie_index_hi != 0) {
 879		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 880		readl(pcie_index_hi_offset);
 881	}
 882	r = readl(pcie_data_offset);
 883	/* read high 32 bits */
 884	writel(reg_addr + 4, pcie_index_offset);
 885	readl(pcie_index_offset);
 886	if (pcie_index_hi != 0) {
 887		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 888		readl(pcie_index_hi_offset);
 889	}
 890	r |= ((u64)readl(pcie_data_offset) << 32);
 891
 892	/* clear the high bits */
 893	if (pcie_index_hi != 0) {
 894		writel(0, pcie_index_hi_offset);
 895		readl(pcie_index_hi_offset);
 896	}
 897
 898	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 899
 900	return r;
 901}
 902
 903/**
 904 * amdgpu_device_indirect_wreg - write an indirect register
 905 *
 906 * @adev: amdgpu_device pointer
 907 * @reg_addr: indirect register offset
 908 * @reg_data: indirect register data
 909 *
 910 */
 911void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
 912				 u32 reg_addr, u32 reg_data)
 913{
 914	unsigned long flags, pcie_index, pcie_data;
 915	void __iomem *pcie_index_offset;
 916	void __iomem *pcie_data_offset;
 917
 918	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 919	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 920
 921	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 922	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 923	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 924
 925	writel(reg_addr, pcie_index_offset);
 926	readl(pcie_index_offset);
 927	writel(reg_data, pcie_data_offset);
 928	readl(pcie_data_offset);
 929	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 930}
 931
 932void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
 933				     u64 reg_addr, u32 reg_data)
 934{
 935	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
 936	void __iomem *pcie_index_offset;
 937	void __iomem *pcie_index_hi_offset;
 938	void __iomem *pcie_data_offset;
 939
 940	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 941	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 942	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
 943		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
 944	else
 945		pcie_index_hi = 0;
 946
 947	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 948	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 949	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 950	if (pcie_index_hi != 0)
 951		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
 952				pcie_index_hi * 4;
 953
 954	writel(reg_addr, pcie_index_offset);
 955	readl(pcie_index_offset);
 956	if (pcie_index_hi != 0) {
 957		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 958		readl(pcie_index_hi_offset);
 959	}
 960	writel(reg_data, pcie_data_offset);
 961	readl(pcie_data_offset);
 962
 963	/* clear the high bits */
 964	if (pcie_index_hi != 0) {
 965		writel(0, pcie_index_hi_offset);
 966		readl(pcie_index_hi_offset);
 967	}
 968
 969	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 970}
 971
 972/**
 973 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
 974 *
 975 * @adev: amdgpu_device pointer
 976 * @reg_addr: indirect register offset
 977 * @reg_data: indirect register data
 978 *
 979 */
 980void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
 981				   u32 reg_addr, u64 reg_data)
 982{
 983	unsigned long flags, pcie_index, pcie_data;
 984	void __iomem *pcie_index_offset;
 985	void __iomem *pcie_data_offset;
 986
 987	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 988	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 989
 990	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 991	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 992	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 993
 994	/* write low 32 bits */
 995	writel(reg_addr, pcie_index_offset);
 996	readl(pcie_index_offset);
 997	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
 998	readl(pcie_data_offset);
 999	/* write high 32 bits */
1000	writel(reg_addr + 4, pcie_index_offset);
1001	readl(pcie_index_offset);
1002	writel((u32)(reg_data >> 32), pcie_data_offset);
1003	readl(pcie_data_offset);
1004	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1005}
1006
1007void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
1008				   u64 reg_addr, u64 reg_data)
1009{
1010	unsigned long flags, pcie_index, pcie_data;
1011	unsigned long pcie_index_hi = 0;
1012	void __iomem *pcie_index_offset;
1013	void __iomem *pcie_index_hi_offset;
1014	void __iomem *pcie_data_offset;
1015
1016	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1017	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1018	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
1019		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
1020
1021	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1022	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1023	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1024	if (pcie_index_hi != 0)
1025		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
1026				pcie_index_hi * 4;
1027
1028	/* write low 32 bits */
1029	writel(reg_addr, pcie_index_offset);
1030	readl(pcie_index_offset);
1031	if (pcie_index_hi != 0) {
1032		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1033		readl(pcie_index_hi_offset);
1034	}
1035	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
1036	readl(pcie_data_offset);
1037	/* write high 32 bits */
1038	writel(reg_addr + 4, pcie_index_offset);
1039	readl(pcie_index_offset);
1040	if (pcie_index_hi != 0) {
1041		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1042		readl(pcie_index_hi_offset);
1043	}
1044	writel((u32)(reg_data >> 32), pcie_data_offset);
1045	readl(pcie_data_offset);
1046
1047	/* clear the high bits */
1048	if (pcie_index_hi != 0) {
1049		writel(0, pcie_index_hi_offset);
1050		readl(pcie_index_hi_offset);
1051	}
1052
1053	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1054}
1055
1056/**
1057 * amdgpu_device_get_rev_id - query device rev_id
1058 *
1059 * @adev: amdgpu_device pointer
1060 *
1061 * Returns the device rev_id.
1062 */
1063u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
1064{
1065	return adev->nbio.funcs->get_rev_id(adev);
1066}
1067
1068/**
1069 * amdgpu_invalid_rreg - dummy reg read function
1070 *
1071 * @adev: amdgpu_device pointer
1072 * @reg: offset of register
1073 *
1074 * Dummy register read function.  Used for register blocks
1075 * that certain asics don't have (all asics).
1076 * Returns the value in the register.
1077 */
1078static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
1079{
1080	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
1081	BUG();
1082	return 0;
1083}
1084
1085static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
1086{
1087	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
1088	BUG();
1089	return 0;
1090}
1091
1092/**
1093 * amdgpu_invalid_wreg - dummy reg write function
1094 *
1095 * @adev: amdgpu_device pointer
1096 * @reg: offset of register
1097 * @v: value to write to the register
1098 *
1099 * Dummy register write function.  Used for register blocks
1100 * that certain asics don't have (all asics).
1101 */
1102static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
1103{
1104	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
1105		  reg, v);
1106	BUG();
1107}
1108
1109static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
1110{
1111	DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
1112		  reg, v);
1113	BUG();
1114}
1115
1116/**
1117 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
1118 *
1119 * @adev: amdgpu_device pointer
1120 * @reg: offset of register
1121 *
1122 * Dummy register read function.  Used for register blocks
1123 * that certain asics don't have (all asics).
1124 * Returns the value in the register.
1125 */
1126static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
1127{
1128	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
1129	BUG();
1130	return 0;
1131}
1132
1133static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
1134{
1135	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
1136	BUG();
1137	return 0;
1138}
1139
1140/**
1141 * amdgpu_invalid_wreg64 - dummy reg write function
1142 *
1143 * @adev: amdgpu_device pointer
1144 * @reg: offset of register
1145 * @v: value to write to the register
1146 *
1147 * Dummy register write function.  Used for register blocks
1148 * that certain asics don't have (all asics).
1149 */
1150static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
1151{
1152	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
1153		  reg, v);
1154	BUG();
1155}
1156
1157static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
1158{
1159	DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
1160		  reg, v);
1161	BUG();
1162}
1163
1164/**
1165 * amdgpu_block_invalid_rreg - dummy reg read function
1166 *
1167 * @adev: amdgpu_device pointer
1168 * @block: offset of instance
1169 * @reg: offset of register
1170 *
1171 * Dummy register read function.  Used for register blocks
1172 * that certain asics don't have (all asics).
1173 * Returns the value in the register.
1174 */
1175static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
1176					  uint32_t block, uint32_t reg)
1177{
1178	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
1179		  reg, block);
1180	BUG();
1181	return 0;
1182}
1183
1184/**
1185 * amdgpu_block_invalid_wreg - dummy reg write function
1186 *
1187 * @adev: amdgpu_device pointer
1188 * @block: offset of instance
1189 * @reg: offset of register
1190 * @v: value to write to the register
1191 *
1192 * Dummy register write function.  Used for register blocks
1193 * that certain asics don't have (all asics).
1194 */
1195static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
1196				      uint32_t block,
1197				      uint32_t reg, uint32_t v)
1198{
1199	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
1200		  reg, block, v);
1201	BUG();
1202}
1203
1204/**
1205 * amdgpu_device_asic_init - Wrapper for atom asic_init
1206 *
1207 * @adev: amdgpu_device pointer
1208 *
1209 * Does any asic specific work and then calls atom asic init.
1210 */
1211static int amdgpu_device_asic_init(struct amdgpu_device *adev)
1212{
1213	int ret;
1214
1215	amdgpu_asic_pre_asic_init(adev);
1216
1217	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1218	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
1219		amdgpu_psp_wait_for_bootloader(adev);
1220		ret = amdgpu_atomfirmware_asic_init(adev, true);
1221		/* TODO: check the return val and stop device initialization if boot fails */
1222		amdgpu_psp_query_boot_status(adev);
1223		return ret;
1224	} else {
1225		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
1226	}
1227
1228	return 0;
1229}
1230
1231/**
1232 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
1233 *
1234 * @adev: amdgpu_device pointer
1235 *
1236 * Allocates a scratch page of VRAM for use by various things in the
1237 * driver.
1238 */
1239static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
1240{
1241	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
1242				       AMDGPU_GEM_DOMAIN_VRAM |
1243				       AMDGPU_GEM_DOMAIN_GTT,
1244				       &adev->mem_scratch.robj,
1245				       &adev->mem_scratch.gpu_addr,
1246				       (void **)&adev->mem_scratch.ptr);
1247}
1248
1249/**
1250 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
1251 *
1252 * @adev: amdgpu_device pointer
1253 *
1254 * Frees the VRAM scratch page.
1255 */
1256static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
1257{
1258	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
1259}
1260
1261/**
1262 * amdgpu_device_program_register_sequence - program an array of registers.
1263 *
1264 * @adev: amdgpu_device pointer
1265 * @registers: pointer to the register array
1266 * @array_size: size of the register array
1267 *
1268 * Programs an array of registers with AND/OR masks.
1269 * This is a helper for setting golden registers.
1270 */
1271void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
1272					     const u32 *registers,
1273					     const u32 array_size)
1274{
1275	u32 tmp, reg, and_mask, or_mask;
1276	int i;
1277
1278	if (array_size % 3)
1279		return;
1280
1281	for (i = 0; i < array_size; i += 3) {
1282		reg = registers[i + 0];
1283		and_mask = registers[i + 1];
1284		or_mask = registers[i + 2];
1285
1286		if (and_mask == 0xffffffff) {
1287			tmp = or_mask;
1288		} else {
1289			tmp = RREG32(reg);
1290			tmp &= ~and_mask;
1291			if (adev->family >= AMDGPU_FAMILY_AI)
1292				tmp |= (or_mask & and_mask);
1293			else
1294				tmp |= or_mask;
1295		}
1296		WREG32(reg, tmp);
1297	}
1298}
1299
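/*
 * Illustrative sketch of the expected array layout: triplets of
 * {offset, and_mask, or_mask}. The names below are placeholders, not real
 * golden settings:
 *
 *	static const u32 example_golden_settings[] = {
 *		mmEXAMPLE_REG, 0xffffffff, 0x00000100,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *						ARRAY_SIZE(example_golden_settings));
 */
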
1300/**
1301 * amdgpu_device_pci_config_reset - reset the GPU
1302 *
1303 * @adev: amdgpu_device pointer
1304 *
1305 * Resets the GPU using the pci config reset sequence.
1306 * Only applicable to asics prior to vega10.
1307 */
1308void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1309{
1310	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1311}
1312
1313/**
1314 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1315 *
1316 * @adev: amdgpu_device pointer
1317 *
1318 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1319 */
1320int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1321{
1322	return pci_reset_function(adev->pdev);
1323}
1324
1325/*
1326 * amdgpu_device_wb_*()
1327 * Writeback is the method by which the GPU updates special pages in memory
1328 * with the status of certain GPU events (fences, ring pointers, etc.).
1329 */
1330
1331/**
1332 * amdgpu_device_wb_fini - Disable Writeback and free memory
1333 *
1334 * @adev: amdgpu_device pointer
1335 *
1336 * Disables Writeback and frees the Writeback memory (all asics).
1337 * Used at driver shutdown.
1338 */
1339static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1340{
1341	if (adev->wb.wb_obj) {
1342		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1343				      &adev->wb.gpu_addr,
1344				      (void **)&adev->wb.wb);
1345		adev->wb.wb_obj = NULL;
1346	}
1347}
1348
1349/**
1350 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1351 *
1352 * @adev: amdgpu_device pointer
1353 *
1354 * Initializes writeback and allocates writeback memory (all asics).
1355 * Used at driver startup.
1356 * Returns 0 on success or an -error on failure.
1357 */
1358static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1359{
1360	int r;
1361
1362	if (adev->wb.wb_obj == NULL) {
1363		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1364		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1365					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1366					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1367					    (void **)&adev->wb.wb);
1368		if (r) {
1369			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1370			return r;
1371		}
1372
1373		adev->wb.num_wb = AMDGPU_MAX_WB;
1374		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1375
1376		/* clear wb memory */
1377		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1378	}
1379
1380	return 0;
1381}
1382
1383/**
1384 * amdgpu_device_wb_get - Allocate a wb entry
1385 *
1386 * @adev: amdgpu_device pointer
1387 * @wb: wb index
1388 *
1389 * Allocate a wb slot for use by the driver (all asics).
1390 * Returns 0 on success or -EINVAL on failure.
1391 */
1392int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1393{
1394	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1395
1396	if (offset < adev->wb.num_wb) {
1397		__set_bit(offset, adev->wb.used);
1398		*wb = offset << 3; /* convert to dw offset */
1399		return 0;
1400	} else {
1401		return -EINVAL;
1402	}
1403}
1404
1405/**
1406 * amdgpu_device_wb_free - Free a wb entry
1407 *
1408 * @adev: amdgpu_device pointer
1409 * @wb: wb index
1410 *
1411 * Free a wb slot allocated for use by the driver (all asics)
1412 */
1413void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1414{
1415	wb >>= 3;
1416	if (wb < adev->wb.num_wb)
1417		__clear_bit(wb, adev->wb.used);
1418}
1419
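/*
 * Usage sketch (illustrative only): a writeback slot is handed out as a
 * dword index; the CPU reads the value at adev->wb.wb[index] while the GPU
 * writes it at adev->wb.gpu_addr + index * 4:
 *
 *	u32 index;
 *
 *	if (!amdgpu_device_wb_get(adev, &index)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + index * 4;
 *		u32 val = adev->wb.wb[index];
 *
 *		amdgpu_device_wb_free(adev, index);
 *	}
 */
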
1420/**
1421 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1422 *
1423 * @adev: amdgpu_device pointer
1424 *
1425 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1426 * to fail, but if any of the BARs is not accessible after the resize we abort
1427 * driver loading by returning -ENODEV.
1428 */
1429int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1430{
1431	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1432	struct pci_bus *root;
1433	struct resource *res;
1434	unsigned int i;
1435	u16 cmd;
1436	int r;
1437
1438	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
1439		return 0;
1440
1441	/* Bypass for VF */
1442	if (amdgpu_sriov_vf(adev))
1443		return 0;
1444
1445	/* skip if the bios has already enabled large BAR */
1446	if (adev->gmc.real_vram_size &&
1447	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1448		return 0;
1449
1450	/* Check if the root BUS has 64bit memory resources */
1451	root = adev->pdev->bus;
1452	while (root->parent)
1453		root = root->parent;
1454
1455	pci_bus_for_each_resource(root, res, i) {
1456		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1457		    res->start > 0x100000000ull)
1458			break;
1459	}
1460
1461	/* Trying to resize is pointless without a root hub window above 4GB */
1462	if (!res)
1463		return 0;
1464
1465	/* Limit the BAR size to what is available */
1466	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1467			rbar_size);
1468
1469	/* Disable memory decoding while we change the BAR addresses and size */
1470	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1471	pci_write_config_word(adev->pdev, PCI_COMMAND,
1472			      cmd & ~PCI_COMMAND_MEMORY);
1473
1474	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1475	amdgpu_doorbell_fini(adev);
1476	if (adev->asic_type >= CHIP_BONAIRE)
1477		pci_release_resource(adev->pdev, 2);
1478
1479	pci_release_resource(adev->pdev, 0);
1480
1481	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1482	if (r == -ENOSPC)
1483		DRM_INFO("Not enough PCI address space for a large BAR.");
1484	else if (r && r != -ENOTSUPP)
1485		DRM_ERROR("Problem resizing BAR0 (%d).", r);
1486
1487	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1488
1489	/* When the doorbell or fb BAR isn't available we have no chance of
1490	 * using the device.
1491	 */
1492	r = amdgpu_doorbell_init(adev);
1493	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1494		return -ENODEV;
1495
1496	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1497
1498	return 0;
1499}
1500
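/*
 * Note on the size encoding (a sketch of the encoding assumed here):
 * resizable BAR sizes are a power-of-two exponent relative to 1 MB, so
 * pci_rebar_bytes_to_size() maps e.g.
 *
 *	256 MB -> 8, 512 MB -> 9, ..., 16 GB -> 14
 *
 * and the min() against pci_rebar_get_possible_sizes() clamps the request
 * to the largest size the device and platform actually support.
 */
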
1501static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
1502{
1503	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
1504		return false;
1505
1506	return true;
1507}
1508
1509/*
1510 * GPU helpers function.
1511 */
1512/**
1513 * amdgpu_device_need_post - check if the hw need post or not
1514 *
1515 * @adev: amdgpu_device pointer
1516 *
1517 * Check if the asic has been initialized (all asics) at driver startup
1518 * or if post is needed because a hw reset was performed.
1519 * Returns true if post is needed or false if not.
1520 */
1521bool amdgpu_device_need_post(struct amdgpu_device *adev)
1522{
1523	uint32_t reg;
1524
1525	if (amdgpu_sriov_vf(adev))
1526		return false;
1527
1528	if (!amdgpu_device_read_bios(adev))
1529		return false;
1530
1531	if (amdgpu_passthrough(adev)) {
1532		/* for FIJI: in the whole-GPU pass-through virtualization case, after a VM
1533		 * reboot some old SMC firmware still needs the driver to do vPost,
1534		 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
1535		 * this flaw, so we force vPost for SMC versions below 22.15.
1536		 */
1537		if (adev->asic_type == CHIP_FIJI) {
1538			int err;
1539			uint32_t fw_ver;
1540
1541			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1542			/* force vPost if error occurred */
1543			if (err)
1544				return true;
1545
1546			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1547			release_firmware(adev->pm.fw);
1548			if (fw_ver < 0x00160e00)
1549				return true;
1550		}
1551	}
1552
1553	/* Don't post if we need to reset whole hive on init */
1554	if (adev->gmc.xgmi.pending_reset)
1555		return false;
1556
1557	if (adev->has_hw_reset) {
1558		adev->has_hw_reset = false;
1559		return true;
1560	}
1561
1562	/* bios scratch used on CIK+ */
1563	if (adev->asic_type >= CHIP_BONAIRE)
1564		return amdgpu_atombios_scratch_need_asic_init(adev);
1565
1566	/* check MEM_SIZE for older asics */
1567	reg = amdgpu_asic_get_config_memsize(adev);
1568
1569	if ((reg != 0) && (reg != 0xffffffff))
1570		return false;
1571
1572	return true;
1573}
1574
1575/*
1576 * Check whether seamless boot is supported.
1577 *
1578 * So far we only support seamless boot on DCE 3.0 or later.
1579 * If users report that it works on older ASICs as well, we may
1580 * loosen this.
1581 */
1582bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
1583{
1584	switch (amdgpu_seamless) {
1585	case -1:
1586		break;
1587	case 1:
1588		return true;
1589	case 0:
1590		return false;
1591	default:
1592		DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
1593			  amdgpu_seamless);
1594		return false;
1595	}
1596
1597	if (!(adev->flags & AMD_IS_APU))
1598		return false;
1599
1600	if (adev->mman.keep_stolen_vga_memory)
1601		return false;
1602
1603	return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
1604}
1605
1606/*
1607 * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
1608 * don't support dynamic speed switching. Until we have confirmation from Intel
1609 * that a specific host supports it, it's safer that we keep it disabled for all.
1610 *
1611 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
1612 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
1613 */
1614static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
1615{
1616#if IS_ENABLED(CONFIG_X86)
1617	struct cpuinfo_x86 *c = &cpu_data(0);
1618
1619	/* eGPU change speeds based on USB4 fabric conditions */
1620	if (dev_is_removable(adev->dev))
1621		return true;
1622
1623	if (c->x86_vendor == X86_VENDOR_INTEL)
1624		return false;
1625#endif
1626	return true;
1627}
1628
1629/**
1630 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1631 *
1632 * @adev: amdgpu_device pointer
1633 *
1634 * Confirm whether the module parameter and pcie bridge agree that ASPM should
1635 * be set for this device.
1636 *
1637 * Returns true if it should be used or false if not.
1638 */
1639bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1640{
1641	switch (amdgpu_aspm) {
1642	case -1:
1643		break;
1644	case 0:
1645		return false;
1646	case 1:
1647		return true;
1648	default:
1649		return false;
1650	}
1651	if (adev->flags & AMD_IS_APU)
1652		return false;
1653	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
1654		return false;
1655	return pcie_aspm_enabled(adev->pdev);
1656}
1657
1658/* if we get transitioned to only one device, take VGA back */
1659/**
1660 * amdgpu_device_vga_set_decode - enable/disable vga decode
1661 *
1662 * @pdev: PCI device pointer
1663 * @state: enable/disable vga decode
1664 *
1665 * Enable/disable vga decode (all asics).
1666 * Returns VGA resource flags.
1667 */
1668static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1669		bool state)
1670{
1671	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1672
1673	amdgpu_asic_set_vga_state(adev, state);
1674	if (state)
1675		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1676		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1677	else
1678		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1679}
1680
1681/**
1682 * amdgpu_device_check_block_size - validate the vm block size
1683 *
1684 * @adev: amdgpu_device pointer
1685 *
1686 * Validates the vm block size specified via module parameter.
1687 * The vm block size defines number of bits in page table versus page directory,
1688 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1689 * page table and the remaining bits are in the page directory.
1690 */
1691static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1692{
1693	/* defines number of bits in page table versus page directory,
1694	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1695	 * page table and the remaining bits are in the page directory
1696	 */
1697	if (amdgpu_vm_block_size == -1)
1698		return;
1699
1700	if (amdgpu_vm_block_size < 9) {
1701		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1702			 amdgpu_vm_block_size);
1703		amdgpu_vm_block_size = -1;
1704	}
1705}
1706
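/*
 * Worked example (illustrative): with 4KB pages the low 12 bits address
 * bytes within a page, so vm_block_size = 9 gives 2^9 = 512 PTEs per page
 * table, i.e. 512 * 4KB = 2MB of address space per table; the remaining
 * virtual address bits select page directory entries.
 */
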
1707/**
1708 * amdgpu_device_check_vm_size - validate the vm size
1709 *
1710 * @adev: amdgpu_device pointer
1711 *
1712 * Validates the vm size in GB specified via module parameter.
1713 * The VM size is the size of the GPU virtual memory space in GB.
1714 */
1715static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1716{
1717	/* no need to check the default value */
1718	if (amdgpu_vm_size == -1)
1719		return;
1720
1721	if (amdgpu_vm_size < 1) {
1722		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1723			 amdgpu_vm_size);
1724		amdgpu_vm_size = -1;
1725	}
1726}
1727
1728static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1729{
1730	struct sysinfo si;
1731	bool is_os_64 = (sizeof(void *) == 8);
1732	uint64_t total_memory;
1733	uint64_t dram_size_seven_GB = 0x1B8000000;
1734	uint64_t dram_size_three_GB = 0xB8000000;
1735
1736	if (amdgpu_smu_memory_pool_size == 0)
1737		return;
1738
1739	if (!is_os_64) {
1740		DRM_WARN("Not 64-bit OS, feature not supported\n");
1741		goto def_value;
1742	}
1743	si_meminfo(&si);
1744	total_memory = (uint64_t)si.totalram * si.mem_unit;
1745
1746	if ((amdgpu_smu_memory_pool_size == 1) ||
1747		(amdgpu_smu_memory_pool_size == 2)) {
1748		if (total_memory < dram_size_three_GB)
1749			goto def_value1;
1750	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1751		(amdgpu_smu_memory_pool_size == 8)) {
1752		if (total_memory < dram_size_seven_GB)
1753			goto def_value1;
1754	} else {
1755		DRM_WARN("Smu memory pool size not supported\n");
1756		goto def_value;
1757	}
1758	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1759
1760	return;
1761
1762def_value1:
1763	DRM_WARN("Not enough system memory\n");
1764def_value:
1765	adev->pm.smu_prv_buffer_size = 0;
1766}
1767
1768static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1769{
1770	if (!(adev->flags & AMD_IS_APU) ||
1771	    adev->asic_type < CHIP_RAVEN)
1772		return 0;
1773
1774	switch (adev->asic_type) {
1775	case CHIP_RAVEN:
1776		if (adev->pdev->device == 0x15dd)
1777			adev->apu_flags |= AMD_APU_IS_RAVEN;
1778		if (adev->pdev->device == 0x15d8)
1779			adev->apu_flags |= AMD_APU_IS_PICASSO;
1780		break;
1781	case CHIP_RENOIR:
1782		if ((adev->pdev->device == 0x1636) ||
1783		    (adev->pdev->device == 0x164c))
1784			adev->apu_flags |= AMD_APU_IS_RENOIR;
1785		else
1786			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1787		break;
1788	case CHIP_VANGOGH:
1789		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1790		break;
1791	case CHIP_YELLOW_CARP:
1792		break;
1793	case CHIP_CYAN_SKILLFISH:
1794		if ((adev->pdev->device == 0x13FE) ||
1795		    (adev->pdev->device == 0x143F))
1796			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1797		break;
1798	default:
1799		break;
1800	}
1801
1802	return 0;
1803}
1804
1805/**
1806 * amdgpu_device_check_arguments - validate module params
1807 *
1808 * @adev: amdgpu_device pointer
1809 *
1810 * Validates certain module parameters and updates
1811 * the associated values used by the driver (all asics).
1812 */
1813static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1814{
1815	if (amdgpu_sched_jobs < 4) {
1816		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1817			 amdgpu_sched_jobs);
1818		amdgpu_sched_jobs = 4;
1819	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
1820		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1821			 amdgpu_sched_jobs);
1822		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1823	}
1824
1825	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1826		/* gart size must be greater or equal to 32M */
1827		dev_warn(adev->dev, "gart size (%d) too small\n",
1828			 amdgpu_gart_size);
1829		amdgpu_gart_size = -1;
1830	}
1831
1832	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1833		/* gtt size must be greater or equal to 32M */
1834		dev_warn(adev->dev, "gtt size (%d) too small\n",
1835				 amdgpu_gtt_size);
1836		amdgpu_gtt_size = -1;
1837	}
1838
1839	/* valid range is between 4 and 9 inclusive */
1840	if (amdgpu_vm_fragment_size != -1 &&
1841	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1842		dev_warn(adev->dev, "valid range for VM fragment size is between 4 and 9\n");
1843		amdgpu_vm_fragment_size = -1;
1844	}
1845
1846	if (amdgpu_sched_hw_submission < 2) {
1847		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1848			 amdgpu_sched_hw_submission);
1849		amdgpu_sched_hw_submission = 2;
1850	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1851		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1852			 amdgpu_sched_hw_submission);
1853		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1854	}
1855
1856	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1857		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1858		amdgpu_reset_method = -1;
1859	}
1860
1861	amdgpu_device_check_smu_prv_buffer_size(adev);
1862
1863	amdgpu_device_check_vm_size(adev);
1864
1865	amdgpu_device_check_block_size(adev);
1866
1867	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1868
1869	return 0;
1870}
1871
1872/**
1873 * amdgpu_switcheroo_set_state - set switcheroo state
1874 *
1875 * @pdev: pci dev pointer
1876 * @state: vga_switcheroo state
1877 *
1878 * Callback for the switcheroo driver.  Suspends the asic before it is
1879 * powered down or resumes it after it has been powered up using ACPI methods.
1880 */
1881static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1882					enum vga_switcheroo_state state)
1883{
1884	struct drm_device *dev = pci_get_drvdata(pdev);
1885	int r;
1886
1887	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1888		return;
1889
1890	if (state == VGA_SWITCHEROO_ON) {
1891		pr_info("switched on\n");
1892		/* don't suspend or resume card normally */
1893		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1894
1895		pci_set_power_state(pdev, PCI_D0);
1896		amdgpu_device_load_pci_state(pdev);
1897		r = pci_enable_device(pdev);
1898		if (r)
1899			DRM_WARN("pci_enable_device failed (%d)\n", r);
1900		amdgpu_device_resume(dev, true);
1901
1902		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1903	} else {
1904		pr_info("switched off\n");
1905		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1906		amdgpu_device_prepare(dev);
1907		amdgpu_device_suspend(dev, true);
1908		amdgpu_device_cache_pci_state(pdev);
1909		/* Shut down the device */
1910		pci_disable_device(pdev);
1911		pci_set_power_state(pdev, PCI_D3cold);
1912		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1913	}
1914}
1915
1916/**
1917 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1918 *
1919 * @pdev: pci dev pointer
1920 *
1921 * Callback for the switcheroo driver.  Checks if the switcheroo
1922 * state can be changed.
1923 * Returns true if the state can be changed, false if not.
1924 */
1925static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1926{
1927	struct drm_device *dev = pci_get_drvdata(pdev);
1928
1929	/*
1930	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1931	 * locking inversion with the driver load path. And the access here is
1932	 * completely racy anyway. So don't bother with locking for now.
1933	 */
1934	return atomic_read(&dev->open_count) == 0;
1935}
1936
1937static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1938	.set_gpu_state = amdgpu_switcheroo_set_state,
1939	.reprobe = NULL,
1940	.can_switch = amdgpu_switcheroo_can_switch,
1941};
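/*
 * These ops are handed to vga_switcheroo during device init; a sketch of
 * the registration (done elsewhere in this file), assuming a PX platform:
 *
 *	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops,
 *				       amdgpu_device_supports_px(dev));
 */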
1942
1943/**
1944 * amdgpu_device_ip_set_clockgating_state - set the CG state
1945 *
1946 * @dev: amdgpu_device pointer
1947 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1948 * @state: clockgating state (gate or ungate)
1949 *
1950 * Sets the requested clockgating state for all instances of
1951 * the hardware IP specified.
1952 * Returns the error code from the last instance.
1953 */
1954int amdgpu_device_ip_set_clockgating_state(void *dev,
1955					   enum amd_ip_block_type block_type,
1956					   enum amd_clockgating_state state)
1957{
1958	struct amdgpu_device *adev = dev;
1959	int i, r = 0;
1960
1961	for (i = 0; i < adev->num_ip_blocks; i++) {
1962		if (!adev->ip_blocks[i].status.valid)
1963			continue;
1964		if (adev->ip_blocks[i].version->type != block_type)
1965			continue;
1966		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1967			continue;
1968		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1969			(void *)adev, state);
1970		if (r)
1971			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1972				  adev->ip_blocks[i].version->funcs->name, r);
1973	}
1974	return r;
1975}
1976
1977/**
1978 * amdgpu_device_ip_set_powergating_state - set the PG state
1979 *
1980 * @dev: amdgpu_device pointer
1981 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1982 * @state: powergating state (gate or ungate)
1983 *
1984 * Sets the requested powergating state for all instances of
1985 * the hardware IP specified.
1986 * Returns the error code from the last instance.
1987 */
1988int amdgpu_device_ip_set_powergating_state(void *dev,
1989					   enum amd_ip_block_type block_type,
1990					   enum amd_powergating_state state)
1991{
1992	struct amdgpu_device *adev = dev;
1993	int i, r = 0;
1994
1995	for (i = 0; i < adev->num_ip_blocks; i++) {
1996		if (!adev->ip_blocks[i].status.valid)
1997			continue;
1998		if (adev->ip_blocks[i].version->type != block_type)
1999			continue;
2000		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
2001			continue;
2002		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
2003			(void *)adev, state);
2004		if (r)
2005			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
2006				  adev->ip_blocks[i].version->funcs->name, r);
2007	}
2008	return r;
2009}
2010
2011/**
2012 * amdgpu_device_ip_get_clockgating_state - get the CG state
2013 *
2014 * @adev: amdgpu_device pointer
2015 * @flags: clockgating feature flags
2016 *
2017 * Walks the list of IPs on the device and updates the clockgating
2018 * flags for each IP.
2019 * Updates @flags with the feature flags for each hardware IP where
2020 * clockgating is enabled.
2021 */
2022void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
2023					    u64 *flags)
2024{
2025	int i;
2026
2027	for (i = 0; i < adev->num_ip_blocks; i++) {
2028		if (!adev->ip_blocks[i].status.valid)
2029			continue;
2030		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
2031			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
2032	}
2033}
2034
2035/**
2036 * amdgpu_device_ip_wait_for_idle - wait for idle
2037 *
2038 * @adev: amdgpu_device pointer
2039 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2040 *
2041 * Waits for the requested hardware IP to be idle.
2042 * Returns 0 for success or a negative error code on failure.
2043 */
2044int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
2045				   enum amd_ip_block_type block_type)
2046{
2047	int i, r;
2048
2049	for (i = 0; i < adev->num_ip_blocks; i++) {
2050		if (!adev->ip_blocks[i].status.valid)
2051			continue;
2052		if (adev->ip_blocks[i].version->type == block_type) {
2053			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
2054			if (r)
2055				return r;
2056			break;
2057		}
2058	}
2059	return 0;
2060
2061}
2062
2063/**
2064 * amdgpu_device_ip_is_idle - is the hardware IP idle
2065 *
2066 * @adev: amdgpu_device pointer
2067 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2068 *
2069 * Check if the hardware IP is idle or not.
2070 * Returns true if the IP is idle, false if not.
2071 */
2072bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
2073			      enum amd_ip_block_type block_type)
2074{
2075	int i;
2076
2077	for (i = 0; i < adev->num_ip_blocks; i++) {
2078		if (!adev->ip_blocks[i].status.valid)
2079			continue;
2080		if (adev->ip_blocks[i].version->type == block_type)
2081			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
2082	}
2083	return true;
2084
2085}
2086
2087/**
2088 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
2089 *
2090 * @adev: amdgpu_device pointer
2091 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
2092 *
2093 * Returns a pointer to the hardware IP block structure
2094 * if it exists for the asic, otherwise NULL.
2095 */
2096struct amdgpu_ip_block *
2097amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
2098			      enum amd_ip_block_type type)
2099{
2100	int i;
2101
2102	for (i = 0; i < adev->num_ip_blocks; i++)
2103		if (adev->ip_blocks[i].version->type == type)
2104			return &adev->ip_blocks[i];
2105
2106	return NULL;
2107}
2108
2109/**
2110 * amdgpu_device_ip_block_version_cmp
2111 *
2112 * @adev: amdgpu_device pointer
2113 * @type: enum amd_ip_block_type
2114 * @major: major version
2115 * @minor: minor version
2116 *
2117 * Returns 0 if the IP block's version is equal to or greater than the
2118 * one given, or 1 if it is smaller or the ip_block doesn't exist.
2119 */
2120int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
2121				       enum amd_ip_block_type type,
2122				       u32 major, u32 minor)
2123{
2124	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
2125
2126	if (ip_block && ((ip_block->version->major > major) ||
2127			((ip_block->version->major == major) &&
2128			(ip_block->version->minor >= minor))))
2129		return 0;
2130
2131	return 1;
2132}
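/*
 * Hypothetical usage sketch: gate a code path on GFX IP v8.0 or newer
 * (enable_new_feature() is a made-up helper for illustration):
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						8, 0))
 *		enable_new_feature(adev);
 */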
2133
2134/**
2135 * amdgpu_device_ip_block_add
2136 *
2137 * @adev: amdgpu_device pointer
2138 * @ip_block_version: pointer to the IP to add
2139 *
2140 * Adds the IP block driver information to the collection of IPs
2141 * on the asic.
2142 */
2143int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
2144			       const struct amdgpu_ip_block_version *ip_block_version)
2145{
2146	if (!ip_block_version)
2147		return -EINVAL;
2148
2149	switch (ip_block_version->type) {
2150	case AMD_IP_BLOCK_TYPE_VCN:
2151		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
2152			return 0;
2153		break;
2154	case AMD_IP_BLOCK_TYPE_JPEG:
2155		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
2156			return 0;
2157		break;
2158	default:
2159		break;
2160	}
2161
2162	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
2163		  ip_block_version->funcs->name);
2164
2165	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
2166
2167	return 0;
2168}
2169
2170/**
2171 * amdgpu_device_enable_virtual_display - enable virtual display feature
2172 *
2173 * @adev: amdgpu_device pointer
2174 *
2175 * Enables the virtual display feature if the user has enabled it via
2176 * the module parameter virtual_display.  This feature provides a virtual
2177 * display hardware on headless boards or in virtualized environments.
2178 * This function parses and validates the configuration string specified by
2179 * the user and configures the virtual display configuration (number of
2180 * virtual connectors, crtcs, etc.) specified.
2181 */
2182static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
2183{
2184	adev->enable_virtual_display = false;
2185
2186	if (amdgpu_virtual_display) {
2187		const char *pci_address_name = pci_name(adev->pdev);
2188		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
2189
2190		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
2191		pciaddstr_tmp = pciaddstr;
2192		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
2193			pciaddname = strsep(&pciaddname_tmp, ",");
2194			if (!strcmp("all", pciaddname)
2195			    || !strcmp(pci_address_name, pciaddname)) {
2196				long num_crtc;
2197				int res = -1;
2198
2199				adev->enable_virtual_display = true;
2200
2201				if (pciaddname_tmp)
2202					res = kstrtol(pciaddname_tmp, 10,
2203						      &num_crtc);
2204
2205				if (!res) {
2206					if (num_crtc < 1)
2207						num_crtc = 1;
2208					if (num_crtc > 6)
2209						num_crtc = 6;
2210					adev->mode_info.num_crtc = num_crtc;
2211				} else {
2212					adev->mode_info.num_crtc = 1;
2213				}
2214				break;
2215			}
2216		}
2217
2218		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
2219			 amdgpu_virtual_display, pci_address_name,
2220			 adev->enable_virtual_display, adev->mode_info.num_crtc);
2221
2222		kfree(pciaddstr);
2223	}
2224}
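/*
 * From the parsing above, the virtual_display parameter is a semicolon
 * separated list of "<pci address>[,<num_crtc>]" entries, where the address
 * may be the literal "all"; e.g. (hypothetical command lines):
 *
 *	amdgpu.virtual_display=0000:01:00.0,2
 *	amdgpu.virtual_display=all,1
 *
 * num_crtc is clamped to the range 1..6 and defaults to 1 when absent or
 * unparsable.
 */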
2225
2226void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
2227{
2228	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
2229		adev->mode_info.num_crtc = 1;
2230		adev->enable_virtual_display = true;
2231		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
2232			 adev->enable_virtual_display, adev->mode_info.num_crtc);
2233	}
2234}
2235
2236/**
2237 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2238 *
2239 * @adev: amdgpu_device pointer
2240 *
2241 * Parses the asic configuration parameters specified in the gpu info
2242 * firmware and makes them available to the driver for use in configuring
2243 * the asic.
2244 * Returns 0 on success, -EINVAL on failure.
2245 */
2246static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
2247{
2248	const char *chip_name;
2249	char fw_name[40];
2250	int err;
2251	const struct gpu_info_firmware_header_v1_0 *hdr;
2252
2253	adev->firmware.gpu_info_fw = NULL;
2254
2255	if (adev->mman.discovery_bin)
2256		return 0;
2257
2258	switch (adev->asic_type) {
2259	default:
2260		return 0;
2261	case CHIP_VEGA10:
2262		chip_name = "vega10";
2263		break;
2264	case CHIP_VEGA12:
2265		chip_name = "vega12";
2266		break;
2267	case CHIP_RAVEN:
2268		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2269			chip_name = "raven2";
2270		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
2271			chip_name = "picasso";
2272		else
2273			chip_name = "raven";
2274		break;
2275	case CHIP_ARCTURUS:
2276		chip_name = "arcturus";
2277		break;
2278	case CHIP_NAVI12:
2279		chip_name = "navi12";
2280		break;
2281	}
2282
2283	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
2284	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
2285	if (err) {
2286		dev_err(adev->dev,
2287			"Failed to get gpu_info firmware \"%s\"\n",
2288			fw_name);
2289		goto out;
2290	}
2291
2292	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2293	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2294
2295	switch (hdr->version_major) {
2296	case 1:
2297	{
2298		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2299			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2300								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2301
2302		/*
2303		 * Should be dropped when DAL no longer needs it.
2304		 */
2305		if (adev->asic_type == CHIP_NAVI12)
2306			goto parse_soc_bounding_box;
2307
2308		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2309		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2310		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2311		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2312		adev->gfx.config.max_texture_channel_caches =
2313			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2314		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2315		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2316		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2317		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2318		adev->gfx.config.double_offchip_lds_buf =
2319			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2320		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2321		adev->gfx.cu_info.max_waves_per_simd =
2322			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2323		adev->gfx.cu_info.max_scratch_slots_per_cu =
2324			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2325		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2326		if (hdr->version_minor >= 1) {
2327			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2328				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2329									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2330			adev->gfx.config.num_sc_per_sh =
2331				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2332			adev->gfx.config.num_packer_per_sc =
2333				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2334		}
2335
2336parse_soc_bounding_box:
2337		/*
2338		 * soc bounding box info is not integrated into the discovery table,
2339		 * so we always need to parse it from the gpu info firmware if needed.
2340		 */
2341		if (hdr->version_minor == 2) {
2342			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2343				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2344									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2345			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2346		}
2347		break;
2348	}
2349	default:
2350		dev_err(adev->dev,
2351			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2352		err = -EINVAL;
2353		goto out;
2354	}
2355out:
2356	return err;
2357}
2358
2359/**
2360 * amdgpu_device_ip_early_init - run early init for hardware IPs
2361 *
2362 * @adev: amdgpu_device pointer
2363 *
2364 * Early initialization pass for hardware IPs.  The hardware IPs that make
2365 * up each asic are discovered and each IP's early_init callback is run.  This
2366 * is the first stage in initializing the asic.
2367 * Returns 0 on success, negative error code on failure.
2368 */
2369static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2370{
2371	struct pci_dev *parent;
2372	int i, r;
2373	bool total;
2374
2375	amdgpu_device_enable_virtual_display(adev);
2376
2377	if (amdgpu_sriov_vf(adev)) {
2378		r = amdgpu_virt_request_full_gpu(adev, true);
2379		if (r)
2380			return r;
2381	}
2382
2383	switch (adev->asic_type) {
2384#ifdef CONFIG_DRM_AMDGPU_SI
2385	case CHIP_VERDE:
2386	case CHIP_TAHITI:
2387	case CHIP_PITCAIRN:
2388	case CHIP_OLAND:
2389	case CHIP_HAINAN:
2390		adev->family = AMDGPU_FAMILY_SI;
2391		r = si_set_ip_blocks(adev);
2392		if (r)
2393			return r;
2394		break;
2395#endif
2396#ifdef CONFIG_DRM_AMDGPU_CIK
2397	case CHIP_BONAIRE:
2398	case CHIP_HAWAII:
2399	case CHIP_KAVERI:
2400	case CHIP_KABINI:
2401	case CHIP_MULLINS:
2402		if (adev->flags & AMD_IS_APU)
2403			adev->family = AMDGPU_FAMILY_KV;
2404		else
2405			adev->family = AMDGPU_FAMILY_CI;
2406
2407		r = cik_set_ip_blocks(adev);
2408		if (r)
2409			return r;
2410		break;
2411#endif
2412	case CHIP_TOPAZ:
2413	case CHIP_TONGA:
2414	case CHIP_FIJI:
2415	case CHIP_POLARIS10:
2416	case CHIP_POLARIS11:
2417	case CHIP_POLARIS12:
2418	case CHIP_VEGAM:
2419	case CHIP_CARRIZO:
2420	case CHIP_STONEY:
2421		if (adev->flags & AMD_IS_APU)
2422			adev->family = AMDGPU_FAMILY_CZ;
2423		else
2424			adev->family = AMDGPU_FAMILY_VI;
2425
2426		r = vi_set_ip_blocks(adev);
2427		if (r)
2428			return r;
2429		break;
2430	default:
2431		r = amdgpu_discovery_set_ip_blocks(adev);
2432		if (r)
2433			return r;
2434		break;
2435	}
2436
2437	if (amdgpu_has_atpx() &&
2438	    (amdgpu_is_atpx_hybrid() ||
2439	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2440	    ((adev->flags & AMD_IS_APU) == 0) &&
2441	    !dev_is_removable(&adev->pdev->dev))
2442		adev->flags |= AMD_IS_PX;
2443
2444	if (!(adev->flags & AMD_IS_APU)) {
2445		parent = pcie_find_root_port(adev->pdev);
2446		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2447	}
2448
2449
2450	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2451	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2452		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2453	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2454		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2455	if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
2456		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2457
2458	total = true;
2459	for (i = 0; i < adev->num_ip_blocks; i++) {
2460		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2461			DRM_WARN("disabled ip block: %d <%s>\n",
2462				  i, adev->ip_blocks[i].version->funcs->name);
2463			adev->ip_blocks[i].status.valid = false;
2464		} else {
2465			if (adev->ip_blocks[i].version->funcs->early_init) {
2466				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2467				if (r == -ENOENT) {
2468					adev->ip_blocks[i].status.valid = false;
2469				} else if (r) {
2470					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2471						  adev->ip_blocks[i].version->funcs->name, r);
2472					total = false;
2473				} else {
2474					adev->ip_blocks[i].status.valid = true;
2475				}
2476			} else {
2477				adev->ip_blocks[i].status.valid = true;
2478			}
2479		}
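		/* As a debugging aid, a block can be masked off from the
		 * kernel command line, e.g. amdgpu.ip_block_mask=0xfffffffe
		 * would clear bit 0 and disable the first IP block on the
		 * list; a hypothetical debugging sketch, not a supported
		 * production configuration.
		 */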
2480		/* get the vbios after the asic_funcs are set up */
2481		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2482			r = amdgpu_device_parse_gpu_info_fw(adev);
2483			if (r)
2484				return r;
2485
2486			/* Read BIOS */
2487			if (amdgpu_device_read_bios(adev)) {
2488				if (!amdgpu_get_bios(adev))
2489					return -EINVAL;
2490
2491				r = amdgpu_atombios_init(adev);
2492				if (r) {
2493					dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2494					amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2495					return r;
2496				}
2497			}
2498
2499			/* get pf2vf msg info at its earliest time */
2500			if (amdgpu_sriov_vf(adev))
2501				amdgpu_virt_init_data_exchange(adev);
2502
2503		}
2504	}
2505	if (!total)
2506		return -ENODEV;
2507
2508	amdgpu_amdkfd_device_probe(adev);
2509	adev->cg_flags &= amdgpu_cg_mask;
2510	adev->pg_flags &= amdgpu_pg_mask;
2511
2512	return 0;
2513}
2514
2515static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2516{
2517	int i, r;
2518
2519	for (i = 0; i < adev->num_ip_blocks; i++) {
2520		if (!adev->ip_blocks[i].status.sw)
2521			continue;
2522		if (adev->ip_blocks[i].status.hw)
2523			continue;
2524		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2525		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2526		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2527			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2528			if (r) {
2529				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2530					  adev->ip_blocks[i].version->funcs->name, r);
2531				return r;
2532			}
2533			adev->ip_blocks[i].status.hw = true;
2534		}
2535	}
2536
2537	return 0;
2538}
2539
2540static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2541{
2542	int i, r;
2543
2544	for (i = 0; i < adev->num_ip_blocks; i++) {
2545		if (!adev->ip_blocks[i].status.sw)
2546			continue;
2547		if (adev->ip_blocks[i].status.hw)
2548			continue;
2549		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2550		if (r) {
2551			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2552				  adev->ip_blocks[i].version->funcs->name, r);
2553			return r;
2554		}
2555		adev->ip_blocks[i].status.hw = true;
2556	}
2557
2558	return 0;
2559}
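/*
 * A rough picture of how the two phases are sequenced by the init path
 * (see amdgpu_device_ip_init() below): COMMON, IH and, on SR-IOV, PSP come
 * up in phase 1, the microcode is loaded, and every remaining block is
 * started in phase 2 once the firmware is in place.
 */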
2560
2561static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2562{
2563	int r = 0;
2564	int i;
2565	uint32_t smu_version;
2566
2567	if (adev->asic_type >= CHIP_VEGA10) {
2568		for (i = 0; i < adev->num_ip_blocks; i++) {
2569			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2570				continue;
2571
2572			if (!adev->ip_blocks[i].status.sw)
2573				continue;
2574
2575			/* no need to do the fw loading again if already done */
2576			if (adev->ip_blocks[i].status.hw)
2577				break;
2578
2579			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2580				r = adev->ip_blocks[i].version->funcs->resume(adev);
2581				if (r) {
2582					DRM_ERROR("resume of IP block <%s> failed %d\n",
2583							  adev->ip_blocks[i].version->funcs->name, r);
2584					return r;
2585				}
2586			} else {
2587				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2588				if (r) {
2589					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2590							  adev->ip_blocks[i].version->funcs->name, r);
2591					return r;
2592				}
2593			}
2594
2595			adev->ip_blocks[i].status.hw = true;
2596			break;
2597		}
2598	}
2599
2600	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2601		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2602
2603	return r;
2604}
2605
2606static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2607{
2608	long timeout;
2609	int r, i;
2610
2611	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2612		struct amdgpu_ring *ring = adev->rings[i];
2613
2614		/* No need to setup the GPU scheduler for rings that don't need it */
2615		if (!ring || ring->no_scheduler)
2616			continue;
2617
2618		switch (ring->funcs->type) {
2619		case AMDGPU_RING_TYPE_GFX:
2620			timeout = adev->gfx_timeout;
2621			break;
2622		case AMDGPU_RING_TYPE_COMPUTE:
2623			timeout = adev->compute_timeout;
2624			break;
2625		case AMDGPU_RING_TYPE_SDMA:
2626			timeout = adev->sdma_timeout;
2627			break;
2628		default:
2629			timeout = adev->video_timeout;
2630			break;
2631		}
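		/* Each timeout above is derived from the amdgpu.lockup_timeout
		 * module parameter (or its per-engine defaults); e.g. a
		 * hypothetical boot with amdgpu.lockup_timeout=60000 would
		 * give non-compute jobs a 60 second timeout.
		 */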
2632
2633		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
2634				   DRM_SCHED_PRIORITY_COUNT,
2635				   ring->num_hw_submission, 0,
2636				   timeout, adev->reset_domain->wq,
2637				   ring->sched_score, ring->name,
2638				   adev->dev);
2639		if (r) {
2640			DRM_ERROR("Failed to create scheduler on ring %s.\n",
2641				  ring->name);
2642			return r;
2643		}
2644		r = amdgpu_uvd_entity_init(adev, ring);
2645		if (r) {
2646			DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
2647				  ring->name);
2648			return r;
2649		}
2650		r = amdgpu_vce_entity_init(adev, ring);
2651		if (r) {
2652			DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
2653				  ring->name);
2654			return r;
2655		}
2656	}
2657
2658	amdgpu_xcp_update_partition_sched_list(adev);
2659
2660	return 0;
2661}
2662
2663
2664/**
2665 * amdgpu_device_ip_init - run init for hardware IPs
2666 *
2667 * @adev: amdgpu_device pointer
2668 *
2669 * Main initialization pass for hardware IPs.  The list of all the hardware
2670 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2671 * are run.  sw_init initializes the software state associated with each IP
2672 * and hw_init initializes the hardware associated with each IP.
2673 * Returns 0 on success, negative error code on failure.
2674 */
2675static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2676{
2677	int i, r;
2678
2679	r = amdgpu_ras_init(adev);
2680	if (r)
2681		return r;
2682
2683	for (i = 0; i < adev->num_ip_blocks; i++) {
2684		if (!adev->ip_blocks[i].status.valid)
2685			continue;
2686		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2687		if (r) {
2688			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2689				  adev->ip_blocks[i].version->funcs->name, r);
2690			goto init_failed;
2691		}
2692		adev->ip_blocks[i].status.sw = true;
2693
2694		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2695			/* need to do common hw init early so everything is set up for gmc */
2696			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2697			if (r) {
2698				DRM_ERROR("hw_init %d failed %d\n", i, r);
2699				goto init_failed;
2700			}
2701			adev->ip_blocks[i].status.hw = true;
2702		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2703			/* need to do gmc hw init early so we can allocate gpu mem */
2704			/* Try to reserve bad pages early */
2705			if (amdgpu_sriov_vf(adev))
2706				amdgpu_virt_exchange_data(adev);
2707
2708			r = amdgpu_device_mem_scratch_init(adev);
2709			if (r) {
2710				DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
2711				goto init_failed;
2712			}
2713			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2714			if (r) {
2715				DRM_ERROR("hw_init %d failed %d\n", i, r);
2716				goto init_failed;
2717			}
2718			r = amdgpu_device_wb_init(adev);
2719			if (r) {
2720				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2721				goto init_failed;
2722			}
2723			adev->ip_blocks[i].status.hw = true;
2724
2725			/* right after GMC hw init, we create CSA */
2726			if (adev->gfx.mcbp) {
2727				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2728							       AMDGPU_GEM_DOMAIN_VRAM |
2729							       AMDGPU_GEM_DOMAIN_GTT,
2730							       AMDGPU_CSA_SIZE);
2731				if (r) {
2732					DRM_ERROR("allocate CSA failed %d\n", r);
2733					goto init_failed;
2734				}
2735			}
2736
2737			r = amdgpu_seq64_init(adev);
2738			if (r) {
2739				DRM_ERROR("allocate seq64 failed %d\n", r);
2740				goto init_failed;
2741			}
2742		}
2743	}
2744
2745	if (amdgpu_sriov_vf(adev))
2746		amdgpu_virt_init_data_exchange(adev);
2747
2748	r = amdgpu_ib_pool_init(adev);
2749	if (r) {
2750		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2751		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2752		goto init_failed;
2753	}
2754
2755	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2756	if (r)
2757		goto init_failed;
2758
2759	r = amdgpu_device_ip_hw_init_phase1(adev);
2760	if (r)
2761		goto init_failed;
2762
2763	r = amdgpu_device_fw_loading(adev);
2764	if (r)
2765		goto init_failed;
2766
2767	r = amdgpu_device_ip_hw_init_phase2(adev);
2768	if (r)
2769		goto init_failed;
2770
2771	/*
2772	 * Retired pages will be loaded from eeprom and reserved here;
2773	 * this must be called after amdgpu_device_ip_hw_init_phase2, since
2774	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2775	 * functional for I2C communication, which is only true at this point.
2776	 *
2777	 * amdgpu_ras_recovery_init may fail, but the caller only cares about
2778	 * failures caused by a bad gpu situation, which stop the amdgpu init
2779	 * process accordingly. For other failures, it still releases all
2780	 * the resources and prints an error message, rather than returning a
2781	 * negative value to the upper level.
2782	 *
2783	 * Note: theoretically, this should be called before all vram allocations
2784	 * to protect retired pages from abuse.
2785	 */
2786	r = amdgpu_ras_recovery_init(adev);
2787	if (r)
2788		goto init_failed;
2789
2790	/*
2791	 * In case of XGMI, grab an extra reference on the reset domain for this device
2792	 */
2793	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2794		if (amdgpu_xgmi_add_device(adev) == 0) {
2795			if (!amdgpu_sriov_vf(adev)) {
2796				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2797
2798				if (WARN_ON(!hive)) {
2799					r = -ENOENT;
2800					goto init_failed;
2801				}
2802
2803				if (!hive->reset_domain ||
2804				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2805					r = -ENOENT;
2806					amdgpu_put_xgmi_hive(hive);
2807					goto init_failed;
2808				}
2809
2810				/* Drop the early temporary reset domain we created for device */
2811				amdgpu_reset_put_reset_domain(adev->reset_domain);
2812				adev->reset_domain = hive->reset_domain;
2813				amdgpu_put_xgmi_hive(hive);
2814			}
2815		}
2816	}
2817
2818	r = amdgpu_device_init_schedulers(adev);
2819	if (r)
2820		goto init_failed;
2821
2822	if (adev->mman.buffer_funcs_ring->sched.ready)
2823		amdgpu_ttm_set_buffer_funcs_status(adev, true);
2824
2825	/* Don't init kfd if the whole hive needs to be reset during init */
2826	if (!adev->gmc.xgmi.pending_reset) {
2827		kgd2kfd_init_zone_device(adev);
2828		amdgpu_amdkfd_device_init(adev);
2829	}
2830
2831	amdgpu_fru_get_product_info(adev);
2832
2833init_failed:
2834
2835	return r;
2836}
2837
2838/**
2839 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2840 *
2841 * @adev: amdgpu_device pointer
2842 *
2843 * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2844 * this function before a GPU reset.  If the value is retained after a
2845 * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2846 */
2847static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2848{
2849	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2850}
2851
2852/**
2853 * amdgpu_device_check_vram_lost - check if vram is valid
2854 *
2855 * @adev: amdgpu_device pointer
2856 *
2857 * Checks the reset magic value written to the gart pointer in VRAM.
2858 * The driver calls this after a GPU reset to see if the contents of
2859 * VRAM were lost or not.
2860 * returns true if vram is lost, false if not.
2861 */
2862static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2863{
2864	if (memcmp(adev->gart.ptr, adev->reset_magic,
2865			AMDGPU_RESET_MAGIC_NUM))
2866		return true;
2867
2868	if (!amdgpu_in_reset(adev))
2869		return false;
2870
2871	/*
2872	 * For all ASICs with baco/mode1 reset, the VRAM is
2873	 * always assumed to be lost.
2874	 */
2875	switch (amdgpu_asic_reset_method(adev)) {
2876	case AMD_RESET_METHOD_BACO:
2877	case AMD_RESET_METHOD_MODE1:
2878		return true;
2879	default:
2880		return false;
2881	}
2882}
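/*
 * A minimal sketch of the reset-magic round trip: before a reset the driver
 * snapshots the first AMDGPU_RESET_MAGIC_NUM bytes of the GART page,
 *
 *	amdgpu_device_fill_reset_magic(adev);
 *	... reset happens ...
 *	vram_lost = amdgpu_device_check_vram_lost(adev);
 *
 * and a post-reset mismatch (or a baco/mode1 reset method) is treated as
 * VRAM loss.
 */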
2883
2884/**
2885 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2886 *
2887 * @adev: amdgpu_device pointer
2888 * @state: clockgating state (gate or ungate)
2889 *
2890 * The list of all the hardware IPs that make up the asic is walked and the
2891 * set_clockgating_state callbacks are run.
2892 * During the late init pass this enables clockgating for hardware IPs;
2893 * during fini or suspend it disables clockgating.
2894 * Returns 0 on success, negative error code on failure.
2895 */
2896
2897int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2898			       enum amd_clockgating_state state)
2899{
2900	int i, j, r;
2901
2902	if (amdgpu_emu_mode == 1)
2903		return 0;
2904
2905	for (j = 0; j < adev->num_ip_blocks; j++) {
2906		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2907		if (!adev->ip_blocks[i].status.late_initialized)
2908			continue;
2909		/* skip CG for GFX, SDMA on S0ix */
2910		if (adev->in_s0ix &&
2911		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2912		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2913			continue;
2914		/* skip CG for VCE/UVD, it's handled specially */
2915		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2916		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2917		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2918		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2919		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2920			/* enable clockgating to save power */
2921			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2922										     state);
2923			if (r) {
2924				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2925					  adev->ip_blocks[i].version->funcs->name, r);
2926				return r;
2927			}
2928		}
2929	}
2930
2931	return 0;
2932}
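/*
 * Note the index arithmetic shared by the CG walker above and the PG walker
 * below: gating walks the IP list front to back, while ungating walks it
 * back to front, so gating is undone in reverse order.
 */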
2933
2934int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2935			       enum amd_powergating_state state)
2936{
2937	int i, j, r;
2938
2939	if (amdgpu_emu_mode == 1)
2940		return 0;
2941
2942	for (j = 0; j < adev->num_ip_blocks; j++) {
2943		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2944		if (!adev->ip_blocks[i].status.late_initialized)
2945			continue;
2946		/* skip PG for GFX, SDMA on S0ix */
2947		if (adev->in_s0ix &&
2948		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2949		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2950			continue;
2951		/* skip PG for VCE/UVD, it's handled specially */
2952		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2953		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2954		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2955		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2956		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2957			/* enable powergating to save power */
2958			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2959											state);
2960			if (r) {
2961				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2962					  adev->ip_blocks[i].version->funcs->name, r);
2963				return r;
2964			}
2965		}
2966	}
2967	return 0;
2968}
2969
2970static int amdgpu_device_enable_mgpu_fan_boost(void)
2971{
2972	struct amdgpu_gpu_instance *gpu_ins;
2973	struct amdgpu_device *adev;
2974	int i, ret = 0;
2975
2976	mutex_lock(&mgpu_info.mutex);
2977
2978	/*
2979	 * MGPU fan boost feature should be enabled
2980	 * only when there are two or more dGPUs in
2981	 * the system
2982	 */
2983	if (mgpu_info.num_dgpu < 2)
2984		goto out;
2985
2986	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2987		gpu_ins = &(mgpu_info.gpu_ins[i]);
2988		adev = gpu_ins->adev;
2989		if (!(adev->flags & AMD_IS_APU) &&
2990		    !gpu_ins->mgpu_fan_enabled) {
2991			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2992			if (ret)
2993				break;
2994
2995			gpu_ins->mgpu_fan_enabled = 1;
2996		}
2997	}
2998
2999out:
3000	mutex_unlock(&mgpu_info.mutex);
3001
3002	return ret;
3003}
3004
3005/**
3006 * amdgpu_device_ip_late_init - run late init for hardware IPs
3007 *
3008 * @adev: amdgpu_device pointer
3009 *
3010 * Late initialization pass for hardware IPs.  The list of all the hardware
3011 * IPs that make up the asic is walked and the late_init callbacks are run.
3012 * late_init covers any special initialization that an IP requires
3013 * after all of them have been initialized or something that needs to happen
3014 * late in the init process.
3015 * Returns 0 on success, negative error code on failure.
3016 */
3017static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
3018{
3019	struct amdgpu_gpu_instance *gpu_instance;
3020	int i = 0, r;
3021
3022	for (i = 0; i < adev->num_ip_blocks; i++) {
3023		if (!adev->ip_blocks[i].status.hw)
3024			continue;
3025		if (adev->ip_blocks[i].version->funcs->late_init) {
3026			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
3027			if (r) {
3028				DRM_ERROR("late_init of IP block <%s> failed %d\n",
3029					  adev->ip_blocks[i].version->funcs->name, r);
3030				return r;
3031			}
3032		}
3033		adev->ip_blocks[i].status.late_initialized = true;
3034	}
3035
3036	r = amdgpu_ras_late_init(adev);
3037	if (r) {
3038		DRM_ERROR("amdgpu_ras_late_init failed %d", r);
3039		return r;
3040	}
3041
3042	amdgpu_ras_set_error_query_ready(adev, true);
3043
3044	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3045	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
3046
3047	amdgpu_device_fill_reset_magic(adev);
3048
3049	r = amdgpu_device_enable_mgpu_fan_boost();
3050	if (r)
3051		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
3052
3053	/* For passthrough configuration on arcturus and aldebaran, enable special SBR handling */
3054	if (amdgpu_passthrough(adev) &&
3055	    ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
3056	     adev->asic_type == CHIP_ALDEBARAN))
3057		amdgpu_dpm_handle_passthrough_sbr(adev, true);
3058
3059	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3060		mutex_lock(&mgpu_info.mutex);
3061
3062		/*
3063		 * Reset the device p-state to low, as it was booted with it high.
3064		 *
3065		 * This should be performed only after all devices from the same
3066		 * hive get initialized.
3067		 *
3068		 * However, the number of devices in a hive is not known in
3069		 * advance; it is counted one by one as the devices initialize.
3070		 *
3071		 * So we wait until all XGMI interlinked devices are initialized.
3072		 * This may bring some delay as those devices may come from
3073		 * different hives. But that should be OK.
3074		 */
3075		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
3076			for (i = 0; i < mgpu_info.num_gpu; i++) {
3077				gpu_instance = &(mgpu_info.gpu_ins[i]);
3078				if (gpu_instance->adev->flags & AMD_IS_APU)
3079					continue;
3080
3081				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
3082						AMDGPU_XGMI_PSTATE_MIN);
3083				if (r) {
3084					DRM_ERROR("pstate setting failed (%d).\n", r);
3085					break;
3086				}
3087			}
3088		}
3089
3090		mutex_unlock(&mgpu_info.mutex);
3091	}
3092
3093	return 0;
3094}
3095
3096/**
3097 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
3098 *
3099 * @adev: amdgpu_device pointer
3100 *
3101 * For ASICs that need to disable the SMC first.
3102 */
3103static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
3104{
3105	int i, r;
3106
3107	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
3108		return;
3109
3110	for (i = 0; i < adev->num_ip_blocks; i++) {
3111		if (!adev->ip_blocks[i].status.hw)
3112			continue;
3113		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3114			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3115			/* XXX handle errors */
3116			if (r) {
3117				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3118					  adev->ip_blocks[i].version->funcs->name, r);
3119			}
3120			adev->ip_blocks[i].status.hw = false;
3121			break;
3122		}
3123	}
3124}
3125
3126static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
3127{
3128	int i, r;
3129
3130	for (i = 0; i < adev->num_ip_blocks; i++) {
3131		if (!adev->ip_blocks[i].version->funcs->early_fini)
3132			continue;
3133
3134		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
3135		if (r) {
3136			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
3137				  adev->ip_blocks[i].version->funcs->name, r);
3138		}
3139	}
3140
3141	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3142	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3143
3144	amdgpu_amdkfd_suspend(adev, false);
3145
3146	/* Workaround for ASICs that need to disable the SMC first */
3147	amdgpu_device_smu_fini_early(adev);
3148
3149	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3150		if (!adev->ip_blocks[i].status.hw)
3151			continue;
3152
3153		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3154		/* XXX handle errors */
3155		if (r) {
3156			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3157				  adev->ip_blocks[i].version->funcs->name, r);
3158		}
3159
3160		adev->ip_blocks[i].status.hw = false;
3161	}
3162
3163	if (amdgpu_sriov_vf(adev)) {
3164		if (amdgpu_virt_release_full_gpu(adev, false))
3165			DRM_ERROR("failed to release exclusive mode on fini\n");
3166	}
3167
3168	return 0;
3169}
3170
3171/**
3172 * amdgpu_device_ip_fini - run fini for hardware IPs
3173 *
3174 * @adev: amdgpu_device pointer
3175 *
3176 * Main teardown pass for hardware IPs.  The list of all the hardware
3177 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
3178 * are run.  hw_fini tears down the hardware associated with each IP
3179 * and sw_fini tears down any software state associated with each IP.
3180 * Returns 0 on success, negative error code on failure.
3181 */
3182static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
3183{
3184	int i, r;
3185
3186	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
3187		amdgpu_virt_release_ras_err_handler_data(adev);
3188
3189	if (adev->gmc.xgmi.num_physical_nodes > 1)
3190		amdgpu_xgmi_remove_device(adev);
3191
3192	amdgpu_amdkfd_device_fini_sw(adev);
3193
3194	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3195		if (!adev->ip_blocks[i].status.sw)
3196			continue;
3197
3198		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
3199			amdgpu_ucode_free_bo(adev);
3200			amdgpu_free_static_csa(&adev->virt.csa_obj);
3201			amdgpu_device_wb_fini(adev);
3202			amdgpu_device_mem_scratch_fini(adev);
3203			amdgpu_ib_pool_fini(adev);
3204			amdgpu_seq64_fini(adev);
3205		}
3206
3207		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
3208		/* XXX handle errors */
3209		if (r) {
3210			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
3211				  adev->ip_blocks[i].version->funcs->name, r);
3212		}
3213		adev->ip_blocks[i].status.sw = false;
3214		adev->ip_blocks[i].status.valid = false;
3215	}
3216
3217	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3218		if (!adev->ip_blocks[i].status.late_initialized)
3219			continue;
3220		if (adev->ip_blocks[i].version->funcs->late_fini)
3221			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
3222		adev->ip_blocks[i].status.late_initialized = false;
3223	}
3224
3225	amdgpu_ras_fini(adev);
3226
3227	return 0;
3228}
3229
3230/**
3231 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3232 *
3233 * @work: work_struct.
3234 */
3235static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
3236{
3237	struct amdgpu_device *adev =
3238		container_of(work, struct amdgpu_device, delayed_init_work.work);
3239	int r;
3240
3241	r = amdgpu_ib_ring_tests(adev);
3242	if (r)
3243		DRM_ERROR("ib ring test failed (%d).\n", r);
3244}
3245
3246static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
3247{
3248	struct amdgpu_device *adev =
3249		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
3250
3251	WARN_ON_ONCE(adev->gfx.gfx_off_state);
3252	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3253
3254	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
3255		adev->gfx.gfx_off_state = true;
3256}
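/*
 * A sketch of how this handler is reached: amdgpu_gfx_off_ctrl() decrements
 * gfx_off_req_count and, on reaching zero, schedules gfx_off_delay_work
 * rather than gating GFX immediately, so short bursts of back-to-back
 * submissions do not bounce the GFX power state.
 */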
3257
3258/**
3259 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3260 *
3261 * @adev: amdgpu_device pointer
3262 *
3263 * Main suspend function for hardware IPs.  The list of all the hardware
3264 * IPs that make up the asic is walked, clockgating is disabled and the
3265 * suspend callbacks are run.  suspend puts the hardware and software state
3266 * in each IP into a state suitable for suspend.
3267 * Returns 0 on success, negative error code on failure.
3268 */
3269static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3270{
3271	int i, r;
3272
3273	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3274	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3275
3276	/*
3277	 * Per the PMFW team's suggestion, the driver needs to disable the
3278	 * gfxoff and df cstate features for the gpu reset (e.g. Mode1Reset)
3279	 * scenario. Add the missing df cstate disablement here.
3280	 */
3281	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3282		dev_warn(adev->dev, "Failed to disallow df cstate");
3283
3284	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3285		if (!adev->ip_blocks[i].status.valid)
3286			continue;
3287
3288		/* displays are handled separately */
3289		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3290			continue;
3291
3293		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3294		/* XXX handle errors */
3295		if (r) {
3296			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3297				  adev->ip_blocks[i].version->funcs->name, r);
3298			return r;
3299		}
3300
3301		adev->ip_blocks[i].status.hw = false;
3302	}
3303
3304	return 0;
3305}
3306
3307/**
3308 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3309 *
3310 * @adev: amdgpu_device pointer
3311 *
3312 * Main suspend function for hardware IPs.  The list of all the hardware
3313 * IPs that make up the asic is walked, clockgating is disabled and the
3314 * suspend callbacks are run.  suspend puts the hardware and software state
3315 * in each IP into a state suitable for suspend.
3316 * Returns 0 on success, negative error code on failure.
3317 */
3318static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3319{
3320	int i, r;
3321
3322	if (adev->in_s0ix)
3323		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3324
3325	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3326		if (!adev->ip_blocks[i].status.valid)
3327			continue;
3328		/* displays are handled in phase1 */
3329		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3330			continue;
3331		/* PSP lost connection when err_event_athub occurs */
3332		if (amdgpu_ras_intr_triggered() &&
3333		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3334			adev->ip_blocks[i].status.hw = false;
3335			continue;
3336		}
3337
3338		/* skip unnecessary suspend if we have not initialized them yet */
3339		if (adev->gmc.xgmi.pending_reset &&
3340		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3341		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3342		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3343		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3344			adev->ip_blocks[i].status.hw = false;
3345			continue;
3346		}
3347
3348		/* skip suspend of gfx/mes and psp for S0ix
3349		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3350		 * like at runtime. PSP is also part of the always on hardware
3351		 * so no need to suspend it.
3352		 */
3353		if (adev->in_s0ix &&
3354		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3355		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3356		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3357			continue;
3358
3359		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3360		if (adev->in_s0ix &&
3361		    (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3362		     IP_VERSION(5, 0, 0)) &&
3363		    (adev->ip_blocks[i].version->type ==
3364		     AMD_IP_BLOCK_TYPE_SDMA))
3365			continue;
3366
3367		/* During cold boot, swPSP provides the IMU and RLC FW binaries to TOS.
3368		 * These live in the TMR, hence are expected to be reused by PSP-TOS to
3369		 * reload from this location, and RLC autoload also gets triggered
3370		 * from there based on the PMFW -> PSP message during the re-init
3371		 * sequence. Therefore, psp suspend & resume should be skipped to avoid
3372		 * destroying the TMR and reloading the FWs again for IMU enabled APU ASICs.
3373		 */
3374		if (amdgpu_in_reset(adev) &&
3375		    (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3376		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3377			continue;
3378
3380		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3381		/* XXX handle errors */
3382		if (r) {
3383			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3384				  adev->ip_blocks[i].version->funcs->name, r);
3385		}
3386		adev->ip_blocks[i].status.hw = false;
3387		/* handle putting the SMC in the appropriate state */
3388		if (!amdgpu_sriov_vf(adev)) {
3389			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3390				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3391				if (r) {
3392					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3393							adev->mp1_state, r);
3394					return r;
3395				}
3396			}
3397		}
3398	}
3399
3400	return 0;
3401}
3402
3403/**
3404 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3405 *
3406 * @adev: amdgpu_device pointer
3407 *
3408 * Main suspend function for hardware IPs.  The list of all the hardware
3409 * IPs that make up the asic is walked, clockgating is disabled and the
3410 * suspend callbacks are run.  suspend puts the hardware and software state
3411 * in each IP into a state suitable for suspend.
3412 * Returns 0 on success, negative error code on failure.
3413 */
3414int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3415{
3416	int r;
3417
3418	if (amdgpu_sriov_vf(adev)) {
3419		amdgpu_virt_fini_data_exchange(adev);
3420		amdgpu_virt_request_full_gpu(adev, false);
3421	}
3422
3423	amdgpu_ttm_set_buffer_funcs_status(adev, false);
3424
3425	r = amdgpu_device_ip_suspend_phase1(adev);
3426	if (r)
3427		return r;
3428	r = amdgpu_device_ip_suspend_phase2(adev);
3429
3430	if (amdgpu_sriov_vf(adev))
3431		amdgpu_virt_release_full_gpu(adev, false);
3432
3433	return r;
3434}
3435
3436static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3437{
3438	int i, r;
3439
3440	static enum amd_ip_block_type ip_order[] = {
3441		AMD_IP_BLOCK_TYPE_COMMON,
3442		AMD_IP_BLOCK_TYPE_GMC,
3443		AMD_IP_BLOCK_TYPE_PSP,
3444		AMD_IP_BLOCK_TYPE_IH,
3445	};
3446
3447	for (i = 0; i < adev->num_ip_blocks; i++) {
3448		int j;
3449		struct amdgpu_ip_block *block;
3450
3451		block = &adev->ip_blocks[i];
3452		block->status.hw = false;
3453
3454		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3455
3456			if (block->version->type != ip_order[j] ||
3457				!block->status.valid)
3458				continue;
3459
3460			r = block->version->funcs->hw_init(adev);
3461			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3462			if (r)
3463				return r;
3464			block->status.hw = true;
3465		}
3466	}
3467
3468	return 0;
3469}
3470
3471static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3472{
3473	int i, r;
3474
3475	static enum amd_ip_block_type ip_order[] = {
3476		AMD_IP_BLOCK_TYPE_SMC,
3477		AMD_IP_BLOCK_TYPE_DCE,
3478		AMD_IP_BLOCK_TYPE_GFX,
3479		AMD_IP_BLOCK_TYPE_SDMA,
3480		AMD_IP_BLOCK_TYPE_MES,
3481		AMD_IP_BLOCK_TYPE_UVD,
3482		AMD_IP_BLOCK_TYPE_VCE,
3483		AMD_IP_BLOCK_TYPE_VCN,
3484		AMD_IP_BLOCK_TYPE_JPEG
3485	};
3486
3487	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3488		int j;
3489		struct amdgpu_ip_block *block;
3490
3491		for (j = 0; j < adev->num_ip_blocks; j++) {
3492			block = &adev->ip_blocks[j];
3493
3494			if (block->version->type != ip_order[i] ||
3495				!block->status.valid ||
3496				block->status.hw)
3497				continue;
3498
3499			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3500				r = block->version->funcs->resume(adev);
3501			else
3502				r = block->version->funcs->hw_init(adev);
3503
3504			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3505			if (r)
3506				return r;
3507			block->status.hw = true;
3508		}
3509	}
3510
3511	return 0;
3512}
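/*
 * Taken together, the two SR-IOV reinit passes replay bring-up in a fixed
 * order rather than list order: the early pass restores COMMON, GMC, PSP
 * and IH, and the late pass restores the remaining engines, resuming the
 * SMC rather than re-running its hw_init, as the code above shows.
 */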
3513
3514/**
3515 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3516 *
3517 * @adev: amdgpu_device pointer
3518 *
3519 * First resume function for hardware IPs.  The list of all the hardware
3520 * IPs that make up the asic is walked and the resume callbacks are run for
3521 * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3522 * after a suspend and updates the software state as necessary.  This
3523 * function is also used for restoring the GPU after a GPU reset.
3524 * Returns 0 on success, negative error code on failure.
3525 */
3526static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3527{
3528	int i, r;
3529
3530	for (i = 0; i < adev->num_ip_blocks; i++) {
3531		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3532			continue;
3533		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3534		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3535		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3536		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3537
3538			r = adev->ip_blocks[i].version->funcs->resume(adev);
3539			if (r) {
3540				DRM_ERROR("resume of IP block <%s> failed %d\n",
3541					  adev->ip_blocks[i].version->funcs->name, r);
3542				return r;
3543			}
3544			adev->ip_blocks[i].status.hw = true;
3545		}
3546	}
3547
3548	return 0;
3549}
3550
3551/**
3552 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3553 *
3554 * @adev: amdgpu_device pointer
3555 *
3556 * Second resume function for hardware IPs.  The list of all the hardware
3557 * IPs that make up the asic is walked and the resume callbacks are run for
3558 * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3559 * functional state after a suspend and updates the software state as
3560 * necessary.  This function is also used for restoring the GPU after a GPU
3561 * reset.
3562 * Returns 0 on success, negative error code on failure.
3563 */
3564static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3565{
3566	int i, r;
3567
3568	for (i = 0; i < adev->num_ip_blocks; i++) {
3569		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3570			continue;
3571		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3572		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3573		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3574		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3575			continue;
3576		r = adev->ip_blocks[i].version->funcs->resume(adev);
3577		if (r) {
3578			DRM_ERROR("resume of IP block <%s> failed %d\n",
3579				  adev->ip_blocks[i].version->funcs->name, r);
3580			return r;
3581		}
3582		adev->ip_blocks[i].status.hw = true;
3583	}
3584
3585	return 0;
3586}
3587
3588/**
3589 * amdgpu_device_ip_resume - run resume for hardware IPs
3590 *
3591 * @adev: amdgpu_device pointer
3592 *
3593 * Main resume function for hardware IPs.  The hardware IPs
3594 * are split into two resume functions because they are
3595 * also used in recovering from a GPU reset and some additional
3596 * steps need to be taken between them.  In this case (S3/S4) they are
3597 * run sequentially.
3598 * Returns 0 on success, negative error code on failure.
3599 */
3600static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3601{
3602	int r;
3603
3604	r = amdgpu_device_ip_resume_phase1(adev);
3605	if (r)
3606		return r;
3607
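	/* Firmware loading sits between the two resume phases: phase 1
	 * brings up COMMON, GMC and IH (plus PSP under SR-IOV); phase 2
	 * handles the remaining blocks.
	 */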
3608	r = amdgpu_device_fw_loading(adev);
3609	if (r)
3610		return r;
3611
3612	r = amdgpu_device_ip_resume_phase2(adev);
3613
3614	if (adev->mman.buffer_funcs_ring->sched.ready)
3615		amdgpu_ttm_set_buffer_funcs_status(adev, true);
3616
3617	return r;
3618}
3619
3620/**
3621 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3622 *
3623 * @adev: amdgpu_device pointer
3624 *
3625 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3626 */
3627static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3628{
3629	if (amdgpu_sriov_vf(adev)) {
3630		if (adev->is_atom_fw) {
3631			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3632				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3633		} else {
3634			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3635				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3636		}
3637
3638		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3639			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3640	}
3641}
3642
3643/**
3644 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3645 *
3646 * @asic_type: AMD asic type
3647 *
3648 * Check if there is DC (new modesetting infrastructure) support for an asic.
3649 * Returns true if DC has support, false if not.
3650 */
3651bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3652{
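	/* amdgpu.dc: -1 = auto (the default), 0 = force off, 1 = force on.
	 * Chips below that default to the legacy path only honor an explicit
	 * dc=1 (amdgpu_dc > 0); the rest enable DC unless it is explicitly
	 * disabled (amdgpu_dc != 0).
	 */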
3653	switch (asic_type) {
3654#ifdef CONFIG_DRM_AMDGPU_SI
3655	case CHIP_HAINAN:
3656#endif
3657	case CHIP_TOPAZ:
3658		/* chips with no display hardware */
3659		return false;
3660#if defined(CONFIG_DRM_AMD_DC)
3661	case CHIP_TAHITI:
3662	case CHIP_PITCAIRN:
3663	case CHIP_VERDE:
3664	case CHIP_OLAND:
3665		/*
3666		 * We have systems in the wild with these ASICs that require
3667		 * LVDS and VGA support which is not supported with DC.
3668		 *
3669		 * Fall back to the non-DC driver here by default so as not to
3670		 * cause regressions.
3671		 */
3672#if defined(CONFIG_DRM_AMD_DC_SI)
3673		return amdgpu_dc > 0;
3674#else
3675		return false;
3676#endif
3677	case CHIP_BONAIRE:
3678	case CHIP_KAVERI:
3679	case CHIP_KABINI:
3680	case CHIP_MULLINS:
3681		/*
3682		 * We have systems in the wild with these ASICs that require
3683		 * VGA support which is not supported with DC.
3684		 *
3685		 * Fall back to the non-DC driver here by default so as not to
3686		 * cause regressions.
3687		 */
3688		return amdgpu_dc > 0;
3689	default:
3690		return amdgpu_dc != 0;
3691#else
3692	default:
3693		if (amdgpu_dc > 0)
3694			DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
3695		return false;
3696#endif
3697	}
3698}
3699
3700/**
3701 * amdgpu_device_has_dc_support - check if dc is supported
3702 *
3703 * @adev: amdgpu_device pointer
3704 *
3705 * Returns true for supported, false for not supported
3706 */
3707bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3708{
3709	if (adev->enable_virtual_display ||
3710	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3711		return false;
3712
3713	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3714}
3715
3716static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3717{
3718	struct amdgpu_device *adev =
3719		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3720	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3721
3722	/* It's a bug to not have a hive within this function */
3723	if (WARN_ON(!hive))
3724		return;
3725
3726	/*
3727	 * Use task barrier to synchronize all xgmi reset works across the
3728	 * hive. task_barrier_enter and task_barrier_exit will block
3729	 * until all the threads running the xgmi reset works reach
3730	 * those points. task_barrier_full will do both blocks.
3731	 */
3732	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3733
3734		task_barrier_enter(&hive->tb);
3735		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3736
3737		if (adev->asic_reset_res)
3738			goto fail;
3739
3740		task_barrier_exit(&hive->tb);
3741		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3742
3743		if (adev->asic_reset_res)
3744			goto fail;
3745
3746		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
3747	} else {
3748
3749		task_barrier_full(&hive->tb);
3750		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3751	}
3752
3753fail:
3754	if (adev->asic_reset_res)
3755		DRM_WARN("ASIC reset failed with error %d for drm dev %s",
3756			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3757	amdgpu_put_xgmi_hive(hive);
3758}
3759
3760static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3761{
3762	char *input = amdgpu_lockup_timeout;
3763	char *timeout_setting = NULL;
3764	int index = 0;
3765	long timeout;
3766	int ret = 0;
3767
3768	/*
3769	 * By default the timeout for non-compute jobs is 10000 ms
3770	 * and 60000 ms for compute jobs.
3771	 * Under SR-IOV the compute timeout is 60000 ms in one-VF
3772	 * mode and 10000 ms otherwise.
3773	 */
3774	adev->gfx_timeout = msecs_to_jiffies(10000);
3775	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3776	if (amdgpu_sriov_vf(adev))
3777		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3778					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3779	else
3780		adev->compute_timeout = msecs_to_jiffies(60000);
3781
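	/*
	 * amdgpu.lockup_timeout takes up to four comma-separated values,
	 * applied in order to gfx, compute, sdma and video jobs, e.g.:
	 *
	 *   amdgpu.lockup_timeout=10000,60000,10000,10000
	 *
	 * 0 keeps the default for that engine, a negative value disables
	 * the timeout entirely, and a single value is applied to all
	 * non-compute engines (see below).
	 */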
3782	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3783		while ((timeout_setting = strsep(&input, ",")) &&
3784				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3785			ret = kstrtol(timeout_setting, 0, &timeout);
3786			if (ret)
3787				return ret;
3788
3789			if (timeout == 0) {
3790				index++;
3791				continue;
3792			} else if (timeout < 0) {
3793				timeout = MAX_SCHEDULE_TIMEOUT;
3794				dev_warn(adev->dev, "lockup timeout disabled");
3795				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3796			} else {
3797				timeout = msecs_to_jiffies(timeout);
3798			}
3799
3800			switch (index++) {
3801			case 0:
3802				adev->gfx_timeout = timeout;
3803				break;
3804			case 1:
3805				adev->compute_timeout = timeout;
3806				break;
3807			case 2:
3808				adev->sdma_timeout = timeout;
3809				break;
3810			case 3:
3811				adev->video_timeout = timeout;
3812				break;
3813			default:
3814				break;
3815			}
3816		}
3817		/*
3818		 * There is only one value specified and
3819		 * it should apply to all non-compute jobs.
3820		 */
3821		if (index == 1) {
3822			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3823			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3824				adev->compute_timeout = adev->gfx_timeout;
3825		}
3826	}
3827
3828	return ret;
3829}
3830
3831/**
3832 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3833 *
3834 * @adev: amdgpu_device pointer
3835 *
3836 * RAM is direct mapped to the GPU if the IOMMU is disabled or in passthrough mode.
3837 */
3838static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3839{
3840	struct iommu_domain *domain;
3841
3842	domain = iommu_get_domain_for_dev(adev->dev);
3843	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3844		adev->ram_is_direct_mapped = true;
3845}
3846
3847static const struct attribute *amdgpu_dev_attributes[] = {
3848	&dev_attr_pcie_replay_count.attr,
3849	NULL
3850};
3851
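/*
 * MCBP (Mid-Command Buffer Preemption) lets the GFX ring be preempted
 * in the middle of a command buffer.  amdgpu_mcbp=1/0 force-enables or
 * force-disables it, and SR-IOV VFs always run with it enabled.
 */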
3852static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3853{
3854	if (amdgpu_mcbp == 1)
3855		adev->gfx.mcbp = true;
3856	else if (amdgpu_mcbp == 0)
3857		adev->gfx.mcbp = false;
3858
3859	if (amdgpu_sriov_vf(adev))
3860		adev->gfx.mcbp = true;
3861
3862	if (adev->gfx.mcbp)
3863		DRM_INFO("MCBP is enabled\n");
3864}
3865
3866/**
3867 * amdgpu_device_init - initialize the driver
3868 *
3869 * @adev: amdgpu_device pointer
3870 * @flags: driver flags
3871 *
3872 * Initializes the driver info and hw (all asics).
3873 * Returns 0 for success or an error on failure.
3874 * Called at driver startup.
3875 */
3876int amdgpu_device_init(struct amdgpu_device *adev,
3877		       uint32_t flags)
3878{
3879	struct drm_device *ddev = adev_to_drm(adev);
3880	struct pci_dev *pdev = adev->pdev;
3881	int r, i;
3882	bool px = false;
3883	u32 max_MBps;
3884	int tmp;
3885
3886	adev->shutdown = false;
3887	adev->flags = flags;
3888
3889	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3890		adev->asic_type = amdgpu_force_asic_type;
3891	else
3892		adev->asic_type = flags & AMD_ASIC_MASK;
3893
3894	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3895	if (amdgpu_emu_mode == 1)
3896		adev->usec_timeout *= 10;
3897	adev->gmc.gart_size = 512 * 1024 * 1024;
3898	adev->accel_working = false;
3899	adev->num_rings = 0;
3900	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3901	adev->mman.buffer_funcs = NULL;
3902	adev->mman.buffer_funcs_ring = NULL;
3903	adev->vm_manager.vm_pte_funcs = NULL;
3904	adev->vm_manager.vm_pte_num_scheds = 0;
3905	adev->gmc.gmc_funcs = NULL;
3906	adev->harvest_ip_mask = 0x0;
3907	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3908	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3909
3910	adev->smc_rreg = &amdgpu_invalid_rreg;
3911	adev->smc_wreg = &amdgpu_invalid_wreg;
3912	adev->pcie_rreg = &amdgpu_invalid_rreg;
3913	adev->pcie_wreg = &amdgpu_invalid_wreg;
3914	adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
3915	adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
3916	adev->pciep_rreg = &amdgpu_invalid_rreg;
3917	adev->pciep_wreg = &amdgpu_invalid_wreg;
3918	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3919	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3920	adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
3921	adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
3922	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3923	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3924	adev->didt_rreg = &amdgpu_invalid_rreg;
3925	adev->didt_wreg = &amdgpu_invalid_wreg;
3926	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3927	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3928	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3929	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3930
3931	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3932		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3933		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3934
3935	/* All mutex initialization is done here so that these
3936	 * functions can be called again without locking issues.
3937	 */
3938	mutex_init(&adev->firmware.mutex);
3939	mutex_init(&adev->pm.mutex);
3940	mutex_init(&adev->gfx.gpu_clock_mutex);
3941	mutex_init(&adev->srbm_mutex);
3942	mutex_init(&adev->gfx.pipe_reserve_mutex);
3943	mutex_init(&adev->gfx.gfx_off_mutex);
3944	mutex_init(&adev->gfx.partition_mutex);
3945	mutex_init(&adev->grbm_idx_mutex);
3946	mutex_init(&adev->mn_lock);
3947	mutex_init(&adev->virt.vf_errors.lock);
3948	hash_init(adev->mn_hash);
3949	mutex_init(&adev->psp.mutex);
3950	mutex_init(&adev->notifier_lock);
3951	mutex_init(&adev->pm.stable_pstate_ctx_lock);
3952	mutex_init(&adev->benchmark_mutex);
3953
3954	amdgpu_device_init_apu_flags(adev);
3955
3956	r = amdgpu_device_check_arguments(adev);
3957	if (r)
3958		return r;
3959
3960	spin_lock_init(&adev->mmio_idx_lock);
3961	spin_lock_init(&adev->smc_idx_lock);
3962	spin_lock_init(&adev->pcie_idx_lock);
3963	spin_lock_init(&adev->uvd_ctx_idx_lock);
3964	spin_lock_init(&adev->didt_idx_lock);
3965	spin_lock_init(&adev->gc_cac_idx_lock);
3966	spin_lock_init(&adev->se_cac_idx_lock);
3967	spin_lock_init(&adev->audio_endpt_idx_lock);
3968	spin_lock_init(&adev->mm_stats.lock);
3969
3970	INIT_LIST_HEAD(&adev->shadow_list);
3971	mutex_init(&adev->shadow_list_lock);
3972
3973	INIT_LIST_HEAD(&adev->reset_list);
3974
3975	INIT_LIST_HEAD(&adev->ras_list);
3976
3977	INIT_LIST_HEAD(&adev->pm.od_kobj_list);
3978
3979	INIT_DELAYED_WORK(&adev->delayed_init_work,
3980			  amdgpu_device_delayed_init_work_handler);
3981	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3982			  amdgpu_device_delay_enable_gfx_off);
3983
3984	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3985
3986	adev->gfx.gfx_off_req_count = 1;
3987	adev->gfx.gfx_off_residency = 0;
3988	adev->gfx.gfx_off_entrycount = 0;
3989	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3990
3991	atomic_set(&adev->throttling_logging_enabled, 1);
3992	/*
3993	 * If throttling continues, logging will be performed every minute
3994	 * to avoid log flooding. "-1" is subtracted since the thermal
3995	 * throttling interrupt comes every second. Thus, the total logging
3996	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3997	 * for throttling interrupt) = 60 seconds.
3998	 */
3999	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
4000	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
4001
4002	/* Registers mapping */
4003	/* TODO: block userspace mapping of io register */
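	/* Bonaire and newer expose the register aperture in BAR 5; older
	 * ASICs use BAR 2.
	 */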
4004	if (adev->asic_type >= CHIP_BONAIRE) {
4005		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
4006		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
4007	} else {
4008		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
4009		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
4010	}
4011
4012	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
4013		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
4014
4015	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
4016	if (!adev->rmmio)
4017		return -ENOMEM;
4018
4019	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
4020	DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
4021
4022	/*
4023	 * The reset domain needs to be present early, before any XGMI hive is
4024	 * discovered and initialized, so the reset semaphore and in_gpu_reset
4025	 * flag can be used during init and before the first call to RREG32.
4026	 */
4027	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
4028	if (!adev->reset_domain)
4029		return -ENOMEM;
4030
4031	/* detect hw virtualization here */
4032	amdgpu_detect_virtualization(adev);
4033
4034	amdgpu_device_get_pcie_info(adev);
4035
4036	r = amdgpu_device_get_job_timeout_settings(adev);
4037	if (r) {
4038		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
4039		return r;
4040	}
4041
4042	/* early init functions */
4043	r = amdgpu_device_ip_early_init(adev);
4044	if (r)
4045		return r;
4046
4047	amdgpu_device_set_mcbp(adev);
4048
4049	/* Get rid of things like offb */
4050	r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
4051	if (r)
4052		return r;
4053
4054	/* Enable TMZ based on IP_VERSION */
4055	amdgpu_gmc_tmz_set(adev);
4056
4057	amdgpu_gmc_noretry_set(adev);
4058	/* Need to get xgmi info early to decide the reset behavior */
4059	if (adev->gmc.xgmi.supported) {
4060		r = adev->gfxhub.funcs->get_xgmi_info(adev);
4061		if (r)
4062			return r;
4063	}
4064
4065	/* enable PCIE atomic ops */
4066	if (amdgpu_sriov_vf(adev)) {
4067		if (adev->virt.fw_reserve.p_pf2vf)
4068			adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
4069						      adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
4070				(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4071	/* APUs with GFX9 onwards don't rely on PCIe atomics; their internal
4072	 * path natively supports atomics, so set have_atomics_support to true.
4073	 */
4074	} else if ((adev->flags & AMD_IS_APU) &&
4075		   (amdgpu_ip_version(adev, GC_HWIP, 0) >
4076		    IP_VERSION(9, 0, 0))) {
4077		adev->have_atomics_support = true;
4078	} else {
4079		adev->have_atomics_support =
4080			!pci_enable_atomic_ops_to_root(adev->pdev,
4081					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
4082					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4083	}
4084
4085	if (!adev->have_atomics_support)
4086		dev_info(adev->dev, "PCIe atomic ops are not supported\n");
4087
4088	/* doorbell bar mapping and doorbell index init */
4089	amdgpu_doorbell_init(adev);
4090
4091	if (amdgpu_emu_mode == 1) {
4092		/* post the asic on emulation mode */
4093		emu_soc_asic_init(adev);
4094		goto fence_driver_init;
4095	}
4096
4097	amdgpu_reset_init(adev);
4098
4099	/* detect if we are with an SRIOV vbios */
4100	if (adev->bios)
4101		amdgpu_device_detect_sriov_bios(adev);
4102
4103	/* check if we need to reset the asic
4104	 *  E.g., driver was not cleanly unloaded previously, etc.
4105	 */
4106	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
4107		if (adev->gmc.xgmi.num_physical_nodes) {
4108			dev_info(adev->dev, "Pending hive reset.\n");
4109			adev->gmc.xgmi.pending_reset = true;
4110			/* Only init the blocks necessary for the SMU to handle the reset */
4111			for (i = 0; i < adev->num_ip_blocks; i++) {
4112				if (!adev->ip_blocks[i].status.valid)
4113					continue;
4114				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
4115				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
4116				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
4117				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
4118					DRM_DEBUG("IP %s disabled for hw_init.\n",
4119						adev->ip_blocks[i].version->funcs->name);
4120					adev->ip_blocks[i].status.hw = true;
4121				}
4122			}
4123		} else {
4124			tmp = amdgpu_reset_method;
4125			/* It should do a default reset when loading or reloading the driver,
4126			 * regardless of the module parameter reset_method.
4127			 */
4128			amdgpu_reset_method = AMD_RESET_METHOD_NONE;
4129			r = amdgpu_asic_reset(adev);
4130			amdgpu_reset_method = tmp;
4131			if (r) {
4132				dev_err(adev->dev, "asic reset on init failed\n");
4133				goto failed;
4134			}
4135		}
4136	}
4137
4138	/* Post card if necessary */
4139	if (amdgpu_device_need_post(adev)) {
4140		if (!adev->bios) {
4141			dev_err(adev->dev, "no vBIOS found\n");
4142			r = -EINVAL;
4143			goto failed;
4144		}
4145		DRM_INFO("GPU posting now...\n");
4146		r = amdgpu_device_asic_init(adev);
4147		if (r) {
4148			dev_err(adev->dev, "gpu post error!\n");
4149			goto failed;
4150		}
4151	}
4152
4153	if (adev->bios) {
4154		if (adev->is_atom_fw) {
4155			/* Initialize clocks */
4156			r = amdgpu_atomfirmware_get_clock_info(adev);
4157			if (r) {
4158				dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
4159				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4160				goto failed;
4161			}
4162		} else {
4163			/* Initialize clocks */
4164			r = amdgpu_atombios_get_clock_info(adev);
4165			if (r) {
4166				dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
4167				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4168				goto failed;
4169			}
4170			/* init i2c buses */
4171			if (!amdgpu_device_has_dc_support(adev))
4172				amdgpu_atombios_i2c_init(adev);
4173		}
4174	}
4175
4176fence_driver_init:
4177	/* Fence driver */
4178	r = amdgpu_fence_driver_sw_init(adev);
4179	if (r) {
4180		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
4181		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
4182		goto failed;
4183	}
4184
4185	/* init the mode config */
4186	drm_mode_config_init(adev_to_drm(adev));
4187
4188	r = amdgpu_device_ip_init(adev);
4189	if (r) {
4190		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
4191		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
4192		goto release_ras_con;
4193	}
4194
4195	amdgpu_fence_driver_hw_init(adev);
4196
4197	dev_info(adev->dev,
4198		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
4199			adev->gfx.config.max_shader_engines,
4200			adev->gfx.config.max_sh_per_se,
4201			adev->gfx.config.max_cu_per_sh,
4202			adev->gfx.cu_info.number);
4203
4204	adev->accel_working = true;
4205
4206	amdgpu_vm_check_compute_bug(adev);
4207
4208	/* Initialize the buffer migration limit. */
4209	if (amdgpu_moverate >= 0)
4210		max_MBps = amdgpu_moverate;
4211	else
4212		max_MBps = 8; /* Allow 8 MB/s. */
4213	/* Get a log2 for easy divisions. */
4214	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
4215
4216	/*
4217	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
4218	 * Otherwise the mgpu fan boost feature will be skipped because the
4219	 * gpu instance count would still be too low.
4220	 */
4221	amdgpu_register_gpu_instance(adev);
4222
4223	/* enable clockgating, etc. after ib tests, etc. since some blocks require
4224	 * explicit gating rather than handling it automatically.
4225	 */
4226	if (!adev->gmc.xgmi.pending_reset) {
4227		r = amdgpu_device_ip_late_init(adev);
4228		if (r) {
4229			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4230			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4231			goto release_ras_con;
4232		}
4233		/* must succeed. */
4234		amdgpu_ras_resume(adev);
4235		queue_delayed_work(system_wq, &adev->delayed_init_work,
4236				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4237	}
4238
4239	if (amdgpu_sriov_vf(adev)) {
4240		amdgpu_virt_release_full_gpu(adev, true);
4241		flush_delayed_work(&adev->delayed_init_work);
4242	}
4243
4244	/*
4245	 * Register these sysfs interfaces after `late_init`, since some of
4246	 * the operations performed in `late_init` can affect the creation
4247	 * of the sysfs interfaces.
4248	 */
4249	r = amdgpu_atombios_sysfs_init(adev);
4250	if (r)
4251		drm_err(&adev->ddev,
4252			"registering atombios sysfs failed (%d).\n", r);
4253
4254	r = amdgpu_pm_sysfs_init(adev);
4255	if (r)
4256		DRM_ERROR("registering pm sysfs failed (%d).\n", r);
4257
4258	r = amdgpu_ucode_sysfs_init(adev);
4259	if (r) {
4260		adev->ucode_sysfs_en = false;
4261		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
4262	} else
4263		adev->ucode_sysfs_en = true;
4264
4265	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
4266	if (r)
4267		dev_err(adev->dev, "Could not create amdgpu device attr\n");
4268
4269	r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
4270	if (r)
4271		dev_err(adev->dev,
4272			"Could not create amdgpu board attributes\n");
4273
4274	amdgpu_fru_sysfs_init(adev);
4275	amdgpu_reg_state_sysfs_init(adev);
4276
4277	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
4278		r = amdgpu_pmu_init(adev);
4279		if (r)
4280			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
4281	}
4282	/* Keep the stored PCI config space at hand to restore after a sudden PCI error */
4283	if (amdgpu_device_cache_pci_state(adev->pdev))
4284		pci_restore_state(pdev);
4285
4286	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
4287	/* this will fail for cards that aren't VGA class devices, just
4288	 * ignore it
4289	 */
4290	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4291		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4292
4293	px = amdgpu_device_supports_px(ddev);
4294
4295	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4296				apple_gmux_detect(NULL, NULL)))
4297		vga_switcheroo_register_client(adev->pdev,
4298					       &amdgpu_switcheroo_ops, px);
4299
4300	if (px)
4301		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4302
4303	if (adev->gmc.xgmi.pending_reset)
4304		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
4305				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4306
4307	amdgpu_device_check_iommu_direct_map(adev);
4308
4309	return 0;
4310
4311release_ras_con:
4312	if (amdgpu_sriov_vf(adev))
4313		amdgpu_virt_release_full_gpu(adev, true);
4314
4315	/* failed in exclusive mode due to timeout */
4316	if (amdgpu_sriov_vf(adev) &&
4317		!amdgpu_sriov_runtime(adev) &&
4318		amdgpu_virt_mmio_blocked(adev) &&
4319		!amdgpu_virt_wait_reset(adev)) {
4320		dev_err(adev->dev, "VF exclusive mode timeout\n");
4321		/* Don't send request since VF is inactive. */
4322		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4323		adev->virt.ops = NULL;
4324		r = -EAGAIN;
4325	}
4326	amdgpu_release_ras_context(adev);
4327
4328failed:
4329	amdgpu_vf_error_trans_all(adev);
4330
4331	return r;
4332}
4333
4334static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4335{
4336
4337	/* Clear all CPU mappings pointing to this device */
4338	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4339
4340	/* Unmap all mapped bars - Doorbell, registers and VRAM */
4341	amdgpu_doorbell_fini(adev);
4342
4343	iounmap(adev->rmmio);
4344	adev->rmmio = NULL;
4345	if (adev->mman.aper_base_kaddr)
4346		iounmap(adev->mman.aper_base_kaddr);
4347	adev->mman.aper_base_kaddr = NULL;
4348
4349	/* Memory manager related */
4350	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4351		arch_phys_wc_del(adev->gmc.vram_mtrr);
4352		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4353	}
4354}
4355
4356/**
4357 * amdgpu_device_fini_hw - tear down the driver
4358 *
4359 * @adev: amdgpu_device pointer
4360 *
4361 * Tear down the driver info (all asics).
4362 * Called at driver shutdown.
4363 */
4364void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4365{
4366	dev_info(adev->dev, "amdgpu: finishing device.\n");
4367	flush_delayed_work(&adev->delayed_init_work);
4368	adev->shutdown = true;
4369
4370	/* make sure IB test finished before entering exclusive mode
4371	 * to avoid preemption on IB test
4372	 */
4373	if (amdgpu_sriov_vf(adev)) {
4374		amdgpu_virt_request_full_gpu(adev, false);
4375		amdgpu_virt_fini_data_exchange(adev);
4376	}
4377
4378	/* disable all interrupts */
4379	amdgpu_irq_disable_all(adev);
4380	if (adev->mode_info.mode_config_initialized) {
4381		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4382			drm_helper_force_disable_all(adev_to_drm(adev));
4383		else
4384			drm_atomic_helper_shutdown(adev_to_drm(adev));
4385	}
4386	amdgpu_fence_driver_hw_fini(adev);
4387
4388	if (adev->mman.initialized)
4389		drain_workqueue(adev->mman.bdev.wq);
4390
4391	if (adev->pm.sysfs_initialized)
4392		amdgpu_pm_sysfs_fini(adev);
4393	if (adev->ucode_sysfs_en)
4394		amdgpu_ucode_sysfs_fini(adev);
4395	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4396	amdgpu_fru_sysfs_fini(adev);
4397
4398	amdgpu_reg_state_sysfs_fini(adev);
4399
4400	/* disable ras feature must before hw fini */
4401	amdgpu_ras_pre_fini(adev);
4402
4403	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4404
4405	amdgpu_device_ip_fini_early(adev);
4406
4407	amdgpu_irq_fini_hw(adev);
4408
4409	if (adev->mman.initialized)
4410		ttm_device_clear_dma_mappings(&adev->mman.bdev);
4411
4412	amdgpu_gart_dummy_page_fini(adev);
4413
4414	if (drm_dev_is_unplugged(adev_to_drm(adev)))
4415		amdgpu_device_unmap_mmio(adev);
4416
4417}
4418
4419void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4420{
4421	int idx;
4422	bool px;
4423
4424	amdgpu_fence_driver_sw_fini(adev);
4425	amdgpu_device_ip_fini(adev);
4426	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4427	adev->accel_working = false;
4428	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4429
4430	amdgpu_reset_fini(adev);
4431
4432	/* free i2c buses */
4433	if (!amdgpu_device_has_dc_support(adev))
4434		amdgpu_i2c_fini(adev);
4435
4436	if (amdgpu_emu_mode != 1)
4437		amdgpu_atombios_fini(adev);
4438
4439	kfree(adev->bios);
4440	adev->bios = NULL;
4441
4442	kfree(adev->fru_info);
4443	adev->fru_info = NULL;
4444
4445	px = amdgpu_device_supports_px(adev_to_drm(adev));
4446
4447	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4448				apple_gmux_detect(NULL, NULL)))
4449		vga_switcheroo_unregister_client(adev->pdev);
4450
4451	if (px)
4452		vga_switcheroo_fini_domain_pm_ops(adev->dev);
4453
4454	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4455		vga_client_unregister(adev->pdev);
4456
4457	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4458
4459		iounmap(adev->rmmio);
4460		adev->rmmio = NULL;
4461		amdgpu_doorbell_fini(adev);
4462		drm_dev_exit(idx);
4463	}
4464
4465	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4466		amdgpu_pmu_fini(adev);
4467	if (adev->mman.discovery_bin)
4468		amdgpu_discovery_fini(adev);
4469
4470	amdgpu_reset_put_reset_domain(adev->reset_domain);
4471	adev->reset_domain = NULL;
4472
4473	kfree(adev->pci_state);
4474
4475}
4476
4477/**
4478 * amdgpu_device_evict_resources - evict device resources
4479 * @adev: amdgpu device object
4480 *
4481 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4482 * of the vram memory type. Mainly used for evicting device resources
4483 * at suspend time.
4484 *
4485 */
4486static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4487{
4488	int ret;
4489
4490	/* No need to evict vram on APUs for suspend to ram or s2idle */
4491	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4492		return 0;
4493
4494	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4495	if (ret)
4496		DRM_WARN("evicting device resources failed\n");
4497	return ret;
4498}
4499
4500/*
4501 * Suspend & resume.
4502 */
4503/**
4504 * amdgpu_device_prepare - prepare for device suspend
4505 *
4506 * @dev: drm dev pointer
4507 *
4508 * Prepare to put the hw in the suspend state (all asics).
4509 * Returns 0 for success or an error on failure.
4510 * Called at driver suspend.
4511 */
4512int amdgpu_device_prepare(struct drm_device *dev)
4513{
4514	struct amdgpu_device *adev = drm_to_adev(dev);
4515	int i, r;
4516
4517	amdgpu_choose_low_power_state(adev);
4518
4519	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4520		return 0;
4521
4522	/* Evict the majority of BOs before starting suspend sequence */
4523	r = amdgpu_device_evict_resources(adev);
4524	if (r)
4525		goto unprepare;
4526
4527	for (i = 0; i < adev->num_ip_blocks; i++) {
4528		if (!adev->ip_blocks[i].status.valid)
4529			continue;
4530		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
4531			continue;
4532		r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
4533		if (r)
4534			goto unprepare;
4535	}
4536
4537	return 0;
4538
4539unprepare:
4540	adev->in_s0ix = adev->in_s3 = false;
4541
4542	return r;
4543}
4544
4545/**
4546 * amdgpu_device_suspend - initiate device suspend
4547 *
4548 * @dev: drm dev pointer
4549 * @fbcon: notify the fbdev of suspend
4550 *
4551 * Puts the hw in the suspend state (all asics).
4552 * Returns 0 for success or an error on failure.
4553 * Called at driver suspend.
4554 */
4555int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4556{
4557	struct amdgpu_device *adev = drm_to_adev(dev);
4558	int r = 0;
4559
4560	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4561		return 0;
4562
4563	adev->in_suspend = true;
4564
4565	if (amdgpu_sriov_vf(adev)) {
4566		amdgpu_virt_fini_data_exchange(adev);
4567		r = amdgpu_virt_request_full_gpu(adev, false);
4568		if (r)
4569			return r;
4570	}
4571
4572	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4573		DRM_WARN("smart shift update failed\n");
4574
4575	if (fbcon)
4576		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4577
4578	cancel_delayed_work_sync(&adev->delayed_init_work);
4579
4580	amdgpu_ras_suspend(adev);
4581
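	/* IP suspend phase 1 handles the display IPs; the remaining blocks
	 * are suspended in phase 2 below, after resources have been evicted
	 * and the fence driver stopped.
	 */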
4582	amdgpu_device_ip_suspend_phase1(adev);
4583
4584	if (!adev->in_s0ix)
4585		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4586
4587	r = amdgpu_device_evict_resources(adev);
4588	if (r)
4589		return r;
4590
4591	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4592
4593	amdgpu_fence_driver_hw_fini(adev);
4594
4595	amdgpu_device_ip_suspend_phase2(adev);
4596
4597	if (amdgpu_sriov_vf(adev))
4598		amdgpu_virt_release_full_gpu(adev, false);
4599
4600	r = amdgpu_dpm_notify_rlc_state(adev, false);
4601	if (r)
4602		return r;
4603
4604	return 0;
4605}
4606
4607/**
4608 * amdgpu_device_resume - initiate device resume
4609 *
4610 * @dev: drm dev pointer
4611 * @fbcon: notify the fbdev of resume
4612 *
4613 * Bring the hw back to operating state (all asics).
4614 * Returns 0 for success or an error on failure.
4615 * Called at driver resume.
4616 */
4617int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4618{
4619	struct amdgpu_device *adev = drm_to_adev(dev);
4620	int r = 0;
4621
4622	if (amdgpu_sriov_vf(adev)) {
4623		r = amdgpu_virt_request_full_gpu(adev, true);
4624		if (r)
4625			return r;
4626	}
4627
4628	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4629		return 0;
4630
4631	if (adev->in_s0ix)
4632		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4633
4634	/* post card */
4635	if (amdgpu_device_need_post(adev)) {
4636		r = amdgpu_device_asic_init(adev);
4637		if (r)
4638			dev_err(adev->dev, "amdgpu asic init failed\n");
4639	}
4640
4641	r = amdgpu_device_ip_resume(adev);
4642
4643	if (r) {
4644		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4645		goto exit;
4646	}
4647	amdgpu_fence_driver_hw_init(adev);
4648
4649	if (!adev->in_s0ix) {
4650		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4651		if (r)
4652			goto exit;
4653	}
4654
4655	r = amdgpu_device_ip_late_init(adev);
4656	if (r)
4657		goto exit;
4658
4659	queue_delayed_work(system_wq, &adev->delayed_init_work,
4660			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4661exit:
4662	if (amdgpu_sriov_vf(adev)) {
4663		amdgpu_virt_init_data_exchange(adev);
4664		amdgpu_virt_release_full_gpu(adev, true);
4665	}
4666
4667	if (r)
4668		return r;
4669
4670	/* Make sure IB tests flushed */
4671	flush_delayed_work(&adev->delayed_init_work);
4672
4673	if (fbcon)
4674		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4675
4676	amdgpu_ras_resume(adev);
4677
4678	if (adev->mode_info.num_crtc) {
4679		/*
4680		 * Most of the connector probing functions try to acquire runtime pm
4681		 * refs to ensure that the GPU is powered on when connector polling is
4682		 * performed. Since we're calling this from a runtime PM callback,
4683		 * trying to acquire rpm refs will cause us to deadlock.
4684		 *
4685		 * Since we're guaranteed to be holding the rpm lock, it's safe to
4686		 * temporarily disable the rpm helpers so this doesn't deadlock us.
4687		 */
4688#ifdef CONFIG_PM
4689		dev->dev->power.disable_depth++;
4690#endif
4691		if (!adev->dc_enabled)
4692			drm_helper_hpd_irq_event(dev);
4693		else
4694			drm_kms_helper_hotplug_event(dev);
4695#ifdef CONFIG_PM
4696		dev->dev->power.disable_depth--;
4697#endif
4698	}
4699	adev->in_suspend = false;
4700
4701	if (adev->enable_mes)
4702		amdgpu_mes_self_test(adev);
4703
4704	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4705		DRM_WARN("smart shift update failed\n");
4706
4707	return 0;
4708}
4709
4710/**
4711 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4712 *
4713 * @adev: amdgpu_device pointer
4714 *
4715 * The list of all the hardware IPs that make up the asic is walked and
4716 * the check_soft_reset callbacks are run.  check_soft_reset determines
4717 * if the asic is still hung or not.
4718 * Returns true if any of the IPs are still in a hung state, false if not.
4719 */
4720static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4721{
4722	int i;
4723	bool asic_hang = false;
4724
4725	if (amdgpu_sriov_vf(adev))
4726		return true;
4727
4728	if (amdgpu_asic_need_full_reset(adev))
4729		return true;
4730
4731	for (i = 0; i < adev->num_ip_blocks; i++) {
4732		if (!adev->ip_blocks[i].status.valid)
4733			continue;
4734		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4735			adev->ip_blocks[i].status.hang =
4736				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4737		if (adev->ip_blocks[i].status.hang) {
4738			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4739			asic_hang = true;
4740		}
4741	}
4742	return asic_hang;
4743}
4744
4745/**
4746 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4747 *
4748 * @adev: amdgpu_device pointer
4749 *
4750 * The list of all the hardware IPs that make up the asic is walked and the
4751 * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4752 * handles any IP specific hardware or software state changes that are
4753 * necessary for a soft reset to succeed.
4754 * Returns 0 on success, negative error code on failure.
4755 */
4756static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4757{
4758	int i, r = 0;
4759
4760	for (i = 0; i < adev->num_ip_blocks; i++) {
4761		if (!adev->ip_blocks[i].status.valid)
4762			continue;
4763		if (adev->ip_blocks[i].status.hang &&
4764		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4765			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4766			if (r)
4767				return r;
4768		}
4769	}
4770
4771	return 0;
4772}
4773
4774/**
4775 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4776 *
4777 * @adev: amdgpu_device pointer
4778 *
4779 * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4780 * reset is necessary to recover.
4781 * Returns true if a full asic reset is required, false if not.
4782 */
4783static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4784{
4785	int i;
4786
4787	if (amdgpu_asic_need_full_reset(adev))
4788		return true;
4789
4790	for (i = 0; i < adev->num_ip_blocks; i++) {
4791		if (!adev->ip_blocks[i].status.valid)
4792			continue;
4793		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4794		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4795		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4796		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4797		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4798			if (adev->ip_blocks[i].status.hang) {
4799				dev_info(adev->dev, "Some blocks need a full reset!\n");
4800				return true;
4801			}
4802		}
4803	}
4804	return false;
4805}
4806
4807/**
4808 * amdgpu_device_ip_soft_reset - do a soft reset
4809 *
4810 * @adev: amdgpu_device pointer
4811 *
4812 * The list of all the hardware IPs that make up the asic is walked and the
4813 * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4814 * IP specific hardware or software state changes that are necessary to soft
4815 * reset the IP.
4816 * Returns 0 on success, negative error code on failure.
4817 */
4818static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4819{
4820	int i, r = 0;
4821
4822	for (i = 0; i < adev->num_ip_blocks; i++) {
4823		if (!adev->ip_blocks[i].status.valid)
4824			continue;
4825		if (adev->ip_blocks[i].status.hang &&
4826		    adev->ip_blocks[i].version->funcs->soft_reset) {
4827			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4828			if (r)
4829				return r;
4830		}
4831	}
4832
4833	return 0;
4834}
4835
4836/**
4837 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4838 *
4839 * @adev: amdgpu_device pointer
4840 *
4841 * The list of all the hardware IPs that make up the asic is walked and the
4842 * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4843 * handles any IP specific hardware or software state changes that are
4844 * necessary after the IP has been soft reset.
4845 * Returns 0 on success, negative error code on failure.
4846 */
4847static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4848{
4849	int i, r = 0;
4850
4851	for (i = 0; i < adev->num_ip_blocks; i++) {
4852		if (!adev->ip_blocks[i].status.valid)
4853			continue;
4854		if (adev->ip_blocks[i].status.hang &&
4855		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4856			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4857		if (r)
4858			return r;
4859	}
4860
4861	return 0;
4862}
4863
4864/**
4865 * amdgpu_device_recover_vram - Recover some VRAM contents
4866 *
4867 * @adev: amdgpu_device pointer
4868 *
4869 * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4870 * restore things like GPUVM page tables after a GPU reset where
4871 * the contents of VRAM might be lost.
4872 *
4873 * Returns:
4874 * 0 on success, negative error code on failure.
4875 */
4876static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4877{
4878	struct dma_fence *fence = NULL, *next = NULL;
4879	struct amdgpu_bo *shadow;
4880	struct amdgpu_bo_vm *vmbo;
4881	long r = 1, tmo;
4882
4883	if (amdgpu_sriov_runtime(adev))
4884		tmo = msecs_to_jiffies(8000);
4885	else
4886		tmo = msecs_to_jiffies(100);
4887
4888	dev_info(adev->dev, "recover vram bo from shadow start\n");
4889	mutex_lock(&adev->shadow_list_lock);
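	/* Restores are pipelined: the next copy is issued before waiting on
	 * the previous fence, keeping the transfer queue busy.
	 */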
4890	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4891		/* If vm is compute context or adev is APU, shadow will be NULL */
4892		if (!vmbo->shadow)
4893			continue;
4894		shadow = vmbo->shadow;
4895
4896		/* No need to recover an evicted BO */
4897		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4898		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4899		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4900			continue;
4901
4902		r = amdgpu_bo_restore_shadow(shadow, &next);
4903		if (r)
4904			break;
4905
4906		if (fence) {
4907			tmo = dma_fence_wait_timeout(fence, false, tmo);
4908			dma_fence_put(fence);
4909			fence = next;
4910			if (tmo == 0) {
4911				r = -ETIMEDOUT;
4912				break;
4913			} else if (tmo < 0) {
4914				r = tmo;
4915				break;
4916			}
4917		} else {
4918			fence = next;
4919		}
4920	}
4921	mutex_unlock(&adev->shadow_list_lock);
4922
4923	if (fence)
4924		tmo = dma_fence_wait_timeout(fence, false, tmo);
4925	dma_fence_put(fence);
4926
4927	if (r < 0 || tmo <= 0) {
4928		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4929		return -EIO;
4930	}
4931
4932	dev_info(adev->dev, "recover vram bo from shadow done\n");
4933	return 0;
4934}
4935
4936
4937/**
4938 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4939 *
4940 * @adev: amdgpu_device pointer
4941 * @from_hypervisor: request from hypervisor
4942 *
4943 * Do a VF FLR and reinitialize the ASIC.
4944 * Returns 0 on success, negative error code on failure.
4945 */
4946static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4947				     bool from_hypervisor)
4948{
4949	int r;
4950	struct amdgpu_hive_info *hive = NULL;
4951	int retry_limit = 0;
4952
4953retry:
4954	amdgpu_amdkfd_pre_reset(adev);
4955
4956	if (from_hypervisor)
4957		r = amdgpu_virt_request_full_gpu(adev, true);
4958	else
4959		r = amdgpu_virt_reset_gpu(adev);
4960	if (r)
4961		return r;
4962	amdgpu_irq_gpu_reset_resume_helper(adev);
4963
4964	/* some sw clean up VF needs to do before recover */
4965	amdgpu_virt_post_reset(adev);
4966
4967	/* Resume IP prior to SMC */
4968	r = amdgpu_device_ip_reinit_early_sriov(adev);
4969	if (r)
4970		goto error;
4971
4972	amdgpu_virt_init_data_exchange(adev);
4973
4974	r = amdgpu_device_fw_loading(adev);
4975	if (r)
4976		return r;
4977
4978	/* now we are okay to resume SMC/CP/SDMA */
4979	r = amdgpu_device_ip_reinit_late_sriov(adev);
4980	if (r)
4981		goto error;
4982
4983	hive = amdgpu_get_xgmi_hive(adev);
4984	/* Update PSP FW topology after reset */
4985	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4986		r = amdgpu_xgmi_update_topology(hive, adev);
4987
4988	if (hive)
4989		amdgpu_put_xgmi_hive(hive);
4990
4991	if (!r) {
4992		r = amdgpu_ib_ring_tests(adev);
4993
4994		amdgpu_amdkfd_post_reset(adev);
4995	}
4996
4997error:
4998	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4999		amdgpu_inc_vram_lost(adev);
5000		r = amdgpu_device_recover_vram(adev);
5001	}
5002	amdgpu_virt_release_full_gpu(adev, true);
5003
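	/* -EBUSY, -ETIMEDOUT and -EINVAL are considered transient under
	 * SR-IOV, so retry the whole FLR up to AMDGPU_MAX_RETRY_LIMIT times.
	 */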
5004	if (AMDGPU_RETRY_SRIOV_RESET(r)) {
5005		if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
5006			retry_limit++;
5007			goto retry;
5008		} else
5009			DRM_ERROR("GPU reset retry is beyond the retry limit\n");
5010	}
5011
5012	return r;
5013}
5014
5015/**
5016 * amdgpu_device_has_job_running - check if there is any job in the pending list
5017 *
5018 * @adev: amdgpu_device pointer
5019 *
5020 * check if there is any job in the pending list
5021 */
5022bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
5023{
5024	int i;
5025	struct drm_sched_job *job;
5026
5027	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5028		struct amdgpu_ring *ring = adev->rings[i];
5029
5030		if (!amdgpu_ring_sched_ready(ring))
5031			continue;
5032
5033		spin_lock(&ring->sched.job_list_lock);
5034		job = list_first_entry_or_null(&ring->sched.pending_list,
5035					       struct drm_sched_job, list);
5036		spin_unlock(&ring->sched.job_list_lock);
5037		if (job)
5038			return true;
5039	}
5040	return false;
5041}
5042
5043/**
5044 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
5045 *
5046 * @adev: amdgpu_device pointer
5047 *
5048 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
5049 * a hung GPU.
5050 */
5051bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
5052{
5053
5054	if (amdgpu_gpu_recovery == 0)
5055		goto disabled;
5056
5057	/* Skip soft reset check in fatal error mode */
5058	if (!amdgpu_ras_is_poison_mode_supported(adev))
5059		return true;
5060
5061	if (amdgpu_sriov_vf(adev))
5062		return true;
5063
5064	if (amdgpu_gpu_recovery == -1) {
5065		switch (adev->asic_type) {
5066#ifdef CONFIG_DRM_AMDGPU_SI
5067		case CHIP_VERDE:
5068		case CHIP_TAHITI:
5069		case CHIP_PITCAIRN:
5070		case CHIP_OLAND:
5071		case CHIP_HAINAN:
5072#endif
5073#ifdef CONFIG_DRM_AMDGPU_CIK
5074		case CHIP_KAVERI:
5075		case CHIP_KABINI:
5076		case CHIP_MULLINS:
5077#endif
5078		case CHIP_CARRIZO:
5079		case CHIP_STONEY:
5080		case CHIP_CYAN_SKILLFISH:
5081			goto disabled;
5082		default:
5083			break;
5084		}
5085	}
5086
5087	return true;
5088
5089disabled:
5090	dev_info(adev->dev, "GPU recovery disabled.\n");
5091	return false;
5092}
5093
5094int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
5095{
5096	u32 i;
5097	int ret = 0;
5098
5099	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
5100
5101	dev_info(adev->dev, "GPU mode1 reset\n");
5102
5103	/* disable BM */
5104	pci_clear_master(adev->pdev);
5105
5106	amdgpu_device_cache_pci_state(adev->pdev);
5107
5108	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
5109		dev_info(adev->dev, "GPU smu mode1 reset\n");
5110		ret = amdgpu_dpm_mode1_reset(adev);
5111	} else {
5112		dev_info(adev->dev, "GPU psp mode1 reset\n");
5113		ret = psp_gpu_reset(adev);
5114	}
5115
5116	if (ret)
5117		goto mode1_reset_failed;
5118
5119	amdgpu_device_load_pci_state(adev->pdev);
5120	ret = amdgpu_psp_wait_for_bootloader(adev);
5121	if (ret)
5122		goto mode1_reset_failed;
5123
5124	/* wait for asic to come out of reset */
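	/* register reads return 0xffffffff while the ASIC is still in reset */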
5125	for (i = 0; i < adev->usec_timeout; i++) {
5126		u32 memsize = adev->nbio.funcs->get_memsize(adev);
5127
5128		if (memsize != 0xffffffff)
5129			break;
5130		udelay(1);
5131	}
5132
5133	if (i >= adev->usec_timeout) {
5134		ret = -ETIMEDOUT;
5135		goto mode1_reset_failed;
5136	}
5137
5138	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
5139
5140	return 0;
5141
5142mode1_reset_failed:
5143	dev_err(adev->dev, "GPU mode1 reset failed\n");
5144	return ret;
5145}
5146
5147int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
5148				 struct amdgpu_reset_context *reset_context)
5149{
5150	int i, r = 0;
5151	struct amdgpu_job *job = NULL;
5152	bool need_full_reset =
5153		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5154
5155	if (reset_context->reset_req_dev == adev)
5156		job = reset_context->job;
5157
5158	if (amdgpu_sriov_vf(adev)) {
5159		/* stop the data exchange thread */
5160		amdgpu_virt_fini_data_exchange(adev);
5161	}
5162
5163	amdgpu_fence_driver_isr_toggle(adev, true);
5164
5165	/* block all schedulers and reset given job's ring */
5166	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5167		struct amdgpu_ring *ring = adev->rings[i];
5168
5169		if (!amdgpu_ring_sched_ready(ring))
5170			continue;
5171
5172		/* Clear the job fences from the fence driver so that
5173		 * force_completion doesn't leave NULL and vm flush fences behind.
5174		 */
5175		amdgpu_fence_driver_clear_job_fences(ring);
5176
5177		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
5178		amdgpu_fence_driver_force_completion(ring);
5179	}
5180
5181	amdgpu_fence_driver_isr_toggle(adev, false);
5182
5183	if (job && job->vm)
5184		drm_sched_increase_karma(&job->base);
5185
5186	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
5187	/* If reset handler not implemented, continue; otherwise return */
5188	if (r == -EOPNOTSUPP)
5189		r = 0;
5190	else
5191		return r;
5192
5193	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
5194	if (!amdgpu_sriov_vf(adev)) {
5195
5196		if (!need_full_reset)
5197			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
5198
5199		if (!need_full_reset && amdgpu_gpu_recovery &&
5200		    amdgpu_device_ip_check_soft_reset(adev)) {
5201			amdgpu_device_ip_pre_soft_reset(adev);
5202			r = amdgpu_device_ip_soft_reset(adev);
5203			amdgpu_device_ip_post_soft_reset(adev);
5204			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5205				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
5206				need_full_reset = true;
5207			}
5208		}
5209
5210		if (need_full_reset)
5211			r = amdgpu_device_ip_suspend(adev);
5212		if (need_full_reset)
5213			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5214		else
5215			clear_bit(AMDGPU_NEED_FULL_RESET,
5216				  &reset_context->flags);
5217	}
5218
5219	return r;
5220}
5221
5222static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
5223{
5224	int i;
5225
5226	lockdep_assert_held(&adev->reset_domain->sem);
5227
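	/* Snapshot the user-selected register list (configured via debugfs)
	 * so the values at reset time can be inspected afterwards.
	 */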
5228	for (i = 0; i < adev->reset_info.num_regs; i++) {
5229		adev->reset_info.reset_dump_reg_value[i] =
5230			RREG32(adev->reset_info.reset_dump_reg_list[i]);
5231
5232		trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i],
5233					     adev->reset_info.reset_dump_reg_value[i]);
5234	}
5235
5236	return 0;
5237}
5238
5239int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5240			 struct amdgpu_reset_context *reset_context)
5241{
5242	struct amdgpu_device *tmp_adev = NULL;
5243	bool need_full_reset, skip_hw_reset, vram_lost = false;
5244	int r = 0;
5245
5246	/* Try reset handler method first */
5247	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5248				    reset_list);
5249	amdgpu_reset_reg_dumps(tmp_adev);
5250
5251	reset_context->reset_device_list = device_list_handle;
5252	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5253	/* If reset handler not implemented, continue; otherwise return */
5254	if (r == -EOPNOTSUPP)
5255		r = 0;
5256	else
5257		return r;
5258
5259	/* Reset handler not implemented, use the default method */
5260	need_full_reset =
5261		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5262	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5263
5264	/*
5265	 * ASIC reset has to be done on all XGMI hive nodes ASAP
5266	 * to allow proper link negotiation in FW (within 1 sec)
5267	 */
5268	if (!skip_hw_reset && need_full_reset) {
5269		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5270			/* For XGMI run all resets in parallel to speed up the process */
5271			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5272				tmp_adev->gmc.xgmi.pending_reset = false;
5273				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
5274					r = -EALREADY;
5275			} else
5276				r = amdgpu_asic_reset(tmp_adev);
5277
5278			if (r) {
5279				dev_err(tmp_adev->dev, "ASIC reset failed with error %d for drm dev %s",
5280					 r, adev_to_drm(tmp_adev)->unique);
5281				goto out;
5282			}
5283		}
5284
5285		/* For XGMI wait for all resets to complete before proceed */
5286		if (!r) {
5287			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5288				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5289					flush_work(&tmp_adev->xgmi_reset_work);
5290					r = tmp_adev->asic_reset_res;
5291					if (r)
5292						break;
5293				}
5294			}
5295		}
5296	}
5297
5298	if (!r && amdgpu_ras_intr_triggered()) {
5299		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5300			amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB);
5301		}
5302
5303		amdgpu_ras_intr_cleared();
5304	}
5305
5306	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5307		if (need_full_reset) {
5308			/* post card */
5309			r = amdgpu_device_asic_init(tmp_adev);
5310			if (r) {
5311				dev_warn(tmp_adev->dev, "asic atom init failed!");
5312			} else {
5313				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5314
5315				r = amdgpu_device_ip_resume_phase1(tmp_adev);
5316				if (r)
5317					goto out;
5318
5319				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5320
5321				amdgpu_coredump(tmp_adev, vram_lost, reset_context);
5322
5323				if (vram_lost) {
5324					DRM_INFO("VRAM is lost due to GPU reset!\n");
5325					amdgpu_inc_vram_lost(tmp_adev);
5326				}
5327
5328				r = amdgpu_device_fw_loading(tmp_adev);
5329				if (r)
5330					return r;
5331
5332				r = amdgpu_xcp_restore_partition_mode(
5333					tmp_adev->xcp_mgr);
5334				if (r)
5335					goto out;
5336
5337				r = amdgpu_device_ip_resume_phase2(tmp_adev);
5338				if (r)
5339					goto out;
5340
5341				if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
5342					amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
5343
5344				if (vram_lost)
5345					amdgpu_device_fill_reset_magic(tmp_adev);
5346
5347				/*
5348				 * Add this ASIC back as tracked since the reset
5349				 * completed successfully.
5350				 */
5351				amdgpu_register_gpu_instance(tmp_adev);
5352
5353				if (!reset_context->hive &&
5354				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5355					amdgpu_xgmi_add_device(tmp_adev);
5356
5357				r = amdgpu_device_ip_late_init(tmp_adev);
5358				if (r)
5359					goto out;
5360
5361				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
5362
5363				/*
5364				 * The GPU enters a bad state once the number
5365				 * of faulty pages detected by ECC reaches the
5366				 * threshold, and RAS recovery is scheduled
5367				 * next. So check here and break recovery if
5368				 * the bad page threshold is indeed exceeded,
5369				 * reminding the user to retire this GPU or set
5370				 * a bigger bad_page_threshold before probing
5371				 * the driver again.
5372				 */
5373				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
5374					/* must succeed. */
5375					amdgpu_ras_resume(tmp_adev);
5376				} else {
5377					r = -EINVAL;
5378					goto out;
5379				}
5380
5381				/* Update PSP FW topology after reset */
5382				if (reset_context->hive &&
5383				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5384					r = amdgpu_xgmi_update_topology(
5385						reset_context->hive, tmp_adev);
5386			}
5387		}
5388
5389out:
5390		if (!r) {
5391			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5392			r = amdgpu_ib_ring_tests(tmp_adev);
5393			if (r) {
5394				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5395				need_full_reset = true;
5396				r = -EAGAIN;
5397				goto end;
5398			}
5399		}
5400
5401		if (!r)
5402			r = amdgpu_device_recover_vram(tmp_adev);
5403		else
5404			tmp_adev->asic_reset_res = r;
5405	}
5406
5407end:
5408	if (need_full_reset)
5409		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5410	else
5411		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5412	return r;
5413}
5414
5415static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5416{
5417
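	/* Note which flavor of reset is about to happen; the SMU (MP1) code
	 * paths consult this state while the reset is in flight.
	 */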
5418	switch (amdgpu_asic_reset_method(adev)) {
5419	case AMD_RESET_METHOD_MODE1:
5420		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5421		break;
5422	case AMD_RESET_METHOD_MODE2:
5423		adev->mp1_state = PP_MP1_STATE_RESET;
5424		break;
5425	default:
5426		adev->mp1_state = PP_MP1_STATE_NONE;
5427		break;
5428	}
5429}
5430
5431static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5432{
5433	amdgpu_vf_error_trans_all(adev);
5434	adev->mp1_state = PP_MP1_STATE_NONE;
5435}
5436
5437static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5438{
5439	struct pci_dev *p = NULL;
5440
5441	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5442			adev->pdev->bus->number, 1);
5443	if (p) {
5444		pm_runtime_enable(&(p->dev));
5445		pm_runtime_resume(&(p->dev));
5446	}
5447
5448	pci_dev_put(p);
5449}
5450
5451static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5452{
5453	enum amd_reset_method reset_method;
5454	struct pci_dev *p = NULL;
5455	u64 expires;
5456
5457	/*
5458	 * For now, only BACO and mode1 reset are confirmed
5459	 * to suffer the audio issue when not properly suspended.
5460	 */
5461	reset_method = amdgpu_asic_reset_method(adev);
5462	if ((reset_method != AMD_RESET_METHOD_BACO) &&
5463	     (reset_method != AMD_RESET_METHOD_MODE1))
5464		return -EINVAL;
5465
5466	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5467			adev->pdev->bus->number, 1);
5468	if (!p)
5469		return -ENODEV;
5470
5471	expires = pm_runtime_autosuspend_expiration(&(p->dev));
5472	if (!expires)
5473		/*
5474		 * If we cannot get the audio device autosuspend delay,
5475		 * a fixed 4s interval is used. Since 3s is the audio
5476		 * controller's default autosuspend delay setting, the
5477		 * 4s used here is guaranteed to cover it.
5478		 */
5479		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5480
5481	while (!pm_runtime_status_suspended(&(p->dev))) {
5482		if (!pm_runtime_suspend(&(p->dev)))
5483			break;
5484
5485		if (expires < ktime_get_mono_fast_ns()) {
5486			dev_warn(adev->dev, "failed to suspend display audio\n");
5487			pci_dev_put(p);
5488			/* TODO: abort the succeeding gpu reset? */
5489			return -ETIMEDOUT;
5490		}
5491	}
5492
5493	pm_runtime_disable(&(p->dev));
5494
5495	pci_dev_put(p);
5496	return 0;
5497}
5498
5499static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5500{
5501	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5502
5503#if defined(CONFIG_DEBUG_FS)
5504	if (!amdgpu_sriov_vf(adev))
5505		cancel_work(&adev->reset_work);
5506#endif
5507
5508	if (adev->kfd.dev)
5509		cancel_work(&adev->kfd.reset_work);
5510
5511	if (amdgpu_sriov_vf(adev))
5512		cancel_work(&adev->virt.flr_work);
5513
5514	if (con && adev->ras_enabled)
5515		cancel_work(&con->recovery_work);
5516
5517}
5518
5519/**
5520 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5521 *
5522 * @adev: amdgpu_device pointer
5523 * @job: the job that triggered the hang
5524 * @reset_context: amdgpu reset context pointer
5525 *
5526 * Attempt to reset the GPU if it has hung (all ASICs).
5527 * Attempt to do a soft reset or a full reset and reinitialize the ASIC.
5528 * Returns 0 for success or an error on failure.
5529 */
5530
5531int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5532			      struct amdgpu_job *job,
5533			      struct amdgpu_reset_context *reset_context)
5534{
5535	struct list_head device_list, *device_list_handle =  NULL;
5536	bool job_signaled = false;
5537	struct amdgpu_hive_info *hive = NULL;
5538	struct amdgpu_device *tmp_adev = NULL;
5539	int i, r = 0;
5540	bool need_emergency_restart = false;
5541	bool audio_suspended = false;
5542
5543	/*
5544	 * Special case: RAS triggered and full reset isn't supported
5545	 */
5546	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5547
5548	/*
5549	 * Flush RAM to disk so that after reboot
5550	 * the user can read the log and see why the system rebooted.
5551	 */
5552	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
5553		amdgpu_ras_get_context(adev)->reboot) {
5554		DRM_WARN("Emergency reboot.");
5555
5556		ksys_sync_helper();
5557		emergency_restart();
5558	}
5559
5560	dev_info(adev->dev, "GPU %s begin!\n",
5561		need_emergency_restart ? "jobs stop":"reset");
5562
5563	if (!amdgpu_sriov_vf(adev))
5564		hive = amdgpu_get_xgmi_hive(adev);
5565	if (hive)
5566		mutex_lock(&hive->hive_lock);
5567
5568	reset_context->job = job;
5569	reset_context->hive = hive;
5570	/*
5571	 * Build list of devices to reset.
5572	 * In case we are in XGMI hive mode, re-sort the device list
5573	 * to put adev in the 1st position.
5574	 */
5575	INIT_LIST_HEAD(&device_list);
5576	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5577		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5578			list_add_tail(&tmp_adev->reset_list, &device_list);
5579			if (adev->shutdown)
5580				tmp_adev->shutdown = true;
5581		}
5582		if (!list_is_first(&adev->reset_list, &device_list))
5583			list_rotate_to_front(&adev->reset_list, &device_list);
5584		device_list_handle = &device_list;
5585	} else {
5586		list_add_tail(&adev->reset_list, &device_list);
5587		device_list_handle = &device_list;
5588	}
5589
5590	/* We need to lock reset domain only once both for XGMI and single device */
5591	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5592				    reset_list);
5593	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5594
5595	/* block all schedulers and reset given job's ring */
5596	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5597
5598		amdgpu_device_set_mp1_state(tmp_adev);
5599
5600		/*
5601		 * Try to put the audio codec into suspend state
5602		 * before the gpu reset starts.
5603		 *
5604		 * Because the power domain of the graphics device
5605		 * is shared with the AZ power domain, without this
5606		 * we may change the audio hardware from behind
5607		 * the audio driver's back. That would trigger
5608		 * some audio codec errors.
5609		 */
5610		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5611			audio_suspended = true;
5612
5613		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5614
5615		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5616
5617		if (!amdgpu_sriov_vf(tmp_adev))
5618			amdgpu_amdkfd_pre_reset(tmp_adev);
5619
5620		/*
5621		 * Mark these ASICs to be reset as untracked first,
5622		 * and add them back after the reset completes.
5623		 */
5624		amdgpu_unregister_gpu_instance(tmp_adev);
5625
5626		drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5627
5628		/* disable ras on ALL IPs */
5629		if (!need_emergency_restart &&
5630		      amdgpu_device_ip_need_full_reset(tmp_adev))
5631			amdgpu_ras_suspend(tmp_adev);
5632
5633		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5634			struct amdgpu_ring *ring = tmp_adev->rings[i];
5635
5636			if (!amdgpu_ring_sched_ready(ring))
5637				continue;
5638
5639			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5640
5641			if (need_emergency_restart)
5642				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5643		}
5644		atomic_inc(&tmp_adev->gpu_reset_counter);
5645	}
5646
5647	if (need_emergency_restart)
5648		goto skip_sched_resume;
5649
5650	/*
5651	 * Must check guilty signal here since after this point all old
5652	 * HW fences are force signaled.
5653	 *
5654	 * job->base holds a reference to parent fence
5655	 */
5656	if (job && dma_fence_is_signaled(&job->hw_fence)) {
5657		job_signaled = true;
5658		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5659		goto skip_hw_reset;
5660	}
5661
5662retry:	/* Pre-ASIC reset for the rest of the adevs in the XGMI hive. */
5663	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5664		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5665		/* TODO: should we stop? */
5666		if (r) {
5667			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5668				  r, adev_to_drm(tmp_adev)->unique);
5669			tmp_adev->asic_reset_res = r;
5670		}
5671
5672		/*
5673		 * Drop all pending non-scheduler resets. Scheduler resets
5674		 * were already dropped during drm_sched_stop
5675		 */
5676		amdgpu_device_stop_pending_resets(tmp_adev);
5677	}
5678
5679	/* Actual ASIC resets if needed. */
5680	/* Host driver will handle XGMI hive reset for SRIOV */
5681	if (amdgpu_sriov_vf(adev)) {
5682		r = amdgpu_device_reset_sriov(adev, job ? false : true);
5683		if (r)
5684			adev->asic_reset_res = r;
5685
5686		/* Aldebaran and gfx_11_0_3 support RAS in SRIOV, so RAS needs to be resumed during reset */
5687		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
5688			    IP_VERSION(9, 4, 2) ||
5689		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
5690			amdgpu_ras_resume(adev);
5691	} else {
5692		r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5693		if (r && r == -EAGAIN)
5694			goto retry;
5695	}
5696
5697skip_hw_reset:
5698
5699	/* Post ASIC reset for all devs. */
5700	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5701
5702		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5703			struct amdgpu_ring *ring = tmp_adev->rings[i];
5704
5705			if (!amdgpu_ring_sched_ready(ring))
5706				continue;
5707
5708			drm_sched_start(&ring->sched, true);
5709		}
5710
5711		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
5712			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5713
5714		if (tmp_adev->asic_reset_res)
5715			r = tmp_adev->asic_reset_res;
5716
5717		tmp_adev->asic_reset_res = 0;
5718
5719		if (r) {
5720			/* bad news, how do we tell it to userspace? */
5721			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5722			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5723		} else {
5724			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5725			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5726				DRM_WARN("smart shift update failed\n");
5727		}
5728	}
5729
5730skip_sched_resume:
5731	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5732		/* unlock kfd: SRIOV would do it separately */
5733		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5734			amdgpu_amdkfd_post_reset(tmp_adev);
5735
5736		/* kfd_post_reset will do nothing if the kfd device is not initialized,
5737		 * need to bring up kfd here if it was not initialized before
5738		 */
5739		if (!adev->kfd.init_complete)
5740			amdgpu_amdkfd_device_init(adev);
5741
5742		if (audio_suspended)
5743			amdgpu_device_resume_display_audio(tmp_adev);
5744
5745		amdgpu_device_unset_mp1_state(tmp_adev);
5746
5747		amdgpu_ras_set_error_query_ready(tmp_adev, true);
5748	}
5749
5750	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5751					    reset_list);
5752	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5753
5754	if (hive) {
5755		mutex_unlock(&hive->hive_lock);
5756		amdgpu_put_xgmi_hive(hive);
5757	}
5758
5759	if (r)
5760		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5761
5762	atomic_set(&adev->reset_domain->reset_res, r);
5763	return r;
5764}
5765
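/*
 * Illustrative sketch (not part of the driver): a typical caller of
 * amdgpu_device_gpu_recover() from a hang handler. The helper name is
 * hypothetical; the reset_context setup mirrors the pattern used by the
 * job timeout handler in amdgpu_job.c, and the exact flags set may vary
 * by caller.
 */
static int __maybe_unused example_trigger_recovery(struct amdgpu_device *adev,
						   struct amdgpu_job *job)
{
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));
	reset_context.method = AMD_RESET_METHOD_NONE;	/* let the ASIC choose */
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	return amdgpu_device_gpu_recover(adev, job, &reset_context);
}
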
5766/**
5767 * amdgpu_device_partner_bandwidth - find the bandwidth of the appropriate partner
5768 *
5769 * @adev: amdgpu_device pointer
5770 * @speed: pointer to the speed of the link
5771 * @width: pointer to the width of the link
5772 *
5773 * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
5774 * first physical partner to an AMD dGPU.
5775 * This will exclude any virtual switches and links.
5776 */
5777static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
5778					    enum pci_bus_speed *speed,
5779					    enum pcie_link_width *width)
5780{
5781	struct pci_dev *parent = adev->pdev;
5782
5783	if (!speed || !width)
5784		return;
5785
5786	*speed = PCI_SPEED_UNKNOWN;
5787	*width = PCIE_LNK_WIDTH_UNKNOWN;
5788
5789	while ((parent = pci_upstream_bridge(parent))) {
5790		/* skip upstream/downstream switches internal to the dGPU */
5791		if (parent->vendor == PCI_VENDOR_ID_ATI)
5792			continue;
5793		*speed = pcie_get_speed_cap(parent);
5794		*width = pcie_get_width_cap(parent);
5795		break;
5796	}
5797}
5798
5799/**
5800 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
5801 *
5802 * @adev: amdgpu_device pointer
5803 *
5804 * Fetches and stores in the driver the PCIe capabilities (gen speed
5805 * and lanes) of the slot the device is in. Handles APUs and
5806 * virtualized environments where PCIE config space may not be available.
5807 */
5808static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5809{
5810	struct pci_dev *pdev;
5811	enum pci_bus_speed speed_cap, platform_speed_cap;
5812	enum pcie_link_width platform_link_width;
5813
5814	if (amdgpu_pcie_gen_cap)
5815		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5816
5817	if (amdgpu_pcie_lane_cap)
5818		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5819
5820	/* covers APUs as well */
5821	if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5822		if (adev->pm.pcie_gen_mask == 0)
5823			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5824		if (adev->pm.pcie_mlw_mask == 0)
5825			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5826		return;
5827	}
5828
5829	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5830		return;
5831
5832	amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
5833					&platform_link_width);
5834
5835	if (adev->pm.pcie_gen_mask == 0) {
5836		/* asic caps */
5837		pdev = adev->pdev;
5838		speed_cap = pcie_get_speed_cap(pdev);
5839		if (speed_cap == PCI_SPEED_UNKNOWN) {
5840			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5841						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5842						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5843		} else {
5844			if (speed_cap == PCIE_SPEED_32_0GT)
5845				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5846							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5847							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5848							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5849							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5850			else if (speed_cap == PCIE_SPEED_16_0GT)
5851				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5852							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5853							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5854							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5855			else if (speed_cap == PCIE_SPEED_8_0GT)
5856				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5857							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5858							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5859			else if (speed_cap == PCIE_SPEED_5_0GT)
5860				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5861							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5862			else
5863				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5864		}
5865		/* platform caps */
5866		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5867			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5868						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5869		} else {
5870			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5871				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5872							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5873							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5874							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5875							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5876			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5877				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5878							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5879							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5880							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5881			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5882				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5883							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5884							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5885			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5886				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5887							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5888			else
5889				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5890
5891		}
5892	}
5893	if (adev->pm.pcie_mlw_mask == 0) {
5894		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5895			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5896		} else {
5897			switch (platform_link_width) {
5898			case PCIE_LNK_X32:
5899				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5900							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5901							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5902							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5903							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5904							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5905							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5906				break;
5907			case PCIE_LNK_X16:
5908				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5909							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5910							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5911							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5912							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5913							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5914				break;
5915			case PCIE_LNK_X12:
5916				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5917							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5918							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5919							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5920							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5921				break;
5922			case PCIE_LNK_X8:
5923				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5924							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5925							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5926							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5927				break;
5928			case PCIE_LNK_X4:
5929				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5930							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5931							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5932				break;
5933			case PCIE_LNK_X2:
5934				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5935							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5936				break;
5937			case PCIE_LNK_X1:
5938				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5939				break;
5940			default:
5941				break;
5942			}
5943		}
5944	}
5945}
5946
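/*
 * Illustrative sketch (not part of the driver): decoding the highest
 * supported PCIe gen from the mask built above. The CAIL_* bits come
 * from amd_pcie.h; the helper name is hypothetical.
 */
static int __maybe_unused example_max_pcie_gen(struct amdgpu_device *adev)
{
	u32 mask = adev->pm.pcie_gen_mask;

	if (mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
		return 5;
	if (mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		return 4;
	if (mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		return 3;
	if (mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		return 2;
	return 1;
}
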
5947/**
5948 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5949 *
5950 * @adev: amdgpu_device pointer
5951 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5952 *
5953 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5954 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5955 * @peer_adev.
5956 */
5957bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5958				      struct amdgpu_device *peer_adev)
5959{
5960#ifdef CONFIG_HSA_AMD_P2P
5961	uint64_t address_mask = peer_adev->dev->dma_mask ?
5962		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5963	resource_size_t aper_limit =
5964		adev->gmc.aper_base + adev->gmc.aper_size - 1;
5965	bool p2p_access =
5966		!adev->gmc.xgmi.connected_to_cpu &&
5967		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5968
5969	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5970		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5971		!(adev->gmc.aper_base & address_mask ||
5972		  aper_limit & address_mask));
5973#else
5974	return false;
5975#endif
5976}
5977
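/*
 * Worked example for the check above (illustrative numbers): with a
 * peer DMA mask of 44 bits, address_mask = ~((1ULL << 44) - 1). A
 * 16 GB "large BAR" device (visible VRAM == real VRAM) whose aperture
 * starts at 0x4_0000_0000 has aper_limit = 0x7_ffff_ffff; neither
 * address has a bit set in address_mask, so peer access is reported
 * as possible, provided pcie_p2p is enabled and pci_p2pdma_distance()
 * finds a usable path.
 */
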
5978int amdgpu_device_baco_enter(struct drm_device *dev)
5979{
5980	struct amdgpu_device *adev = drm_to_adev(dev);
5981	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5982
5983	if (!amdgpu_device_supports_baco(dev))
5984		return -ENOTSUPP;
5985
5986	if (ras && adev->ras_enabled &&
5987	    adev->nbio.funcs->enable_doorbell_interrupt)
5988		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5989
5990	return amdgpu_dpm_baco_enter(adev);
5991}
5992
5993int amdgpu_device_baco_exit(struct drm_device *dev)
5994{
5995	struct amdgpu_device *adev = drm_to_adev(dev);
5996	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5997	int ret = 0;
5998
5999	if (!amdgpu_device_supports_baco(dev))
6000		return -ENOTSUPP;
6001
6002	ret = amdgpu_dpm_baco_exit(adev);
6003	if (ret)
6004		return ret;
6005
6006	if (ras && adev->ras_enabled &&
6007	    adev->nbio.funcs->enable_doorbell_interrupt)
6008		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
6009
6010	if (amdgpu_passthrough(adev) &&
6011	    adev->nbio.funcs->clear_doorbell_interrupt)
6012		adev->nbio.funcs->clear_doorbell_interrupt(adev);
6013
6014	return 0;
6015}
6016
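/*
 * Illustrative sketch (not part of the driver): BACO entry and exit are
 * used as a pair, e.g. around a runtime-suspended period. The function
 * name is hypothetical; the real pairing lives in the driver's runtime
 * PM callbacks.
 */
static int __maybe_unused example_baco_cycle(struct drm_device *dev)
{
	int r;

	r = amdgpu_device_baco_enter(dev);
	if (r)
		return r;

	/* ... device stays in BACO while idle ... */

	return amdgpu_device_baco_exit(dev);
}
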
6017/**
6018 * amdgpu_pci_error_detected - Called when a PCI error is detected.
6019 * @pdev: PCI device struct
6020 * @state: PCI channel state
6021 *
6022 * Description: Called when a PCI error is detected.
6023 *
6024 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
6025 */
6026pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6027{
6028	struct drm_device *dev = pci_get_drvdata(pdev);
6029	struct amdgpu_device *adev = drm_to_adev(dev);
6030	int i;
6031
6032	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
6033
6034	if (adev->gmc.xgmi.num_physical_nodes > 1) {
6035		DRM_WARN("No support for XGMI hive yet...");
6036		return PCI_ERS_RESULT_DISCONNECT;
6037	}
6038
6039	adev->pci_channel_state = state;
6040
6041	switch (state) {
6042	case pci_channel_io_normal:
6043		return PCI_ERS_RESULT_CAN_RECOVER;
6044	/* Fatal error, prepare for slot reset */
6045	case pci_channel_io_frozen:
6046		/*
6047		 * Locking adev->reset_domain->sem will prevent any external access
6048		 * to GPU during PCI error recovery
6049		 */
6050		amdgpu_device_lock_reset_domain(adev->reset_domain);
6051		amdgpu_device_set_mp1_state(adev);
6052
6053		/*
6054		 * Block any work scheduling as we do for regular GPU reset
6055		 * for the duration of the recovery
6056		 */
6057		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6058			struct amdgpu_ring *ring = adev->rings[i];
6059
6060			if (!amdgpu_ring_sched_ready(ring))
6061				continue;
6062
6063			drm_sched_stop(&ring->sched, NULL);
6064		}
6065		atomic_inc(&adev->gpu_reset_counter);
6066		return PCI_ERS_RESULT_NEED_RESET;
6067	case pci_channel_io_perm_failure:
6068		/* Permanent error, prepare for device removal */
6069		return PCI_ERS_RESULT_DISCONNECT;
6070	}
6071
6072	return PCI_ERS_RESULT_NEED_RESET;
6073}
6074
6075/**
6076 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
6077 * @pdev: pointer to PCI device
6078 */
6079pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
6080{
6081
6082	DRM_INFO("PCI error: mmio enabled callback!!\n");
6083
6084	/* TODO - dump whatever for debugging purposes */
6085
6086	/* This is called only if amdgpu_pci_error_detected returns
6087	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
6088	 * works, no need to reset slot.
6089	 */
6090
6091	return PCI_ERS_RESULT_RECOVERED;
6092}
6093
6094/**
6095 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
6096 * @pdev: PCI device struct
6097 *
6098 * Description: This routine is called by the pci error recovery
6099 * code after the PCI slot has been reset, just before we
6100 * should resume normal operations.
6101 */
6102pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
6103{
6104	struct drm_device *dev = pci_get_drvdata(pdev);
6105	struct amdgpu_device *adev = drm_to_adev(dev);
6106	int r, i;
6107	struct amdgpu_reset_context reset_context;
6108	u32 memsize;
6109	struct list_head device_list;
6110
6111	DRM_INFO("PCI error: slot reset callback!!\n");
6112
6113	memset(&reset_context, 0, sizeof(reset_context));
6114
6115	INIT_LIST_HEAD(&device_list);
6116	list_add_tail(&adev->reset_list, &device_list);
6117
6118	/* wait for asic to come out of reset */
6119	msleep(500);
6120
6121	/* Restore PCI config space */
6122	amdgpu_device_load_pci_state(pdev);
6123
6124	/* confirm ASIC came out of reset */
6125	for (i = 0; i < adev->usec_timeout; i++) {
6126		memsize = amdgpu_asic_get_config_memsize(adev);
6127
6128		if (memsize != 0xffffffff)
6129			break;
6130		udelay(1);
6131	}
6132	if (memsize == 0xffffffff) {
6133		r = -ETIME;
6134		goto out;
6135	}
6136
6137	reset_context.method = AMD_RESET_METHOD_NONE;
6138	reset_context.reset_req_dev = adev;
6139	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
6140	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
6141
6142	adev->no_hw_access = true;
6143	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
6144	adev->no_hw_access = false;
6145	if (r)
6146		goto out;
6147
6148	r = amdgpu_do_asic_reset(&device_list, &reset_context);
6149
6150out:
6151	if (!r) {
6152		if (amdgpu_device_cache_pci_state(adev->pdev))
6153			pci_restore_state(adev->pdev);
6154
6155		DRM_INFO("PCIe error recovery succeeded\n");
6156	} else {
6157		DRM_ERROR("PCIe error recovery failed, err:%d", r);
6158		amdgpu_device_unset_mp1_state(adev);
6159		amdgpu_device_unlock_reset_domain(adev->reset_domain);
6160	}
6161
6162	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
6163}
6164
6165/**
6166 * amdgpu_pci_resume() - resume normal ops after PCI reset
6167 * @pdev: pointer to PCI device
6168 *
6169 * Called when the error recovery driver tells us that it's
6170 * OK to resume normal operation.
6171 */
6172void amdgpu_pci_resume(struct pci_dev *pdev)
6173{
6174	struct drm_device *dev = pci_get_drvdata(pdev);
6175	struct amdgpu_device *adev = drm_to_adev(dev);
6176	int i;
6177
6178
6179	DRM_INFO("PCI error: resume callback!!\n");
6180
6181	/* Only continue execution for the case of pci_channel_io_frozen */
6182	if (adev->pci_channel_state != pci_channel_io_frozen)
6183		return;
6184
6185	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6186		struct amdgpu_ring *ring = adev->rings[i];
6187
6188		if (!amdgpu_ring_sched_ready(ring))
6189			continue;
6190
6191		drm_sched_start(&ring->sched, true);
6192	}
6193
6194	amdgpu_device_unset_mp1_state(adev);
6195	amdgpu_device_unlock_reset_domain(adev->reset_domain);
6196}
6197
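/*
 * Illustrative sketch (not part of this file): the four callbacks above
 * are handed to the PCI core through a struct pci_error_handlers that
 * the driver references from its struct pci_driver (amdgpu does this in
 * amdgpu_drv.c); the instance name here is hypothetical.
 */
static const struct pci_error_handlers example_pci_err_handler __maybe_unused = {
	.error_detected	= amdgpu_pci_error_detected,
	.mmio_enabled	= amdgpu_pci_mmio_enabled,
	.slot_reset	= amdgpu_pci_slot_reset,
	.resume		= amdgpu_pci_resume,
};
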
6198bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
6199{
6200	struct drm_device *dev = pci_get_drvdata(pdev);
6201	struct amdgpu_device *adev = drm_to_adev(dev);
6202	int r;
6203
6204	r = pci_save_state(pdev);
6205	if (!r) {
6206		kfree(adev->pci_state);
6207
6208		adev->pci_state = pci_store_saved_state(pdev);
6209
6210		if (!adev->pci_state) {
6211			DRM_ERROR("Failed to store PCI saved state");
6212			return false;
6213		}
6214	} else {
6215		DRM_WARN("Failed to save PCI state, err:%d\n", r);
6216		return false;
6217	}
6218
6219	return true;
6220}
6221
6222bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
6223{
6224	struct drm_device *dev = pci_get_drvdata(pdev);
6225	struct amdgpu_device *adev = drm_to_adev(dev);
6226	int r;
6227
6228	if (!adev->pci_state)
6229		return false;
6230
6231	r = pci_load_saved_state(pdev, adev->pci_state);
6232
6233	if (!r) {
6234		pci_restore_state(pdev);
6235	} else {
6236		DRM_WARN("Failed to load PCI state, err:%d\n", r);
6237		return false;
6238	}
6239
6240	return true;
6241}
6242
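/*
 * Illustrative sketch (not part of the driver): the two helpers above
 * are used as a pair around events that clobber PCI config space --
 * cache before the reset, load afterwards. The function name is
 * hypothetical.
 */
static bool __maybe_unused example_pci_state_roundtrip(struct amdgpu_device *adev)
{
	if (!amdgpu_device_cache_pci_state(adev->pdev))
		return false;

	/* ... ASIC reset clobbers config space here ... */

	return amdgpu_device_load_pci_state(adev->pdev);
}
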
6243void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6244		struct amdgpu_ring *ring)
6245{
6246#ifdef CONFIG_X86_64
6247	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6248		return;
6249#endif
6250	if (adev->gmc.xgmi.connected_to_cpu)
6251		return;
6252
6253	if (ring && ring->funcs->emit_hdp_flush)
6254		amdgpu_ring_emit_hdp_flush(ring);
6255	else
6256		amdgpu_asic_flush_hdp(adev, ring);
6257}
6258
6259void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6260		struct amdgpu_ring *ring)
6261{
6262#ifdef CONFIG_X86_64
6263	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6264		return;
6265#endif
6266	if (adev->gmc.xgmi.connected_to_cpu)
6267		return;
6268
6269	amdgpu_asic_invalidate_hdp(adev, ring);
6270}
6271
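/*
 * Illustrative sketch (not part of the driver): typical pairing of the
 * two HDP helpers -- flush after a CPU write through the BAR so the GPU
 * observes it, invalidate before a CPU read so stale HDP read-cache
 * contents are discarded. The function name is hypothetical.
 */
static u32 __maybe_unused example_hdp_cpu_access(struct amdgpu_device *adev,
						 u32 __iomem *vram, u32 v)
{
	writel(v, vram);			/* CPU write to VRAM via BAR */
	amdgpu_device_flush_hdp(adev, NULL);	/* make the write GPU-visible */

	amdgpu_device_invalidate_hdp(adev, NULL);
	return readl(vram);			/* read back GPU-coherent data */
}
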
6272int amdgpu_in_reset(struct amdgpu_device *adev)
6273{
6274	return atomic_read(&adev->reset_domain->in_gpu_reset);
6275}
6276
6277/**
6278 * amdgpu_device_halt() - bring hardware to some kind of halt state
6279 *
6280 * @adev: amdgpu_device pointer
6281 *
6282 * Bring hardware to some kind of halt state so that no one can touch it
6283 * any more. It helps to preserve the error context when an error occurs.
6284 * Compared to a simple hang, the system will stay stable at least for SSH
6285 * access. Then it should be trivial to inspect the hardware state and
6286 * see what's going on. Implemented as follows:
6287 *
6288 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc),
6289 *    clears all CPU mappings to device, disallows remappings through page faults
6290 * 2. amdgpu_irq_disable_all() disables all interrupts
6291 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
6292 * 4. set adev->no_hw_access to avoid potential crashes after step 5
6293 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
6294 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
6295 *    flush any in flight DMA operations
6296 */
6297void amdgpu_device_halt(struct amdgpu_device *adev)
6298{
6299	struct pci_dev *pdev = adev->pdev;
6300	struct drm_device *ddev = adev_to_drm(adev);
6301
6302	amdgpu_xcp_dev_unplug(adev);
6303	drm_dev_unplug(ddev);
6304
6305	amdgpu_irq_disable_all(adev);
6306
6307	amdgpu_fence_driver_hw_fini(adev);
6308
6309	adev->no_hw_access = true;
6310
6311	amdgpu_device_unmap_mmio(adev);
6312
6313	pci_disable_device(pdev);
6314	pci_wait_for_pending_transaction(pdev);
6315}
6316
6317u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
6318				u32 reg)
6319{
6320	unsigned long flags, address, data;
6321	u32 r;
6322
6323	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6324	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6325
6326	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6327	WREG32(address, reg * 4);
6328	(void)RREG32(address);
6329	r = RREG32(data);
6330	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6331	return r;
6332}
6333
6334void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6335				u32 reg, u32 v)
6336{
6337	unsigned long flags, address, data;
6338
6339	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6340	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6341
6342	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6343	WREG32(address, reg * 4);
6344	(void)RREG32(address);
6345	WREG32(data, v);
6346	(void)RREG32(data);
6347	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6348}
6349
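/*
 * Illustrative sketch (not part of the driver): the usual
 * read-modify-write pattern on top of the two indexed PCIe port
 * accessors above. The function name and field mask are hypothetical.
 */
static void __maybe_unused example_pcie_port_rmw(struct amdgpu_device *adev,
						 u32 reg, u32 mask, u32 val)
{
	u32 tmp = amdgpu_device_pcie_port_rreg(adev, reg);

	tmp &= ~mask;
	tmp |= val & mask;
	amdgpu_device_pcie_port_wreg(adev, reg, tmp);
}
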
6350/**
6351 * amdgpu_device_switch_gang - switch to a new gang
6352 * @adev: amdgpu_device pointer
6353 * @gang: the gang to switch to
6354 *
6355 * Try to switch to a new gang.
6356 * Returns: NULL if we switched to the new gang or a reference to the current
6357 * gang leader.
6358 */
6359struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6360					    struct dma_fence *gang)
6361{
6362	struct dma_fence *old = NULL;
6363
6364	do {
6365		dma_fence_put(old);
6366		rcu_read_lock();
6367		old = dma_fence_get_rcu_safe(&adev->gang_submit);
6368		rcu_read_unlock();
6369
6370		if (old == gang)
6371			break;
6372
6373		if (!dma_fence_is_signaled(old))
6374			return old;
6375
6376	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6377			 old, gang) != old);
6378
6379	dma_fence_put(old);
6380	return NULL;
6381}
6382
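/*
 * Illustrative sketch (not part of the driver): per the contract above,
 * a non-NULL return means the switch did not happen yet and the caller
 * must wait for the old gang leader before retrying. The function name
 * is hypothetical; real consumers typically add the returned fence as a
 * scheduler dependency instead of blocking.
 */
static void __maybe_unused example_gang_switch(struct amdgpu_device *adev,
					       struct dma_fence *gang)
{
	struct dma_fence *old;

	while ((old = amdgpu_device_switch_gang(adev, gang))) {
		dma_fence_wait(old, false);
		dma_fence_put(old);
	}
}
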
6383bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6384{
6385	switch (adev->asic_type) {
6386#ifdef CONFIG_DRM_AMDGPU_SI
6387	case CHIP_HAINAN:
6388#endif
6389	case CHIP_TOPAZ:
6390		/* chips with no display hardware */
6391		return false;
6392#ifdef CONFIG_DRM_AMDGPU_SI
6393	case CHIP_TAHITI:
6394	case CHIP_PITCAIRN:
6395	case CHIP_VERDE:
6396	case CHIP_OLAND:
6397#endif
6398#ifdef CONFIG_DRM_AMDGPU_CIK
6399	case CHIP_BONAIRE:
6400	case CHIP_HAWAII:
6401	case CHIP_KAVERI:
6402	case CHIP_KABINI:
6403	case CHIP_MULLINS:
6404#endif
6405	case CHIP_TONGA:
6406	case CHIP_FIJI:
6407	case CHIP_POLARIS10:
6408	case CHIP_POLARIS11:
6409	case CHIP_POLARIS12:
6410	case CHIP_VEGAM:
6411	case CHIP_CARRIZO:
6412	case CHIP_STONEY:
6413		/* chips with display hardware */
6414		return true;
6415	default:
6416		/* IP discovery */
6417		if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
6418		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6419			return false;
6420		return true;
6421	}
6422}
6423
6424uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
6425		uint32_t inst, uint32_t reg_addr, char reg_name[],
6426		uint32_t expected_value, uint32_t mask)
6427{
6428	uint32_t ret = 0;
6429	uint32_t old_ = 0;
6430	uint32_t tmp_ = RREG32(reg_addr);
6431	uint32_t loop = adev->usec_timeout;
6432
6433	while ((tmp_ & (mask)) != (expected_value)) {
6434		if (old_ != tmp_) {
6435			loop = adev->usec_timeout;
6436			old_ = tmp_;
6437		} else
6438			udelay(1);
6439		tmp_ = RREG32(reg_addr);
6440		loop--;
6441		if (!loop) {
6442			DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
6443				  inst, reg_name, (uint32_t)expected_value,
6444				  (uint32_t)(tmp_ & (mask)));
6445			ret = -ETIMEDOUT;
6446			break;
6447		}
6448	}
6449	return ret;
6450}
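/*
 * Illustrative sketch (not part of the driver): polling a status
 * register with the helper above. The register offset, name, and
 * "busy" bit are hypothetical.
 */
static int __maybe_unused example_wait_for_idle(struct amdgpu_device *adev,
						uint32_t status_reg)
{
	/* wait until the (hypothetical) busy bit 0 clears */
	return amdgpu_device_wait_on_rreg(adev, 0, status_reg,
					  "EXAMPLE_STATUS", 0x0, 0x1);
}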
v4.17
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
 
  28#include <linux/kthread.h>
 
  29#include <linux/console.h>
  30#include <linux/slab.h>
  31#include <drm/drmP.h>
 
 
 
 
 
 
  32#include <drm/drm_crtc_helper.h>
  33#include <drm/drm_atomic_helper.h>
 
  34#include <drm/amdgpu_drm.h>
 
  35#include <linux/vgaarb.h>
  36#include <linux/vga_switcheroo.h>
  37#include <linux/efi.h>
  38#include "amdgpu.h"
  39#include "amdgpu_trace.h"
  40#include "amdgpu_i2c.h"
  41#include "atom.h"
  42#include "amdgpu_atombios.h"
  43#include "amdgpu_atomfirmware.h"
  44#include "amd_pcie.h"
  45#ifdef CONFIG_DRM_AMDGPU_SI
  46#include "si.h"
  47#endif
  48#ifdef CONFIG_DRM_AMDGPU_CIK
  49#include "cik.h"
  50#endif
  51#include "vi.h"
  52#include "soc15.h"
 
  53#include "bif/bif_4_1_d.h"
  54#include <linux/pci.h>
  55#include <linux/firmware.h>
  56#include "amdgpu_vf_error.h"
  57
  58#include "amdgpu_amdkfd.h"
  59#include "amdgpu_pm.h"
  60
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  61MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
  62MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
  63MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
 
 
 
 
  64
  65#define AMDGPU_RESUME_MS		2000
 
 
  66
  67static const char *amdgpu_asic_name[] = {
 
 
  68	"TAHITI",
  69	"PITCAIRN",
  70	"VERDE",
  71	"OLAND",
  72	"HAINAN",
  73	"BONAIRE",
  74	"KAVERI",
  75	"KABINI",
  76	"HAWAII",
  77	"MULLINS",
  78	"TOPAZ",
  79	"TONGA",
  80	"FIJI",
  81	"CARRIZO",
  82	"STONEY",
  83	"POLARIS10",
  84	"POLARIS11",
  85	"POLARIS12",
 
  86	"VEGA10",
  87	"VEGA12",
 
  88	"RAVEN",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  89	"LAST",
  90};
  91
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  92static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
  93
 
  94/**
  95 * amdgpu_device_is_px - Is the device is a dGPU with HG/PX power control
  96 *
  97 * @dev: drm_device pointer
  98 *
  99 * Returns true if the device is a dGPU with HG/PX power control,
 100 * otherwise return false.
 101 */
 102bool amdgpu_device_is_px(struct drm_device *dev)
 103{
 104	struct amdgpu_device *adev = dev->dev_private;
 105
 106	if (adev->flags & AMD_IS_PX)
 107		return true;
 108	return false;
 109}
 110
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 111/*
 112 * MMIO register access helper functions.
 113 */
 
 114/**
 115 * amdgpu_mm_rreg - read a memory mapped IO register
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 116 *
 117 * @adev: amdgpu_device pointer
 118 * @reg: dword aligned register offset
 119 * @acc_flags: access flags which require special behavior
 120 *
 121 * Returns the 32 bit value from the offset specified.
 122 */
 123uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 124			uint32_t acc_flags)
 125{
 126	uint32_t ret;
 127
 128	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
 129		return amdgpu_virt_kiq_rreg(adev, reg);
 130
 131	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
 132		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 133	else {
 134		unsigned long flags;
 135
 136		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 137		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
 138		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 139		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 
 
 140	}
 141	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
 
 
 142	return ret;
 143}
 144
 145/*
 146 * MMIO register read with bytes helper functions
 147 * @offset:bytes offset from MMIO start
 148 *
 149*/
 150
 151/**
 152 * amdgpu_mm_rreg8 - read a memory mapped IO register
 153 *
 154 * @adev: amdgpu_device pointer
 155 * @offset: byte aligned register offset
 156 *
 157 * Returns the 8 bit value from the offset specified.
 158 */
 159uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
 
 
 
 
 160	if (offset < adev->rmmio_size)
 161		return (readb(adev->rmmio + offset));
 162	BUG();
 163}
 164
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 165/*
 166 * MMIO register write with bytes helper functions
 167 * @offset:bytes offset from MMIO start
 168 * @value: the value want to be written to the register
 169 *
 170*/
 171/**
 172 * amdgpu_mm_wreg8 - read a memory mapped IO register
 173 *
 174 * @adev: amdgpu_device pointer
 175 * @offset: byte aligned register offset
 176 * @value: 8 bit value to write
 177 *
 178 * Writes the value specified to the offset specified.
 179 */
 180void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
 
 
 
 
 181	if (offset < adev->rmmio_size)
 182		writeb(value, adev->rmmio + offset);
 183	else
 184		BUG();
 185}
 186
 187/**
 188 * amdgpu_mm_wreg - write to a memory mapped IO register
 189 *
 190 * @adev: amdgpu_device pointer
 191 * @reg: dword aligned register offset
 192 * @v: 32 bit value to write to the register
 193 * @acc_flags: access flags which require special behavior
 194 *
 195 * Writes the value specified to the offset specified.
 196 */
 197void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 198		    uint32_t acc_flags)
 
 199{
 200	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 
 201
 202	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
 203		adev->last_mm_index = v;
 
 
 
 
 
 
 
 
 
 204	}
 205
 206	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
 207		return amdgpu_virt_kiq_wreg(adev, reg, v);
 208
 209	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 210		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 211	else {
 212		unsigned long flags;
 213
 214		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 215		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
 216		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 217		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 218	}
 219
 220	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
 221		udelay(500);
 222	}
 223}
 224
 225/**
 226 * amdgpu_io_rreg - read an IO register
 227 *
 228 * @adev: amdgpu_device pointer
 229 * @reg: dword aligned register offset
 
 
 
 230 *
 231 * Returns the 32 bit value from the offset specified.
 232 */
 233u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
 
 
 234{
 235	if ((reg * 4) < adev->rio_mem_size)
 236		return ioread32(adev->rio_mem + (reg * 4));
 237	else {
 238		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
 239		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 240	}
 241}
 242
 243/**
 244 * amdgpu_io_wreg - write to an IO register
 245 *
 246 * @adev: amdgpu_device pointer
 247 * @reg: dword aligned register offset
 248 * @v: 32 bit value to write to the register
 249 *
 250 * Writes the value specified to the offset specified.
 251 */
 252void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 253{
 254	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
 255		adev->last_mm_index = v;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 256	}
 257
 258	if ((reg * 4) < adev->rio_mem_size)
 259		iowrite32(v, adev->rio_mem + (reg * 4));
 260	else {
 261		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
 262		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
 263	}
 264
 265	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
 266		udelay(500);
 267	}
 268}
 269
 270/**
 271 * amdgpu_mm_rdoorbell - read a doorbell dword
 272 *
 273 * @adev: amdgpu_device pointer
 274 * @index: doorbell index
 275 *
 276 * Returns the value in the doorbell aperture at the
 277 * requested doorbell index (CIK).
 278 */
 279u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 280{
 281	if (index < adev->doorbell.num_doorbells) {
 282		return readl(adev->doorbell.ptr + index);
 283	} else {
 284		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 285		return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 286	}
 
 
 
 
 287}
 288
 289/**
 290 * amdgpu_mm_wdoorbell - write a doorbell dword
 291 *
 292 * @adev: amdgpu_device pointer
 293 * @index: doorbell index
 294 * @v: value to write
 295 *
 296 * Writes @v to the doorbell aperture at the
 297 * requested doorbell index (CIK).
 298 */
 299void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
 
 300{
 301	if (index < adev->doorbell.num_doorbells) {
 302		writel(v, adev->doorbell.ptr + index);
 303	} else {
 304		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 305	}
 
 
 306}
 307
 308/**
 309 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 310 *
 311 * @adev: amdgpu_device pointer
 312 * @index: doorbell index
 
 313 *
 314 * Returns the value in the doorbell aperture at the
 315 * requested doorbell index (VEGA10+).
 316 */
 317u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
 
 318{
 319	if (index < adev->doorbell.num_doorbells) {
 320		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
 321	} else {
 322		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 323		return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 324	}
 
 
 325}
 326
 327/**
 328 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 329 *
 330 * @adev: amdgpu_device pointer
 331 * @index: doorbell index
 332 * @v: value to write
 333 *
 334 * Writes @v to the doorbell aperture at the
 335 * requested doorbell index (VEGA10+).
 336 */
 337void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
 338{
 339	if (index < adev->doorbell.num_doorbells) {
 340		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
 341	} else {
 342		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 343	}
 344}
 345
 346/**
 347 * amdgpu_invalid_rreg - dummy reg read function
 348 *
 349 * @adev: amdgpu device pointer
 350 * @reg: offset of register
 351 *
 352 * Dummy register read function.  Used for register blocks
 353 * that certain asics don't have (all asics).
 354 * Returns the value in the register.
 355 */
 356static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
 357{
 358	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
 359	BUG();
 360	return 0;
 361}
 362
 
 
 
 
 
 
 
 363/**
 364 * amdgpu_invalid_wreg - dummy reg write function
 365 *
 366 * @adev: amdgpu device pointer
 367 * @reg: offset of register
 368 * @v: value to write to the register
 369 *
 370 * Dummy register read function.  Used for register blocks
 371 * that certain asics don't have (all asics).
 372 */
 373static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 374{
 375	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
 376		  reg, v);
 377	BUG();
 378}
 379
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 380/**
 381 * amdgpu_block_invalid_rreg - dummy reg read function
 382 *
 383 * @adev: amdgpu device pointer
 384 * @block: offset of instance
 385 * @reg: offset of register
 386 *
 387 * Dummy register read function.  Used for register blocks
 388 * that certain asics don't have (all asics).
 389 * Returns the value in the register.
 390 */
 391static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
 392					  uint32_t block, uint32_t reg)
 393{
 394	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
 395		  reg, block);
 396	BUG();
 397	return 0;
 398}
 399
 400/**
 401 * amdgpu_block_invalid_wreg - dummy reg write function
 402 *
 403 * @adev: amdgpu device pointer
 404 * @block: offset of instance
 405 * @reg: offset of register
 406 * @v: value to write to the register
 407 *
 408 * Dummy register read function.  Used for register blocks
 409 * that certain asics don't have (all asics).
 410 */
 411static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
 412				      uint32_t block,
 413				      uint32_t reg, uint32_t v)
 414{
 415	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
 416		  reg, block, v);
 417	BUG();
 418}
 419
 420/**
 421 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 422 *
 423 * @adev: amdgpu device pointer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 424 *
 425 * Allocates a scratch page of VRAM for use by various things in the
 426 * driver.
 427 */
 428static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
 429{
 430	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
 431				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
 432				       &adev->vram_scratch.robj,
 433				       &adev->vram_scratch.gpu_addr,
 434				       (void **)&adev->vram_scratch.ptr);
 
 435}
 436
 437/**
 438 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 439 *
 440 * @adev: amdgpu device pointer
 441 *
 442 * Frees the VRAM scratch page.
 443 */
 444static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
 445{
 446	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
 447}
 448
 449/**
 450 * amdgpu_device_program_register_sequence - program an array of registers.
 451 *
 452 * @adev: amdgpu_device pointer
 453 * @registers: pointer to the register array
 454 * @array_size: size of the register array
 455 *
 456 * Programs an array or registers with and and or masks.
 457 * This is a helper for setting golden registers.
 458 */
 459void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
 460					     const u32 *registers,
 461					     const u32 array_size)
 462{
 463	u32 tmp, reg, and_mask, or_mask;
 464	int i;
 465
 466	if (array_size % 3)
 467		return;
 468
 469	for (i = 0; i < array_size; i +=3) {
 470		reg = registers[i + 0];
 471		and_mask = registers[i + 1];
 472		or_mask = registers[i + 2];
 473
 474		if (and_mask == 0xffffffff) {
 475			tmp = or_mask;
 476		} else {
 477			tmp = RREG32(reg);
 478			tmp &= ~and_mask;
 479			tmp |= or_mask;
 
 
 
 480		}
 481		WREG32(reg, tmp);
 482	}
 483}
 484
 485/**
 486 * amdgpu_device_pci_config_reset - reset the GPU
 487 *
 488 * @adev: amdgpu_device pointer
 489 *
 490 * Resets the GPU using the pci config reset sequence.
 491 * Only applicable to asics prior to vega10.
 492 */
 493void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
 494{
 495	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
 496}
 497
 498/*
 499 * GPU doorbell aperture helpers function.
 500 */
 501/**
 502 * amdgpu_device_doorbell_init - Init doorbell driver information.
 503 *
 504 * @adev: amdgpu_device pointer
 505 *
 506 * Init doorbell driver information (CIK)
 507 * Returns 0 on success, error on failure.
 508 */
 509static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
 510{
 511	/* No doorbell on SI hardware generation */
 512	if (adev->asic_type < CHIP_BONAIRE) {
 513		adev->doorbell.base = 0;
 514		adev->doorbell.size = 0;
 515		adev->doorbell.num_doorbells = 0;
 516		adev->doorbell.ptr = NULL;
 517		return 0;
 518	}
 519
 520	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
 521		return -EINVAL;
 522
 523	/* doorbell bar mapping */
 524	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
 525	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
 526
 527	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
 528					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
 529	if (adev->doorbell.num_doorbells == 0)
 530		return -EINVAL;
 531
 532	adev->doorbell.ptr = ioremap(adev->doorbell.base,
 533				     adev->doorbell.num_doorbells *
 534				     sizeof(u32));
 535	if (adev->doorbell.ptr == NULL)
 536		return -ENOMEM;
 537
 538	return 0;
 539}
 540
 541/**
 542 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 543 *
 544 * @adev: amdgpu_device pointer
 545 *
 546 * Tear down doorbell driver information (CIK)
 547 */
 548static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
 549{
 550	iounmap(adev->doorbell.ptr);
 551	adev->doorbell.ptr = NULL;
 552}
 553
 554
 555
 556/*
 557 * amdgpu_device_wb_*()
 558 * Writeback is the method by which the GPU updates special pages in memory
 559 * with the status of certain GPU events (fences, ring pointers,etc.).
 560 */
 561
 562/**
 563 * amdgpu_device_wb_fini - Disable Writeback and free memory
 564 *
 565 * @adev: amdgpu_device pointer
 566 *
 567 * Disables Writeback and frees the Writeback memory (all asics).
 568 * Used at driver shutdown.
 569 */
 570static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
 571{
 572	if (adev->wb.wb_obj) {
 573		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
 574				      &adev->wb.gpu_addr,
 575				      (void **)&adev->wb.wb);
 576		adev->wb.wb_obj = NULL;
 577	}
 578}
 579
 580/**
 581 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
 582 *
 583 * @adev: amdgpu_device pointer
 584 *
 585 * Initializes writeback and allocates writeback memory (all asics).
 586 * Used at driver startup.
 587 * Returns 0 on success or an -error on failure.
 588 */
 589static int amdgpu_device_wb_init(struct amdgpu_device *adev)
 590{
 591	int r;
 592
 593	if (adev->wb.wb_obj == NULL) {
 594		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
 595		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
 596					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
 597					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
 598					    (void **)&adev->wb.wb);
 599		if (r) {
 600			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
 601			return r;
 602		}
 603
 604		adev->wb.num_wb = AMDGPU_MAX_WB;
 605		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
 606
 607		/* clear wb memory */
 608		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
 609	}
 610
 611	return 0;
 612}
 613
 614/**
 615 * amdgpu_device_wb_get - Allocate a wb entry
 616 *
 617 * @adev: amdgpu_device pointer
 618 * @wb: wb index
 619 *
 620 * Allocate a wb slot for use by the driver (all asics).
 621 * Returns 0 on success or -EINVAL on failure.
 622 */
 623int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
 624{
 625	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
 626
 627	if (offset < adev->wb.num_wb) {
 628		__set_bit(offset, adev->wb.used);
 629		*wb = offset << 3; /* convert to dw offset */
 630		return 0;
 631	} else {
 632		return -EINVAL;
 633	}
 634}
 635
 636/**
 637 * amdgpu_device_wb_free - Free a wb entry
 638 *
 639 * @adev: amdgpu_device pointer
 640 * @wb: wb index
 641 *
 642 * Free a wb slot allocated for use by the driver (all asics)
 643 */
 644void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
 645{
 646	wb >>= 3;
 647	if (wb < adev->wb.num_wb)
 648		__clear_bit(wb, adev->wb.used);
 649}
 650
 651/**
 652 * amdgpu_device_vram_location - try to find VRAM location
 653 *
 654 * @adev: amdgpu device structure holding all necessary informations
 655 * @mc: memory controller structure holding memory informations
 656 * @base: base address at which to put VRAM
 657 *
 658 * Function will try to place VRAM at base address provided
 659 * as parameter.
 660 */
 661void amdgpu_device_vram_location(struct amdgpu_device *adev,
 662				 struct amdgpu_gmc *mc, u64 base)
 663{
 664	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
 665
 666	mc->vram_start = base;
 667	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
 668	if (limit && limit < mc->real_vram_size)
 669		mc->real_vram_size = limit;
 670	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
 671			mc->mc_vram_size >> 20, mc->vram_start,
 672			mc->vram_end, mc->real_vram_size >> 20);
 673}
 674
 675/**
 676 * amdgpu_device_gart_location - try to find GTT location
 677 *
 678 * @adev: amdgpu device structure holding all necessary informations
 679 * @mc: memory controller structure holding memory informations
 680 *
 681 * Function will place try to place GTT before or after VRAM.
 682 *
 683 * If GTT size is bigger than space left then we ajust GTT size.
 684 * Thus function will never fails.
 685 *
 686 * FIXME: when reducing GTT size align new size on power of 2.
 687 */
 688void amdgpu_device_gart_location(struct amdgpu_device *adev,
 689				 struct amdgpu_gmc *mc)
 690{
 691	u64 size_af, size_bf;
 692
 693	size_af = adev->gmc.mc_mask - mc->vram_end;
 694	size_bf = mc->vram_start;
 695	if (size_bf > size_af) {
 696		if (mc->gart_size > size_bf) {
 697			dev_warn(adev->dev, "limiting GTT\n");
 698			mc->gart_size = size_bf;
 699		}
 700		mc->gart_start = 0;
 701	} else {
 702		if (mc->gart_size > size_af) {
 703			dev_warn(adev->dev, "limiting GTT\n");
 704			mc->gart_size = size_af;
 705		}
 706		/* VCE doesn't like it when BOs cross a 4GB segment, so align
 707		 * the GART base on a 4GB boundary as well.
 708		 */
 709		mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
 710	}
 711	mc->gart_end = mc->gart_start + mc->gart_size - 1;
 712	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
 713			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
 714}
 715
 716/**
 717 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 718 *
 719 * @adev: amdgpu_device pointer
 720 *
 721 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 722 * to fail, but if any of the BARs is not accessible after the size we abort
 723 * driver loading by returning -ENODEV.
 724 */
 725int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 726{
 727	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
 728	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
 729	struct pci_bus *root;
 730	struct resource *res;
 731	unsigned i;
 732	u16 cmd;
 733	int r;
 734
 735	/* Bypass for VF */
 736	if (amdgpu_sriov_vf(adev))
 737		return 0;
 738
 739	/* Check if the root BUS has 64-bit memory resources */
 740	root = adev->pdev->bus;
 741	while (root->parent)
 742		root = root->parent;
 743
 744	pci_bus_for_each_resource(root, res, i) {
 745		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
 746		    res->start > 0x100000000ull)
 747			break;
 748	}
 749
 750	/* Trying to resize is pointless without a root hub window above 4GB */
 751	if (!res)
 752		return 0;
 753
 754	/* Disable memory decoding while we change the BAR addresses and size */
 755	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
 756	pci_write_config_word(adev->pdev, PCI_COMMAND,
 757			      cmd & ~PCI_COMMAND_MEMORY);
 758
 759	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
 760	amdgpu_device_doorbell_fini(adev);
 761	if (adev->asic_type >= CHIP_BONAIRE)
 762		pci_release_resource(adev->pdev, 2);
 763
 764	pci_release_resource(adev->pdev, 0);
 765
 766	r = pci_resize_resource(adev->pdev, 0, rbar_size);
 767	if (r == -ENOSPC)
 768		DRM_INFO("Not enough PCI address space for a large BAR.");
 769	else if (r && r != -ENOTSUPP)
 770		DRM_ERROR("Problem resizing BAR0 (%d).", r);
 771
 772	pci_assign_unassigned_bus_resources(adev->pdev->bus);
 773
 774	/* When the doorbell or fb BAR isn't available we have no chance of
 775	 * using the device.
 776	 */
 777	r = amdgpu_device_doorbell_init(adev);
 778	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
 779		return -ENODEV;
 780
 781	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
 782
 783	return 0;
 784}
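/*
 * Note on the rbar_size math above (worked example, not from the source):
 * resizable-BAR sizes are encoded as log2 of the size in MB.  For 8GB of
 * VRAM, space_needed >> 20 == 8192, order_base_2(8192 | 1) == 14, and
 * rbar_size == 13, i.e. a 2^13 MB == 8GB BAR.  The "| 1" keeps
 * order_base_2() well-defined for VRAM sizes below 1MB.
 */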
 785
 786/*
 787 * GPU helpers function.
 788 */
 789/**
 790 * amdgpu_device_need_post - check if the hw need post or not
 791 *
 792 * @adev: amdgpu_device pointer
 793 *
 794 * Check if the asic has been initialized (all asics) at driver startup,
 795 * or if a post is needed because a hw reset was performed.
 796 * Returns true if post is needed, false if not.
 797 */
 798bool amdgpu_device_need_post(struct amdgpu_device *adev)
 799{
 800	uint32_t reg;
 801
 802	if (amdgpu_sriov_vf(adev))
 803		return false;
 804
 805	if (amdgpu_passthrough(adev)) {
 806		/* For FIJI: in the whole-GPU pass-through virtualization case, after a VM
 807		 * reboot some old SMC firmware still needs the driver to do a vPost,
 808		 * otherwise the GPU hangs.  SMC firmware versions above 22.15 don't have
 809		 * this flaw, so force vPost for SMC versions below 22.15.
 810		 */
 811		if (adev->asic_type == CHIP_FIJI) {
 812			int err;
 813			uint32_t fw_ver;
 814			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
 815			/* force vPost if an error occurred */
 816			if (err)
 817				return true;
 818
 819			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
 820			if (fw_ver < 0x00160e00)
 821				return true;
 822		}
 823	}
 824
 825	if (adev->has_hw_reset) {
 826		adev->has_hw_reset = false;
 827		return true;
 828	}
 829
 830	/* bios scratch used on CIK+ */
 831	if (adev->asic_type >= CHIP_BONAIRE)
 832		return amdgpu_atombios_scratch_need_asic_init(adev);
 833
 834	/* check MEM_SIZE for older asics */
 835	reg = amdgpu_asic_get_config_memsize(adev);
 836
 837	if ((reg != 0) && (reg != 0xffffffff))
 838		return false;
 839
 840	return true;
 841}
 842
 843/* if we get transitioned to only one device, take VGA back */
 844/**
 845 * amdgpu_device_vga_set_decode - enable/disable vga decode
 846 *
 847 * @cookie: amdgpu_device pointer
 848 * @state: enable/disable vga decode
 849 *
 850 * Enable/disable vga decode (all asics).
 851 * Returns VGA resource flags.
 852 */
 853static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
 854{
 855	struct amdgpu_device *adev = cookie;
 856	amdgpu_asic_set_vga_state(adev, state);
 857	if (state)
 858		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
 859		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 860	else
 861		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 862}
 863
 864/**
 865 * amdgpu_device_check_block_size - validate the vm block size
 866 *
 867 * @adev: amdgpu_device pointer
 868 *
 869 * Validates the vm block size specified via module parameter.
 870 * The vm block size defines the number of bits in the page table versus the
 871 * page directory; a page is 4KB, so we have a 12-bit offset, a minimum of
 872 * 9 bits in the page table, and the remaining bits in the page directory.
 873 */
 874static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
 875{
 879	if (amdgpu_vm_block_size == -1)
 880		return;
 881
 882	if (amdgpu_vm_block_size < 9) {
 883		dev_warn(adev->dev, "VM page table size (%d) too small\n",
 884			 amdgpu_vm_block_size);
 885		amdgpu_vm_block_size = -1;
 886	}
 887}
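/*
 * Worked example (illustrative): with 4KB pages the low 12 bits address
 * bytes within a page, so a block size of 9 maps 2^(12 + 9) = 2MB per
 * page-table block; that is why values below 9 are rejected above.
 */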
 888
 889/**
 890 * amdgpu_device_check_vm_size - validate the vm size
 891 *
 892 * @adev: amdgpu_device pointer
 893 *
 894 * Validates the vm size in GB specified via module parameter.
 895 * The VM size is the size of the GPU virtual memory space in GB.
 896 */
 897static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
 898{
 899	/* no need to check the default value */
 900	if (amdgpu_vm_size == -1)
 901		return;
 902
 903	if (amdgpu_vm_size < 1) {
 904		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
 905			 amdgpu_vm_size);
 906		amdgpu_vm_size = -1;
 907	}
 908}
 909
 910/**
 911 * amdgpu_device_check_arguments - validate module params
 912 *
 913 * @adev: amdgpu_device pointer
 914 *
 915 * Validates certain module parameters and updates
 916 * the associated values used by the driver (all asics).
 917 */
 918static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
 919{
 920	if (amdgpu_sched_jobs < 4) {
 921		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
 922			 amdgpu_sched_jobs);
 923		amdgpu_sched_jobs = 4;
 924	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
 925		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
 926			 amdgpu_sched_jobs);
 927		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
 928	}
 929
 930	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
 931		/* gart size must be greater than or equal to 32M */
 932		dev_warn(adev->dev, "gart size (%d) too small\n",
 933			 amdgpu_gart_size);
 934		amdgpu_gart_size = -1;
 935	}
 936
 937	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
 938		/* gtt size must be greater than or equal to 32M */
 939		dev_warn(adev->dev, "gtt size (%d) too small\n",
 940				 amdgpu_gtt_size);
 941		amdgpu_gtt_size = -1;
 942	}
 943
 944	/* valid range is between 4 and 9 inclusive */
 945	if (amdgpu_vm_fragment_size != -1 &&
 946	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
 947		dev_warn(adev->dev, "valid range is between 4 and 9\n");
 948		amdgpu_vm_fragment_size = -1;
 949	}
 950
 951	amdgpu_device_check_vm_size(adev);
 952
 953	amdgpu_device_check_block_size(adev);
 954
 955	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
 956	    !is_power_of_2(amdgpu_vram_page_split))) {
 957		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
 958			 amdgpu_vram_page_split);
 959		amdgpu_vram_page_split = 1024;
 960	}
 961
 962	if (amdgpu_lockup_timeout == 0) {
 963		dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
 964		amdgpu_lockup_timeout = 10000;
 965	}
 966
 967	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
 968}
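/*
 * Illustrative examples of the clamping above (hypothetical values):
 * amdgpu_sched_jobs=3 becomes 4, amdgpu_sched_jobs=33 is rounded up to the
 * next power of two (64), and amdgpu_lockup_timeout=0 becomes 10000 ms.
 */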
 969
 970/**
 971 * amdgpu_switcheroo_set_state - set switcheroo state
 972 *
 973 * @pdev: pci dev pointer
 974 * @state: vga_switcheroo state
 975 *
 976 * Callback for the switcheroo driver.  Suspends or resumes
 977 * the asics before or after it is powered up using ACPI methods.
 978 */
 979static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
 980{
 981	struct drm_device *dev = pci_get_drvdata(pdev);
 982
 983	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
 984		return;
 985
 986	if (state == VGA_SWITCHEROO_ON) {
 987		pr_info("amdgpu: switched on\n");
 988		/* don't suspend or resume card normally */
 989		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 990
 991		amdgpu_device_resume(dev, true, true);
 992
 993		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 994		drm_kms_helper_poll_enable(dev);
 995	} else {
 996		pr_info("amdgpu: switched off\n");
 997		drm_kms_helper_poll_disable(dev);
 998		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 999		amdgpu_device_suspend(dev, true, true);
1000		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1001	}
1002}
1003
1004/**
1005 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1006 *
1007 * @pdev: pci dev pointer
1008 *
1009 * Callback for the switcheroo driver.  Checks if the switcheroo
1010 * state can be changed.
1011 * Returns true if the state can be changed, false if not.
1012 */
1013static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1014{
1015	struct drm_device *dev = pci_get_drvdata(pdev);
1016
1017	/*
1018	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1019	 * locking inversion with the driver load path. And the access here is
1020	 * completely racy anyway. So don't bother with locking for now.
1021	 */
1022	return dev->open_count == 0;
1023}
1024
1025static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1026	.set_gpu_state = amdgpu_switcheroo_set_state,
1027	.reprobe = NULL,
1028	.can_switch = amdgpu_switcheroo_can_switch,
1029};
1030
1031/**
1032 * amdgpu_device_ip_set_clockgating_state - set the CG state
1033 *
1034 * @adev: amdgpu_device pointer
1035 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1036 * @state: clockgating state (gate or ungate)
1037 *
1038 * Sets the requested clockgating state for all instances of
1039 * the hardware IP specified.
1040 * Returns the error code from the last instance.
1041 */
1042int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
1043					   enum amd_ip_block_type block_type,
1044					   enum amd_clockgating_state state)
1045{
1046	int i, r = 0;
1047
1048	for (i = 0; i < adev->num_ip_blocks; i++) {
1049		if (!adev->ip_blocks[i].status.valid)
1050			continue;
1051		if (adev->ip_blocks[i].version->type != block_type)
1052			continue;
1053		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1054			continue;
1055		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1056			(void *)adev, state);
1057		if (r)
1058			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1059				  adev->ip_blocks[i].version->funcs->name, r);
1060	}
1061	return r;
1062}
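/*
 * Usage sketch (illustrative): gate GFX clocks on all instances and report
 * the last error, the same way amdgpu_device_ip_suspend() below ungates the
 * SMC block:
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev,
 *						   AMD_IP_BLOCK_TYPE_GFX,
 *						   AMD_CG_STATE_GATE);
 */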
1063
1064/**
1065 * amdgpu_device_ip_set_powergating_state - set the PG state
1066 *
1067 * @adev: amdgpu_device pointer
1068 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1069 * @state: powergating state (gate or ungate)
1070 *
1071 * Sets the requested powergating state for all instances of
1072 * the hardware IP specified.
1073 * Returns the error code from the last instance.
1074 */
1075int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
1076					   enum amd_ip_block_type block_type,
1077					   enum amd_powergating_state state)
1078{
1079	int i, r = 0;
1080
1081	for (i = 0; i < adev->num_ip_blocks; i++) {
1082		if (!adev->ip_blocks[i].status.valid)
1083			continue;
1084		if (adev->ip_blocks[i].version->type != block_type)
1085			continue;
1086		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1087			continue;
1088		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1089			(void *)adev, state);
1090		if (r)
1091			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1092				  adev->ip_blocks[i].version->funcs->name, r);
1093	}
1094	return r;
1095}
1096
1097/**
1098 * amdgpu_device_ip_get_clockgating_state - get the CG state
1099 *
1100 * @adev: amdgpu_device pointer
1101 * @flags: clockgating feature flags
1102 *
1103 * Walks the list of IPs on the device and updates the clockgating
1104 * flags for each IP.
1105 * Updates @flags with the feature flags for each hardware IP where
1106 * clockgating is enabled.
1107 */
1108void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1109					    u32 *flags)
1110{
1111	int i;
1112
1113	for (i = 0; i < adev->num_ip_blocks; i++) {
1114		if (!adev->ip_blocks[i].status.valid)
1115			continue;
1116		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1117			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1118	}
1119}
1120
1121/**
1122 * amdgpu_device_ip_wait_for_idle - wait for idle
1123 *
1124 * @adev: amdgpu_device pointer
1125 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1126 *
1127 * Waits for the requested hardware IP to be idle.
1128 * Returns 0 for success or a negative error code on failure.
1129 */
1130int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1131				   enum amd_ip_block_type block_type)
1132{
1133	int i, r;
1134
1135	for (i = 0; i < adev->num_ip_blocks; i++) {
1136		if (!adev->ip_blocks[i].status.valid)
1137			continue;
1138		if (adev->ip_blocks[i].version->type == block_type) {
1139			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1140			if (r)
1141				return r;
1142			break;
1143		}
1144	}
1145	return 0;
1146
1147}
1148
1149/**
1150 * amdgpu_device_ip_is_idle - is the hardware IP idle
1151 *
1152 * @adev: amdgpu_device pointer
1153 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1154 *
1155 * Check if the hardware IP is idle or not.
1156 * Returns true if the IP is idle, false if not.
1157 */
1158bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1159			      enum amd_ip_block_type block_type)
1160{
1161	int i;
1162
1163	for (i = 0; i < adev->num_ip_blocks; i++) {
1164		if (!adev->ip_blocks[i].status.valid)
1165			continue;
1166		if (adev->ip_blocks[i].version->type == block_type)
1167			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1168	}
1169	return true;
1170
1171}
1172
1173/**
1174 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1175 *
1176 * @adev: amdgpu_device pointer
1177 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1178 *
1179 * Returns a pointer to the hardware IP block structure
1180 * if it exists for the asic, otherwise NULL.
1181 */
1182struct amdgpu_ip_block *
1183amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1184			      enum amd_ip_block_type type)
1185{
1186	int i;
1187
1188	for (i = 0; i < adev->num_ip_blocks; i++)
1189		if (adev->ip_blocks[i].version->type == type)
1190			return &adev->ip_blocks[i];
1191
1192	return NULL;
1193}
1194
1195/**
1196 * amdgpu_device_ip_block_version_cmp
1197 *
1198 * @adev: amdgpu_device pointer
1199 * @type: enum amd_ip_block_type
1200 * @major: major version
1201 * @minor: minor version
1202 *
1203 * Return 0 if the IP block version is equal or greater,
1204 * return 1 if it is smaller or the ip_block doesn't exist.
1205 */
1206int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1207				       enum amd_ip_block_type type,
1208				       u32 major, u32 minor)
1209{
1210	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1211
1212	if (ip_block && ((ip_block->version->major > major) ||
1213			((ip_block->version->major == major) &&
1214			(ip_block->version->minor >= minor))))
1215		return 0;
1216
1217	return 1;
1218}
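/*
 * Usage sketch (illustrative version numbers): callers treat the return
 * value as a "too old or missing" flag rather than a full comparison:
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       8, 0) == 0) {
 *		// GFX IP block 8.0 or newer is present
 *	}
 */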
1219
1220/**
1221 * amdgpu_device_ip_block_add
1222 *
1223 * @adev: amdgpu_device pointer
1224 * @ip_block_version: pointer to the IP to add
1225 *
1226 * Adds the IP block driver information to the collection of IPs
1227 * on the asic.
1228 */
1229int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1230			       const struct amdgpu_ip_block_version *ip_block_version)
1231{
1232	if (!ip_block_version)
1233		return -EINVAL;
1234
1235	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1236		  ip_block_version->funcs->name);
1237
1238	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1239
1240	return 0;
1241}
1242
1243/**
1244 * amdgpu_device_enable_virtual_display - enable virtual display feature
1245 *
1246 * @adev: amdgpu_device pointer
1247 *
1248 * Enables the virtual display feature if the user has enabled it via
1249 * the module parameter virtual_display.  This feature provides virtual
1250 * display hardware on headless boards or in virtualized environments.
1251 * This function parses and validates the configuration string specified by
1252 * the user and configures the virtual display configuration (number of
1253 * virtual connectors, crtcs, etc.) specified.
1254 */
1255static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1256{
1257	adev->enable_virtual_display = false;
1258
1259	if (amdgpu_virtual_display) {
1260		struct drm_device *ddev = adev->ddev;
1261		const char *pci_address_name = pci_name(ddev->pdev);
1262		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1263
1264		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1265		pciaddstr_tmp = pciaddstr;
1266		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1267			pciaddname = strsep(&pciaddname_tmp, ",");
1268			if (!strcmp("all", pciaddname)
1269			    || !strcmp(pci_address_name, pciaddname)) {
1270				long num_crtc;
1271				int res = -1;
1272
1273				adev->enable_virtual_display = true;
1274
1275				if (pciaddname_tmp)
1276					res = kstrtol(pciaddname_tmp, 10,
1277						      &num_crtc);
1278
1279				if (!res) {
1280					if (num_crtc < 1)
1281						num_crtc = 1;
1282					if (num_crtc > 6)
1283						num_crtc = 6;
1284					adev->mode_info.num_crtc = num_crtc;
1285				} else {
1286					adev->mode_info.num_crtc = 1;
1287				}
1288				break;
1289			}
1290		}
1291
1292		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1293			 amdgpu_virtual_display, pci_address_name,
1294			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1295
1296		kfree(pciaddstr);
1297	}
1298}
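/*
 * Format sketch for the virtual_display module parameter, derived from the
 * parsing above: a semicolon-separated list of "<pci address>[,<num crtc>]"
 * entries, where the address may be the literal "all", e.g.:
 *
 *	amdgpu.virtual_display=0000:01:00.0,2
 *	amdgpu.virtual_display=all,1
 *
 * num_crtc is clamped to the range [1, 6] and defaults to 1 when omitted
 * or unparsable.
 */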
1299
1300/**
1301 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1302 *
1303 * @adev: amdgpu_device pointer
1304 *
1305 * Parses the asic configuration parameters specified in the gpu info
1306 * firmware and makes them available to the driver for use in configuring
1307 * the asic.
1308 * Returns 0 on success, -EINVAL on failure.
1309 */
1310static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1311{
1312	const char *chip_name;
1313	char fw_name[30];
1314	int err;
1315	const struct gpu_info_firmware_header_v1_0 *hdr;
1316
1317	adev->firmware.gpu_info_fw = NULL;
1318
1319	switch (adev->asic_type) {
1320	case CHIP_TOPAZ:
1321	case CHIP_TONGA:
1322	case CHIP_FIJI:
1323	case CHIP_POLARIS11:
1324	case CHIP_POLARIS10:
1325	case CHIP_POLARIS12:
1326	case CHIP_CARRIZO:
1327	case CHIP_STONEY:
1328#ifdef CONFIG_DRM_AMDGPU_SI
1329	case CHIP_VERDE:
1330	case CHIP_TAHITI:
1331	case CHIP_PITCAIRN:
1332	case CHIP_OLAND:
1333	case CHIP_HAINAN:
1334#endif
1335#ifdef CONFIG_DRM_AMDGPU_CIK
1336	case CHIP_BONAIRE:
1337	case CHIP_HAWAII:
1338	case CHIP_KAVERI:
1339	case CHIP_KABINI:
1340	case CHIP_MULLINS:
1341#endif
1342	default:
1343		return 0;
1344	case CHIP_VEGA10:
1345		chip_name = "vega10";
1346		break;
1347	case CHIP_VEGA12:
1348		chip_name = "vega12";
1349		break;
1350	case CHIP_RAVEN:
1351		chip_name = "raven";
1352		break;
1353	}
1354
1355	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1356	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1357	if (err) {
1358		dev_err(adev->dev,
1359			"Failed to load gpu_info firmware \"%s\"\n",
1360			fw_name);
1361		goto out;
1362	}
1363	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1364	if (err) {
1365		dev_err(adev->dev,
1366			"Failed to validate gpu_info firmware \"%s\"\n",
1367			fw_name);
1368		goto out;
1369	}
1370
1371	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1372	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1373
1374	switch (hdr->version_major) {
1375	case 1:
1376	{
1377		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1378			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1379								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1380
1381		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1382		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1383		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1384		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1385		adev->gfx.config.max_texture_channel_caches =
1386			le32_to_cpu(gpu_info_fw->gc_num_tccs);
1387		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1388		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1389		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1390		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1391		adev->gfx.config.double_offchip_lds_buf =
1392			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1393		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1394		adev->gfx.cu_info.max_waves_per_simd =
1395			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1396		adev->gfx.cu_info.max_scratch_slots_per_cu =
1397			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1398		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1399		break;
1400	}
1401	default:
1402		dev_err(adev->dev,
1403			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1404		err = -EINVAL;
1405		goto out;
1406	}
1407out:
1408	return err;
1409}
1410
1411/**
1412 * amdgpu_device_ip_early_init - run early init for hardware IPs
1413 *
1414 * @adev: amdgpu_device pointer
1415 *
1416 * Early initialization pass for hardware IPs.  The hardware IPs that make
1417 * up each asic are discovered and each IP's early_init callback is run.  This
1418 * is the first stage in initializing the asic.
1419 * Returns 0 on success, negative error code on failure.
1420 */
1421static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1422{
1423	int i, r;
1424
1425	amdgpu_device_enable_virtual_display(adev);
1426
1427	switch (adev->asic_type) {
1428	case CHIP_TOPAZ:
1429	case CHIP_TONGA:
1430	case CHIP_FIJI:
1431	case CHIP_POLARIS11:
1432	case CHIP_POLARIS10:
1433	case CHIP_POLARIS12:
1434	case CHIP_CARRIZO:
1435	case CHIP_STONEY:
1436		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1437			adev->family = AMDGPU_FAMILY_CZ;
1438		else
1439			adev->family = AMDGPU_FAMILY_VI;
1440
1441		r = vi_set_ip_blocks(adev);
1442		if (r)
1443			return r;
1444		break;
1445#ifdef CONFIG_DRM_AMDGPU_SI
1446	case CHIP_VERDE:
1447	case CHIP_TAHITI:
1448	case CHIP_PITCAIRN:
1449	case CHIP_OLAND:
1450	case CHIP_HAINAN:
1451		adev->family = AMDGPU_FAMILY_SI;
1452		r = si_set_ip_blocks(adev);
1453		if (r)
1454			return r;
1455		break;
1456#endif
1457#ifdef CONFIG_DRM_AMDGPU_CIK
1458	case CHIP_BONAIRE:
1459	case CHIP_HAWAII:
1460	case CHIP_KAVERI:
1461	case CHIP_KABINI:
1462	case CHIP_MULLINS:
1463		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1464			adev->family = AMDGPU_FAMILY_CI;
1465		else
1466			adev->family = AMDGPU_FAMILY_KV;
1467
1468		r = cik_set_ip_blocks(adev);
1469		if (r)
1470			return r;
1471		break;
1472#endif
1473	case CHIP_VEGA10:
1474	case CHIP_VEGA12:
1475	case CHIP_RAVEN:
1476		if (adev->asic_type == CHIP_RAVEN)
1477			adev->family = AMDGPU_FAMILY_RV;
1478		else
1479			adev->family = AMDGPU_FAMILY_AI;
1480
1481		r = soc15_set_ip_blocks(adev);
1482		if (r)
1483			return r;
1484		break;
1485	default:
1486		/* FIXME: not supported yet */
1487		return -EINVAL;
1488	}
1489
1490	r = amdgpu_device_parse_gpu_info_fw(adev);
1491	if (r)
1492		return r;
1493
1494	amdgpu_amdkfd_device_probe(adev);
1495
1496	if (amdgpu_sriov_vf(adev)) {
1497		r = amdgpu_virt_request_full_gpu(adev, true);
1498		if (r)
1499			return -EAGAIN;
1500	}
1501
1502	for (i = 0; i < adev->num_ip_blocks; i++) {
1503		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1504			DRM_ERROR("disabled ip block: %d <%s>\n",
1505				  i, adev->ip_blocks[i].version->funcs->name);
1506			adev->ip_blocks[i].status.valid = false;
1507		} else {
1508			if (adev->ip_blocks[i].version->funcs->early_init) {
1509				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1510				if (r == -ENOENT) {
1511					adev->ip_blocks[i].status.valid = false;
1512				} else if (r) {
1513					DRM_ERROR("early_init of IP block <%s> failed %d\n",
1514						  adev->ip_blocks[i].version->funcs->name, r);
1515					return r;
1516				} else {
1517					adev->ip_blocks[i].status.valid = true;
1518				}
1519			} else {
1520				adev->ip_blocks[i].status.valid = true;
1521			}
1522		}
1523	}
1524
1525	adev->cg_flags &= amdgpu_cg_mask;
1526	adev->pg_flags &= amdgpu_pg_mask;
1527
1528	return 0;
1529}
1530
1531/**
1532 * amdgpu_device_ip_init - run init for hardware IPs
1533 *
1534 * @adev: amdgpu_device pointer
1535 *
1536 * Main initialization pass for hardware IPs.  The list of all the hardware
1537 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
1538 * are run.  sw_init initializes the software state associated with each IP
1539 * and hw_init initializes the hardware associated with each IP.
1540 * Returns 0 on success, negative error code on failure.
1541 */
1542static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1543{
1544	int i, r;
1545
1546	for (i = 0; i < adev->num_ip_blocks; i++) {
1547		if (!adev->ip_blocks[i].status.valid)
1548			continue;
1549		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1550		if (r) {
1551			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1552				  adev->ip_blocks[i].version->funcs->name, r);
1553			return r;
1554		}
1555		adev->ip_blocks[i].status.sw = true;
1556
1557		/* need to do gmc hw init early so we can allocate gpu mem */
1558		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1559			r = amdgpu_device_vram_scratch_init(adev);
1560			if (r) {
1561				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1562				return r;
1563			}
1564			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1565			if (r) {
1566				DRM_ERROR("hw_init %d failed %d\n", i, r);
1567				return r;
1568			}
1569			r = amdgpu_device_wb_init(adev);
1570			if (r) {
1571				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
1572				return r;
1573			}
1574			adev->ip_blocks[i].status.hw = true;
1575
1576			/* right after GMC hw init, we create CSA */
1577			if (amdgpu_sriov_vf(adev)) {
1578				r = amdgpu_allocate_static_csa(adev);
1579				if (r) {
1580					DRM_ERROR("allocate CSA failed %d\n", r);
1581					return r;
1582				}
1583			}
1584		}
1585	}
1586
1587	for (i = 0; i < adev->num_ip_blocks; i++) {
1588		if (!adev->ip_blocks[i].status.sw)
1589			continue;
1590		if (adev->ip_blocks[i].status.hw)
1591			continue;
1592		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1593		if (r) {
1594			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1595				  adev->ip_blocks[i].version->funcs->name, r);
1596			return r;
1597		}
1598		adev->ip_blocks[i].status.hw = true;
1599	}
1600
1601	amdgpu_amdkfd_device_init(adev);
1602
1603	if (amdgpu_sriov_vf(adev))
1604		amdgpu_virt_release_full_gpu(adev, true);
1605
1606	return 0;
1607}
1608
1609/**
1610 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
1611 *
1612 * @adev: amdgpu_device pointer
1613 *
1614 * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
1615 * this function before a GPU reset.  If the value is retained after a
1616 * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
1617 */
1618static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
1619{
1620	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1621}
1622
1623/**
1624 * amdgpu_device_check_vram_lost - check if vram is valid
1625 *
1626 * @adev: amdgpu_device pointer
1627 *
1628 * Checks the reset magic value written to the gart pointer in VRAM.
1629 * The driver calls this after a GPU reset to see if the contents of
1630 * VRAM is lost or not.
1631 * returns true if vram is lost, false if not.
1632 */
1633static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
1634{
1635	return !!memcmp(adev->gart.ptr, adev->reset_magic,
1636			AMDGPU_RESET_MAGIC_NUM);
1637}
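/*
 * Sketch of the reset-magic round trip (illustrative): before a reset the
 * driver snapshots the first AMDGPU_RESET_MAGIC_NUM bytes of the GART page
 * via amdgpu_device_fill_reset_magic(); after the reset,
 * amdgpu_device_check_vram_lost() memcmp()s the same bytes, and a mismatch
 * means VRAM contents did not survive the reset.
 */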
1638
1639/**
1640 * amdgpu_device_ip_late_set_cg_state - late init for clockgating
1641 *
1642 * @adev: amdgpu_device pointer
1643 *
1644 * Late initialization pass enabling clockgating for hardware IPs.
1645 * The list of all the hardware IPs that make up the asic is walked and the
1646 * set_clockgating_state callbacks are run.  This stage is run late
1647 * in the init process.
1648 * Returns 0 on success, negative error code on failure.
1649 */
1650static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
1651{
1652	int i = 0, r;
1653
1654	if (amdgpu_emu_mode == 1)
1655		return 0;
1656
1657	for (i = 0; i < adev->num_ip_blocks; i++) {
1658		if (!adev->ip_blocks[i].status.valid)
1659			continue;
1660		/* skip CG for VCE/UVD, it's handled specially */
1661		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1662		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1663		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1664			/* enable clockgating to save power */
1665			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1666										     AMD_CG_STATE_GATE);
1667			if (r) {
1668				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1669					  adev->ip_blocks[i].version->funcs->name, r);
1670				return r;
1671			}
1672		}
1673	}
1674	return 0;
1675}
1676
1677/**
1678 * amdgpu_device_ip_late_init - run late init for hardware IPs
1679 *
1680 * @adev: amdgpu_device pointer
1681 *
1682 * Late initialization pass for hardware IPs.  The list of all the hardware
1683 * IPs that make up the asic is walked and the late_init callbacks are run.
1684 * late_init covers any special initialization that an IP requires
1685 * after all of the IPs have been initialized or something that needs to happen
1686 * late in the init process.
1687 * Returns 0 on success, negative error code on failure.
1688 */
1689static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
1690{
1691	int i = 0, r;
1692
1693	for (i = 0; i < adev->num_ip_blocks; i++) {
1694		if (!adev->ip_blocks[i].status.valid)
1695			continue;
1696		if (adev->ip_blocks[i].version->funcs->late_init) {
1697			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1698			if (r) {
1699				DRM_ERROR("late_init of IP block <%s> failed %d\n",
1700					  adev->ip_blocks[i].version->funcs->name, r);
1701				return r;
1702			}
1703			adev->ip_blocks[i].status.late_initialized = true;
1704		}
1705	}
1706
1707	mod_delayed_work(system_wq, &adev->late_init_work,
1708			msecs_to_jiffies(AMDGPU_RESUME_MS));
1709
1710	amdgpu_device_fill_reset_magic(adev);
1711
1712	return 0;
1713}
1714
1715/**
1716 * amdgpu_device_ip_fini - run fini for hardware IPs
1717 *
1718 * @adev: amdgpu_device pointer
1719 *
1720 * Main teardown pass for hardware IPs.  The list of all the hardware
1721 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
1722 * are run.  hw_fini tears down the hardware associated with each IP
1723 * and sw_fini tears down any software state associated with each IP.
1724 * Returns 0 on success, negative error code on failure.
1725 */
1726static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
1727{
1728	int i, r;
1729
1730	amdgpu_amdkfd_device_fini(adev);
1731	/* need to disable SMC first */
1732	for (i = 0; i < adev->num_ip_blocks; i++) {
1733		if (!adev->ip_blocks[i].status.hw)
1734			continue;
1735		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
1736			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1737			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
1738			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1739										     AMD_CG_STATE_UNGATE);
1740			if (r) {
1741				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1742					  adev->ip_blocks[i].version->funcs->name, r);
1743				return r;
1744			}
1745			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1746			/* XXX handle errors */
1747			if (r) {
1748				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1749					  adev->ip_blocks[i].version->funcs->name, r);
1750			}
1751			adev->ip_blocks[i].status.hw = false;
1752			break;
1753		}
1754	}
1755
1756	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1757		if (!adev->ip_blocks[i].status.hw)
1758			continue;
1759
1760		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1761			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1762			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1763			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
1764			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1765										     AMD_CG_STATE_UNGATE);
1766			if (r) {
1767				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1768					  adev->ip_blocks[i].version->funcs->name, r);
1769				return r;
1770			}
1771		}
1772
1773		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1774		/* XXX handle errors */
1775		if (r) {
1776			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1777				  adev->ip_blocks[i].version->funcs->name, r);
1778		}
1779
1780		adev->ip_blocks[i].status.hw = false;
1781	}
1782
1783
1784	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1785		if (!adev->ip_blocks[i].status.sw)
1786			continue;
1787
1788		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1789			amdgpu_free_static_csa(adev);
1790			amdgpu_device_wb_fini(adev);
1791			amdgpu_device_vram_scratch_fini(adev);
1792		}
1793
1794		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
1795		/* XXX handle errors */
1796		if (r) {
1797			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1798				  adev->ip_blocks[i].version->funcs->name, r);
1799		}
1800		adev->ip_blocks[i].status.sw = false;
1801		adev->ip_blocks[i].status.valid = false;
1802	}
1803
1804	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1805		if (!adev->ip_blocks[i].status.late_initialized)
1806			continue;
1807		if (adev->ip_blocks[i].version->funcs->late_fini)
1808			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1809		adev->ip_blocks[i].status.late_initialized = false;
1810	}
1811
1812	if (amdgpu_sriov_vf(adev))
1813		if (amdgpu_virt_release_full_gpu(adev, false))
1814			DRM_ERROR("failed to release exclusive mode on fini\n");
1815
1816	return 0;
1817}
1818
1819/**
1820 * amdgpu_device_ip_late_init_func_handler - work handler for clockgating
1821 *
1822 * @work: work_struct
1823 *
1824 * Work handler for amdgpu_device_ip_late_set_cg_state.  We put the
1825 * clockgating setup into a worker thread to speed up driver init and
1826 * resume from suspend.
1827 */
1828static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
1829{
1830	struct amdgpu_device *adev =
1831		container_of(work, struct amdgpu_device, late_init_work.work);
1832	amdgpu_device_ip_late_set_cg_state(adev);
1833}
1834
1835/**
1836 * amdgpu_device_ip_suspend - run suspend for hardware IPs
1837 *
1838 * @adev: amdgpu_device pointer
1839 *
1840 * Main suspend function for hardware IPs.  The list of all the hardware
1841 * IPs that make up the asic is walked, clockgating is disabled and the
1842 * suspend callbacks are run.  suspend puts the hardware and software state
1843 * in each IP into a state suitable for suspend.
1844 * Returns 0 on success, negative error code on failure.
1845 */
1846int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
1847{
1848	int i, r;
1849
1850	if (amdgpu_sriov_vf(adev))
1851		amdgpu_virt_request_full_gpu(adev, false);
1852
1853	/* ungate SMC block first */
1854	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1855						   AMD_CG_STATE_UNGATE);
1856	if (r) {
1857		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1858	}
1859
1860	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1861		if (!adev->ip_blocks[i].status.valid)
1862			continue;
1863		/* ungate blocks so that suspend can properly shut them down */
1864		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
1865			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1866			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1867										     AMD_CG_STATE_UNGATE);
1868			if (r) {
1869				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1870					  adev->ip_blocks[i].version->funcs->name, r);
1871			}
1872		}
1874		r = adev->ip_blocks[i].version->funcs->suspend(adev);
1875		/* XXX handle errors */
1876		if (r) {
1877			DRM_ERROR("suspend of IP block <%s> failed %d\n",
1878				  adev->ip_blocks[i].version->funcs->name, r);
1879		}
1880	}
1881
1882	if (amdgpu_sriov_vf(adev))
1883		amdgpu_virt_release_full_gpu(adev, false);
1884
1885	return 0;
1886}
1887
1888static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
1889{
1890	int i, r;
1891
1892	static enum amd_ip_block_type ip_order[] = {
1893		AMD_IP_BLOCK_TYPE_GMC,
1894		AMD_IP_BLOCK_TYPE_COMMON,
1895		AMD_IP_BLOCK_TYPE_IH,
1896	};
1897
1898	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1899		int j;
1900		struct amdgpu_ip_block *block;
1901
1902		for (j = 0; j < adev->num_ip_blocks; j++) {
1903			block = &adev->ip_blocks[j];
1904
1905			if (block->version->type != ip_order[i] ||
1906				!block->status.valid)
1907				continue;
1908
1909			r = block->version->funcs->hw_init(adev);
1910			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
1911			if (r)
1912				return r;
1913		}
1914	}
1915
1916	return 0;
1917}
1918
1919static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
1920{
1921	int i, r;
1922
1923	static enum amd_ip_block_type ip_order[] = {
1924		AMD_IP_BLOCK_TYPE_SMC,
1925		AMD_IP_BLOCK_TYPE_PSP,
1926		AMD_IP_BLOCK_TYPE_DCE,
1927		AMD_IP_BLOCK_TYPE_GFX,
1928		AMD_IP_BLOCK_TYPE_SDMA,
1929		AMD_IP_BLOCK_TYPE_UVD,
1930		AMD_IP_BLOCK_TYPE_VCE
1931	};
1932
1933	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1934		int j;
1935		struct amdgpu_ip_block *block;
1936
1937		for (j = 0; j < adev->num_ip_blocks; j++) {
1938			block = &adev->ip_blocks[j];
1939
1940			if (block->version->type != ip_order[i] ||
1941				!block->status.valid)
1942				continue;
1943
1944			r = block->version->funcs->hw_init(adev);
1945			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
1946			if (r)
1947				return r;
1948		}
1949	}
1950
1951	return 0;
1952}
1953
1954/**
1955 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
1956 *
1957 * @adev: amdgpu_device pointer
1958 *
1959 * First resume function for hardware IPs.  The list of all the hardware
1960 * IPs that make up the asic is walked and the resume callbacks are run for
1961 * COMMON, GMC, and IH.  resume puts the hardware into a functional state
1962 * after a suspend and updates the software state as necessary.  This
1963 * function is also used for restoring the GPU after a GPU reset.
1964 * Returns 0 on success, negative error code on failure.
1965 */
1966static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
1967{
1968	int i, r;
1969
1970	for (i = 0; i < adev->num_ip_blocks; i++) {
1971		if (!adev->ip_blocks[i].status.valid)
1972			continue;
1973		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1974		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1975		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
1976			r = adev->ip_blocks[i].version->funcs->resume(adev);
1977			if (r) {
1978				DRM_ERROR("resume of IP block <%s> failed %d\n",
1979					  adev->ip_blocks[i].version->funcs->name, r);
1980				return r;
1981			}
1982		}
1983	}
1984
1985	return 0;
1986}
1987
1988/**
1989 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
1990 *
1991 * @adev: amdgpu_device pointer
1992 *
1993 * Second resume function for hardware IPs.  The list of all the hardware
1994 * IPs that make up the asic is walked and the resume callbacks are run for
1995 * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
1996 * functional state after a suspend and updates the software state as
1997 * necessary.  This function is also used for restoring the GPU after a GPU
1998 * reset.
1999 * Returns 0 on success, negative error code on failure.
2000 */
2001static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2002{
2003	int i, r;
2004
2005	for (i = 0; i < adev->num_ip_blocks; i++) {
2006		if (!adev->ip_blocks[i].status.valid)
2007			continue;
2008		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2009		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2010		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
2011			continue;
2012		r = adev->ip_blocks[i].version->funcs->resume(adev);
2013		if (r) {
2014			DRM_ERROR("resume of IP block <%s> failed %d\n",
2015				  adev->ip_blocks[i].version->funcs->name, r);
2016			return r;
2017		}
2018	}
2019
2020	return 0;
2021}
2022
2023/**
2024 * amdgpu_device_ip_resume - run resume for hardware IPs
2025 *
2026 * @adev: amdgpu_device pointer
2027 *
2028 * Main resume function for hardware IPs.  The hardware IPs
2029 * are split into two resume functions because they are
2030 * also used in recovering from a GPU reset and some additional
2031 * steps need to be taken between them.  In this case (S3/S4) they are
2032 * run sequentially.
2033 * Returns 0 on success, negative error code on failure.
2034 */
2035static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2036{
2037	int r;
2038
2039	r = amdgpu_device_ip_resume_phase1(adev);
2040	if (r)
2041		return r;
2042	r = amdgpu_device_ip_resume_phase2(adev);
2043
2044	return r;
2045}
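/*
 * Ordering sketch (illustrative): phase 1 brings up COMMON, GMC and IH so
 * that memory access and interrupts work; phase 2 resumes everything else.
 * During GPU reset the caller runs the two phases itself so that extra
 * steps can be inserted between them; here, for S3/S4 resume, they simply
 * run back to back.
 */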
2046
2047/**
2048 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2049 *
2050 * @adev: amdgpu_device pointer
2051 *
2052 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2053 */
2054static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2055{
2056	if (amdgpu_sriov_vf(adev)) {
2057		if (adev->is_atom_fw) {
2058			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2059				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2060		} else {
2061			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2062				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2063		}
2064
2065		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2066			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2067	}
2068}
2069
2070/**
2071 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2072 *
2073 * @asic_type: AMD asic type
2074 *
2075 * Check if there is DC (new modesetting infrastructure) support for an asic.
2076 * returns true if DC has support, false if not.
2077 */
2078bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2079{
2080	switch (asic_type) {
2081#if defined(CONFIG_DRM_AMD_DC)
2082	case CHIP_BONAIRE:
2083	case CHIP_HAWAII:
2084	case CHIP_KAVERI:
2085	case CHIP_KABINI:
2086	case CHIP_MULLINS:
2087	case CHIP_CARRIZO:
2088	case CHIP_STONEY:
2089	case CHIP_POLARIS11:
2090	case CHIP_POLARIS10:
2091	case CHIP_POLARIS12:
2092	case CHIP_TONGA:
2093	case CHIP_FIJI:
2094#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
2095		return amdgpu_dc != 0;
2096#endif
2097	case CHIP_VEGA10:
2098	case CHIP_VEGA12:
2099#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2100	case CHIP_RAVEN:
2101#endif
2102		return amdgpu_dc != 0;
2103#endif
2104	default:
2105		return false;
2106	}
2107}
2108
2109/**
2110 * amdgpu_device_has_dc_support - check if dc is supported
2111 *
2112 * @adev: amdgpu_device pointer
2113 *
2114 * Returns true for supported, false for not supported
2115 */
2116bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2117{
2118	if (amdgpu_sriov_vf(adev))
2119		return false;
2120
2121	return amdgpu_device_asic_has_dc_support(adev->asic_type);
2122}
2123
2124/**
2125 * amdgpu_device_init - initialize the driver
2126 *
2127 * @adev: amdgpu_device pointer
2128 * @ddev: drm dev pointer
2129 * @pdev: pci dev pointer
2130 * @flags: driver flags
2131 *
2132 * Initializes the driver info and hw (all asics).
2133 * Returns 0 for success or an error on failure.
2134 * Called at driver startup.
2135 */
2136int amdgpu_device_init(struct amdgpu_device *adev,
2137		       struct drm_device *ddev,
2138		       struct pci_dev *pdev,
2139		       uint32_t flags)
2140{
2141	int r, i;
2142	bool runtime = false;
2143	u32 max_MBps;
2144
2145	adev->shutdown = false;
2146	adev->dev = &pdev->dev;
2147	adev->ddev = ddev;
2148	adev->pdev = pdev;
2149	adev->flags = flags;
2150	adev->asic_type = flags & AMD_ASIC_MASK;
2151	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2152	if (amdgpu_emu_mode == 1)
2153		adev->usec_timeout *= 2;
2154	adev->gmc.gart_size = 512 * 1024 * 1024;
2155	adev->accel_working = false;
2156	adev->num_rings = 0;
2157	adev->mman.buffer_funcs = NULL;
2158	adev->mman.buffer_funcs_ring = NULL;
2159	adev->vm_manager.vm_pte_funcs = NULL;
2160	adev->vm_manager.vm_pte_num_rings = 0;
2161	adev->gmc.gmc_funcs = NULL;
2162	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2163	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2164
2165	adev->smc_rreg = &amdgpu_invalid_rreg;
2166	adev->smc_wreg = &amdgpu_invalid_wreg;
2167	adev->pcie_rreg = &amdgpu_invalid_rreg;
2168	adev->pcie_wreg = &amdgpu_invalid_wreg;
2169	adev->pciep_rreg = &amdgpu_invalid_rreg;
2170	adev->pciep_wreg = &amdgpu_invalid_wreg;
2171	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2172	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2173	adev->didt_rreg = &amdgpu_invalid_rreg;
2174	adev->didt_wreg = &amdgpu_invalid_wreg;
2175	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2176	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
2177	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2178	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2179
2180	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2181		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2182		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
2183
2184	/* mutex initialization is all done here so we
2185	 * can call these functions again without locking issues */
2186	atomic_set(&adev->irq.ih.lock, 0);
2187	mutex_init(&adev->firmware.mutex);
2188	mutex_init(&adev->pm.mutex);
2189	mutex_init(&adev->gfx.gpu_clock_mutex);
2190	mutex_init(&adev->srbm_mutex);
2191	mutex_init(&adev->gfx.pipe_reserve_mutex);
2192	mutex_init(&adev->grbm_idx_mutex);
2193	mutex_init(&adev->mn_lock);
2194	mutex_init(&adev->virt.vf_errors.lock);
2195	hash_init(adev->mn_hash);
2196	mutex_init(&adev->lock_reset);
2197
2198	amdgpu_device_check_arguments(adev);
2199
2200	spin_lock_init(&adev->mmio_idx_lock);
2201	spin_lock_init(&adev->smc_idx_lock);
2202	spin_lock_init(&adev->pcie_idx_lock);
2203	spin_lock_init(&adev->uvd_ctx_idx_lock);
2204	spin_lock_init(&adev->didt_idx_lock);
2205	spin_lock_init(&adev->gc_cac_idx_lock);
2206	spin_lock_init(&adev->se_cac_idx_lock);
2207	spin_lock_init(&adev->audio_endpt_idx_lock);
2208	spin_lock_init(&adev->mm_stats.lock);
2209
2210	INIT_LIST_HEAD(&adev->shadow_list);
2211	mutex_init(&adev->shadow_list_lock);
2212
2213	INIT_LIST_HEAD(&adev->ring_lru_list);
2214	spin_lock_init(&adev->ring_lru_list_lock);
2215
2216	INIT_DELAYED_WORK(&adev->late_init_work,
2217			  amdgpu_device_ip_late_init_func_handler);
2218
2219	/* Registers mapping */
2220	/* TODO: block userspace mapping of io register */
2221	if (adev->asic_type >= CHIP_BONAIRE) {
2222		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2223		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2224	} else {
2225		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2226		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2227	}
2228
2229	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2230	if (adev->rmmio == NULL) {
2231		return -ENOMEM;
2232	}
2233	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2234	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2235
2236	/* doorbell bar mapping */
2237	amdgpu_device_doorbell_init(adev);
2238
2239	/* io port mapping */
2240	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2241		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2242			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2243			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2244			break;
2245		}
2246	}
2247	if (adev->rio_mem == NULL)
2248		DRM_INFO("PCI I/O BAR is not found.\n");
2249
2250	amdgpu_device_get_pcie_info(adev);
2251
2252	/* early init functions */
2253	r = amdgpu_device_ip_early_init(adev);
2254	if (r)
2255		return r;
2256
2257	/* if we have more than one VGA card, then disable the amdgpu VGA resources */
2258	/* this will fail for cards that aren't VGA class devices, just
2259	 * ignore it */
2260	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
2261
2262	if (amdgpu_device_is_px(ddev))
2263		runtime = true;
2264	if (!pci_is_thunderbolt_attached(adev->pdev))
2265		vga_switcheroo_register_client(adev->pdev,
2266					       &amdgpu_switcheroo_ops, runtime);
2267	if (runtime)
2268		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2269
2270	if (amdgpu_emu_mode == 1) {
2271		/* post the asic on emulation mode */
2272		emu_soc_asic_init(adev);
2273		goto fence_driver_init;
2274	}
2275
2276	/* Read BIOS */
2277	if (!amdgpu_get_bios(adev)) {
2278		r = -EINVAL;
2279		goto failed;
2280	}
2281
2282	r = amdgpu_atombios_init(adev);
2283	if (r) {
2284		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2285		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2286		goto failed;
2287	}
2288
2289	/* detect if we are with an SRIOV vbios */
2290	amdgpu_device_detect_sriov_bios(adev);
2291
2292	/* Post card if necessary */
2293	if (amdgpu_device_need_post(adev)) {
2294		if (!adev->bios) {
2295			dev_err(adev->dev, "no vBIOS found\n");
2296			r = -EINVAL;
2297			goto failed;
2298		}
2299		DRM_INFO("GPU posting now...\n");
2300		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2301		if (r) {
2302			dev_err(adev->dev, "gpu post error!\n");
2303			goto failed;
2304		}
2305	}
2306
2307	if (adev->is_atom_fw) {
2308		/* Initialize clocks */
2309		r = amdgpu_atomfirmware_get_clock_info(adev);
2310		if (r) {
2311			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
2312			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2313			goto failed;
2314		}
2315	} else {
2316		/* Initialize clocks */
2317		r = amdgpu_atombios_get_clock_info(adev);
2318		if (r) {
2319			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
2320			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2321			goto failed;
2322		}
2323		/* init i2c buses */
2324		if (!amdgpu_device_has_dc_support(adev))
2325			amdgpu_atombios_i2c_init(adev);
2326	}
2327
2328fence_driver_init:
2329	/* Fence driver */
2330	r = amdgpu_fence_driver_init(adev);
2331	if (r) {
2332		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
2333		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
2334		goto failed;
2335	}
2336
2337	/* init the mode config */
2338	drm_mode_config_init(adev->ddev);
2339
2340	r = amdgpu_device_ip_init(adev);
2341	if (r) {
2342		/* failed in exclusive mode due to timeout */
2343		if (amdgpu_sriov_vf(adev) &&
2344		    !amdgpu_sriov_runtime(adev) &&
2345		    amdgpu_virt_mmio_blocked(adev) &&
2346		    !amdgpu_virt_wait_reset(adev)) {
2347			dev_err(adev->dev, "VF exclusive mode timeout\n");
2348			/* Don't send request since VF is inactive. */
2349			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2350			adev->virt.ops = NULL;
2351			r = -EAGAIN;
2352			goto failed;
2353		}
2354		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
2355		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
2356		goto failed;
2357	}
2358
2359	adev->accel_working = true;
2360
2361	amdgpu_vm_check_compute_bug(adev);
2362
2363	/* Initialize the buffer migration limit. */
2364	if (amdgpu_moverate >= 0)
2365		max_MBps = amdgpu_moverate;
2366	else
2367		max_MBps = 8; /* Allow 8 MB/s. */
2368	/* Get a log2 for easy divisions. */
2369	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2370
2371	r = amdgpu_ib_pool_init(adev);
2372	if (r) {
2373		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2374		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2375		goto failed;
2376	}
2377
2378	r = amdgpu_ib_ring_tests(adev);
2379	if (r)
2380		DRM_ERROR("ib ring test failed (%d).\n", r);
2381
2382	if (amdgpu_sriov_vf(adev))
2383		amdgpu_virt_init_data_exchange(adev);
2384
2385	amdgpu_fbdev_init(adev);
2386
2387	r = amdgpu_pm_sysfs_init(adev);
2388	if (r)
2389		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2390
2391	r = amdgpu_debugfs_gem_init(adev);
2392	if (r)
2393		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
2394
2395	r = amdgpu_debugfs_regs_init(adev);
2396	if (r)
2397		DRM_ERROR("registering register debugfs failed (%d).\n", r);
2398
2399	r = amdgpu_debugfs_firmware_init(adev);
2400	if (r)
2401		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
2402
2403	r = amdgpu_debugfs_init(adev);
2404	if (r)
2405		DRM_ERROR("Creating debugfs files failed (%d).\n", r);
2406
2407	if ((amdgpu_testing & 1)) {
2408		if (adev->accel_working)
2409			amdgpu_test_moves(adev);
2410		else
2411			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2412	}
2413	if (amdgpu_benchmarking) {
2414		if (adev->accel_working)
2415			amdgpu_benchmark(adev, amdgpu_benchmarking);
2416		else
2417			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2418	}
2419
2420	/* enable clockgating, etc., after ib tests since some blocks require
2421	 * explicit gating rather than handling it automatically.
2422	 */
2423	r = amdgpu_device_ip_late_init(adev);
2424	if (r) {
2425		dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
2426		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
2427		goto failed;
2428	}
2429
2430	return 0;
2431
2432failed:
2433	amdgpu_vf_error_trans_all(adev);
2434	if (runtime)
2435		vga_switcheroo_fini_domain_pm_ops(adev->dev);
2436
2437	return r;
2438}
2439
2440/**
2441 * amdgpu_device_fini - tear down the driver
2442 *
2443 * @adev: amdgpu_device pointer
2444 *
2445 * Tear down the driver info (all asics).
2446 * Called at driver shutdown.
2447 */
2448void amdgpu_device_fini(struct amdgpu_device *adev)
2449{
2450	int r;
2451
2452	DRM_INFO("amdgpu: finishing device.\n");
2453	adev->shutdown = true;
2454	/* disable all interrupts */
2455	amdgpu_irq_disable_all(adev);
2456	if (adev->mode_info.mode_config_initialized) {
2457		if (!amdgpu_device_has_dc_support(adev))
2458			drm_crtc_force_disable_all(adev->ddev);
2459		else
2460			drm_atomic_helper_shutdown(adev->ddev);
2461	}
2462	amdgpu_ib_pool_fini(adev);
2463	amdgpu_fence_driver_fini(adev);
2464	amdgpu_pm_sysfs_fini(adev);
2465	amdgpu_fbdev_fini(adev);
2466	r = amdgpu_device_ip_fini(adev);
2467	if (adev->firmware.gpu_info_fw) {
2468		release_firmware(adev->firmware.gpu_info_fw);
2469		adev->firmware.gpu_info_fw = NULL;
2470	}
2471	adev->accel_working = false;
2472	cancel_delayed_work_sync(&adev->late_init_work);
2473	/* free i2c buses */
2474	if (!amdgpu_device_has_dc_support(adev))
2475		amdgpu_i2c_fini(adev);
2476
2477	if (amdgpu_emu_mode != 1)
2478		amdgpu_atombios_fini(adev);
2479
2480	kfree(adev->bios);
2481	adev->bios = NULL;
2482	if (!pci_is_thunderbolt_attached(adev->pdev))
2483		vga_switcheroo_unregister_client(adev->pdev);
2484	if (adev->flags & AMD_IS_PX)
2485		vga_switcheroo_fini_domain_pm_ops(adev->dev);
2486	vga_client_register(adev->pdev, NULL, NULL, NULL);
2487	if (adev->rio_mem)
2488		pci_iounmap(adev->pdev, adev->rio_mem);
2489	adev->rio_mem = NULL;
2490	iounmap(adev->rmmio);
2491	adev->rmmio = NULL;
2492	amdgpu_device_doorbell_fini(adev);
2493	amdgpu_debugfs_regs_cleanup(adev);
2494}
2495
2496
2497/*
2498 * Suspend & resume.
2499 */
2500/**
2501 * amdgpu_device_suspend - initiate device suspend
2502 *
2503 * @dev: drm dev pointer
2504 * @suspend: true to put the PCI device into D3hot, false to reset the asic instead
 * @fbcon: true to also suspend the fbdev console
2505 *
2506 * Puts the hw in the suspend state (all asics).
2507 * Returns 0 for success or an error on failure.
2508 * Called at driver suspend.
2509 */
2510int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
2511{
2512	struct amdgpu_device *adev;
2513	struct drm_crtc *crtc;
2514	struct drm_connector *connector;
2515	int r;
2516
2517	if (dev == NULL || dev->dev_private == NULL) {
2518		return -ENODEV;
2519	}
2520
2521	adev = dev->dev_private;
2522
2523	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2524		return 0;
2525
2526	drm_kms_helper_poll_disable(dev);
2527
2528	if (!amdgpu_device_has_dc_support(adev)) {
2529		/* turn off display hw */
2530		drm_modeset_lock_all(dev);
2531		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2532			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2533		}
2534		drm_modeset_unlock_all(dev);
2535	}
2536
2537	amdgpu_amdkfd_suspend(adev);
2538
2539	/* unpin the front buffers and cursors */
2540	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2541		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2542		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2543		struct amdgpu_bo *robj;
2544
2545		if (amdgpu_crtc->cursor_bo) {
2546			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2547			r = amdgpu_bo_reserve(aobj, true);
2548			if (r == 0) {
2549				amdgpu_bo_unpin(aobj);
2550				amdgpu_bo_unreserve(aobj);
2551			}
2552		}
2553
2554		if (rfb == NULL || rfb->obj == NULL) {
2555			continue;
2556		}
2557		robj = gem_to_amdgpu_bo(rfb->obj);
2558		/* don't unpin kernel fb objects */
2559		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
2560			r = amdgpu_bo_reserve(robj, true);
2561			if (r == 0) {
2562				amdgpu_bo_unpin(robj);
2563				amdgpu_bo_unreserve(robj);
2564			}
2565		}
2566	}
2567	/* evict vram memory */
2568	amdgpu_bo_evict_vram(adev);
2569
2570	amdgpu_fence_driver_suspend(adev);
2571
2572	r = amdgpu_device_ip_suspend(adev);
2573
2574	/* evict remaining vram memory
2575	 * This second call to evict vram is to evict the gart page table
2576	 * using the CPU.
2577	 */
2578	amdgpu_bo_evict_vram(adev);
2579
2580	pci_save_state(dev->pdev);
2581	if (suspend) {
2582		/* Shut down the device */
2583		pci_disable_device(dev->pdev);
2584		pci_set_power_state(dev->pdev, PCI_D3hot);
2585	} else {
2586		r = amdgpu_asic_reset(adev);
2587		if (r)
2588			DRM_ERROR("amdgpu asic reset failed\n");
2589	}
2590
2591	if (fbcon) {
2592		console_lock();
2593		amdgpu_fbdev_set_suspend(adev, 1);
2594		console_unlock();
2595	}
2596	return 0;
2597}
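
/*
 * Illustrative caller sketch (an assumption modeled on the driver's PM
 * callbacks, not code from this file): a system-sleep handler would
 * typically pass suspend=true and fbcon=true:
 *
 *	static int example_pmops_suspend(struct device *dev)
 *	{
 *		struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *		return amdgpu_device_suspend(drm_dev, true, true);
 *	}
 */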
2598
2599/**
2600 * amdgpu_device_resume - initiate device resume
2601 *
2602 * @dev: drm dev pointer
 * @resume: true to re-enable the PCI device on wake, false if it stayed powered
 * @fbcon: true to also resume the fbdev console
2603 *
2604 * Bring the hw back to operating state (all asics).
2605 * Returns 0 for success or an error on failure.
2606 * Called at driver resume.
2607 */
2608int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2609{
2610	struct drm_connector *connector;
2611	struct amdgpu_device *adev = dev->dev_private;
2612	struct drm_crtc *crtc;
2613	int r = 0;
2614
2615	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2616		return 0;
2617
2618	if (fbcon)
2619		console_lock();
2620
2621	if (resume) {
2622		pci_set_power_state(dev->pdev, PCI_D0);
2623		pci_restore_state(dev->pdev);
2624		r = pci_enable_device(dev->pdev);
2625		if (r)
2626			goto unlock;
2627	}
2628
2629	/* post card */
2630	if (amdgpu_device_need_post(adev)) {
2631		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2632		if (r)
2633			DRM_ERROR("amdgpu asic init failed\n");
2634	}
2635
2636	r = amdgpu_device_ip_resume(adev);
2637	if (r) {
2638		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
2639		goto unlock;
2640	}
2641	amdgpu_fence_driver_resume(adev);
2642
2643	if (resume) {
2644		r = amdgpu_ib_ring_tests(adev);
2645		if (r)
2646			DRM_ERROR("ib ring test failed (%d).\n", r);
2647	}
2648
2649	r = amdgpu_device_ip_late_init(adev);
2650	if (r)
2651		goto unlock;
2652
2653	/* pin cursors */
2654	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2655		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2656
2657		if (amdgpu_crtc->cursor_bo) {
2658			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2659			r = amdgpu_bo_reserve(aobj, true);
2660			if (r == 0) {
2661				r = amdgpu_bo_pin(aobj,
2662						  AMDGPU_GEM_DOMAIN_VRAM,
2663						  &amdgpu_crtc->cursor_addr);
2664				if (r != 0)
2665					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2666				amdgpu_bo_unreserve(aobj);
2667			}
2668		}
2669	}
2670	r = amdgpu_amdkfd_resume(adev);
2671	if (r)
2672		return r;
2673
2674	/* blat the mode back in */
2675	if (fbcon) {
2676		if (!amdgpu_device_has_dc_support(adev)) {
2677			/* pre DCE11 */
2678			drm_helper_resume_force_mode(dev);
2679
2680			/* turn on display hw */
2681			drm_modeset_lock_all(dev);
2682			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2683				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2684			}
2685			drm_modeset_unlock_all(dev);
2686		}
2687	}
2688
2689	drm_kms_helper_poll_enable(dev);
2690
2691	/*
2692	 * Most of the connector probing functions try to acquire runtime pm
2693	 * refs to ensure that the GPU is powered on when connector polling is
2694	 * performed. Since we're calling this from a runtime PM callback,
2695	 * trying to acquire rpm refs will cause us to deadlock.
2696	 *
2697	 * Since we're guaranteed to be holding the rpm lock, it's safe to
2698	 * temporarily disable the rpm helpers so this doesn't deadlock us.
2699	 */
2700#ifdef CONFIG_PM
2701	dev->dev->power.disable_depth++;
2702#endif
2703	if (!amdgpu_device_has_dc_support(adev))
2704		drm_helper_hpd_irq_event(dev);
2705	else
2706		drm_kms_helper_hotplug_event(dev);
2707#ifdef CONFIG_PM
2708	dev->dev->power.disable_depth--;
2709#endif
2710
2711	if (fbcon)
2712		amdgpu_fbdev_set_suspend(adev, 0);
2713
2714unlock:
2715	if (fbcon)
2716		console_unlock();
2717
2718	return r;
2719}
2720
2721/**
2722 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
2723 *
2724 * @adev: amdgpu_device pointer
2725 *
2726 * The list of all the hardware IPs that make up the asic is walked and
2727 * the check_soft_reset callbacks are run.  check_soft_reset determines
2728 * if the asic is still hung or not.
2729 * Returns true if any of the IPs are still in a hung state, false if not.
2730 */
2731static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
2732{
2733	int i;
2734	bool asic_hang = false;
2735
2736	if (amdgpu_sriov_vf(adev))
2737		return true;
2738
2739	for (i = 0; i < adev->num_ip_blocks; i++) {
2740		if (!adev->ip_blocks[i].status.valid)
2741			continue;
2742		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2743			adev->ip_blocks[i].status.hang =
2744				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2745		if (adev->ip_blocks[i].status.hang) {
2746			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
2747			asic_hang = true;
2748		}
2749	}
2750	return asic_hang;
2751}
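
/*
 * Illustrative sketch (assumed shape, not code from this file): an IP block
 * opts into the soft-reset walks by filling in the optional callbacks of its
 * amd_ip_funcs table; blocks that leave them NULL are simply skipped:
 *
 *	static const struct amd_ip_funcs example_ip_funcs = {
 *		.check_soft_reset = example_ip_check_soft_reset,
 *		.pre_soft_reset   = example_ip_pre_soft_reset,
 *		.soft_reset       = example_ip_soft_reset,
 *		.post_soft_reset  = example_ip_post_soft_reset,
 *	};
 */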
2752
2753/**
2754 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
2755 *
2756 * @adev: amdgpu_device pointer
2757 *
2758 * The list of all the hardware IPs that make up the asic is walked and the
2759 * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
2760 * handles any IP specific hardware or software state changes that are
2761 * necessary for a soft reset to succeed.
2762 * Returns 0 on success, negative error code on failure.
2763 */
2764static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
2765{
2766	int i, r = 0;
2767
2768	for (i = 0; i < adev->num_ip_blocks; i++) {
2769		if (!adev->ip_blocks[i].status.valid)
2770			continue;
2771		if (adev->ip_blocks[i].status.hang &&
2772		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2773			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
2774			if (r)
2775				return r;
2776		}
2777	}
2778
2779	return 0;
2780}
2781
2782/**
2783 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
2784 *
2785 * @adev: amdgpu_device pointer
2786 *
2787 * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
2788 * reset is necessary to recover.
2789 * Returns true if a full asic reset is required, false if not.
2790 */
2791static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
2792{
2793	int i;
2794
2795	for (i = 0; i < adev->num_ip_blocks; i++) {
2796		if (!adev->ip_blocks[i].status.valid)
2797			continue;
2798		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2799		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2800		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2801		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2802		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) {
2803			if (adev->ip_blocks[i].status.hang) {
2804				DRM_INFO("Some blocks need a full reset!\n");
2805				return true;
2806			}
2807		}
2808	}
2809	return false;
2810}
2811
2812/**
2813 * amdgpu_device_ip_soft_reset - do a soft reset
2814 *
2815 * @adev: amdgpu_device pointer
2816 *
2817 * The list of all the hardware IPs that make up the asic is walked and the
2818 * soft_reset callbacks are run if the block is hung.  soft_reset handles any
2819 * IP specific hardware or software state changes that are necessary to soft
2820 * reset the IP.
2821 * Returns 0 on success, negative error code on failure.
2822 */
2823static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
2824{
2825	int i, r = 0;
2826
2827	for (i = 0; i < adev->num_ip_blocks; i++) {
2828		if (!adev->ip_blocks[i].status.valid)
2829			continue;
2830		if (adev->ip_blocks[i].status.hang &&
2831		    adev->ip_blocks[i].version->funcs->soft_reset) {
2832			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
2833			if (r)
2834				return r;
2835		}
2836	}
2837
2838	return 0;
2839}
2840
2841/**
2842 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
2843 *
2844 * @adev: amdgpu_device pointer
2845 *
2846 * The list of all the hardware IPs that make up the asic is walked and the
2847 * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
2848 * handles any IP specific hardware or software state changes that are
2849 * necessary after the IP has been soft reset.
2850 * Returns 0 on success, negative error code on failure.
2851 */
2852static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
2853{
2854	int i, r = 0;
2855
2856	for (i = 0; i < adev->num_ip_blocks; i++) {
2857		if (!adev->ip_blocks[i].status.valid)
2858			continue;
2859		if (adev->ip_blocks[i].status.hang &&
2860		    adev->ip_blocks[i].version->funcs->post_soft_reset)
2861			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
2862		if (r)
2863			return r;
2864	}
2865
2866	return 0;
2867}
2868
2869/**
2870 * amdgpu_device_recover_vram_from_shadow - restore shadowed VRAM buffers
2871 *
2872 * @adev: amdgpu_device pointer
2873 * @ring: amdgpu_ring for the engine handling the buffer operations
2874 * @bo: amdgpu_bo buffer whose shadow is being restored
2875 * @fence: dma_fence associated with the operation
2876 *
2877 * Restores the VRAM buffer contents from the shadow in GTT.  Used to
2878 * restore things like GPUVM page tables after a GPU reset where
2879 * the contents of VRAM might be lost.
2880 * Returns 0 on success, negative error code on failure.
2881 */
2882static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
2883						  struct amdgpu_ring *ring,
2884						  struct amdgpu_bo *bo,
2885						  struct dma_fence **fence)
2886{
2887	uint32_t domain;
2888	int r;
2889
2890	if (!bo->shadow)
2891		return 0;
2892
2893	r = amdgpu_bo_reserve(bo, true);
2894	if (r)
2895		return r;
2896	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2897	/* if bo has been evicted, then no need to recover */
2898	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2899		r = amdgpu_bo_validate(bo->shadow);
2900		if (r) {
2901			DRM_ERROR("bo validate failed!\n");
2902			goto err;
2903		}
2904
2905		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2906						 NULL, fence, true);
2907		if (r) {
2908			DRM_ERROR("recover page table failed!\n");
2909			goto err;
2910		}
2911	}
2912err:
2913	amdgpu_bo_unreserve(bo);
2914	return r;
2915}
2916
2917/**
2918 * amdgpu_device_handle_vram_lost - Handle the loss of VRAM contents
2919 *
2920 * @adev: amdgpu_device pointer
2921 *
2922 * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
2923 * restore things like GPUVM page tables after a GPU reset where
2924 * the contents of VRAM might be lost.
2925 * Returns 0 on success, 1 on failure.
2926 */
2927static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
2928{
2929	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2930	struct amdgpu_bo *bo, *tmp;
2931	struct dma_fence *fence = NULL, *next = NULL;
2932	long r = 1;
2933	int i = 0;
2934	long tmo;
2935
2936	if (amdgpu_sriov_runtime(adev))
2937		tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
2938	else
2939		tmo = msecs_to_jiffies(100);
2940
2941	DRM_INFO("recover vram bo from shadow start\n");
2942	mutex_lock(&adev->shadow_list_lock);
2943	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2944		next = NULL;
2945		amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
2946		if (fence) {
2947			r = dma_fence_wait_timeout(fence, false, tmo);
2948			if (r == 0)
2949				pr_err("wait fence %p[%d] timeout\n", fence, i);
2950			else if (r < 0)
2951				pr_err("wait fence %p[%d] interrupted\n", fence, i);
2952			if (r < 1) {
2953				dma_fence_put(fence);
2954				fence = next;
2955				break;
2956			}
2957			i++;
2958		}
2959
2960		dma_fence_put(fence);
2961		fence = next;
2962	}
2963	mutex_unlock(&adev->shadow_list_lock);
2964
2965	if (fence) {
2966		r = dma_fence_wait_timeout(fence, false, tmo);
2967		if (r == 0)
2968			pr_err("wait fence %p[%d] timeout\n", fence, i);
2969		else if (r < 0)
2970			pr_err("wait fence %p[%d] interrupted\n", fence, i);
2971
2972	}
2973	dma_fence_put(fence);
2974
2975	if (r > 0)
2976		DRM_INFO("recover vram bo from shadow done\n");
2977	else
2978		DRM_ERROR("recover vram bo from shadow failed\n");
2979
2980	return (r > 0) ? 0 : 1;
2981}
2982
2983/**
2984 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
2985 *
2986 * @adev: amdgpu device pointer
2987 *
2988 * Attempts a soft reset or, if that is insufficient, a full reset, and reinitializes the asic.
2989 * Returns 0 on success, negative error code on failure.
2990 */
2991static int amdgpu_device_reset(struct amdgpu_device *adev)
2992{
2993	bool need_full_reset, vram_lost = 0;
2994	int r;
2995
2996	need_full_reset = amdgpu_device_ip_need_full_reset(adev);
2997
2998	if (!need_full_reset) {
2999		amdgpu_device_ip_pre_soft_reset(adev);
3000		r = amdgpu_device_ip_soft_reset(adev);
3001		amdgpu_device_ip_post_soft_reset(adev);
3002		if (r || amdgpu_device_ip_check_soft_reset(adev)) {
3003			DRM_INFO("soft reset failed, will fall back to full reset!\n");
3004			need_full_reset = true;
3005		}
3006	}
3007
3008	if (need_full_reset) {
3009		r = amdgpu_device_ip_suspend(adev);
3010
3011retry:
3012		r = amdgpu_asic_reset(adev);
3013		/* post card */
3014		amdgpu_atom_asic_init(adev->mode_info.atom_context);
3015
3016		if (!r) {
3017			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
3018			r = amdgpu_device_ip_resume_phase1(adev);
3019			if (r)
3020				goto out;
3021
3022			vram_lost = amdgpu_device_check_vram_lost(adev);
3023			if (vram_lost) {
3024				DRM_ERROR("VRAM is lost!\n");
3025				atomic_inc(&adev->vram_lost_counter);
3026			}
3027
3028			r = amdgpu_gtt_mgr_recover(
3029				&adev->mman.bdev.man[TTM_PL_TT]);
3030			if (r)
3031				goto out;
3032
3033			r = amdgpu_device_ip_resume_phase2(adev);
3034			if (r)
3035				goto out;
3036
3037			if (vram_lost)
3038				amdgpu_device_fill_reset_magic(adev);
3039		}
3040	}
3041
3042out:
3043	if (!r) {
3044		amdgpu_irq_gpu_reset_resume_helper(adev);
3045		r = amdgpu_ib_ring_tests(adev);
3046		if (r) {
3047			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
3048			r = amdgpu_device_ip_suspend(adev);
3049			need_full_reset = true;
3050			goto retry;
3051		}
3052	}
3053
3054	if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
3055		r = amdgpu_device_handle_vram_lost(adev);
3056
3057	return r;
3058}
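
/*
 * Reset flow summary (derived from the function above): a soft reset is
 * attempted first unless some block demands a full reset; if the soft reset
 * fails or blocks remain hung, the path falls back to a full asic reset,
 * re-posts the card, resumes the IPs in two phases with GTT recovery in
 * between, and finally restores VRAM contents from GTT shadows when VRAM
 * was lost.
 */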
3059
3060/**
3061 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
3062 *
3063 * @adev: amdgpu device pointer
 * @from_hypervisor: true to request full GPU access from the hypervisor
3064 *
3065 * Performs a VF FLR and reinitializes the asic.
3066 * Returns 0 on success, negative error code on failure.
3067 */
3068static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3069				     bool from_hypervisor)
3070{
3071	int r;
3072
3073	if (from_hypervisor)
3074		r = amdgpu_virt_request_full_gpu(adev, true);
3075	else
3076		r = amdgpu_virt_reset_gpu(adev);
3077	if (r)
3078		return r;
3079
3080	/* Resume IP prior to SMC */
3081	r = amdgpu_device_ip_reinit_early_sriov(adev);
3082	if (r)
3083		goto error;
3084
3085	/* we need to recover the GART prior to running SMC/CP/SDMA resume */
3086	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
3087
3088	/* now we are okay to resume SMC/CP/SDMA */
3089	r = amdgpu_device_ip_reinit_late_sriov(adev);
3090	amdgpu_virt_release_full_gpu(adev, true);
3091	if (r)
3092		goto error;
3093
3094	amdgpu_irq_gpu_reset_resume_helper(adev);
3095	r = amdgpu_ib_ring_tests(adev);
3096
3097	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3098		atomic_inc(&adev->vram_lost_counter);
3099		r = amdgpu_device_handle_vram_lost(adev);
3100	}
3101
3102error:
3103
3104	return r;
3105}
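
/*
 * Note (derived from the function above): the VF must hold exclusive access
 * to the GPU between amdgpu_virt_request_full_gpu()/amdgpu_virt_reset_gpu()
 * and amdgpu_virt_release_full_gpu(); the split reinit keeps GART recovery
 * ahead of the SMC/CP/SDMA bring-up.
 */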
3106
3107/**
3108 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
3109 *
3110 * @adev: amdgpu device pointer
3111 * @job: which job triggered the hang, or NULL to recover all rings
3112 * @force: forces reset regardless of amdgpu_gpu_recovery
3113 *
3114 * Attempt to reset the GPU if it has hung (all asics).
3115 * Returns 0 for success or an error on failure.
3116 */
3117int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
3118			      struct amdgpu_job *job, bool force)
3119{
3120	struct drm_atomic_state *state = NULL;
3121	int i, r, resched;
3122
3123	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
3124		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
3125		return 0;
3126	}
3127
3128	if (!force && (amdgpu_gpu_recovery == 0 ||
3129			(amdgpu_gpu_recovery == -1  && !amdgpu_sriov_vf(adev)))) {
3130		DRM_INFO("GPU recovery disabled.\n");
3131		return 0;
3132	}
3133
3134	dev_info(adev->dev, "GPU reset begin!\n");
3135
3136	mutex_lock(&adev->lock_reset);
3137	atomic_inc(&adev->gpu_reset_counter);
3138	adev->in_gpu_reset = 1;
3139
3140	/* block TTM */
3141	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3142
3143	/* store modesetting */
3144	if (amdgpu_device_has_dc_support(adev))
3145		state = drm_atomic_helper_suspend(adev->ddev);
3146
3147	/* block all schedulers and reset given job's ring */
3148	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3149		struct amdgpu_ring *ring = adev->rings[i];
3150
3151		if (!ring || !ring->sched.thread)
3152			continue;
3153
3154		kthread_park(ring->sched.thread);
3155
3156		if (job && job->ring->idx != i)
3157			continue;
3158
3159		drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
3160
3161		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3162		amdgpu_fence_driver_force_completion(ring);
3163	}
3164
3165	if (amdgpu_sriov_vf(adev))
3166		r = amdgpu_device_reset_sriov(adev, job ? false : true);
3167	else
3168		r = amdgpu_device_reset(adev);
3169
3170	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3171		struct amdgpu_ring *ring = adev->rings[i];
3172
3173		if (!ring || !ring->sched.thread)
3174			continue;
3175
3176		/* only restart the scheduler of the given job's ring,
3177		 * or of all rings when @job is NULL, and only once the
3178		 * reset above has completed successfully
3179		 */
3180		if ((!job || job->ring->idx == i) && !r)
3181			drm_sched_job_recovery(&ring->sched);
3182
3183		kthread_unpark(ring->sched.thread);
3184	}
3185
3186	if (amdgpu_device_has_dc_support(adev)) {
3187		if (drm_atomic_helper_resume(adev->ddev, state))
3188			dev_info(adev->dev, "drm resume failed\n");
3189	} else {
3190		drm_helper_resume_force_mode(adev->ddev);
3191	}
3192
3193	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
3194
3195	if (r) {
3196		/* bad news, how do we tell it to userspace? */
3197		dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
3198		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
3199	} else {
3200		dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
3201	}
3202
3203	amdgpu_vf_error_trans_all(adev);
3204	adev->in_gpu_reset = 0;
3205	mutex_unlock(&adev->lock_reset);
3206	return r;
3207}
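
/*
 * Illustrative caller sketch (an assumption modeled on the scheduler timeout
 * path, not code from this file): a job-timeout handler would hand the hung
 * job to this function:
 *
 *	static void example_job_timedout(struct drm_sched_job *s_job)
 *	{
 *		struct amdgpu_job *job = to_amdgpu_job(s_job);
 *
 *		amdgpu_device_gpu_recover(job->adev, job, false);
 *	}
 */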
3208
3209/**
3210 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
3211 *
3212 * @adev: amdgpu_device pointer
3213 *
3214 * Fetches and stores in the driver the PCIE capabilities (gen speed
3215 * and lanes) of the slot the device is in. Handles APUs and
3216 * virtualized environments where PCIE config space may not be available.
3217 */
3218static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
3219{
3220	u32 mask;
3221	int ret;
3222
3223	if (amdgpu_pcie_gen_cap)
3224		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
3225
3226	if (amdgpu_pcie_lane_cap)
3227		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
3228
3229	/* covers APUs as well */
3230	if (pci_is_root_bus(adev->pdev->bus)) {
3231		if (adev->pm.pcie_gen_mask == 0)
3232			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3233		if (adev->pm.pcie_mlw_mask == 0)
3234			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3235		return;
3236	}
3237
3238	if (adev->pm.pcie_gen_mask == 0) {
3239		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3240		if (!ret) {
3241			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3242						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3243						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3244
3245			if (mask & DRM_PCIE_SPEED_25)
3246				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3247			if (mask & DRM_PCIE_SPEED_50)
3248				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3249			if (mask & DRM_PCIE_SPEED_80)
3250				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3251		} else {
3252			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3253		}
3254	}
3255	if (adev->pm.pcie_mlw_mask == 0) {
3256		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3257		if (!ret) {
3258			switch (mask) {
3259			case 32:
3260				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3261							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3262							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3263							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3264							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3265							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3266							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3267				break;
3268			case 16:
3269				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3270							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3271							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3272							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3273							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3274							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3275				break;
3276			case 12:
3277				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3278							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3279							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3280							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3281							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3282				break;
3283			case 8:
3284				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3285							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3286							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3287							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3288				break;
3289			case 4:
3290				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3291							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3292							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3293				break;
3294			case 2:
3295				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3296							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3297				break;
3298			case 1:
3299				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3300				break;
3301			default:
3302				break;
3303			}
3304		} else {
3305			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3306		}
3307	}
3308}
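
/*
 * Usage note: amdgpu_pcie_gen_cap and amdgpu_pcie_lane_cap are module
 * parameters, so the detected masks can be overridden at load time with
 * CAIL_* mask values, e.g. (illustrative values only):
 *
 *	modprobe amdgpu pcie_gen_cap=0x00070007 pcie_lane_cap=0x000f000f
 */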
3309