   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
  28#include <linux/power_supply.h>
  29#include <linux/kthread.h>
  30#include <linux/module.h>
  31#include <linux/console.h>
  32#include <linux/slab.h>
  33#include <linux/iommu.h>
  34#include <linux/pci.h>
  35#include <linux/pci-p2pdma.h>
  36#include <linux/apple-gmux.h>
  37
  38#include <drm/drm_aperture.h>
  39#include <drm/drm_atomic_helper.h>
  40#include <drm/drm_crtc_helper.h>
  41#include <drm/drm_fb_helper.h>
  42#include <drm/drm_probe_helper.h>
  43#include <drm/amdgpu_drm.h>
  44#include <linux/device.h>
  45#include <linux/vgaarb.h>
  46#include <linux/vga_switcheroo.h>
  47#include <linux/efi.h>
  48#include "amdgpu.h"
  49#include "amdgpu_trace.h"
  50#include "amdgpu_i2c.h"
  51#include "atom.h"
  52#include "amdgpu_atombios.h"
  53#include "amdgpu_atomfirmware.h"
  54#include "amd_pcie.h"
  55#ifdef CONFIG_DRM_AMDGPU_SI
  56#include "si.h"
  57#endif
  58#ifdef CONFIG_DRM_AMDGPU_CIK
  59#include "cik.h"
  60#endif
  61#include "vi.h"
  62#include "soc15.h"
  63#include "nv.h"
  64#include "bif/bif_4_1_d.h"
  65#include <linux/firmware.h>
  66#include "amdgpu_vf_error.h"
  67
  68#include "amdgpu_amdkfd.h"
  69#include "amdgpu_pm.h"
  70
  71#include "amdgpu_xgmi.h"
  72#include "amdgpu_ras.h"
  73#include "amdgpu_pmu.h"
  74#include "amdgpu_fru_eeprom.h"
  75#include "amdgpu_reset.h"
  76#include "amdgpu_virt.h"
  77
  78#include <linux/suspend.h>
  79#include <drm/task_barrier.h>
  80#include <linux/pm_runtime.h>
  81
  82#include <drm/drm_drv.h>
  83
  84#if IS_ENABLED(CONFIG_X86)
  85#include <asm/intel-family.h>
  86#endif
  87
  88MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
  89MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
  90MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
  91MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
  92MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
  93MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
  94MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
  95
  96#define AMDGPU_RESUME_MS		2000
  97#define AMDGPU_MAX_RETRY_LIMIT		2
  98#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
  99#define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2)
 100#define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
 101#define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)
 102
 103static const struct drm_driver amdgpu_kms_driver;
 104
 105const char *amdgpu_asic_name[] = {
 106	"TAHITI",
 107	"PITCAIRN",
 108	"VERDE",
 109	"OLAND",
 110	"HAINAN",
 111	"BONAIRE",
 112	"KAVERI",
 113	"KABINI",
 114	"HAWAII",
 115	"MULLINS",
 116	"TOPAZ",
 117	"TONGA",
 118	"FIJI",
 119	"CARRIZO",
 120	"STONEY",
 121	"POLARIS10",
 122	"POLARIS11",
 123	"POLARIS12",
 124	"VEGAM",
 125	"VEGA10",
 126	"VEGA12",
 127	"VEGA20",
 128	"RAVEN",
 129	"ARCTURUS",
 130	"RENOIR",
 131	"ALDEBARAN",
 132	"NAVI10",
 133	"CYAN_SKILLFISH",
 134	"NAVI14",
 135	"NAVI12",
 136	"SIENNA_CICHLID",
 137	"NAVY_FLOUNDER",
 138	"VANGOGH",
 139	"DIMGREY_CAVEFISH",
 140	"BEIGE_GOBY",
 141	"YELLOW_CARP",
 142	"IP DISCOVERY",
 143	"LAST",
 144};
 145
 146/**
 147 * DOC: pcie_replay_count
 148 *
 149 * The amdgpu driver provides a sysfs API for reporting the total number
  150 * of PCIe replays (NAKs).
  151 * The file pcie_replay_count is used for this and returns the total
  152 * number of replays as a sum of the NAKs generated and NAKs received.
 153 */
 154
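/*
 * Usage sketch (illustrative, not from this file; the card index depends
 * on device enumeration): the attribute can be read from user space with
 *
 *   cat /sys/class/drm/card0/device/pcie_replay_count
 */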
 155static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
 156		struct device_attribute *attr, char *buf)
 157{
 158	struct drm_device *ddev = dev_get_drvdata(dev);
 159	struct amdgpu_device *adev = drm_to_adev(ddev);
 160	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
 161
 162	return sysfs_emit(buf, "%llu\n", cnt);
 163}
 164
 165static DEVICE_ATTR(pcie_replay_count, 0444,
 166		amdgpu_device_get_pcie_replay_count, NULL);
 167
 168static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
 169					  struct bin_attribute *attr, char *buf,
 170					  loff_t ppos, size_t count)
 171{
 172	struct device *dev = kobj_to_dev(kobj);
 173	struct drm_device *ddev = dev_get_drvdata(dev);
 174	struct amdgpu_device *adev = drm_to_adev(ddev);
 175	ssize_t bytes_read;
 176
 177	switch (ppos) {
 178	case AMDGPU_SYS_REG_STATE_XGMI:
 179		bytes_read = amdgpu_asic_get_reg_state(
 180			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
 181		break;
 182	case AMDGPU_SYS_REG_STATE_WAFL:
 183		bytes_read = amdgpu_asic_get_reg_state(
 184			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
 185		break;
 186	case AMDGPU_SYS_REG_STATE_PCIE:
 187		bytes_read = amdgpu_asic_get_reg_state(
 188			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
 189		break;
 190	case AMDGPU_SYS_REG_STATE_USR:
 191		bytes_read = amdgpu_asic_get_reg_state(
 192			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
 193		break;
 194	case AMDGPU_SYS_REG_STATE_USR_1:
 195		bytes_read = amdgpu_asic_get_reg_state(
 196			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
 197		break;
 198	default:
 199		return -EINVAL;
 200	}
 201
 202	return bytes_read;
 203}
 204
 205BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
 206	 AMDGPU_SYS_REG_STATE_END);
 207
 208int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
 209{
 210	int ret;
 211
 212	if (!amdgpu_asic_get_reg_state_supported(adev))
 213		return 0;
 214
 215	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
 216
 217	return ret;
 218}
 219
 220void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
 221{
 222	if (!amdgpu_asic_get_reg_state_supported(adev))
 223		return;
 224	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
 225}
 226
 227/**
 228 * DOC: board_info
 229 *
  230 * The amdgpu driver provides a sysfs API for reporting board-related information.
 231 * It provides the form factor information in the format
 232 *
 233 *   type : form factor
 234 *
 235 * Possible form factor values
 236 *
 237 * - "cem"		- PCIE CEM card
 238 * - "oam"		- Open Compute Accelerator Module
 239 * - "unknown"	- Not known
 240 *
 241 */
 242
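/*
 * Usage sketch (illustrative; the card index and value depend on the
 * system): reading the attribute returns one "type : <form factor>" line:
 *
 *   $ cat /sys/class/drm/card0/device/board_info
 *   type : oam
 */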
 243static ssize_t amdgpu_device_get_board_info(struct device *dev,
 244					    struct device_attribute *attr,
 245					    char *buf)
 246{
 247	struct drm_device *ddev = dev_get_drvdata(dev);
 248	struct amdgpu_device *adev = drm_to_adev(ddev);
 249	enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
 250	const char *pkg;
 251
 252	if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
 253		pkg_type = adev->smuio.funcs->get_pkg_type(adev);
 254
 255	switch (pkg_type) {
 256	case AMDGPU_PKG_TYPE_CEM:
 257		pkg = "cem";
 258		break;
 259	case AMDGPU_PKG_TYPE_OAM:
 260		pkg = "oam";
 261		break;
 262	default:
 263		pkg = "unknown";
 264		break;
 265	}
 266
 267	return sysfs_emit(buf, "%s : %s\n", "type", pkg);
 268}
 269
 270static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);
 271
 272static struct attribute *amdgpu_board_attrs[] = {
 273	&dev_attr_board_info.attr,
 274	NULL,
 275};
 276
 277static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
 278					     struct attribute *attr, int n)
 279{
 280	struct device *dev = kobj_to_dev(kobj);
 281	struct drm_device *ddev = dev_get_drvdata(dev);
 282	struct amdgpu_device *adev = drm_to_adev(ddev);
 283
 284	if (adev->flags & AMD_IS_APU)
 285		return 0;
 286
 287	return attr->mode;
 288}
 289
 290static const struct attribute_group amdgpu_board_attrs_group = {
 291	.attrs = amdgpu_board_attrs,
 292	.is_visible = amdgpu_board_attrs_is_visible
 293};
 294
 295static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 296
 297
 298/**
 299 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 300 *
 301 * @dev: drm_device pointer
 302 *
 303 * Returns true if the device is a dGPU with ATPX power control,
 304 * otherwise return false.
 305 */
 306bool amdgpu_device_supports_px(struct drm_device *dev)
 307{
 308	struct amdgpu_device *adev = drm_to_adev(dev);
 309
 310	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
 311		return true;
 312	return false;
 313}
 314
 315/**
 316 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 317 *
 318 * @dev: drm_device pointer
 319 *
 320 * Returns true if the device is a dGPU with ACPI power control,
 321 * otherwise return false.
 322 */
 323bool amdgpu_device_supports_boco(struct drm_device *dev)
 324{
 325	struct amdgpu_device *adev = drm_to_adev(dev);
 326
 327	if (adev->has_pr3 ||
 328	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
 329		return true;
 330	return false;
 331}
 332
 333/**
 334 * amdgpu_device_supports_baco - Does the device support BACO
 335 *
 336 * @dev: drm_device pointer
 337 *
  338 * Returns true if the device supports BACO,
 339 * otherwise return false.
 340 */
 341bool amdgpu_device_supports_baco(struct drm_device *dev)
 342{
 343	struct amdgpu_device *adev = drm_to_adev(dev);
 344
 345	return amdgpu_asic_supports_baco(adev);
 346}
 347
 348/**
  349 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 350 * smart shift support
 351 *
 352 * @dev: drm_device pointer
 353 *
 354 * Returns true if the device is a dGPU with Smart Shift support,
 355 * otherwise returns false.
 356 */
 357bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
 358{
 359	return (amdgpu_device_supports_boco(dev) &&
 360		amdgpu_acpi_is_power_shift_control_supported());
 361}
 362
 363/*
 364 * VRAM access helper functions
 365 */
 366
 367/**
 368 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 369 *
 370 * @adev: amdgpu_device pointer
 371 * @pos: offset of the buffer in vram
 372 * @buf: virtual address of the buffer in system memory
  373 * @size: read/write size; the buffer at @buf must be at least @size bytes
 374 * @write: true - write to vram, otherwise - read from vram
 375 */
 376void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
 377			     void *buf, size_t size, bool write)
 378{
 379	unsigned long flags;
 380	uint32_t hi = ~0, tmp = 0;
 381	uint32_t *data = buf;
 382	uint64_t last;
 383	int idx;
 384
 385	if (!drm_dev_enter(adev_to_drm(adev), &idx))
 386		return;
 387
 388	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
 389
 390	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 391	for (last = pos + size; pos < last; pos += 4) {
 392		tmp = pos >> 31;
 393
 394		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
 395		if (tmp != hi) {
 396			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
 397			hi = tmp;
 398		}
 399		if (write)
 400			WREG32_NO_KIQ(mmMM_DATA, *data++);
 401		else
 402			*data++ = RREG32_NO_KIQ(mmMM_DATA);
 403	}
 404
 405	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 406	drm_dev_exit(idx);
 407}
 408
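/*
 * Usage sketch (illustrative, not from this file): read 16 bytes of VRAM
 * at offset 0x1000 into a stack buffer; @pos and @size must be 4-byte
 * aligned or the BUG_ON above triggers:
 *
 *   u32 data[4];
 *
 *   amdgpu_device_mm_access(adev, 0x1000, data, sizeof(data), false);
 */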
 409/**
  410 * amdgpu_device_aper_access - access vram via the vram aperture
 411 *
 412 * @adev: amdgpu_device pointer
 413 * @pos: offset of the buffer in vram
 414 * @buf: virtual address of the buffer in system memory
  415 * @size: read/write size; the buffer at @buf must be at least @size bytes
 416 * @write: true - write to vram, otherwise - read from vram
 417 *
  418 * Returns the number of bytes transferred.
 419 */
 420size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
 421				 void *buf, size_t size, bool write)
 422{
 423#ifdef CONFIG_64BIT
 424	void __iomem *addr;
 425	size_t count = 0;
 426	uint64_t last;
 427
 428	if (!adev->mman.aper_base_kaddr)
 429		return 0;
 430
 431	last = min(pos + size, adev->gmc.visible_vram_size);
 432	if (last > pos) {
 433		addr = adev->mman.aper_base_kaddr + pos;
 434		count = last - pos;
 435
 436		if (write) {
 437			memcpy_toio(addr, buf, count);
 438			/* Make sure HDP write cache flush happens without any reordering
 439			 * after the system memory contents are sent over PCIe device
 440			 */
 441			mb();
 442			amdgpu_device_flush_hdp(adev, NULL);
 443		} else {
 444			amdgpu_device_invalidate_hdp(adev, NULL);
 445			/* Make sure HDP read cache is invalidated before issuing a read
 446			 * to the PCIe device
 447			 */
 448			mb();
 449			memcpy_fromio(buf, addr, count);
 450		}
 451
 452	}
 453
 454	return count;
 455#else
 456	return 0;
 457#endif
 458}
 459
 460/**
 461 * amdgpu_device_vram_access - read/write a buffer in vram
 462 *
 463 * @adev: amdgpu_device pointer
 464 * @pos: offset of the buffer in vram
 465 * @buf: virtual address of the buffer in system memory
  466 * @size: read/write size; the buffer at @buf must be at least @size bytes
 467 * @write: true - write to vram, otherwise - read from vram
 468 */
 469void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 470			       void *buf, size_t size, bool write)
 471{
 472	size_t count;
 473
  474	/* try to use the vram aperture to access vram first */
 475	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
 476	size -= count;
 477	if (size) {
  478		/* use MM_INDEX/MM_DATA to access the rest of vram */
 479		pos += count;
 480		buf += count;
 481		amdgpu_device_mm_access(adev, pos, buf, size, write);
 482	}
 483}
 484
 485/*
 486 * register access helper functions.
 487 */
 488
 489/* Check if hw access should be skipped because of hotplug or device error */
 490bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
 491{
 492	if (adev->no_hw_access)
 493		return true;
 494
 495#ifdef CONFIG_LOCKDEP
 496	/*
 497	 * This is a bit complicated to understand, so worth a comment. What we assert
 498	 * here is that the GPU reset is not running on another thread in parallel.
 499	 *
 500	 * For this we trylock the read side of the reset semaphore, if that succeeds
  501 * we know that the reset is not running in parallel.
 502	 *
 503	 * If the trylock fails we assert that we are either already holding the read
 504	 * side of the lock or are the reset thread itself and hold the write side of
 505	 * the lock.
 506	 */
 507	if (in_task()) {
 508		if (down_read_trylock(&adev->reset_domain->sem))
 509			up_read(&adev->reset_domain->sem);
 510		else
 511			lockdep_assert_held(&adev->reset_domain->sem);
 512	}
 513#endif
 514	return false;
 515}
 516
 517/**
 518 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 519 *
 520 * @adev: amdgpu_device pointer
 521 * @reg: dword aligned register offset
 522 * @acc_flags: access flags which require special behavior
 523 *
 524 * Returns the 32 bit value from the offset specified.
 525 */
 526uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
 527			    uint32_t reg, uint32_t acc_flags)
 528{
 529	uint32_t ret;
 530
 531	if (amdgpu_device_skip_hw_access(adev))
 532		return 0;
 533
 534	if ((reg * 4) < adev->rmmio_size) {
 535		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 536		    amdgpu_sriov_runtime(adev) &&
 537		    down_read_trylock(&adev->reset_domain->sem)) {
 538			ret = amdgpu_kiq_rreg(adev, reg, 0);
 539			up_read(&adev->reset_domain->sem);
 540		} else {
 541			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 542		}
 543	} else {
 544		ret = adev->pcie_rreg(adev, reg * 4);
 545	}
 546
 547	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
 548
 549	return ret;
 550}
 551
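/*
 * Callers normally go through the RREG32()/WREG32() style macros from
 * amdgpu.h, which wrap amdgpu_device_rreg()/amdgpu_device_wreg(); a minimal
 * sketch (assuming an adev in scope):
 *
 *   u32 val = RREG32(mmMM_INDEX);
 */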
 552/*
  553 * MMIO register byte-read helper functions
  554 * @offset: byte offset from MMIO start
 555 */
 556
 557/**
 558 * amdgpu_mm_rreg8 - read a memory mapped IO register
 559 *
 560 * @adev: amdgpu_device pointer
 561 * @offset: byte aligned register offset
 562 *
 563 * Returns the 8 bit value from the offset specified.
 564 */
 565uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
 566{
 567	if (amdgpu_device_skip_hw_access(adev))
 568		return 0;
 569
 570	if (offset < adev->rmmio_size)
 571		return (readb(adev->rmmio + offset));
 572	BUG();
 573}
 574
 575
 576/**
 577 * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
 578 *
 579 * @adev: amdgpu_device pointer
 580 * @reg: dword aligned register offset
 581 * @acc_flags: access flags which require special behavior
 582 * @xcc_id: xcc accelerated compute core id
 583 *
 584 * Returns the 32 bit value from the offset specified.
 585 */
 586uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
 587				uint32_t reg, uint32_t acc_flags,
 588				uint32_t xcc_id)
 589{
 590	uint32_t ret, rlcg_flag;
 591
 592	if (amdgpu_device_skip_hw_access(adev))
 593		return 0;
 594
 595	if ((reg * 4) < adev->rmmio_size) {
 596		if (amdgpu_sriov_vf(adev) &&
 597		    !amdgpu_sriov_runtime(adev) &&
 598		    adev->gfx.rlc.rlcg_reg_access_supported &&
 599		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
 600							 GC_HWIP, false,
 601							 &rlcg_flag)) {
 602			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id);
 603		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 604		    amdgpu_sriov_runtime(adev) &&
 605		    down_read_trylock(&adev->reset_domain->sem)) {
 606			ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
 607			up_read(&adev->reset_domain->sem);
 608		} else {
 609			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 610		}
 611	} else {
 612		ret = adev->pcie_rreg(adev, reg * 4);
 613	}
 614
 615	return ret;
 616}
 617
 618/*
  619 * MMIO register byte-write helper functions
  620 * @offset: byte offset from MMIO start
  621 * @value: the value to be written to the register
 622 */
 623
 624/**
  625 * amdgpu_mm_wreg8 - write a memory mapped IO register
 626 *
 627 * @adev: amdgpu_device pointer
 628 * @offset: byte aligned register offset
 629 * @value: 8 bit value to write
 630 *
 631 * Writes the value specified to the offset specified.
 632 */
 633void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
 634{
 635	if (amdgpu_device_skip_hw_access(adev))
 636		return;
 637
 638	if (offset < adev->rmmio_size)
 639		writeb(value, adev->rmmio + offset);
 640	else
 641		BUG();
 642}
 643
 644/**
 645 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 646 *
 647 * @adev: amdgpu_device pointer
 648 * @reg: dword aligned register offset
 649 * @v: 32 bit value to write to the register
 650 * @acc_flags: access flags which require special behavior
 651 *
 652 * Writes the value specified to the offset specified.
 653 */
 654void amdgpu_device_wreg(struct amdgpu_device *adev,
 655			uint32_t reg, uint32_t v,
 656			uint32_t acc_flags)
 657{
 658	if (amdgpu_device_skip_hw_access(adev))
 659		return;
 660
 661	if ((reg * 4) < adev->rmmio_size) {
 662		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 663		    amdgpu_sriov_runtime(adev) &&
 664		    down_read_trylock(&adev->reset_domain->sem)) {
 665			amdgpu_kiq_wreg(adev, reg, v, 0);
 666			up_read(&adev->reset_domain->sem);
 667		} else {
 668			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 669		}
 670	} else {
 671		adev->pcie_wreg(adev, reg * 4, v);
 672	}
 673
 674	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
 675}
 676
 677/**
  678 * amdgpu_mm_wreg_mmio_rlc - write a register either via direct/indirect mmio or via the RLC path if in range
 679 *
 680 * @adev: amdgpu_device pointer
 681 * @reg: mmio/rlc register
 682 * @v: value to write
 683 * @xcc_id: xcc accelerated compute core id
 684 *
  685 * This function is invoked only for debugfs register access.
 686 */
 687void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
 688			     uint32_t reg, uint32_t v,
 689			     uint32_t xcc_id)
 690{
 691	if (amdgpu_device_skip_hw_access(adev))
 692		return;
 693
 694	if (amdgpu_sriov_fullaccess(adev) &&
 695	    adev->gfx.rlc.funcs &&
 696	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
 697		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
 698			return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
 699	} else if ((reg * 4) >= adev->rmmio_size) {
 700		adev->pcie_wreg(adev, reg * 4, v);
 701	} else {
 702		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 703	}
 704}
 705
 706/**
 707 * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
 708 *
 709 * @adev: amdgpu_device pointer
 710 * @reg: dword aligned register offset
 711 * @v: 32 bit value to write to the register
 712 * @acc_flags: access flags which require special behavior
 713 * @xcc_id: xcc accelerated compute core id
 714 *
 715 * Writes the value specified to the offset specified.
 716 */
 717void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
 718			uint32_t reg, uint32_t v,
 719			uint32_t acc_flags, uint32_t xcc_id)
 720{
 721	uint32_t rlcg_flag;
 722
 723	if (amdgpu_device_skip_hw_access(adev))
 724		return;
 725
 726	if ((reg * 4) < adev->rmmio_size) {
 727		if (amdgpu_sriov_vf(adev) &&
 728		    !amdgpu_sriov_runtime(adev) &&
 729		    adev->gfx.rlc.rlcg_reg_access_supported &&
 730		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
 731							 GC_HWIP, true,
 732							 &rlcg_flag)) {
 733			amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id);
 734		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
 735		    amdgpu_sriov_runtime(adev) &&
 736		    down_read_trylock(&adev->reset_domain->sem)) {
 737			amdgpu_kiq_wreg(adev, reg, v, xcc_id);
 738			up_read(&adev->reset_domain->sem);
 739		} else {
 740			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 741		}
 742	} else {
 743		adev->pcie_wreg(adev, reg * 4, v);
 744	}
 745}
 746
 747/**
 748 * amdgpu_device_indirect_rreg - read an indirect register
 749 *
 750 * @adev: amdgpu_device pointer
 751 * @reg_addr: indirect register address to read from
 752 *
 753 * Returns the value of indirect register @reg_addr
 754 */
 755u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
 756				u32 reg_addr)
 757{
 758	unsigned long flags, pcie_index, pcie_data;
 759	void __iomem *pcie_index_offset;
 760	void __iomem *pcie_data_offset;
 761	u32 r;
 762
 763	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 764	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 765
 766	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 767	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 768	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 769
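	/* Write the target address to the index register, then read it back
	 * to flush the posted write before touching the data register; the
	 * same idiom is used by all the indirect accessors below.
	 */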
 770	writel(reg_addr, pcie_index_offset);
 771	readl(pcie_index_offset);
 772	r = readl(pcie_data_offset);
 773	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 774
 775	return r;
 776}
 777
 778u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
 779				    u64 reg_addr)
 780{
 781	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
 782	u32 r;
 783	void __iomem *pcie_index_offset;
 784	void __iomem *pcie_index_hi_offset;
 785	void __iomem *pcie_data_offset;
 786
 787	if (unlikely(!adev->nbio.funcs)) {
 788		pcie_index = AMDGPU_PCIE_INDEX_FALLBACK;
 789		pcie_data = AMDGPU_PCIE_DATA_FALLBACK;
 790	} else {
 791		pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 792		pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 793	}
 794
 795	if (reg_addr >> 32) {
 796		if (unlikely(!adev->nbio.funcs))
 797			pcie_index_hi = AMDGPU_PCIE_INDEX_HI_FALLBACK;
 798		else
 799			pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
 800	} else {
 801		pcie_index_hi = 0;
 802	}
 803
 804	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 805	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 806	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 807	if (pcie_index_hi != 0)
 808		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
 809				pcie_index_hi * 4;
 810
 811	writel(reg_addr, pcie_index_offset);
 812	readl(pcie_index_offset);
 813	if (pcie_index_hi != 0) {
 814		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 815		readl(pcie_index_hi_offset);
 816	}
 817	r = readl(pcie_data_offset);
 818
 819	/* clear the high bits */
 820	if (pcie_index_hi != 0) {
 821		writel(0, pcie_index_hi_offset);
 822		readl(pcie_index_hi_offset);
 823	}
 824
 825	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 826
 827	return r;
 828}
 829
 830/**
 831 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
 832 *
 833 * @adev: amdgpu_device pointer
 834 * @reg_addr: indirect register address to read from
 835 *
 836 * Returns the value of indirect register @reg_addr
 837 */
 838u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
 839				  u32 reg_addr)
 840{
 841	unsigned long flags, pcie_index, pcie_data;
 842	void __iomem *pcie_index_offset;
 843	void __iomem *pcie_data_offset;
 844	u64 r;
 845
 846	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 847	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 848
 849	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 850	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 851	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 852
 853	/* read low 32 bits */
 854	writel(reg_addr, pcie_index_offset);
 855	readl(pcie_index_offset);
 856	r = readl(pcie_data_offset);
 857	/* read high 32 bits */
 858	writel(reg_addr + 4, pcie_index_offset);
 859	readl(pcie_index_offset);
 860	r |= ((u64)readl(pcie_data_offset) << 32);
 861	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 862
 863	return r;
 864}
 865
 866u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
 867				  u64 reg_addr)
 868{
 869	unsigned long flags, pcie_index, pcie_data;
 870	unsigned long pcie_index_hi = 0;
 871	void __iomem *pcie_index_offset;
 872	void __iomem *pcie_index_hi_offset;
 873	void __iomem *pcie_data_offset;
 874	u64 r;
 875
 876	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 877	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 878	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
 879		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
 880
 881	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 882	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 883	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 884	if (pcie_index_hi != 0)
 885		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
 886			pcie_index_hi * 4;
 887
 888	/* read low 32 bits */
 889	writel(reg_addr, pcie_index_offset);
 890	readl(pcie_index_offset);
 891	if (pcie_index_hi != 0) {
 892		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 893		readl(pcie_index_hi_offset);
 894	}
 895	r = readl(pcie_data_offset);
 896	/* read high 32 bits */
 897	writel(reg_addr + 4, pcie_index_offset);
 898	readl(pcie_index_offset);
 899	if (pcie_index_hi != 0) {
 900		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 901		readl(pcie_index_hi_offset);
 902	}
 903	r |= ((u64)readl(pcie_data_offset) << 32);
 904
 905	/* clear the high bits */
 906	if (pcie_index_hi != 0) {
 907		writel(0, pcie_index_hi_offset);
 908		readl(pcie_index_hi_offset);
 909	}
 910
 911	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 912
 913	return r;
 914}
 915
 916/**
 917 * amdgpu_device_indirect_wreg - write an indirect register address
 918 *
 919 * @adev: amdgpu_device pointer
 920 * @reg_addr: indirect register offset
 921 * @reg_data: indirect register data
 922 *
 923 */
 924void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
 925				 u32 reg_addr, u32 reg_data)
 926{
 927	unsigned long flags, pcie_index, pcie_data;
 928	void __iomem *pcie_index_offset;
 929	void __iomem *pcie_data_offset;
 930
 931	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 932	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 933
 934	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 935	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 936	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 937
 938	writel(reg_addr, pcie_index_offset);
 939	readl(pcie_index_offset);
 940	writel(reg_data, pcie_data_offset);
 941	readl(pcie_data_offset);
 942	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 943}
 944
 945void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
 946				     u64 reg_addr, u32 reg_data)
 947{
 948	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
 949	void __iomem *pcie_index_offset;
 950	void __iomem *pcie_index_hi_offset;
 951	void __iomem *pcie_data_offset;
 952
 953	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 954	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 955	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
 956		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
 957	else
 958		pcie_index_hi = 0;
 959
 960	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 961	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 962	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
 963	if (pcie_index_hi != 0)
 964		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
 965				pcie_index_hi * 4;
 966
 967	writel(reg_addr, pcie_index_offset);
 968	readl(pcie_index_offset);
 969	if (pcie_index_hi != 0) {
 970		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
 971		readl(pcie_index_hi_offset);
 972	}
 973	writel(reg_data, pcie_data_offset);
 974	readl(pcie_data_offset);
 975
 976	/* clear the high bits */
 977	if (pcie_index_hi != 0) {
 978		writel(0, pcie_index_hi_offset);
 979		readl(pcie_index_hi_offset);
 980	}
 981
 982	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 983}
 984
 985/**
 986 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
 987 *
 988 * @adev: amdgpu_device pointer
 989 * @reg_addr: indirect register offset
 990 * @reg_data: indirect register data
 991 *
 992 */
 993void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
 994				   u32 reg_addr, u64 reg_data)
 995{
 996	unsigned long flags, pcie_index, pcie_data;
 997	void __iomem *pcie_index_offset;
 998	void __iomem *pcie_data_offset;
 999
1000	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1001	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1002
1003	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1004	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1005	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1006
1007	/* write low 32 bits */
1008	writel(reg_addr, pcie_index_offset);
1009	readl(pcie_index_offset);
1010	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
1011	readl(pcie_data_offset);
1012	/* write high 32 bits */
1013	writel(reg_addr + 4, pcie_index_offset);
1014	readl(pcie_index_offset);
1015	writel((u32)(reg_data >> 32), pcie_data_offset);
1016	readl(pcie_data_offset);
1017	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1018}
1019
1020void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
1021				   u64 reg_addr, u64 reg_data)
1022{
1023	unsigned long flags, pcie_index, pcie_data;
1024	unsigned long pcie_index_hi = 0;
1025	void __iomem *pcie_index_offset;
1026	void __iomem *pcie_index_hi_offset;
1027	void __iomem *pcie_data_offset;
1028
1029	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1030	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1031	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
1032		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
1033
1034	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1035	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1036	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1037	if (pcie_index_hi != 0)
1038		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
1039				pcie_index_hi * 4;
1040
1041	/* write low 32 bits */
1042	writel(reg_addr, pcie_index_offset);
1043	readl(pcie_index_offset);
1044	if (pcie_index_hi != 0) {
1045		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1046		readl(pcie_index_hi_offset);
1047	}
1048	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
1049	readl(pcie_data_offset);
1050	/* write high 32 bits */
1051	writel(reg_addr + 4, pcie_index_offset);
1052	readl(pcie_index_offset);
1053	if (pcie_index_hi != 0) {
1054		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1055		readl(pcie_index_hi_offset);
1056	}
1057	writel((u32)(reg_data >> 32), pcie_data_offset);
1058	readl(pcie_data_offset);
1059
1060	/* clear the high bits */
1061	if (pcie_index_hi != 0) {
1062		writel(0, pcie_index_hi_offset);
1063		readl(pcie_index_hi_offset);
1064	}
1065
1066	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1067}
1068
1069/**
1070 * amdgpu_device_get_rev_id - query device rev_id
1071 *
1072 * @adev: amdgpu_device pointer
1073 *
1074 * Return device rev_id
1075 */
1076u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
1077{
1078	return adev->nbio.funcs->get_rev_id(adev);
1079}
1080
1081/**
1082 * amdgpu_invalid_rreg - dummy reg read function
1083 *
1084 * @adev: amdgpu_device pointer
1085 * @reg: offset of register
1086 *
1087 * Dummy register read function.  Used for register blocks
1088 * that certain asics don't have (all asics).
1089 * Returns the value in the register.
1090 */
1091static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
1092{
1093	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
1094	BUG();
1095	return 0;
1096}
1097
1098static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
1099{
1100	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
1101	BUG();
1102	return 0;
1103}
1104
1105/**
1106 * amdgpu_invalid_wreg - dummy reg write function
1107 *
1108 * @adev: amdgpu_device pointer
1109 * @reg: offset of register
1110 * @v: value to write to the register
1111 *
 1112 * Dummy register write function.  Used for register blocks
1113 * that certain asics don't have (all asics).
1114 */
1115static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
1116{
1117	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
1118		  reg, v);
1119	BUG();
1120}
1121
1122static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
1123{
1124	DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
1125		  reg, v);
1126	BUG();
1127}
1128
1129/**
1130 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
1131 *
1132 * @adev: amdgpu_device pointer
1133 * @reg: offset of register
1134 *
1135 * Dummy register read function.  Used for register blocks
1136 * that certain asics don't have (all asics).
1137 * Returns the value in the register.
1138 */
1139static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
1140{
1141	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
1142	BUG();
1143	return 0;
1144}
1145
1146static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
1147{
1148	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
1149	BUG();
1150	return 0;
1151}
1152
1153/**
1154 * amdgpu_invalid_wreg64 - dummy reg write function
1155 *
1156 * @adev: amdgpu_device pointer
1157 * @reg: offset of register
1158 * @v: value to write to the register
1159 *
 1160 * Dummy register write function.  Used for register blocks
1161 * that certain asics don't have (all asics).
1162 */
1163static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
1164{
1165	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
1166		  reg, v);
1167	BUG();
1168}
1169
1170static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
1171{
1172	DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
1173		  reg, v);
1174	BUG();
1175}
1176
1177/**
1178 * amdgpu_block_invalid_rreg - dummy reg read function
1179 *
1180 * @adev: amdgpu_device pointer
1181 * @block: offset of instance
1182 * @reg: offset of register
1183 *
1184 * Dummy register read function.  Used for register blocks
1185 * that certain asics don't have (all asics).
1186 * Returns the value in the register.
1187 */
1188static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
1189					  uint32_t block, uint32_t reg)
1190{
1191	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
1192		  reg, block);
1193	BUG();
1194	return 0;
1195}
1196
1197/**
1198 * amdgpu_block_invalid_wreg - dummy reg write function
1199 *
1200 * @adev: amdgpu_device pointer
1201 * @block: offset of instance
1202 * @reg: offset of register
1203 * @v: value to write to the register
1204 *
 1205 * Dummy register write function.  Used for register blocks
1206 * that certain asics don't have (all asics).
1207 */
1208static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
1209				      uint32_t block,
1210				      uint32_t reg, uint32_t v)
1211{
1212	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
1213		  reg, block, v);
1214	BUG();
1215}
1216
1217/**
1218 * amdgpu_device_asic_init - Wrapper for atom asic_init
1219 *
1220 * @adev: amdgpu_device pointer
1221 *
1222 * Does any asic specific work and then calls atom asic init.
1223 */
1224static int amdgpu_device_asic_init(struct amdgpu_device *adev)
1225{
1226	int ret;
1227
1228	amdgpu_asic_pre_asic_init(adev);
1229
1230	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1231	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
1232		amdgpu_psp_wait_for_bootloader(adev);
1233		ret = amdgpu_atomfirmware_asic_init(adev, true);
1234		return ret;
1235	} else {
1236		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
1237	}
1238
1239	return 0;
1240}
1241
1242/**
1243 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
1244 *
1245 * @adev: amdgpu_device pointer
1246 *
1247 * Allocates a scratch page of VRAM for use by various things in the
1248 * driver.
1249 */
1250static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
1251{
1252	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
1253				       AMDGPU_GEM_DOMAIN_VRAM |
1254				       AMDGPU_GEM_DOMAIN_GTT,
1255				       &adev->mem_scratch.robj,
1256				       &adev->mem_scratch.gpu_addr,
1257				       (void **)&adev->mem_scratch.ptr);
1258}
1259
1260/**
1261 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
1262 *
1263 * @adev: amdgpu_device pointer
1264 *
1265 * Frees the VRAM scratch page.
1266 */
1267static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
1268{
1269	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
1270}
1271
1272/**
1273 * amdgpu_device_program_register_sequence - program an array of registers.
1274 *
1275 * @adev: amdgpu_device pointer
1276 * @registers: pointer to the register array
1277 * @array_size: size of the register array
1278 *
 1279 * Programs an array of registers with AND/OR masks.
1280 * This is a helper for setting golden registers.
1281 */
1282void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
1283					     const u32 *registers,
1284					     const u32 array_size)
1285{
1286	u32 tmp, reg, and_mask, or_mask;
1287	int i;
1288
1289	if (array_size % 3)
1290		return;
1291
1292	for (i = 0; i < array_size; i += 3) {
1293		reg = registers[i + 0];
1294		and_mask = registers[i + 1];
1295		or_mask = registers[i + 2];
1296
1297		if (and_mask == 0xffffffff) {
1298			tmp = or_mask;
1299		} else {
1300			tmp = RREG32(reg);
1301			tmp &= ~and_mask;
1302			if (adev->family >= AMDGPU_FAMILY_AI)
1303				tmp |= (or_mask & and_mask);
1304			else
1305				tmp |= or_mask;
1306		}
1307		WREG32(reg, tmp);
1308	}
1309}
1310
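/*
 * Usage sketch (the table contents are illustrative): golden-settings
 * arrays are flat {register, AND mask, OR mask} triplets; an AND mask of
 * 0xffffffff replaces the register value outright:
 *
 *   static const u32 golden_settings_example[] = {
 *           mmMM_INDEX_HI, 0xffffffff, 0x00000000,
 *   };
 *
 *   amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *                                           ARRAY_SIZE(golden_settings_example));
 */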
1311/**
1312 * amdgpu_device_pci_config_reset - reset the GPU
1313 *
1314 * @adev: amdgpu_device pointer
1315 *
1316 * Resets the GPU using the pci config reset sequence.
1317 * Only applicable to asics prior to vega10.
1318 */
1319void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1320{
1321	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1322}
1323
1324/**
1325 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1326 *
1327 * @adev: amdgpu_device pointer
1328 *
1329 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1330 */
1331int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1332{
1333	return pci_reset_function(adev->pdev);
1334}
1335
1336/*
1337 * amdgpu_device_wb_*()
1338 * Writeback is the method by which the GPU updates special pages in memory
 1339 * with the status of certain GPU events (fences, ring pointers, etc.).
1340 */
1341
1342/**
1343 * amdgpu_device_wb_fini - Disable Writeback and free memory
1344 *
1345 * @adev: amdgpu_device pointer
1346 *
1347 * Disables Writeback and frees the Writeback memory (all asics).
1348 * Used at driver shutdown.
1349 */
1350static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1351{
1352	if (adev->wb.wb_obj) {
1353		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1354				      &adev->wb.gpu_addr,
1355				      (void **)&adev->wb.wb);
1356		adev->wb.wb_obj = NULL;
1357	}
1358}
1359
1360/**
1361 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1362 *
1363 * @adev: amdgpu_device pointer
1364 *
1365 * Initializes writeback and allocates writeback memory (all asics).
1366 * Used at driver startup.
1367 * Returns 0 on success or an -error on failure.
1368 */
1369static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1370{
1371	int r;
1372
1373	if (adev->wb.wb_obj == NULL) {
1374		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1375		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1376					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1377					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1378					    (void **)&adev->wb.wb);
1379		if (r) {
1380			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1381			return r;
1382		}
1383
1384		adev->wb.num_wb = AMDGPU_MAX_WB;
1385		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1386
1387		/* clear wb memory */
1388		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1389	}
1390
1391	return 0;
1392}
1393
1394/**
1395 * amdgpu_device_wb_get - Allocate a wb entry
1396 *
1397 * @adev: amdgpu_device pointer
1398 * @wb: wb index
1399 *
1400 * Allocate a wb slot for use by the driver (all asics).
1401 * Returns 0 on success or -EINVAL on failure.
1402 */
1403int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1404{
1405	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1406
1407	if (offset < adev->wb.num_wb) {
1408		__set_bit(offset, adev->wb.used);
1409		*wb = offset << 3; /* convert to dw offset */
1410		return 0;
1411	} else {
1412		return -EINVAL;
1413	}
1414}
1415
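/*
 * Usage sketch (illustrative): a caller reserves one slot and derives both
 * the CPU and GPU addresses from the returned dword offset:
 *
 *   u32 wb;
 *
 *   if (!amdgpu_device_wb_get(adev, &wb)) {
 *           u32 *cpu_addr = &adev->wb.wb[wb];
 *           u64 gpu_addr = adev->wb.gpu_addr + wb * 4;
 *
 *           ... use the slot, then ...
 *           amdgpu_device_wb_free(adev, wb);
 *   }
 */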
1416/**
1417 * amdgpu_device_wb_free - Free a wb entry
1418 *
1419 * @adev: amdgpu_device pointer
1420 * @wb: wb index
1421 *
1422 * Free a wb slot allocated for use by the driver (all asics)
1423 */
1424void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1425{
1426	wb >>= 3;
1427	if (wb < adev->wb.num_wb)
1428		__clear_bit(wb, adev->wb.used);
1429}
1430
1431/**
1432 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1433 *
1434 * @adev: amdgpu_device pointer
1435 *
1436 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 1437 * to fail, but if any of the BARs is not accessible after the resize we abort
1438 * driver loading by returning -ENODEV.
1439 */
1440int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1441{
1442	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1443	struct pci_bus *root;
1444	struct resource *res;
1445	unsigned int i;
1446	u16 cmd;
1447	int r;
1448
1449	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
1450		return 0;
1451
1452	/* Bypass for VF */
1453	if (amdgpu_sriov_vf(adev))
1454		return 0;
1455
1456	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
1457	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
 1458		DRM_WARN("System can't access extended configuration space, please check!!\n");
1459
1460	/* skip if the bios has already enabled large BAR */
1461	if (adev->gmc.real_vram_size &&
1462	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1463		return 0;
1464
1465	/* Check if the root BUS has 64bit memory resources */
1466	root = adev->pdev->bus;
1467	while (root->parent)
1468		root = root->parent;
1469
1470	pci_bus_for_each_resource(root, res, i) {
1471		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1472		    res->start > 0x100000000ull)
1473			break;
1474	}
1475
1476	/* Trying to resize is pointless without a root hub window above 4GB */
1477	if (!res)
1478		return 0;
1479
1480	/* Limit the BAR size to what is available */
1481	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1482			rbar_size);
1483
1484	/* Disable memory decoding while we change the BAR addresses and size */
1485	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1486	pci_write_config_word(adev->pdev, PCI_COMMAND,
1487			      cmd & ~PCI_COMMAND_MEMORY);
1488
1489	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1490	amdgpu_doorbell_fini(adev);
1491	if (adev->asic_type >= CHIP_BONAIRE)
1492		pci_release_resource(adev->pdev, 2);
1493
1494	pci_release_resource(adev->pdev, 0);
1495
1496	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1497	if (r == -ENOSPC)
1498		DRM_INFO("Not enough PCI address space for a large BAR.");
1499	else if (r && r != -ENOTSUPP)
1500		DRM_ERROR("Problem resizing BAR0 (%d).", r);
1501
1502	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1503
1504	/* When the doorbell or fb BAR isn't available we have no chance of
1505	 * using the device.
1506	 */
1507	r = amdgpu_doorbell_init(adev);
1508	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1509		return -ENODEV;
1510
1511	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1512
1513	return 0;
1514}
1515
1516static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
1517{
1518	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
1519		return false;
1520
1521	return true;
1522}
1523
1524/*
1525 * GPU helpers function.
1526 */
1527/**
1528 * amdgpu_device_need_post - check if the hw need post or not
1529 *
1530 * @adev: amdgpu_device pointer
1531 *
 1532 * Check if the asic has been initialized (all asics) at driver startup,
 1533 * or if post is needed because a hw reset was performed.
 1534 * Returns true if post is needed or false if not.
1535 */
1536bool amdgpu_device_need_post(struct amdgpu_device *adev)
1537{
1538	uint32_t reg;
1539
1540	if (amdgpu_sriov_vf(adev))
1541		return false;
1542
1543	if (!amdgpu_device_read_bios(adev))
1544		return false;
1545
1546	if (amdgpu_passthrough(adev)) {
 1547		/* for FIJI: In the whole-GPU pass-through virtualization case, after a
 1548		 * VM reboot some old smc fw still needs the driver to do vPost, otherwise
 1549		 * the gpu hangs. smc fw versions above 22.15 don't have this flaw, so
 1550		 * force vPost for smc versions below 22.15.
1551		 */
1552		if (adev->asic_type == CHIP_FIJI) {
1553			int err;
1554			uint32_t fw_ver;
1555
1556			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
 1557			/* force vPost if an error occurred */
1558			if (err)
1559				return true;
1560
1561			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1562			release_firmware(adev->pm.fw);
1563			if (fw_ver < 0x00160e00)
1564				return true;
1565		}
1566	}
1567
1568	/* Don't post if we need to reset whole hive on init */
1569	if (adev->gmc.xgmi.pending_reset)
1570		return false;
1571
1572	if (adev->has_hw_reset) {
1573		adev->has_hw_reset = false;
1574		return true;
1575	}
1576
1577	/* bios scratch used on CIK+ */
1578	if (adev->asic_type >= CHIP_BONAIRE)
1579		return amdgpu_atombios_scratch_need_asic_init(adev);
1580
1581	/* check MEM_SIZE for older asics */
1582	reg = amdgpu_asic_get_config_memsize(adev);
1583
1584	if ((reg != 0) && (reg != 0xffffffff))
1585		return false;
1586
1587	return true;
1588}
1589
1590/*
1591 * Check whether seamless boot is supported.
1592 *
1593 * So far we only support seamless boot on DCE 3.0 or later.
 1594 * If users report that it works on older ASICs as well, we may
1595 * loosen this.
1596 */
1597bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
1598{
1599	switch (amdgpu_seamless) {
1600	case -1:
1601		break;
1602	case 1:
1603		return true;
1604	case 0:
1605		return false;
1606	default:
1607		DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
1608			  amdgpu_seamless);
1609		return false;
1610	}
1611
1612	if (!(adev->flags & AMD_IS_APU))
1613		return false;
1614
1615	if (adev->mman.keep_stolen_vga_memory)
1616		return false;
1617
1618	return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
1619}
1620
1621/*
1622 * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
1623 * don't support dynamic speed switching. Until we have confirmation from Intel
1624 * that a specific host supports it, it's safer that we keep it disabled for all.
1625 *
1626 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
1627 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
1628 */
1629static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
1630{
1631#if IS_ENABLED(CONFIG_X86)
1632	struct cpuinfo_x86 *c = &cpu_data(0);
1633
 1634	/* eGPUs change speeds based on USB4 fabric conditions */
1635	if (dev_is_removable(adev->dev))
1636		return true;
1637
1638	if (c->x86_vendor == X86_VENDOR_INTEL)
1639		return false;
1640#endif
1641	return true;
1642}
1643
1644/**
1645 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1646 *
1647 * @adev: amdgpu_device pointer
1648 *
1649 * Confirm whether the module parameter and pcie bridge agree that ASPM should
1650 * be set for this device.
1651 *
1652 * Returns true if it should be used or false if not.
1653 */
1654bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1655{
1656	switch (amdgpu_aspm) {
1657	case -1:
1658		break;
1659	case 0:
1660		return false;
1661	case 1:
1662		return true;
1663	default:
1664		return false;
1665	}
1666	if (adev->flags & AMD_IS_APU)
1667		return false;
1668	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
1669		return false;
1670	return pcie_aspm_enabled(adev->pdev);
1671}
1672
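/*
 * The amdgpu.aspm module parameter handled above is a tri-state:
 * -1 = auto (honour the PCIe bridge), 0 = off, 1 = on; e.g. (illustrative):
 *
 *   modprobe amdgpu aspm=0
 */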
1673/* if we get transitioned to only one device, take VGA back */
1674/**
1675 * amdgpu_device_vga_set_decode - enable/disable vga decode
1676 *
1677 * @pdev: PCI device pointer
1678 * @state: enable/disable vga decode
1679 *
1680 * Enable/disable vga decode (all asics).
1681 * Returns VGA resource flags.
1682 */
1683static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1684		bool state)
1685{
1686	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1687
1688	amdgpu_asic_set_vga_state(adev, state);
1689	if (state)
1690		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1691		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1692	else
1693		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1694}
1695
1696/**
1697 * amdgpu_device_check_block_size - validate the vm block size
1698 *
1699 * @adev: amdgpu_device pointer
1700 *
1701 * Validates the vm block size specified via module parameter.
1702 * The vm block size defines number of bits in page table versus page directory,
1703 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1704 * page table and the remaining bits are in the page directory.
1705 */
1706static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1707{
1708	/* defines number of bits in page table versus page directory,
1709	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1710	 * page table and the remaining bits are in the page directory
1711	 */
1712	if (amdgpu_vm_block_size == -1)
1713		return;
1714
1715	if (amdgpu_vm_block_size < 9) {
1716		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1717			 amdgpu_vm_block_size);
1718		amdgpu_vm_block_size = -1;
1719	}
1720}
1721
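/*
 * Worked example (illustrative): with amdgpu_vm_block_size = 9, each page
 * table maps 2^9 pages of 4KB = 2MB of virtual address space; the remaining
 * address bits are resolved through the page directory levels.
 */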
1722/**
1723 * amdgpu_device_check_vm_size - validate the vm size
1724 *
1725 * @adev: amdgpu_device pointer
1726 *
1727 * Validates the vm size in GB specified via module parameter.
1728 * The VM size is the size of the GPU virtual memory space in GB.
1729 */
1730static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1731{
1732	/* no need to check the default value */
1733	if (amdgpu_vm_size == -1)
1734		return;
1735
1736	if (amdgpu_vm_size < 1) {
1737		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1738			 amdgpu_vm_size);
1739		amdgpu_vm_size = -1;
1740	}
1741}
1742
1743static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1744{
1745	struct sysinfo si;
1746	bool is_os_64 = (sizeof(void *) == 8);
1747	uint64_t total_memory;
1748	uint64_t dram_size_seven_GB = 0x1B8000000;
1749	uint64_t dram_size_three_GB = 0xB8000000;
1750
1751	if (amdgpu_smu_memory_pool_size == 0)
1752		return;
1753
1754	if (!is_os_64) {
1755		DRM_WARN("Not 64-bit OS, feature not supported\n");
1756		goto def_value;
1757	}
1758	si_meminfo(&si);
1759	total_memory = (uint64_t)si.totalram * si.mem_unit;
1760
1761	if ((amdgpu_smu_memory_pool_size == 1) ||
1762		(amdgpu_smu_memory_pool_size == 2)) {
1763		if (total_memory < dram_size_three_GB)
1764			goto def_value1;
1765	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1766		(amdgpu_smu_memory_pool_size == 8)) {
1767		if (total_memory < dram_size_seven_GB)
1768			goto def_value1;
1769	} else {
1770		DRM_WARN("Smu memory pool size not supported\n");
1771		goto def_value;
1772	}
1773	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1774
1775	return;
1776
1777def_value1:
 1778	DRM_WARN("Not enough system memory\n");
1779def_value:
1780	adev->pm.smu_prv_buffer_size = 0;
1781}
1782
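/*
 * Worked example (illustrative): amdgpu_smu_memory_pool_size is expressed
 * in 256MB units (value << 28 bytes), so 1 -> 256MB, 2 -> 512MB, 4 -> 1GB
 * and 8 -> 2GB, subject to the ~3GB/~7GB system-memory floors checked above.
 */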
1783static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1784{
1785	if (!(adev->flags & AMD_IS_APU) ||
1786	    adev->asic_type < CHIP_RAVEN)
1787		return 0;
1788
1789	switch (adev->asic_type) {
1790	case CHIP_RAVEN:
1791		if (adev->pdev->device == 0x15dd)
1792			adev->apu_flags |= AMD_APU_IS_RAVEN;
1793		if (adev->pdev->device == 0x15d8)
1794			adev->apu_flags |= AMD_APU_IS_PICASSO;
1795		break;
1796	case CHIP_RENOIR:
1797		if ((adev->pdev->device == 0x1636) ||
1798		    (adev->pdev->device == 0x164c))
1799			adev->apu_flags |= AMD_APU_IS_RENOIR;
1800		else
1801			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1802		break;
1803	case CHIP_VANGOGH:
1804		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1805		break;
1806	case CHIP_YELLOW_CARP:
1807		break;
1808	case CHIP_CYAN_SKILLFISH:
1809		if ((adev->pdev->device == 0x13FE) ||
1810		    (adev->pdev->device == 0x143F))
1811			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1812		break;
1813	default:
1814		break;
1815	}
1816
1817	return 0;
1818}
1819
1820/**
1821 * amdgpu_device_check_arguments - validate module params
1822 *
1823 * @adev: amdgpu_device pointer
1824 *
1825 * Validates certain module parameters and updates
1826 * the associated values used by the driver (all asics).
1827 */
1828static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1829{
1830	if (amdgpu_sched_jobs < 4) {
1831		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1832			 amdgpu_sched_jobs);
1833		amdgpu_sched_jobs = 4;
1834	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
1835		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1836			 amdgpu_sched_jobs);
1837		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1838	}
1839
1840	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1841		/* gart size must be greater than or equal to 32M */
1842		dev_warn(adev->dev, "gart size (%d) too small\n",
1843			 amdgpu_gart_size);
1844		amdgpu_gart_size = -1;
1845	}
1846
1847	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1848		/* gtt size must be greater than or equal to 32M */
1849		dev_warn(adev->dev, "gtt size (%d) too small\n",
1850				 amdgpu_gtt_size);
1851		amdgpu_gtt_size = -1;
1852	}
1853
1854	/* valid range is between 4 and 9 inclusive */
1855	if (amdgpu_vm_fragment_size != -1 &&
1856	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1857		dev_warn(adev->dev, "valid range for VM fragment size is between 4 and 9\n");
1858		amdgpu_vm_fragment_size = -1;
1859	}
1860
1861	if (amdgpu_sched_hw_submission < 2) {
1862		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1863			 amdgpu_sched_hw_submission);
1864		amdgpu_sched_hw_submission = 2;
1865	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1866		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1867			 amdgpu_sched_hw_submission);
1868		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1869	}
1870
1871	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1872		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1873		amdgpu_reset_method = -1;
1874	}
1875
1876	amdgpu_device_check_smu_prv_buffer_size(adev);
1877
1878	amdgpu_device_check_vm_size(adev);
1879
1880	amdgpu_device_check_block_size(adev);
1881
1882	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1883
1884	return 0;
1885}
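/*
 * Illustrative example (not part of the driver): the checks above round bad
 * values up rather than rejecting them. A hypothetical boot with
 * amdgpu.sched_jobs=6 would be corrected to the next power of two:
 *
 *	amdgpu_sched_jobs = roundup_pow_of_two(6);	yields 8
 *
 * while amdgpu.sched_jobs=2 would be raised to the minimum of 4.
 */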
1886
1887/**
1888 * amdgpu_switcheroo_set_state - set switcheroo state
1889 *
1890 * @pdev: pci dev pointer
1891 * @state: vga_switcheroo state
1892 *
1893 * Callback for the switcheroo driver.  Suspends or resumes
1894 * the asic before it is powered down or after it is powered up using ACPI methods.
1895 */
1896static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1897					enum vga_switcheroo_state state)
1898{
1899	struct drm_device *dev = pci_get_drvdata(pdev);
1900	int r;
1901
1902	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1903		return;
1904
1905	if (state == VGA_SWITCHEROO_ON) {
1906		pr_info("switched on\n");
1907		/* don't suspend or resume card normally */
1908		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1909
1910		pci_set_power_state(pdev, PCI_D0);
1911		amdgpu_device_load_pci_state(pdev);
1912		r = pci_enable_device(pdev);
1913		if (r)
1914			DRM_WARN("pci_enable_device failed (%d)\n", r);
1915		amdgpu_device_resume(dev, true);
1916
1917		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1918	} else {
1919		pr_info("switched off\n");
1920		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1921		amdgpu_device_prepare(dev);
1922		amdgpu_device_suspend(dev, true);
1923		amdgpu_device_cache_pci_state(pdev);
1924		/* Shut down the device */
1925		pci_disable_device(pdev);
1926		pci_set_power_state(pdev, PCI_D3cold);
1927		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1928	}
1929}
1930
1931/**
1932 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1933 *
1934 * @pdev: pci dev pointer
1935 *
1936 * Callback for the switcheroo driver.  Checks if the switcheroo
1937 * state can be changed.
1938 * Returns true if the state can be changed, false if not.
1939 */
1940static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1941{
1942	struct drm_device *dev = pci_get_drvdata(pdev);
1943
1944	/*
1945	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1946	 * locking inversion with the driver load path. And the access here is
1947	 * completely racy anyway. So don't bother with locking for now.
1948	 */
1949	return atomic_read(&dev->open_count) == 0;
1950}
1951
1952static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1953	.set_gpu_state = amdgpu_switcheroo_set_state,
1954	.reprobe = NULL,
1955	.can_switch = amdgpu_switcheroo_can_switch,
1956};
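/*
 * Registration sketch (illustrative): these ops are handed to the
 * vga_switcheroo subsystem during device init, roughly as below; the third
 * argument tells vga_switcheroo whether the driver does its own power
 * control, which amdgpu derives from the PX capability.
 *
 *	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops,
 *				       amdgpu_device_supports_px(dev));
 */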
1957
1958/**
1959 * amdgpu_device_ip_set_clockgating_state - set the CG state
1960 *
1961 * @dev: amdgpu_device pointer
1962 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1963 * @state: clockgating state (gate or ungate)
1964 *
1965 * Sets the requested clockgating state for all instances of
1966 * the hardware IP specified.
1967 * Returns the error code from the last instance.
1968 */
1969int amdgpu_device_ip_set_clockgating_state(void *dev,
1970					   enum amd_ip_block_type block_type,
1971					   enum amd_clockgating_state state)
1972{
1973	struct amdgpu_device *adev = dev;
1974	int i, r = 0;
1975
1976	for (i = 0; i < adev->num_ip_blocks; i++) {
1977		if (!adev->ip_blocks[i].status.valid)
1978			continue;
1979		if (adev->ip_blocks[i].version->type != block_type)
1980			continue;
1981		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1982			continue;
1983		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1984			(void *)adev, state);
1985		if (r)
1986			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1987				  adev->ip_blocks[i].version->funcs->name, r);
1988	}
1989	return r;
1990}
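/*
 * Usage sketch (illustrative, not part of the driver): callers gate or
 * ungate every instance of one IP type in a single call, e.g.:
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev,
 *						   AMD_IP_BLOCK_TYPE_GFX,
 *						   AMD_CG_STATE_GATE);
 *	if (r)
 *		dev_warn(adev->dev, "failed to gate GFX clocks (%d)\n", r);
 */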
1991
1992/**
1993 * amdgpu_device_ip_set_powergating_state - set the PG state
1994 *
1995 * @dev: amdgpu_device pointer
1996 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1997 * @state: powergating state (gate or ungate)
1998 *
1999 * Sets the requested powergating state for all instances of
2000 * the hardware IP specified.
2001 * Returns the error code from the last instance.
2002 */
2003int amdgpu_device_ip_set_powergating_state(void *dev,
2004					   enum amd_ip_block_type block_type,
2005					   enum amd_powergating_state state)
2006{
2007	struct amdgpu_device *adev = dev;
2008	int i, r = 0;
2009
2010	for (i = 0; i < adev->num_ip_blocks; i++) {
2011		if (!adev->ip_blocks[i].status.valid)
2012			continue;
2013		if (adev->ip_blocks[i].version->type != block_type)
2014			continue;
2015		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
2016			continue;
2017		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
2018			(void *)adev, state);
2019		if (r)
2020			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
2021				  adev->ip_blocks[i].version->funcs->name, r);
2022	}
2023	return r;
2024}
2025
2026/**
2027 * amdgpu_device_ip_get_clockgating_state - get the CG state
2028 *
2029 * @adev: amdgpu_device pointer
2030 * @flags: clockgating feature flags
2031 *
2032 * Walks the list of IPs on the device and updates the clockgating
2033 * flags for each IP.
2034 * Updates @flags with the feature flags for each hardware IP where
2035 * clockgating is enabled.
2036 */
2037void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
2038					    u64 *flags)
2039{
2040	int i;
2041
2042	for (i = 0; i < adev->num_ip_blocks; i++) {
2043		if (!adev->ip_blocks[i].status.valid)
2044			continue;
2045		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
2046			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
2047	}
2048}
2049
2050/**
2051 * amdgpu_device_ip_wait_for_idle - wait for idle
2052 *
2053 * @adev: amdgpu_device pointer
2054 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2055 *
2056 * Waits for the requested hardware IP to be idle.
2057 * Returns 0 for success or a negative error code on failure.
2058 */
2059int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
2060				   enum amd_ip_block_type block_type)
2061{
2062	int i, r;
2063
2064	for (i = 0; i < adev->num_ip_blocks; i++) {
2065		if (!adev->ip_blocks[i].status.valid)
2066			continue;
2067		if (adev->ip_blocks[i].version->type == block_type) {
2068			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
2069			if (r)
2070				return r;
2071			break;
2072		}
2073	}
2074	return 0;
2075
2076}
2077
2078/**
2079 * amdgpu_device_ip_is_idle - is the hardware IP idle
2080 *
2081 * @adev: amdgpu_device pointer
2082 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2083 *
2084 * Check if the hardware IP is idle or not.
2085 * Returns true if the IP is idle, false if not.
2086 */
2087bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
2088			      enum amd_ip_block_type block_type)
2089{
2090	int i;
2091
2092	for (i = 0; i < adev->num_ip_blocks; i++) {
2093		if (!adev->ip_blocks[i].status.valid)
2094			continue;
2095		if (adev->ip_blocks[i].version->type == block_type)
2096			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
2097	}
2098	return true;
2099
2100}
2101
2102/**
2103 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
2104 *
2105 * @adev: amdgpu_device pointer
2106 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
2107 *
2108 * Returns a pointer to the hardware IP block structure
2109 * if it exists for the asic, otherwise NULL.
2110 */
2111struct amdgpu_ip_block *
2112amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
2113			      enum amd_ip_block_type type)
2114{
2115	int i;
2116
2117	for (i = 0; i < adev->num_ip_blocks; i++)
2118		if (adev->ip_blocks[i].version->type == type)
2119			return &adev->ip_blocks[i];
2120
2121	return NULL;
2122}
2123
2124/**
2125 * amdgpu_device_ip_block_version_cmp
2126 *
2127 * @adev: amdgpu_device pointer
2128 * @type: enum amd_ip_block_type
2129 * @major: major version
2130 * @minor: minor version
2131 *
2132 * Returns 0 if the IP block version is equal to or greater than the specified version,
2133 * or 1 if it is smaller or the ip_block doesn't exist.
2134 */
2135int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
2136				       enum amd_ip_block_type type,
2137				       u32 major, u32 minor)
2138{
2139	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
2140
2141	if (ip_block && ((ip_block->version->major > major) ||
2142			((ip_block->version->major == major) &&
2143			(ip_block->version->minor >= minor))))
2144		return 0;
2145
2146	return 1;
2147}
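/*
 * Usage sketch (illustrative): note the inverted return convention, where 0
 * means "at least this version". A hypothetical caller that requires GFX IP
 * 8.1 or newer would write:
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       8, 1) == 0)
 *		use_newer_sequence(adev);	hypothetical helper
 */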
2148
2149/**
2150 * amdgpu_device_ip_block_add
2151 *
2152 * @adev: amdgpu_device pointer
2153 * @ip_block_version: pointer to the IP to add
2154 *
2155 * Adds the IP block driver information to the collection of IPs
2156 * on the asic.
2157 */
2158int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
2159			       const struct amdgpu_ip_block_version *ip_block_version)
2160{
2161	if (!ip_block_version)
2162		return -EINVAL;
2163
2164	switch (ip_block_version->type) {
2165	case AMD_IP_BLOCK_TYPE_VCN:
2166		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
2167			return 0;
2168		break;
2169	case AMD_IP_BLOCK_TYPE_JPEG:
2170		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
2171			return 0;
2172		break;
2173	default:
2174		break;
2175	}
2176
2177	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
2178		  ip_block_version->funcs->name);
2179
2180	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
2181
2182	return 0;
2183}
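/*
 * Registration sketch (illustrative): the SoC-specific set_ip_blocks
 * functions call this with static amdgpu_ip_block_version descriptors,
 * along the lines of:
 *
 *	static const struct amdgpu_ip_block_version vi_common_ip_block = {
 *		.type = AMD_IP_BLOCK_TYPE_COMMON,
 *		.major = 1,
 *		.minor = 0,
 *		.rev = 0,
 *		.funcs = &vi_common_ip_funcs,
 *	};
 *
 *	amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 */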
2184
2185/**
2186 * amdgpu_device_enable_virtual_display - enable virtual display feature
2187 *
2188 * @adev: amdgpu_device pointer
2189 *
2190 * Enables the virtual display feature if the user has enabled it via
2191 * the module parameter virtual_display.  This feature provides a virtual
2192 * display hardware on headless boards or in virtualized environments.
2193 * This function parses and validates the configuration string specified by
2194 * the user and configures the virtual display configuration (number of
2195 * virtual connectors, crtcs, etc.) specified.
2196 */
2197static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
2198{
2199	adev->enable_virtual_display = false;
2200
2201	if (amdgpu_virtual_display) {
2202		const char *pci_address_name = pci_name(adev->pdev);
2203		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
2204
2205		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
2206		pciaddstr_tmp = pciaddstr;
2207		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
2208			pciaddname = strsep(&pciaddname_tmp, ",");
2209			if (!strcmp("all", pciaddname)
2210			    || !strcmp(pci_address_name, pciaddname)) {
2211				long num_crtc;
2212				int res = -1;
2213
2214				adev->enable_virtual_display = true;
2215
2216				if (pciaddname_tmp)
2217					res = kstrtol(pciaddname_tmp, 10,
2218						      &num_crtc);
2219
2220				if (!res) {
2221					if (num_crtc < 1)
2222						num_crtc = 1;
2223					if (num_crtc > 6)
2224						num_crtc = 6;
2225					adev->mode_info.num_crtc = num_crtc;
2226				} else {
2227					adev->mode_info.num_crtc = 1;
2228				}
2229				break;
2230			}
2231		}
2232
2233		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
2234			 amdgpu_virtual_display, pci_address_name,
2235			 adev->enable_virtual_display, adev->mode_info.num_crtc);
2236
2237		kfree(pciaddstr);
2238	}
2239}
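/*
 * Illustrative note (not part of the driver): the string parsed above is a
 * semicolon-separated list of <pci-address>[,<num_crtc>] entries, where the
 * literal "all" matches every device. A hypothetical invocation enabling two
 * virtual CRTCs on one specific device would be:
 *
 *	modprobe amdgpu virtual_display=0000:01:00.0,2
 */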
2240
2241void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
2242{
2243	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
2244		adev->mode_info.num_crtc = 1;
2245		adev->enable_virtual_display = true;
2246		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
2247			 adev->enable_virtual_display, adev->mode_info.num_crtc);
2248	}
2249}
2250
2251/**
2252 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2253 *
2254 * @adev: amdgpu_device pointer
2255 *
2256 * Parses the asic configuration parameters specified in the gpu info
2257 * firmware and makes them available to the driver for use in configuring
2258 * the asic.
2259 * Returns 0 on success, -EINVAL on failure.
2260 */
2261static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
2262{
2263	const char *chip_name;
2264	char fw_name[40];
2265	int err;
2266	const struct gpu_info_firmware_header_v1_0 *hdr;
2267
2268	adev->firmware.gpu_info_fw = NULL;
2269
2270	if (adev->mman.discovery_bin)
2271		return 0;
2272
2273	switch (adev->asic_type) {
2274	default:
2275		return 0;
2276	case CHIP_VEGA10:
2277		chip_name = "vega10";
2278		break;
2279	case CHIP_VEGA12:
2280		chip_name = "vega12";
2281		break;
2282	case CHIP_RAVEN:
2283		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2284			chip_name = "raven2";
2285		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
2286			chip_name = "picasso";
2287		else
2288			chip_name = "raven";
2289		break;
2290	case CHIP_ARCTURUS:
2291		chip_name = "arcturus";
2292		break;
2293	case CHIP_NAVI12:
2294		chip_name = "navi12";
2295		break;
2296	}
2297
2298	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
2299	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
2300	if (err) {
2301		dev_err(adev->dev,
2302			"Failed to get gpu_info firmware \"%s\"\n",
2303			fw_name);
2304		goto out;
2305	}
2306
2307	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2308	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2309
2310	switch (hdr->version_major) {
2311	case 1:
2312	{
2313		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2314			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2315								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2316
2317		/*
2318		 * Should be dropped when DAL no longer needs it.
2319		 */
2320		if (adev->asic_type == CHIP_NAVI12)
2321			goto parse_soc_bounding_box;
2322
2323		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2324		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2325		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2326		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2327		adev->gfx.config.max_texture_channel_caches =
2328			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2329		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2330		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2331		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2332		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2333		adev->gfx.config.double_offchip_lds_buf =
2334			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2335		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2336		adev->gfx.cu_info.max_waves_per_simd =
2337			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2338		adev->gfx.cu_info.max_scratch_slots_per_cu =
2339			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2340		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2341		if (hdr->version_minor >= 1) {
2342			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2343				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2344									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2345			adev->gfx.config.num_sc_per_sh =
2346				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2347			adev->gfx.config.num_packer_per_sc =
2348				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2349		}
2350
2351parse_soc_bounding_box:
2352		/*
2353		 * SOC bounding box info is not integrated into the discovery table;
2354		 * we always need to parse it from the gpu info firmware if needed.
2355		 */
2356		if (hdr->version_minor == 2) {
2357			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2358				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2359									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2360			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2361		}
2362		break;
2363	}
2364	default:
2365		dev_err(adev->dev,
2366			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2367		err = -EINVAL;
2368		goto out;
2369	}
2370out:
2371	return err;
2372}
2373
2374/**
2375 * amdgpu_device_ip_early_init - run early init for hardware IPs
2376 *
2377 * @adev: amdgpu_device pointer
2378 *
2379 * Early initialization pass for hardware IPs.  The hardware IPs that make
2380 * up each asic are discovered and each IP's early_init callback is run.  This
2381 * is the first stage in initializing the asic.
2382 * Returns 0 on success, negative error code on failure.
2383 */
2384static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2385{
2386	struct pci_dev *parent;
2387	int i, r;
2388	bool total;
2389
2390	amdgpu_device_enable_virtual_display(adev);
2391
2392	if (amdgpu_sriov_vf(adev)) {
2393		r = amdgpu_virt_request_full_gpu(adev, true);
2394		if (r)
2395			return r;
2396	}
2397
2398	switch (adev->asic_type) {
2399#ifdef CONFIG_DRM_AMDGPU_SI
2400	case CHIP_VERDE:
2401	case CHIP_TAHITI:
2402	case CHIP_PITCAIRN:
2403	case CHIP_OLAND:
2404	case CHIP_HAINAN:
2405		adev->family = AMDGPU_FAMILY_SI;
2406		r = si_set_ip_blocks(adev);
2407		if (r)
2408			return r;
2409		break;
2410#endif
2411#ifdef CONFIG_DRM_AMDGPU_CIK
2412	case CHIP_BONAIRE:
2413	case CHIP_HAWAII:
2414	case CHIP_KAVERI:
2415	case CHIP_KABINI:
2416	case CHIP_MULLINS:
2417		if (adev->flags & AMD_IS_APU)
2418			adev->family = AMDGPU_FAMILY_KV;
2419		else
2420			adev->family = AMDGPU_FAMILY_CI;
2421
2422		r = cik_set_ip_blocks(adev);
2423		if (r)
2424			return r;
2425		break;
2426#endif
2427	case CHIP_TOPAZ:
2428	case CHIP_TONGA:
2429	case CHIP_FIJI:
2430	case CHIP_POLARIS10:
2431	case CHIP_POLARIS11:
2432	case CHIP_POLARIS12:
2433	case CHIP_VEGAM:
2434	case CHIP_CARRIZO:
2435	case CHIP_STONEY:
2436		if (adev->flags & AMD_IS_APU)
2437			adev->family = AMDGPU_FAMILY_CZ;
2438		else
2439			adev->family = AMDGPU_FAMILY_VI;
2440
2441		r = vi_set_ip_blocks(adev);
2442		if (r)
2443			return r;
2444		break;
2445	default:
2446		r = amdgpu_discovery_set_ip_blocks(adev);
2447		if (r)
2448			return r;
2449		break;
2450	}
2451
2452	if (amdgpu_has_atpx() &&
2453	    (amdgpu_is_atpx_hybrid() ||
2454	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2455	    ((adev->flags & AMD_IS_APU) == 0) &&
2456	    !dev_is_removable(&adev->pdev->dev))
2457		adev->flags |= AMD_IS_PX;
2458
2459	if (!(adev->flags & AMD_IS_APU)) {
2460		parent = pcie_find_root_port(adev->pdev);
2461		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2462	}
2463
2464
2465	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2466	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2467		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2468	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2469		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2470	if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
2471		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2472
2473	total = true;
2474	for (i = 0; i < adev->num_ip_blocks; i++) {
2475		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2476			DRM_WARN("disabled ip block: %d <%s>\n",
2477				  i, adev->ip_blocks[i].version->funcs->name);
2478			adev->ip_blocks[i].status.valid = false;
2479		} else {
2480			if (adev->ip_blocks[i].version->funcs->early_init) {
2481				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2482				if (r == -ENOENT) {
2483					adev->ip_blocks[i].status.valid = false;
2484				} else if (r) {
2485					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2486						  adev->ip_blocks[i].version->funcs->name, r);
2487					total = false;
2488				} else {
2489					adev->ip_blocks[i].status.valid = true;
2490				}
2491			} else {
2492				adev->ip_blocks[i].status.valid = true;
2493			}
2494		}
2495		/* get the vbios after the asic_funcs are set up */
2496		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2497			r = amdgpu_device_parse_gpu_info_fw(adev);
2498			if (r)
2499				return r;
2500
2501			/* Read BIOS */
2502			if (amdgpu_device_read_bios(adev)) {
2503				if (!amdgpu_get_bios(adev))
2504					return -EINVAL;
2505
2506				r = amdgpu_atombios_init(adev);
2507				if (r) {
2508					dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2509					amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2510					return r;
2511				}
2512			}
2513
2514			/* get pf2vf msg info at its earliest time */
2515			if (amdgpu_sriov_vf(adev))
2516				amdgpu_virt_init_data_exchange(adev);
2517
2518		}
2519	}
2520	if (!total)
2521		return -ENODEV;
2522
2523	amdgpu_amdkfd_device_probe(adev);
2524	adev->cg_flags &= amdgpu_cg_mask;
2525	adev->pg_flags &= amdgpu_pg_mask;
2526
2527	return 0;
2528}
2529
2530static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2531{
2532	int i, r;
2533
2534	for (i = 0; i < adev->num_ip_blocks; i++) {
2535		if (!adev->ip_blocks[i].status.sw)
2536			continue;
2537		if (adev->ip_blocks[i].status.hw)
2538			continue;
2539		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2540		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2541		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2542			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2543			if (r) {
2544				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2545					  adev->ip_blocks[i].version->funcs->name, r);
2546				return r;
2547			}
2548			adev->ip_blocks[i].status.hw = true;
2549		}
2550	}
2551
2552	return 0;
2553}
2554
2555static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2556{
2557	int i, r;
2558
2559	for (i = 0; i < adev->num_ip_blocks; i++) {
2560		if (!adev->ip_blocks[i].status.sw)
2561			continue;
2562		if (adev->ip_blocks[i].status.hw)
2563			continue;
2564		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2565		if (r) {
2566			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2567				  adev->ip_blocks[i].version->funcs->name, r);
2568			return r;
2569		}
2570		adev->ip_blocks[i].status.hw = true;
2571	}
2572
2573	return 0;
2574}
2575
2576static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2577{
2578	int r = 0;
2579	int i;
2580	uint32_t smu_version;
2581
2582	if (adev->asic_type >= CHIP_VEGA10) {
2583		for (i = 0; i < adev->num_ip_blocks; i++) {
2584			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2585				continue;
2586
2587			if (!adev->ip_blocks[i].status.sw)
2588				continue;
2589
2590			/* no need to do the fw loading again if already done */
2591			if (adev->ip_blocks[i].status.hw)
2592				break;
2593
2594			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2595				r = adev->ip_blocks[i].version->funcs->resume(adev);
2596				if (r) {
2597					DRM_ERROR("resume of IP block <%s> failed %d\n",
2598							  adev->ip_blocks[i].version->funcs->name, r);
2599					return r;
2600				}
2601			} else {
2602				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2603				if (r) {
2604					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2605							  adev->ip_blocks[i].version->funcs->name, r);
2606					return r;
2607				}
2608			}
2609
2610			adev->ip_blocks[i].status.hw = true;
2611			break;
2612		}
2613	}
2614
2615	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2616		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2617
2618	return r;
2619}
2620
2621static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2622{
2623	long timeout;
2624	int r, i;
2625
2626	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2627		struct amdgpu_ring *ring = adev->rings[i];
2628
2629		/* No need to set up the GPU scheduler for rings that don't need it */
2630		if (!ring || ring->no_scheduler)
2631			continue;
2632
2633		switch (ring->funcs->type) {
2634		case AMDGPU_RING_TYPE_GFX:
2635			timeout = adev->gfx_timeout;
2636			break;
2637		case AMDGPU_RING_TYPE_COMPUTE:
2638			timeout = adev->compute_timeout;
2639			break;
2640		case AMDGPU_RING_TYPE_SDMA:
2641			timeout = adev->sdma_timeout;
2642			break;
2643		default:
2644			timeout = adev->video_timeout;
2645			break;
2646		}
2647
2648		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
2649				   DRM_SCHED_PRIORITY_COUNT,
2650				   ring->num_hw_submission, 0,
2651				   timeout, adev->reset_domain->wq,
2652				   ring->sched_score, ring->name,
2653				   adev->dev);
2654		if (r) {
2655			DRM_ERROR("Failed to create scheduler on ring %s.\n",
2656				  ring->name);
2657			return r;
2658		}
2659		r = amdgpu_uvd_entity_init(adev, ring);
2660		if (r) {
2661			DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
2662				  ring->name);
2663			return r;
2664		}
2665		r = amdgpu_vce_entity_init(adev, ring);
2666		if (r) {
2667			DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
2668				  ring->name);
2669			return r;
2670		}
2671	}
2672
2673	amdgpu_xcp_update_partition_sched_list(adev);
2674
2675	return 0;
2676}
2677
2678
2679/**
2680 * amdgpu_device_ip_init - run init for hardware IPs
2681 *
2682 * @adev: amdgpu_device pointer
2683 *
2684 * Main initialization pass for hardware IPs.  The list of all the hardware
2685 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2686 * are run.  sw_init initializes the software state associated with each IP
2687 * and hw_init initializes the hardware associated with each IP.
2688 * Returns 0 on success, negative error code on failure.
2689 */
2690static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2691{
2692	int i, r;
2693
2694	r = amdgpu_ras_init(adev);
2695	if (r)
2696		return r;
2697
2698	for (i = 0; i < adev->num_ip_blocks; i++) {
2699		if (!adev->ip_blocks[i].status.valid)
2700			continue;
2701		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2702		if (r) {
2703			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2704				  adev->ip_blocks[i].version->funcs->name, r);
2705			goto init_failed;
2706		}
2707		adev->ip_blocks[i].status.sw = true;
2708
2709		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2710			/* need to do common hw init early so everything is set up for gmc */
2711			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2712			if (r) {
2713				DRM_ERROR("hw_init %d failed %d\n", i, r);
2714				goto init_failed;
2715			}
2716			adev->ip_blocks[i].status.hw = true;
2717		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2718			/* need to do gmc hw init early so we can allocate gpu mem */
2719			/* Try to reserve bad pages early */
2720			if (amdgpu_sriov_vf(adev))
2721				amdgpu_virt_exchange_data(adev);
2722
2723			r = amdgpu_device_mem_scratch_init(adev);
2724			if (r) {
2725				DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
2726				goto init_failed;
2727			}
2728			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2729			if (r) {
2730				DRM_ERROR("hw_init %d failed %d\n", i, r);
2731				goto init_failed;
2732			}
2733			r = amdgpu_device_wb_init(adev);
2734			if (r) {
2735				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2736				goto init_failed;
2737			}
2738			adev->ip_blocks[i].status.hw = true;
2739
2740			/* right after GMC hw init, we create CSA */
2741			if (adev->gfx.mcbp) {
2742				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2743							       AMDGPU_GEM_DOMAIN_VRAM |
2744							       AMDGPU_GEM_DOMAIN_GTT,
2745							       AMDGPU_CSA_SIZE);
2746				if (r) {
2747					DRM_ERROR("allocate CSA failed %d\n", r);
2748					goto init_failed;
2749				}
2750			}
2751
2752			r = amdgpu_seq64_init(adev);
2753			if (r) {
2754				DRM_ERROR("allocate seq64 failed %d\n", r);
2755				goto init_failed;
2756			}
2757		}
2758	}
2759
2760	if (amdgpu_sriov_vf(adev))
2761		amdgpu_virt_init_data_exchange(adev);
2762
2763	r = amdgpu_ib_pool_init(adev);
2764	if (r) {
2765		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2766		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2767		goto init_failed;
2768	}
2769
2770	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2771	if (r)
2772		goto init_failed;
2773
2774	r = amdgpu_device_ip_hw_init_phase1(adev);
2775	if (r)
2776		goto init_failed;
2777
2778	r = amdgpu_device_fw_loading(adev);
2779	if (r)
2780		goto init_failed;
2781
2782	r = amdgpu_device_ip_hw_init_phase2(adev);
2783	if (r)
2784		goto init_failed;
2785
2786	/*
2787	 * Retired pages will be loaded from eeprom and reserved here.
2788	 * This should be called after amdgpu_device_ip_hw_init_phase2 since
2789	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2790	 * functional for I2C communication, which is only true at this point.
2791	 *
2792	 * amdgpu_ras_recovery_init may fail, but the upper level only cares
2793	 * about the failure from a bad gpu situation and stops the amdgpu
2794	 * init process accordingly. For other failed cases, it will still
2795	 * release all the resources and print an error message, rather than
2796	 * returning a negative value to the upper level.
2797	 *
2798	 * Note: theoretically, this should be called before all vram allocations
2799	 * to protect retired pages from being abused.
2800	 */
2801	r = amdgpu_ras_recovery_init(adev);
2802	if (r)
2803		goto init_failed;
2804
2805	/*
2806	 * In case of XGMI, grab an extra reference on the reset domain for this device
2807	 */
2808	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2809		if (amdgpu_xgmi_add_device(adev) == 0) {
2810			if (!amdgpu_sriov_vf(adev)) {
2811				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2812
2813				if (WARN_ON(!hive)) {
2814					r = -ENOENT;
2815					goto init_failed;
2816				}
2817
2818				if (!hive->reset_domain ||
2819				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2820					r = -ENOENT;
2821					amdgpu_put_xgmi_hive(hive);
2822					goto init_failed;
2823				}
2824
2825				/* Drop the early temporary reset domain we created for device */
2826				amdgpu_reset_put_reset_domain(adev->reset_domain);
2827				adev->reset_domain = hive->reset_domain;
2828				amdgpu_put_xgmi_hive(hive);
2829			}
2830		}
2831	}
2832
2833	r = amdgpu_device_init_schedulers(adev);
2834	if (r)
2835		goto init_failed;
2836
2837	if (adev->mman.buffer_funcs_ring->sched.ready)
2838		amdgpu_ttm_set_buffer_funcs_status(adev, true);
2839
2840	/* Don't init kfd if the whole hive needs to be reset during init */
2841	if (!adev->gmc.xgmi.pending_reset) {
2842		kgd2kfd_init_zone_device(adev);
2843		amdgpu_amdkfd_device_init(adev);
2844	}
2845
2846	amdgpu_fru_get_product_info(adev);
2847
2848init_failed:
2849
2850	return r;
2851}
2852
2853/**
2854 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2855 *
2856 * @adev: amdgpu_device pointer
2857 *
2858 * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2859 * this function before a GPU reset.  If the value is retained after a
2860 * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2861 */
2862static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2863{
2864	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2865}
2866
2867/**
2868 * amdgpu_device_check_vram_lost - check if vram is valid
2869 *
2870 * @adev: amdgpu_device pointer
2871 *
2872 * Checks the reset magic value written to the gart pointer in VRAM.
2873 * The driver calls this after a GPU reset to see if the contents of
2874 * VRAM are lost or not.
2875 * returns true if vram is lost, false if not.
2876 */
2877static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2878{
2879	if (memcmp(adev->gart.ptr, adev->reset_magic,
2880			AMDGPU_RESET_MAGIC_NUM))
2881		return true;
2882
2883	if (!amdgpu_in_reset(adev))
2884		return false;
2885
2886	/*
2887	 * For all ASICs with baco/mode1 reset, the VRAM is
2888	 * always assumed to be lost.
2889	 */
2890	switch (amdgpu_asic_reset_method(adev)) {
2891	case AMD_RESET_METHOD_BACO:
2892	case AMD_RESET_METHOD_MODE1:
2893		return true;
2894	default:
2895		return false;
2896	}
2897}
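/*
 * Illustrative sketch (not part of the driver): together these two helpers
 * implement a snapshot-then-compare protocol around a reset:
 *
 *	amdgpu_device_fill_reset_magic(adev);	snapshot before the reset
 *	... perform the GPU reset ...
 *	if (amdgpu_device_check_vram_lost(adev))
 *		treat all VRAM contents as invalid
 */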
2898
2899/**
2900 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2901 *
2902 * @adev: amdgpu_device pointer
2903 * @state: clockgating state (gate or ungate)
2904 *
2905 * The list of all the hardware IPs that make up the asic is walked and the
2906 * set_clockgating_state callbacks are run.
2907 * The late initialization pass enables clockgating for the hardware IPs;
2908 * the fini or suspend pass disables it.
2909 * Returns 0 on success, negative error code on failure.
2910 */
2911
2912int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2913			       enum amd_clockgating_state state)
2914{
2915	int i, j, r;
2916
2917	if (amdgpu_emu_mode == 1)
2918		return 0;
2919
2920	for (j = 0; j < adev->num_ip_blocks; j++) {
2921		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2922		if (!adev->ip_blocks[i].status.late_initialized)
2923			continue;
2924		/* skip CG for GFX, SDMA on S0ix */
2925		if (adev->in_s0ix &&
2926		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2927		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2928			continue;
2929		/* skip CG for VCE/UVD, it's handled specially */
2930		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2931		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2932		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2933		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2934		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2935			/* enable clockgating to save power */
2936			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2937										     state);
2938			if (r) {
2939				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2940					  adev->ip_blocks[i].version->funcs->name, r);
2941				return r;
2942			}
2943		}
2944	}
2945
2946	return 0;
2947}
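/*
 * Illustrative note (not part of the driver): the index computation above
 * walks the IP list forward when gating and backward when ungating. For a
 * hypothetical device with three IP blocks:
 *
 *	gate:   j = 0,1,2  ->  i = 0,1,2
 *	ungate: j = 0,1,2  ->  i = 2,1,0
 *
 * so clocks are ungated in the reverse of the order they were gated.
 */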
2948
2949int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2950			       enum amd_powergating_state state)
2951{
2952	int i, j, r;
2953
2954	if (amdgpu_emu_mode == 1)
2955		return 0;
2956
2957	for (j = 0; j < adev->num_ip_blocks; j++) {
2958		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2959		if (!adev->ip_blocks[i].status.late_initialized)
2960			continue;
2961		/* skip PG for GFX, SDMA on S0ix */
2962		if (adev->in_s0ix &&
2963		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2964		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2965			continue;
2966		/* skip PG for VCE/UVD, it's handled specially */
2967		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2968		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2969		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2970		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2971		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2972			/* enable powergating to save power */
2973			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2974											state);
2975			if (r) {
2976				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2977					  adev->ip_blocks[i].version->funcs->name, r);
2978				return r;
2979			}
2980		}
2981	}
2982	return 0;
2983}
2984
2985static int amdgpu_device_enable_mgpu_fan_boost(void)
2986{
2987	struct amdgpu_gpu_instance *gpu_ins;
2988	struct amdgpu_device *adev;
2989	int i, ret = 0;
2990
2991	mutex_lock(&mgpu_info.mutex);
2992
2993	/*
2994	 * MGPU fan boost feature should be enabled
2995	 * only when there are two or more dGPUs in
2996	 * the system
2997	 */
2998	if (mgpu_info.num_dgpu < 2)
2999		goto out;
3000
3001	for (i = 0; i < mgpu_info.num_dgpu; i++) {
3002		gpu_ins = &(mgpu_info.gpu_ins[i]);
3003		adev = gpu_ins->adev;
3004		if (!(adev->flags & AMD_IS_APU) &&
3005		    !gpu_ins->mgpu_fan_enabled) {
3006			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
3007			if (ret)
3008				break;
3009
3010			gpu_ins->mgpu_fan_enabled = 1;
3011		}
3012	}
3013
3014out:
3015	mutex_unlock(&mgpu_info.mutex);
3016
3017	return ret;
3018}
3019
3020/**
3021 * amdgpu_device_ip_late_init - run late init for hardware IPs
3022 *
3023 * @adev: amdgpu_device pointer
3024 *
3025 * Late initialization pass for hardware IPs.  The list of all the hardware
3026 * IPs that make up the asic is walked and the late_init callbacks are run.
3027 * late_init covers any special initialization that an IP requires
3028 * after all of the IPs have been initialized or something that needs to happen
3029 * late in the init process.
3030 * Returns 0 on success, negative error code on failure.
3031 */
3032static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
3033{
3034	struct amdgpu_gpu_instance *gpu_instance;
3035	int i = 0, r;
3036
3037	for (i = 0; i < adev->num_ip_blocks; i++) {
3038		if (!adev->ip_blocks[i].status.hw)
3039			continue;
3040		if (adev->ip_blocks[i].version->funcs->late_init) {
3041			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
3042			if (r) {
3043				DRM_ERROR("late_init of IP block <%s> failed %d\n",
3044					  adev->ip_blocks[i].version->funcs->name, r);
3045				return r;
3046			}
3047		}
3048		adev->ip_blocks[i].status.late_initialized = true;
3049	}
3050
3051	r = amdgpu_ras_late_init(adev);
3052	if (r) {
3053		DRM_ERROR("amdgpu_ras_late_init failed %d", r);
3054		return r;
3055	}
3056
3057	amdgpu_ras_set_error_query_ready(adev, true);
3058
3059	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3060	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
3061
3062	amdgpu_device_fill_reset_magic(adev);
3063
3064	r = amdgpu_device_enable_mgpu_fan_boost();
3065	if (r)
3066		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
3067
3068	/* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
3069	if (amdgpu_passthrough(adev) &&
3070	    ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
3071	     adev->asic_type == CHIP_ALDEBARAN))
3072		amdgpu_dpm_handle_passthrough_sbr(adev, true);
3073
3074	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3075		mutex_lock(&mgpu_info.mutex);
3076
3077		/*
3078		 * Reset the device p-state to low, as it was booted with high.
3079		 *
3080		 * This should be performed only after all devices from the same
3081		 * hive get initialized.
3082		 *
3083		 * However, the number of devices in a hive is not known in advance;
3084		 * it is counted one by one as the devices initialize.
3085		 *
3086		 * So, we wait for all XGMI interlinked devices initialized.
3087		 * This may bring some delays as those devices may come from
3088		 * different hives. But that should be OK.
3089		 */
3090		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
3091			for (i = 0; i < mgpu_info.num_gpu; i++) {
3092				gpu_instance = &(mgpu_info.gpu_ins[i]);
3093				if (gpu_instance->adev->flags & AMD_IS_APU)
3094					continue;
3095
3096				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
3097						AMDGPU_XGMI_PSTATE_MIN);
3098				if (r) {
3099					DRM_ERROR("pstate setting failed (%d).\n", r);
3100					break;
3101				}
3102			}
3103		}
3104
3105		mutex_unlock(&mgpu_info.mutex);
3106	}
3107
3108	return 0;
3109}
3110
3111/**
3112 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
3113 *
3114 * @adev: amdgpu_device pointer
3115 *
3116 * For ASICs that need to disable the SMC first.
3117 */
3118static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
3119{
3120	int i, r;
3121
3122	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
3123		return;
3124
3125	for (i = 0; i < adev->num_ip_blocks; i++) {
3126		if (!adev->ip_blocks[i].status.hw)
3127			continue;
3128		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3129			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3130			/* XXX handle errors */
3131			if (r) {
3132				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3133					  adev->ip_blocks[i].version->funcs->name, r);
3134			}
3135			adev->ip_blocks[i].status.hw = false;
3136			break;
3137		}
3138	}
3139}
3140
3141static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
3142{
3143	int i, r;
3144
3145	for (i = 0; i < adev->num_ip_blocks; i++) {
3146		if (!adev->ip_blocks[i].version->funcs->early_fini)
3147			continue;
3148
3149		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
3150		if (r) {
3151			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
3152				  adev->ip_blocks[i].version->funcs->name, r);
3153		}
3154	}
3155
3156	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3157	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3158
3159	amdgpu_amdkfd_suspend(adev, false);
3160
3161	/* Workaround for ASICs that need to disable the SMC first */
3162	amdgpu_device_smu_fini_early(adev);
3163
3164	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3165		if (!adev->ip_blocks[i].status.hw)
3166			continue;
3167
3168		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3169		/* XXX handle errors */
3170		if (r) {
3171			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3172				  adev->ip_blocks[i].version->funcs->name, r);
3173		}
3174
3175		adev->ip_blocks[i].status.hw = false;
3176	}
3177
3178	if (amdgpu_sriov_vf(adev)) {
3179		if (amdgpu_virt_release_full_gpu(adev, false))
3180			DRM_ERROR("failed to release exclusive mode on fini\n");
3181	}
3182
3183	return 0;
3184}
3185
3186/**
3187 * amdgpu_device_ip_fini - run fini for hardware IPs
3188 *
3189 * @adev: amdgpu_device pointer
3190 *
3191 * Main teardown pass for hardware IPs.  The list of all the hardware
3192 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
3193 * are run.  hw_fini tears down the hardware associated with each IP
3194 * and sw_fini tears down any software state associated with each IP.
3195 * Returns 0 on success, negative error code on failure.
3196 */
3197static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
3198{
3199	int i, r;
3200
3201	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
3202		amdgpu_virt_release_ras_err_handler_data(adev);
3203
3204	if (adev->gmc.xgmi.num_physical_nodes > 1)
3205		amdgpu_xgmi_remove_device(adev);
3206
3207	amdgpu_amdkfd_device_fini_sw(adev);
3208
3209	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3210		if (!adev->ip_blocks[i].status.sw)
3211			continue;
3212
3213		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
3214			amdgpu_ucode_free_bo(adev);
3215			amdgpu_free_static_csa(&adev->virt.csa_obj);
3216			amdgpu_device_wb_fini(adev);
3217			amdgpu_device_mem_scratch_fini(adev);
3218			amdgpu_ib_pool_fini(adev);
3219			amdgpu_seq64_fini(adev);
3220		}
3221
3222		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
3223		/* XXX handle errors */
3224		if (r) {
3225			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
3226				  adev->ip_blocks[i].version->funcs->name, r);
3227		}
3228		adev->ip_blocks[i].status.sw = false;
3229		adev->ip_blocks[i].status.valid = false;
3230	}
3231
3232	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3233		if (!adev->ip_blocks[i].status.late_initialized)
3234			continue;
3235		if (adev->ip_blocks[i].version->funcs->late_fini)
3236			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
3237		adev->ip_blocks[i].status.late_initialized = false;
3238	}
3239
3240	amdgpu_ras_fini(adev);
3241
3242	return 0;
3243}
3244
3245/**
3246 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3247 *
3248 * @work: work_struct.
3249 */
3250static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
3251{
3252	struct amdgpu_device *adev =
3253		container_of(work, struct amdgpu_device, delayed_init_work.work);
3254	int r;
3255
3256	r = amdgpu_ib_ring_tests(adev);
3257	if (r)
3258		DRM_ERROR("ib ring test failed (%d).\n", r);
3259}
3260
3261static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
3262{
3263	struct amdgpu_device *adev =
3264		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
3265
3266	WARN_ON_ONCE(adev->gfx.gfx_off_state);
3267	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3268
3269	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
3270		adev->gfx.gfx_off_state = true;
3271}
3272
3273/**
3274 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3275 *
3276 * @adev: amdgpu_device pointer
3277 *
3278 * Main suspend function for hardware IPs.  The list of all the hardware
3279 * IPs that make up the asic is walked, clockgating is disabled and the
3280 * suspend callbacks are run.  suspend puts the hardware and software state
3281 * in each IP into a state suitable for suspend.
3282 * Returns 0 on success, negative error code on failure.
3283 */
3284static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3285{
3286	int i, r;
3287
3288	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3289	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3290
3291	/*
3292	 * Per the PMFW team's suggestion, the driver needs to handle disabling
3293	 * the gfxoff and df cstate features for gpu reset (e.g. Mode1Reset)
3294	 * scenarios. Add the missing df cstate disablement here.
3295	 */
3296	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3297		dev_warn(adev->dev, "Failed to disallow df cstate");
3298
3299	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3300		if (!adev->ip_blocks[i].status.valid)
3301			continue;
3302
3303		/* displays are handled separately */
3304		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3305			continue;
3306
3308		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3309		/* XXX handle errors */
3310		if (r) {
3311			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3312				  adev->ip_blocks[i].version->funcs->name, r);
3313			return r;
3314		}
3315
3316		adev->ip_blocks[i].status.hw = false;
3317	}
3318
3319	return 0;
3320}
3321
3322/**
3323 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3324 *
3325 * @adev: amdgpu_device pointer
3326 *
3327 * Main suspend function for hardware IPs.  The list of all the hardware
3328 * IPs that make up the asic is walked, clockgating is disabled and the
3329 * suspend callbacks are run.  suspend puts the hardware and software state
3330 * in each IP into a state suitable for suspend.
3331 * Returns 0 on success, negative error code on failure.
3332 */
3333static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3334{
3335	int i, r;
3336
3337	if (adev->in_s0ix)
3338		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3339
3340	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3341		if (!adev->ip_blocks[i].status.valid)
3342			continue;
3343		/* displays are handled in phase1 */
3344		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3345			continue;
3346		/* PSP lost connection when err_event_athub occurs */
3347		if (amdgpu_ras_intr_triggered() &&
3348		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3349			adev->ip_blocks[i].status.hw = false;
3350			continue;
3351		}
3352
3353		/* skip unnecessary suspend if we have not initialized them yet */
3354		if (adev->gmc.xgmi.pending_reset &&
3355		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3356		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3357		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3358		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3359			adev->ip_blocks[i].status.hw = false;
3360			continue;
3361		}
3362
3363		/* skip suspend of gfx/mes and psp for S0ix;
3364		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3365		 * like at runtime. PSP is also part of the always-on hardware,
3366		 * so there is no need to suspend it.
3367		 */
3368		if (adev->in_s0ix &&
3369		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3370		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3371		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3372			continue;
3373
3374		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3375		if (adev->in_s0ix &&
3376		    (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3377		     IP_VERSION(5, 0, 0)) &&
3378		    (adev->ip_blocks[i].version->type ==
3379		     AMD_IP_BLOCK_TYPE_SDMA))
3380			continue;
3381
3382		/* During cold-boot, swPSP provides the IMU and RLC FW binaries to TOS.
3383		 * These are kept in TMR and are expected to be reused by PSP-TOS to reload
3384		 * from this location; RLC autoload also gets loaded automatically
3385		 * from here based on the PMFW -> PSP message during the re-init sequence.
3386		 * Therefore, psp suspend & resume should be skipped to avoid destroying
3387		 * the TMR and reloading FWs again for IMU-enabled APU ASICs.
3388		 */
3389		if (amdgpu_in_reset(adev) &&
3390		    (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3391		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3392			continue;
3393
3395		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3396		/* XXX handle errors */
3397		if (r) {
3398			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3399				  adev->ip_blocks[i].version->funcs->name, r);
3400		}
3401		adev->ip_blocks[i].status.hw = false;
3402		/* handle putting the SMC in the appropriate state */
3403		if (!amdgpu_sriov_vf(adev)) {
3404			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3405				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3406				if (r) {
3407					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3408							adev->mp1_state, r);
3409					return r;
3410				}
3411			}
3412		}
3413	}
3414
3415	return 0;
3416}
3417
3418/**
3419 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3420 *
3421 * @adev: amdgpu_device pointer
3422 *
3423 * Main suspend function for hardware IPs.  The list of all the hardware
3424 * IPs that make up the asic is walked, clockgating is disabled and the
3425 * suspend callbacks are run.  suspend puts the hardware and software state
3426 * in each IP into a state suitable for suspend.
3427 * Returns 0 on success, negative error code on failure.
3428 */
3429int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3430{
3431	int r;
3432
3433	if (amdgpu_sriov_vf(adev)) {
3434		amdgpu_virt_fini_data_exchange(adev);
3435		amdgpu_virt_request_full_gpu(adev, false);
3436	}
3437
3438	amdgpu_ttm_set_buffer_funcs_status(adev, false);
3439
3440	r = amdgpu_device_ip_suspend_phase1(adev);
3441	if (r)
3442		return r;
3443	r = amdgpu_device_ip_suspend_phase2(adev);
3444
3445	if (amdgpu_sriov_vf(adev))
3446		amdgpu_virt_release_full_gpu(adev, false);
3447
3448	return r;
3449}
3450
3451static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3452{
3453	int i, r;
3454
3455	static enum amd_ip_block_type ip_order[] = {
3456		AMD_IP_BLOCK_TYPE_COMMON,
3457		AMD_IP_BLOCK_TYPE_GMC,
3458		AMD_IP_BLOCK_TYPE_PSP,
3459		AMD_IP_BLOCK_TYPE_IH,
3460	};
3461
3462	for (i = 0; i < adev->num_ip_blocks; i++) {
3463		int j;
3464		struct amdgpu_ip_block *block;
3465
3466		block = &adev->ip_blocks[i];
3467		block->status.hw = false;
3468
3469		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3470
3471			if (block->version->type != ip_order[j] ||
3472				!block->status.valid)
3473				continue;
3474
3475			r = block->version->funcs->hw_init(adev);
3476			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3477			if (r)
3478				return r;
3479			block->status.hw = true;
3480		}
3481	}
3482
3483	return 0;
3484}
3485
3486static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3487{
3488	int i, r;
3489
3490	static enum amd_ip_block_type ip_order[] = {
3491		AMD_IP_BLOCK_TYPE_SMC,
3492		AMD_IP_BLOCK_TYPE_DCE,
3493		AMD_IP_BLOCK_TYPE_GFX,
3494		AMD_IP_BLOCK_TYPE_SDMA,
3495		AMD_IP_BLOCK_TYPE_MES,
3496		AMD_IP_BLOCK_TYPE_UVD,
3497		AMD_IP_BLOCK_TYPE_VCE,
3498		AMD_IP_BLOCK_TYPE_VCN,
3499		AMD_IP_BLOCK_TYPE_JPEG
3500	};
3501
3502	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3503		int j;
3504		struct amdgpu_ip_block *block;
3505
3506		for (j = 0; j < adev->num_ip_blocks; j++) {
3507			block = &adev->ip_blocks[j];
3508
3509			if (block->version->type != ip_order[i] ||
3510				!block->status.valid ||
3511				block->status.hw)
3512				continue;
3513
3514			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3515				r = block->version->funcs->resume(adev);
3516			else
3517				r = block->version->funcs->hw_init(adev);
3518
3519			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3520			if (r)
3521				return r;
3522			block->status.hw = true;
3523		}
3524	}
3525
3526	return 0;
3527}
3528
3529/**
3530 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3531 *
3532 * @adev: amdgpu_device pointer
3533 *
3534 * First resume function for hardware IPs.  The list of all the hardware
3535 * IPs that make up the asic is walked and the resume callbacks are run for
3536 * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3537 * after a suspend and updates the software state as necessary.  This
3538 * function is also used for restoring the GPU after a GPU reset.
3539 * Returns 0 on success, negative error code on failure.
3540 */
3541static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3542{
3543	int i, r;
3544
3545	for (i = 0; i < adev->num_ip_blocks; i++) {
3546		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3547			continue;
3548		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3549		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3550		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3551		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3552
3553			r = adev->ip_blocks[i].version->funcs->resume(adev);
3554			if (r) {
3555				DRM_ERROR("resume of IP block <%s> failed %d\n",
3556					  adev->ip_blocks[i].version->funcs->name, r);
3557				return r;
3558			}
3559			adev->ip_blocks[i].status.hw = true;
3560		}
3561	}
3562
3563	return 0;
3564}
3565
3566/**
3567 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3568 *
3569 * @adev: amdgpu_device pointer
3570 *
3571 * Second resume function for hardware IPs.  The list of all the hardware
3572 * IPs that make up the asic is walked and the resume callbacks are run for
3573 * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3574 * functional state after a suspend and updates the software state as
3575 * necessary.  This function is also used for restoring the GPU after a GPU
3576 * reset.
3577 * Returns 0 on success, negative error code on failure.
3578 */
3579static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3580{
3581	int i, r;
3582
3583	for (i = 0; i < adev->num_ip_blocks; i++) {
3584		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3585			continue;
3586		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3587		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3588		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3589		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3590			continue;
3591		r = adev->ip_blocks[i].version->funcs->resume(adev);
3592		if (r) {
3593			DRM_ERROR("resume of IP block <%s> failed %d\n",
3594				  adev->ip_blocks[i].version->funcs->name, r);
3595			return r;
3596		}
3597		adev->ip_blocks[i].status.hw = true;
3598	}
3599
3600	return 0;
3601}
3602
3603/**
3604 * amdgpu_device_ip_resume - run resume for hardware IPs
3605 *
3606 * @adev: amdgpu_device pointer
3607 *
3608 * Main resume function for hardware IPs.  The hardware IPs
3609 * are split into two resume functions because they are
3610 * also used in recovering from a GPU reset and some additional
3611 * steps need to be taken between them.  In this case (S3/S4) they are
3612 * run sequentially.
3613 * Returns 0 on success, negative error code on failure.
3614 */
3615static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3616{
3617	int r;
3618
3619	r = amdgpu_device_ip_resume_phase1(adev);
3620	if (r)
3621		return r;
3622
3623	r = amdgpu_device_fw_loading(adev);
3624	if (r)
3625		return r;
3626
3627	r = amdgpu_device_ip_resume_phase2(adev);
3628
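	/* Re-enable SDMA-backed buffer moves only once the scheduler of the
	 * buffer_funcs ring is confirmed ready.
	 */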
3629	if (adev->mman.buffer_funcs_ring->sched.ready)
3630		amdgpu_ttm_set_buffer_funcs_status(adev, true);
3631
3632	return r;
3633}
3634
3635/**
3636 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3637 *
3638 * @adev: amdgpu_device pointer
3639 *
3640 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3641 */
3642static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3643{
3644	if (amdgpu_sriov_vf(adev)) {
3645		if (adev->is_atom_fw) {
3646			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3647				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3648		} else {
3649			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3650				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3651		}
3652
3653		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3654			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3655	}
3656}
3657
3658/**
3659 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3660 *
3661 * @asic_type: AMD asic type
3662 *
3663 * Check if there is DC (new modesetting infrastructure) support for an asic.
3664 * returns true if DC has support, false if not.
3665 */
3666bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3667{
3668	switch (asic_type) {
3669#ifdef CONFIG_DRM_AMDGPU_SI
3670	case CHIP_HAINAN:
3671#endif
3672	case CHIP_TOPAZ:
3673		/* chips with no display hardware */
3674		return false;
3675#if defined(CONFIG_DRM_AMD_DC)
3676	case CHIP_TAHITI:
3677	case CHIP_PITCAIRN:
3678	case CHIP_VERDE:
3679	case CHIP_OLAND:
3680		/*
3681		 * We have systems in the wild with these ASICs that require
3682		 * LVDS and VGA support which is not supported with DC.
3683		 *
3684		 * Fallback to the non-DC driver here by default so as not to
3685		 * cause regressions.
3686		 */
3687#if defined(CONFIG_DRM_AMD_DC_SI)
3688		return amdgpu_dc > 0;
3689#else
3690		return false;
3691#endif
3692	case CHIP_BONAIRE:
3693	case CHIP_KAVERI:
3694	case CHIP_KABINI:
3695	case CHIP_MULLINS:
3696		/*
3697		 * We have systems in the wild with these ASICs that require
3698		 * VGA support which is not supported with DC.
3699		 *
3700		 * Fallback to the non-DC driver here by default so as not to
3701		 * cause regressions.
3702		 */
3703		return amdgpu_dc > 0;
3704	default:
3705		return amdgpu_dc != 0;
3706#else
3707	default:
3708		if (amdgpu_dc > 0)
3709			DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
3710		return false;
3711#endif
3712	}
3713}
3714
3715/**
3716 * amdgpu_device_has_dc_support - check if dc is supported
3717 *
3718 * @adev: amdgpu_device pointer
3719 *
3720 * Returns true for supported, false for not supported
3721 */
3722bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3723{
3724	if (adev->enable_virtual_display ||
3725	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3726		return false;
3727
3728	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3729}
3730
3731static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3732{
3733	struct amdgpu_device *adev =
3734		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3735	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3736
3737	/* It's a bug to not have a hive within this function */
3738	if (WARN_ON(!hive))
3739		return;
3740
3741	/*
3742	 * Use task barrier to synchronize all xgmi reset works across the
3743	 * hive. task_barrier_enter and task_barrier_exit will block
3744	 * until all the threads running the xgmi reset works reach
3745	 * those points. task_barrier_full will do both blocks.
3746	 */
3747	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3748
3749		task_barrier_enter(&hive->tb);
3750		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3751
3752		if (adev->asic_reset_res)
3753			goto fail;
3754
3755		task_barrier_exit(&hive->tb);
3756		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3757
3758		if (adev->asic_reset_res)
3759			goto fail;
3760
3761		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
3762	} else {
3763
3764		task_barrier_full(&hive->tb);
3765		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3766	}
3767
3768fail:
3769	if (adev->asic_reset_res)
3770		DRM_WARN("ASIC reset failed with error %d for drm dev %s",
3771			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3772	amdgpu_put_xgmi_hive(hive);
3773}
3774
3775static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3776{
3777	char *input = amdgpu_lockup_timeout;
3778	char *timeout_setting = NULL;
3779	int index = 0;
3780	long timeout;
3781	int ret = 0;
3782
3783	/*
3784	 * By default the timeout for non-compute jobs is 10000 ms
3785	 * and 60000 ms for compute jobs.
3786	 * In SR-IOV or passthrough mode, the timeout for compute
3787	 * jobs is 60000 ms by default.
3788	 */
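	/*
	 * amdgpu.lockup_timeout is parsed below as a comma-separated list
	 * applied in the order gfx,compute,sdma,video; a value of 0 keeps
	 * the default and a negative value disables the timeout entirely.
	 */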
3789	adev->gfx_timeout = msecs_to_jiffies(10000);
3790	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3791	if (amdgpu_sriov_vf(adev))
3792		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3793					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3794	else
3795		adev->compute_timeout =  msecs_to_jiffies(60000);
3796
3797	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3798		while ((timeout_setting = strsep(&input, ",")) &&
3799				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3800			ret = kstrtol(timeout_setting, 0, &timeout);
3801			if (ret)
3802				return ret;
3803
3804			if (timeout == 0) {
3805				index++;
3806				continue;
3807			} else if (timeout < 0) {
3808				timeout = MAX_SCHEDULE_TIMEOUT;
3809				dev_warn(adev->dev, "lockup timeout disabled");
3810				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3811			} else {
3812				timeout = msecs_to_jiffies(timeout);
3813			}
3814
3815			switch (index++) {
3816			case 0:
3817				adev->gfx_timeout = timeout;
3818				break;
3819			case 1:
3820				adev->compute_timeout = timeout;
3821				break;
3822			case 2:
3823				adev->sdma_timeout = timeout;
3824				break;
3825			case 3:
3826				adev->video_timeout = timeout;
3827				break;
3828			default:
3829				break;
3830			}
3831		}
3832		/*
3833		 * There is only one value specified and
3834		 * it should apply to all non-compute jobs.
3835		 */
3836		if (index == 1) {
3837			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3838			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3839				adev->compute_timeout = adev->gfx_timeout;
3840		}
3841	}
3842
3843	return ret;
3844}
3845
3846/**
3847 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3848 *
3849 * @adev: amdgpu_device pointer
3850 *
3851 * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3852 */
3853static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3854{
3855	struct iommu_domain *domain;
3856
3857	domain = iommu_get_domain_for_dev(adev->dev);
3858	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3859		adev->ram_is_direct_mapped = true;
3860}
3861
3862static const struct attribute *amdgpu_dev_attributes[] = {
3863	&dev_attr_pcie_replay_count.attr,
3864	NULL
3865};
3866
3867static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3868{
3869	if (amdgpu_mcbp == 1)
3870		adev->gfx.mcbp = true;
3871	else if (amdgpu_mcbp == 0)
3872		adev->gfx.mcbp = false;
3873
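	/* SR-IOV always enables MCBP here, overriding the module parameter
	 * handling above.
	 */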
3874	if (amdgpu_sriov_vf(adev))
3875		adev->gfx.mcbp = true;
3876
3877	if (adev->gfx.mcbp)
3878		DRM_INFO("MCBP is enabled\n");
3879}
3880
3881/**
3882 * amdgpu_device_init - initialize the driver
3883 *
3884 * @adev: amdgpu_device pointer
3885 * @flags: driver flags
3886 *
3887 * Initializes the driver info and hw (all asics).
3888 * Returns 0 for success or an error on failure.
3889 * Called at driver startup.
3890 */
3891int amdgpu_device_init(struct amdgpu_device *adev,
3892		       uint32_t flags)
3893{
3894	struct drm_device *ddev = adev_to_drm(adev);
3895	struct pci_dev *pdev = adev->pdev;
3896	int r, i;
3897	bool px = false;
3898	u32 max_MBps;
3899	int tmp;
3900
3901	adev->shutdown = false;
3902	adev->flags = flags;
3903
3904	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3905		adev->asic_type = amdgpu_force_asic_type;
3906	else
3907		adev->asic_type = flags & AMD_ASIC_MASK;
3908
3909	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3910	if (amdgpu_emu_mode == 1)
3911		adev->usec_timeout *= 10;
3912	adev->gmc.gart_size = 512 * 1024 * 1024;
3913	adev->accel_working = false;
3914	adev->num_rings = 0;
3915	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3916	adev->mman.buffer_funcs = NULL;
3917	adev->mman.buffer_funcs_ring = NULL;
3918	adev->vm_manager.vm_pte_funcs = NULL;
3919	adev->vm_manager.vm_pte_num_scheds = 0;
3920	adev->gmc.gmc_funcs = NULL;
3921	adev->harvest_ip_mask = 0x0;
3922	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3923	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3924
3925	adev->smc_rreg = &amdgpu_invalid_rreg;
3926	adev->smc_wreg = &amdgpu_invalid_wreg;
3927	adev->pcie_rreg = &amdgpu_invalid_rreg;
3928	adev->pcie_wreg = &amdgpu_invalid_wreg;
3929	adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
3930	adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
3931	adev->pciep_rreg = &amdgpu_invalid_rreg;
3932	adev->pciep_wreg = &amdgpu_invalid_wreg;
3933	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3934	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3935	adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
3936	adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
3937	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3938	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3939	adev->didt_rreg = &amdgpu_invalid_rreg;
3940	adev->didt_wreg = &amdgpu_invalid_wreg;
3941	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3942	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3943	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3944	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3945
 
3946	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3947		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3948		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3949
3950	/* mutex initialization are all done here so we
3951	 * can recall function without having locking issues
3952	 */
3953	mutex_init(&adev->firmware.mutex);
3954	mutex_init(&adev->pm.mutex);
3955	mutex_init(&adev->gfx.gpu_clock_mutex);
3956	mutex_init(&adev->srbm_mutex);
3957	mutex_init(&adev->gfx.pipe_reserve_mutex);
3958	mutex_init(&adev->gfx.gfx_off_mutex);
3959	mutex_init(&adev->gfx.partition_mutex);
3960	mutex_init(&adev->grbm_idx_mutex);
3961	mutex_init(&adev->mn_lock);
3962	mutex_init(&adev->virt.vf_errors.lock);
3963	hash_init(adev->mn_hash);
3964	mutex_init(&adev->psp.mutex);
3965	mutex_init(&adev->notifier_lock);
3966	mutex_init(&adev->pm.stable_pstate_ctx_lock);
3967	mutex_init(&adev->benchmark_mutex);
3968
3969	amdgpu_device_init_apu_flags(adev);
3970
3971	r = amdgpu_device_check_arguments(adev);
3972	if (r)
3973		return r;
3974
3975	spin_lock_init(&adev->mmio_idx_lock);
3976	spin_lock_init(&adev->smc_idx_lock);
3977	spin_lock_init(&adev->pcie_idx_lock);
3978	spin_lock_init(&adev->uvd_ctx_idx_lock);
3979	spin_lock_init(&adev->didt_idx_lock);
3980	spin_lock_init(&adev->gc_cac_idx_lock);
3981	spin_lock_init(&adev->se_cac_idx_lock);
3982	spin_lock_init(&adev->audio_endpt_idx_lock);
3983	spin_lock_init(&adev->mm_stats.lock);
3984
3985	INIT_LIST_HEAD(&adev->shadow_list);
3986	mutex_init(&adev->shadow_list_lock);
3987
3988	INIT_LIST_HEAD(&adev->reset_list);
3989
3990	INIT_LIST_HEAD(&adev->ras_list);
3991
3992	INIT_LIST_HEAD(&adev->pm.od_kobj_list);
3993
3994	INIT_DELAYED_WORK(&adev->delayed_init_work,
3995			  amdgpu_device_delayed_init_work_handler);
3996	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3997			  amdgpu_device_delay_enable_gfx_off);
3998
3999	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
4000
4001	adev->gfx.gfx_off_req_count = 1;
4002	adev->gfx.gfx_off_residency = 0;
4003	adev->gfx.gfx_off_entrycount = 0;
4004	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
4005
4006	atomic_set(&adev->throttling_logging_enabled, 1);
4007	/*
4008	 * If throttling continues, logging will be performed every minute
4009	 * to avoid log flooding. "-1" is subtracted since the thermal
4010	 * throttling interrupt comes every second. Thus, the total logging
4011	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
4012	 * for the throttling interrupt) = 60 seconds.
4013	 */
4014	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
4015	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
4016
4017	/* Registers mapping */
4018	/* TODO: block userspace mapping of io register */
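	/* BAR 5 holds the register aperture on CHIP_BONAIRE and newer;
	 * older (SI generation) parts expose it in BAR 2.
	 */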
4019	if (adev->asic_type >= CHIP_BONAIRE) {
4020		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
4021		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
4022	} else {
4023		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
4024		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
4025	}
4026
4027	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
4028		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
4029
4030	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
4031	if (!adev->rmmio)
4032		return -ENOMEM;
4033
4034	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
4035	DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
4036
4037	/*
4038	 * The reset domain needs to be present early, before the XGMI hive is
4039	 * discovered (if any) and initialized, so the reset sem and in_gpu_reset
4040	 * flag can be used early on during init and before calling RREG32.
4041	 */
4042	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
4043	if (!adev->reset_domain)
4044		return -ENOMEM;
4045
4046	/* detect hw virtualization here */
4047	amdgpu_detect_virtualization(adev);
4048
4049	amdgpu_device_get_pcie_info(adev);
4050
4051	r = amdgpu_device_get_job_timeout_settings(adev);
4052	if (r) {
4053		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
4054		return r;
4055	}
4056
4057	amdgpu_device_set_mcbp(adev);
4058
4059	/* early init functions */
4060	r = amdgpu_device_ip_early_init(adev);
4061	if (r)
4062		return r;
4063
4064	/* Get rid of things like offb */
4065	r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
4066	if (r)
4067		return r;
4068
4069	/* Enable TMZ based on IP_VERSION */
4070	amdgpu_gmc_tmz_set(adev);
4071
4072	amdgpu_gmc_noretry_set(adev);
4073	/* Need to get xgmi info early to decide the reset behavior */
4074	if (adev->gmc.xgmi.supported) {
4075		r = adev->gfxhub.funcs->get_xgmi_info(adev);
4076		if (r)
4077			return r;
4078	}
4079
4080	/* enable PCIE atomic ops */
4081	if (amdgpu_sriov_vf(adev)) {
4082		if (adev->virt.fw_reserve.p_pf2vf)
4083			adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
4084						      adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
4085				(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4086	/* APUs with GFX9 onwards don't rely on PCIe atomics; their internal
4087	 * path natively supports atomics, so set have_atomics_support to true.
4088	 */
4089	} else if ((adev->flags & AMD_IS_APU) &&
4090		   (amdgpu_ip_version(adev, GC_HWIP, 0) >
4091		    IP_VERSION(9, 0, 0))) {
4092		adev->have_atomics_support = true;
4093	} else {
4094		adev->have_atomics_support =
4095			!pci_enable_atomic_ops_to_root(adev->pdev,
4096					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
4097					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4098	}
4099
4100	if (!adev->have_atomics_support)
4101		dev_info(adev->dev, "PCIe atomic ops are not supported\n");
4102
4103	/* doorbell bar mapping and doorbell index init */
4104	amdgpu_doorbell_init(adev);
4105
4106	if (amdgpu_emu_mode == 1) {
4107		/* post the asic on emulation mode */
4108		emu_soc_asic_init(adev);
4109		goto fence_driver_init;
4110	}
4111
4112	amdgpu_reset_init(adev);
4113
4114	/* detect if we are with an SRIOV vbios */
4115	if (adev->bios)
4116		amdgpu_device_detect_sriov_bios(adev);
4117
4118	/* check if we need to reset the asic
4119	 *  E.g., driver was not cleanly unloaded previously, etc.
4120	 */
4121	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
4122		if (adev->gmc.xgmi.num_physical_nodes) {
4123			dev_info(adev->dev, "Pending hive reset.\n");
4124			adev->gmc.xgmi.pending_reset = true;
4125			/* Only need to init necessary block for SMU to handle the reset */
4126			for (i = 0; i < adev->num_ip_blocks; i++) {
4127				if (!adev->ip_blocks[i].status.valid)
4128					continue;
4129				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
4130				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
4131				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
4132				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
4133					DRM_DEBUG("IP %s disabled for hw_init.\n",
4134						adev->ip_blocks[i].version->funcs->name);
4135					adev->ip_blocks[i].status.hw = true;
4136				}
4137			}
4138		} else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
4139			   !amdgpu_device_has_display_hardware(adev)) {
4140			r = psp_gpu_reset(adev);
4141		} else {
4142			tmp = amdgpu_reset_method;
4143			/* It should do a default reset when loading or reloading the driver,
4144			 * regardless of the module parameter reset_method.
4145			 */
4146			amdgpu_reset_method = AMD_RESET_METHOD_NONE;
4147			r = amdgpu_asic_reset(adev);
4148			amdgpu_reset_method = tmp;
4149		}
4150
4151		if (r) {
4152			dev_err(adev->dev, "asic reset on init failed\n");
4153			goto failed;
4154		}
4155	}
4156
4157	/* Post card if necessary */
4158	if (amdgpu_device_need_post(adev)) {
4159		if (!adev->bios) {
4160			dev_err(adev->dev, "no vBIOS found\n");
4161			r = -EINVAL;
4162			goto failed;
4163		}
4164		DRM_INFO("GPU posting now...\n");
4165		r = amdgpu_device_asic_init(adev);
4166		if (r) {
4167			dev_err(adev->dev, "gpu post error!\n");
4168			goto failed;
4169		}
4170	}
4171
4172	if (adev->bios) {
4173		if (adev->is_atom_fw) {
4174			/* Initialize clocks */
4175			r = amdgpu_atomfirmware_get_clock_info(adev);
4176			if (r) {
4177				dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
4178				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4179				goto failed;
4180			}
4181		} else {
4182			/* Initialize clocks */
4183			r = amdgpu_atombios_get_clock_info(adev);
4184			if (r) {
4185				dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
4186				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4187				goto failed;
4188			}
4189			/* init i2c buses */
4190			if (!amdgpu_device_has_dc_support(adev))
4191				amdgpu_atombios_i2c_init(adev);
4192		}
4193	}
4194
4195fence_driver_init:
4196	/* Fence driver */
4197	r = amdgpu_fence_driver_sw_init(adev);
4198	if (r) {
4199		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
4200		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
4201		goto failed;
4202	}
4203
4204	/* init the mode config */
4205	drm_mode_config_init(adev_to_drm(adev));
4206
4207	r = amdgpu_device_ip_init(adev);
4208	if (r) {
4209		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
4210		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
4211		goto release_ras_con;
4212	}
4213
4214	amdgpu_fence_driver_hw_init(adev);
4215
4216	dev_info(adev->dev,
4217		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
4218			adev->gfx.config.max_shader_engines,
4219			adev->gfx.config.max_sh_per_se,
4220			adev->gfx.config.max_cu_per_sh,
4221			adev->gfx.cu_info.number);
4222
4223	adev->accel_working = true;
4224
4225	amdgpu_vm_check_compute_bug(adev);
4226
4227	/* Initialize the buffer migration limit. */
4228	if (amdgpu_moverate >= 0)
4229		max_MBps = amdgpu_moverate;
4230	else
4231		max_MBps = 8; /* Allow 8 MB/s. */
4232	/* Get a log2 for easy divisions. */
4233	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
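	/* e.g. the default max_MBps of 8 yields log2_max_MBps = 3, so the
	 * accounting code can divide by the rate with a simple shift.
	 */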
4234
4235	/*
4236	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
4237	 * Otherwise the mgpu fan boost feature will be skipped because
4238	 * the gpu instance count is too low.
4239	 */
4240	amdgpu_register_gpu_instance(adev);
4241
4242	/* enable clockgating, etc. after ib tests, etc. since some blocks require
4243	 * explicit gating rather than handling it automatically.
4244	 */
4245	if (!adev->gmc.xgmi.pending_reset) {
4246		r = amdgpu_device_ip_late_init(adev);
4247		if (r) {
4248			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4249			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4250			goto release_ras_con;
4251		}
4252		/* must succeed. */
4253		amdgpu_ras_resume(adev);
4254		queue_delayed_work(system_wq, &adev->delayed_init_work,
4255				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4256	}
4257
4258	if (amdgpu_sriov_vf(adev)) {
4259		amdgpu_virt_release_full_gpu(adev, true);
4260		flush_delayed_work(&adev->delayed_init_work);
4261	}
4262
4263	/*
4264	 * Register these sysfs interfaces after `late_init`, since some of
4265	 * the operations performed in `late_init` might affect the creation
4266	 * of the sysfs interfaces.
4267	 */
4268	r = amdgpu_atombios_sysfs_init(adev);
4269	if (r)
4270		drm_err(&adev->ddev,
4271			"registering atombios sysfs failed (%d).\n", r);
4272
4273	r = amdgpu_pm_sysfs_init(adev);
4274	if (r)
4275		DRM_ERROR("registering pm sysfs failed (%d).\n", r);
4276
4277	r = amdgpu_ucode_sysfs_init(adev);
4278	if (r) {
4279		adev->ucode_sysfs_en = false;
4280		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
4281	} else
4282		adev->ucode_sysfs_en = true;
4283
4284	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
4285	if (r)
4286		dev_err(adev->dev, "Could not create amdgpu device attr\n");
4287
4288	r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
4289	if (r)
4290		dev_err(adev->dev,
4291			"Could not create amdgpu board attributes\n");
4292
4293	amdgpu_fru_sysfs_init(adev);
4294	amdgpu_reg_state_sysfs_init(adev);
4295
4296	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
4297		r = amdgpu_pmu_init(adev);
4298		if (r)
4299			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}
4300
4301	/* Have stored pci confspace at hand for restore in sudden PCI error */
4302	if (amdgpu_device_cache_pci_state(adev->pdev))
4303		pci_restore_state(pdev);
4304
4305	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
4306	/* this will fail for cards that aren't VGA class devices, just
4307	 * ignore it
4308	 */
4309	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4310		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4311
4312	px = amdgpu_device_supports_px(ddev);
4313
4314	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4315				apple_gmux_detect(NULL, NULL)))
4316		vga_switcheroo_register_client(adev->pdev,
4317					       &amdgpu_switcheroo_ops, px);
4318
4319	if (px)
4320		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4321
4322	if (adev->gmc.xgmi.pending_reset)
4323		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
4324				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4325
4326	amdgpu_device_check_iommu_direct_map(adev);
4327
4328	return 0;
4329
4330release_ras_con:
4331	if (amdgpu_sriov_vf(adev))
4332		amdgpu_virt_release_full_gpu(adev, true);
4333
4334	/* failed in exclusive mode due to timeout */
4335	if (amdgpu_sriov_vf(adev) &&
4336		!amdgpu_sriov_runtime(adev) &&
4337		amdgpu_virt_mmio_blocked(adev) &&
4338		!amdgpu_virt_wait_reset(adev)) {
4339		dev_err(adev->dev, "VF exclusive mode timeout\n");
4340		/* Don't send request since VF is inactive. */
4341		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4342		adev->virt.ops = NULL;
4343		r = -EAGAIN;
4344	}
4345	amdgpu_release_ras_context(adev);
4346
4347failed:
4348	amdgpu_vf_error_trans_all(adev);
4349
4350	return r;
4351}
4352
4353static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4354{
4355
4356	/* Clear all CPU mappings pointing to this device */
4357	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4358
4359	/* Unmap all mapped bars - Doorbell, registers and VRAM */
4360	amdgpu_doorbell_fini(adev);
4361
4362	iounmap(adev->rmmio);
4363	adev->rmmio = NULL;
4364	if (adev->mman.aper_base_kaddr)
4365		iounmap(adev->mman.aper_base_kaddr);
4366	adev->mman.aper_base_kaddr = NULL;
4367
4368	/* Memory manager related */
4369	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4370		arch_phys_wc_del(adev->gmc.vram_mtrr);
4371		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4372	}
4373}
4374
4375/**
4376 * amdgpu_device_fini_hw - tear down the driver
4377 *
4378 * @adev: amdgpu_device pointer
4379 *
4380 * Tear down the driver info (all asics).
4381 * Called at driver shutdown.
4382 */
4383void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4384{
4385	dev_info(adev->dev, "amdgpu: finishing device.\n");
4386	flush_delayed_work(&adev->delayed_init_work);
4387	adev->shutdown = true;
4388
4389	/* make sure the IB tests have finished before entering exclusive mode
4390	 * to avoid preemption during the IB tests
4391	 */
4392	if (amdgpu_sriov_vf(adev)) {
4393		amdgpu_virt_request_full_gpu(adev, false);
4394		amdgpu_virt_fini_data_exchange(adev);
4395	}
4396
4397	/* disable all interrupts */
4398	amdgpu_irq_disable_all(adev);
4399	if (adev->mode_info.mode_config_initialized) {
4400		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4401			drm_helper_force_disable_all(adev_to_drm(adev));
4402		else
4403			drm_atomic_helper_shutdown(adev_to_drm(adev));
4404	}
4405	amdgpu_fence_driver_hw_fini(adev);
4406
4407	if (adev->mman.initialized)
4408		drain_workqueue(adev->mman.bdev.wq);
4409
4410	if (adev->pm.sysfs_initialized)
4411		amdgpu_pm_sysfs_fini(adev);
4412	if (adev->ucode_sysfs_en)
4413		amdgpu_ucode_sysfs_fini(adev);
4414	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4415	amdgpu_fru_sysfs_fini(adev);
4416
4417	amdgpu_reg_state_sysfs_fini(adev);
4418
4419	/* disable ras feature must before hw fini */
4420	amdgpu_ras_pre_fini(adev);
4421
4422	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4423
4424	amdgpu_device_ip_fini_early(adev);
4425
4426	amdgpu_irq_fini_hw(adev);
4427
4428	if (adev->mman.initialized)
4429		ttm_device_clear_dma_mappings(&adev->mman.bdev);
4430
4431	amdgpu_gart_dummy_page_fini(adev);
4432
4433	if (drm_dev_is_unplugged(adev_to_drm(adev)))
4434		amdgpu_device_unmap_mmio(adev);
4435
4436}
4437
4438void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4439{
4440	int idx;
4441	bool px;
4442
4443	amdgpu_fence_driver_sw_fini(adev);
4444	amdgpu_device_ip_fini(adev);
4445	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4446	adev->accel_working = false;
4447	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4448
4449	amdgpu_reset_fini(adev);
4450
4451	/* free i2c buses */
4452	if (!amdgpu_device_has_dc_support(adev))
4453		amdgpu_i2c_fini(adev);
4454
4455	if (amdgpu_emu_mode != 1)
4456		amdgpu_atombios_fini(adev);
4457
4458	kfree(adev->bios);
4459	adev->bios = NULL;
4460
4461	kfree(adev->fru_info);
4462	adev->fru_info = NULL;
4463
4464	px = amdgpu_device_supports_px(adev_to_drm(adev));
4465
4466	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4467				apple_gmux_detect(NULL, NULL)))
4468		vga_switcheroo_unregister_client(adev->pdev);
4469
4470	if (px)
4471		vga_switcheroo_fini_domain_pm_ops(adev->dev);
4472
4473	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4474		vga_client_unregister(adev->pdev);
4475
4476	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4477
4478		iounmap(adev->rmmio);
4479		adev->rmmio = NULL;
4480		amdgpu_doorbell_fini(adev);
4481		drm_dev_exit(idx);
4482	}
4483
4484	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4485		amdgpu_pmu_fini(adev);
4486	if (adev->mman.discovery_bin)
4487		amdgpu_discovery_fini(adev);
4488
4489	amdgpu_reset_put_reset_domain(adev->reset_domain);
4490	adev->reset_domain = NULL;
4491
4492	kfree(adev->pci_state);
4493
4494}
4495
4496/**
4497 * amdgpu_device_evict_resources - evict device resources
4498 * @adev: amdgpu device object
4499 *
4500 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4501 * of the vram memory type. Mainly used for evicting device resources
4502 * at suspend time.
4503 *
4504 */
4505static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4506{
4507	int ret;
4508
4509	/* No need to evict vram on APUs for suspend to ram or s2idle */
4510	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4511		return 0;
4512
4513	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4514	if (ret)
4515		DRM_WARN("evicting device resources failed\n");
4516	return ret;
4517}
4518
4519/*
4520 * Suspend & resume.
4521 */
4522/**
4523 * amdgpu_device_prepare - prepare for device suspend
4524 *
4525 * @dev: drm dev pointer
4526 *
4527 * Prepare to put the hw in the suspend state (all asics).
4528 * Returns 0 for success or an error on failure.
4529 * Called at driver suspend.
4530 */
4531int amdgpu_device_prepare(struct drm_device *dev)
4532{
4533	struct amdgpu_device *adev = drm_to_adev(dev);
4534	int i, r;
4535
4536	amdgpu_choose_low_power_state(adev);
4537
4538	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4539		return 0;
4540
4541	/* Evict the majority of BOs before starting suspend sequence */
4542	r = amdgpu_device_evict_resources(adev);
4543	if (r)
4544		goto unprepare;
4545
4546	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
4547
4548	for (i = 0; i < adev->num_ip_blocks; i++) {
4549		if (!adev->ip_blocks[i].status.valid)
4550			continue;
4551		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
4552			continue;
4553		r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
4554		if (r)
4555			goto unprepare;
4556	}
4557
4558	return 0;
4559
4560unprepare:
4561	adev->in_s0ix = adev->in_s3 = false;
4562
4563	return r;
4564}
4565
4566/**
4567 * amdgpu_device_suspend - initiate device suspend
4568 *
4569 * @dev: drm dev pointer
4570 * @fbcon: notify the fbdev of suspend
4571 *
4572 * Puts the hw in the suspend state (all asics).
4573 * Returns 0 for success or an error on failure.
4574 * Called at driver suspend.
4575 */
4576int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4577{
4578	struct amdgpu_device *adev = drm_to_adev(dev);
4579	int r = 0;
4580
4581	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4582		return 0;
4583
4584	adev->in_suspend = true;
4585
4586	if (amdgpu_sriov_vf(adev)) {
4587		amdgpu_virt_fini_data_exchange(adev);
4588		r = amdgpu_virt_request_full_gpu(adev, false);
4589		if (r)
4590			return r;
4591	}
4592
4593	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4594		DRM_WARN("smart shift update failed\n");
4595
4596	if (fbcon)
4597		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4598
4599	cancel_delayed_work_sync(&adev->delayed_init_work);
4600
4601	amdgpu_ras_suspend(adev);
4602
4603	amdgpu_device_ip_suspend_phase1(adev);
4604
4605	if (!adev->in_s0ix)
4606		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4607
4608	r = amdgpu_device_evict_resources(adev);
4609	if (r)
4610		return r;
4611
4612	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4613
4614	amdgpu_fence_driver_hw_fini(adev);
4615
4616	amdgpu_device_ip_suspend_phase2(adev);
4617
4618	if (amdgpu_sriov_vf(adev))
4619		amdgpu_virt_release_full_gpu(adev, false);
4620
4621	r = amdgpu_dpm_notify_rlc_state(adev, false);
4622	if (r)
4623		return r;
4624
4625	return 0;
4626}
4627
4628/**
4629 * amdgpu_device_resume - initiate device resume
4630 *
4631 * @dev: drm dev pointer
4632 * @fbcon: notify the fbdev of resume
4633 *
4634 * Bring the hw back to operating state (all asics).
4635 * Returns 0 for success or an error on failure.
4636 * Called at driver resume.
4637 */
4638int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4639{
4640	struct amdgpu_device *adev = drm_to_adev(dev);
4641	int r = 0;
4642
4643	if (amdgpu_sriov_vf(adev)) {
4644		r = amdgpu_virt_request_full_gpu(adev, true);
4645		if (r)
4646			return r;
4647	}
4648
4649	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4650		return 0;
4651
4652	if (adev->in_s0ix)
4653		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4654
4655	/* post card */
4656	if (amdgpu_device_need_post(adev)) {
4657		r = amdgpu_device_asic_init(adev);
4658		if (r)
4659			dev_err(adev->dev, "amdgpu asic init failed\n");
4660	}
4661
4662	r = amdgpu_device_ip_resume(adev);
4663
4664	if (r) {
4665		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4666		goto exit;
4667	}
4668	amdgpu_fence_driver_hw_init(adev);
4669
4670	if (!adev->in_s0ix) {
4671		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4672		if (r)
4673			goto exit;
4674	}
4675
4676	r = amdgpu_device_ip_late_init(adev);
4677	if (r)
4678		goto exit;
4679
4680	queue_delayed_work(system_wq, &adev->delayed_init_work,
4681			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4682exit:
4683	if (amdgpu_sriov_vf(adev)) {
4684		amdgpu_virt_init_data_exchange(adev);
4685		amdgpu_virt_release_full_gpu(adev, true);
4686	}
4687
4688	if (r)
4689		return r;
4690
4691	/* Make sure IB tests flushed */
4692	flush_delayed_work(&adev->delayed_init_work);
4693
4694	if (fbcon)
4695		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4696
4697	amdgpu_ras_resume(adev);
4698
4699	if (adev->mode_info.num_crtc) {
4700		/*
4701		 * Most of the connector probing functions try to acquire runtime pm
4702		 * refs to ensure that the GPU is powered on when connector polling is
4703		 * performed. Since we're calling this from a runtime PM callback,
4704		 * trying to acquire rpm refs will cause us to deadlock.
4705		 *
4706		 * Since we're guaranteed to be holding the rpm lock, it's safe to
4707		 * temporarily disable the rpm helpers so this doesn't deadlock us.
4708		 */
4709#ifdef CONFIG_PM
4710		dev->dev->power.disable_depth++;
4711#endif
4712		if (!adev->dc_enabled)
4713			drm_helper_hpd_irq_event(dev);
4714		else
4715			drm_kms_helper_hotplug_event(dev);
4716#ifdef CONFIG_PM
4717		dev->dev->power.disable_depth--;
4718#endif
4719	}
4720	adev->in_suspend = false;
4721
4722	if (adev->enable_mes)
4723		amdgpu_mes_self_test(adev);
4724
4725	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4726		DRM_WARN("smart shift update failed\n");
4727
4728	return 0;
4729}
4730
4731/**
4732 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4733 *
4734 * @adev: amdgpu_device pointer
4735 *
4736 * The list of all the hardware IPs that make up the asic is walked and
4737 * the check_soft_reset callbacks are run.  check_soft_reset determines
4738 * if the asic is still hung or not.
4739 * Returns true if any of the IPs are still in a hung state, false if not.
4740 */
4741static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4742{
4743	int i;
4744	bool asic_hang = false;
4745
4746	if (amdgpu_sriov_vf(adev))
4747		return true;
4748
4749	if (amdgpu_asic_need_full_reset(adev))
4750		return true;
4751
4752	for (i = 0; i < adev->num_ip_blocks; i++) {
4753		if (!adev->ip_blocks[i].status.valid)
4754			continue;
4755		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4756			adev->ip_blocks[i].status.hang =
4757				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4758		if (adev->ip_blocks[i].status.hang) {
4759			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4760			asic_hang = true;
4761		}
4762	}
4763	return asic_hang;
4764}
4765
4766/**
4767 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4768 *
4769 * @adev: amdgpu_device pointer
4770 *
4771 * The list of all the hardware IPs that make up the asic is walked and the
4772 * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4773 * handles any IP specific hardware or software state changes that are
4774 * necessary for a soft reset to succeed.
4775 * Returns 0 on success, negative error code on failure.
4776 */
4777static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4778{
4779	int i, r = 0;
4780
4781	for (i = 0; i < adev->num_ip_blocks; i++) {
4782		if (!adev->ip_blocks[i].status.valid)
4783			continue;
4784		if (adev->ip_blocks[i].status.hang &&
4785		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4786			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4787			if (r)
4788				return r;
4789		}
4790	}
4791
4792	return 0;
4793}
4794
4795/**
4796 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4797 *
4798 * @adev: amdgpu_device pointer
4799 *
4800 * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4801 * reset is necessary to recover.
4802 * Returns true if a full asic reset is required, false if not.
4803 */
4804static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4805{
4806	int i;
4807
4808	if (amdgpu_asic_need_full_reset(adev))
4809		return true;
4810
4811	for (i = 0; i < adev->num_ip_blocks; i++) {
4812		if (!adev->ip_blocks[i].status.valid)
4813			continue;
4814		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4815		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4816		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4817		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4818		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4819			if (adev->ip_blocks[i].status.hang) {
4820				dev_info(adev->dev, "Some blocks need a full reset!\n");
4821				return true;
4822			}
4823		}
4824	}
4825	return false;
4826}
4827
4828/**
4829 * amdgpu_device_ip_soft_reset - do a soft reset
4830 *
4831 * @adev: amdgpu_device pointer
4832 *
4833 * The list of all the hardware IPs that make up the asic is walked and the
4834 * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4835 * IP specific hardware or software state changes that are necessary to soft
4836 * reset the IP.
4837 * Returns 0 on success, negative error code on failure.
4838 */
4839static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4840{
4841	int i, r = 0;
4842
4843	for (i = 0; i < adev->num_ip_blocks; i++) {
4844		if (!adev->ip_blocks[i].status.valid)
4845			continue;
4846		if (adev->ip_blocks[i].status.hang &&
4847		    adev->ip_blocks[i].version->funcs->soft_reset) {
4848			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4849			if (r)
4850				return r;
4851		}
4852	}
4853
4854	return 0;
4855}
4856
4857/**
4858 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4859 *
4860 * @adev: amdgpu_device pointer
4861 *
4862 * The list of all the hardware IPs that make up the asic is walked and the
4863 * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4864 * handles any IP specific hardware or software state changes that are
4865 * necessary after the IP has been soft reset.
4866 * Returns 0 on success, negative error code on failure.
4867 */
4868static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4869{
4870	int i, r = 0;
4871
4872	for (i = 0; i < adev->num_ip_blocks; i++) {
4873		if (!adev->ip_blocks[i].status.valid)
4874			continue;
4875		if (adev->ip_blocks[i].status.hang &&
4876		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4877			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4878		if (r)
4879			return r;
4880	}
4881
4882	return 0;
4883}
4884
4885/**
4886 * amdgpu_device_recover_vram - Recover some VRAM contents
4887 *
4888 * @adev: amdgpu_device pointer
4889 *
4890 * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4891 * restore things like GPUVM page tables after a GPU reset where
4892 * the contents of VRAM might be lost.
4893 *
4894 * Returns:
4895 * 0 on success, negative error code on failure.
4896 */
4897static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4898{
4899	struct dma_fence *fence = NULL, *next = NULL;
4900	struct amdgpu_bo *shadow;
4901	struct amdgpu_bo_vm *vmbo;
4902	long r = 1, tmo;
4903
4904	if (amdgpu_sriov_runtime(adev))
4905		tmo = msecs_to_jiffies(8000);
4906	else
4907		tmo = msecs_to_jiffies(100);
4908
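	/* Each dma_fence_wait_timeout() below reuses whatever is left of
	 * tmo, so tmo is an overall budget across all shadow restores
	 * rather than a per-BO timeout.
	 */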
4909	dev_info(adev->dev, "recover vram bo from shadow start\n");
4910	mutex_lock(&adev->shadow_list_lock);
4911	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4912		/* If the vm is a compute context or adev is an APU, the shadow will be NULL */
4913		if (!vmbo->shadow)
4914			continue;
4915		shadow = vmbo->shadow;
4916
4917		/* No need to recover an evicted BO */
4918		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4919		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4920		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4921			continue;
4922
4923		r = amdgpu_bo_restore_shadow(shadow, &next);
4924		if (r)
4925			break;
4926
4927		if (fence) {
4928			tmo = dma_fence_wait_timeout(fence, false, tmo);
4929			dma_fence_put(fence);
4930			fence = next;
4931			if (tmo == 0) {
4932				r = -ETIMEDOUT;
4933				break;
4934			} else if (tmo < 0) {
4935				r = tmo;
4936				break;
4937			}
4938		} else {
4939			fence = next;
4940		}
4941	}
4942	mutex_unlock(&adev->shadow_list_lock);
4943
4944	if (fence)
4945		tmo = dma_fence_wait_timeout(fence, false, tmo);
4946	dma_fence_put(fence);
4947
4948	if (r < 0 || tmo <= 0) {
4949		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4950		return -EIO;
4951	}
4952
4953	dev_info(adev->dev, "recover vram bo from shadow done\n");
4954	return 0;
4955}
4956
4957
4958/**
4959 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4960 *
4961 * @adev: amdgpu_device pointer
4962 * @from_hypervisor: request from hypervisor
4963 *
4964 * Do VF FLR and reinitialize the ASIC.
4965 * Returns 0 on success, otherwise an error code.
4966 */
4967static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4968				     bool from_hypervisor)
4969{
4970	int r;
4971	struct amdgpu_hive_info *hive = NULL;
4972	int retry_limit = 0;
4973
4974retry:
4975	amdgpu_amdkfd_pre_reset(adev);
4976
4977	if (from_hypervisor)
4978		r = amdgpu_virt_request_full_gpu(adev, true);
4979	else
4980		r = amdgpu_virt_reset_gpu(adev);
4981	if (r)
4982		return r;
4983	amdgpu_irq_gpu_reset_resume_helper(adev);
4984
4985	/* some SW cleanup the VF needs to do before recovery */
4986	amdgpu_virt_post_reset(adev);
4987
4988	/* Resume IP prior to SMC */
4989	r = amdgpu_device_ip_reinit_early_sriov(adev);
4990	if (r)
4991		goto error;
4992
4993	amdgpu_virt_init_data_exchange(adev);
4994
4995	r = amdgpu_device_fw_loading(adev);
4996	if (r)
4997		return r;
4998
4999	/* now we are okay to resume SMC/CP/SDMA */
5000	r = amdgpu_device_ip_reinit_late_sriov(adev);
5001	if (r)
5002		goto error;
5003
5004	hive = amdgpu_get_xgmi_hive(adev);
5005	/* Update PSP FW topology after reset */
5006	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
5007		r = amdgpu_xgmi_update_topology(hive, adev);
5008
5009	if (hive)
5010		amdgpu_put_xgmi_hive(hive);
5011
5012	if (!r) {
5013		r = amdgpu_ib_ring_tests(adev);
5014
5015		amdgpu_amdkfd_post_reset(adev);
5016	}
5017
5018error:
5019	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
5020		amdgpu_inc_vram_lost(adev);
5021		r = amdgpu_device_recover_vram(adev);
5022	}
5023	amdgpu_virt_release_full_gpu(adev, true);
5024
5025	if (AMDGPU_RETRY_SRIOV_RESET(r)) {
5026		if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
5027			retry_limit++;
5028			goto retry;
5029		} else
5030			DRM_ERROR("GPU reset retry is beyond the retry limit\n");
5031	}
5032
5033	return r;
5034}
5035
5036/**
5037 * amdgpu_device_has_job_running - check if there is any job in the pending list
5038 *
5039 * @adev: amdgpu_device pointer
5040 *
5041 * check if there is any job in the pending list
5042 */
5043bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
5044{
5045	int i;
5046	struct drm_sched_job *job;
5047
5048	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5049		struct amdgpu_ring *ring = adev->rings[i];
5050
5051		if (!amdgpu_ring_sched_ready(ring))
5052			continue;
5053
5054		spin_lock(&ring->sched.job_list_lock);
5055		job = list_first_entry_or_null(&ring->sched.pending_list,
5056					       struct drm_sched_job, list);
5057		spin_unlock(&ring->sched.job_list_lock);
5058		if (job)
5059			return true;
5060	}
5061	return false;
5062}
5063
5064/**
5065 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
5066 *
5067 * @adev: amdgpu_device pointer
5068 *
5069 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
5070 * a hung GPU.
5071 */
5072bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
5073{
5074
5075	if (amdgpu_gpu_recovery == 0)
5076		goto disabled;
5077
5078	/* Skip soft reset check in fatal error mode */
5079	if (!amdgpu_ras_is_poison_mode_supported(adev))
5080		return true;
5081
5082	if (amdgpu_sriov_vf(adev))
5083		return true;
5084
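	/* With the default of -1 (auto), recovery stays disabled on the
	 * older ASICs listed below, which lack reliable reset support.
	 */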
5085	if (amdgpu_gpu_recovery == -1) {
5086		switch (adev->asic_type) {
5087#ifdef CONFIG_DRM_AMDGPU_SI
5088		case CHIP_VERDE:
5089		case CHIP_TAHITI:
5090		case CHIP_PITCAIRN:
5091		case CHIP_OLAND:
5092		case CHIP_HAINAN:
5093#endif
5094#ifdef CONFIG_DRM_AMDGPU_CIK
5095		case CHIP_KAVERI:
5096		case CHIP_KABINI:
5097		case CHIP_MULLINS:
5098#endif
5099		case CHIP_CARRIZO:
5100		case CHIP_STONEY:
5101		case CHIP_CYAN_SKILLFISH:
5102			goto disabled;
5103		default:
5104			break;
5105		}
5106	}
5107
5108	return true;
5109
5110disabled:
5111	dev_info(adev->dev, "GPU recovery disabled.\n");
5112	return false;
5113}
5114
5115int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
5116{
5117	u32 i;
5118	int ret = 0;
5119
5120	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
5121
5122	dev_info(adev->dev, "GPU mode1 reset\n");
5123
5124	/* disable BM */
5125	pci_clear_master(adev->pdev);
5126
5127	amdgpu_device_cache_pci_state(adev->pdev);
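	/* The cached config space is written back via
	 * amdgpu_device_load_pci_state() below, since a mode1 reset does
	 * not preserve it.
	 */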
5128
5129	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
5130		dev_info(adev->dev, "GPU smu mode1 reset\n");
5131		ret = amdgpu_dpm_mode1_reset(adev);
5132	} else {
5133		dev_info(adev->dev, "GPU psp mode1 reset\n");
5134		ret = psp_gpu_reset(adev);
5135	}
5136
5137	if (ret)
5138		goto mode1_reset_failed;
5139
5140	amdgpu_device_load_pci_state(adev->pdev);
5141	ret = amdgpu_psp_wait_for_bootloader(adev);
5142	if (ret)
5143		goto mode1_reset_failed;
5144
5145	/* wait for asic to come out of reset */
5146	for (i = 0; i < adev->usec_timeout; i++) {
5147		u32 memsize = adev->nbio.funcs->get_memsize(adev);
5148
5149		if (memsize != 0xffffffff)
5150			break;
5151		udelay(1);
5152	}
5153
5154	if (i >= adev->usec_timeout) {
5155		ret = -ETIMEDOUT;
5156		goto mode1_reset_failed;
5157	}
5158
5159	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
5160
5161	return 0;
5162
5163mode1_reset_failed:
5164	dev_err(adev->dev, "GPU mode1 reset failed\n");
5165	return ret;
5166}
5167
5168int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
5169				 struct amdgpu_reset_context *reset_context)
5170{
5171	int i, r = 0;
5172	struct amdgpu_job *job = NULL;
5173	bool need_full_reset =
5174		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5175
5176	if (reset_context->reset_req_dev == adev)
5177		job = reset_context->job;
5178
5179	if (amdgpu_sriov_vf(adev)) {
5180		/* stop the data exchange thread */
5181		amdgpu_virt_fini_data_exchange(adev);
5182	}
5183
5184	amdgpu_fence_driver_isr_toggle(adev, true);
5185
5186	/* block all schedulers and reset given job's ring */
5187	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5188		struct amdgpu_ring *ring = adev->rings[i];
5189
5190		if (!amdgpu_ring_sched_ready(ring))
5191			continue;
5192
5193		/* Clear the job fences from the fence driver to avoid force_completion
5194		 * signalling them; NULL and VM flush fences stay in the fence driver.
5195		 */
5196		amdgpu_fence_driver_clear_job_fences(ring);
5197
5198		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
5199		amdgpu_fence_driver_force_completion(ring);
5200	}
5201
5202	amdgpu_fence_driver_isr_toggle(adev, false);
5203
5204	if (job && job->vm)
5205		drm_sched_increase_karma(&job->base);
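	/* Bumping karma lets the scheduler mark the offending job's context
	 * as guilty once the karma crosses the hang limit.
	 */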
5206
5207	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
5208	/* If reset handler not implemented, continue; otherwise return */
5209	if (r == -EOPNOTSUPP)
5210		r = 0;
5211	else
5212		return r;
5213
5214	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
5215	if (!amdgpu_sriov_vf(adev)) {
5216
5217		if (!need_full_reset)
5218			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
5219
5220		if (!need_full_reset && amdgpu_gpu_recovery &&
5221		    amdgpu_device_ip_check_soft_reset(adev)) {
5222			amdgpu_device_ip_pre_soft_reset(adev);
5223			r = amdgpu_device_ip_soft_reset(adev);
5224			amdgpu_device_ip_post_soft_reset(adev);
5225			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5226				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
5227				need_full_reset = true;
5228			}
5229		}
5230
5231		if (need_full_reset)
5232			r = amdgpu_device_ip_suspend(adev);
5233		if (need_full_reset)
5234			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5235		else
5236			clear_bit(AMDGPU_NEED_FULL_RESET,
5237				  &reset_context->flags);
5238	}
5239
5240	return r;
5241}
5242
5243static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
5244{
5245	int i;
5246
5247	lockdep_assert_held(&adev->reset_domain->sem);
5248
5249	for (i = 0; i < adev->reset_info.num_regs; i++) {
5250		adev->reset_info.reset_dump_reg_value[i] =
5251			RREG32(adev->reset_info.reset_dump_reg_list[i]);
5252
5253		trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i],
5254					     adev->reset_info.reset_dump_reg_value[i]);
5255	}
5256
5257	return 0;
5258}
5259
5260int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5261			 struct amdgpu_reset_context *reset_context)
5262{
5263	struct amdgpu_device *tmp_adev = NULL;
5264	bool need_full_reset, skip_hw_reset, vram_lost = false;
5265	int r = 0;
5266
5267	/* Try reset handler method first */
5268	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5269				    reset_list);
5270	amdgpu_reset_reg_dumps(tmp_adev);
5271
5272	reset_context->reset_device_list = device_list_handle;
5273	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5274	/* If reset handler not implemented, continue; otherwise return */
5275	if (r == -EOPNOTSUPP)
5276		r = 0;
5277	else
5278		return r;
5279
5280	/* Reset handler not implemented, use the default method */
5281	need_full_reset =
5282		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5283	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5284
5285	/*
5286	 * ASIC reset has to be done on all XGMI hive nodes ASAP
5287	 * to allow proper link negotiation in FW (within 1 sec)
5288	 */
5289	if (!skip_hw_reset && need_full_reset) {
5290		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5291			/* For XGMI run all resets in parallel to speed up the process */
5292			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5293				tmp_adev->gmc.xgmi.pending_reset = false;
5294				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
5295					r = -EALREADY;
5296			} else
5297				r = amdgpu_asic_reset(tmp_adev);
5298
5299			if (r) {
5300				dev_err(tmp_adev->dev, "ASIC reset failed with error %d for drm dev %s",
5301					 r, adev_to_drm(tmp_adev)->unique);
5302				goto out;
5303			}
5304		}
5305
5306		/* For XGMI wait for all resets to complete before proceed */
5307		if (!r) {
5308			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5309				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5310					flush_work(&tmp_adev->xgmi_reset_work);
5311					r = tmp_adev->asic_reset_res;
5312					if (r)
5313						break;
5314				}
5315			}
5316		}
5317	}
5318
5319	if (!r && amdgpu_ras_intr_triggered()) {
5320		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5321			amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB);
5322		}
5323
5324		amdgpu_ras_intr_cleared();
5325	}
5326
5327	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5328		if (need_full_reset) {
5329			/* post card */
5330			amdgpu_ras_set_fed(tmp_adev, false);
5331			r = amdgpu_device_asic_init(tmp_adev);
5332			if (r) {
5333				dev_warn(tmp_adev->dev, "asic atom init failed!");
5334			} else {
5335				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5336
5337				r = amdgpu_device_ip_resume_phase1(tmp_adev);
5338				if (r)
5339					goto out;
5340
5341				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5342
5343				amdgpu_coredump(tmp_adev, vram_lost, reset_context);
5344
5345				if (vram_lost) {
5346					DRM_INFO("VRAM is lost due to GPU reset!\n");
5347					amdgpu_inc_vram_lost(tmp_adev);
5348				}
5349
5350				r = amdgpu_device_fw_loading(tmp_adev);
5351				if (r)
5352					return r;
5353
5354				r = amdgpu_xcp_restore_partition_mode(
5355					tmp_adev->xcp_mgr);
5356				if (r)
5357					goto out;
5358
5359				r = amdgpu_device_ip_resume_phase2(tmp_adev);
5360				if (r)
5361					goto out;
5362
5363				if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
5364					amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
5365
5366				if (vram_lost)
5367					amdgpu_device_fill_reset_magic(tmp_adev);
5368
5369				/*
5370				 * Add this ASIC as tracked, since the reset
5371				 * has already completed successfully.
5372				 */
5373				amdgpu_register_gpu_instance(tmp_adev);
5374
5375				if (!reset_context->hive &&
5376				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5377					amdgpu_xgmi_add_device(tmp_adev);
5378
5379				r = amdgpu_device_ip_late_init(tmp_adev);
5380				if (r)
5381					goto out;
5382
5383				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
5384
5385				/*
5386				 * The GPU enters a bad state once the number
5387				 * of ECC faulty pages reaches the threshold,
5388				 * and RAS recovery is scheduled next. So add
5389				 * one check here to abort recovery if the bad
5390				 * page threshold is indeed exceeded, and
5391				 * remind the user to retire this GPU or set a
5392				 * bigger bad_page_threshold value to fix this
5393				 * when probing the driver again.
5394				 */
5395				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
5396					/* must succeed. */
5397					amdgpu_ras_resume(tmp_adev);
5398				} else {
5399					r = -EINVAL;
5400					goto out;
5401				}
5402
5403				/* Update PSP FW topology after reset */
5404				if (reset_context->hive &&
5405				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5406					r = amdgpu_xgmi_update_topology(
5407						reset_context->hive, tmp_adev);
5408			}
5409		}
5410
5411out:
5412		if (!r) {
5413			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5414			r = amdgpu_ib_ring_tests(tmp_adev);
5415			if (r) {
5416				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5417				need_full_reset = true;
5418				r = -EAGAIN;
5419				goto end;
5420			}
5421		}
5422
5423		if (!r)
5424			r = amdgpu_device_recover_vram(tmp_adev);
5425		else
5426			tmp_adev->asic_reset_res = r;
5427	}
5428
5429end:
5430	if (need_full_reset)
5431		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5432	else
5433		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5434	return r;
5435}
5436
5437static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5438{
5439
5440	switch (amdgpu_asic_reset_method(adev)) {
5441	case AMD_RESET_METHOD_MODE1:
5442		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5443		break;
5444	case AMD_RESET_METHOD_MODE2:
5445		adev->mp1_state = PP_MP1_STATE_RESET;
5446		break;
5447	default:
5448		adev->mp1_state = PP_MP1_STATE_NONE;
5449		break;
5450	}
5451}
5452
5453static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5454{
5455	amdgpu_vf_error_trans_all(adev);
5456	adev->mp1_state = PP_MP1_STATE_NONE;
5457}
5458
5459static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5460{
5461	struct pci_dev *p = NULL;
5462
5463	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5464			adev->pdev->bus->number, 1);
5465	if (p) {
5466		pm_runtime_enable(&(p->dev));
5467		pm_runtime_resume(&(p->dev));
5468	}
5469
5470	pci_dev_put(p);
5471}
5472
5473static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5474{
5475	enum amd_reset_method reset_method;
5476	struct pci_dev *p = NULL;
5477	u64 expires;
5478
5479	/*
5480	 * For now, only BACO and mode1 reset are confirmed
5481	 * to suffer the audio issue when not properly suspended.
5482	 */
5483	reset_method = amdgpu_asic_reset_method(adev);
5484	if ((reset_method != AMD_RESET_METHOD_BACO) &&
5485	     (reset_method != AMD_RESET_METHOD_MODE1))
5486		return -EINVAL;
5487
5488	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5489			adev->pdev->bus->number, 1);
5490	if (!p)
5491		return -ENODEV;
5492
5493	expires = pm_runtime_autosuspend_expiration(&(p->dev));
5494	if (!expires)
5495		/*
5496		 * If we cannot get the audio device autosuspend delay,
5497		 * use a fixed 4s interval. Since the audio controller's
5498		 * default autosuspend delay is 3s, the 4s used here is
5499		 * guaranteed to cover it.
5500		 */
5501		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5502
5503	while (!pm_runtime_status_suspended(&(p->dev))) {
5504		if (!pm_runtime_suspend(&(p->dev)))
5505			break;
5506
5507		if (expires < ktime_get_mono_fast_ns()) {
5508			dev_warn(adev->dev, "failed to suspend display audio\n");
5509			pci_dev_put(p);
5510			/* TODO: abort the subsequent gpu reset? */
5511			return -ETIMEDOUT;
5512		}
5513	}
5514
5515	pm_runtime_disable(&(p->dev));
5516
5517	pci_dev_put(p);
5518	return 0;
5519}
5520
5521static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5522{
5523	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5524
5525#if defined(CONFIG_DEBUG_FS)
5526	if (!amdgpu_sriov_vf(adev))
5527		cancel_work(&adev->reset_work);
5528#endif
5529
5530	if (adev->kfd.dev)
5531		cancel_work(&adev->kfd.reset_work);
5532
5533	if (amdgpu_sriov_vf(adev))
5534		cancel_work(&adev->virt.flr_work);
5535
5536	if (con && adev->ras_enabled)
5537		cancel_work(&con->recovery_work);
5538
5539}
5540
5541/**
5542 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5543 *
5544 * @adev: amdgpu_device pointer
5545 * @job: the job that triggered the hang
5546 * @reset_context: amdgpu reset context pointer
5547 *
5548 * Attempt to reset the GPU if it has hung (all asics).
5549 * Attempt a soft reset or a full reset and reinitialize the ASIC.
5550 * Returns 0 for success or an error on failure.
5551 */
5552
5553int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5554			      struct amdgpu_job *job,
5555			      struct amdgpu_reset_context *reset_context)
5556{
5557	struct list_head device_list, *device_list_handle =  NULL;
5558	bool job_signaled = false;
5559	struct amdgpu_hive_info *hive = NULL;
5560	struct amdgpu_device *tmp_adev = NULL;
5561	int i, r = 0;
5562	bool need_emergency_restart = false;
5563	bool audio_suspended = false;
5564
5565	/*
5566	 * Special case: RAS triggered and full reset isn't supported
5567	 */
5568	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5569
5570	/*
5571	 * Flush RAM to disk so that after reboot
5572	 * the user can read the log and see why the system rebooted.
5573	 */
5574	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
5575		amdgpu_ras_get_context(adev)->reboot) {
5576		DRM_WARN("Emergency reboot.");
5577
5578		ksys_sync_helper();
5579		emergency_restart();
5580	}
5581
5582	dev_info(adev->dev, "GPU %s begin!\n",
5583		need_emergency_restart ? "jobs stop" : "reset");
5584
5585	if (!amdgpu_sriov_vf(adev))
5586		hive = amdgpu_get_xgmi_hive(adev);
5587	if (hive)
5588		mutex_lock(&hive->hive_lock);
5589
5590	reset_context->job = job;
5591	reset_context->hive = hive;
5592	/*
5593	 * Build list of devices to reset.
5594	 * In case we are in XGMI hive mode, reorder the device list
5595	 * to put adev in the first position.
5596	 */
5597	INIT_LIST_HEAD(&device_list);
5598	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5599		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5600			list_add_tail(&tmp_adev->reset_list, &device_list);
5601			if (adev->shutdown)
5602				tmp_adev->shutdown = true;
5603		}
5604		if (!list_is_first(&adev->reset_list, &device_list))
5605			list_rotate_to_front(&adev->reset_list, &device_list);
5606		device_list_handle = &device_list;
5607	} else {
5608		list_add_tail(&adev->reset_list, &device_list);
5609		device_list_handle = &device_list;
5610	}
5611
5612	/* We need to lock the reset domain only once, for both XGMI and single device */
5613	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5614				    reset_list);
5615	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5616
5617	/* block all schedulers and reset given job's ring */
5618	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5619
5620		amdgpu_device_set_mp1_state(tmp_adev);
5621
5622		/*
5623		 * Try to put the audio codec into suspend state
5624		 * before the GPU reset starts.
5625		 *
5626		 * The power domain of the graphics device is
5627		 * shared with the AZ power domain. Without this,
5628		 * we may change the audio hardware behind the
5629		 * audio driver's back, which will trigger
5630		 * some audio codec errors.
5631		 */
5632		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5633			audio_suspended = true;
5634
5635		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5636
5637		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5638
5639		if (!amdgpu_sriov_vf(tmp_adev))
5640			amdgpu_amdkfd_pre_reset(tmp_adev);
5641
5642		/*
5643		 * Mark the ASICs to be reset as untracked first,
5644		 * and add them back after the reset completes.
5645		 */
5646		amdgpu_unregister_gpu_instance(tmp_adev);
5647
5648		drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5649
5650		/* disable ras on ALL IPs */
5651		if (!need_emergency_restart &&
5652		      amdgpu_device_ip_need_full_reset(tmp_adev))
5653			amdgpu_ras_suspend(tmp_adev);
5654
5655		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5656			struct amdgpu_ring *ring = tmp_adev->rings[i];
5657
5658			if (!amdgpu_ring_sched_ready(ring))
5659				continue;
5660
5661			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5662
5663			if (need_emergency_restart)
5664				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5665		}
5666		atomic_inc(&tmp_adev->gpu_reset_counter);
5667	}
5668
5669	if (need_emergency_restart)
5670		goto skip_sched_resume;
5671
5672	/*
5673	 * Must check guilty signal here since after this point all old
5674	 * HW fences are force signaled.
5675	 *
5676	 * job->base holds a reference to parent fence
5677	 */
5678	if (job && dma_fence_is_signaled(&job->hw_fence)) {
5679		job_signaled = true;
5680		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5681		goto skip_hw_reset;
5682	}
5683
5684retry:	/* Pre-asic-reset the rest of the adevs in the XGMI hive. */
5685	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5686		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5687		/* TODO: Should we stop? */
5688		if (r) {
5689			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s ",
5690				  r, adev_to_drm(tmp_adev)->unique);
5691			tmp_adev->asic_reset_res = r;
5692		}
5693
5694		/*
5695		 * Drop all pending non-scheduler resets. Scheduler resets
5696		 * were already dropped during drm_sched_stop.
5697		 */
5698		amdgpu_device_stop_pending_resets(tmp_adev);
5699	}
5700
5701	/* Actual ASIC resets if needed. */
5702	/* Host driver will handle XGMI hive reset for SRIOV */
5703	if (amdgpu_sriov_vf(adev)) {
5704		r = amdgpu_device_reset_sriov(adev, !job);
5705		if (r)
5706			adev->asic_reset_res = r;
5707
5708		/* Aldebaran and gfx_11_0_3 support RAS in SRIOV, so we need to resume RAS during reset */
5709		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
5710			    IP_VERSION(9, 4, 2) ||
5711		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
5712		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
5713			amdgpu_ras_resume(adev);
5714	} else {
5715		r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5716		if (r == -EAGAIN)
5717			goto retry;
5718	}
5719
5720skip_hw_reset:
5721
5722	/* Post ASIC reset for all devs. */
5723	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5724
5725		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5726			struct amdgpu_ring *ring = tmp_adev->rings[i];
5727
5728			if (!amdgpu_ring_sched_ready(ring))
5729				continue;
5730
5731			drm_sched_start(&ring->sched, true);
5732		}
5733
5734		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
5735			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5736
5737		if (tmp_adev->asic_reset_res)
5738			r = tmp_adev->asic_reset_res;
5739
5740		tmp_adev->asic_reset_res = 0;
5741
5742		if (r) {
5743			/* bad news, how do we tell userspace? */
5744			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5745			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5746		} else {
5747			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5748			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5749				DRM_WARN("smart shift update failed\n");
5750		}
5751	}
5752
5753skip_sched_resume:
5754	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5755		/* unlock kfd: SRIOV would do it separately */
5756		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5757			amdgpu_amdkfd_post_reset(tmp_adev);
5758
5759		/* kfd_post_reset will do nothing if the kfd device is not initialized,
5760		 * so bring up kfd here if it was not initialized before.
5761		 */
5762		if (!adev->kfd.init_complete)
5763			amdgpu_amdkfd_device_init(adev);
5764
5765		if (audio_suspended)
5766			amdgpu_device_resume_display_audio(tmp_adev);
5767
5768		amdgpu_device_unset_mp1_state(tmp_adev);
5769
5770		amdgpu_ras_set_error_query_ready(tmp_adev, true);
5771	}
5772
5773	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5774					    reset_list);
5775	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5776
5777	if (hive) {
5778		mutex_unlock(&hive->hive_lock);
5779		amdgpu_put_xgmi_hive(hive);
5780	}
5781
5782	if (r)
5783		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5784
5785	atomic_set(&adev->reset_domain->reset_res, r);
5786	return r;
5787}
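
/*
 * Illustrative sketch (not part of the original file): the typical caller
 * of amdgpu_device_gpu_recover() is a ring timeout handler. Assuming a
 * hung @job on @ring, the call looks roughly like this (see
 * amdgpu_job_timedout() for the real caller):
 *
 *	struct amdgpu_reset_context reset_context;
 *
 *	memset(&reset_context, 0, sizeof(reset_context));
 *	reset_context.method = AMD_RESET_METHOD_NONE;
 *	reset_context.reset_req_dev = ring->adev;
 *	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
 *	amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
 */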
5788
5789/**
5790 * amdgpu_device_partner_bandwidth - find the bandwidth of appropriate partner
5791 *
5792 * @adev: amdgpu_device pointer
5793 * @speed: pointer to the speed of the link
5794 * @width: pointer to the width of the link
5795 *
5796 * Evaluate the hierarchy to find the speed and width capabilities of the
5797 * first physical partner to an AMD dGPU.
5798 * This will exclude any virtual switches and links.
5799 */
5800static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
5801					    enum pci_bus_speed *speed,
5802					    enum pcie_link_width *width)
5803{
5804	struct pci_dev *parent = adev->pdev;
5805
5806	if (!speed || !width)
5807		return;
5808
5809	*speed = PCI_SPEED_UNKNOWN;
5810	*width = PCIE_LNK_WIDTH_UNKNOWN;
5811
5812	if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
5813		while ((parent = pci_upstream_bridge(parent))) {
5814			/* skip upstream/downstream switches internal to the dGPU */
5815			if (parent->vendor == PCI_VENDOR_ID_ATI)
5816				continue;
5817			*speed = pcie_get_speed_cap(parent);
5818			*width = pcie_get_width_cap(parent);
5819			break;
5820		}
5821	} else {
5822		/* use the current speeds rather than max if switching is not supported */
5823		pcie_bandwidth_available(adev->pdev, NULL, speed, width);
5824	}
5825}
5826
5827/**
5828 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIE slot
5829 *
5830 * @adev: amdgpu_device pointer
5831 *
5832 * Fetches and stores in the driver the PCIE capabilities (gen speed
5833 * and lanes) of the slot the device is in. Handles APUs and
5834 * virtualized environments where PCIE config space may not be available.
5835 */
5836static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5837{
5838	struct pci_dev *pdev;
5839	enum pci_bus_speed speed_cap, platform_speed_cap;
5840	enum pcie_link_width platform_link_width;
5841
5842	if (amdgpu_pcie_gen_cap)
5843		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5844
5845	if (amdgpu_pcie_lane_cap)
5846		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5847
5848	/* covers APUs as well */
5849	if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5850		if (adev->pm.pcie_gen_mask == 0)
5851			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5852		if (adev->pm.pcie_mlw_mask == 0)
5853			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5854		return;
5855	}
5856
5857	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5858		return;
5859
5860	amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
5861					&platform_link_width);
5862
5863	if (adev->pm.pcie_gen_mask == 0) {
5864		/* asic caps */
5865		pdev = adev->pdev;
5866		speed_cap = pcie_get_speed_cap(pdev);
5867		if (speed_cap == PCI_SPEED_UNKNOWN) {
5868			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5869						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5870						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5871		} else {
5872			if (speed_cap == PCIE_SPEED_32_0GT)
5873				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5874							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5875							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5876							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5877							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5878			else if (speed_cap == PCIE_SPEED_16_0GT)
5879				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5880							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5881							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5882							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5883			else if (speed_cap == PCIE_SPEED_8_0GT)
5884				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5885							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5886							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5887			else if (speed_cap == PCIE_SPEED_5_0GT)
5888				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5889							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5890			else
5891				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5892		}
5893		/* platform caps */
5894		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5895			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5896						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5897		} else {
5898			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5899				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5900							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5901							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5902							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5903							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5904			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5905				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5906							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5907							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5908							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5909			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5910				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5911							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5912							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5913			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5914				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5915							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5916			else
5917				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5918
5919		}
5920	}
5921	if (adev->pm.pcie_mlw_mask == 0) {
5922		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5923			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5924		} else {
5925			switch (platform_link_width) {
5926			case PCIE_LNK_X32:
5927				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5928							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5929							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5930							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5931							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5932							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5933							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5934				break;
5935			case PCIE_LNK_X16:
5936				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5937							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5938							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5939							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5940							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5941							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5942				break;
5943			case PCIE_LNK_X12:
5944				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5945							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5946							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5947							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5948							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5949				break;
5950			case PCIE_LNK_X8:
5951				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5952							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5953							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5954							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5955				break;
5956			case PCIE_LNK_X4:
5957				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5958							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5959							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5960				break;
5961			case PCIE_LNK_X2:
5962				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5963							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5964				break;
5965			case PCIE_LNK_X1:
5966				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5967				break;
5968			default:
5969				break;
5970			}
5971		}
5972	}
5973}
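
/*
 * Worked example (illustrative): for an ASIC whose link caps out at
 * PCIE_SPEED_16_0GT sitting in a platform slot limited to PCIE_SPEED_8_0GT
 * and x8 width, the code above produces
 *
 *	pcie_gen_mask = GEN1..GEN4 ASIC bits | GEN1..GEN3 platform bits
 *	pcie_mlw_mask = X1..X8 width bits
 *
 * so the effective link settings are bounded by the weaker of the ASIC
 * and platform capabilities.
 */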
5974
5975/**
5976 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5977 *
5978 * @adev: amdgpu_device pointer
5979 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5980 *
5981 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5982 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5983 * @peer_adev.
5984 */
5985bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5986				      struct amdgpu_device *peer_adev)
5987{
5988#ifdef CONFIG_HSA_AMD_P2P
5989	uint64_t address_mask = peer_adev->dev->dma_mask ?
5990		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5991	resource_size_t aper_limit =
5992		adev->gmc.aper_base + adev->gmc.aper_size - 1;
5993	bool p2p_access =
5994		!adev->gmc.xgmi.connected_to_cpu &&
5995		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5996
5997	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5998		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5999		!(adev->gmc.aper_base & address_mask ||
6000		  aper_limit & address_mask));
6001#else
6002	return false;
6003#endif
6004}
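
/*
 * Worked example (illustrative): with a 40-bit peer DMA mask,
 * address_mask = ~((1ULL << 40) - 1), so any aperture byte at or above
 * 2^40 fails the check. A 16 GiB BAR based at 0x100_0000_0000 (2^40) is
 * rejected, while the same BAR at 0x20_0000_0000 (2^37) is accepted,
 * provided all of VRAM is CPU-visible (real_vram_size ==
 * visible_vram_size) and pci_p2pdma_distance() reports a usable path.
 */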
6005
6006int amdgpu_device_baco_enter(struct drm_device *dev)
6007{
6008	struct amdgpu_device *adev = drm_to_adev(dev);
6009	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6010
6011	if (!amdgpu_device_supports_baco(dev))
6012		return -ENOTSUPP;
6013
6014	if (ras && adev->ras_enabled &&
6015	    adev->nbio.funcs->enable_doorbell_interrupt)
6016		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
6017
6018	return amdgpu_dpm_baco_enter(adev);
6019}
6020
6021int amdgpu_device_baco_exit(struct drm_device *dev)
6022{
6023	struct amdgpu_device *adev = drm_to_adev(dev);
6024	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6025	int ret = 0;
6026
6027	if (!amdgpu_device_supports_baco(dev))
6028		return -ENOTSUPP;
6029
6030	ret = amdgpu_dpm_baco_exit(adev);
6031	if (ret)
6032		return ret;
6033
6034	if (ras && adev->ras_enabled &&
6035	    adev->nbio.funcs->enable_doorbell_interrupt)
6036		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
6037
6038	if (amdgpu_passthrough(adev) &&
6039	    adev->nbio.funcs->clear_doorbell_interrupt)
6040		adev->nbio.funcs->clear_doorbell_interrupt(adev);
6041
6042	return 0;
6043}
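
/*
 * Illustrative sketch (hypothetical caller, not from this file): the BACO
 * helpers above are meant to be used as a pair around an idle period,
 * e.g. in a runtime-PM style path:
 *
 *	r = amdgpu_device_baco_enter(drm_dev);
 *	if (r)
 *		return r;
 *	... device sits in BACO while idle ...
 *	r = amdgpu_device_baco_exit(drm_dev);
 */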
6044
6045/**
6046 * amdgpu_pci_error_detected - Called when a PCI error is detected.
6047 * @pdev: PCI device struct
6048 * @state: PCI channel state
6049 *
6050 * Description: Called when a PCI error is detected.
6051 *
6052 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
6053 */
6054pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6055{
6056	struct drm_device *dev = pci_get_drvdata(pdev);
6057	struct amdgpu_device *adev = drm_to_adev(dev);
6058	int i;
6059
6060	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
6061
6062	if (adev->gmc.xgmi.num_physical_nodes > 1) {
6063		DRM_WARN("No support for XGMI hive yet...");
6064		return PCI_ERS_RESULT_DISCONNECT;
6065	}
6066
6067	adev->pci_channel_state = state;
6068
6069	switch (state) {
6070	case pci_channel_io_normal:
6071		return PCI_ERS_RESULT_CAN_RECOVER;
6072	/* Fatal error, prepare for slot reset */
6073	case pci_channel_io_frozen:
6074		/*
6075		 * Locking adev->reset_domain->sem will prevent any external access
6076		 * to GPU during PCI error recovery
6077		 */
6078		amdgpu_device_lock_reset_domain(adev->reset_domain);
6079		amdgpu_device_set_mp1_state(adev);
6080
6081		/*
6082		 * Block any work scheduling as we do for regular GPU reset
6083		 * for the duration of the recovery
6084		 */
6085		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6086			struct amdgpu_ring *ring = adev->rings[i];
6087
6088			if (!amdgpu_ring_sched_ready(ring))
6089				continue;
6090
6091			drm_sched_stop(&ring->sched, NULL);
6092		}
6093		atomic_inc(&adev->gpu_reset_counter);
6094		return PCI_ERS_RESULT_NEED_RESET;
6095	case pci_channel_io_perm_failure:
6096		/* Permanent error, prepare for device removal */
6097		return PCI_ERS_RESULT_DISCONNECT;
6098	}
6099
6100	return PCI_ERS_RESULT_NEED_RESET;
6101}
6102
6103/**
6104 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
6105 * @pdev: pointer to PCI device
6106 */
6107pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
6108{
6109
6110	DRM_INFO("PCI error: mmio enabled callback!!\n");
6111
6112	/* TODO - dump whatever for debugging purposes */
6113
6114	/* This is called only if amdgpu_pci_error_detected returns
6115	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
6116	 * works, no need to reset slot.
6117	 */
6118
6119	return PCI_ERS_RESULT_RECOVERED;
6120}
6121
6122/**
6123 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
6124 * @pdev: PCI device struct
6125 *
6126 * Description: This routine is called by the pci error recovery
6127 * code after the PCI slot has been reset, just before we
6128 * should resume normal operations.
6129 */
6130pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
6131{
6132	struct drm_device *dev = pci_get_drvdata(pdev);
6133	struct amdgpu_device *adev = drm_to_adev(dev);
6134	int r, i;
6135	struct amdgpu_reset_context reset_context;
6136	u32 memsize;
6137	struct list_head device_list;
6138	struct amdgpu_hive_info *hive;
6139	int hive_ras_recovery = 0;
6140	struct amdgpu_ras *ras;
6141
6142	/* PCI error slot reset should be skipped during RAS recovery */
6143	hive = amdgpu_get_xgmi_hive(adev);
6144	if (hive) {
6145		hive_ras_recovery = atomic_read(&hive->ras_recovery);
6146		amdgpu_put_xgmi_hive(hive);
6147	}
6148	ras = amdgpu_ras_get_context(adev);
6149	if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) &&
6150		 ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
6151		return PCI_ERS_RESULT_RECOVERED;
6152
6153	DRM_INFO("PCI error: slot reset callback!!\n");
6154
6155	memset(&reset_context, 0, sizeof(reset_context));
6156
6157	INIT_LIST_HEAD(&device_list);
6158	list_add_tail(&adev->reset_list, &device_list);
6159
6160	/* wait for asic to come out of reset */
6161	msleep(500);
6162
6163	/* Restore PCI config space */
6164	amdgpu_device_load_pci_state(pdev);
6165
6166	/* confirm ASIC came out of reset */
6167	for (i = 0; i < adev->usec_timeout; i++) {
6168		memsize = amdgpu_asic_get_config_memsize(adev);
6169
6170		if (memsize != 0xffffffff)
6171			break;
6172		udelay(1);
6173	}
6174	if (memsize == 0xffffffff) {
6175		r = -ETIME;
6176		goto out;
6177	}
6178
6179	reset_context.method = AMD_RESET_METHOD_NONE;
6180	reset_context.reset_req_dev = adev;
6181	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
6182	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
6183
6184	adev->no_hw_access = true;
6185	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
6186	adev->no_hw_access = false;
6187	if (r)
6188		goto out;
6189
6190	r = amdgpu_do_asic_reset(&device_list, &reset_context);
6191
6192out:
6193	if (!r) {
6194		if (amdgpu_device_cache_pci_state(adev->pdev))
6195			pci_restore_state(adev->pdev);
6196
6197		DRM_INFO("PCIe error recovery succeeded\n");
6198	} else {
6199		DRM_ERROR("PCIe error recovery failed, err:%d", r);
6200		amdgpu_device_unset_mp1_state(adev);
6201		amdgpu_device_unlock_reset_domain(adev->reset_domain);
6202	}
6203
6204	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
6205}
6206
6207/**
6208 * amdgpu_pci_resume() - resume normal ops after PCI reset
6209 * @pdev: pointer to PCI device
6210 *
6211 * Called when the error recovery driver tells us that it's
6212 * OK to resume normal operation.
6213 */
6214void amdgpu_pci_resume(struct pci_dev *pdev)
6215{
6216	struct drm_device *dev = pci_get_drvdata(pdev);
6217	struct amdgpu_device *adev = drm_to_adev(dev);
6218	int i;
6219
6220
6221	DRM_INFO("PCI error: resume callback!!\n");
6222
6223	/* Only continue execution for the case of pci_channel_io_frozen */
6224	if (adev->pci_channel_state != pci_channel_io_frozen)
6225		return;
6226
6227	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6228		struct amdgpu_ring *ring = adev->rings[i];
6229
6230		if (!amdgpu_ring_sched_ready(ring))
6231			continue;
6232
6233		drm_sched_start(&ring->sched, true);
6234	}
6235
6236	amdgpu_device_unset_mp1_state(adev);
6237	amdgpu_device_unlock_reset_domain(adev->reset_domain);
6238}
6239
6240bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
6241{
6242	struct drm_device *dev = pci_get_drvdata(pdev);
6243	struct amdgpu_device *adev = drm_to_adev(dev);
6244	int r;
6245
6246	r = pci_save_state(pdev);
6247	if (!r) {
6248		kfree(adev->pci_state);
6249
6250		adev->pci_state = pci_store_saved_state(pdev);
6251
6252		if (!adev->pci_state) {
6253			DRM_ERROR("Failed to store PCI saved state");
6254			return false;
6255		}
6256	} else {
6257		DRM_WARN("Failed to save PCI state, err:%d\n", r);
6258		return false;
6259	}
6260
6261	return true;
6262}
6263
6264bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
6265{
6266	struct drm_device *dev = pci_get_drvdata(pdev);
6267	struct amdgpu_device *adev = drm_to_adev(dev);
6268	int r;
6269
6270	if (!adev->pci_state)
6271		return false;
6272
6273	r = pci_load_saved_state(pdev, adev->pci_state);
6274
6275	if (!r) {
6276		pci_restore_state(pdev);
6277	} else {
6278		DRM_WARN("Failed to load PCI state, err:%d\n", r);
6279		return false;
6280	}
6281
6282	return true;
6283}
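
/*
 * Illustrative sketch (hypothetical caller): the two helpers above form a
 * save/restore pair around operations that can clobber PCI config space,
 * e.g. an ASIC reset:
 *
 *	if (amdgpu_device_cache_pci_state(adev->pdev)) {
 *		... perform the reset ...
 *		amdgpu_device_load_pci_state(adev->pdev);
 *	}
 */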
6284
6285void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6286		struct amdgpu_ring *ring)
6287{
6288#ifdef CONFIG_X86_64
6289	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6290		return;
6291#endif
6292	if (adev->gmc.xgmi.connected_to_cpu)
6293		return;
6294
6295	if (ring && ring->funcs->emit_hdp_flush)
6296		amdgpu_ring_emit_hdp_flush(ring);
6297	else
6298		amdgpu_asic_flush_hdp(adev, ring);
6299}
6300
6301void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6302		struct amdgpu_ring *ring)
6303{
6304#ifdef CONFIG_X86_64
6305	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6306		return;
6307#endif
6308	if (adev->gmc.xgmi.connected_to_cpu)
6309		return;
6310
6311	amdgpu_asic_invalidate_hdp(adev, ring);
6312}
6313
6314int amdgpu_in_reset(struct amdgpu_device *adev)
6315{
6316	return atomic_read(&adev->reset_domain->in_gpu_reset);
6317}
6318
6319/**
6320 * amdgpu_device_halt() - bring hardware to some kind of halt state
6321 *
6322 * @adev: amdgpu_device pointer
6323 *
6324 * Bring hardware to some kind of halt state so that no one can touch it
6325 * any more. This helps preserve the error context when an error occurs.
6326 * Compared to a simple hang, the system will stay stable at least for SSH
6327 * access. Then it should be trivial to inspect the hardware state and
6328 * see what's going on. Implemented as follows:
6329 *
6330 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc.),
6331 *    clears all CPU mappings to the device, disallows remappings through page faults
6332 * 2. amdgpu_irq_disable_all() disables all interrupts
6333 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
6334 * 4. set adev->no_hw_access to avoid potential crashes after step 5
6335 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
6336 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
6337 *    flush any in flight DMA operations
6338 */
6339void amdgpu_device_halt(struct amdgpu_device *adev)
6340{
6341	struct pci_dev *pdev = adev->pdev;
6342	struct drm_device *ddev = adev_to_drm(adev);
6343
6344	amdgpu_xcp_dev_unplug(adev);
6345	drm_dev_unplug(ddev);
6346
6347	amdgpu_irq_disable_all(adev);
6348
6349	amdgpu_fence_driver_hw_fini(adev);
6350
6351	adev->no_hw_access = true;
6352
6353	amdgpu_device_unmap_mmio(adev);
6354
6355	pci_disable_device(pdev);
6356	pci_wait_for_pending_transaction(pdev);
6357}
6358
6359u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
6360				u32 reg)
6361{
6362	unsigned long flags, address, data;
6363	u32 r;
6364
6365	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6366	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6367
6368	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6369	WREG32(address, reg * 4);
6370	(void)RREG32(address);
6371	r = RREG32(data);
6372	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6373	return r;
6374}
6375
6376void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6377				u32 reg, u32 v)
6378{
6379	unsigned long flags, address, data;
6380
6381	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6382	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6383
6384	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6385	WREG32(address, reg * 4);
6386	(void)RREG32(address);
6387	WREG32(data, v);
6388	(void)RREG32(data);
6389	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6390}
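
/*
 * Illustrative sketch (hypothetical caller, SOME_BIT is a placeholder):
 * the two accessors above implement the classic index/data pattern, so a
 * read-modify-write of a PCIe port register looks like:
 *
 *	u32 v = amdgpu_device_pcie_port_rreg(adev, reg);
 *
 *	v |= SOME_BIT;
 *	amdgpu_device_pcie_port_wreg(adev, reg, v);
 */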
6391
6392/**
6393 * amdgpu_device_switch_gang - switch to a new gang
6394 * @adev: amdgpu_device pointer
6395 * @gang: the gang to switch to
6396 *
6397 * Try to switch to a new gang.
6398 * Returns: NULL if we switched to the new gang or a reference to the current
6399 * gang leader.
6400 */
6401struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6402					    struct dma_fence *gang)
6403{
6404	struct dma_fence *old = NULL;
6405
6406	do {
6407		dma_fence_put(old);
6408		rcu_read_lock();
6409		old = dma_fence_get_rcu_safe(&adev->gang_submit);
6410		rcu_read_unlock();
6411
6412		if (old == gang)
6413			break;
6414
6415		if (!dma_fence_is_signaled(old))
6416			return old;
6417
6418	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6419			 old, gang) != old);
6420
6421	dma_fence_put(old);
6422	return NULL;
6423}
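
/*
 * Illustrative sketch (hypothetical caller): a non-NULL return means the
 * previous gang leader has not finished yet, so callers typically wait on
 * the returned fence and retry until the switch succeeds:
 *
 *	struct dma_fence *old;
 *
 *	while ((old = amdgpu_device_switch_gang(adev, gang))) {
 *		dma_fence_wait(old, false);
 *		dma_fence_put(old);
 *	}
 */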
6424
6425bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6426{
6427	switch (adev->asic_type) {
6428#ifdef CONFIG_DRM_AMDGPU_SI
6429	case CHIP_HAINAN:
6430#endif
6431	case CHIP_TOPAZ:
6432		/* chips with no display hardware */
6433		return false;
6434#ifdef CONFIG_DRM_AMDGPU_SI
6435	case CHIP_TAHITI:
6436	case CHIP_PITCAIRN:
6437	case CHIP_VERDE:
6438	case CHIP_OLAND:
6439#endif
6440#ifdef CONFIG_DRM_AMDGPU_CIK
6441	case CHIP_BONAIRE:
6442	case CHIP_HAWAII:
6443	case CHIP_KAVERI:
6444	case CHIP_KABINI:
6445	case CHIP_MULLINS:
6446#endif
6447	case CHIP_TONGA:
6448	case CHIP_FIJI:
6449	case CHIP_POLARIS10:
6450	case CHIP_POLARIS11:
6451	case CHIP_POLARIS12:
6452	case CHIP_VEGAM:
6453	case CHIP_CARRIZO:
6454	case CHIP_STONEY:
6455		/* chips with display hardware */
6456		return true;
6457	default:
6458		/* IP discovery */
6459		if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
6460		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6461			return false;
6462		return true;
6463	}
6464}
6465
6466uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
6467		uint32_t inst, uint32_t reg_addr, char reg_name[],
6468		uint32_t expected_value, uint32_t mask)
6469{
6470	uint32_t ret = 0;
6471	uint32_t old_ = 0;
6472	uint32_t tmp_ = RREG32(reg_addr);
6473	uint32_t loop = adev->usec_timeout;
6474
6475	while ((tmp_ & (mask)) != (expected_value)) {
6476		if (old_ != tmp_) {
6477			loop = adev->usec_timeout;
6478			old_ = tmp_;
6479		} else
6480			udelay(1);
6481		tmp_ = RREG32(reg_addr);
6482		loop--;
6483		if (!loop) {
6484			DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
6485				  inst, reg_name, (uint32_t)expected_value,
6486				  (uint32_t)(tmp_ & (mask)));
6487			ret = -ETIMEDOUT;
6488			break;
6489		}
6490	}
6491	return ret;
6492}
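
/*
 * Illustrative sketch (hypothetical caller; register and bit names are
 * placeholders): poll a status register until a READY bit is set, bailing
 * out once the value stops changing for adev->usec_timeout iterations:
 *
 *	r = amdgpu_device_wait_on_rreg(adev, 0, regSOME_STATUS, "SOME_STATUS",
 *				       SOME_STATUS__READY_MASK,
 *				       SOME_STATUS__READY_MASK);
 *	if (r)
 *		return r;
 */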
v4.10.11
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
 
  28#include <linux/kthread.h>
 
  29#include <linux/console.h>
  30#include <linux/slab.h>
  31#include <linux/debugfs.h>
  32#include <drm/drmP.h>
 
 
 
 
 
  33#include <drm/drm_crtc_helper.h>
 
 
  34#include <drm/amdgpu_drm.h>
 
  35#include <linux/vgaarb.h>
  36#include <linux/vga_switcheroo.h>
  37#include <linux/efi.h>
  38#include "amdgpu.h"
  39#include "amdgpu_trace.h"
  40#include "amdgpu_i2c.h"
  41#include "atom.h"
  42#include "amdgpu_atombios.h"
 
  43#include "amd_pcie.h"
  44#ifdef CONFIG_DRM_AMDGPU_SI
  45#include "si.h"
  46#endif
  47#ifdef CONFIG_DRM_AMDGPU_CIK
  48#include "cik.h"
  49#endif
  50#include "vi.h"
 
 
  51#include "bif/bif_4_1_d.h"
  52#include <linux/pci.h>
  53#include <linux/firmware.h>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  54
  55static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
  56static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
  57
  58static const char *amdgpu_asic_name[] = {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  59	"TAHITI",
  60	"PITCAIRN",
  61	"VERDE",
  62	"OLAND",
  63	"HAINAN",
  64	"BONAIRE",
  65	"KAVERI",
  66	"KABINI",
  67	"HAWAII",
  68	"MULLINS",
  69	"TOPAZ",
  70	"TONGA",
  71	"FIJI",
  72	"CARRIZO",
  73	"STONEY",
  74	"POLARIS10",
  75	"POLARIS11",
  76	"POLARIS12",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  77	"LAST",
  78};
  79
  80bool amdgpu_device_is_px(struct drm_device *dev)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  81{
  82	struct amdgpu_device *adev = dev->dev_private;
  83
  84	if (adev->flags & AMD_IS_PX)
  85		return true;
  86	return false;
  87}
  88
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  89/*
  90 * MMIO register access helper functions.
  91 */
  92uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
  93			bool always_indirect)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  94{
  95	uint32_t ret;
  96
  97	if ((reg * 4) < adev->rmmio_size && !always_indirect)
  98		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
  99	else {
 100		unsigned long flags;
 101
 102		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 103		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
 104		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 105		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 
 
 
 
 
 106	}
 107	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
 
 
 108	return ret;
 109}
 110
 111void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 112		    bool always_indirect)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 113{
 114	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 115
 116	if ((reg * 4) < adev->rmmio_size && !always_indirect)
 117		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 118	else {
 119		unsigned long flags;
 120
 121		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 122		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
 123		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 124		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 125	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 126}
 127
 128u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
 
 
 
 
 
 
 
 
 
 
 
 
 129{
 130	if ((reg * 4) < adev->rio_mem_size)
 131		return ioread32(adev->rio_mem + (reg * 4));
 132	else {
 133		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
 134		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
 
 
 
 
 
 
 
 
 
 135	}
 
 
 136}
 137
 138void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 
 
 
 
 
 
 
 
 
 
 
 
 139{
 
 
 140
 141	if ((reg * 4) < adev->rio_mem_size)
 142		iowrite32(v, adev->rio_mem + (reg * 4));
 143	else {
 144		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
 145		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
 
 
 
 
 146	}
 147}
 148
 149/**
 150 * amdgpu_mm_rdoorbell - read a doorbell dword
 151 *
 152 * @adev: amdgpu_device pointer
 153 * @index: doorbell index
 
 
 
 154 *
 155 * Returns the value in the doorbell aperture at the
 156 * requested doorbell index (CIK).
 157 */
 158u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
 
 
 159{
 160	if (index < adev->doorbell.num_doorbells) {
 161		return readl(adev->doorbell.ptr + index);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 162	} else {
 163		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 164		return 0;
 165	}
 166}
 167
 168/**
 169 * amdgpu_mm_wdoorbell - write a doorbell dword
 170 *
 171 * @adev: amdgpu_device pointer
 172 * @index: doorbell index
 173 * @v: value to write
 174 *
 175 * Writes @v to the doorbell aperture at the
 176 * requested doorbell index (CIK).
 177 */
 178void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 179{
 180	if (index < adev->doorbell.num_doorbells) {
 181		writel(v, adev->doorbell.ptr + index);
 
 
 
 
 
 
 
 182	} else {
 183		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 
 184	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 185}
 186
 187/**
 188 * amdgpu_invalid_rreg - dummy reg read function
 189 *
 190 * @adev: amdgpu device pointer
 191 * @reg: offset of register
 192 *
 193 * Dummy register read function.  Used for register blocks
 194 * that certain asics don't have (all asics).
 195 * Returns the value in the register.
 196 */
 197static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
 198{
 199	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
 200	BUG();
 201	return 0;
 202}
 203
 
 
 
 
 
 
 
 204/**
 205 * amdgpu_invalid_wreg - dummy reg write function
 206 *
 207 * @adev: amdgpu device pointer
 208 * @reg: offset of register
 209 * @v: value to write to the register
 210 *
 211 * Dummy register read function.  Used for register blocks
 212 * that certain asics don't have (all asics).
 213 */
 214static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 215{
 216	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
 217		  reg, v);
 218	BUG();
 219}
 220
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 221/**
 222 * amdgpu_block_invalid_rreg - dummy reg read function
 223 *
 224 * @adev: amdgpu device pointer
 225 * @block: offset of instance
 226 * @reg: offset of register
 227 *
 228 * Dummy register read function.  Used for register blocks
 229 * that certain asics don't have (all asics).
 230 * Returns the value in the register.
 231 */
 232static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
 233					  uint32_t block, uint32_t reg)
 234{
 235	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
 236		  reg, block);
 237	BUG();
 238	return 0;
 239}
 240
 241/**
 242 * amdgpu_block_invalid_wreg - dummy reg write function
 243 *
 244 * @adev: amdgpu device pointer
 245 * @block: offset of instance
 246 * @reg: offset of register
 247 * @v: value to write to the register
 248 *
 249 * Dummy register read function.  Used for register blocks
 250 * that certain asics don't have (all asics).
 251 */
 252static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
 253				      uint32_t block,
 254				      uint32_t reg, uint32_t v)
 255{
 256	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
 257		  reg, block, v);
 258	BUG();
 259}
 260
 261static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
 
 
 
 
 
 
 
 262{
 263	int r;
 264
 265	if (adev->vram_scratch.robj == NULL) {
 266		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
 267				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
 268				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
 269				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 270				     NULL, NULL, &adev->vram_scratch.robj);
 271		if (r) {
 272			return r;
 273		}
 274	}
 275
 276	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
 277	if (unlikely(r != 0))
 278		return r;
 279	r = amdgpu_bo_pin(adev->vram_scratch.robj,
 280			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
 281	if (r) {
 282		amdgpu_bo_unreserve(adev->vram_scratch.robj);
 283		return r;
 284	}
 285	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
 286				(void **)&adev->vram_scratch.ptr);
 287	if (r)
 288		amdgpu_bo_unpin(adev->vram_scratch.robj);
 289	amdgpu_bo_unreserve(adev->vram_scratch.robj);
 290
 291	return r;
 292}
 293
 294static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
 
 
 
 
 
 
 
 
 295{
 296	int r;
 
 
 
 
 
 
 297
 298	if (adev->vram_scratch.robj == NULL) {
 299		return;
 300	}
 301	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
 302	if (likely(r == 0)) {
 303		amdgpu_bo_kunmap(adev->vram_scratch.robj);
 304		amdgpu_bo_unpin(adev->vram_scratch.robj);
 305		amdgpu_bo_unreserve(adev->vram_scratch.robj);
 306	}
 307	amdgpu_bo_unref(&adev->vram_scratch.robj);
 308}
 309
 310/**
 311 * amdgpu_program_register_sequence - program an array of registers.
 312 *
 313 * @adev: amdgpu_device pointer
 314 * @registers: pointer to the register array
 315 * @array_size: size of the register array
 316 *
 317 * Programs an array or registers with and and or masks.
 318 * This is a helper for setting golden registers.
 319 */
 320void amdgpu_program_register_sequence(struct amdgpu_device *adev,
 321				      const u32 *registers,
 322				      const u32 array_size)
 323{
 324	u32 tmp, reg, and_mask, or_mask;
 325	int i;
 326
 327	if (array_size % 3)
 328		return;
 329
 330	for (i = 0; i < array_size; i +=3) {
 331		reg = registers[i + 0];
 332		and_mask = registers[i + 1];
 333		or_mask = registers[i + 2];
 334
 335		if (and_mask == 0xffffffff) {
 336			tmp = or_mask;
 337		} else {
 338			tmp = RREG32(reg);
 339			tmp &= ~and_mask;
 340			tmp |= or_mask;
 
 
 
 341		}
 342		WREG32(reg, tmp);
 343	}
 344}
 345
 346void amdgpu_pci_config_reset(struct amdgpu_device *adev)
 347{
 348	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
 349}
 350
 351/*
 352 * GPU doorbell aperture helpers function.
 353 */
 354/**
 355 * amdgpu_doorbell_init - Init doorbell driver information.
 356 *
 357 * @adev: amdgpu_device pointer
 358 *
 359 * Init doorbell driver information (CIK)
 360 * Returns 0 on success, error on failure.
 361 */
 362static int amdgpu_doorbell_init(struct amdgpu_device *adev)
 363{
 364	/* doorbell bar mapping */
 365	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
 366	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
 367
 368	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
 369					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
 370	if (adev->doorbell.num_doorbells == 0)
 371		return -EINVAL;
 372
 373	adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
 374	if (adev->doorbell.ptr == NULL) {
 375		return -ENOMEM;
 376	}
 377	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
 378	DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);
 379
 380	return 0;
 381}
 382
 383/**
 384 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 385 *
 386 * @adev: amdgpu_device pointer
 387 *
 388 * Tear down doorbell driver information (CIK)
 389 */
 390static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
 391{
 392	iounmap(adev->doorbell.ptr);
 393	adev->doorbell.ptr = NULL;
 394}
 395
 396/**
 397 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 398 *                                setup amdkfd
 399 *
 400 * @adev: amdgpu_device pointer
 401 * @aperture_base: output returning doorbell aperture base physical address
 402 * @aperture_size: output returning doorbell aperture size in bytes
 403 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 404 *
 405 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 406 * takes doorbells required for its own rings and reports the setup to amdkfd.
 407 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 408 */
 409void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
 410				phys_addr_t *aperture_base,
 411				size_t *aperture_size,
 412				size_t *start_offset)
 413{
 414	/*
 415	 * The first num_doorbells are used by amdgpu.
 416	 * amdkfd takes whatever's left in the aperture.
 417	 */
 418	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
 419		*aperture_base = adev->doorbell.base;
 420		*aperture_size = adev->doorbell.size;
 421		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
 422	} else {
 423		*aperture_base = 0;
 424		*aperture_size = 0;
 425		*start_offset = 0;
 426	}
 427}
 428
 429/*
 430 * amdgpu_wb_*()
 431 * Writeback is the the method by which the the GPU updates special pages
 432 * in memory with the status of certain GPU events (fences, ring pointers,
 433 * etc.).
 434 */
 435
 436/**
 437 * amdgpu_wb_fini - Disable Writeback and free memory
 438 *
 439 * @adev: amdgpu_device pointer
 440 *
 441 * Disables Writeback and frees the Writeback memory (all asics).
 442 * Used at driver shutdown.
 443 */
 444static void amdgpu_wb_fini(struct amdgpu_device *adev)
 445{
 446	if (adev->wb.wb_obj) {
 447		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
 448				      &adev->wb.gpu_addr,
 449				      (void **)&adev->wb.wb);
 450		adev->wb.wb_obj = NULL;
 451	}
 452}
 453
 454/**
 455 * amdgpu_wb_init- Init Writeback driver info and allocate memory
 456 *
 457 * @adev: amdgpu_device pointer
 458 *
 459 * Disables Writeback and frees the Writeback memory (all asics).
 460 * Used at driver startup.
 461 * Returns 0 on success or an -error on failure.
 462 */
 463static int amdgpu_wb_init(struct amdgpu_device *adev)
 464{
 465	int r;
 466
 467	if (adev->wb.wb_obj == NULL) {
 468		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
 
 469					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
 470					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
 471					    (void **)&adev->wb.wb);
 472		if (r) {
 473			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
 474			return r;
 475		}
 476
 477		adev->wb.num_wb = AMDGPU_MAX_WB;
 478		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
 479
 480		/* clear wb memory */
 481		memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
 482	}
 483
 484	return 0;
 485}
 486
 487/**
 488 * amdgpu_wb_get - Allocate a wb entry
 489 *
 490 * @adev: amdgpu_device pointer
 491 * @wb: wb index
 492 *
 493 * Allocate a wb slot for use by the driver (all asics).
 494 * Returns 0 on success or -EINVAL on failure.
 495 */
 496int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
 497{
 498	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
 
 499	if (offset < adev->wb.num_wb) {
 500		__set_bit(offset, adev->wb.used);
 501		*wb = offset;
 502		return 0;
 503	} else {
 504		return -EINVAL;
 505	}
 506}
 507
 508/**
 509 * amdgpu_wb_free - Free a wb entry
 510 *
 511 * @adev: amdgpu_device pointer
 512 * @wb: wb index
 513 *
 514 * Free a wb slot allocated for use by the driver (all asics)
 515 */
 516void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
 517{
 
 518	if (wb < adev->wb.num_wb)
 519		__clear_bit(wb, adev->wb.used);
 520}
 521
 522/**
 523 * amdgpu_vram_location - try to find VRAM location
 524 * @adev: amdgpu device structure holding all necessary informations
 525 * @mc: memory controller structure holding memory informations
 526 * @base: base address at which to put VRAM
 527 *
 528 * Function will place try to place VRAM at base address provided
 529 * as parameter (which is so far either PCI aperture address or
 530 * for IGP TOM base address).
 531 *
 532 * If there is not enough space to fit the unvisible VRAM in the 32bits
 533 * address space then we limit the VRAM size to the aperture.
 534 *
 535 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 536 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 537 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 538 * not IGP.
 539 *
 540 * Note: we use mc_vram_size as on some board we need to program the mc to
 541 * cover the whole aperture even if VRAM size is inferior to aperture size
 542 * Novell bug 204882 + along with lots of ubuntu ones
 543 *
 544 * Note: when limiting vram it's safe to overwritte real_vram_size because
 545 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
 546 * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
 547 * ones)
 548 *
 549 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 550 * explicitly check for that thought.
 551 *
 552 * FIXME: when reducing VRAM size align new size on power of 2.
 553 */
 554void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
 555{
 556	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
 
 
 
 557
 558	mc->vram_start = base;
 559	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
 560		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
 561		mc->real_vram_size = mc->aper_size;
 562		mc->mc_vram_size = mc->aper_size;
 
 
 
 
 563	}
 564	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
 565	if (limit && limit < mc->real_vram_size)
 566		mc->real_vram_size = limit;
 567	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
 568			mc->mc_vram_size >> 20, mc->vram_start,
 569			mc->vram_end, mc->real_vram_size >> 20);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 570}
 571
 572/**
 573 * amdgpu_gtt_location - try to find GTT location
 574 * @adev: amdgpu device structure holding all necessary informations
 575 * @mc: memory controller structure holding memory informations
 576 *
 577 * Function will place try to place GTT before or after VRAM.
 578 *
 579 * If GTT size is bigger than space left then we ajust GTT size.
 580 * Thus function will never fails.
 581 *
 582 * FIXME: when reducing GTT size align new size on power of 2.
 583 */
 584void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
 585{
 586	u64 size_af, size_bf;
 
 587
 588	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
 589	size_bf = mc->vram_start & ~mc->gtt_base_align;
 590	if (size_bf > size_af) {
 591		if (mc->gtt_size > size_bf) {
 592			dev_warn(adev->dev, "limiting GTT\n");
 593			mc->gtt_size = size_bf;
 594		}
 595		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
 596	} else {
 597		if (mc->gtt_size > size_af) {
 598			dev_warn(adev->dev, "limiting GTT\n");
 599			mc->gtt_size = size_af;
 600		}
 601		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
 602	}
 603	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
 604	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
 605			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
 606}
 607
 608/*
 609 * GPU helpers function.
 610 */
 611/**
 612 * amdgpu_card_posted - check if the hw has already been initialized
 613 *
 614 * @adev: amdgpu_device pointer
 615 *
 616 * Check if the asic has been initialized (all asics).
 617 * Used at driver startup.
 618 * Returns true if initialized or false if not.
 619 */
 620bool amdgpu_card_posted(struct amdgpu_device *adev)
 621{
 622	uint32_t reg;
 623
 624	/* check MEM_SIZE, in case the crtcs are off */
 625	reg = RREG32(mmCONFIG_MEMSIZE);
 626
 627	if (reg)
 628		return true;
 629
 630	return false;
 631
 632}
 633
 634static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
 635{
 636	if (amdgpu_sriov_vf(adev))
 637		return false;
 638
 639	if (amdgpu_passthrough(adev)) {
 640		/* for FIJI: in the whole GPU pass-through virtualization case, after a
 641		 * VM reboot some old SMC fw still needs the driver to do a vPost,
 642		 * otherwise the GPU hangs; SMC fw versions above 22.15 don't have this
 643		 * flaw, so we force a vPost for SMC versions below 22.15
 644		 */
 645		if (adev->asic_type == CHIP_FIJI) {
 646			int err;
 647			uint32_t fw_ver;
 
 648			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
 649			/* force vPost if an error occurred */
 650			if (err)
 651				return true;
 652
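			/* the firmware version is stored at dword offset 69
			 * of the SMC image; the check below compares it */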
 653			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
 
 654			if (fw_ver < 0x00160e00)
 655				return true;
 656		}
 657	}
 658	return !amdgpu_card_posted(adev);
 659}
 660
 661/**
 662 * amdgpu_dummy_page_init - init dummy page used by the driver
 663 *
 664 * @adev: amdgpu_device pointer
 665 *
 666 * Allocate the dummy page used by the driver (all asics).
 667 * This dummy page is used by the driver as a filler for gart entries
 668 * when pages are taken out of the GART.
 669 * Returns 0 on success, -ENOMEM on failure.
 670 */
 671int amdgpu_dummy_page_init(struct amdgpu_device *adev)
 672{
 673	if (adev->dummy_page.page)
 674		return 0;
 675	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
 676	if (adev->dummy_page.page == NULL)
 677		return -ENOMEM;
 678	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
 679					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 680	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
 681		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
 682		__free_page(adev->dummy_page.page);
 683		adev->dummy_page.page = NULL;
 684		return -ENOMEM;
 685	}
 686	return 0;
 687}
 688
 689/**
 690 * amdgpu_dummy_page_fini - free dummy page used by the driver
 691 *
 692 * @adev: amdgpu_device pointer
 693 *
 694 * Frees the dummy page used by the driver (all asics).
 695 */
 696void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
 697{
 698	if (adev->dummy_page.page == NULL)
 699		return;
 700	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
 701			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 702	__free_page(adev->dummy_page.page);
 703	adev->dummy_page.page = NULL;
 704}
 705
 
 
 706
 707/* ATOM accessor methods */
 708/*
 709 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 710 * driver registers callbacks to access registers and the interpreter
 711 * in the driver parses the tables and executes them to perform specific
 712 * actions (set display modes, asic init, etc.).  See amdgpu_atombios.c,
 713 * atombios.h, and atom.c
 714 */
 715
 716/**
 717 * cail_pll_read - read PLL register
 718 *
 719 * @info: atom card_info pointer
 720 * @reg: PLL register offset
 721 *
 722 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 723 * Returns the value of the PLL register.
 724 */
 725static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
 726{
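	/* PLL registers aren't needed by the atom tables on the ASICs amdgpu
	 * drives, so this accessor is a stub */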
 727	return 0;
 728}
 729
 730/**
 731 * cail_pll_write - write PLL register
 732 *
 733 * @info: atom card_info pointer
 734 * @reg: PLL register offset
 735 * @val: value to write to the pll register
 736 *
 737 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 738 */
 739static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
 740{
 741
 742}
 743
 744/**
 745 * cail_mc_read - read MC (Memory Controller) register
 746 *
 747 * @info: atom card_info pointer
 748 * @reg: MC register offset
 749 *
 750 * Provides an MC register accessor for the atom interpreter (r4xx+).
 751 * Returns the value of the MC register.
 752 */
 753static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
 754{
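	/* MC registers are likewise unused by the atom interpreter here; stub */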
 755	return 0;
 756}
 757
 758/**
 759 * cail_mc_write - write MC (Memory Controller) register
 760 *
 761 * @info: atom card_info pointer
 762 * @reg: MC register offset
 763 * @val: value to write to the MC register
 764 *
 765 * Provides an MC register accessor for the atom interpreter (r4xx+).
 766 */
 767static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
 768{
 769
 770}
 771
 772/**
 773 * cail_reg_write - write MMIO register
 774 *
 775 * @info: atom card_info pointer
 776 * @reg: MMIO register offset
 777 * @val: value to write to the MMIO register
 778 *
 779 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
 780 */
 781static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
 782{
 783	struct amdgpu_device *adev = info->dev->dev_private;
 784
 785	WREG32(reg, val);
 786}
 787
 788/**
 789 * cail_reg_read - read MMIO register
 790 *
 791 * @info: atom card_info pointer
 792 * @reg: MMIO register offset
 793 *
 794 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 795 * Returns the value of the MMIO register.
 796 */
 797static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
 798{
 799	struct amdgpu_device *adev = info->dev->dev_private;
 800	uint32_t r;
 801
 802	r = RREG32(reg);
 803	return r;
 804}
 805
 
 806/**
 807 * cail_ioreg_write - write IO register
 808 *
 809 * @info: atom card_info pointer
 810 * @reg: IO register offset
 811 * @val: value to write to the IO register
 812 *
 813 * Provides an IO register accessor for the atom interpreter (r4xx+).
 
 814 */
 815static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
 
 816{
 817	struct amdgpu_device *adev = info->dev->dev_private;
 818
 819	WREG32_IO(reg, val);
 820}
 821
 822/**
 823 * cail_ioreg_read - read IO register
 824 *
 825 * @info: atom card_info pointer
 826 * @reg: IO register offset
 827 *
 828 * Provides an IO register accessor for the atom interpreter (r4xx+).
 829 * Returns the value of the IO register.
 
 
 830 */
 831static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
 832{
 833	struct amdgpu_device *adev = info->dev->dev_private;
 834	uint32_t r;
 835
 836	r = RREG32_IO(reg);
 837	return r;
 838}
 839
 840/**
 841 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 842 *
 843 * @adev: amdgpu_device pointer
 844 *
 845 * Frees the driver info and register access callbacks for the ATOM
 846 * interpreter (r4xx+).
 847 * Called at driver shutdown.
 848 */
 849static void amdgpu_atombios_fini(struct amdgpu_device *adev)
 850{
 851	if (adev->mode_info.atom_context) {
 852		kfree(adev->mode_info.atom_context->scratch);
 853		kfree(adev->mode_info.atom_context->iio);
 854	}
 855	kfree(adev->mode_info.atom_context);
 856	adev->mode_info.atom_context = NULL;
 857	kfree(adev->mode_info.atom_card_info);
 858	adev->mode_info.atom_card_info = NULL;
 859}
 860
 861/**
 862 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 863 *
 864 * @adev: amdgpu_device pointer
 865 *
 866 * Initializes the driver info and register access callbacks for the
 867 * ATOM interpreter (r4xx+).
 868 * Returns 0 on success, -ENOMEM on failure.
 869 * Called at driver startup.
 870 */
 871static int amdgpu_atombios_init(struct amdgpu_device *adev)
 872{
 873	struct card_info *atom_card_info =
 874	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
 875
 876	if (!atom_card_info)
 877		return -ENOMEM;
 878
 879	adev->mode_info.atom_card_info = atom_card_info;
 880	atom_card_info->dev = adev->ddev;
 881	atom_card_info->reg_read = cail_reg_read;
 882	atom_card_info->reg_write = cail_reg_write;
 883	/* needed for iio ops */
 884	if (adev->rio_mem) {
 885		atom_card_info->ioreg_read = cail_ioreg_read;
 886		atom_card_info->ioreg_write = cail_ioreg_write;
 887	} else {
 888		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
 889		atom_card_info->ioreg_read = cail_reg_read;
 890		atom_card_info->ioreg_write = cail_reg_write;
 891	}
 892	atom_card_info->mc_read = cail_mc_read;
 893	atom_card_info->mc_write = cail_mc_write;
 894	atom_card_info->pll_read = cail_pll_read;
 895	atom_card_info->pll_write = cail_pll_write;
 896
 897	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
 898	if (!adev->mode_info.atom_context) {
 899		amdgpu_atombios_fini(adev);
 900		return -ENOMEM;
 901	}
 902
 903	mutex_init(&adev->mode_info.atom_context->mutex);
 904	amdgpu_atombios_scratch_regs_init(adev);
 905	amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
 906	return 0;
 907}
 908
 909/* if we get transitioned to only one device, take VGA back */
 910/**
 911 * amdgpu_vga_set_decode - enable/disable vga decode
 912 *
 913 * @cookie: amdgpu_device pointer
 914 * @state: enable/disable vga decode
 915 *
 916 * Enable/disable vga decode (all asics).
 917 * Returns VGA resource flags.
 918 */
 919static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
 920{
 921	struct amdgpu_device *adev = cookie;
 922	amdgpu_asic_set_vga_state(adev, state);
 923	if (state)
 924		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
 925		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 926	else
 927		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 928}
 929
 930/**
 931 * amdgpu_check_pot_argument - check that argument is a power of two
 932 *
 933 * @arg: value to check
 934 *
 935 * Validates that a certain argument is a power of two (all asics).
 936 * Returns true if argument is valid.
 937 */
 938static bool amdgpu_check_pot_argument(int arg)
 939{
 940	return (arg & (arg - 1)) == 0;
 941}
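/*
 * Example of the bit trick above: 8 is 0b1000 and 7 is 0b0111, so
 * 8 & 7 == 0 (a power of two); 6 is 0b0110 and 5 is 0b0101, so
 * 6 & 5 == 0b0100 != 0 (not a power of two).  Note that 0 passes the
 * check as well.
 */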
 942
 943/**
 944 * amdgpu_check_arguments - validate module params
 945 *
 946 * @adev: amdgpu_device pointer
 947 *
 948 * Validates certain module parameters and updates
 949 * the associated values used by the driver (all asics).
 950 */
 951static void amdgpu_check_arguments(struct amdgpu_device *adev)
 952{
 953	if (amdgpu_sched_jobs < 4) {
 954		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
 955			 amdgpu_sched_jobs);
 956		amdgpu_sched_jobs = 4;
 957	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
 958		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
 959			 amdgpu_sched_jobs);
 960		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
 961	}
 962
 963	if (amdgpu_gart_size != -1) {
 964		/* gtt size must be greater or equal to 32M */
 965		if (amdgpu_gart_size < 32) {
 966			dev_warn(adev->dev, "gart size (%d) too small\n",
 967				 amdgpu_gart_size);
 968			amdgpu_gart_size = -1;
 969		}
 970	}
 971
 972	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
 973		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
 974			 amdgpu_vm_size);
 975		amdgpu_vm_size = 8;
 
 976	}
 977
 978	if (amdgpu_vm_size < 1) {
 979		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
 980			 amdgpu_vm_size);
 981		amdgpu_vm_size = 8;
 982	}
 983
 984	/*
 985	 * Max GPUVM size for Cayman, SI and CI is 40 bits.
 986	 */
 987	if (amdgpu_vm_size > 1024) {
 988		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
 989			 amdgpu_vm_size);
 990		amdgpu_vm_size = 8;
 991	}
 992
 993	/* defines number of bits in page table versus page directory,
 994	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 995	 * page table and the remaining bits are in the page directory */
 996	if (amdgpu_vm_block_size == -1) {
 997
 998		/* Total bits covered by PD + PTs */
 999		unsigned bits = ilog2(amdgpu_vm_size) + 18;
1000
1001		/* Make sure the PD is 4K in size up to 8GB address space.
1002		   Above that split equally between PD and PTs */
1003		if (amdgpu_vm_size <= 8)
1004			amdgpu_vm_block_size = bits - 9;
1005		else
1006			amdgpu_vm_block_size = (bits + 3) / 2;
1007
1008	} else if (amdgpu_vm_block_size < 9) {
1009		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1010			 amdgpu_vm_block_size);
1011		amdgpu_vm_block_size = 9;
1012	}
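	/*
	 * Worked example: with amdgpu_vm_size = 64 (GB), bits is
	 * ilog2(64) + 18 = 24, so amdgpu_vm_block_size becomes
	 * (24 + 3) / 2 = 13, i.e. 13 bits of page table entries and the
	 * remaining 11 bits of page directory entries.
	 */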
1013
1014	if (amdgpu_vm_block_size > 24 ||
1015	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1016		dev_warn(adev->dev, "VM page table size (%d) too large\n",
1017			 amdgpu_vm_block_size);
1018		amdgpu_vm_block_size = 9;
1019	}
1020
1021	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
1022	    !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
1023		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1024			 amdgpu_vram_page_split);
1025		amdgpu_vram_page_split = 1024;
1026	}
1027}
1028
1029/**
1030 * amdgpu_switcheroo_set_state - set switcheroo state
1031 *
1032 * @pdev: pci dev pointer
1033 * @state: vga_switcheroo state
1034 *
1035 * Callback for the switcheroo driver.  Suspends or resumes
1036 * the asics before or after it is powered up using ACPI methods.
1037 */
1038static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
 
1039{
1040	struct drm_device *dev = pci_get_drvdata(pdev);
 
1041
1042	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1043		return;
1044
1045	if (state == VGA_SWITCHEROO_ON) {
1046		unsigned d3_delay = dev->pdev->d3_delay;
1047
1048		printk(KERN_INFO "amdgpu: switched on\n");
1049		/* don't suspend or resume card normally */
1050		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1051
1052		amdgpu_device_resume(dev, true, true);
1053
1054		dev->pdev->d3_delay = d3_delay;
1055
1056		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1057		drm_kms_helper_poll_enable(dev);
1058	} else {
1059		printk(KERN_INFO "amdgpu: switched off\n");
1060		drm_kms_helper_poll_disable(dev);
1061		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1062		amdgpu_device_suspend(dev, true, true);
1063		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1064	}
1065}
1066
1067/**
1068 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1069 *
1070 * @pdev: pci dev pointer
1071 *
1072 * Callback for the switcheroo driver.  Check if the switcheroo
1073 * state can be changed.
1074 * Returns true if the state can be changed, false if not.
1075 */
1076static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1077{
1078	struct drm_device *dev = pci_get_drvdata(pdev);
1079
1080	/*
1081	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1082	* locking inversion with the driver load path. And the access here is
1083	* completely racy anyway. So don't bother with locking for now.
1084	*/
1085	return dev->open_count == 0;
1086}
1087
1088static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1089	.set_gpu_state = amdgpu_switcheroo_set_state,
1090	.reprobe = NULL,
1091	.can_switch = amdgpu_switcheroo_can_switch,
1092};
1093
1094int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
1095				  enum amd_ip_block_type block_type,
1096				  enum amd_clockgating_state state)
1097{
1098	int i, r = 0;
1099
1100	for (i = 0; i < adev->num_ip_blocks; i++) {
1101		if (!adev->ip_blocks[i].status.valid)
1102			continue;
1103		if (adev->ip_blocks[i].version->type == block_type) {
1104			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1105										     state);
1106			if (r)
1107				return r;
1108			break;
1109		}
 
 
1110	}
1111	return r;
1112}
1113
1114int amdgpu_set_powergating_state(struct amdgpu_device *adev,
1115				  enum amd_ip_block_type block_type,
1116				  enum amd_powergating_state state)
1117{
1118	int i, r = 0;
1119
1120	for (i = 0; i < adev->num_ip_blocks; i++) {
1121		if (!adev->ip_blocks[i].status.valid)
1122			continue;
1123		if (adev->ip_blocks[i].version->type == block_type) {
1124			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
1125										     state);
1126			if (r)
1127				return r;
1128			break;
1129		}
 
 
1130	}
1131	return r;
1132}
1133
1134int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1135			 enum amd_ip_block_type block_type)
1136{
1137	int i, r;
1138
1139	for (i = 0; i < adev->num_ip_blocks; i++) {
1140		if (!adev->ip_blocks[i].status.valid)
1141			continue;
1142		if (adev->ip_blocks[i].version->type == block_type) {
1143			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1144			if (r)
1145				return r;
1146			break;
1147		}
1148	}
1149	return 0;
1150
1151}
1152
1153bool amdgpu_is_idle(struct amdgpu_device *adev,
1154		    enum amd_ip_block_type block_type)
1155{
1156	int i;
1157
1158	for (i = 0; i < adev->num_ip_blocks; i++) {
1159		if (!adev->ip_blocks[i].status.valid)
1160			continue;
1161		if (adev->ip_blocks[i].version->type == block_type)
1162			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1163	}
1164	return true;
1165
1166}
1167
1168struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1169					     enum amd_ip_block_type type)
1170{
1171	int i;
1172
1173	for (i = 0; i < adev->num_ip_blocks; i++)
1174		if (adev->ip_blocks[i].version->type == type)
1175			return &adev->ip_blocks[i];
1176
1177	return NULL;
1178}
1179
1180/**
1181 * amdgpu_ip_block_version_cmp
1182 *
1183 * @adev: amdgpu_device pointer
1184 * @type: enum amd_ip_block_type
1185 * @major: major version
1186 * @minor: minor version
1187 *
1188 * return 0 if the IP block version is equal or greater,
1189 * return 1 if it is smaller or the ip_block doesn't exist
1190 */
1191int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
1192				enum amd_ip_block_type type,
1193				u32 major, u32 minor)
1194{
1195	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
1196
1197	if (ip_block && ((ip_block->version->major > major) ||
1198			((ip_block->version->major == major) &&
1199			(ip_block->version->minor >= minor))))
1200		return 0;
1201
1202	return 1;
1203}
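/*
 * Usage sketch: callers typically gate features on a minimum IP version,
 * e.g.
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC, 7, 1) == 0)
 *		smc_7_1_or_newer_path();	// illustrative name only
 */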
1204
1205/**
1206 * amdgpu_ip_block_add
1207 *
1208 * @adev: amdgpu_device pointer
1209 * @ip_block_version: pointer to the IP to add
1210 *
1211 * Adds the IP block driver information to the collection of IPs
1212 * on the asic.
1213 */
1214int amdgpu_ip_block_add(struct amdgpu_device *adev,
1215			const struct amdgpu_ip_block_version *ip_block_version)
1216{
1217	if (!ip_block_version)
1218		return -EINVAL;
1219
1220	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1221
1222	return 0;
1223}
1224
1225static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1226{
1227	adev->enable_virtual_display = false;
1228
1229	if (amdgpu_virtual_display) {
1230		struct drm_device *ddev = adev->ddev;
1231		const char *pci_address_name = pci_name(ddev->pdev);
1232		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1233
1234		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1235		pciaddstr_tmp = pciaddstr;
1236		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1237			pciaddname = strsep(&pciaddname_tmp, ",");
1238			if (!strcmp(pci_address_name, pciaddname)) {
 
1239				long num_crtc;
1240				int res = -1;
1241
1242				adev->enable_virtual_display = true;
1243
1244				if (pciaddname_tmp)
1245					res = kstrtol(pciaddname_tmp, 10,
1246						      &num_crtc);
1247
1248				if (!res) {
1249					if (num_crtc < 1)
1250						num_crtc = 1;
1251					if (num_crtc > 6)
1252						num_crtc = 6;
1253					adev->mode_info.num_crtc = num_crtc;
1254				} else {
1255					adev->mode_info.num_crtc = 1;
1256				}
1257				break;
1258			}
1259		}
1260
1261		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1262			 amdgpu_virtual_display, pci_address_name,
1263			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1264
1265		kfree(pciaddstr);
1266	}
1267}
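/*
 * Example (hypothetical addresses): booting with
 * amdgpu.virtual_display=0000:01:00.0,2;0000:02:00.0 enables virtual
 * display with two CRTCs on the first device and the default single CRTC
 * on the second.
 */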
1268
1269static int amdgpu_early_init(struct amdgpu_device *adev)
1270{
1271	int i, r;
1272
1273	amdgpu_device_enable_virtual_display(adev);
 
1274
1275	switch (adev->asic_type) {
1276	case CHIP_TOPAZ:
1277	case CHIP_TONGA:
1278	case CHIP_FIJI:
1279	case CHIP_POLARIS11:
1280	case CHIP_POLARIS10:
1281	case CHIP_POLARIS12:
1282	case CHIP_CARRIZO:
1283	case CHIP_STONEY:
1284		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1285			adev->family = AMDGPU_FAMILY_CZ;
1286		else
1287			adev->family = AMDGPU_FAMILY_VI;
1288
1289		r = vi_set_ip_blocks(adev);
 
1290		if (r)
1291			return r;
1292		break;
 
 
1293#ifdef CONFIG_DRM_AMDGPU_SI
1294	case CHIP_VERDE:
1295	case CHIP_TAHITI:
1296	case CHIP_PITCAIRN:
1297	case CHIP_OLAND:
1298	case CHIP_HAINAN:
1299		adev->family = AMDGPU_FAMILY_SI;
1300		r = si_set_ip_blocks(adev);
1301		if (r)
1302			return r;
1303		break;
1304#endif
1305#ifdef CONFIG_DRM_AMDGPU_CIK
1306	case CHIP_BONAIRE:
1307	case CHIP_HAWAII:
1308	case CHIP_KAVERI:
1309	case CHIP_KABINI:
1310	case CHIP_MULLINS:
1311		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
 
 
1312			adev->family = AMDGPU_FAMILY_CI;
1313		else
1314			adev->family = AMDGPU_FAMILY_KV;
1315
1316		r = cik_set_ip_blocks(adev);
1317		if (r)
1318			return r;
1319		break;
1320#endif
1321	default:
1322		/* FIXME: not supported yet */
1323		return -EINVAL;
1324	}
1325
1326	for (i = 0; i < adev->num_ip_blocks; i++) {
1327		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1328			DRM_ERROR("disabled ip block: %d\n", i);
 
1329			adev->ip_blocks[i].status.valid = false;
1330		} else {
1331			if (adev->ip_blocks[i].version->funcs->early_init) {
1332				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1333				if (r == -ENOENT) {
1334					adev->ip_blocks[i].status.valid = false;
1335				} else if (r) {
1336					DRM_ERROR("early_init of IP block <%s> failed %d\n",
1337						  adev->ip_blocks[i].version->funcs->name, r);
1338					return r;
1339				} else {
1340					adev->ip_blocks[i].status.valid = true;
1341				}
1342			} else {
1343				adev->ip_blocks[i].status.valid = true;
1344			}
1345		}
1346	}
1347
1348	adev->cg_flags &= amdgpu_cg_mask;
1349	adev->pg_flags &= amdgpu_pg_mask;
1350
1351	return 0;
1352}
1353
1354static int amdgpu_init(struct amdgpu_device *adev)
1355{
1356	int i, r;
1357
1358	for (i = 0; i < adev->num_ip_blocks; i++) {
1359		if (!adev->ip_blocks[i].status.valid)
1360			continue;
1361		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1362		if (r) {
1363			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1364				  adev->ip_blocks[i].version->funcs->name, r);
1365			return r;
1366		}
1367		adev->ip_blocks[i].status.sw = true;
1368		/* need to do gmc hw init early so we can allocate gpu mem */
1369		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1370			r = amdgpu_vram_scratch_init(adev);
1371			if (r) {
1372				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1373				return r;
1374			}
1375			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1376			if (r) {
1377				DRM_ERROR("hw_init %d failed %d\n", i, r);
1378				return r;
1379			}
1380			r = amdgpu_wb_init(adev);
1381			if (r) {
1382				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
1383				return r;
1384			}
1385			adev->ip_blocks[i].status.hw = true;
1386		}
1387	}
1388
1389	for (i = 0; i < adev->num_ip_blocks; i++) {
1390		if (!adev->ip_blocks[i].status.sw)
1391			continue;
1392		/* gmc hw init is done early */
1393		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
 
 
1394			continue;
1395		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1396		if (r) {
1397			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1398				  adev->ip_blocks[i].version->funcs->name, r);
1399			return r;
1400		}
1401		adev->ip_blocks[i].status.hw = true;
1402	}
1403
1404	return 0;
1405}
1406
1407static int amdgpu_late_init(struct amdgpu_device *adev)
1408{
 
1409	int i = 0, r;
1410
1411	for (i = 0; i < adev->num_ip_blocks; i++) {
1412		if (!adev->ip_blocks[i].status.valid)
1413			continue;
1414		if (adev->ip_blocks[i].version->funcs->late_init) {
1415			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1416			if (r) {
1417				DRM_ERROR("late_init of IP block <%s> failed %d\n",
1418					  adev->ip_blocks[i].version->funcs->name, r);
1419				return r;
1420			}
1421			adev->ip_blocks[i].status.late_initialized = true;
1422		}
1423		/* skip CG for VCE/UVD, it's handled specially */
1424		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1425		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1426			/* enable clockgating to save power */
1427			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1428										     AMD_CG_STATE_GATE);
1429			if (r) {
1430				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1431					  adev->ip_blocks[i].version->funcs->name, r);
1432				return r;
1433			}
1434		}
 
 
1435	}
1436
1437	return 0;
1438}
1439
1440static int amdgpu_fini(struct amdgpu_device *adev)
1441{
1442	int i, r;
1443
1444	/* need to disable SMC first */
 
 
1445	for (i = 0; i < adev->num_ip_blocks; i++) {
1446		if (!adev->ip_blocks[i].status.hw)
1447			continue;
1448		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
1449			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
1450			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1451										     AMD_CG_STATE_UNGATE);
1452			if (r) {
1453				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1454					  adev->ip_blocks[i].version->funcs->name, r);
1455				return r;
1456			}
1457			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1458			/* XXX handle errors */
1459			if (r) {
1460				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1461					  adev->ip_blocks[i].version->funcs->name, r);
1462			}
1463			adev->ip_blocks[i].status.hw = false;
1464			break;
1465		}
1466	}
1467
1468	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1469		if (!adev->ip_blocks[i].status.hw)
1470			continue;
1471		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1472			amdgpu_wb_fini(adev);
1473			amdgpu_vram_scratch_fini(adev);
1474		}
1475
1476		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1477			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1478			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
1479			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1480										     AMD_CG_STATE_UNGATE);
1481			if (r) {
1482				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1483					  adev->ip_blocks[i].version->funcs->name, r);
1484				return r;
1485			}
1486		}
1487
1488		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1489		/* XXX handle errors */
1490		if (r) {
1491			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1492				  adev->ip_blocks[i].version->funcs->name, r);
1493		}
1494
1495		adev->ip_blocks[i].status.hw = false;
1496	}
1497
1498	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1499		if (!adev->ip_blocks[i].status.sw)
1500			continue;
1501		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
1502		/* XXX handle errors */
1503		if (r) {
1504			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1505				  adev->ip_blocks[i].version->funcs->name, r);
1506		}
1507		adev->ip_blocks[i].status.sw = false;
1508		adev->ip_blocks[i].status.valid = false;
1509	}
1510
1511	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1512		if (!adev->ip_blocks[i].status.late_initialized)
1513			continue;
1514		if (adev->ip_blocks[i].version->funcs->late_fini)
1515			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1516		adev->ip_blocks[i].status.late_initialized = false;
1517	}
1518
 
 
1519	return 0;
1520}
1521
1522int amdgpu_suspend(struct amdgpu_device *adev)
1523{
1524	int i, r;
1525
1526	/* ungate SMC block first */
1527	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1528					 AMD_CG_STATE_UNGATE);
1529	if (r) {
1530		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1531	}
1532
1533	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1534		if (!adev->ip_blocks[i].status.valid)
1535			continue;
1536		/* ungate blocks so that suspend can properly shut them down */
1537		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
1538			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1539										     AMD_CG_STATE_UNGATE);
1540			if (r) {
1541				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1542					  adev->ip_blocks[i].version->funcs->name, r);
1543			}
1544		}
1545		/* XXX handle errors */
1546		r = adev->ip_blocks[i].version->funcs->suspend(adev);
1547		/* XXX handle errors */
1548		if (r) {
1549			DRM_ERROR("suspend of IP block <%s> failed %d\n",
1550				  adev->ip_blocks[i].version->funcs->name, r);
1551		}
1552	}
1553
1554	return 0;
1555}
1556
1557static int amdgpu_resume(struct amdgpu_device *adev)
1558{
1559	int i, r;
1560
1561	for (i = 0; i < adev->num_ip_blocks; i++) {
1562		if (!adev->ip_blocks[i].status.valid)
1563			continue;
1564		r = adev->ip_blocks[i].version->funcs->resume(adev);
1565		if (r) {
1566			DRM_ERROR("resume of IP block <%s> failed %d\n",
1567				  adev->ip_blocks[i].version->funcs->name, r);
1568			return r;
1569		}
 
1570	}
1571
1572	return 0;
1573}
1574
1575static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
1576{
1577	if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1578		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1579}
1580
1581/**
1582 * amdgpu_device_init - initialize the driver
1583 *
1584 * @adev: amdgpu_device pointer
1585 * @ddev: drm dev pointer
1586 * @pdev: pci dev pointer
1587 * @flags: driver flags
1588 *
1589 * Initializes the driver info and hw (all asics).
1590 * Returns 0 for success or an error on failure.
1591 * Called at driver startup.
1592 */
1593int amdgpu_device_init(struct amdgpu_device *adev,
1594		       struct drm_device *ddev,
1595		       struct pci_dev *pdev,
1596		       uint32_t flags)
1597{
 
 
1598	int r, i;
1599	bool runtime = false;
1600	u32 max_MBps;
 
1601
1602	adev->shutdown = false;
1603	adev->dev = &pdev->dev;
1604	adev->ddev = ddev;
1605	adev->pdev = pdev;
1606	adev->flags = flags;
1607	adev->asic_type = flags & AMD_ASIC_MASK;
1608	adev->is_atom_bios = false;
1609	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1610	adev->mc.gtt_size = 512 * 1024 * 1024;
 
 
1611	adev->accel_working = false;
1612	adev->num_rings = 0;
 
1613	adev->mman.buffer_funcs = NULL;
1614	adev->mman.buffer_funcs_ring = NULL;
1615	adev->vm_manager.vm_pte_funcs = NULL;
1616	adev->vm_manager.vm_pte_num_rings = 0;
1617	adev->gart.gart_funcs = NULL;
 
1618	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 
1619
1620	adev->smc_rreg = &amdgpu_invalid_rreg;
1621	adev->smc_wreg = &amdgpu_invalid_wreg;
1622	adev->pcie_rreg = &amdgpu_invalid_rreg;
1623	adev->pcie_wreg = &amdgpu_invalid_wreg;
 
 
1624	adev->pciep_rreg = &amdgpu_invalid_rreg;
1625	adev->pciep_wreg = &amdgpu_invalid_wreg;
1626	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1627	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1628	adev->didt_rreg = &amdgpu_invalid_rreg;
1629	adev->didt_wreg = &amdgpu_invalid_wreg;
1630	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1631	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
1632	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1633	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1634
1635
1636	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1637		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1638		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1639
1640	/* mutex initializations are all done here so we
1641	 * can call these functions without locking issues */
1642	mutex_init(&adev->vm_manager.lock);
1643	atomic_set(&adev->irq.ih.lock, 0);
1644	mutex_init(&adev->pm.mutex);
1645	mutex_init(&adev->gfx.gpu_clock_mutex);
1646	mutex_init(&adev->srbm_mutex);
1647	mutex_init(&adev->grbm_idx_mutex);
1648	mutex_init(&adev->mn_lock);
 
1649	hash_init(adev->mn_hash);
1650
1651	amdgpu_check_arguments(adev);
1652
1653	/* Registers mapping */
1654	/* TODO: block userspace mapping of io register */
1655	spin_lock_init(&adev->mmio_idx_lock);
1656	spin_lock_init(&adev->smc_idx_lock);
1657	spin_lock_init(&adev->pcie_idx_lock);
1658	spin_lock_init(&adev->uvd_ctx_idx_lock);
1659	spin_lock_init(&adev->didt_idx_lock);
1660	spin_lock_init(&adev->gc_cac_idx_lock);
 
1661	spin_lock_init(&adev->audio_endpt_idx_lock);
1662	spin_lock_init(&adev->mm_stats.lock);
1663
1664	INIT_LIST_HEAD(&adev->shadow_list);
1665	mutex_init(&adev->shadow_list_lock);
1666
1667	INIT_LIST_HEAD(&adev->gtt_list);
1668	spin_lock_init(&adev->gtt_list_lock);
1669
1670	if (adev->asic_type >= CHIP_BONAIRE) {
1671		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1672		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1673	} else {
1674		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1675		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1676	}
1677
1678	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1679	if (adev->rmmio == NULL) {
1680		return -ENOMEM;
1681	}
1682	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1683	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1684
1685	if (adev->asic_type >= CHIP_BONAIRE)
1686		/* doorbell bar mapping */
1687		amdgpu_doorbell_init(adev);
1688
1689	/* io port mapping */
1690	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1691		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1692			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1693			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1694			break;
1695		}
1696	}
1697	if (adev->rio_mem == NULL)
1698		DRM_ERROR("Unable to find PCI I/O BAR\n");
1699
1700	/* early init functions */
1701	r = amdgpu_early_init(adev);
1702	if (r)
1703		return r;
1704
1705	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
1706	/* this will fail for cards that aren't VGA class devices, just
1707	 * ignore it */
1708	vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
1709
1710	if (amdgpu_runtime_pm == 1)
1711		runtime = true;
1712	if (amdgpu_device_is_px(ddev))
1713		runtime = true;
1714	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
1715	if (runtime)
1716		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1717
1718	/* Read BIOS */
1719	if (!amdgpu_get_bios(adev)) {
1720		r = -EINVAL;
1721		goto failed;
 
 
1722	}
1723	/* Must be an ATOMBIOS */
1724	if (!adev->is_atom_bios) {
1725		dev_err(adev->dev, "Expecting atombios for GPU\n");
1726		r = -EINVAL;
1727		goto failed;
1728	}
1729	r = amdgpu_atombios_init(adev);
1730	if (r) {
1731		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
1732		goto failed;
1733	}
1734
 
 
1735	/* detect if we are with an SRIOV vbios */
1736	amdgpu_device_detect_sriov_bios(adev);
1737
1738	/* Post card if necessary */
1739	if (amdgpu_vpost_needed(adev)) {
1740		if (!adev->bios) {
1741			dev_err(adev->dev, "no vBIOS found\n");
1742			r = -EINVAL;
1743			goto failed;
1744		}
1745		DRM_INFO("GPU posting now...\n");
1746		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1747		if (r) {
1748			dev_err(adev->dev, "gpu post error!\n");
1749			goto failed;
1750		}
1751	} else {
1752		DRM_INFO("GPU post is not needed\n");
1753	}
1754
1755	/* Initialize clocks */
1756	r = amdgpu_atombios_get_clock_info(adev);
1757	if (r) {
1758		dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
1759		goto failed;
1760	}
1761	/* init i2c buses */
1762	amdgpu_atombios_i2c_init(adev);
1763
 
1764	/* Fence driver */
1765	r = amdgpu_fence_driver_init(adev);
1766	if (r) {
1767		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
 
1768		goto failed;
1769	}
1770
1771	/* init the mode config */
1772	drm_mode_config_init(adev->ddev);
1773
1774	r = amdgpu_init(adev);
1775	if (r) {
1776		dev_err(adev->dev, "amdgpu_init failed\n");
1777		amdgpu_fini(adev);
1778		goto failed;
1779	}
1780
1781	adev->accel_working = true;
1782
 
 
1783	/* Initialize the buffer migration limit. */
1784	if (amdgpu_moverate >= 0)
1785		max_MBps = amdgpu_moverate;
1786	else
1787		max_MBps = 8; /* Allow 8 MB/s. */
1788	/* Get a log2 for easy divisions. */
1789	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
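	/* e.g. the default of 8 MB/s gives log2_max_MBps = 3, letting users
	 * compute budgets with shifts instead of divisions */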
1790
1791	amdgpu_fbdev_init(adev);
1792
1793	r = amdgpu_ib_pool_init(adev);
1794	if (r) {
1795		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
1796		goto failed;
1797	}
1798
1799	r = amdgpu_ib_ring_tests(adev);
1800	if (r)
1801		DRM_ERROR("ib ring test failed (%d).\n", r);
1802
1803	r = amdgpu_gem_debugfs_init(adev);
1804	if (r) {
1805		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1806	}
1807
1808	r = amdgpu_debugfs_regs_init(adev);
1809	if (r) {
1810		DRM_ERROR("registering register debugfs failed (%d).\n", r);
1811	}
1812
1813	r = amdgpu_debugfs_firmware_init(adev);
1814	if (r) {
1815		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
1816		return r;
1817	}
1818
1819	if ((amdgpu_testing & 1)) {
1820		if (adev->accel_working)
1821			amdgpu_test_moves(adev);
1822		else
1823			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
1824	}
1825	if ((amdgpu_testing & 2)) {
1826		if (adev->accel_working)
1827			amdgpu_test_syncing(adev);
1828		else
1829			DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n");
1830	}
1831	if (amdgpu_benchmarking) {
1832		if (adev->accel_working)
1833			amdgpu_benchmark(adev, amdgpu_benchmarking);
1834		else
1835			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
1836	}
1837
1838	/* enable clockgating, etc. after ib tests, etc. since some blocks require
1839	 * explicit gating rather than handling it automatically.
 
1840	 */
1841	r = amdgpu_late_init(adev);
1842	if (r) {
1843		dev_err(adev->dev, "amdgpu_late_init failed\n");
1844		goto failed;
1845	}
1846
1847	return 0;
1848
1849failed:
1850	if (runtime)
1851		vga_switcheroo_fini_domain_pm_ops(adev->dev);
1852	return r;
1853}
1854
1855static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
1856
1857/**
1858 * amdgpu_device_fini - tear down the driver
1859 *
1860 * @adev: amdgpu_device pointer
1861 *
1862 * Tear down the driver info (all asics).
1863 * Called at driver shutdown.
1864 */
1865void amdgpu_device_fini(struct amdgpu_device *adev)
1866{
1867	int r;
 
1868
1869	DRM_INFO("amdgpu: finishing device.\n");
1870	adev->shutdown = true;
1871	drm_crtc_force_disable_all(adev->ddev);
1872	/* evict vram memory */
1873	amdgpu_bo_evict_vram(adev);
1874	amdgpu_ib_pool_fini(adev);
1875	amdgpu_fence_driver_fini(adev);
1876	amdgpu_fbdev_fini(adev);
1877	r = amdgpu_fini(adev);
1878	adev->accel_working = false;
1879	/* free i2c buses */
1880	amdgpu_i2c_fini(adev);
1881	amdgpu_atombios_fini(adev);
1882	kfree(adev->bios);
1883	adev->bios = NULL;
1884	vga_switcheroo_unregister_client(adev->pdev);
1885	if (adev->flags & AMD_IS_PX)
1886		vga_switcheroo_fini_domain_pm_ops(adev->dev);
1887	vga_client_register(adev->pdev, NULL, NULL, NULL);
1888	if (adev->rio_mem)
1889		pci_iounmap(adev->pdev, adev->rio_mem);
1890	adev->rio_mem = NULL;
1891	iounmap(adev->rmmio);
1892	adev->rmmio = NULL;
1893	if (adev->asic_type >= CHIP_BONAIRE)
 
1894		amdgpu_doorbell_fini(adev);
1895	amdgpu_debugfs_regs_cleanup(adev);
1896	amdgpu_debugfs_remove_files(adev);
1897}
1898
1899
1900/*
1901 * Suspend & resume.
1902 */
1903/**
1904 * amdgpu_device_suspend - initiate device suspend
1905 *
1906 * @dev: drm dev pointer
1907 * @suspend: suspend state
1908 *
1909 * Puts the hw in the suspend state (all asics).
1910 * Returns 0 for success or an error on failure.
1911 * Called at driver suspend.
1912 */
1913int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
1914{
1915	struct amdgpu_device *adev;
1916	struct drm_crtc *crtc;
1917	struct drm_connector *connector;
1918	int r;
1919
1920	if (dev == NULL || dev->dev_private == NULL) {
1921		return -ENODEV;
1922	}
1923
1924	adev = dev->dev_private;
1925
1926	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1927		return 0;
1928
1929	drm_kms_helper_poll_disable(dev);
1930
1931	/* turn off display hw */
1932	drm_modeset_lock_all(dev);
1933	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1934		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 
1935	}
1936	drm_modeset_unlock_all(dev);
1937
1938	/* unpin the front buffers and cursors */
1939	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1940		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1941		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
1942		struct amdgpu_bo *robj;
1943
1944		if (amdgpu_crtc->cursor_bo) {
1945			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1946			r = amdgpu_bo_reserve(aobj, false);
1947			if (r == 0) {
1948				amdgpu_bo_unpin(aobj);
1949				amdgpu_bo_unreserve(aobj);
1950			}
1951		}
1952
1953		if (rfb == NULL || rfb->obj == NULL) {
1954			continue;
1955		}
1956		robj = gem_to_amdgpu_bo(rfb->obj);
1957		/* don't unpin kernel fb objects */
1958		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
1959			r = amdgpu_bo_reserve(robj, false);
1960			if (r == 0) {
1961				amdgpu_bo_unpin(robj);
1962				amdgpu_bo_unreserve(robj);
1963			}
1964		}
1965	}
1966	/* evict vram memory */
1967	amdgpu_bo_evict_vram(adev);
1968
1969	amdgpu_fence_driver_suspend(adev);
1970
1971	r = amdgpu_suspend(adev);
1972
1973	/* evict remaining vram memory
1974	 * This second call to evict vram is to evict the gart page table
1975	 * using the CPU.
1976	 */
1977	amdgpu_bo_evict_vram(adev);
1978
1979	amdgpu_atombios_scratch_regs_save(adev);
1980	pci_save_state(dev->pdev);
1981	if (suspend) {
1982		/* Shut down the device */
1983		pci_disable_device(dev->pdev);
1984		pci_set_power_state(dev->pdev, PCI_D3hot);
1985	} else {
1986		r = amdgpu_asic_reset(adev);
1987		if (r)
1988			DRM_ERROR("amdgpu asic reset failed\n");
1989	}
1990
1991	if (fbcon) {
1992		console_lock();
1993		amdgpu_fbdev_set_suspend(adev, 1);
1994		console_unlock();
1995	}
1996	return 0;
1997}
1998
1999/**
2000 * amdgpu_device_resume - initiate device resume
2001 *
2002 * @dev: drm dev pointer
 
2003 *
2004 * Bring the hw back to operating state (all asics).
2005 * Returns 0 for success or an error on failure.
2006 * Called at driver resume.
2007 */
2008int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2009{
2010	struct drm_connector *connector;
2011	struct amdgpu_device *adev = dev->dev_private;
2012	struct drm_crtc *crtc;
2013	int r;
2014
2015	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2016		return 0;
2017
2018	if (fbcon)
2019		console_lock();
2020
2021	if (resume) {
2022		pci_set_power_state(dev->pdev, PCI_D0);
2023		pci_restore_state(dev->pdev);
2024		r = pci_enable_device(dev->pdev);
2025		if (r) {
2026			if (fbcon)
2027				console_unlock();
2028			return r;
2029		}
2030	}
2031	amdgpu_atombios_scratch_regs_restore(adev);
2032
2033	/* post card */
2034	if (!amdgpu_card_posted(adev) || !resume) {
2035		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2036		if (r)
2037			DRM_ERROR("amdgpu asic init failed\n");
2038	}
2039
2040	r = amdgpu_resume(adev);
2041	if (r)
2042		DRM_ERROR("amdgpu_resume failed (%d).\n", r);
2043
2044	amdgpu_fence_driver_resume(adev);
2045
2046	if (resume) {
2047		r = amdgpu_ib_ring_tests(adev);
2048		if (r)
2049			DRM_ERROR("ib ring test failed (%d).\n", r);
2050	}
2051
2052	r = amdgpu_late_init(adev);
2053	if (r)
2054		return r;
2055
2056	/* pin cursors */
2057	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2058		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2059
2060		if (amdgpu_crtc->cursor_bo) {
2061			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2062			r = amdgpu_bo_reserve(aobj, false);
2063			if (r == 0) {
2064				r = amdgpu_bo_pin(aobj,
2065						  AMDGPU_GEM_DOMAIN_VRAM,
2066						  &amdgpu_crtc->cursor_addr);
2067				if (r != 0)
2068					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2069				amdgpu_bo_unreserve(aobj);
2070			}
2071		}
2072	}
2073
2074	/* blat the mode back in */
2075	if (fbcon) {
2076		drm_helper_resume_force_mode(dev);
2077		/* turn on display hw */
2078		drm_modeset_lock_all(dev);
2079		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2080			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2081		}
2082		drm_modeset_unlock_all(dev);
2083	}
2084
2085	drm_kms_helper_poll_enable(dev);
2086
2087	/*
2088	 * Most of the connector probing functions try to acquire runtime pm
2089	 * refs to ensure that the GPU is powered on when connector polling is
2090	 * performed. Since we're calling this from a runtime PM callback,
2091	 * trying to acquire rpm refs will cause us to deadlock.
2092	 *
2093	 * Since we're guaranteed to be holding the rpm lock, it's safe to
2094	 * temporarily disable the rpm helpers so this doesn't deadlock us.
2095	 */
2096#ifdef CONFIG_PM
2097	dev->dev->power.disable_depth++;
2098#endif
2099	drm_helper_hpd_irq_event(dev);
2100#ifdef CONFIG_PM
2101	dev->dev->power.disable_depth--;
2102#endif
 
 
2103
2104	if (fbcon) {
2105		amdgpu_fbdev_set_suspend(adev, 0);
2106		console_unlock();
2107	}
 
2108
2109	return 0;
2110}
2111
2112static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2113{
2114	int i;
2115	bool asic_hang = false;
2116
2117	for (i = 0; i < adev->num_ip_blocks; i++) {
2118		if (!adev->ip_blocks[i].status.valid)
2119			continue;
2120		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2121			adev->ip_blocks[i].status.hang =
2122				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2123		if (adev->ip_blocks[i].status.hang) {
2124			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
2125			asic_hang = true;
2126		}
2127	}
2128	return asic_hang;
2129}
2130
2131static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
2132{
2133	int i, r = 0;
2134
2135	for (i = 0; i < adev->num_ip_blocks; i++) {
2136		if (!adev->ip_blocks[i].status.valid)
2137			continue;
2138		if (adev->ip_blocks[i].status.hang &&
2139		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2140			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
2141			if (r)
2142				return r;
2143		}
2144	}
2145
2146	return 0;
2147}
2148
2149static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2150{
2151	int i;
2152
2153	for (i = 0; i < adev->num_ip_blocks; i++) {
2154		if (!adev->ip_blocks[i].status.valid)
2155			continue;
2156		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2157		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2158		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2159		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
 
2160			if (adev->ip_blocks[i].status.hang) {
2161				DRM_INFO("Some blocks need a full reset!\n");
2162				return true;
2163			}
2164		}
2165	}
2166	return false;
2167}
2168
2169static int amdgpu_soft_reset(struct amdgpu_device *adev)
2170{
2171	int i, r = 0;
2172
2173	for (i = 0; i < adev->num_ip_blocks; i++) {
2174		if (!adev->ip_blocks[i].status.valid)
2175			continue;
2176		if (adev->ip_blocks[i].status.hang &&
2177		    adev->ip_blocks[i].version->funcs->soft_reset) {
2178			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
2179			if (r)
2180				return r;
2181		}
2182	}
2183
2184	return 0;
2185}
2186
2187static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2188{
2189	int i, r = 0;
2190
2191	for (i = 0; i < adev->num_ip_blocks; i++) {
2192		if (!adev->ip_blocks[i].status.valid)
2193			continue;
2194		if (adev->ip_blocks[i].status.hang &&
2195		    adev->ip_blocks[i].version->funcs->post_soft_reset)
2196			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
2197		if (r)
2198			return r;
2199	}
2200
2201	return 0;
2202}
2203
2204bool amdgpu_need_backup(struct amdgpu_device *adev)
2205{
2206	if (adev->flags & AMD_IS_APU)
2207		return false;
2208
2209	return amdgpu_lockup_timeout > 0;
2210}
2211
2212static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2213					   struct amdgpu_ring *ring,
2214					   struct amdgpu_bo *bo,
2215					   struct dma_fence **fence)
2216{
2217	uint32_t domain;
2218	int r;
2219
2220	if (!bo->shadow)
2221		return 0;
2222
2223	r = amdgpu_bo_reserve(bo, false);
2224	if (r)
2225		return r;
2226	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2227	/* if bo has been evicted, then no need to recover */
2228	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2229		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2230						  NULL, fence, true);
2231		if (r) {
2232			DRM_ERROR("recover page table failed!\n");
2233			goto err;
2234		}
2235	}
2236err:
2237	amdgpu_bo_unreserve(bo);
2238	return r;
2239}
2240
2241/**
2242 * amdgpu_gpu_reset - reset the asic
2243 *
2244 * @adev: amdgpu device pointer
2245 *
2246 * Attempt to reset the GPU if it has hung (all asics).
2247 * Returns 0 for success or an error on failure.
2248 */
2249int amdgpu_gpu_reset(struct amdgpu_device *adev)
2250{
2251	int i, r;
2252	int resched;
2253	bool need_full_reset;
2254
2255	if (!amdgpu_check_soft_reset(adev)) {
2256		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2257		return 0;
2258	}
2259
2260	atomic_inc(&adev->gpu_reset_counter);
2261
2262	/* block TTM */
2263	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2264
2265	/* block scheduler */
2266	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2267		struct amdgpu_ring *ring = adev->rings[i];
2268
2269		if (!ring)
2270			continue;
2271		kthread_park(ring->sched.thread);
2272		amd_sched_hw_job_reset(&ring->sched);
2273	}
2274	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2275	amdgpu_fence_driver_force_completion(adev);
2276
2277	need_full_reset = amdgpu_need_full_reset(adev);
2278
2279	if (!need_full_reset) {
2280		amdgpu_pre_soft_reset(adev);
2281		r = amdgpu_soft_reset(adev);
2282		amdgpu_post_soft_reset(adev);
2283		if (r || amdgpu_check_soft_reset(adev)) {
2284			DRM_INFO("soft reset failed, will fallback to full reset!\n");
2285			need_full_reset = true;
 
 
2286		}
2287	}
2288
2289	if (need_full_reset) {
2290		r = amdgpu_suspend(adev);
2291
2292retry:
2293		/* Disable fb access */
2294		if (adev->mode_info.num_crtc) {
2295			struct amdgpu_mode_mc_save save;
2296			amdgpu_display_stop_mc_access(adev, &save);
2297			amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2298		}
2299		amdgpu_atombios_scratch_regs_save(adev);
2300		r = amdgpu_asic_reset(adev);
2301		amdgpu_atombios_scratch_regs_restore(adev);
2302		/* post card */
2303		amdgpu_atom_asic_init(adev->mode_info.atom_context);
2304
2305		if (!r) {
2306			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2307			r = amdgpu_resume(adev);
2308		}
2309	}
2310	if (!r) {
2311		amdgpu_irq_gpu_reset_resume_helper(adev);
2312		if (need_full_reset && amdgpu_need_backup(adev)) {
2313			r = amdgpu_ttm_recover_gart(adev);
2314			if (r)
2315				DRM_ERROR("gart recovery failed!!!\n");
2316		}
2317		r = amdgpu_ib_ring_tests(adev);
2318		if (r) {
2319			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
2320			r = amdgpu_suspend(adev);
2321			need_full_reset = true;
2322			goto retry;
2323		}
2324		/*
2325		 * recover vm page tables, since we cannot depend on VRAM being
2326		 * consistent after a gpu full reset.
2327		 */
2328		if (need_full_reset && amdgpu_need_backup(adev)) {
2329			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2330			struct amdgpu_bo *bo, *tmp;
2331			struct dma_fence *fence = NULL, *next = NULL;
2332
2333			DRM_INFO("recover vram bo from shadow\n");
2334			mutex_lock(&adev->shadow_list_lock);
2335			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2336				amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2337				if (fence) {
2338					r = dma_fence_wait(fence, false);
2339					if (r) {
2340						WARN(r, "recovery from shadow isn't completed\n");
2341						break;
2342					}
2343				}
2344
2345				dma_fence_put(fence);
2346				fence = next;
2347			}
2348			mutex_unlock(&adev->shadow_list_lock);
2349			if (fence) {
2350				r = dma_fence_wait(fence, false);
2351				if (r)
2352					WARN(r, "recovery from shadow isn't completed\n");
2353			}
2354			dma_fence_put(fence);
2355		}
2356		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2357			struct amdgpu_ring *ring = adev->rings[i];
2358			if (!ring)
2359				continue;
2360
2361			amd_sched_job_recovery(&ring->sched);
2362			kthread_unpark(ring->sched.thread);
2363		}
2364	} else {
2365		dev_err(adev->dev, "asic resume failed (%d).\n", r);
2366		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2367			if (adev->rings[i]) {
2368				kthread_unpark(adev->rings[i]->sched.thread);
2369			}
2370		}
2371	}
2372
2373	drm_helper_resume_force_mode(adev->ddev);
2374
2375	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2376	if (r) {
2377		/* bad news, how do we tell userspace? */
2378		dev_info(adev->dev, "GPU reset failed\n");
2379	}
2380
2381	return r;
2382}
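
/*
 * Illustrative sketch, not part of the original file: how a hang-handling
 * path might drive amdgpu_gpu_reset(). The helper name below is
 * hypothetical; the real callers sit in the scheduler timeout and fence
 * fallback paths.
 */
static void example_handle_gpu_hang(struct amdgpu_device *adev)
{
	int r;

	/* amdgpu_gpu_reset() re-checks for a hang itself and returns 0
	 * early if nothing is actually stuck, so calling it on a mere
	 * suspicion of a hang is safe.
	 */
	r = amdgpu_gpu_reset(adev);
	if (r)
		dev_err(adev->dev, "recovery failed, GPU reset returned %d\n", r);
}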
2383
2384void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2385{
2386	u32 mask;
2387	int ret;
2388
2389	if (amdgpu_pcie_gen_cap)
2390		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
2391
2392	if (amdgpu_pcie_lane_cap)
2393		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
2394
2395	/* covers APUs as well */
2396	if (pci_is_root_bus(adev->pdev->bus)) {
2397		if (adev->pm.pcie_gen_mask == 0)
2398			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2399		if (adev->pm.pcie_mlw_mask == 0)
2400			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2401		return;
2402	}
2403
2404	if (adev->pm.pcie_gen_mask == 0) {
2405		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2406		if (!ret) {
2407			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2408						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2409						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2410
2411			if (mask & DRM_PCIE_SPEED_25)
2412				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2413			if (mask & DRM_PCIE_SPEED_50)
2414				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2415			if (mask & DRM_PCIE_SPEED_80)
2416				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2417		} else {
2418			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2419		}
2420	}
2421	if (adev->pm.pcie_mlw_mask == 0) {
2422		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2423		if (!ret) {
2424			switch (mask) {
2425			case 32:
2426				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2427							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2428							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2429							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2430							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2431							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2432							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2433				break;
2434			case 16:
2435				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2436							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2437							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2438							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2439							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2440							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2441				break;
2442			case 12:
2443				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2444							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2445							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2446							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2447							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2448				break;
2449			case 8:
2450				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2451							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2452							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2453							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2454				break;
2455			case 4:
2456				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2457							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2458							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2459				break;
2460			case 2:
2461				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2462							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2463				break;
2464			case 1:
2465				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2466				break;
2467			default:
2468				break;
2469			}
2470		} else {
2471			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2472		}
2473	}
2474}
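
/*
 * Illustrative sketch, not part of the original file: consuming the masks
 * filled in by amdgpu_get_pcie_info(). A hypothetical power-management
 * client would test individual CAIL_* bits to find the fastest link speed
 * both the ASIC and the platform support.
 */
static unsigned example_max_pcie_gen(struct amdgpu_device *adev)
{
	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		return 3;
	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		return 2;
	return 1;
}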
2475
2476/*
2477 * Debugfs
2478 */
2479int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
2480			     const struct drm_info_list *files,
2481			     unsigned nfiles)
2482{
2483	unsigned i;
2484
2485	for (i = 0; i < adev->debugfs_count; i++) {
2486		if (adev->debugfs[i].files == files) {
2487			/* Already registered */
2488			return 0;
2489		}
2490	}
2491
2492	i = adev->debugfs_count + 1;
2493	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
2494		DRM_ERROR("Reached maximum number of debugfs components.\n");
2495		DRM_ERROR("Report this so we can increase "
2496			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
2497		return -EINVAL;
2498	}
2499	adev->debugfs[adev->debugfs_count].files = files;
2500	adev->debugfs[adev->debugfs_count].num_files = nfiles;
2501	adev->debugfs_count = i;
2502#if defined(CONFIG_DEBUG_FS)
2503	drm_debugfs_create_files(files, nfiles,
2504				 adev->ddev->primary->debugfs_root,
2505				 adev->ddev->primary);
2506#endif
2507	return 0;
2508}
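
/*
 * Illustrative sketch, not part of the original file: registering a
 * component through amdgpu_debugfs_add_files(). The show callback and
 * file name are hypothetical; real users pass their own drm_info_list
 * arrays (see the ring and fence debugfs code elsewhere in the driver).
 */
static int example_debugfs_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct amdgpu_device *adev = node->minor->dev->dev_private;

	seq_printf(m, "gpu_reset_counter: %d\n",
		   atomic_read(&adev->gpu_reset_counter));
	return 0;
}

static const struct drm_info_list example_debugfs_list[] = {
	{ "amdgpu_example", example_debugfs_show, 0, NULL },
};

/* registered once at init time, e.g.:
 *	amdgpu_debugfs_add_files(adev, example_debugfs_list, 1);
 */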
2509
2510static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
2511{
2512#if defined(CONFIG_DEBUG_FS)
2513	unsigned i;
2514
2515	for (i = 0; i < adev->debugfs_count; i++) {
2516		drm_debugfs_remove_files(adev->debugfs[i].files,
2517					 adev->debugfs[i].num_files,
2518					 adev->ddev->primary);
2519	}
2520#endif
2521}
2522
2523#if defined(CONFIG_DEBUG_FS)
2524
2525static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2526					size_t size, loff_t *pos)
2527{
2528	struct amdgpu_device *adev = file_inode(f)->i_private;
2529	ssize_t result = 0;
2530	int r;
2531	bool pm_pg_lock, use_bank;
2532	unsigned instance_bank, sh_bank, se_bank;
2533
2534	if (size & 0x3 || *pos & 0x3)
2535		return -EINVAL;
2536
2537	/* are we reading registers for which a PG lock is necessary? */
2538	pm_pg_lock = (*pos >> 23) & 1;
2539
2540	if (*pos & (1ULL << 62)) {
2541		se_bank = (*pos >> 24) & 0x3FF;
2542		sh_bank = (*pos >> 34) & 0x3FF;
2543		instance_bank = (*pos >> 44) & 0x3FF;
2544
2545		if (se_bank == 0x3FF)
2546			se_bank = 0xFFFFFFFF;
2547		if (sh_bank == 0x3FF)
2548			sh_bank = 0xFFFFFFFF;
2549		if (instance_bank == 0x3FF)
2550			instance_bank = 0xFFFFFFFF;
2551		use_bank = 1;
2552	} else {
2553		use_bank = 0;
2554	}
2555
2556	*pos &= 0x3FFFF;
2557
2558	if (use_bank) {
2559		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2560		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
2561			return -EINVAL;
2562		mutex_lock(&adev->grbm_idx_mutex);
2563		amdgpu_gfx_select_se_sh(adev, se_bank,
2564					sh_bank, instance_bank);
2565	}
2566
2567	if (pm_pg_lock)
2568		mutex_lock(&adev->pm.mutex);
2569
2570	while (size) {
2571		uint32_t value;
2572
2573		if (*pos > adev->rmmio_size)
2574			goto end;
2575
2576		value = RREG32(*pos >> 2);
2577		r = put_user(value, (uint32_t *)buf);
2578		if (r) {
2579			result = r;
2580			goto end;
2581		}
2582
2583		result += 4;
2584		buf += 4;
2585		*pos += 4;
2586		size -= 4;
2587	}
2588
2589end:
2590	if (use_bank) {
2591		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2592		mutex_unlock(&adev->grbm_idx_mutex);
2593	}
2594
2595	if (pm_pg_lock)
2596		mutex_unlock(&adev->pm.mutex);
2597
2598	return result;
2599}
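
/*
 * Illustrative note, not part of the original file: the file offset used
 * by the register file above is an encoded command word rather than a
 * plain byte offset. Mirroring the decode at the top of
 * amdgpu_debugfs_regs_read(), a banked read could be set up from
 * userspace roughly like this (sketch, field values are examples):
 *
 *	loff_t pos = reg_byte_offset & 0x3FFFF;
 *	pos |= 1ULL << 62;			// enable SE/SH/instance banking
 *	pos |= (loff_t)(se & 0x3FF) << 24;	// 0x3FF selects "all"
 *	pos |= (loff_t)(sh & 0x3FF) << 34;
 *	pos |= (loff_t)(instance & 0x3FF) << 44;
 *	pos |= 1ULL << 23;			// take the PM/PG lock
 *	pread(fd, &value, 4, pos);
 */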
2600
2601static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
2602					 size_t size, loff_t *pos)
2603{
2604	struct amdgpu_device *adev = file_inode(f)->i_private;
2605	ssize_t result = 0;
2606	int r;
2607	bool pm_pg_lock, use_bank;
2608	unsigned instance_bank, sh_bank, se_bank;
2609
2610	if (size & 0x3 || *pos & 0x3)
2611		return -EINVAL;
2612
2613	/* are we writing registers for which a PG lock is necessary? */
2614	pm_pg_lock = (*pos >> 23) & 1;
2615
2616	if (*pos & (1ULL << 62)) {
2617		se_bank = (*pos >> 24) & 0x3FF;
2618		sh_bank = (*pos >> 34) & 0x3FF;
2619		instance_bank = (*pos >> 44) & 0x3FF;
2620
2621		if (se_bank == 0x3FF)
2622			se_bank = 0xFFFFFFFF;
2623		if (sh_bank == 0x3FF)
2624			sh_bank = 0xFFFFFFFF;
2625		if (instance_bank == 0x3FF)
2626			instance_bank = 0xFFFFFFFF;
2627		use_bank = 1;
2628	} else {
2629		use_bank = 0;
2630	}
2631
2632	*pos &= 0x3FFFF;
2633
2634	if (use_bank) {
2635		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2636		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
2637			return -EINVAL;
2638		mutex_lock(&adev->grbm_idx_mutex);
2639		amdgpu_gfx_select_se_sh(adev, se_bank,
2640					sh_bank, instance_bank);
2641	}
2642
2643	if (pm_pg_lock)
2644		mutex_lock(&adev->pm.mutex);
2645
2646	while (size) {
2647		uint32_t value;
2648
2649		if (*pos > adev->rmmio_size)
2650			break;
2651
2652		r = get_user(value, (uint32_t *)buf);
2653		if (r) {
2654			result = r;
			/* break rather than return, so the bank select and
			 * the locks taken above are restored and released */
			break;
		}
2655
2656		WREG32(*pos >> 2, value);
2657
2658		result += 4;
2659		buf += 4;
2660		*pos += 4;
2661		size -= 4;
2662	}
2663
2664	if (use_bank) {
2665		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2666		mutex_unlock(&adev->grbm_idx_mutex);
2667	}
2668
2669	if (pm_pg_lock)
2670		mutex_unlock(&adev->pm.mutex);
2671
2672	return result;
2673}
2674
2675static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
2676					size_t size, loff_t *pos)
2677{
2678	struct amdgpu_device *adev = file_inode(f)->i_private;
2679	ssize_t result = 0;
2680	int r;
2681
2682	if (size & 0x3 || *pos & 0x3)
2683		return -EINVAL;
2684
2685	while (size) {
2686		uint32_t value;
2687
2688		value = RREG32_PCIE(*pos >> 2);
2689		r = put_user(value, (uint32_t *)buf);
2690		if (r)
2691			return r;
2692
2693		result += 4;
2694		buf += 4;
2695		*pos += 4;
2696		size -= 4;
2697	}
2698
2699	return result;
2700}
2701
2702static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
2703					 size_t size, loff_t *pos)
2704{
2705	struct amdgpu_device *adev = file_inode(f)->i_private;
2706	ssize_t result = 0;
2707	int r;
2708
2709	if (size & 0x3 || *pos & 0x3)
2710		return -EINVAL;
2711
2712	while (size) {
2713		uint32_t value;
2714
2715		r = get_user(value, (uint32_t *)buf);
2716		if (r)
2717			return r;
2718
2719		WREG32_PCIE(*pos >> 2, value);
2720
2721		result += 4;
2722		buf += 4;
2723		*pos += 4;
2724		size -= 4;
2725	}
2726
2727	return result;
2728}
2729
2730static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
2731					size_t size, loff_t *pos)
2732{
2733	struct amdgpu_device *adev = file_inode(f)->i_private;
2734	ssize_t result = 0;
2735	int r;
2736
2737	if (size & 0x3 || *pos & 0x3)
2738		return -EINVAL;
2739
2740	while (size) {
2741		uint32_t value;
2742
2743		value = RREG32_DIDT(*pos >> 2);
2744		r = put_user(value, (uint32_t *)buf);
2745		if (r)
2746			return r;
2747
2748		result += 4;
2749		buf += 4;
2750		*pos += 4;
2751		size -= 4;
2752	}
2753
2754	return result;
2755}
2756
2757static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
2758					 size_t size, loff_t *pos)
2759{
2760	struct amdgpu_device *adev = file_inode(f)->i_private;
2761	ssize_t result = 0;
2762	int r;
2763
2764	if (size & 0x3 || *pos & 0x3)
2765		return -EINVAL;
2766
2767	while (size) {
2768		uint32_t value;
2769
2770		r = get_user(value, (uint32_t *)buf);
2771		if (r)
2772			return r;
2773
2774		WREG32_DIDT(*pos >> 2, value);
2775
2776		result += 4;
2777		buf += 4;
2778		*pos += 4;
2779		size -= 4;
2780	}
2781
2782	return result;
2783}
2784
2785static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
2786					size_t size, loff_t *pos)
2787{
2788	struct amdgpu_device *adev = file_inode(f)->i_private;
2789	ssize_t result = 0;
2790	int r;
2791
2792	if (size & 0x3 || *pos & 0x3)
2793		return -EINVAL;
2794
2795	while (size) {
2796		uint32_t value;
2797
2798		value = RREG32_SMC(*pos);
2799		r = put_user(value, (uint32_t *)buf);
2800		if (r)
2801			return r;
2802
2803		result += 4;
2804		buf += 4;
2805		*pos += 4;
2806		size -= 4;
2807	}
2808
2809	return result;
2810}
2811
2812static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
2813					 size_t size, loff_t *pos)
2814{
2815	struct amdgpu_device *adev = file_inode(f)->i_private;
2816	ssize_t result = 0;
2817	int r;
2818
2819	if (size & 0x3 || *pos & 0x3)
2820		return -EINVAL;
2821
2822	while (size) {
2823		uint32_t value;
2824
2825		r = get_user(value, (uint32_t *)buf);
2826		if (r)
2827			return r;
2828
2829		WREG32_SMC(*pos, value);
2830
2831		result += 4;
2832		buf += 4;
2833		*pos += 4;
2834		size -= 4;
2835	}
2836
2837	return result;
2838}
2839
2840static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
2841					size_t size, loff_t *pos)
2842{
2843	struct amdgpu_device *adev = file_inode(f)->i_private;
2844	ssize_t result = 0;
2845	int r;
2846	uint32_t *config, no_regs = 0;
2847
2848	if (size & 0x3 || *pos & 0x3)
2849		return -EINVAL;
2850
2851	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
2852	if (!config)
2853		return -ENOMEM;
2854
2855	/* version, increment each time something is added */
2856	config[no_regs++] = 2;
2857	config[no_regs++] = adev->gfx.config.max_shader_engines;
2858	config[no_regs++] = adev->gfx.config.max_tile_pipes;
2859	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
2860	config[no_regs++] = adev->gfx.config.max_sh_per_se;
2861	config[no_regs++] = adev->gfx.config.max_backends_per_se;
2862	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
2863	config[no_regs++] = adev->gfx.config.max_gprs;
2864	config[no_regs++] = adev->gfx.config.max_gs_threads;
2865	config[no_regs++] = adev->gfx.config.max_hw_contexts;
2866	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
2867	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
2868	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
2869	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
2870	config[no_regs++] = adev->gfx.config.num_tile_pipes;
2871	config[no_regs++] = adev->gfx.config.backend_enable_mask;
2872	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
2873	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
2874	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
2875	config[no_regs++] = adev->gfx.config.num_gpus;
2876	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
2877	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
2878	config[no_regs++] = adev->gfx.config.gb_addr_config;
2879	config[no_regs++] = adev->gfx.config.num_rbs;
2880
2881	/* rev==1 */
2882	config[no_regs++] = adev->rev_id;
2883	config[no_regs++] = adev->pg_flags;
2884	config[no_regs++] = adev->cg_flags;
2885
2886	/* rev==2 */
2887	config[no_regs++] = adev->family;
2888	config[no_regs++] = adev->external_rev_id;
2889
2890	while (size && (*pos < no_regs * 4)) {
2891		uint32_t value;
2892
2893		value = config[*pos >> 2];
2894		r = put_user(value, (uint32_t *)buf);
2895		if (r) {
2896			kfree(config);
2897			return r;
2898		}
2899
2900		result += 4;
2901		buf += 4;
2902		*pos += 4;
2903		size -= 4;
2904	}
2905
2906	kfree(config);
2907	return result;
2908}
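
/*
 * Illustrative sketch, not part of the original file: reading the config
 * blob from userspace. The debugfs path is an assumption (it depends on
 * the debugfs mount point and the DRM minor number); the dword layout
 * matches the order written above, with config[0] holding the version.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uint32_t cfg[32];
 *		int fd = open("/sys/kernel/debug/dri/0/amdgpu_gca_config",
 *			      O_RDONLY);
 *
 *		if (fd < 0 || read(fd, cfg, sizeof(cfg)) < 8)
 *			return 1;
 *		printf("version %u, max_shader_engines %u\n",
 *		       cfg[0], cfg[1]);
 *		close(fd);
 *		return 0;
 *	}
 */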
2909
2910static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
2911					size_t size, loff_t *pos)
2912{
2913	struct amdgpu_device *adev = file_inode(f)->i_private;
2914	int idx, r;
2915	int32_t value;
2916
2917	if (size != 4 || *pos & 0x3)
2918		return -EINVAL;
2919
2920	/* convert offset to sensor number */
2921	idx = *pos >> 2;
2922
2923	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
2924		r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value);
2925	else
2926		return -EINVAL;
2927
2928	if (!r)
2929		r = put_user(value, (int32_t *)buf);
2930
2931	return !r ? 4 : r;
2932}
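
/*
 * Illustrative note, not part of the original file: each sensor is
 * exposed as a single 32-bit value at offset sensor_index * 4, where the
 * index values come from the powerplay sensor enum. A userspace reader
 * is therefore just (sketch):
 *
 *	int32_t value;
 *	pread(fd, &value, 4, (loff_t)sensor_index * 4);
 */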
2933
2934static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
2935					size_t size, loff_t *pos)
2936{
2937	struct amdgpu_device *adev = f->f_inode->i_private;
2938	int r, x;
2939	ssize_t result = 0;
2940	uint32_t offset, se, sh, cu, wave, simd, data[32];
2941
2942	if (size & 3 || *pos & 3)
2943		return -EINVAL;
2944
2945	/* decode offset */
2946	offset = (*pos & 0x7F);
2947	se = ((*pos >> 7) & 0xFF);
2948	sh = ((*pos >> 15) & 0xFF);
2949	cu = ((*pos >> 23) & 0xFF);
2950	wave = ((*pos >> 31) & 0xFF);
2951	simd = ((*pos >> 37) & 0xFF);
2952
2953	/* switch to the specific se/sh/cu */
2954	mutex_lock(&adev->grbm_idx_mutex);
2955	amdgpu_gfx_select_se_sh(adev, se, sh, cu);
2956
2957	x = 0;
2958	if (adev->gfx.funcs->read_wave_data)
2959		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
2960
2961	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
2962	mutex_unlock(&adev->grbm_idx_mutex);
2963
2964	if (!x)
2965		return -EINVAL;
2966
2967	while (size && (offset < x * 4)) {
2968		uint32_t value;
2969
2970		value = data[offset >> 2];
2971		r = put_user(value, (uint32_t *)buf);
2972		if (r)
2973			return r;
2974
2975		result += 4;
2976		buf += 4;
2977		offset += 4;
2978		size -= 4;
2979	}
2980
2981	return result;
2982}
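
/*
 * Illustrative note, not part of the original file: composing the wave
 * file offset, mirroring the decode at the top of
 * amdgpu_debugfs_wave_read() (sketch):
 *
 *	loff_t pos = (byte_offset & 0x7F) |
 *		     ((loff_t)(se   & 0xFF) << 7)  |
 *		     ((loff_t)(sh   & 0xFF) << 15) |
 *		     ((loff_t)(cu   & 0xFF) << 23) |
 *		     ((loff_t)(wave & 0xFF) << 31) |
 *		     ((loff_t)(simd & 0xFF) << 37);
 */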
2983
2984static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
2985					size_t size, loff_t *pos)
2986{
2987	struct amdgpu_device *adev = f->f_inode->i_private;
2988	int r;
2989	ssize_t result = 0;
2990	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
2991
2992	if (size & 3 || *pos & 3)
2993		return -EINVAL;
2994
2995	/* decode offset */
2996	offset = (*pos & 0xFFF);       /* in dwords */
2997	se = ((*pos >> 12) & 0xFF);
2998	sh = ((*pos >> 20) & 0xFF);
2999	cu = ((*pos >> 28) & 0xFF);
3000	wave = ((*pos >> 36) & 0xFF);
3001	simd = ((*pos >> 44) & 0xFF);
3002	thread = ((*pos >> 52) & 0xFF);
3003	bank = ((*pos >> 60) & 1);
3004
3005	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3006	if (!data)
3007		return -ENOMEM;
3008
3009	/* switch to the specific se/sh/cu */
3010	mutex_lock(&adev->grbm_idx_mutex);
3011	amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3012
3013	if (bank == 0) {
3014		if (adev->gfx.funcs->read_wave_vgprs)
3015			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3016	} else {
3017		if (adev->gfx.funcs->read_wave_sgprs)
3018			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3019	}
3020
3021	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3022	mutex_unlock(&adev->grbm_idx_mutex);
3023
3024	while (size) {
3025		uint32_t value;
3026
3027		value = data[offset++];
3028		r = put_user(value, (uint32_t *)buf);
3029		if (r) {
3030			result = r;
3031			goto err;
3032		}
3033
3034		result += 4;
3035		buf += 4;
3036		size -= 4;
3037	}
3038
3039err:
3040	kfree(data);
3041	return result;
3042}
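
/*
 * Illustrative note, not part of the original file: the GPR file uses a
 * wider offset encoding than the wave file, and its offset field counts
 * dwords rather than bytes; mirroring the decode above (sketch):
 *
 *	loff_t pos = (dword_offset & 0xFFF) |
 *		     ((loff_t)(se     & 0xFF) << 12) |
 *		     ((loff_t)(sh     & 0xFF) << 20) |
 *		     ((loff_t)(cu     & 0xFF) << 28) |
 *		     ((loff_t)(wave   & 0xFF) << 36) |
 *		     ((loff_t)(simd   & 0xFF) << 44) |
 *		     ((loff_t)(thread & 0xFF) << 52) |
 *		     ((loff_t)(bank   & 1)    << 60);	// 0 = VGPRs, 1 = SGPRs
 */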
3043
3044static const struct file_operations amdgpu_debugfs_regs_fops = {
3045	.owner = THIS_MODULE,
3046	.read = amdgpu_debugfs_regs_read,
3047	.write = amdgpu_debugfs_regs_write,
3048	.llseek = default_llseek
3049};
3050static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3051	.owner = THIS_MODULE,
3052	.read = amdgpu_debugfs_regs_didt_read,
3053	.write = amdgpu_debugfs_regs_didt_write,
3054	.llseek = default_llseek
3055};
3056static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3057	.owner = THIS_MODULE,
3058	.read = amdgpu_debugfs_regs_pcie_read,
3059	.write = amdgpu_debugfs_regs_pcie_write,
3060	.llseek = default_llseek
3061};
3062static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3063	.owner = THIS_MODULE,
3064	.read = amdgpu_debugfs_regs_smc_read,
3065	.write = amdgpu_debugfs_regs_smc_write,
3066	.llseek = default_llseek
3067};
3068
3069static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3070	.owner = THIS_MODULE,
3071	.read = amdgpu_debugfs_gca_config_read,
3072	.llseek = default_llseek
3073};
3074
3075static const struct file_operations amdgpu_debugfs_sensors_fops = {
3076	.owner = THIS_MODULE,
3077	.read = amdgpu_debugfs_sensor_read,
3078	.llseek = default_llseek
3079};
3080
3081static const struct file_operations amdgpu_debugfs_wave_fops = {
3082	.owner = THIS_MODULE,
3083	.read = amdgpu_debugfs_wave_read,
3084	.llseek = default_llseek
3085};
3086static const struct file_operations amdgpu_debugfs_gpr_fops = {
3087	.owner = THIS_MODULE,
3088	.read = amdgpu_debugfs_gpr_read,
3089	.llseek = default_llseek
3090};
3091
3092static const struct file_operations *debugfs_regs[] = {
3093	&amdgpu_debugfs_regs_fops,
3094	&amdgpu_debugfs_regs_didt_fops,
3095	&amdgpu_debugfs_regs_pcie_fops,
3096	&amdgpu_debugfs_regs_smc_fops,
3097	&amdgpu_debugfs_gca_config_fops,
3098	&amdgpu_debugfs_sensors_fops,
3099	&amdgpu_debugfs_wave_fops,
3100	&amdgpu_debugfs_gpr_fops,
3101};
3102
3103static const char *debugfs_regs_names[] = {
3104	"amdgpu_regs",
3105	"amdgpu_regs_didt",
3106	"amdgpu_regs_pcie",
3107	"amdgpu_regs_smc",
3108	"amdgpu_gca_config",
3109	"amdgpu_sensors",
3110	"amdgpu_wave",
3111	"amdgpu_gpr",
3112};
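
/*
 * Illustrative note, not part of the original file: each entry above is
 * created under the DRM debugfs directory for the device, typically
 * /sys/kernel/debug/dri/<minor>/ (the exact path depends on where
 * debugfs is mounted). A register can then be peeked with dd, e.g.:
 *
 *	dd if=/sys/kernel/debug/dri/0/amdgpu_regs bs=4 count=1 \
 *	   skip=$((reg_byte_offset / 4)) 2>/dev/null | xxd
 */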
3113
3114static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3115{
3116	struct drm_minor *minor = adev->ddev->primary;
3117	struct dentry *ent, *root = minor->debugfs_root;
3118	unsigned i, j;
3119
3120	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3121		ent = debugfs_create_file(debugfs_regs_names[i],
3122					  S_IFREG | S_IRUGO, root,
3123					  adev, debugfs_regs[i]);
3124		if (IS_ERR(ent)) {
3125			for (j = 0; j < i; j++) {
3126				debugfs_remove(adev->debugfs_regs[j]);
3127				adev->debugfs_regs[j] = NULL;
3128			}
3129			return PTR_ERR(ent);
3130		}
3131
3132		if (!i)
3133			i_size_write(ent->d_inode, adev->rmmio_size);
3134		adev->debugfs_regs[i] = ent;
3135	}
3136
3137	return 0;
3138}
3139
3140static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3141{
3142	unsigned i;
3143
3144	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3145		if (adev->debugfs_regs[i]) {
3146			debugfs_remove(adev->debugfs_regs[i]);
3147			adev->debugfs_regs[i] = NULL;
3148		}
3149	}
3150}
3151
3152int amdgpu_debugfs_init(struct drm_minor *minor)
3153{
3154	return 0;
3155}
3156
3157void amdgpu_debugfs_cleanup(struct drm_minor *minor)
3158{
3159}
3160#else
3161static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3162{
3163	return 0;
3164}
3165static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
3166#endif