/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000
#define AMDGPU_MAX_RETRY_LIMIT		2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)

static const struct drm_driver amdgpu_kms_driver;

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
	"NAVI10",
	"CYAN_SKILLFISH",
	"NAVI14",
	"NAVI12",
	"SIENNA_CICHLID",
	"NAVY_FLOUNDER",
	"VANGOGH",
	"DIMGREY_CAVEFISH",
	"BEIGE_GOBY",
	"YELLOW_CARP",
	"IP DISCOVERY",
	"LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
		amdgpu_device_get_pcie_replay_count, NULL);
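
/*
 * Example (sketch, not part of the driver): the attribute above is exposed
 * under the PCI device's sysfs directory, so on a typical system it can be
 * read with something like
 *
 *	cat /sys/class/drm/card0/device/pcie_replay_count
 *
 * The card index depends on the system; the path is illustrative only. The
 * same pattern applies to the product_name, product_number and serial_number
 * attributes defined below.
 */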

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * DOC: product_name
 *
 * The amdgpu driver provides a sysfs API for reporting the product name
 * for the device.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_name(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->product_name);
}

static DEVICE_ATTR(product_name, S_IRUGO,
		amdgpu_device_get_product_name, NULL);

/**
 * DOC: product_number
 *
 * The amdgpu driver provides a sysfs API for reporting the part number
 * for the device.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->product_number);
}

static DEVICE_ATTR(product_number, S_IRUGO,
		amdgpu_device_get_product_number, NULL);

/**
 * DOC: serial_number
 *
 * The amdgpu driver provides a sysfs API for reporting the serial number
 * for the device.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_serial_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->serial);
}

static DEVICE_ATTR(serial_number, S_IRUGO,
		amdgpu_device_get_serial_number, NULL);

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise returns false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * smart shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
	return (amdgpu_device_supports_boco(dev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size in bytes, @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}

	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}
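
/*
 * Example (sketch, not part of the driver): reading one dword from VRAM
 * offset 0 through the MM_INDEX/MM_DATA window could look like
 *
 *	uint32_t val;
 *
 *	amdgpu_device_mm_access(adev, 0, &val, sizeof(val), false);
 *
 * Note that @pos and @size must be dword aligned, as the BUG_ON above
 * enforces.
 */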

/**
 * amdgpu_device_aper_access - access vram by vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size in bytes, @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes that have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			mb();
			memcpy_fromio(buf, addr, count);
		}
	}

	return count;
#else
	return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size in bytes, @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try using the VRAM aperture to access VRAM first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM to access the rest of VRAM */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}
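
/*
 * Sketch of the split above: the aperture fast path only covers the
 * CPU-visible part of VRAM, and the MM_INDEX/MM_DATA slow path handles the
 * remainder, so a caller (e.g. a debugfs VRAM dump) can copy an arbitrary
 * range with a single call:
 *
 *	amdgpu_device_vram_access(adev, pos, buf, size, false);
 */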

/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_domain->sem))
			up_read(&adev->reset_domain->sem);
		else
			lockdep_assert_held(&adev->reset_domain->sem);
	}
#endif
	return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}
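
/*
 * Sketch: most of the driver does not call amdgpu_device_rreg()/wreg()
 * directly but goes through the RREG32()/WREG32() family of macros, which
 * (assuming the usual definitions in amdgpu.h) expand to something like
 *
 *	#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
 *	#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
 *
 * so the KIQ/SR-IOV routing and the out-of-range fallback to pcie_rreg above
 * apply everywhere.
 */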

/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */
/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

/**
 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 *
 * This function is invoked only for the debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
	} else if ((reg * 4) >= adev->rmmio_size) {
		adev->pcie_wreg(adev, reg * 4, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 pcie_index, u32 pcie_data,
				u32 reg_addr)
{
	unsigned long flags;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}
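
/*
 * Sketch of the index/data pattern used above: one MMIO register selects an
 * internal address and a second one moves the data. A per-ASIC pcie_rreg
 * callback would typically wrap this helper with the ASIC's actual
 * index/data offsets (names below are illustrative, not taken from this
 * file):
 *
 *	static u32 soc_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		return amdgpu_device_indirect_rreg(adev, PCIE_INDEX2,
 *						   PCIE_DATA2, reg);
 *	}
 *
 * The dummy readl() after writing the index register flushes the posted
 * write before the data register is accessed.
 */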

/**
 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 pcie_index, u32 pcie_data,
				  u32 reg_addr)
{
	unsigned long flags;
	u64 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 pcie_index, u32 pcie_data,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 pcie_index, u32 pcie_data,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	amdgpu_asic_pre_asic_init(adev);

	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
		return amdgpu_atomfirmware_asic_init(adev, true);
	else
		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
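
/*
 * Example (sketch): golden register tables are flat arrays of
 * {register, and_mask, or_mask} triples. Register names and values below are
 * illustrative only:
 *
 *	static const u32 golden_settings_example[] = {
 *		mmEXAMPLE_REG0, 0xffffffff, 0x00000100,	// full overwrite
 *		mmEXAMPLE_REG1, 0x0000000f, 0x00000002,	// update low nibble
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *						ARRAY_SIZE(golden_settings_example));
 *
 * An and_mask of 0xffffffff overwrites the register with or_mask directly,
 * as the fast path above shows.
 */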

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{

	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	amdgpu_asic_init_doorbell_index(adev);

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	if (adev->enable_mes) {
		adev->doorbell.num_doorbells =
			adev->doorbell.size / sizeof(u32);
	} else {
		adev->doorbell.num_doorbells =
			min_t(u32, adev->doorbell.size / sizeof(u32),
			      adev->doorbell_index.max_assignment + 1);
		if (adev->doorbell.num_doorbells == 0)
			return -EINVAL;

		/* For Vega, reserve and map two pages on the doorbell BAR since the
		 * SDMA paging queue doorbell uses the second page. The
		 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
		 * doorbells are in the first page. So with the paging queue enabled,
		 * num_doorbells needs to grow by one page (0x400 dwords).
		 */
		if (adev->asic_type >= CHIP_VEGA10)
			adev->doorbell.num_doorbells += 0x400;
	}

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
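
/*
 * Usage sketch (not from this file): a ring or fence implementation would
 * reserve a slot, read the CPU copy via adev->wb.wb[] and hand the GPU the
 * matching bus address, e.g.
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *		u32 cpu_val = adev->wb.wb[wb];
 *
 *		...
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 *
 * The index returned is a dword offset (each slot is 8 dwords / 256 bits),
 * hence the * 4 when computing the GPU address.
 */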

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if a post is needed because a hw reset was performed.
 * Returns true if need or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* For FIJI: In the whole-GPU pass-through virtualization case, after a
		 * VM reboot some old SMC firmware still needs the driver to do a vPost,
		 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
		 * this flaw, so we force vPost to be executed for SMC versions below
		 * 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
	switch (amdgpu_aspm) {
	case -1:
		break;
	case 0:
		return false;
	case 1:
		return true;
	default:
		return false;
	}
	return pcie_aspm_enabled(adev->pdev);
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
		bool state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
		(amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		(amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
	if (!(adev->flags & AMD_IS_APU) ||
	    adev->asic_type < CHIP_RAVEN)
		return 0;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		break;
	case CHIP_RENOIR:
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case CHIP_YELLOW_CARP:
		break;
	case CHIP_CYAN_SKILLFISH:
		if ((adev->pdev->device == 0x13FE) ||
		    (adev->pdev->device == 0x143F))
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
		break;
	default:
		break;
	}

	return 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	}

	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
		amdgpu_reset_method = -1;
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	return 0;
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver.  Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block version is equal to or greater than the
 * requested @major.@minor version, and 1 if it is smaller or the IP
 * block doesn't exist.
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}
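
/*
 * Usage sketch (illustrative, not from this file): gating a feature on a
 * minimum IP block version could look like
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       8, 1) == 0) {
 *		// GFX 8.1 or newer is present
 *	}
 *
 * Note the inverted sense: 0 means "equal or greater".
 */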
1829
1830/**
1831 * amdgpu_device_ip_block_add
1832 *
1833 * @adev: amdgpu_device pointer
1834 * @ip_block_version: pointer to the IP to add
1835 *
1836 * Adds the IP block driver information to the collection of IPs
1837 * on the asic.
1838 */
1839int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1840			       const struct amdgpu_ip_block_version *ip_block_version)
1841{
1842	if (!ip_block_version)
1843		return -EINVAL;
1844
1845	switch (ip_block_version->type) {
1846	case AMD_IP_BLOCK_TYPE_VCN:
1847		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1848			return 0;
1849		break;
1850	case AMD_IP_BLOCK_TYPE_JPEG:
1851		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1852			return 0;
1853		break;
1854	default:
1855		break;
1856	}
1857
1858	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1859		  ip_block_version->funcs->name);
1860
1861	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1862
1863	return 0;
1864}
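
/*
 * Usage sketch (illustrative, not part of the driver): SoC-specific code
 * registers its IP blocks in hardware init order.  The ip block version
 * identifier below is hypothetical.
 */
#if 0
	r = amdgpu_device_ip_block_add(adev, &example_common_ip_block);
	if (r)
		return r;
#endif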
1865
1866/**
1867 * amdgpu_device_enable_virtual_display - enable virtual display feature
1868 *
1869 * @adev: amdgpu_device pointer
1870 *
1871 * Enables the virtual display feature if the user has enabled it via
1872 * the module parameter virtual_display.  This feature provides virtual
1873 * display hardware on headless boards or in virtualized environments.
1874 * This function parses and validates the configuration string specified by
1875 * the user and configures the virtual display configuration (number of
1876 * virtual connectors, crtcs, etc.) specified.
1877 */
1878static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1879{
1880	adev->enable_virtual_display = false;
1881
1882	if (amdgpu_virtual_display) {
1883		const char *pci_address_name = pci_name(adev->pdev);
1884		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1885
1886		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1887		pciaddstr_tmp = pciaddstr;
1888		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1889			pciaddname = strsep(&pciaddname_tmp, ",");
1890			if (!strcmp("all", pciaddname)
1891			    || !strcmp(pci_address_name, pciaddname)) {
1892				long num_crtc;
1893				int res = -1;
1894
1895				adev->enable_virtual_display = true;
1896
1897				if (pciaddname_tmp)
1898					res = kstrtol(pciaddname_tmp, 10,
1899						      &num_crtc);
1900
1901				if (!res) {
1902					if (num_crtc < 1)
1903						num_crtc = 1;
1904					if (num_crtc > 6)
1905						num_crtc = 6;
1906					adev->mode_info.num_crtc = num_crtc;
1907				} else {
1908					adev->mode_info.num_crtc = 1;
1909				}
1910				break;
1911			}
1912		}
1913
1914		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1915			 amdgpu_virtual_display, pci_address_name,
1916			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1917
1918		kfree(pciaddstr);
1919	}
1920}
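
/*
 * Example of the module parameter syntax parsed above (the PCI address
 * below is hypothetical):
 *   amdgpu.virtual_display=0000:04:00.0,2  - two virtual crtcs on that GPU
 *   amdgpu.virtual_display=all             - enable on every amdgpu device,
 *                                            defaulting to a single crtc
 */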
1921
1922void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
1923{
1924	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
1925		adev->mode_info.num_crtc = 1;
1926		adev->enable_virtual_display = true;
1927		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
1928			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1929	}
1930}
1931
1932/**
1933 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1934 *
1935 * @adev: amdgpu_device pointer
1936 *
1937 * Parses the asic configuration parameters specified in the gpu info
1938 * firmware and makes them available to the driver for use in configuring
1939 * the asic.
1940 * Returns 0 on success, -EINVAL on failure.
1941 */
1942static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1943{
1944	const char *chip_name;
1945	char fw_name[40];
1946	int err;
1947	const struct gpu_info_firmware_header_v1_0 *hdr;
1948
1949	adev->firmware.gpu_info_fw = NULL;
1950
1951	if (adev->mman.discovery_bin) {
1952		/*
1953		 * FIXME: The bounding box is still needed by Navi12, so
1954		 * temporarily read it from gpu_info firmware. Should be dropped
1955		 * when DAL no longer needs it.
1956		 */
1957		if (adev->asic_type != CHIP_NAVI12)
1958			return 0;
1959	}
1960
1961	switch (adev->asic_type) {
1962	default:
1963		return 0;
1964	case CHIP_VEGA10:
1965		chip_name = "vega10";
1966		break;
1967	case CHIP_VEGA12:
1968		chip_name = "vega12";
1969		break;
1970	case CHIP_RAVEN:
1971		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1972			chip_name = "raven2";
1973		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1974			chip_name = "picasso";
1975		else
1976			chip_name = "raven";
1977		break;
1978	case CHIP_ARCTURUS:
1979		chip_name = "arcturus";
1980		break;
1981	case CHIP_NAVI12:
1982		chip_name = "navi12";
1983		break;
1984	}
1985
1986	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1987	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1988	if (err) {
1989		dev_err(adev->dev,
1990			"Failed to load gpu_info firmware \"%s\"\n",
1991			fw_name);
1992		goto out;
1993	}
1994	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1995	if (err) {
1996		dev_err(adev->dev,
1997			"Failed to validate gpu_info firmware \"%s\"\n",
1998			fw_name);
1999		goto out;
2000	}
2001
2002	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2003	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2004
2005	switch (hdr->version_major) {
2006	case 1:
2007	{
2008		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2009			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2010								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2011
2012		/*
2013		 * Should be dropped when DAL no longer needs it.
2014		 */
2015		if (adev->asic_type == CHIP_NAVI12)
2016			goto parse_soc_bounding_box;
2017
2018		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2019		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2020		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2021		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2022		adev->gfx.config.max_texture_channel_caches =
2023			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2024		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2025		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2026		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2027		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2028		adev->gfx.config.double_offchip_lds_buf =
2029			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2030		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2031		adev->gfx.cu_info.max_waves_per_simd =
2032			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2033		adev->gfx.cu_info.max_scratch_slots_per_cu =
2034			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2035		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2036		if (hdr->version_minor >= 1) {
2037			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2038				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2039									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2040			adev->gfx.config.num_sc_per_sh =
2041				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2042			adev->gfx.config.num_packer_per_sc =
2043				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2044		}
2045
2046parse_soc_bounding_box:
2047		/*
2048		 * SOC bounding box info is not integrated in the discovery table,
2049		 * so we always need to parse it from the gpu info firmware when needed.
2050		 */
2051		if (hdr->version_minor == 2) {
2052			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2053				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2054									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2055			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2056		}
2057		break;
2058	}
2059	default:
2060		dev_err(adev->dev,
2061			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2062		err = -EINVAL;
2063		goto out;
2064	}
2065out:
2066	return err;
2067}
2068
2069/**
2070 * amdgpu_device_ip_early_init - run early init for hardware IPs
2071 *
2072 * @adev: amdgpu_device pointer
2073 *
2074 * Early initialization pass for hardware IPs.  The hardware IPs that make
2075 * up each asic are discovered and each IP's early_init callback is run.  This
2076 * is the first stage in initializing the asic.
2077 * Returns 0 on success, negative error code on failure.
2078 */
2079static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2080{
2081	struct drm_device *dev = adev_to_drm(adev);
2082	struct pci_dev *parent;
2083	int i, r;
2084
2085	amdgpu_device_enable_virtual_display(adev);
2086
2087	if (amdgpu_sriov_vf(adev)) {
2088		r = amdgpu_virt_request_full_gpu(adev, true);
2089		if (r)
2090			return r;
2091	}
2092
2093	switch (adev->asic_type) {
2094#ifdef CONFIG_DRM_AMDGPU_SI
2095	case CHIP_VERDE:
2096	case CHIP_TAHITI:
2097	case CHIP_PITCAIRN:
2098	case CHIP_OLAND:
2099	case CHIP_HAINAN:
2100		adev->family = AMDGPU_FAMILY_SI;
2101		r = si_set_ip_blocks(adev);
2102		if (r)
2103			return r;
2104		break;
2105#endif
2106#ifdef CONFIG_DRM_AMDGPU_CIK
2107	case CHIP_BONAIRE:
2108	case CHIP_HAWAII:
2109	case CHIP_KAVERI:
2110	case CHIP_KABINI:
2111	case CHIP_MULLINS:
2112		if (adev->flags & AMD_IS_APU)
2113			adev->family = AMDGPU_FAMILY_KV;
2114		else
2115			adev->family = AMDGPU_FAMILY_CI;
2116
2117		r = cik_set_ip_blocks(adev);
2118		if (r)
2119			return r;
2120		break;
2121#endif
2122	case CHIP_TOPAZ:
2123	case CHIP_TONGA:
2124	case CHIP_FIJI:
2125	case CHIP_POLARIS10:
2126	case CHIP_POLARIS11:
2127	case CHIP_POLARIS12:
2128	case CHIP_VEGAM:
2129	case CHIP_CARRIZO:
2130	case CHIP_STONEY:
2131		if (adev->flags & AMD_IS_APU)
2132			adev->family = AMDGPU_FAMILY_CZ;
2133		else
2134			adev->family = AMDGPU_FAMILY_VI;
2135
2136		r = vi_set_ip_blocks(adev);
2137		if (r)
2138			return r;
2139		break;
2140	default:
2141		r = amdgpu_discovery_set_ip_blocks(adev);
2142		if (r)
2143			return r;
2144		break;
2145	}
2146
2147	if (amdgpu_has_atpx() &&
2148	    (amdgpu_is_atpx_hybrid() ||
2149	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2150	    ((adev->flags & AMD_IS_APU) == 0) &&
2151	    !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2152		adev->flags |= AMD_IS_PX;
2153
2154	if (!(adev->flags & AMD_IS_APU)) {
2155		parent = pci_upstream_bridge(adev->pdev);
2156		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2157	}
2158
2159	amdgpu_amdkfd_device_probe(adev);
2160
2161	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2162	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2163		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2164	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2165		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2166
2167	for (i = 0; i < adev->num_ip_blocks; i++) {
2168		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2169			DRM_ERROR("disabled ip block: %d <%s>\n",
2170				  i, adev->ip_blocks[i].version->funcs->name);
2171			adev->ip_blocks[i].status.valid = false;
2172		} else {
2173			if (adev->ip_blocks[i].version->funcs->early_init) {
2174				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2175				if (r == -ENOENT) {
2176					adev->ip_blocks[i].status.valid = false;
2177				} else if (r) {
2178					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2179						  adev->ip_blocks[i].version->funcs->name, r);
2180					return r;
2181				} else {
2182					adev->ip_blocks[i].status.valid = true;
2183				}
2184			} else {
2185				adev->ip_blocks[i].status.valid = true;
2186			}
2187		}
2188		/* get the vbios after the asic_funcs are set up */
2189		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2190			r = amdgpu_device_parse_gpu_info_fw(adev);
2191			if (r)
2192				return r;
2193
2194			/* Read BIOS */
2195			if (!amdgpu_get_bios(adev))
2196				return -EINVAL;
2197
2198			r = amdgpu_atombios_init(adev);
2199			if (r) {
2200				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2201				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2202				return r;
2203			}
2204
2205			/* get pf2vf msg info at its earliest time */
2206			if (amdgpu_sriov_vf(adev))
2207				amdgpu_virt_init_data_exchange(adev);
2208
2209		}
2210	}
2211
2212	adev->cg_flags &= amdgpu_cg_mask;
2213	adev->pg_flags &= amdgpu_pg_mask;
2214
2215	return 0;
2216}
2217
2218static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2219{
2220	int i, r;
2221
2222	for (i = 0; i < adev->num_ip_blocks; i++) {
2223		if (!adev->ip_blocks[i].status.sw)
2224			continue;
2225		if (adev->ip_blocks[i].status.hw)
2226			continue;
2227		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2228		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2229		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2230			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2231			if (r) {
2232				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2233					  adev->ip_blocks[i].version->funcs->name, r);
2234				return r;
2235			}
2236			adev->ip_blocks[i].status.hw = true;
2237		}
2238	}
2239
2240	return 0;
2241}
2242
2243static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2244{
2245	int i, r;
2246
2247	for (i = 0; i < adev->num_ip_blocks; i++) {
2248		if (!adev->ip_blocks[i].status.sw)
2249			continue;
2250		if (adev->ip_blocks[i].status.hw)
2251			continue;
2252		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2253		if (r) {
2254			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2255				  adev->ip_blocks[i].version->funcs->name, r);
2256			return r;
2257		}
2258		adev->ip_blocks[i].status.hw = true;
2259	}
2260
2261	return 0;
2262}
2263
2264static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2265{
2266	int r = 0;
2267	int i;
2268	uint32_t smu_version;
2269
2270	if (adev->asic_type >= CHIP_VEGA10) {
2271		for (i = 0; i < adev->num_ip_blocks; i++) {
2272			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2273				continue;
2274
2275			if (!adev->ip_blocks[i].status.sw)
2276				continue;
2277
2278			/* no need to do the fw loading again if already done */
2279			if (adev->ip_blocks[i].status.hw)
2280				break;
2281
2282			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2283				r = adev->ip_blocks[i].version->funcs->resume(adev);
2284				if (r) {
2285					DRM_ERROR("resume of IP block <%s> failed %d\n",
2286							  adev->ip_blocks[i].version->funcs->name, r);
2287					return r;
2288				}
2289			} else {
2290				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2291				if (r) {
2292					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2293							  adev->ip_blocks[i].version->funcs->name, r);
2294					return r;
2295				}
2296			}
2297
2298			adev->ip_blocks[i].status.hw = true;
2299			break;
2300		}
2301	}
2302
2303	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2304		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2305
2306	return r;
2307}
2308
2309static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2310{
2311	long timeout;
2312	int r, i;
2313
2314	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2315		struct amdgpu_ring *ring = adev->rings[i];
2316
2317		/* No need to set up the GPU scheduler for rings that don't need it */
2318		if (!ring || ring->no_scheduler)
2319			continue;
2320
2321		switch (ring->funcs->type) {
2322		case AMDGPU_RING_TYPE_GFX:
2323			timeout = adev->gfx_timeout;
2324			break;
2325		case AMDGPU_RING_TYPE_COMPUTE:
2326			timeout = adev->compute_timeout;
2327			break;
2328		case AMDGPU_RING_TYPE_SDMA:
2329			timeout = adev->sdma_timeout;
2330			break;
2331		default:
2332			timeout = adev->video_timeout;
2333			break;
2334		}
2335
2336		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2337				   ring->num_hw_submission, amdgpu_job_hang_limit,
2338				   timeout, adev->reset_domain->wq,
2339				   ring->sched_score, ring->name,
2340				   adev->dev);
2341		if (r) {
2342			DRM_ERROR("Failed to create scheduler on ring %s.\n",
2343				  ring->name);
2344			return r;
2345		}
2346	}
2347
2348	return 0;
2349}
2350
2351
2352/**
2353 * amdgpu_device_ip_init - run init for hardware IPs
2354 *
2355 * @adev: amdgpu_device pointer
2356 *
2357 * Main initialization pass for hardware IPs.  The list of all the hardware
2358 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2359 * are run.  sw_init initializes the software state associated with each IP
2360 * and hw_init initializes the hardware associated with each IP.
2361 * Returns 0 on success, negative error code on failure.
2362 */
2363static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2364{
2365	int i, r;
2366
2367	r = amdgpu_ras_init(adev);
2368	if (r)
2369		return r;
2370
2371	for (i = 0; i < adev->num_ip_blocks; i++) {
2372		if (!adev->ip_blocks[i].status.valid)
2373			continue;
2374		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2375		if (r) {
2376			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2377				  adev->ip_blocks[i].version->funcs->name, r);
2378			goto init_failed;
2379		}
2380		adev->ip_blocks[i].status.sw = true;
2381
2382		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2383			/* need to do common hw init early so everything is set up for gmc */
2384			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2385			if (r) {
2386				DRM_ERROR("hw_init %d failed %d\n", i, r);
2387				goto init_failed;
2388			}
2389			adev->ip_blocks[i].status.hw = true;
2390		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2391			/* need to do gmc hw init early so we can allocate gpu mem */
2392			/* Try to reserve bad pages early */
2393			if (amdgpu_sriov_vf(adev))
2394				amdgpu_virt_exchange_data(adev);
2395
2396			r = amdgpu_device_vram_scratch_init(adev);
2397			if (r) {
2398				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2399				goto init_failed;
2400			}
2401			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2402			if (r) {
2403				DRM_ERROR("hw_init %d failed %d\n", i, r);
2404				goto init_failed;
2405			}
2406			r = amdgpu_device_wb_init(adev);
2407			if (r) {
2408				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2409				goto init_failed;
2410			}
2411			adev->ip_blocks[i].status.hw = true;
2412
2413			/* right after GMC hw init, we create CSA */
2414			if (amdgpu_mcbp) {
2415				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2416								AMDGPU_GEM_DOMAIN_VRAM,
2417								AMDGPU_CSA_SIZE);
2418				if (r) {
2419					DRM_ERROR("allocate CSA failed %d\n", r);
2420					goto init_failed;
2421				}
2422			}
2423		}
2424	}
2425
2426	if (amdgpu_sriov_vf(adev))
2427		amdgpu_virt_init_data_exchange(adev);
2428
2429	r = amdgpu_ib_pool_init(adev);
2430	if (r) {
2431		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2432		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2433		goto init_failed;
2434	}
2435
2436	r = amdgpu_ucode_create_bo(adev); /* create ucode BO when sw_init is complete */
2437	if (r)
2438		goto init_failed;
2439
2440	r = amdgpu_device_ip_hw_init_phase1(adev);
2441	if (r)
2442		goto init_failed;
2443
2444	r = amdgpu_device_fw_loading(adev);
2445	if (r)
2446		goto init_failed;
2447
2448	r = amdgpu_device_ip_hw_init_phase2(adev);
2449	if (r)
2450		goto init_failed;
2451
2452	/*
2453	 * Retired pages will be loaded from eeprom and reserved here.
2454	 * This should be called after amdgpu_device_ip_hw_init_phase2, since
2455	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2456	 * functional for I2C communication, which is only true at this point.
2457	 *
2458	 * amdgpu_ras_recovery_init may fail, but the upper level only cares
2459	 * about failures caused by a bad gpu state and stops the amdgpu init
2460	 * process accordingly. For other failure cases, it still releases all
2461	 * the resources and prints an error message rather than returning a
2462	 * negative value to the upper level.
2463	 *
2464	 * Note: theoretically, this should be called before all vram allocations
2465	 * to protect retired pages from being abused.
2466	 */
2467	r = amdgpu_ras_recovery_init(adev);
2468	if (r)
2469		goto init_failed;
2470
2471	/*
2472	 * In case of XGMI, grab an extra reference on the reset domain for this device.
2473	 */
2474	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2475		if (amdgpu_xgmi_add_device(adev) == 0) {
2476			if (!amdgpu_sriov_vf(adev)) {
2477				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2478
2479				if (WARN_ON(!hive)) {
2480					r = -ENOENT;
2481					goto init_failed;
2482				}
2483
2484				if (!hive->reset_domain ||
2485				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2486					r = -ENOENT;
2487					amdgpu_put_xgmi_hive(hive);
2488					goto init_failed;
2489				}
2490
2491				/* Drop the early temporary reset domain we created for device */
2492				amdgpu_reset_put_reset_domain(adev->reset_domain);
2493				adev->reset_domain = hive->reset_domain;
2494				amdgpu_put_xgmi_hive(hive);
2495			}
2496		}
2497	}
2498
2499	r = amdgpu_device_init_schedulers(adev);
2500	if (r)
2501		goto init_failed;
2502
2503	/* Don't init kfd if the whole hive needs to be reset during init */
2504	if (!adev->gmc.xgmi.pending_reset)
2505		amdgpu_amdkfd_device_init(adev);
2506
2507	amdgpu_fru_get_product_info(adev);
2508
2509init_failed:
2510	if (amdgpu_sriov_vf(adev))
2511		amdgpu_virt_release_full_gpu(adev, true);
2512
2513	return r;
2514}
2515
2516/**
2517 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2518 *
2519 * @adev: amdgpu_device pointer
2520 *
2521 * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2522 * this function before a GPU reset.  If the value is retained after a
2523 * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2524 */
2525static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2526{
2527	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2528}
2529
2530/**
2531 * amdgpu_device_check_vram_lost - check if vram is valid
2532 *
2533 * @adev: amdgpu_device pointer
2534 *
2535 * Checks the reset magic value written to the gart pointer in VRAM.
2536 * The driver calls this after a GPU reset to see if the contents of
2537 * VRAM are lost or not.
2538 * returns true if vram is lost, false if not.
2539 */
2540static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2541{
2542	if (memcmp(adev->gart.ptr, adev->reset_magic,
2543			AMDGPU_RESET_MAGIC_NUM))
2544		return true;
2545
2546	if (!amdgpu_in_reset(adev))
2547		return false;
2548
2549	/*
2550	 * For all ASICs with baco/mode1 reset, the VRAM is
2551	 * always assumed to be lost.
2552	 */
2553	switch (amdgpu_asic_reset_method(adev)) {
2554	case AMD_RESET_METHOD_BACO:
2555	case AMD_RESET_METHOD_MODE1:
2556		return true;
2557	default:
2558		return false;
2559	}
2560}
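
/*
 * Sketch of how the reset magic helpers above pair up around a reset
 * (illustrative only; the actual call sites live elsewhere in this file):
 */
#if 0
	amdgpu_device_fill_reset_magic(adev);	/* before the reset */
	/* ... the ASIC reset happens here ... */
	vram_lost = amdgpu_device_check_vram_lost(adev);	/* after the reset */
#endif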
2561
2562/**
2563 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2564 *
2565 * @adev: amdgpu_device pointer
2566 * @state: clockgating state (gate or ungate)
2567 *
2568 * The list of all the hardware IPs that make up the asic is walked and the
2569 * set_clockgating_state callbacks are run.
2570 * During late init this pass enables clockgating for the hardware IPs;
2571 * during fini or suspend it disables clockgating.
2572 * Returns 0 on success, negative error code on failure.
2573 */
2574
2575int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2576			       enum amd_clockgating_state state)
2577{
2578	int i, j, r;
2579
2580	if (amdgpu_emu_mode == 1)
2581		return 0;
2582
2583	for (j = 0; j < adev->num_ip_blocks; j++) {
2584		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2585		if (!adev->ip_blocks[i].status.late_initialized)
2586			continue;
2587		/* skip CG for GFX on S0ix */
2588		if (adev->in_s0ix &&
2589		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2590			continue;
2591		/* skip CG for VCE/UVD, it's handled specially */
2592		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2593		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2594		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2595		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2596		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2597			/* enable clockgating to save power */
2598			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2599										     state);
2600			if (r) {
2601				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2602					  adev->ip_blocks[i].version->funcs->name, r);
2603				return r;
2604			}
2605		}
2606	}
2607
2608	return 0;
2609}
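
/*
 * Usage sketch (illustrative): this mirrors how the driver itself uses the
 * helper, gating at late init and ungating ahead of fini/suspend.
 */
#if 0
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);	/* late init */
	/* ... */
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);	/* fini/suspend */
#endif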
2610
2611int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2612			       enum amd_powergating_state state)
2613{
2614	int i, j, r;
2615
2616	if (amdgpu_emu_mode == 1)
2617		return 0;
2618
2619	for (j = 0; j < adev->num_ip_blocks; j++) {
2620		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2621		if (!adev->ip_blocks[i].status.late_initialized)
2622			continue;
2623		/* skip PG for GFX on S0ix */
2624		if (adev->in_s0ix &&
2625		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2626			continue;
2627		/* skip PG for VCE/UVD, it's handled specially */
2628		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2629		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2630		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2631		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2632		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2633			/* enable powergating to save power */
2634			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2635											state);
2636			if (r) {
2637				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2638					  adev->ip_blocks[i].version->funcs->name, r);
2639				return r;
2640			}
2641		}
2642	}
2643	return 0;
2644}
2645
2646static int amdgpu_device_enable_mgpu_fan_boost(void)
2647{
2648	struct amdgpu_gpu_instance *gpu_ins;
2649	struct amdgpu_device *adev;
2650	int i, ret = 0;
2651
2652	mutex_lock(&mgpu_info.mutex);
2653
2654	/*
2655	 * MGPU fan boost feature should be enabled
2656	 * only when there are two or more dGPUs in
2657	 * the system
2658	 */
2659	if (mgpu_info.num_dgpu < 2)
2660		goto out;
2661
2662	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2663		gpu_ins = &(mgpu_info.gpu_ins[i]);
2664		adev = gpu_ins->adev;
2665		if (!(adev->flags & AMD_IS_APU) &&
2666		    !gpu_ins->mgpu_fan_enabled) {
2667			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2668			if (ret)
2669				break;
2670
2671			gpu_ins->mgpu_fan_enabled = 1;
2672		}
2673	}
2674
2675out:
2676	mutex_unlock(&mgpu_info.mutex);
2677
2678	return ret;
2679}
2680
2681/**
2682 * amdgpu_device_ip_late_init - run late init for hardware IPs
2683 *
2684 * @adev: amdgpu_device pointer
2685 *
2686 * Late initialization pass for hardware IPs.  The list of all the hardware
2687 * IPs that make up the asic is walked and the late_init callbacks are run.
2688 * late_init covers any special initialization that an IP requires
2689 * after all of the IPs have been initialized or something that needs to happen
2690 * late in the init process.
2691 * Returns 0 on success, negative error code on failure.
2692 */
2693static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2694{
2695	struct amdgpu_gpu_instance *gpu_instance;
2696	int i = 0, r;
2697
2698	for (i = 0; i < adev->num_ip_blocks; i++) {
2699		if (!adev->ip_blocks[i].status.hw)
2700			continue;
2701		if (adev->ip_blocks[i].version->funcs->late_init) {
2702			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2703			if (r) {
2704				DRM_ERROR("late_init of IP block <%s> failed %d\n",
2705					  adev->ip_blocks[i].version->funcs->name, r);
2706				return r;
2707			}
2708		}
2709		adev->ip_blocks[i].status.late_initialized = true;
2710	}
2711
2712	r = amdgpu_ras_late_init(adev);
2713	if (r) {
2714		DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2715		return r;
2716	}
2717
2718	amdgpu_ras_set_error_query_ready(adev, true);
2719
2720	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2721	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2722
2723	amdgpu_device_fill_reset_magic(adev);
2724
2725	r = amdgpu_device_enable_mgpu_fan_boost();
2726	if (r)
2727		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2728
2729	/* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
2730	if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2731			       adev->asic_type == CHIP_ALDEBARAN))
2732		amdgpu_dpm_handle_passthrough_sbr(adev, true);
2733
2734	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2735		mutex_lock(&mgpu_info.mutex);
2736
2737		/*
2738		 * Reset device p-state to low as this was booted with high.
2739		 *
2740		 * This should be performed only after all devices from the same
2741		 * hive get initialized.
2742		 *
2743		 * However, the number of devices in a hive is not known in
2744		 * advance, as it is counted one by one during device initialization.
2745		 *
2746		 * So, we wait for all XGMI interlinked devices initialized.
2747		 * This may bring some delays as those devices may come from
2748		 * different hives. But that should be OK.
2749		 */
2750		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2751			for (i = 0; i < mgpu_info.num_gpu; i++) {
2752				gpu_instance = &(mgpu_info.gpu_ins[i]);
2753				if (gpu_instance->adev->flags & AMD_IS_APU)
2754					continue;
2755
2756				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2757						AMDGPU_XGMI_PSTATE_MIN);
2758				if (r) {
2759					DRM_ERROR("pstate setting failed (%d).\n", r);
2760					break;
2761				}
2762			}
2763		}
2764
2765		mutex_unlock(&mgpu_info.mutex);
2766	}
2767
2768	return 0;
2769}
2770
2771/**
2772 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2773 *
2774 * @adev: amdgpu_device pointer
2775 *
2776 * For ASICs that need to disable the SMC first.
2777 */
2778static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2779{
2780	int i, r;
2781
2782	if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2783		return;
2784
2785	for (i = 0; i < adev->num_ip_blocks; i++) {
2786		if (!adev->ip_blocks[i].status.hw)
2787			continue;
2788		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2789			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2790			/* XXX handle errors */
2791			if (r) {
2792				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2793					  adev->ip_blocks[i].version->funcs->name, r);
2794			}
2795			adev->ip_blocks[i].status.hw = false;
2796			break;
2797		}
2798	}
2799}
2800
2801static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2802{
2803	int i, r;
2804
2805	for (i = 0; i < adev->num_ip_blocks; i++) {
2806		if (!adev->ip_blocks[i].version->funcs->early_fini)
2807			continue;
2808
2809		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2810		if (r) {
2811			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2812				  adev->ip_blocks[i].version->funcs->name, r);
2813		}
2814	}
2815
2816	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2817	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2818
2819	amdgpu_amdkfd_suspend(adev, false);
2820
2821	/* Workaround for ASICs that need to disable the SMC first */
2822	amdgpu_device_smu_fini_early(adev);
2823
2824	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2825		if (!adev->ip_blocks[i].status.hw)
2826			continue;
2827
2828		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2829		/* XXX handle errors */
2830		if (r) {
2831			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2832				  adev->ip_blocks[i].version->funcs->name, r);
2833		}
2834
2835		adev->ip_blocks[i].status.hw = false;
2836	}
2837
2838	if (amdgpu_sriov_vf(adev)) {
2839		if (amdgpu_virt_release_full_gpu(adev, false))
2840			DRM_ERROR("failed to release exclusive mode on fini\n");
2841	}
2842
2843	return 0;
2844}
2845
2846/**
2847 * amdgpu_device_ip_fini - run fini for hardware IPs
2848 *
2849 * @adev: amdgpu_device pointer
2850 *
2851 * Main teardown pass for hardware IPs.  The list of all the hardware
2852 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2853 * are run.  hw_fini tears down the hardware associated with each IP
2854 * and sw_fini tears down any software state associated with each IP.
2855 * Returns 0 on success, negative error code on failure.
2856 */
2857static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2858{
2859	int i, r;
2860
2861	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2862		amdgpu_virt_release_ras_err_handler_data(adev);
2863
2864	if (adev->gmc.xgmi.num_physical_nodes > 1)
2865		amdgpu_xgmi_remove_device(adev);
2866
2867	amdgpu_amdkfd_device_fini_sw(adev);
2868
2869	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2870		if (!adev->ip_blocks[i].status.sw)
2871			continue;
2872
2873		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2874			amdgpu_ucode_free_bo(adev);
2875			amdgpu_free_static_csa(&adev->virt.csa_obj);
2876			amdgpu_device_wb_fini(adev);
2877			amdgpu_device_vram_scratch_fini(adev);
2878			amdgpu_ib_pool_fini(adev);
2879		}
2880
2881		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2882		/* XXX handle errors */
2883		if (r) {
2884			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2885				  adev->ip_blocks[i].version->funcs->name, r);
2886		}
2887		adev->ip_blocks[i].status.sw = false;
2888		adev->ip_blocks[i].status.valid = false;
2889	}
2890
2891	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2892		if (!adev->ip_blocks[i].status.late_initialized)
2893			continue;
2894		if (adev->ip_blocks[i].version->funcs->late_fini)
2895			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2896		adev->ip_blocks[i].status.late_initialized = false;
2897	}
2898
2899	amdgpu_ras_fini(adev);
2900
2901	return 0;
2902}
2903
2904/**
2905 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2906 *
2907 * @work: work_struct.
2908 */
2909static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2910{
2911	struct amdgpu_device *adev =
2912		container_of(work, struct amdgpu_device, delayed_init_work.work);
2913	int r;
2914
2915	r = amdgpu_ib_ring_tests(adev);
2916	if (r)
2917		DRM_ERROR("ib ring test failed (%d).\n", r);
2918}
2919
2920static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2921{
2922	struct amdgpu_device *adev =
2923		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2924
2925	WARN_ON_ONCE(adev->gfx.gfx_off_state);
2926	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2927
2928	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2929		adev->gfx.gfx_off_state = true;
2930}
2931
2932/**
2933 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2934 *
2935 * @adev: amdgpu_device pointer
2936 *
2937 * Main suspend function for hardware IPs.  The list of all the hardware
2938 * IPs that make up the asic is walked, clockgating is disabled and the
2939 * suspend callbacks are run.  suspend puts the hardware and software state
2940 * in each IP into a state suitable for suspend.
2941 * Returns 0 on success, negative error code on failure.
2942 */
2943static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2944{
2945	int i, r;
2946
2947	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2948	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2949
2950	/*
2951	 * Per the PMFW team's suggestion, the driver needs to handle disabling
2952	 * the gfxoff and df cstate features for gpu reset (e.g. Mode1Reset)
2953	 * scenarios. Add the missing df cstate disablement here.
2954	 */
2955	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2956		dev_warn(adev->dev, "Failed to disallow df cstate");
2957
2958	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2959		if (!adev->ip_blocks[i].status.valid)
2960			continue;
2961
2962		/* displays are handled separately */
2963		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2964			continue;
2965
2967		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2968		/* XXX handle errors */
2969		if (r) {
2970			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2971				  adev->ip_blocks[i].version->funcs->name, r);
2972			return r;
2973		}
2974
2975		adev->ip_blocks[i].status.hw = false;
2976	}
2977
2978	return 0;
2979}
2980
2981/**
2982 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2983 *
2984 * @adev: amdgpu_device pointer
2985 *
2986 * Main suspend function for hardware IPs.  The list of all the hardware
2987 * IPs that make up the asic is walked, clockgating is disabled and the
2988 * suspend callbacks are run.  suspend puts the hardware and software state
2989 * in each IP into a state suitable for suspend.
2990 * Returns 0 on success, negative error code on failure.
2991 */
2992static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2993{
2994	int i, r;
2995
2996	if (adev->in_s0ix)
2997		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2998
2999	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3000		if (!adev->ip_blocks[i].status.valid)
3001			continue;
3002		/* displays are handled in phase1 */
3003		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3004			continue;
3005		/* PSP lost connection when err_event_athub occurs */
3006		if (amdgpu_ras_intr_triggered() &&
3007		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3008			adev->ip_blocks[i].status.hw = false;
3009			continue;
3010		}
3011
3012		/* skip unnecessary suspend if we have not initialized them yet */
3013		if (adev->gmc.xgmi.pending_reset &&
3014		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3015		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3016		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3017		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3018			adev->ip_blocks[i].status.hw = false;
3019			continue;
3020		}
3021
3022		/* Skip suspend of gfx/mes and psp for S0ix:
3023		 * gfx is in the gfxoff state, so on resume it will exit gfxoff just
3024		 * like at runtime. PSP is also part of the always-on hardware,
3025		 * so there is no need to suspend it.
3026		 */
3027		if (adev->in_s0ix &&
3028		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3029		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3030		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3031			continue;
3032
3034		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3035		/* XXX handle errors */
3036		if (r) {
3037			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3038				  adev->ip_blocks[i].version->funcs->name, r);
3039		}
3040		adev->ip_blocks[i].status.hw = false;
3041		/* handle putting the SMC in the appropriate state */
3042		if (!amdgpu_sriov_vf(adev)) {
3043			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3044				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3045				if (r) {
3046					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3047							adev->mp1_state, r);
3048					return r;
3049				}
3050			}
3051		}
3052	}
3053
3054	return 0;
3055}
3056
3057/**
3058 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3059 *
3060 * @adev: amdgpu_device pointer
3061 *
3062 * Main suspend function for hardware IPs.  The list of all the hardware
3063 * IPs that make up the asic is walked, clockgating is disabled and the
3064 * suspend callbacks are run.  suspend puts the hardware and software state
3065 * in each IP into a state suitable for suspend.
3066 * Returns 0 on success, negative error code on failure.
3067 */
3068int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3069{
3070	int r;
3071
3072	if (amdgpu_sriov_vf(adev)) {
3073		amdgpu_virt_fini_data_exchange(adev);
3074		amdgpu_virt_request_full_gpu(adev, false);
3075	}
3076
3077	r = amdgpu_device_ip_suspend_phase1(adev);
3078	if (r)
3079		return r;
3080	r = amdgpu_device_ip_suspend_phase2(adev);
3081
3082	if (amdgpu_sriov_vf(adev))
3083		amdgpu_virt_release_full_gpu(adev, false);
3084
3085	return r;
3086}
3087
3088static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3089{
3090	int i, r;
3091
3092	static enum amd_ip_block_type ip_order[] = {
3093		AMD_IP_BLOCK_TYPE_COMMON,
3094		AMD_IP_BLOCK_TYPE_GMC,
3095		AMD_IP_BLOCK_TYPE_PSP,
3096		AMD_IP_BLOCK_TYPE_IH,
3097	};
3098
3099	for (i = 0; i < adev->num_ip_blocks; i++) {
3100		int j;
3101		struct amdgpu_ip_block *block;
3102
3103		block = &adev->ip_blocks[i];
3104		block->status.hw = false;
3105
3106		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3107
3108			if (block->version->type != ip_order[j] ||
3109				!block->status.valid)
3110				continue;
3111
3112			r = block->version->funcs->hw_init(adev);
3113			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3114			if (r)
3115				return r;
3116			block->status.hw = true;
3117		}
3118	}
3119
3120	return 0;
3121}
3122
3123static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3124{
3125	int i, r;
3126
3127	static enum amd_ip_block_type ip_order[] = {
3128		AMD_IP_BLOCK_TYPE_SMC,
3129		AMD_IP_BLOCK_TYPE_DCE,
3130		AMD_IP_BLOCK_TYPE_GFX,
3131		AMD_IP_BLOCK_TYPE_SDMA,
3132		AMD_IP_BLOCK_TYPE_UVD,
3133		AMD_IP_BLOCK_TYPE_VCE,
3134		AMD_IP_BLOCK_TYPE_VCN
3135	};
3136
3137	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3138		int j;
3139		struct amdgpu_ip_block *block;
3140
3141		for (j = 0; j < adev->num_ip_blocks; j++) {
3142			block = &adev->ip_blocks[j];
3143
3144			if (block->version->type != ip_order[i] ||
3145				!block->status.valid ||
3146				block->status.hw)
3147				continue;
3148
3149			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3150				r = block->version->funcs->resume(adev);
3151			else
3152				r = block->version->funcs->hw_init(adev);
3153
3154			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3155			if (r)
3156				return r;
3157			block->status.hw = true;
3158		}
3159	}
3160
3161	return 0;
3162}
3163
3164/**
3165 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3166 *
3167 * @adev: amdgpu_device pointer
3168 *
3169 * First resume function for hardware IPs.  The list of all the hardware
3170 * IPs that make up the asic is walked and the resume callbacks are run for
3171 * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3172 * after a suspend and updates the software state as necessary.  This
3173 * function is also used for restoring the GPU after a GPU reset.
3174 * Returns 0 on success, negative error code on failure.
3175 */
3176static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3177{
3178	int i, r;
3179
3180	for (i = 0; i < adev->num_ip_blocks; i++) {
3181		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3182			continue;
3183		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3184		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3185		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3186		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3187
3188			r = adev->ip_blocks[i].version->funcs->resume(adev);
3189			if (r) {
3190				DRM_ERROR("resume of IP block <%s> failed %d\n",
3191					  adev->ip_blocks[i].version->funcs->name, r);
3192				return r;
3193			}
3194			adev->ip_blocks[i].status.hw = true;
3195		}
3196	}
3197
3198	return 0;
3199}
3200
3201/**
3202 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3203 *
3204 * @adev: amdgpu_device pointer
3205 *
3206 * Second resume function for hardware IPs.  The list of all the hardware
3207 * IPs that make up the asic is walked and the resume callbacks are run for
3208 * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3209 * functional state after a suspend and updates the software state as
3210 * necessary.  This function is also used for restoring the GPU after a GPU
3211 * reset.
3212 * Returns 0 on success, negative error code on failure.
3213 */
3214static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3215{
3216	int i, r;
3217
3218	for (i = 0; i < adev->num_ip_blocks; i++) {
3219		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3220			continue;
3221		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3222		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3223		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3224		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3225			continue;
3226		r = adev->ip_blocks[i].version->funcs->resume(adev);
3227		if (r) {
3228			DRM_ERROR("resume of IP block <%s> failed %d\n",
3229				  adev->ip_blocks[i].version->funcs->name, r);
3230			return r;
3231		}
3232		adev->ip_blocks[i].status.hw = true;
3233
3234		if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3235			/* disable gfxoff for IP resume. The gfxoff will be re-enabled in
3236			 * amdgpu_device_resume() after IP resume.
3237			 */
3238			amdgpu_gfx_off_ctrl(adev, false);
3239			DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
3240		}
3241
3242	}
3243
3244	return 0;
3245}
3246
3247/**
3248 * amdgpu_device_ip_resume - run resume for hardware IPs
3249 *
3250 * @adev: amdgpu_device pointer
3251 *
3252 * Main resume function for hardware IPs.  The hardware IPs
3253 * are split into two resume functions because they are
3254 * also used in recovering from a GPU reset and some additional
3255 * steps need to be taken between them.  In this case (S3/S4) they are
3256 * run sequentially.
3257 * Returns 0 on success, negative error code on failure.
3258 */
3259static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3260{
3261	int r;
3262
3263	r = amdgpu_amdkfd_resume_iommu(adev);
3264	if (r)
3265		return r;
3266
3267	r = amdgpu_device_ip_resume_phase1(adev);
3268	if (r)
3269		return r;
3270
3271	r = amdgpu_device_fw_loading(adev);
3272	if (r)
3273		return r;
3274
3275	r = amdgpu_device_ip_resume_phase2(adev);
3276
3277	return r;
3278}
3279
3280/**
3281 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3282 *
3283 * @adev: amdgpu_device pointer
3284 *
3285 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3286 */
3287static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3288{
3289	if (amdgpu_sriov_vf(adev)) {
3290		if (adev->is_atom_fw) {
3291			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3292				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3293		} else {
3294			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3295				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3296		}
3297
3298		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3299			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3300	}
3301}
3302
3303/**
3304 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3305 *
3306 * @asic_type: AMD asic type
3307 *
3308 * Check if there is DC (new modesetting infrastructure) support for an asic.
3309 * returns true if DC has support, false if not.
3310 */
3311bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3312{
3313	switch (asic_type) {
3314#ifdef CONFIG_DRM_AMDGPU_SI
3315	case CHIP_HAINAN:
3316#endif
3317	case CHIP_TOPAZ:
3318		/* chips with no display hardware */
3319		return false;
3320#if defined(CONFIG_DRM_AMD_DC)
3321	case CHIP_TAHITI:
3322	case CHIP_PITCAIRN:
3323	case CHIP_VERDE:
3324	case CHIP_OLAND:
3325		/*
3326		 * We have systems in the wild with these ASICs that require
3327		 * LVDS and VGA support which is not supported with DC.
3328		 *
3329		 * Fallback to the non-DC driver here by default so as not to
3330		 * cause regressions.
3331		 */
3332#if defined(CONFIG_DRM_AMD_DC_SI)
3333		return amdgpu_dc > 0;
3334#else
3335		return false;
3336#endif
3337	case CHIP_BONAIRE:
3338	case CHIP_KAVERI:
3339	case CHIP_KABINI:
3340	case CHIP_MULLINS:
3341		/*
3342		 * We have systems in the wild with these ASICs that require
3343		 * VGA support which is not supported with DC.
3344		 *
3345		 * Fallback to the non-DC driver here by default so as not to
3346		 * cause regressions.
3347		 */
3348		return amdgpu_dc > 0;
3349	default:
3350		return amdgpu_dc != 0;
3351#else
3352	default:
3353		if (amdgpu_dc > 0)
3354			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3355					 "but isn't supported by ASIC, ignoring\n");
3356		return false;
3357#endif
3358	}
3359}
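
/*
 * Example: booting with amdgpu.dc=1 opts the legacy ASICs above in to DC,
 * while amdgpu.dc=0 forces the non-DC path on ASICs that default to DC.
 */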
3360
3361/**
3362 * amdgpu_device_has_dc_support - check if dc is supported
3363 *
3364 * @adev: amdgpu_device pointer
3365 *
3366 * Returns true for supported, false for not supported
3367 */
3368bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3369{
3370	if (adev->enable_virtual_display ||
3371	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3372		return false;
3373
3374	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3375}
3376
3377static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3378{
3379	struct amdgpu_device *adev =
3380		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3381	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3382
3383	/* It's a bug to not have a hive within this function */
3384	if (WARN_ON(!hive))
3385		return;
3386
3387	/*
3388	 * Use task barrier to synchronize all xgmi reset works across the
3389	 * hive. task_barrier_enter and task_barrier_exit will block
3390	 * until all the threads running the xgmi reset works reach
3391	 * those points. task_barrier_full will do both blocks.
3392	 */
3393	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3394
3395		task_barrier_enter(&hive->tb);
3396		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3397
3398		if (adev->asic_reset_res)
3399			goto fail;
3400
3401		task_barrier_exit(&hive->tb);
3402		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3403
3404		if (adev->asic_reset_res)
3405			goto fail;
3406
3407		if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3408		    adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3409			adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3410	} else {
3411
3412		task_barrier_full(&hive->tb);
3413		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3414	}
3415
3416fail:
3417	if (adev->asic_reset_res)
3418		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3419			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3420	amdgpu_put_xgmi_hive(hive);
3421}
3422
3423static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3424{
3425	char *input = amdgpu_lockup_timeout;
3426	char *timeout_setting = NULL;
3427	int index = 0;
3428	long timeout;
3429	int ret = 0;
3430
3431	/*
3432	 * By default the timeout for non-compute jobs is 10000 ms
3433	 * and 60000 ms for compute jobs.
3434	 * In SR-IOV or passthrough mode, the timeout for compute
3435	 * jobs is 60000 ms by default.
3436	 */
3437	adev->gfx_timeout = msecs_to_jiffies(10000);
3438	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3439	if (amdgpu_sriov_vf(adev))
3440		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3441					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3442	else
3443		adev->compute_timeout =  msecs_to_jiffies(60000);
3444
3445	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3446		while ((timeout_setting = strsep(&input, ",")) &&
3447				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3448			ret = kstrtol(timeout_setting, 0, &timeout);
3449			if (ret)
3450				return ret;
3451
3452			if (timeout == 0) {
3453				index++;
3454				continue;
3455			} else if (timeout < 0) {
3456				timeout = MAX_SCHEDULE_TIMEOUT;
3457				dev_warn(adev->dev, "lockup timeout disabled");
3458				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3459			} else {
3460				timeout = msecs_to_jiffies(timeout);
3461			}
3462
3463			switch (index++) {
3464			case 0:
3465				adev->gfx_timeout = timeout;
3466				break;
3467			case 1:
3468				adev->compute_timeout = timeout;
3469				break;
3470			case 2:
3471				adev->sdma_timeout = timeout;
3472				break;
3473			case 3:
3474				adev->video_timeout = timeout;
3475				break;
3476			default:
3477				break;
3478			}
3479		}
3480		/*
3481		 * There is only one value specified and
3482		 * it should apply to all non-compute jobs.
3483		 */
3484		if (index == 1) {
3485			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3486			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3487				adev->compute_timeout = adev->gfx_timeout;
3488		}
3489	}
3490
3491	return ret;
3492}
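
/*
 * Example of the amdgpu.lockup_timeout syntax parsed above (values in ms;
 * the order is gfx,compute,sdma,video):
 *   amdgpu.lockup_timeout=10000                    - one value, applied to
 *                                                    all non-compute queues
 *   amdgpu.lockup_timeout=10000,60000,10000,10000  - per queue type
 * A value of 0 keeps the default for that slot and a negative value
 * disables the timeout for that slot.
 */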
3493
3494/**
3495 * amdgpu_device_check_iommu_direct_map - check if RAM is directly mapped to the GPU
3496 *
3497 * @adev: amdgpu_device pointer
3498 *
3499 * RAM is directly mapped to the GPU if the IOMMU is not enabled or is in passthrough mode.
3500 */
3501static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3502{
3503	struct iommu_domain *domain;
3504
3505	domain = iommu_get_domain_for_dev(adev->dev);
3506	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3507		adev->ram_is_direct_mapped = true;
3508}
3509
3510static const struct attribute *amdgpu_dev_attributes[] = {
3511	&dev_attr_product_name.attr,
3512	&dev_attr_product_number.attr,
3513	&dev_attr_serial_number.attr,
3514	&dev_attr_pcie_replay_count.attr,
3515	NULL
3516};
3517
3518/**
3519 * amdgpu_device_init - initialize the driver
3520 *
3521 * @adev: amdgpu_device pointer
3522 * @flags: driver flags
3523 *
3524 * Initializes the driver info and hw (all asics).
3525 * Returns 0 for success or an error on failure.
3526 * Called at driver startup.
3527 */
3528int amdgpu_device_init(struct amdgpu_device *adev,
3529		       uint32_t flags)
3530{
3531	struct drm_device *ddev = adev_to_drm(adev);
3532	struct pci_dev *pdev = adev->pdev;
3533	int r, i;
3534	bool px = false;
3535	u32 max_MBps;
3536
3537	adev->shutdown = false;
3538	adev->flags = flags;
3539
3540	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3541		adev->asic_type = amdgpu_force_asic_type;
3542	else
3543		adev->asic_type = flags & AMD_ASIC_MASK;
3544
3545	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3546	if (amdgpu_emu_mode == 1)
3547		adev->usec_timeout *= 10;
3548	adev->gmc.gart_size = 512 * 1024 * 1024;
3549	adev->accel_working = false;
3550	adev->num_rings = 0;
3551	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3552	adev->mman.buffer_funcs = NULL;
3553	adev->mman.buffer_funcs_ring = NULL;
3554	adev->vm_manager.vm_pte_funcs = NULL;
3555	adev->vm_manager.vm_pte_num_scheds = 0;
3556	adev->gmc.gmc_funcs = NULL;
3557	adev->harvest_ip_mask = 0x0;
3558	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3559	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3560
3561	adev->smc_rreg = &amdgpu_invalid_rreg;
3562	adev->smc_wreg = &amdgpu_invalid_wreg;
3563	adev->pcie_rreg = &amdgpu_invalid_rreg;
3564	adev->pcie_wreg = &amdgpu_invalid_wreg;
3565	adev->pciep_rreg = &amdgpu_invalid_rreg;
3566	adev->pciep_wreg = &amdgpu_invalid_wreg;
3567	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3568	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3569	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3570	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3571	adev->didt_rreg = &amdgpu_invalid_rreg;
3572	adev->didt_wreg = &amdgpu_invalid_wreg;
3573	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3574	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3575	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3576	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3577
3578	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3579		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3580		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3581
3582	/* all mutexes are initialized here so that later
3583	 * functions can be called without locking issues */
3584	mutex_init(&adev->firmware.mutex);
3585	mutex_init(&adev->pm.mutex);
3586	mutex_init(&adev->gfx.gpu_clock_mutex);
3587	mutex_init(&adev->srbm_mutex);
3588	mutex_init(&adev->gfx.pipe_reserve_mutex);
3589	mutex_init(&adev->gfx.gfx_off_mutex);
3590	mutex_init(&adev->grbm_idx_mutex);
3591	mutex_init(&adev->mn_lock);
3592	mutex_init(&adev->virt.vf_errors.lock);
3593	hash_init(adev->mn_hash);
3594	mutex_init(&adev->psp.mutex);
3595	mutex_init(&adev->notifier_lock);
3596	mutex_init(&adev->pm.stable_pstate_ctx_lock);
3597	mutex_init(&adev->benchmark_mutex);
3598
3599	amdgpu_device_init_apu_flags(adev);
3600
3601	r = amdgpu_device_check_arguments(adev);
3602	if (r)
3603		return r;
3604
3605	spin_lock_init(&adev->mmio_idx_lock);
3606	spin_lock_init(&adev->smc_idx_lock);
3607	spin_lock_init(&adev->pcie_idx_lock);
3608	spin_lock_init(&adev->uvd_ctx_idx_lock);
3609	spin_lock_init(&adev->didt_idx_lock);
3610	spin_lock_init(&adev->gc_cac_idx_lock);
3611	spin_lock_init(&adev->se_cac_idx_lock);
3612	spin_lock_init(&adev->audio_endpt_idx_lock);
3613	spin_lock_init(&adev->mm_stats.lock);
3614
3615	INIT_LIST_HEAD(&adev->shadow_list);
3616	mutex_init(&adev->shadow_list_lock);
3617
3618	INIT_LIST_HEAD(&adev->reset_list);
3619
3620	INIT_LIST_HEAD(&adev->ras_list);
3621
3622	INIT_DELAYED_WORK(&adev->delayed_init_work,
3623			  amdgpu_device_delayed_init_work_handler);
3624	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3625			  amdgpu_device_delay_enable_gfx_off);
3626
3627	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3628
3629	adev->gfx.gfx_off_req_count = 1;
3630	adev->gfx.gfx_off_residency = 0;
3631	adev->gfx.gfx_off_entrycount = 0;
3632	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3633
3634	atomic_set(&adev->throttling_logging_enabled, 1);
3635	/*
3636	 * If throttling continues, logging will be performed every minute
3637	 * to avoid log flooding. "-1" is subtracted since the thermal
3638	 * throttling interrupt comes every second. Thus, the total logging
3639	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3640	 * for the throttling interrupt) = 60 seconds.
3641	 */
3642	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3643	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
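	/*
	 * Illustrative sketch (not part of the driver): a ratelimit state
	 * initialized as above is typically consumed by guarding the printk
	 * with __ratelimit(); the exact call site below is hypothetical:
	 *
	 *	if (__ratelimit(&adev->throttling_logging_rs))
	 *		dev_warn(adev->dev, "GPU is thermally throttled\n");
	 */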
3644
3645	/* Registers mapping */
3646	/* TODO: block userspace mapping of io register */
3647	if (adev->asic_type >= CHIP_BONAIRE) {
3648		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3649		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3650	} else {
3651		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3652		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3653	}
3654
3655	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3656		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3657
3658	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3659	if (!adev->rmmio)
3660		return -ENOMEM;
3662	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3663	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
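	/*
	 * A minimal sketch of how this mapping is consumed (REG_DW_OFFSET is
	 * a hypothetical dword register offset): the driver's register
	 * accessors such as RREG32() ultimately boil down to a read through
	 * this ioremap'd window, roughly:
	 *
	 *	u32 val = readl(adev->rmmio + (REG_DW_OFFSET * 4));
	 */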
3664
3665	amdgpu_device_get_pcie_info(adev);
3666
3667	if (amdgpu_mcbp)
3668		DRM_INFO("MCBP is enabled\n");
3669
3670	/*
3671	 * The reset domain needs to be present early, before any XGMI hive
3672	 * is discovered and initialized, so the reset semaphore and in_gpu
3673	 * reset flag can be used early during init and before calling RREG32.
3674	 */
3675	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3676	if (!adev->reset_domain)
3677		return -ENOMEM;
3678
3679	/* detect hw virtualization here */
3680	amdgpu_detect_virtualization(adev);
3681
3682	r = amdgpu_device_get_job_timeout_settings(adev);
3683	if (r) {
3684		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3685		return r;
3686	}
3687
3688	/* early init functions */
3689	r = amdgpu_device_ip_early_init(adev);
3690	if (r)
3691		return r;
3692
3693	/* Get rid of things like offb */
3694	r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
3695	if (r)
3696		return r;
3697
3698	/* Enable TMZ based on IP_VERSION */
3699	amdgpu_gmc_tmz_set(adev);
3700
3701	amdgpu_gmc_noretry_set(adev);
3702	/* Need to get xgmi info early to decide the reset behavior */
3703	if (adev->gmc.xgmi.supported) {
3704		r = adev->gfxhub.funcs->get_xgmi_info(adev);
3705		if (r)
3706			return r;
3707	}
3708
3709	/* enable PCIE atomic ops */
3710	if (amdgpu_sriov_vf(adev))
3711		adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3712			adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3713			(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3714	else
3715		adev->have_atomics_support =
3716			!pci_enable_atomic_ops_to_root(adev->pdev,
3717					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3718					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3719	if (!adev->have_atomics_support)
3720		dev_info(adev->dev, "PCIE atomic ops are not supported\n");
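	/*
	 * Sketch of a typical consumer (the call sites below are
	 * hypothetical): code that depends on PCIe atomics branches on the
	 * cached flag instead of re-querying the root port:
	 *
	 *	if (adev->have_atomics_support)
	 *		setup_hw_atomic_path();
	 *	else
	 *		setup_fallback_path();
	 */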
3721
3722	/* doorbell bar mapping and doorbell index init */
3723	amdgpu_device_doorbell_init(adev);
3724
3725	if (amdgpu_emu_mode == 1) {
3726		/* post the asic in emulation mode */
3727		emu_soc_asic_init(adev);
3728		goto fence_driver_init;
3729	}
3730
3731	amdgpu_reset_init(adev);
3732
3733	/* detect if we have an SR-IOV vBIOS */
3734	amdgpu_device_detect_sriov_bios(adev);
3735
3736	/* check if we need to reset the asic
3737	 *  E.g., driver was not cleanly unloaded previously, etc.
3738	 */
3739	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3740		if (adev->gmc.xgmi.num_physical_nodes) {
3741			dev_info(adev->dev, "Pending hive reset.\n");
3742			adev->gmc.xgmi.pending_reset = true;
3743			/* Only need to init necessary block for SMU to handle the reset */
3744			for (i = 0; i < adev->num_ip_blocks; i++) {
3745				if (!adev->ip_blocks[i].status.valid)
3746					continue;
3747				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3748				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3749				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3750				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3751					DRM_DEBUG("IP %s disabled for hw_init.\n",
3752						adev->ip_blocks[i].version->funcs->name);
3753					adev->ip_blocks[i].status.hw = true;
3754				}
3755			}
3756		} else {
3757			r = amdgpu_asic_reset(adev);
3758			if (r) {
3759				dev_err(adev->dev, "asic reset on init failed\n");
3760				goto failed;
3761			}
3762		}
3763	}
3764
3765	pci_enable_pcie_error_reporting(adev->pdev);
3766
3767	/* Post card if necessary */
3768	if (amdgpu_device_need_post(adev)) {
3769		if (!adev->bios) {
3770			dev_err(adev->dev, "no vBIOS found\n");
3771			r = -EINVAL;
3772			goto failed;
3773		}
3774		DRM_INFO("GPU posting now...\n");
3775		r = amdgpu_device_asic_init(adev);
3776		if (r) {
3777			dev_err(adev->dev, "gpu post error!\n");
3778			goto failed;
3779		}
3780	}
3781
3782	if (adev->is_atom_fw) {
3783		/* Initialize clocks */
3784		r = amdgpu_atomfirmware_get_clock_info(adev);
3785		if (r) {
3786			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3787			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3788			goto failed;
3789		}
3790	} else {
3791		/* Initialize clocks */
3792		r = amdgpu_atombios_get_clock_info(adev);
3793		if (r) {
3794			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3795			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3796			goto failed;
3797		}
3798		/* init i2c buses */
3799		if (!amdgpu_device_has_dc_support(adev))
3800			amdgpu_atombios_i2c_init(adev);
3801	}
3802
3803fence_driver_init:
3804	/* Fence driver */
3805	r = amdgpu_fence_driver_sw_init(adev);
3806	if (r) {
3807		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3808		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3809		goto failed;
3810	}
3811
3812	/* init the mode config */
3813	drm_mode_config_init(adev_to_drm(adev));
3814
3815	r = amdgpu_device_ip_init(adev);
3816	if (r) {
3817		/* failed in exclusive mode due to timeout */
3818		if (amdgpu_sriov_vf(adev) &&
3819		    !amdgpu_sriov_runtime(adev) &&
3820		    amdgpu_virt_mmio_blocked(adev) &&
3821		    !amdgpu_virt_wait_reset(adev)) {
3822			dev_err(adev->dev, "VF exclusive mode timeout\n");
3823			/* Don't send request since VF is inactive. */
3824			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3825			adev->virt.ops = NULL;
3826			r = -EAGAIN;
3827			goto release_ras_con;
3828		}
3829		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3830		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3831		goto release_ras_con;
3832	}
3833
3834	amdgpu_fence_driver_hw_init(adev);
3835
3836	dev_info(adev->dev,
3837		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3838			adev->gfx.config.max_shader_engines,
3839			adev->gfx.config.max_sh_per_se,
3840			adev->gfx.config.max_cu_per_sh,
3841			adev->gfx.cu_info.number);
3842
3843	adev->accel_working = true;
3844
3845	amdgpu_vm_check_compute_bug(adev);
3846
3847	/* Initialize the buffer migration limit. */
3848	if (amdgpu_moverate >= 0)
3849		max_MBps = amdgpu_moverate;
3850	else
3851		max_MBps = 8; /* Allow 8 MB/s. */
3852	/* Get a log2 for easy divisions. */
3853	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
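	/*
	 * Worked example: with the default max_MBps = 8, log2_max_MBps = 3,
	 * so "bytes / (MBps * 1024 * 1024)" reduces to a shift, roughly:
	 *
	 *	seconds = bytes >> (20 + adev->mm_stats.log2_max_MBps);
	 *
	 * (the 20 converts MB to bytes; the actual consumer lives in the
	 * CS move-throttling code.)
	 */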
3854
3855	r = amdgpu_pm_sysfs_init(adev);
3856	if (r) {
3857		adev->pm_sysfs_en = false;
3858		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3859	} else
3860		adev->pm_sysfs_en = true;
3861
3862	r = amdgpu_ucode_sysfs_init(adev);
3863	if (r) {
3864		adev->ucode_sysfs_en = false;
3865		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3866	} else
3867		adev->ucode_sysfs_en = true;
3868
3869	r = amdgpu_psp_sysfs_init(adev);
3870	if (r) {
3871		adev->psp_sysfs_en = false;
3872		if (!amdgpu_sriov_vf(adev))
3873			DRM_ERROR("Creating psp sysfs failed\n");
3874	} else
3875		adev->psp_sysfs_en = true;
3876
3877	/*
3878	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3879	 * Otherwise the mgpu fan boost feature will be skipped because the
3880	 * gpu instance count comes up short.
3881	 */
3882	amdgpu_register_gpu_instance(adev);
3883
3884	/* enable clockgating, etc. after ib tests, etc. since some blocks require
3885	 * explicit gating rather than handling it automatically.
3886	 */
3887	if (!adev->gmc.xgmi.pending_reset) {
3888		r = amdgpu_device_ip_late_init(adev);
3889		if (r) {
3890			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3891			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3892			goto release_ras_con;
3893		}
3894		/* must succeed. */
3895		amdgpu_ras_resume(adev);
3896		queue_delayed_work(system_wq, &adev->delayed_init_work,
3897				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3898	}
3899
3900	if (amdgpu_sriov_vf(adev))
3901		flush_delayed_work(&adev->delayed_init_work);
3902
3903	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3904	if (r)
3905		dev_err(adev->dev, "Could not create amdgpu device attr\n");
3906
3907	if (IS_ENABLED(CONFIG_PERF_EVENTS))
3908		r = amdgpu_pmu_init(adev);
3909	if (r)
3910		dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3911
3912	/* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3913	if (amdgpu_device_cache_pci_state(adev->pdev))
3914		pci_restore_state(pdev);
3915
3916	/* if we have more than one VGA card, then disable the amdgpu VGA resources */
3917	/* this will fail for cards that aren't VGA class devices, just
3918	 * ignore it */
3919	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3920		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3921
3922	if (amdgpu_device_supports_px(ddev)) {
3923		px = true;
3924		vga_switcheroo_register_client(adev->pdev,
3925					       &amdgpu_switcheroo_ops, px);
3926		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3927	}
3928
3929	if (adev->gmc.xgmi.pending_reset)
3930		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3931				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3932
3933	amdgpu_device_check_iommu_direct_map(adev);
3934
3935	return 0;
3936
3937release_ras_con:
3938	amdgpu_release_ras_context(adev);
3939
3940failed:
3941	amdgpu_vf_error_trans_all(adev);
3942
3943	return r;
3944}
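/*
 * Illustrative sketch (simplified, not the actual probe code): the PCI
 * probe path is expected to call amdgpu_device_init() roughly like this,
 * where the unwind label is hypothetical:
 *
 *	r = amdgpu_device_init(adev, flags);
 *	if (r)
 *		goto err_put_drm;
 */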
3945
3946static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3947{
3948
3949	/* Clear all CPU mappings pointing to this device */
3950	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3951
3952	/* Unmap all mapped bars - Doorbell, registers and VRAM */
3953	amdgpu_device_doorbell_fini(adev);
3954
3955	iounmap(adev->rmmio);
3956	adev->rmmio = NULL;
3957	if (adev->mman.aper_base_kaddr)
3958		iounmap(adev->mman.aper_base_kaddr);
3959	adev->mman.aper_base_kaddr = NULL;
3960
3961	/* Memory manager related */
3962	if (!adev->gmc.xgmi.connected_to_cpu) {
3963		arch_phys_wc_del(adev->gmc.vram_mtrr);
3964		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3965	}
3966}
3967
3968/**
3969 * amdgpu_device_fini_hw - tear down the driver
3970 *
3971 * @adev: amdgpu_device pointer
3972 *
3973 * Tear down the driver info (all asics).
3974 * Called at driver shutdown.
3975 */
3976void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3977{
3978	dev_info(adev->dev, "amdgpu: finishing device.\n");
3979	flush_delayed_work(&adev->delayed_init_work);
3980	adev->shutdown = true;
3981
3982	/* make sure the IB tests have finished before entering exclusive mode
3983	 * to avoid preemption during the IB tests
3984	 */
3985	if (amdgpu_sriov_vf(adev)) {
3986		amdgpu_virt_request_full_gpu(adev, false);
3987		amdgpu_virt_fini_data_exchange(adev);
3988	}
3989
3990	/* disable all interrupts */
3991	amdgpu_irq_disable_all(adev);
3992	if (adev->mode_info.mode_config_initialized) {
3993		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3994			drm_helper_force_disable_all(adev_to_drm(adev));
3995		else
3996			drm_atomic_helper_shutdown(adev_to_drm(adev));
3997	}
3998	amdgpu_fence_driver_hw_fini(adev);
3999
4000	if (adev->mman.initialized) {
4001		flush_delayed_work(&adev->mman.bdev.wq);
4002		ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
4003	}
4004
4005	if (adev->pm_sysfs_en)
4006		amdgpu_pm_sysfs_fini(adev);
4007	if (adev->ucode_sysfs_en)
4008		amdgpu_ucode_sysfs_fini(adev);
4009	if (adev->psp_sysfs_en)
4010		amdgpu_psp_sysfs_fini(adev);
4011	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4012
4013	/* RAS features must be disabled before hw fini */
4014	amdgpu_ras_pre_fini(adev);
4015
4016	amdgpu_device_ip_fini_early(adev);
4017
4018	amdgpu_irq_fini_hw(adev);
4019
4020	if (adev->mman.initialized)
4021		ttm_device_clear_dma_mappings(&adev->mman.bdev);
4022
4023	amdgpu_gart_dummy_page_fini(adev);
4024
4025	amdgpu_device_unmap_mmio(adev);
4026
4027}
4028
4029void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4030{
4031	int idx;
4032
4033	amdgpu_fence_driver_sw_fini(adev);
4034	amdgpu_device_ip_fini(adev);
4035	release_firmware(adev->firmware.gpu_info_fw);
4036	adev->firmware.gpu_info_fw = NULL;
4037	adev->accel_working = false;
4038	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4039
4040	amdgpu_reset_fini(adev);
4041
4042	/* free i2c buses */
4043	if (!amdgpu_device_has_dc_support(adev))
4044		amdgpu_i2c_fini(adev);
4045
4046	if (amdgpu_emu_mode != 1)
4047		amdgpu_atombios_fini(adev);
4048
4049	kfree(adev->bios);
4050	adev->bios = NULL;
4051	if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4052		vga_switcheroo_unregister_client(adev->pdev);
4053		vga_switcheroo_fini_domain_pm_ops(adev->dev);
4054	}
4055	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4056		vga_client_unregister(adev->pdev);
4057
4058	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4059
4060		iounmap(adev->rmmio);
4061		adev->rmmio = NULL;
4062		amdgpu_device_doorbell_fini(adev);
4063		drm_dev_exit(idx);
4064	}
4065
4066	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4067		amdgpu_pmu_fini(adev);
4068	if (adev->mman.discovery_bin)
4069		amdgpu_discovery_fini(adev);
4070
4071	amdgpu_reset_put_reset_domain(adev->reset_domain);
4072	adev->reset_domain = NULL;
4073
4074	kfree(adev->pci_state);
4075
4076}
4077
4078/**
4079 * amdgpu_device_evict_resources - evict device resources
4080 * @adev: amdgpu device object
4081 *
4082 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4083 * of the vram memory type. Mainly used for evicting device resources
4084 * at suspend time.
4085 *
4086 */
4087static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4088{
4089	int ret;
4090
4091	/* No need to evict vram on APUs for suspend to ram or s2idle */
4092	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4093		return 0;
4094
4095	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4096	if (ret)
4097		DRM_WARN("evicting device resources failed\n");
4098	return ret;
4099}
4100
4101/*
4102 * Suspend & resume.
4103 */
4104/**
4105 * amdgpu_device_suspend - initiate device suspend
4106 *
4107 * @dev: drm dev pointer
4108 * @fbcon: notify the fbdev of suspend
4109 *
4110 * Puts the hw in the suspend state (all asics).
4111 * Returns 0 for success or an error on failure.
4112 * Called at driver suspend.
4113 */
4114int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4115{
4116	struct amdgpu_device *adev = drm_to_adev(dev);
4117	int r = 0;
4118
4119	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4120		return 0;
4121
4122	adev->in_suspend = true;
4123
4124	/* Evict the majority of BOs before grabbing the full access */
4125	r = amdgpu_device_evict_resources(adev);
4126	if (r)
4127		return r;
4128
4129	if (amdgpu_sriov_vf(adev)) {
4130		amdgpu_virt_fini_data_exchange(adev);
4131		r = amdgpu_virt_request_full_gpu(adev, false);
4132		if (r)
4133			return r;
4134	}
4135
4136	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4137		DRM_WARN("smart shift update failed\n");
4138
4139	drm_kms_helper_poll_disable(dev);
4140
4141	if (fbcon)
4142		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4143
4144	cancel_delayed_work_sync(&adev->delayed_init_work);
4145
4146	amdgpu_ras_suspend(adev);
4147
4148	amdgpu_device_ip_suspend_phase1(adev);
4149
4150	if (!adev->in_s0ix)
4151		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4152
4153	r = amdgpu_device_evict_resources(adev);
4154	if (r)
4155		return r;
4156
4157	amdgpu_fence_driver_hw_fini(adev);
4158
4159	amdgpu_device_ip_suspend_phase2(adev);
4160
4161	if (amdgpu_sriov_vf(adev))
4162		amdgpu_virt_release_full_gpu(adev, false);
4163
4164	return 0;
4165}
4166
4167/**
4168 * amdgpu_device_resume - initiate device resume
4169 *
4170 * @dev: drm dev pointer
4171 * @fbcon: notify the fbdev of resume
4172 *
4173 * Bring the hw back to operating state (all asics).
4174 * Returns 0 for success or an error on failure.
4175 * Called at driver resume.
4176 */
4177int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4178{
4179	struct amdgpu_device *adev = drm_to_adev(dev);
4180	int r = 0;
4181
4182	if (amdgpu_sriov_vf(adev)) {
4183		r = amdgpu_virt_request_full_gpu(adev, true);
4184		if (r)
4185			return r;
4186	}
4187
4188	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4189		return 0;
4190
4191	if (adev->in_s0ix)
4192		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4193
4194	/* post card */
4195	if (amdgpu_device_need_post(adev)) {
4196		r = amdgpu_device_asic_init(adev);
4197		if (r)
4198			dev_err(adev->dev, "amdgpu asic init failed\n");
4199	}
4200
4201	r = amdgpu_device_ip_resume(adev);
4202
4203	if (r) {
4204		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4205		goto exit;
4206	}
4207	amdgpu_fence_driver_hw_init(adev);
4208
4209	r = amdgpu_device_ip_late_init(adev);
4210	if (r)
4211		goto exit;
4212
4213	queue_delayed_work(system_wq, &adev->delayed_init_work,
4214			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4215
4216	if (!adev->in_s0ix) {
4217		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4218		if (r)
4219			goto exit;
4220	}
4221
4222exit:
4223	if (amdgpu_sriov_vf(adev)) {
4224		amdgpu_virt_init_data_exchange(adev);
4225		amdgpu_virt_release_full_gpu(adev, true);
4226	}
4227
4228	if (r)
4229		return r;
4230
4231	/* Make sure IB tests flushed */
4232	flush_delayed_work(&adev->delayed_init_work);
4233
4234	if (adev->in_s0ix) {
4235		/* re-enable gfxoff after IP resume. This re-enables gfxoff after
4236		 * it was disabled for IP resume in amdgpu_device_ip_resume_phase2().
4237		 */
4238		amdgpu_gfx_off_ctrl(adev, true);
4239		DRM_DEBUG("will enable gfxoff for the mission mode\n");
4240	}
4241	if (fbcon)
4242		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4243
4244	drm_kms_helper_poll_enable(dev);
4245
4246	amdgpu_ras_resume(adev);
4247
4248	if (adev->mode_info.num_crtc) {
4249		/*
4250		 * Most of the connector probing functions try to acquire runtime pm
4251		 * refs to ensure that the GPU is powered on when connector polling is
4252		 * performed. Since we're calling this from a runtime PM callback,
4253		 * trying to acquire rpm refs will cause us to deadlock.
4254		 *
4255		 * Since we're guaranteed to be holding the rpm lock, it's safe to
4256		 * temporarily disable the rpm helpers so this doesn't deadlock us.
4257		 */
4258#ifdef CONFIG_PM
4259		dev->dev->power.disable_depth++;
4260#endif
4261		if (!adev->dc_enabled)
4262			drm_helper_hpd_irq_event(dev);
4263		else
4264			drm_kms_helper_hotplug_event(dev);
4265#ifdef CONFIG_PM
4266		dev->dev->power.disable_depth--;
4267#endif
4268	}
4269	adev->in_suspend = false;
4270
4271	if (adev->enable_mes)
4272		amdgpu_mes_self_test(adev);
4273
4274	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4275		DRM_WARN("smart shift update failed\n");
4276
4277	return 0;
4278}
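/*
 * Minimal sketch (assumed, simplified) of how the suspend/resume pair is
 * driven from dev_pm_ops-style callbacks; the function names here are
 * illustrative, not the driver's actual PM hooks:
 *
 *	static int example_pmops_suspend(struct device *dev)
 *	{
 *		struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *		return amdgpu_device_suspend(drm_dev, true);
 *	}
 *
 *	static int example_pmops_resume(struct device *dev)
 *	{
 *		struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *		return amdgpu_device_resume(drm_dev, true);
 *	}
 */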
4279
4280/**
4281 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4282 *
4283 * @adev: amdgpu_device pointer
4284 *
4285 * The list of all the hardware IPs that make up the asic is walked and
4286 * the check_soft_reset callbacks are run.  check_soft_reset determines
4287 * if the asic is still hung or not.
4288 * Returns true if any of the IPs are still in a hung state, false if not.
4289 */
4290static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4291{
4292	int i;
4293	bool asic_hang = false;
4294
4295	if (amdgpu_sriov_vf(adev))
4296		return true;
4297
4298	if (amdgpu_asic_need_full_reset(adev))
4299		return true;
4300
4301	for (i = 0; i < adev->num_ip_blocks; i++) {
4302		if (!adev->ip_blocks[i].status.valid)
4303			continue;
4304		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4305			adev->ip_blocks[i].status.hang =
4306				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4307		if (adev->ip_blocks[i].status.hang) {
4308			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4309			asic_hang = true;
4310		}
4311	}
4312	return asic_hang;
4313}
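/*
 * Sketch of the callback shape walked above: an IP block advertises the
 * soft-reset hooks through its amd_ip_funcs table, e.g. (illustrative
 * field subset, handler names hypothetical):
 *
 *	static const struct amd_ip_funcs example_ip_funcs = {
 *		.name = "example_ip",
 *		.check_soft_reset = example_ip_check_soft_reset,
 *		.pre_soft_reset = example_ip_pre_soft_reset,
 *		.soft_reset = example_ip_soft_reset,
 *		.post_soft_reset = example_ip_post_soft_reset,
 *	};
 */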
4314
4315/**
4316 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4317 *
4318 * @adev: amdgpu_device pointer
4319 *
4320 * The list of all the hardware IPs that make up the asic is walked and the
4321 * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4322 * handles any IP specific hardware or software state changes that are
4323 * necessary for a soft reset to succeed.
4324 * Returns 0 on success, negative error code on failure.
4325 */
4326static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4327{
4328	int i, r = 0;
4329
4330	for (i = 0; i < adev->num_ip_blocks; i++) {
4331		if (!adev->ip_blocks[i].status.valid)
4332			continue;
4333		if (adev->ip_blocks[i].status.hang &&
4334		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4335			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4336			if (r)
4337				return r;
4338		}
4339	}
4340
4341	return 0;
4342}
4343
4344/**
4345 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4346 *
4347 * @adev: amdgpu_device pointer
4348 *
4349 * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4350 * reset is necessary to recover.
4351 * Returns true if a full asic reset is required, false if not.
4352 */
4353static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4354{
4355	int i;
4356
4357	if (amdgpu_asic_need_full_reset(adev))
4358		return true;
4359
4360	for (i = 0; i < adev->num_ip_blocks; i++) {
4361		if (!adev->ip_blocks[i].status.valid)
4362			continue;
4363		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4364		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4365		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4366		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4367		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4368			if (adev->ip_blocks[i].status.hang) {
4369				dev_info(adev->dev, "Some blocks need a full reset!\n");
4370				return true;
4371			}
4372		}
4373	}
4374	return false;
4375}
4376
4377/**
4378 * amdgpu_device_ip_soft_reset - do a soft reset
4379 *
4380 * @adev: amdgpu_device pointer
4381 *
4382 * The list of all the hardware IPs that make up the asic is walked and the
4383 * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4384 * IP specific hardware or software state changes that are necessary to soft
4385 * reset the IP.
4386 * Returns 0 on success, negative error code on failure.
4387 */
4388static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4389{
4390	int i, r = 0;
4391
4392	for (i = 0; i < adev->num_ip_blocks; i++) {
4393		if (!adev->ip_blocks[i].status.valid)
4394			continue;
4395		if (adev->ip_blocks[i].status.hang &&
4396		    adev->ip_blocks[i].version->funcs->soft_reset) {
4397			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4398			if (r)
4399				return r;
4400		}
4401	}
4402
4403	return 0;
4404}
4405
4406/**
4407 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4408 *
4409 * @adev: amdgpu_device pointer
4410 *
4411 * The list of all the hardware IPs that make up the asic is walked and the
4412 * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4413 * handles any IP specific hardware or software state changes that are
4414 * necessary after the IP has been soft reset.
4415 * Returns 0 on success, negative error code on failure.
4416 */
4417static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4418{
4419	int i, r = 0;
4420
4421	for (i = 0; i < adev->num_ip_blocks; i++) {
4422		if (!adev->ip_blocks[i].status.valid)
4423			continue;
4424		if (adev->ip_blocks[i].status.hang &&
4425		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4426			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4427		if (r)
4428			return r;
4429	}
4430
4431	return 0;
4432}
4433
4434/**
4435 * amdgpu_device_recover_vram - Recover some VRAM contents
4436 *
4437 * @adev: amdgpu_device pointer
4438 *
4439 * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4440 * restore things like GPUVM page tables after a GPU reset where
4441 * the contents of VRAM might be lost.
4442 *
4443 * Returns:
4444 * 0 on success, negative error code on failure.
4445 */
4446static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4447{
4448	struct dma_fence *fence = NULL, *next = NULL;
4449	struct amdgpu_bo *shadow;
4450	struct amdgpu_bo_vm *vmbo;
4451	long r = 1, tmo;
4452
4453	if (amdgpu_sriov_runtime(adev))
4454		tmo = msecs_to_jiffies(8000);
4455	else
4456		tmo = msecs_to_jiffies(100);
4457
4458	dev_info(adev->dev, "recover vram bo from shadow start\n");
4459	mutex_lock(&adev->shadow_list_lock);
4460	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4461		shadow = &vmbo->bo;
4462		/* No need to recover an evicted BO */
4463		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4464		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4465		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4466			continue;
4467
4468		r = amdgpu_bo_restore_shadow(shadow, &next);
4469		if (r)
4470			break;
4471
4472		if (fence) {
4473			tmo = dma_fence_wait_timeout(fence, false, tmo);
4474			dma_fence_put(fence);
4475			fence = next;
4476			if (tmo == 0) {
4477				r = -ETIMEDOUT;
4478				break;
4479			} else if (tmo < 0) {
4480				r = tmo;
4481				break;
4482			}
4483		} else {
4484			fence = next;
4485		}
4486	}
4487	mutex_unlock(&adev->shadow_list_lock);
4488
4489	if (fence)
4490		tmo = dma_fence_wait_timeout(fence, false, tmo);
4491	dma_fence_put(fence);
4492
4493	if (r < 0 || tmo <= 0) {
4494		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4495		return -EIO;
4496	}
4497
4498	dev_info(adev->dev, "recover vram bo from shadow done\n");
4499	return 0;
4500}
4501
4502
4503/**
4504 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4505 *
4506 * @adev: amdgpu_device pointer
4507 * @from_hypervisor: request from hypervisor
4508 *
4509 * Do VF FLR and reinitialize the ASIC.
4510 * Returns 0 on success, an error code otherwise.
4511 */
4512static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4513				     bool from_hypervisor)
4514{
4515	int r;
4516	struct amdgpu_hive_info *hive = NULL;
4517	int retry_limit = 0;
4518
4519retry:
4520	amdgpu_amdkfd_pre_reset(adev);
4521
4522	if (from_hypervisor)
4523		r = amdgpu_virt_request_full_gpu(adev, true);
4524	else
4525		r = amdgpu_virt_reset_gpu(adev);
4526	if (r)
4527		return r;
4528
4529	/* Resume IP prior to SMC */
4530	r = amdgpu_device_ip_reinit_early_sriov(adev);
4531	if (r)
4532		goto error;
4533
4534	amdgpu_virt_init_data_exchange(adev);
4535
4536	r = amdgpu_device_fw_loading(adev);
4537	if (r)
4538		return r;
4539
4540	/* now we are okay to resume SMC/CP/SDMA */
4541	r = amdgpu_device_ip_reinit_late_sriov(adev);
4542	if (r)
4543		goto error;
4544
4545	hive = amdgpu_get_xgmi_hive(adev);
4546	/* Update PSP FW topology after reset */
4547	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4548		r = amdgpu_xgmi_update_topology(hive, adev);
4549
4550	if (hive)
4551		amdgpu_put_xgmi_hive(hive);
4552
4553	if (!r) {
4554		amdgpu_irq_gpu_reset_resume_helper(adev);
4555		r = amdgpu_ib_ring_tests(adev);
4556
4557		amdgpu_amdkfd_post_reset(adev);
4558	}
4559
4560error:
4561	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4562		amdgpu_inc_vram_lost(adev);
4563		r = amdgpu_device_recover_vram(adev);
4564	}
4565	amdgpu_virt_release_full_gpu(adev, true);
4566
4567	if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4568		if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4569			retry_limit++;
4570			goto retry;
4571		} else
4572			DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4573	}
4574
4575	return r;
4576}
4577
4578/**
4579 * amdgpu_device_has_job_running - check if there is any job in the pending list
4580 *
4581 * @adev: amdgpu_device pointer
4582 *
4583 * Check if there is any job in the pending list of any ring.
4584 */
4585bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4586{
4587	int i;
4588	struct drm_sched_job *job;
4589
4590	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4591		struct amdgpu_ring *ring = adev->rings[i];
4592
4593		if (!ring || !ring->sched.thread)
4594			continue;
4595
4596		spin_lock(&ring->sched.job_list_lock);
4597		job = list_first_entry_or_null(&ring->sched.pending_list,
4598					       struct drm_sched_job, list);
4599		spin_unlock(&ring->sched.job_list_lock);
4600		if (job)
4601			return true;
4602	}
4603	return false;
4604}
4605
4606/**
4607 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4608 *
4609 * @adev: amdgpu_device pointer
4610 *
4611 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4612 * a hung GPU.
4613 */
4614bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4615{
4616
4617	if (amdgpu_gpu_recovery == 0)
4618		goto disabled;
4619
4620	/* Skip soft reset check in fatal error mode */
4621	if (!amdgpu_ras_is_poison_mode_supported(adev))
4622		return true;
4623
4624	if (!amdgpu_device_ip_check_soft_reset(adev)) {
4625		dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4626		return false;
4627	}
4628
4629	if (amdgpu_sriov_vf(adev))
4630		return true;
4631
4632	if (amdgpu_gpu_recovery == -1) {
4633		switch (adev->asic_type) {
4634#ifdef CONFIG_DRM_AMDGPU_SI
4635		case CHIP_VERDE:
4636		case CHIP_TAHITI:
4637		case CHIP_PITCAIRN:
4638		case CHIP_OLAND:
4639		case CHIP_HAINAN:
4640#endif
4641#ifdef CONFIG_DRM_AMDGPU_CIK
4642		case CHIP_KAVERI:
4643		case CHIP_KABINI:
4644		case CHIP_MULLINS:
4645#endif
4646		case CHIP_CARRIZO:
4647		case CHIP_STONEY:
4648		case CHIP_CYAN_SKILLFISH:
4649			goto disabled;
4650		default:
4651			break;
4652		}
4653	}
4654
4655	return true;
4656
4657disabled:
4658	dev_info(adev->dev, "GPU recovery disabled.\n");
4659	return false;
4660}
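/*
 * Usage note (module parameter semantics as consumed above, to the best
 * of our understanding): amdgpu.gpu_recovery=0 disables recovery, =1
 * forces it on, and =-1 (the default) picks the per-ASIC behavior via
 * the switch above, e.g.:
 *
 *	modprobe amdgpu gpu_recovery=1
 */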
4661
4662int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4663{
4664	u32 i;
4665	int ret = 0;
4666
4667	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4668
4669	dev_info(adev->dev, "GPU mode1 reset\n");
4670
4671	/* disable BM */
4672	pci_clear_master(adev->pdev);
4673
4674	amdgpu_device_cache_pci_state(adev->pdev);
4675
4676	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4677		dev_info(adev->dev, "GPU smu mode1 reset\n");
4678		ret = amdgpu_dpm_mode1_reset(adev);
4679	} else {
4680		dev_info(adev->dev, "GPU psp mode1 reset\n");
4681		ret = psp_gpu_reset(adev);
4682	}
4683
4684	if (ret)
4685		dev_err(adev->dev, "GPU mode1 reset failed\n");
4686
4687	amdgpu_device_load_pci_state(adev->pdev);
4688
4689	/* wait for asic to come out of reset */
4690	for (i = 0; i < adev->usec_timeout; i++) {
4691		u32 memsize = adev->nbio.funcs->get_memsize(adev);
4692
4693		if (memsize != 0xffffffff)
4694			break;
4695		udelay(1);
4696	}
4697
4698	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4699	return ret;
4700}
4701
4702int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4703				 struct amdgpu_reset_context *reset_context)
4704{
4705	int i, r = 0;
4706	struct amdgpu_job *job = NULL;
4707	bool need_full_reset =
4708		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4709
4710	if (reset_context->reset_req_dev == adev)
4711		job = reset_context->job;
4712
4713	if (amdgpu_sriov_vf(adev)) {
4714		/* stop the data exchange thread */
4715		amdgpu_virt_fini_data_exchange(adev);
4716	}
4717
4718	amdgpu_fence_driver_isr_toggle(adev, true);
4719
4720	/* block all schedulers and reset given job's ring */
4721	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4722		struct amdgpu_ring *ring = adev->rings[i];
4723
4724		if (!ring || !ring->sched.thread)
4725			continue;
4726
4727		/* clear the job fences from the fence drv to avoid force_completion;
4728		 * leave the NULL and vm flush fences in the fence drv */
4729		amdgpu_fence_driver_clear_job_fences(ring);
4730
4731		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4732		amdgpu_fence_driver_force_completion(ring);
4733	}
4734
4735	amdgpu_fence_driver_isr_toggle(adev, false);
4736
4737	if (job && job->vm)
4738		drm_sched_increase_karma(&job->base);
4739
4740	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4741	/* If reset handler not implemented, continue; otherwise return */
4742	if (r == -ENOSYS)
4743		r = 0;
4744	else
4745		return r;
4746
4747	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4748	if (!amdgpu_sriov_vf(adev)) {
4749
4750		if (!need_full_reset)
4751			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4752
4753		if (!need_full_reset && amdgpu_gpu_recovery) {
4754			amdgpu_device_ip_pre_soft_reset(adev);
4755			r = amdgpu_device_ip_soft_reset(adev);
4756			amdgpu_device_ip_post_soft_reset(adev);
4757			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4758				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4759				need_full_reset = true;
4760			}
4761		}
4762
4763		if (need_full_reset)
4764			r = amdgpu_device_ip_suspend(adev);
4765		if (need_full_reset)
4766			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4767		else
4768			clear_bit(AMDGPU_NEED_FULL_RESET,
4769				  &reset_context->flags);
4770	}
4771
4772	return r;
4773}
4774
4775static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4776{
4777	int i;
4778
4779	lockdep_assert_held(&adev->reset_domain->sem);
4780
4781	for (i = 0; i < adev->num_regs; i++) {
4782		adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4783		trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4784					     adev->reset_dump_reg_value[i]);
4785	}
4786
4787	return 0;
4788}
4789
4790#ifdef CONFIG_DEV_COREDUMP
4791static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4792		size_t count, void *data, size_t datalen)
4793{
4794	struct drm_printer p;
4795	struct amdgpu_device *adev = data;
4796	struct drm_print_iterator iter;
4797	int i;
4798
4799	iter.data = buffer;
4800	iter.offset = 0;
4801	iter.start = offset;
4802	iter.remain = count;
4803
4804	p = drm_coredump_printer(&iter);
4805
4806	drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4807	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4808	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4809	drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4810	if (adev->reset_task_info.pid)
4811		drm_printf(&p, "process_name: %s PID: %d\n",
4812			   adev->reset_task_info.process_name,
4813			   adev->reset_task_info.pid);
4814
4815	if (adev->reset_vram_lost)
4816		drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4817	if (adev->num_regs) {
4818		drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4819
4820		for (i = 0; i < adev->num_regs; i++)
4821			drm_printf(&p, "0x%08x: 0x%08x\n",
4822				   adev->reset_dump_reg_list[i],
4823				   adev->reset_dump_reg_value[i]);
4824	}
4825
4826	return count - iter.remain;
4827}
4828
4829static void amdgpu_devcoredump_free(void *data)
4830{
4831}
4832
4833static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4834{
4835	struct drm_device *dev = adev_to_drm(adev);
4836
4837	ktime_get_ts64(&adev->reset_time);
4838	dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4839		      amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4840}
4841#endif
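/*
 * Usage sketch: once dev_coredumpm() has registered the dump, userspace
 * can fetch and release it through the devcoredump class device (the
 * device index is illustrative):
 *
 *	cat /sys/class/devcoredump/devcd1/data > amdgpu_coredump.txt
 *	echo 1 > /sys/class/devcoredump/devcd1/data   # frees the dump
 */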
4842
4843int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4844			 struct amdgpu_reset_context *reset_context)
4845{
4846	struct amdgpu_device *tmp_adev = NULL;
4847	bool need_full_reset, skip_hw_reset, vram_lost = false;
4848	int r = 0;
4849	bool gpu_reset_for_dev_remove = 0;
4850
4851	/* Try reset handler method first */
4852	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4853				    reset_list);
4854	amdgpu_reset_reg_dumps(tmp_adev);
4855
4856	reset_context->reset_device_list = device_list_handle;
4857	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4858	/* If reset handler not implemented, continue; otherwise return */
4859	if (r == -ENOSYS)
4860		r = 0;
4861	else
4862		return r;
4863
4864	/* Reset handler not implemented, use the default method */
4865	need_full_reset =
4866		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4867	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4868
4869	gpu_reset_for_dev_remove =
4870		test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4871			test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4872
4873	/*
4874	 * ASIC reset has to be done on all XGMI hive nodes ASAP
4875	 * to allow proper link negotiation in the FW (within 1 sec)
4876	 */
4877	if (!skip_hw_reset && need_full_reset) {
4878		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4879			/* For XGMI run all resets in parallel to speed up the process */
4880			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4881				tmp_adev->gmc.xgmi.pending_reset = false;
4882				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4883					r = -EALREADY;
4884			} else
4885				r = amdgpu_asic_reset(tmp_adev);
4886
4887			if (r) {
4888				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4889					 r, adev_to_drm(tmp_adev)->unique);
4890				break;
4891			}
4892		}
4893
4894		/* For XGMI wait for all resets to complete before proceed */
4895		if (!r) {
4896			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4897				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4898					flush_work(&tmp_adev->xgmi_reset_work);
4899					r = tmp_adev->asic_reset_res;
4900					if (r)
4901						break;
4902				}
4903			}
4904		}
4905	}
4906
4907	if (!r && amdgpu_ras_intr_triggered()) {
4908		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4909			if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4910			    tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4911				tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4912		}
4913
4914		amdgpu_ras_intr_cleared();
4915	}
4916
4917	/* Since the mode1 reset affects base ip blocks, the
4918	 * phase1 ip blocks need to be resumed. Otherwise there
4919	 * will be a BIOS signature error and the psp bootloader
4920	 * can't load kdb on the next amdgpu install.
4921	 */
4922	if (gpu_reset_for_dev_remove) {
4923		list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4924			amdgpu_device_ip_resume_phase1(tmp_adev);
4925
4926		goto end;
4927	}
4928
4929	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4930		if (need_full_reset) {
4931			/* post card */
4932			r = amdgpu_device_asic_init(tmp_adev);
4933			if (r) {
4934				dev_warn(tmp_adev->dev, "asic atom init failed!");
4935			} else {
4936				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4937				r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4938				if (r)
4939					goto out;
4940
4941				r = amdgpu_device_ip_resume_phase1(tmp_adev);
4942				if (r)
4943					goto out;
4944
4945				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4946#ifdef CONFIG_DEV_COREDUMP
4947				tmp_adev->reset_vram_lost = vram_lost;
4948				memset(&tmp_adev->reset_task_info, 0,
4949						sizeof(tmp_adev->reset_task_info));
4950				if (reset_context->job && reset_context->job->vm)
4951					tmp_adev->reset_task_info =
4952						reset_context->job->vm->task_info;
4953				amdgpu_reset_capture_coredumpm(tmp_adev);
4954#endif
4955				if (vram_lost) {
4956					DRM_INFO("VRAM is lost due to GPU reset!\n");
4957					amdgpu_inc_vram_lost(tmp_adev);
4958				}
4959
4960				r = amdgpu_device_fw_loading(tmp_adev);
4961				if (r)
4962					return r;
4963
4964				r = amdgpu_device_ip_resume_phase2(tmp_adev);
4965				if (r)
4966					goto out;
4967
4968				if (vram_lost)
4969					amdgpu_device_fill_reset_magic(tmp_adev);
4970
4971				/*
4972				 * Add this ASIC as tracked as reset was already
4973				 * complete successfully.
4974				 */
4975				amdgpu_register_gpu_instance(tmp_adev);
4976
4977				if (!reset_context->hive &&
4978				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4979					amdgpu_xgmi_add_device(tmp_adev);
4980
4981				r = amdgpu_device_ip_late_init(tmp_adev);
4982				if (r)
4983					goto out;
4984
4985				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4986
4987				/*
4988				 * The GPU enters a bad state once the number of
4989				 * faulty pages caused by ECC errors reaches the
4990				 * threshold, and ras recovery is scheduled next.
4991				 * So add a check here to break recovery if the
4992				 * threshold is indeed exceeded, reminding the
4993				 * user to either retire this GPU or set a bigger
4994				 * bad_page_threshold value the next time the
4995				 * driver is probed.
4996				 */
4997				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4998					/* must succeed. */
4999					amdgpu_ras_resume(tmp_adev);
5000				} else {
5001					r = -EINVAL;
5002					goto out;
5003				}
5004
5005				/* Update PSP FW topology after reset */
5006				if (reset_context->hive &&
5007				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5008					r = amdgpu_xgmi_update_topology(
5009						reset_context->hive, tmp_adev);
5010			}
5011		}
5012
5013out:
5014		if (!r) {
5015			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5016			r = amdgpu_ib_ring_tests(tmp_adev);
5017			if (r) {
5018				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5019				need_full_reset = true;
5020				r = -EAGAIN;
5021				goto end;
5022			}
5023		}
5024
5025		if (!r)
5026			r = amdgpu_device_recover_vram(tmp_adev);
5027		else
5028			tmp_adev->asic_reset_res = r;
5029	}
5030
5031end:
5032	if (need_full_reset)
5033		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5034	else
5035		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5036	return r;
5037}
5038
5039static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5040{
5041
5042	switch (amdgpu_asic_reset_method(adev)) {
5043	case AMD_RESET_METHOD_MODE1:
5044		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5045		break;
5046	case AMD_RESET_METHOD_MODE2:
5047		adev->mp1_state = PP_MP1_STATE_RESET;
5048		break;
5049	default:
5050		adev->mp1_state = PP_MP1_STATE_NONE;
5051		break;
5052	}
5053}
5054
5055static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5056{
5057	amdgpu_vf_error_trans_all(adev);
5058	adev->mp1_state = PP_MP1_STATE_NONE;
5059}
5060
5061static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5062{
5063	struct pci_dev *p = NULL;
5064
5065	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5066			adev->pdev->bus->number, 1);
5067	if (p) {
5068		pm_runtime_enable(&(p->dev));
5069		pm_runtime_resume(&(p->dev));
5070	}
5071
5072	pci_dev_put(p);
5073}
5074
5075static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5076{
5077	enum amd_reset_method reset_method;
5078	struct pci_dev *p = NULL;
5079	u64 expires;
5080
5081	/*
5082	 * For now, only BACO and mode1 reset are confirmed to
5083	 * suffer from the audio issue if not properly suspended.
5084	 */
5085	reset_method = amdgpu_asic_reset_method(adev);
5086	if ((reset_method != AMD_RESET_METHOD_BACO) &&
5087	     (reset_method != AMD_RESET_METHOD_MODE1))
5088		return -EINVAL;
5089
5090	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5091			adev->pdev->bus->number, 1);
5092	if (!p)
5093		return -ENODEV;
5094
5095	expires = pm_runtime_autosuspend_expiration(&(p->dev));
5096	if (!expires)
5097		/*
5098		 * If we cannot get the audio device autosuspend delay,
5099		 * a fixed 4s interval will be used. Since 3s is the
5100		 * audio controller's default autosuspend delay setting,
5101		 * the 4s used here is guaranteed to cover that.
5102		 */
5103		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5104
5105	while (!pm_runtime_status_suspended(&(p->dev))) {
5106		if (!pm_runtime_suspend(&(p->dev)))
5107			break;
5108
5109		if (expires < ktime_get_mono_fast_ns()) {
5110			dev_warn(adev->dev, "failed to suspend display audio\n");
5111			pci_dev_put(p);
5112			/* TODO: abort the subsequent gpu reset? */
5113			return -ETIMEDOUT;
5114		}
5115	}
5116
5117	pm_runtime_disable(&(p->dev));
5118
5119	pci_dev_put(p);
5120	return 0;
5121}
5122
5123static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5124{
5125	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5126
5127#if defined(CONFIG_DEBUG_FS)
5128	if (!amdgpu_sriov_vf(adev))
5129		cancel_work(&adev->reset_work);
5130#endif
5131
5132	if (adev->kfd.dev)
5133		cancel_work(&adev->kfd.reset_work);
5134
5135	if (amdgpu_sriov_vf(adev))
5136		cancel_work(&adev->virt.flr_work);
5137
5138	if (con && adev->ras_enabled)
5139		cancel_work(&con->recovery_work);
5140
5141}
5142
5143/**
5144 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5145 *
5146 * @adev: amdgpu_device pointer
5147 * @job: which job triggered the hang
5148 *
5149 * Attempt to reset the GPU if it has hung (all asics).
5150 * Attempts a soft reset or a full reset and reinitializes the ASIC.
5151 * Returns 0 for success or an error on failure.
5152 */
5153
5154int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5155			      struct amdgpu_job *job,
5156			      struct amdgpu_reset_context *reset_context)
5157{
5158	struct list_head device_list, *device_list_handle =  NULL;
5159	bool job_signaled = false;
5160	struct amdgpu_hive_info *hive = NULL;
5161	struct amdgpu_device *tmp_adev = NULL;
5162	int i, r = 0;
5163	bool need_emergency_restart = false;
5164	bool audio_suspended = false;
5165	bool gpu_reset_for_dev_remove = false;
5166
5167	gpu_reset_for_dev_remove =
5168			test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5169				test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5170
5171	/*
5172	 * Special case: RAS triggered and full reset isn't supported
5173	 */
5174	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5175
5176	/*
5177	 * Flush RAM to disk so that after reboot
5178	 * the user can read log and see why the system rebooted.
5179	 */
5180	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5181		DRM_WARN("Emergency reboot.");
5182
5183		ksys_sync_helper();
5184		emergency_restart();
5185	}
5186
5187	dev_info(adev->dev, "GPU %s begin!\n",
5188		need_emergency_restart ? "jobs stop":"reset");
5189
5190	if (!amdgpu_sriov_vf(adev))
5191		hive = amdgpu_get_xgmi_hive(adev);
5192	if (hive)
5193		mutex_lock(&hive->hive_lock);
5194
5195	reset_context->job = job;
5196	reset_context->hive = hive;
5197	/*
5198	 * Build list of devices to reset.
5199	 * In case we are in XGMI hive mode, resort the device list
5200	 * to put adev in the 1st position.
5201	 */
5202	INIT_LIST_HEAD(&device_list);
5203	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5204		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5205			list_add_tail(&tmp_adev->reset_list, &device_list);
5206			if (gpu_reset_for_dev_remove && adev->shutdown)
5207				tmp_adev->shutdown = true;
5208		}
5209		if (!list_is_first(&adev->reset_list, &device_list))
5210			list_rotate_to_front(&adev->reset_list, &device_list);
5211		device_list_handle = &device_list;
5212	} else {
5213		list_add_tail(&adev->reset_list, &device_list);
5214		device_list_handle = &device_list;
5215	}
5216
5217	/* We need to lock reset domain only once both for XGMI and single device */
5218	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5219				    reset_list);
5220	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5221
5222	/* block all schedulers and reset given job's ring */
5223	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5224
5225		amdgpu_device_set_mp1_state(tmp_adev);
5226
5227		/*
5228		 * Try to put the audio codec into suspend state
5229		 * before gpu reset started.
5230		 *
5231		 * Due to the power domain of the graphics device
5232		 * is shared with AZ power domain. Without this,
5233		 * we may change the audio hardware from behind
5234		 * the audio driver's back. That will trigger
5235		 * some audio codec errors.
5236		 */
5237		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5238			audio_suspended = true;
5239
5240		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5241
5242		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5243
5244		if (!amdgpu_sriov_vf(tmp_adev))
5245			amdgpu_amdkfd_pre_reset(tmp_adev);
5246
5247		/*
5248		 * Mark these ASICs to be reset as untracked first,
5249		 * and add them back after the reset has completed.
5250		 */
5251		amdgpu_unregister_gpu_instance(tmp_adev);
5252
5253		drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5254
5255		/* disable ras on ALL IPs */
5256		if (!need_emergency_restart &&
5257		      amdgpu_device_ip_need_full_reset(tmp_adev))
5258			amdgpu_ras_suspend(tmp_adev);
5259
5260		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5261			struct amdgpu_ring *ring = tmp_adev->rings[i];
5262
5263			if (!ring || !ring->sched.thread)
5264				continue;
5265
5266			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5267
5268			if (need_emergency_restart)
5269				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5270		}
5271		atomic_inc(&tmp_adev->gpu_reset_counter);
5272	}
5273
5274	if (need_emergency_restart)
5275		goto skip_sched_resume;
5276
5277	/*
5278	 * Must check guilty signal here since after this point all old
5279	 * HW fences are force signaled.
5280	 *
5281	 * job->base holds a reference to parent fence
5282	 */
5283	if (job && dma_fence_is_signaled(&job->hw_fence)) {
5284		job_signaled = true;
5285		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5286		goto skip_hw_reset;
5287	}
5288
5289retry:	/* Rest of adevs pre asic reset from XGMI hive. */
5290	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5291		if (gpu_reset_for_dev_remove) {
5292			/* Workaround for ASICs that need to disable SMC first */
5293			amdgpu_device_smu_fini_early(tmp_adev);
5294		}
5295		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5296		/* TODO: Should we stop? */
5297		if (r) {
5298			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5299				  r, adev_to_drm(tmp_adev)->unique);
5300			tmp_adev->asic_reset_res = r;
5301		}
5302
5303		/*
5304		 * Drop all pending non scheduler resets. Scheduler resets
5305		 * were already dropped during drm_sched_stop
5306		 */
5307		amdgpu_device_stop_pending_resets(tmp_adev);
5308	}
5309
5310	/* Actual ASIC resets if needed.*/
5311	/* Host driver will handle XGMI hive reset for SRIOV */
5312	if (amdgpu_sriov_vf(adev)) {
5313		r = amdgpu_device_reset_sriov(adev, job ? false : true);
5314		if (r)
5315			adev->asic_reset_res = r;
5316
5317		/* Aldebaran supports ras in SRIOV, so need resume ras during reset */
5318		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5319			amdgpu_ras_resume(adev);
5320	} else {
5321		r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5322		if (r && r == -EAGAIN)
5323			goto retry;
5324
5325		if (!r && gpu_reset_for_dev_remove)
5326			goto recover_end;
5327	}
5328
5329skip_hw_reset:
5330
5331	/* Post ASIC reset for all devs. */
5332	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5333
5334		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5335			struct amdgpu_ring *ring = tmp_adev->rings[i];
5336
5337			if (!ring || !ring->sched.thread)
5338				continue;
5339
5340			drm_sched_start(&ring->sched, true);
5341		}
5342
5343		if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
5344			amdgpu_mes_self_test(tmp_adev);
5345
5346		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5347			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5348		}
5349
5350		if (tmp_adev->asic_reset_res)
5351			r = tmp_adev->asic_reset_res;
5352
5353		tmp_adev->asic_reset_res = 0;
5354
5355		if (r) {
5356			/* bad news, how to tell it to userspace ? */
5357			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5358			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5359		} else {
5360			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5361			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5362				DRM_WARN("smart shift update failed\n");
5363		}
5364	}
5365
5366skip_sched_resume:
5367	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5368		/* unlock kfd: SRIOV would do it separately */
5369		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5370			amdgpu_amdkfd_post_reset(tmp_adev);
5371
5372		/* kfd_post_reset will do nothing if the kfd device is not initialized,
5373		 * so bring up kfd here if it wasn't initialized before
5374		 */
5375		if (!adev->kfd.init_complete)
5376			amdgpu_amdkfd_device_init(adev);
5377
5378		if (audio_suspended)
5379			amdgpu_device_resume_display_audio(tmp_adev);
5380
5381		amdgpu_device_unset_mp1_state(tmp_adev);
5382
5383		amdgpu_ras_set_error_query_ready(tmp_adev, true);
5384	}
5385
5386recover_end:
5387	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5388					    reset_list);
5389	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5390
5391	if (hive) {
5392		mutex_unlock(&hive->hive_lock);
5393		amdgpu_put_xgmi_hive(hive);
5394	}
5395
5396	if (r)
5397		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5398
5399	atomic_set(&adev->reset_domain->reset_res, r);
5400	return r;
5401}
5402
5403/**
5404 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5405 *
5406 * @adev: amdgpu_device pointer
5407 *
5408 * Fetches and stores in the driver the PCIE capabilities (gen speed
5409 * and lanes) of the slot the device is in. Handles APUs and
5410 * virtualized environments where PCIE config space may not be available.
5411 */
5412static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5413{
5414	struct pci_dev *pdev;
5415	enum pci_bus_speed speed_cap, platform_speed_cap;
5416	enum pcie_link_width platform_link_width;
5417
5418	if (amdgpu_pcie_gen_cap)
5419		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5420
5421	if (amdgpu_pcie_lane_cap)
5422		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5423
5424	/* covers APUs as well */
5425	if (pci_is_root_bus(adev->pdev->bus)) {
5426		if (adev->pm.pcie_gen_mask == 0)
5427			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5428		if (adev->pm.pcie_mlw_mask == 0)
5429			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5430		return;
5431	}
5432
5433	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5434		return;
5435
5436	pcie_bandwidth_available(adev->pdev, NULL,
5437				 &platform_speed_cap, &platform_link_width);
5438
5439	if (adev->pm.pcie_gen_mask == 0) {
5440		/* asic caps */
5441		pdev = adev->pdev;
5442		speed_cap = pcie_get_speed_cap(pdev);
5443		if (speed_cap == PCI_SPEED_UNKNOWN) {
5444			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5445						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5446						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5447		} else {
5448			if (speed_cap == PCIE_SPEED_32_0GT)
5449				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5450							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5451							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5452							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5453							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5454			else if (speed_cap == PCIE_SPEED_16_0GT)
5455				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5456							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5457							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5458							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5459			else if (speed_cap == PCIE_SPEED_8_0GT)
5460				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5461							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5462							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5463			else if (speed_cap == PCIE_SPEED_5_0GT)
5464				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5465							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5466			else
5467				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5468		}
5469		/* platform caps */
5470		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5471			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5472						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5473		} else {
5474			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5475				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5476							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5477							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5478							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5479							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5480			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5481				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5482							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5483							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5484							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5485			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5486				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5487							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5488							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5489			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5490				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5491							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5492			else
5493				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5494
5495		}
5496	}
5497	if (adev->pm.pcie_mlw_mask == 0) {
5498		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5499			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5500		} else {
5501			switch (platform_link_width) {
5502			case PCIE_LNK_X32:
5503				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5504							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5505							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5506							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5507							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5508							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5509							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5510				break;
5511			case PCIE_LNK_X16:
5512				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5513							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5514							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5515							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5516							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5517							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5518				break;
5519			case PCIE_LNK_X12:
5520				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5521							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5522							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5523							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5524							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5525				break;
5526			case PCIE_LNK_X8:
5527				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5528							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5529							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5530							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5531				break;
5532			case PCIE_LNK_X4:
5533				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5534							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5535							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5536				break;
5537			case PCIE_LNK_X2:
5538				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5539							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5540				break;
5541			case PCIE_LNK_X1:
5542				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5543				break;
5544			default:
5545				break;
5546			}
5547		}
5548	}
5549}
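/*
 * Illustrative sketch (hypothetical usage, not part of the original file):
 * once the masks above are populated, a consumer can test individual
 * CAIL_* bits, e.g. to check whether both the ASIC and the platform
 * support a Gen4 link:
 *
 *	if ((adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4) &&
 *	    (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4))
 *		; /- a Gen4 link speed may be selected -/
 */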
5550
5551/**
5552 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5553 *
5554 * @adev: amdgpu_device pointer
5555 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5556 *
5557 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5558 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5559 * @peer_adev.
5560 */
5561bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5562				      struct amdgpu_device *peer_adev)
5563{
5564#ifdef CONFIG_HSA_AMD_P2P
5565	uint64_t address_mask = peer_adev->dev->dma_mask ?
5566		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5567	resource_size_t aper_limit =
5568		adev->gmc.aper_base + adev->gmc.aper_size - 1;
5569	bool p2p_access =
5570		!adev->gmc.xgmi.connected_to_cpu &&
5571		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5572
5573	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5574		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5575		!(adev->gmc.aper_base & address_mask ||
5576		  aper_limit & address_mask));
5577#else
5578	return false;
5579#endif
5580}
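/*
 * Illustrative sketch (hypothetical caller): peer-to-peer DMA is only
 * safe when each device can reach the other's BAR, so a caller would
 * typically test both directions before enabling P2P:
 *
 *	bool p2p_ok = amdgpu_device_is_peer_accessible(adev, peer_adev) &&
 *		      amdgpu_device_is_peer_accessible(peer_adev, adev);
 */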
5581
5582int amdgpu_device_baco_enter(struct drm_device *dev)
5583{
5584	struct amdgpu_device *adev = drm_to_adev(dev);
5585	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5586
5587	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5588		return -ENOTSUPP;
5589
5590	if (ras && adev->ras_enabled &&
5591	    adev->nbio.funcs->enable_doorbell_interrupt)
5592		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5593
5594	return amdgpu_dpm_baco_enter(adev);
5595}
5596
5597int amdgpu_device_baco_exit(struct drm_device *dev)
5598{
5599	struct amdgpu_device *adev = drm_to_adev(dev);
5600	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5601	int ret = 0;
5602
5603	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5604		return -ENOTSUPP;
5605
5606	ret = amdgpu_dpm_baco_exit(adev);
5607	if (ret)
5608		return ret;
5609
5610	if (ras && adev->ras_enabled &&
5611	    adev->nbio.funcs->enable_doorbell_interrupt)
5612		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5613
5614	if (amdgpu_passthrough(adev) &&
5615	    adev->nbio.funcs->clear_doorbell_interrupt)
5616		adev->nbio.funcs->clear_doorbell_interrupt(adev);
5617
5618	return 0;
5619}
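/*
 * Illustrative sketch (hypothetical call site): BACO enter/exit are used
 * as a pair around a low-power period, e.g. from runtime PM:
 *
 *	r = amdgpu_device_baco_enter(dev);
 *	if (r)
 *		return r;
 *	... device sits in BACO (Bus Active, Chip Off) ...
 *	r = amdgpu_device_baco_exit(dev);
 */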
5620
5621/**
5622 * amdgpu_pci_error_detected - Called when a PCI error is detected.
5623 * @pdev: PCI device struct
5624 * @state: PCI channel state
5625 *
5626 * Description: Called when a PCI error is detected.
5627 *
5628 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5629 */
5630pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5631{
5632	struct drm_device *dev = pci_get_drvdata(pdev);
5633	struct amdgpu_device *adev = drm_to_adev(dev);
5634	int i;
5635
5636	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5637
5638	if (adev->gmc.xgmi.num_physical_nodes > 1) {
5639		DRM_WARN("No support for XGMI hive yet...");
5640		return PCI_ERS_RESULT_DISCONNECT;
5641	}
5642
5643	adev->pci_channel_state = state;
5644
5645	switch (state) {
5646	case pci_channel_io_normal:
5647		return PCI_ERS_RESULT_CAN_RECOVER;
5648	/* Fatal error, prepare for slot reset */
5649	case pci_channel_io_frozen:
5650		/*
5651		 * Locking adev->reset_domain->sem will prevent any external access
5652		 * to GPU during PCI error recovery
5653		 */
5654		amdgpu_device_lock_reset_domain(adev->reset_domain);
5655		amdgpu_device_set_mp1_state(adev);
5656
5657		/*
5658		 * Block any work scheduling as we do for regular GPU reset
5659		 * for the duration of the recovery
5660		 */
5661		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5662			struct amdgpu_ring *ring = adev->rings[i];
5663
5664			if (!ring || !ring->sched.thread)
5665				continue;
5666
5667			drm_sched_stop(&ring->sched, NULL);
5668		}
5669		atomic_inc(&adev->gpu_reset_counter);
5670		return PCI_ERS_RESULT_NEED_RESET;
5671	case pci_channel_io_perm_failure:
5672		/* Permanent error, prepare for device removal */
5673		return PCI_ERS_RESULT_DISCONNECT;
5674	}
5675
5676	return PCI_ERS_RESULT_NEED_RESET;
5677}
5678
5679/**
5680 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5681 * @pdev: pointer to PCI device
5682 */
5683pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5684{
5685
5686	DRM_INFO("PCI error: mmio enabled callback!!\n");
5687
5688	/* TODO - dump whatever for debugging purposes */
5689
5690	/* This is called only if amdgpu_pci_error_detected returns
5691	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5692	 * works, no need to reset slot.
5693	 */
5694
5695	return PCI_ERS_RESULT_RECOVERED;
5696}
5697
5698/**
5699 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5700 * @pdev: PCI device struct
5701 *
5702 * Description: This routine is called by the pci error recovery
5703 * code after the PCI slot has been reset, just before we
5704 * should resume normal operations.
5705 */
5706pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5707{
5708	struct drm_device *dev = pci_get_drvdata(pdev);
5709	struct amdgpu_device *adev = drm_to_adev(dev);
5710	int r, i;
5711	struct amdgpu_reset_context reset_context;
5712	u32 memsize;
5713	struct list_head device_list;
5714
5715	DRM_INFO("PCI error: slot reset callback!!\n");
5716
5717	memset(&reset_context, 0, sizeof(reset_context));
5718
5719	INIT_LIST_HEAD(&device_list);
5720	list_add_tail(&adev->reset_list, &device_list);
5721
5722	/* wait for asic to come out of reset */
5723	msleep(500);
5724
5725	/* Restore PCI confspace */
5726	amdgpu_device_load_pci_state(pdev);
5727
5728	/* confirm ASIC came out of reset */
5729	for (i = 0; i < adev->usec_timeout; i++) {
5730		memsize = amdgpu_asic_get_config_memsize(adev);
5731
5732		if (memsize != 0xffffffff)
5733			break;
5734		udelay(1);
5735	}
5736	if (memsize == 0xffffffff) {
5737		r = -ETIME;
5738		goto out;
5739	}
5740
5741	reset_context.method = AMD_RESET_METHOD_NONE;
5742	reset_context.reset_req_dev = adev;
5743	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5744	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5745
5746	adev->no_hw_access = true;
5747	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5748	adev->no_hw_access = false;
5749	if (r)
5750		goto out;
5751
5752	r = amdgpu_do_asic_reset(&device_list, &reset_context);
5753
5754out:
5755	if (!r) {
5756		if (amdgpu_device_cache_pci_state(adev->pdev))
5757			pci_restore_state(adev->pdev);
5758
5759		DRM_INFO("PCIe error recovery succeeded\n");
5760	} else {
5761		DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
5762		amdgpu_device_unset_mp1_state(adev);
5763		amdgpu_device_unlock_reset_domain(adev->reset_domain);
5764	}
5765
5766	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5767}
5768
5769/**
5770 * amdgpu_pci_resume() - resume normal ops after PCI reset
5771 * @pdev: pointer to PCI device
5772 *
5773 * Called when the error recovery driver tells us that it's
5774 * OK to resume normal operation.
5775 */
5776void amdgpu_pci_resume(struct pci_dev *pdev)
5777{
5778	struct drm_device *dev = pci_get_drvdata(pdev);
5779	struct amdgpu_device *adev = drm_to_adev(dev);
5780	int i;
5781
5782
5783	DRM_INFO("PCI error: resume callback!!\n");
5784
5785	/* Only continue execution for the case of pci_channel_io_frozen */
5786	if (adev->pci_channel_state != pci_channel_io_frozen)
5787		return;
5788
5789	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5790		struct amdgpu_ring *ring = adev->rings[i];
5791
5792		if (!ring || !ring->sched.thread)
5793			continue;
5794
5795		drm_sched_start(&ring->sched, true);
5796	}
5797
5798	amdgpu_device_unset_mp1_state(adev);
5799	amdgpu_device_unlock_reset_domain(adev->reset_domain);
5800}
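/*
 * For reference, the four PCI error callbacks above are handed to the
 * PCI core through a struct pci_error_handlers (wired up in amdgpu_drv.c;
 * shown here only as a sketch):
 *
 *	static struct pci_error_handlers amdgpu_pci_err_handler = {
 *		.error_detected	= amdgpu_pci_error_detected,
 *		.mmio_enabled	= amdgpu_pci_mmio_enabled,
 *		.slot_reset	= amdgpu_pci_slot_reset,
 *		.resume		= amdgpu_pci_resume,
 *	};
 */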
5801
5802bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5803{
5804	struct drm_device *dev = pci_get_drvdata(pdev);
5805	struct amdgpu_device *adev = drm_to_adev(dev);
5806	int r;
5807
5808	r = pci_save_state(pdev);
5809	if (!r) {
5810		kfree(adev->pci_state);
5811
5812		adev->pci_state = pci_store_saved_state(pdev);
5813
5814		if (!adev->pci_state) {
5815			DRM_ERROR("Failed to store PCI saved state\n");
5816			return false;
5817		}
5818	} else {
5819		DRM_WARN("Failed to save PCI state, err:%d\n", r);
5820		return false;
5821	}
5822
5823	return true;
5824}
5825
5826bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5827{
5828	struct drm_device *dev = pci_get_drvdata(pdev);
5829	struct amdgpu_device *adev = drm_to_adev(dev);
5830	int r;
5831
5832	if (!adev->pci_state)
5833		return false;
5834
5835	r = pci_load_saved_state(pdev, adev->pci_state);
5836
5837	if (!r) {
5838		pci_restore_state(pdev);
5839	} else {
5840		DRM_WARN("Failed to load PCI state, err:%d\n", r);
5841		return false;
5842	}
5843
5844	return true;
5845}
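/*
 * Illustrative sketch: the two helpers above act as a pair around events
 * that clobber PCI config space (hypothetical call site):
 *
 *	amdgpu_device_cache_pci_state(adev->pdev);	before the reset
 *	... ASIC reset ...
 *	amdgpu_device_load_pci_state(adev->pdev);	after the reset
 */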
5846
5847void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5848		struct amdgpu_ring *ring)
5849{
5850#ifdef CONFIG_X86_64
5851	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5852		return;
5853#endif
5854	if (adev->gmc.xgmi.connected_to_cpu)
5855		return;
5856
5857	if (ring && ring->funcs->emit_hdp_flush)
5858		amdgpu_ring_emit_hdp_flush(ring);
5859	else
5860		amdgpu_asic_flush_hdp(adev, ring);
5861}
5862
5863void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5864		struct amdgpu_ring *ring)
5865{
5866#ifdef CONFIG_X86_64
5867	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5868		return;
5869#endif
5870	if (adev->gmc.xgmi.connected_to_cpu)
5871		return;
5872
5873	amdgpu_asic_invalidate_hdp(adev, ring);
5874}
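/*
 * Illustrative note: the HDP (Host Data Path) block caches host accesses
 * to VRAM, so coherency has to be managed explicitly. A typical
 * (hypothetical) pattern is flush after CPU writes, invalidate before
 * CPU reads:
 *
 *	... CPU fills a GPU-visible buffer ...
 *	amdgpu_device_flush_hdp(adev, NULL);
 *	... GPU writes results ...
 *	amdgpu_device_invalidate_hdp(adev, NULL);
 *	... CPU reads the results back ...
 */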
5875
5876int amdgpu_in_reset(struct amdgpu_device *adev)
5877{
5878	return atomic_read(&adev->reset_domain->in_gpu_reset);
5879}
5880
5881/**
5882 * amdgpu_device_halt() - bring hardware to some kind of halt state
5883 *
5884 * @adev: amdgpu_device pointer
5885 *
5886 * Bring hardware to some kind of halt state so that no one can touch it
5887 * any more. This helps to preserve the error context when an error occurs.
5888 * Compared to a simple hang, the system will stay stable at least for SSH
5889 * access. Then it should be trivial to inspect the hardware state and
5890 * see what's going on. Implemented as follows:
5891 *
5892 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc.),
5893 *    clears all CPU mappings to the device, disallows remappings through page faults
5894 * 2. amdgpu_irq_disable_all() disables all interrupts
5895 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5896 * 4. set adev->no_hw_access to avoid potential crashes after step 5
5897 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5898 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5899 *    flush any in flight DMA operations
5900 */
5901void amdgpu_device_halt(struct amdgpu_device *adev)
5902{
5903	struct pci_dev *pdev = adev->pdev;
5904	struct drm_device *ddev = adev_to_drm(adev);
5905
5906	drm_dev_unplug(ddev);
5907
5908	amdgpu_irq_disable_all(adev);
5909
5910	amdgpu_fence_driver_hw_fini(adev);
5911
5912	adev->no_hw_access = true;
5913
5914	amdgpu_device_unmap_mmio(adev);
5915
5916	pci_disable_device(pdev);
5917	pci_wait_for_pending_transaction(pdev);
5918}
5919
5920u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5921				u32 reg)
5922{
5923	unsigned long flags, address, data;
5924	u32 r;
5925
5926	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5927	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5928
5929	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5930	WREG32(address, reg * 4);
5931	(void)RREG32(address);
5932	r = RREG32(data);
5933	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5934	return r;
5935}
5936
5937void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5938				u32 reg, u32 v)
5939{
5940	unsigned long flags, address, data;
5941
5942	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5943	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5944
5945	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5946	WREG32(address, reg * 4);
5947	(void)RREG32(address);
5948	WREG32(data, v);
5949	(void)RREG32(data);
5950	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5951}
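/*
 * Illustrative note: the two helpers above implement the classic
 * index/data register-pair pattern: the port register offset is written
 * to the index register, then the value moves through the data register,
 * all under pcie_idx_lock so the pair stays atomic. The dummy
 * RREG32(address) read-back orders the index write before the data
 * access. Hypothetical usage:
 *
 *	u32 val = amdgpu_device_pcie_port_rreg(adev, reg);
 *	amdgpu_device_pcie_port_wreg(adev, reg, val | some_bit);
 */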
5952
5953/**
5954 * amdgpu_device_switch_gang - switch to a new gang
5955 * @adev: amdgpu_device pointer
5956 * @gang: the gang to switch to
5957 *
5958 * Try to switch to a new gang.
5959 * Returns: NULL if we switched to the new gang or a reference to the current
5960 * gang leader.
5961 */
5962struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
5963					    struct dma_fence *gang)
5964{
5965	struct dma_fence *old = NULL;
5966
5967	do {
5968		dma_fence_put(old);
5969		rcu_read_lock();
5970		old = dma_fence_get_rcu_safe(&adev->gang_submit);
5971		rcu_read_unlock();
5972
5973		if (old == gang)
5974			break;
5975
5976		if (!dma_fence_is_signaled(old))
5977			return old;
5978
5979	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
5980			 old, gang) != old);
5981
5982	dma_fence_put(old);
5983	return NULL;
5984}
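/*
 * Illustrative sketch (hypothetical caller): submission code retries the
 * switch until the previous gang leader has signaled, dropping the
 * reference the helper returns on each iteration:
 *
 *	while ((old = amdgpu_device_switch_gang(adev, new_gang))) {
 *		dma_fence_wait(old, false);
 *		dma_fence_put(old);
 *	}
 */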
5985
5986bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
5987{
5988	switch (adev->asic_type) {
5989#ifdef CONFIG_DRM_AMDGPU_SI
5990	case CHIP_HAINAN:
5991#endif
5992	case CHIP_TOPAZ:
5993		/* chips with no display hardware */
5994		return false;
5995#ifdef CONFIG_DRM_AMDGPU_SI
5996	case CHIP_TAHITI:
5997	case CHIP_PITCAIRN:
5998	case CHIP_VERDE:
5999	case CHIP_OLAND:
6000#endif
6001#ifdef CONFIG_DRM_AMDGPU_CIK
6002	case CHIP_BONAIRE:
6003	case CHIP_HAWAII:
6004	case CHIP_KAVERI:
6005	case CHIP_KABINI:
6006	case CHIP_MULLINS:
6007#endif
6008	case CHIP_TONGA:
6009	case CHIP_FIJI:
6010	case CHIP_POLARIS10:
6011	case CHIP_POLARIS11:
6012	case CHIP_POLARIS12:
6013	case CHIP_VEGAM:
6014	case CHIP_CARRIZO:
6015	case CHIP_STONEY:
6016		/* chips with display hardware */
6017		return true;
6018	default:
6019		/* IP discovery */
6020		if (!adev->ip_versions[DCE_HWIP][0] ||
6021		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6022			return false;
6023		return true;
6024	}
6025}
v5.4
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
  28#include <linux/power_supply.h>
  29#include <linux/kthread.h>
  30#include <linux/module.h>
  31#include <linux/console.h>
  32#include <linux/slab.h>
 
 
 
 
 
  33
 
  34#include <drm/drm_atomic_helper.h>
 
  35#include <drm/drm_probe_helper.h>
  36#include <drm/amdgpu_drm.h>
  37#include <linux/vgaarb.h>
  38#include <linux/vga_switcheroo.h>
  39#include <linux/efi.h>
  40#include "amdgpu.h"
  41#include "amdgpu_trace.h"
  42#include "amdgpu_i2c.h"
  43#include "atom.h"
  44#include "amdgpu_atombios.h"
  45#include "amdgpu_atomfirmware.h"
  46#include "amd_pcie.h"
  47#ifdef CONFIG_DRM_AMDGPU_SI
  48#include "si.h"
  49#endif
  50#ifdef CONFIG_DRM_AMDGPU_CIK
  51#include "cik.h"
  52#endif
  53#include "vi.h"
  54#include "soc15.h"
  55#include "nv.h"
  56#include "bif/bif_4_1_d.h"
  57#include <linux/pci.h>
  58#include <linux/firmware.h>
  59#include "amdgpu_vf_error.h"
  60
  61#include "amdgpu_amdkfd.h"
  62#include "amdgpu_pm.h"
  63
  64#include "amdgpu_xgmi.h"
  65#include "amdgpu_ras.h"
  66#include "amdgpu_pmu.h"
 
 
 
 
 
 
 
 
  67
  68MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
  69MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
  70MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
  71MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
  72MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
  73MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
  74MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
  75MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
  76MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
  77MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
  78
  79#define AMDGPU_RESUME_MS		2000
 
 
 
 
  80
  81static const char *amdgpu_asic_name[] = {
  82	"TAHITI",
  83	"PITCAIRN",
  84	"VERDE",
  85	"OLAND",
  86	"HAINAN",
  87	"BONAIRE",
  88	"KAVERI",
  89	"KABINI",
  90	"HAWAII",
  91	"MULLINS",
  92	"TOPAZ",
  93	"TONGA",
  94	"FIJI",
  95	"CARRIZO",
  96	"STONEY",
  97	"POLARIS10",
  98	"POLARIS11",
  99	"POLARIS12",
 100	"VEGAM",
 101	"VEGA10",
 102	"VEGA12",
 103	"VEGA20",
 104	"RAVEN",
 105	"ARCTURUS",
 106	"RENOIR",
 
 107	"NAVI10",
 
 108	"NAVI14",
 109	"NAVI12",
 
 
 
 
 
 
 
 110	"LAST",
 111};
 112
 113/**
 114 * DOC: pcie_replay_count
 115 *
 116 * The amdgpu driver provides a sysfs API for reporting the total number
 117 * of PCIe replays (NAKs)
 118 * The file pcie_replay_count is used for this and returns the total
 119 * number of replays as a sum of the NAKs generated and NAKs received
 120 */
 121
 122static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
 123		struct device_attribute *attr, char *buf)
 124{
 125	struct drm_device *ddev = dev_get_drvdata(dev);
 126	struct amdgpu_device *adev = ddev->dev_private;
 127	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
 128
 129	return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
 130}
 131
 132static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
 133		amdgpu_device_get_pcie_replay_count, NULL);
 134
 135static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 136
 137/**
 138 * amdgpu_device_is_px - Is the device is a dGPU with HG/PX power control
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 139 *
 140 * @dev: drm_device pointer
 141 *
 142 * Returns true if the device is a dGPU with HG/PX power control,
 143 * otherwise return false.
 144 */
 145bool amdgpu_device_is_px(struct drm_device *dev)
 146{
 147	struct amdgpu_device *adev = dev->dev_private;
 148
 149	if (adev->flags & AMD_IS_PX)
 
 150		return true;
 151	return false;
 152}
 153
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 154/*
 155 * MMIO register access helper functions.
 156 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 157/**
 158 * amdgpu_mm_rreg - read a memory mapped IO register
 159 *
 160 * @adev: amdgpu_device pointer
 161 * @reg: dword aligned register offset
 162 * @acc_flags: access flags which require special behavior
 163 *
 164 * Returns the 32 bit value from the offset specified.
 165 */
 166uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 167			uint32_t acc_flags)
 168{
 169	uint32_t ret;
 170
 171	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
 172		return amdgpu_virt_kiq_rreg(adev, reg);
 173
 174	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
 175		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 176	else {
 177		unsigned long flags;
 178
 179		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 180		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
 181		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 182		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 
 
 183	}
 184	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
 
 
 185	return ret;
 186}
 187
 188/*
 189 * MMIO register read with bytes helper functions
 190 * @offset:bytes offset from MMIO start
 191 *
 192*/
 193
 194/**
 195 * amdgpu_mm_rreg8 - read a memory mapped IO register
 196 *
 197 * @adev: amdgpu_device pointer
 198 * @offset: byte aligned register offset
 199 *
 200 * Returns the 8 bit value from the offset specified.
 201 */
 202uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
 
 
 
 
 203	if (offset < adev->rmmio_size)
 204		return (readb(adev->rmmio + offset));
 205	BUG();
 206}
 207
 208/*
 209 * MMIO register write with bytes helper functions
 210 * @offset:bytes offset from MMIO start
 211 * @value: the value want to be written to the register
 212 *
 213*/
 214/**
 215 * amdgpu_mm_wreg8 - read a memory mapped IO register
 216 *
 217 * @adev: amdgpu_device pointer
 218 * @offset: byte aligned register offset
 219 * @value: 8 bit value to write
 220 *
 221 * Writes the value specified to the offset specified.
 222 */
 223void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
 
 
 
 
 224	if (offset < adev->rmmio_size)
 225		writeb(value, adev->rmmio + offset);
 226	else
 227		BUG();
 228}
 229
 230/**
 231 * amdgpu_mm_wreg - write to a memory mapped IO register
 232 *
 233 * @adev: amdgpu_device pointer
 234 * @reg: dword aligned register offset
 235 * @v: 32 bit value to write to the register
 236 * @acc_flags: access flags which require special behavior
 237 *
 238 * Writes the value specified to the offset specified.
 239 */
 240void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 241		    uint32_t acc_flags)
 
 242{
 243	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 
 244
 245	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
 246		adev->last_mm_index = v;
 247	}
 248
 249	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
 250		return amdgpu_virt_kiq_wreg(adev, reg, v);
 251
 252	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
 253		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 254	else {
 255		unsigned long flags;
 256
 257		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 258		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
 259		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 260		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 261	}
 262
 263	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
 264		udelay(500);
 265	}
 266}
 267
 268/**
 269 * amdgpu_io_rreg - read an IO register
 270 *
 271 * @adev: amdgpu_device pointer
 272 * @reg: dword aligned register offset
 
 273 *
 274 * Returns the 32 bit value from the offset specified.
 275 */
 276u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
 
 277{
 278	if ((reg * 4) < adev->rio_mem_size)
 279		return ioread32(adev->rio_mem + (reg * 4));
 280	else {
 281		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
 282		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
 283	}
 284}
 285
 286/**
 287 * amdgpu_io_wreg - write to an IO register
 288 *
 289 * @adev: amdgpu_device pointer
 290 * @reg: dword aligned register offset
 291 * @v: 32 bit value to write to the register
 292 *
 293 * Writes the value specified to the offset specified.
 294 */
 295void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 296{
 297	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
 298		adev->last_mm_index = v;
 299	}
 300
 301	if ((reg * 4) < adev->rio_mem_size)
 302		iowrite32(v, adev->rio_mem + (reg * 4));
 303	else {
 304		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
 305		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
 306	}
 307
 308	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
 309		udelay(500);
 310	}
 311}
 312
 313/**
 314 * amdgpu_mm_rdoorbell - read a doorbell dword
 315 *
 316 * @adev: amdgpu_device pointer
 317 * @index: doorbell index
 318 *
 319 * Returns the value in the doorbell aperture at the
 320 * requested doorbell index (CIK).
 321 */
 322u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
 323{
 
 
 
 324	if (index < adev->doorbell.num_doorbells) {
 325		return readl(adev->doorbell.ptr + index);
 326	} else {
 327		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 328		return 0;
 329	}
 330}
 331
 332/**
 333 * amdgpu_mm_wdoorbell - write a doorbell dword
 334 *
 335 * @adev: amdgpu_device pointer
 336 * @index: doorbell index
 337 * @v: value to write
 338 *
 339 * Writes @v to the doorbell aperture at the
 340 * requested doorbell index (CIK).
 341 */
 342void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
 343{
 
 
 
 344	if (index < adev->doorbell.num_doorbells) {
 345		writel(v, adev->doorbell.ptr + index);
 346	} else {
 347		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 348	}
 349}
 350
 351/**
 352 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 353 *
 354 * @adev: amdgpu_device pointer
 355 * @index: doorbell index
 356 *
 357 * Returns the value in the doorbell aperture at the
 358 * requested doorbell index (VEGA10+).
 359 */
 360u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
 361{
 
 
 
 362	if (index < adev->doorbell.num_doorbells) {
 363		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
 364	} else {
 365		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 366		return 0;
 367	}
 368}
 369
 370/**
 371 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 372 *
 373 * @adev: amdgpu_device pointer
 374 * @index: doorbell index
 375 * @v: value to write
 376 *
 377 * Writes @v to the doorbell aperture at the
 378 * requested doorbell index (VEGA10+).
 379 */
 380void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
 381{
 
 
 
 382	if (index < adev->doorbell.num_doorbells) {
 383		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
 384	} else {
 385		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 386	}
 387}
 388
 389/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 390 * amdgpu_invalid_rreg - dummy reg read function
 391 *
 392 * @adev: amdgpu device pointer
 393 * @reg: offset of register
 394 *
 395 * Dummy register read function.  Used for register blocks
 396 * that certain asics don't have (all asics).
 397 * Returns the value in the register.
 398 */
 399static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
 400{
 401	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
 402	BUG();
 403	return 0;
 404}
 405
 406/**
 407 * amdgpu_invalid_wreg - dummy reg write function
 408 *
 409 * @adev: amdgpu device pointer
 410 * @reg: offset of register
 411 * @v: value to write to the register
 412 *
 413 * Dummy register read function.  Used for register blocks
 414 * that certain asics don't have (all asics).
 415 */
 416static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 417{
 418	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
 419		  reg, v);
 420	BUG();
 421}
 422
 423/**
 424 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 425 *
 426 * @adev: amdgpu device pointer
 427 * @reg: offset of register
 428 *
 429 * Dummy register read function.  Used for register blocks
 430 * that certain asics don't have (all asics).
 431 * Returns the value in the register.
 432 */
 433static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
 434{
 435	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
 436	BUG();
 437	return 0;
 438}
 439
 440/**
 441 * amdgpu_invalid_wreg64 - dummy reg write function
 442 *
 443 * @adev: amdgpu device pointer
 444 * @reg: offset of register
 445 * @v: value to write to the register
 446 *
 447 * Dummy register read function.  Used for register blocks
 448 * that certain asics don't have (all asics).
 449 */
 450static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
 451{
 452	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
 453		  reg, v);
 454	BUG();
 455}
 456
 457/**
 458 * amdgpu_block_invalid_rreg - dummy reg read function
 459 *
 460 * @adev: amdgpu device pointer
 461 * @block: offset of instance
 462 * @reg: offset of register
 463 *
 464 * Dummy register read function.  Used for register blocks
 465 * that certain asics don't have (all asics).
 466 * Returns the value in the register.
 467 */
 468static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
 469					  uint32_t block, uint32_t reg)
 470{
 471	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
 472		  reg, block);
 473	BUG();
 474	return 0;
 475}
 476
 477/**
 478 * amdgpu_block_invalid_wreg - dummy reg write function
 479 *
 480 * @adev: amdgpu device pointer
 481 * @block: offset of instance
 482 * @reg: offset of register
 483 * @v: value to write to the register
 484 *
 485 * Dummy register read function.  Used for register blocks
 486 * that certain asics don't have (all asics).
 487 */
 488static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
 489				      uint32_t block,
 490				      uint32_t reg, uint32_t v)
 491{
 492	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
 493		  reg, block, v);
 494	BUG();
 495}
 496
 497/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 498 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 499 *
 500 * @adev: amdgpu device pointer
 501 *
 502 * Allocates a scratch page of VRAM for use by various things in the
 503 * driver.
 504 */
 505static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
 506{
 507	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
 508				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
 509				       &adev->vram_scratch.robj,
 510				       &adev->vram_scratch.gpu_addr,
 511				       (void **)&adev->vram_scratch.ptr);
 512}
 513
 514/**
 515 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 516 *
 517 * @adev: amdgpu device pointer
 518 *
 519 * Frees the VRAM scratch page.
 520 */
 521static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
 522{
 523	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
 524}
 525
 526/**
 527 * amdgpu_device_program_register_sequence - program an array of registers.
 528 *
 529 * @adev: amdgpu_device pointer
 530 * @registers: pointer to the register array
 531 * @array_size: size of the register array
 532 *
 533 * Programs an array or registers with and and or masks.
 534 * This is a helper for setting golden registers.
 535 */
 536void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
 537					     const u32 *registers,
 538					     const u32 array_size)
 539{
 540	u32 tmp, reg, and_mask, or_mask;
 541	int i;
 542
 543	if (array_size % 3)
 544		return;
 545
 546	for (i = 0; i < array_size; i +=3) {
 547		reg = registers[i + 0];
 548		and_mask = registers[i + 1];
 549		or_mask = registers[i + 2];
 550
 551		if (and_mask == 0xffffffff) {
 552			tmp = or_mask;
 553		} else {
 554			tmp = RREG32(reg);
 555			tmp &= ~and_mask;
 556			if (adev->family >= AMDGPU_FAMILY_AI)
 557				tmp |= (or_mask & and_mask);
 558			else
 559				tmp |= or_mask;
 560		}
 561		WREG32(reg, tmp);
 562	}
 563}
 564
 565/**
 566 * amdgpu_device_pci_config_reset - reset the GPU
 567 *
 568 * @adev: amdgpu_device pointer
 569 *
 570 * Resets the GPU using the pci config reset sequence.
 571 * Only applicable to asics prior to vega10.
 572 */
 573void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
 574{
 575	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
 576}
 577
 
 
 
 
 
 
 
 
 
 
 
 
 578/*
 579 * GPU doorbell aperture helpers function.
 580 */
 581/**
 582 * amdgpu_device_doorbell_init - Init doorbell driver information.
 583 *
 584 * @adev: amdgpu_device pointer
 585 *
 586 * Init doorbell driver information (CIK)
 587 * Returns 0 on success, error on failure.
 588 */
 589static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
 590{
 591
 592	/* No doorbell on SI hardware generation */
 593	if (adev->asic_type < CHIP_BONAIRE) {
 594		adev->doorbell.base = 0;
 595		adev->doorbell.size = 0;
 596		adev->doorbell.num_doorbells = 0;
 597		adev->doorbell.ptr = NULL;
 598		return 0;
 599	}
 600
 601	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
 602		return -EINVAL;
 603
 604	amdgpu_asic_init_doorbell_index(adev);
 605
 606	/* doorbell bar mapping */
 607	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
 608	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
 609
 610	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
 611					     adev->doorbell_index.max_assignment+1);
 612	if (adev->doorbell.num_doorbells == 0)
 613		return -EINVAL;
 614
 615	/* For Vega, reserve and map two pages on doorbell BAR since SDMA
 616	 * paging queue doorbell use the second page. The
 617	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
 618	 * doorbells are in the first page. So with paging queue enabled,
 619	 * the max num_doorbells should + 1 page (0x400 in dword)
 620	 */
 621	if (adev->asic_type >= CHIP_VEGA10)
 622		adev->doorbell.num_doorbells += 0x400;
 
 
 
 
 
 
 623
 624	adev->doorbell.ptr = ioremap(adev->doorbell.base,
 625				     adev->doorbell.num_doorbells *
 626				     sizeof(u32));
 627	if (adev->doorbell.ptr == NULL)
 628		return -ENOMEM;
 629
 630	return 0;
 631}
 632
 633/**
 634 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 635 *
 636 * @adev: amdgpu_device pointer
 637 *
 638 * Tear down doorbell driver information (CIK)
 639 */
 640static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
 641{
 642	iounmap(adev->doorbell.ptr);
 643	adev->doorbell.ptr = NULL;
 644}
 645
 646
 647
 648/*
 649 * amdgpu_device_wb_*()
 650 * Writeback is the method by which the GPU updates special pages in memory
 651 * with the status of certain GPU events (fences, ring pointers,etc.).
 652 */
 653
 654/**
 655 * amdgpu_device_wb_fini - Disable Writeback and free memory
 656 *
 657 * @adev: amdgpu_device pointer
 658 *
 659 * Disables Writeback and frees the Writeback memory (all asics).
 660 * Used at driver shutdown.
 661 */
 662static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
 663{
 664	if (adev->wb.wb_obj) {
 665		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
 666				      &adev->wb.gpu_addr,
 667				      (void **)&adev->wb.wb);
 668		adev->wb.wb_obj = NULL;
 669	}
 670}
 671
 672/**
 673 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
 674 *
 675 * @adev: amdgpu_device pointer
 676 *
 677 * Initializes writeback and allocates writeback memory (all asics).
 678 * Used at driver startup.
 679 * Returns 0 on success or an -error on failure.
 680 */
 681static int amdgpu_device_wb_init(struct amdgpu_device *adev)
 682{
 683	int r;
 684
 685	if (adev->wb.wb_obj == NULL) {
 686		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
 687		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
 688					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
 689					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
 690					    (void **)&adev->wb.wb);
 691		if (r) {
 692			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
 693			return r;
 694		}
 695
 696		adev->wb.num_wb = AMDGPU_MAX_WB;
 697		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
 698
 699		/* clear wb memory */
 700		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
 701	}
 702
 703	return 0;
 704}
 705
 706/**
 707 * amdgpu_device_wb_get - Allocate a wb entry
 708 *
 709 * @adev: amdgpu_device pointer
 710 * @wb: wb index
 711 *
 712 * Allocate a wb slot for use by the driver (all asics).
 713 * Returns 0 on success or -EINVAL on failure.
 714 */
 715int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
 716{
 717	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
 718
 719	if (offset < adev->wb.num_wb) {
 720		__set_bit(offset, adev->wb.used);
 721		*wb = offset << 3; /* convert to dw offset */
 722		return 0;
 723	} else {
 724		return -EINVAL;
 725	}
 726}
 727
 728/**
 729 * amdgpu_device_wb_free - Free a wb entry
 730 *
 731 * @adev: amdgpu_device pointer
 732 * @wb: wb index
 733 *
 734 * Free a wb slot allocated for use by the driver (all asics)
 735 */
 736void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
 737{
 738	wb >>= 3;
 739	if (wb < adev->wb.num_wb)
 740		__clear_bit(wb, adev->wb.used);
 741}
 742
 743/**
 744 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 745 *
 746 * @adev: amdgpu_device pointer
 747 *
 748 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 749 * to fail, but if any of the BARs is not accessible after the size we abort
 750 * driver loading by returning -ENODEV.
 751 */
 752int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 753{
 754	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
 755	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
 756	struct pci_bus *root;
 757	struct resource *res;
 758	unsigned i;
 759	u16 cmd;
 760	int r;
 761
 762	/* Bypass for VF */
 763	if (amdgpu_sriov_vf(adev))
 764		return 0;
 765
 
 
 
 
 
 766	/* Check if the root BUS has 64bit memory resources */
 767	root = adev->pdev->bus;
 768	while (root->parent)
 769		root = root->parent;
 770
 771	pci_bus_for_each_resource(root, res, i) {
 772		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
 773		    res->start > 0x100000000ull)
 774			break;
 775	}
 776
 777	/* Trying to resize is pointless without a root hub window above 4GB */
 778	if (!res)
 779		return 0;
 780
 
 
 
 
 781	/* Disable memory decoding while we change the BAR addresses and size */
 782	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
 783	pci_write_config_word(adev->pdev, PCI_COMMAND,
 784			      cmd & ~PCI_COMMAND_MEMORY);
 785
 786	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
 787	amdgpu_device_doorbell_fini(adev);
 788	if (adev->asic_type >= CHIP_BONAIRE)
 789		pci_release_resource(adev->pdev, 2);
 790
 791	pci_release_resource(adev->pdev, 0);
 792
 793	r = pci_resize_resource(adev->pdev, 0, rbar_size);
 794	if (r == -ENOSPC)
 795		DRM_INFO("Not enough PCI address space for a large BAR.");
 796	else if (r && r != -ENOTSUPP)
 797		DRM_ERROR("Problem resizing BAR0 (%d).", r);
 798
 799	pci_assign_unassigned_bus_resources(adev->pdev->bus);
 800
 801	/* When the doorbell or fb BAR isn't available we have no chance of
 802	 * using the device.
 803	 */
 804	r = amdgpu_device_doorbell_init(adev);
 805	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
 806		return -ENODEV;
 807
 808	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
 809
 810	return 0;
 811}
 812
 813/*
 814 * GPU helpers function.
 815 */
 816/**
 817 * amdgpu_device_need_post - check if the hw need post or not
 818 *
 819 * @adev: amdgpu_device pointer
 820 *
 821 * Check if the asic has been initialized (all asics) at driver startup
 822 * or post is needed if  hw reset is performed.
 823 * Returns true if need or false if not.
 824 */
 825bool amdgpu_device_need_post(struct amdgpu_device *adev)
 826{
 827	uint32_t reg;
 828
 829	if (amdgpu_sriov_vf(adev))
 830		return false;
 831
 832	if (amdgpu_passthrough(adev)) {
 833		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
 834		 * some old smc fw still need driver do vPost otherwise gpu hang, while
 835		 * those smc fw version above 22.15 doesn't have this flaw, so we force
 836		 * vpost executed for smc version below 22.15
 837		 */
 838		if (adev->asic_type == CHIP_FIJI) {
 839			int err;
 840			uint32_t fw_ver;
 841			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
 842			/* force vPost if error occured */
 843			if (err)
 844				return true;
 845
 846			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
 847			if (fw_ver < 0x00160e00)
 848				return true;
 849		}
 850	}
 851
 
 
 
 
 852	if (adev->has_hw_reset) {
 853		adev->has_hw_reset = false;
 854		return true;
 855	}
 856
 857	/* bios scratch used on CIK+ */
 858	if (adev->asic_type >= CHIP_BONAIRE)
 859		return amdgpu_atombios_scratch_need_asic_init(adev);
 860
 861	/* check MEM_SIZE for older asics */
 862	reg = amdgpu_asic_get_config_memsize(adev);
 863
 864	if ((reg != 0) && (reg != 0xffffffff))
 865		return false;
 866
 867	return true;
 868}
 869
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 870/* if we get transitioned to only one device, take VGA back */
 871/**
 872 * amdgpu_device_vga_set_decode - enable/disable vga decode
 873 *
 874 * @cookie: amdgpu_device pointer
 875 * @state: enable/disable vga decode
 876 *
 877 * Enable/disable vga decode (all asics).
 878 * Returns VGA resource flags.
 879 */
 880static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
 
 881{
 882	struct amdgpu_device *adev = cookie;
 883	amdgpu_asic_set_vga_state(adev, state);
 884	if (state)
 885		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
 886		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 887	else
 888		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 889}
 890
 891/**
 892 * amdgpu_device_check_block_size - validate the vm block size
 893 *
 894 * @adev: amdgpu_device pointer
 895 *
 896 * Validates the vm block size specified via module parameter.
 897 * The vm block size defines number of bits in page table versus page directory,
 898 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 899 * page table and the remaining bits are in the page directory.
 900 */
 901static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
 902{
 903	/* defines number of bits in page table versus page directory,
 904	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 905	 * page table and the remaining bits are in the page directory */
 906	if (amdgpu_vm_block_size == -1)
 907		return;
 908
 909	if (amdgpu_vm_block_size < 9) {
 910		dev_warn(adev->dev, "VM page table size (%d) too small\n",
 911			 amdgpu_vm_block_size);
 912		amdgpu_vm_block_size = -1;
 913	}
 914}
 915
 916/**
 917 * amdgpu_device_check_vm_size - validate the vm size
 918 *
 919 * @adev: amdgpu_device pointer
 920 *
 921 * Validates the vm size in GB specified via module parameter.
 922 * The VM size is the size of the GPU virtual memory space in GB.
 923 */
 924static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
 925{
 926	/* no need to check the default value */
 927	if (amdgpu_vm_size == -1)
 928		return;
 929
 930	if (amdgpu_vm_size < 1) {
 931		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
 932			 amdgpu_vm_size);
 933		amdgpu_vm_size = -1;
 934	}
 935}
 936
 937static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
 938{
 939	struct sysinfo si;
 940	bool is_os_64 = (sizeof(void *) == 8) ? true : false;
 941	uint64_t total_memory;
 942	uint64_t dram_size_seven_GB = 0x1B8000000;
 943	uint64_t dram_size_three_GB = 0xB8000000;
 944
 945	if (amdgpu_smu_memory_pool_size == 0)
 946		return;
 947
 948	if (!is_os_64) {
 949		DRM_WARN("Not 64-bit OS, feature not supported\n");
 950		goto def_value;
 951	}
 952	si_meminfo(&si);
 953	total_memory = (uint64_t)si.totalram * si.mem_unit;
 954
 955	if ((amdgpu_smu_memory_pool_size == 1) ||
 956		(amdgpu_smu_memory_pool_size == 2)) {
 957		if (total_memory < dram_size_three_GB)
 958			goto def_value1;
 959	} else if ((amdgpu_smu_memory_pool_size == 4) ||
 960		(amdgpu_smu_memory_pool_size == 8)) {
 961		if (total_memory < dram_size_seven_GB)
 962			goto def_value1;
 963	} else {
 964		DRM_WARN("Smu memory pool size not supported\n");
 965		goto def_value;
 966	}
 967	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
 968
 969	return;
 970
 971def_value1:
 972	DRM_WARN("No enough system memory\n");
 973def_value:
 974	adev->pm.smu_prv_buffer_size = 0;
 975}
 976
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 977/**
 978 * amdgpu_device_check_arguments - validate module params
 979 *
 980 * @adev: amdgpu_device pointer
 981 *
 982 * Validates certain module parameters and updates
 983 * the associated values used by the driver (all asics).
 984 */
 985static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 986{
 987	int ret = 0;
 988
 989	if (amdgpu_sched_jobs < 4) {
 990		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
 991			 amdgpu_sched_jobs);
 992		amdgpu_sched_jobs = 4;
 993	} else if (!is_power_of_2(amdgpu_sched_jobs)){
 994		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
 995			 amdgpu_sched_jobs);
 996		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
 997	}
 998
 999	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1000		/* gart size must be greater or equal to 32M */
1001		dev_warn(adev->dev, "gart size (%d) too small\n",
1002			 amdgpu_gart_size);
1003		amdgpu_gart_size = -1;
1004	}
1005
1006	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1007		/* gtt size must be greater or equal to 32M */
1008		dev_warn(adev->dev, "gtt size (%d) too small\n",
1009				 amdgpu_gtt_size);
1010		amdgpu_gtt_size = -1;
1011	}
1012
1013	/* valid range is between 4 and 9 inclusive */
1014	if (amdgpu_vm_fragment_size != -1 &&
1015	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1016		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1017		amdgpu_vm_fragment_size = -1;
1018	}
1019
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1020	amdgpu_device_check_smu_prv_buffer_size(adev);
1021
1022	amdgpu_device_check_vm_size(adev);
1023
1024	amdgpu_device_check_block_size(adev);
1025
1026	ret = amdgpu_device_get_job_timeout_settings(adev);
1027	if (ret) {
1028		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
1029		return ret;
1030	}
1031
1032	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1033
1034	return ret;
1035}
1036
1037/**
1038 * amdgpu_switcheroo_set_state - set switcheroo state
1039 *
1040 * @pdev: pci dev pointer
1041 * @state: vga_switcheroo state
1042 *
1043 * Callback for the switcheroo driver.  Suspends or resumes the
1044 * the asics before or after it is powered up using ACPI methods.
1045 */
1046static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
 
1047{
1048	struct drm_device *dev = pci_get_drvdata(pdev);
 
1049
1050	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1051		return;
1052
1053	if (state == VGA_SWITCHEROO_ON) {
1054		pr_info("amdgpu: switched on\n");
1055		/* don't suspend or resume card normally */
1056		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1057
1058		amdgpu_device_resume(dev, true, true);
 
 
 
 
 
1059
1060		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1061		drm_kms_helper_poll_enable(dev);
1062	} else {
1063		pr_info("amdgpu: switched off\n");
1064		drm_kms_helper_poll_disable(dev);
1065		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1066		amdgpu_device_suspend(dev, true, true);
 
 
 
 
1067		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1068	}
1069}
1070
1071/**
1072 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1073 *
1074 * @pdev: pci dev pointer
1075 *
1076 * Callback for the switcheroo driver.  Check of the switcheroo
1077 * state can be changed.
1078 * Returns true if the state can be changed, false if not.
1079 */
1080static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1081{
1082	struct drm_device *dev = pci_get_drvdata(pdev);
1083
1084	/*
1085	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1086	* locking inversion with the driver load path. And the access here is
1087	* completely racy anyway. So don't bother with locking for now.
1088	*/
1089	return dev->open_count == 0;
1090}
1091
1092static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1093	.set_gpu_state = amdgpu_switcheroo_set_state,
1094	.reprobe = NULL,
1095	.can_switch = amdgpu_switcheroo_can_switch,
1096};
1097
1098/**
1099 * amdgpu_device_ip_set_clockgating_state - set the CG state
1100 *
1101 * @dev: amdgpu_device pointer
1102 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1103 * @state: clockgating state (gate or ungate)
1104 *
1105 * Sets the requested clockgating state for all instances of
1106 * the hardware IP specified.
1107 * Returns the error code from the last instance.
1108 */
1109int amdgpu_device_ip_set_clockgating_state(void *dev,
1110					   enum amd_ip_block_type block_type,
1111					   enum amd_clockgating_state state)
1112{
1113	struct amdgpu_device *adev = dev;
1114	int i, r = 0;
1115
1116	for (i = 0; i < adev->num_ip_blocks; i++) {
1117		if (!adev->ip_blocks[i].status.valid)
1118			continue;
1119		if (adev->ip_blocks[i].version->type != block_type)
1120			continue;
1121		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1122			continue;
1123		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1124			(void *)adev, state);
1125		if (r)
1126			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1127				  adev->ip_blocks[i].version->funcs->name, r);
1128	}
1129	return r;
1130}
1131
1132/**
1133 * amdgpu_device_ip_set_powergating_state - set the PG state
1134 *
1135 * @dev: amdgpu_device pointer
1136 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1137 * @state: powergating state (gate or ungate)
1138 *
1139 * Sets the requested powergating state for all instances of
1140 * the hardware IP specified.
1141 * Returns the error code from the last instance.
1142 */
1143int amdgpu_device_ip_set_powergating_state(void *dev,
1144					   enum amd_ip_block_type block_type,
1145					   enum amd_powergating_state state)
1146{
1147	struct amdgpu_device *adev = dev;
1148	int i, r = 0;
1149
1150	for (i = 0; i < adev->num_ip_blocks; i++) {
1151		if (!adev->ip_blocks[i].status.valid)
1152			continue;
1153		if (adev->ip_blocks[i].version->type != block_type)
1154			continue;
1155		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1156			continue;
1157		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1158			(void *)adev, state);
1159		if (r)
1160			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1161				  adev->ip_blocks[i].version->funcs->name, r);
1162	}
1163	return r;
1164}
1165
1166/**
1167 * amdgpu_device_ip_get_clockgating_state - get the CG state
1168 *
1169 * @adev: amdgpu_device pointer
1170 * @flags: clockgating feature flags
1171 *
1172 * Walks the list of IPs on the device and updates @flags with the
1173 * clockgating feature flags for each hardware IP where clockgating
1174 * is enabled.
1176 */
1177void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1178					    u32 *flags)
1179{
1180	int i;
1181
1182	for (i = 0; i < adev->num_ip_blocks; i++) {
1183		if (!adev->ip_blocks[i].status.valid)
1184			continue;
1185		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1186			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1187	}
1188}
1189
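/*
 * Illustrative usage sketch, not part of the driver: a debugfs-style
 * reader (assuming a seq_file *m from its handler) could query the
 * aggregate flags and test individual AMD_CG_SUPPORT_* bits:
 *
 *	u32 flags = 0;
 *
 *	amdgpu_device_ip_get_clockgating_state(adev, &flags);
 *	if (flags & AMD_CG_SUPPORT_GFX_MGCG)
 *		seq_printf(m, "GFX MGCG enabled\n");
 */
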
1190/**
1191 * amdgpu_device_ip_wait_for_idle - wait for idle
1192 *
1193 * @adev: amdgpu_device pointer
1194 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1195 *
1196 * Waits for the requested hardware IP to be idle.
1197 * Returns 0 for success or a negative error code on failure.
1198 */
1199int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1200				   enum amd_ip_block_type block_type)
1201{
1202	int i, r;
1203
1204	for (i = 0; i < adev->num_ip_blocks; i++) {
1205		if (!adev->ip_blocks[i].status.valid)
1206			continue;
1207		if (adev->ip_blocks[i].version->type == block_type) {
1208			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1209			if (r)
1210				return r;
1211			break;
1212		}
1213	}
1214	return 0;
1216}
1217
1218/**
1219 * amdgpu_device_ip_is_idle - is the hardware IP idle
1220 *
1221 * @adev: amdgpu_device pointer
1222 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1223 *
1224 * Check if the hardware IP is idle or not.
1225 * Returns true if the IP is idle, false if not.
1226 */
1227bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1228			      enum amd_ip_block_type block_type)
1229{
1230	int i;
1231
1232	for (i = 0; i < adev->num_ip_blocks; i++) {
1233		if (!adev->ip_blocks[i].status.valid)
1234			continue;
1235		if (adev->ip_blocks[i].version->type == block_type)
1236			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1237	}
1238	return true;
1240}
1241
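/*
 * Illustrative usage sketch, not part of the driver: callers typically
 * combine the two helpers above, e.g. blocking on GMC before
 * reprogramming it:
 *
 *	if (!amdgpu_device_ip_is_idle(adev, AMD_IP_BLOCK_TYPE_GMC)) {
 *		r = amdgpu_device_ip_wait_for_idle(adev,
 *						   AMD_IP_BLOCK_TYPE_GMC);
 *		if (r)
 *			return r;
 *	}
 */
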
1242/**
1243 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1244 *
1245 * @adev: amdgpu_device pointer
1246 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1247 *
1248 * Returns a pointer to the hardware IP block structure
1249 * if it exists for the asic, otherwise NULL.
1250 */
1251struct amdgpu_ip_block *
1252amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1253			      enum amd_ip_block_type type)
1254{
1255	int i;
1256
1257	for (i = 0; i < adev->num_ip_blocks; i++)
1258		if (adev->ip_blocks[i].version->type == type)
1259			return &adev->ip_blocks[i];
1260
1261	return NULL;
1262}
1263
1264/**
1265 * amdgpu_device_ip_block_version_cmp - check an IP block's version
1266 *
1267 * @adev: amdgpu_device pointer
1268 * @type: enum amd_ip_block_type
1269 * @major: major version
1270 * @minor: minor version
1271 *
1272 * Returns 0 if the IP block's version is equal or greater,
1273 * 1 if it is smaller or the ip_block doesn't exist.
1274 */
1275int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1276				       enum amd_ip_block_type type,
1277				       u32 major, u32 minor)
1278{
1279	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1280
1281	if (ip_block && ((ip_block->version->major > major) ||
1282			((ip_block->version->major == major) &&
1283			(ip_block->version->minor >= minor))))
1284		return 0;
1285
1286	return 1;
1287}
1288
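/*
 * Illustrative usage sketch, not part of the driver: note the inverted
 * return convention (0 means "equal or greater"), so a version gate
 * reads like this:
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GMC,
 *						8, 1)) {
 *		(GMC is at least version 8.1)
 *	}
 */
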
1289/**
1290 * amdgpu_device_ip_block_add - add an IP block to the asic
1291 *
1292 * @adev: amdgpu_device pointer
1293 * @ip_block_version: pointer to the IP to add
1294 *
1295 * Adds the IP block driver information to the collection of IPs
1296 * on the asic.  Returns 0 on success, -EINVAL on failure.
1297 */
1298int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1299			       const struct amdgpu_ip_block_version *ip_block_version)
1300{
1301	if (!ip_block_version)
1302		return -EINVAL;
1303
1304	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1305		  ip_block_version->funcs->name);
1306
1307	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1308
1309	return 0;
1310}
1311
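/*
 * Illustrative usage sketch, not part of this function: the per-ASIC
 * *_set_ip_blocks() helpers call this in bring-up order, roughly:
 *
 *	amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 *	amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
 *	amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
 *	...
 */
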
1312/**
1313 * amdgpu_device_enable_virtual_display - enable virtual display feature
1314 *
1315 * @adev: amdgpu_device pointer
1316 *
1317 * Enables the virtual display feature if the user has enabled it via
1318 * the module parameter virtual_display.  This feature provides a virtual
1319 * display hardware on headless boards or in virtualized environments.
1320 * This function parses and validates the configuration string specified by
1321 * the user and configures the virtual display configuration (number of
1322 * virtual connectors, crtcs, etc.) specified.
1323 */
1324static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1325{
1326	adev->enable_virtual_display = false;
1327
1328	if (amdgpu_virtual_display) {
1329		struct drm_device *ddev = adev->ddev;
1330		const char *pci_address_name = pci_name(ddev->pdev);
1331		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1332
1333		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1334		pciaddstr_tmp = pciaddstr;
1335		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1336			pciaddname = strsep(&pciaddname_tmp, ",");
1337			if (!strcmp("all", pciaddname)
1338			    || !strcmp(pci_address_name, pciaddname)) {
1339				long num_crtc;
1340				int res = -1;
1341
1342				adev->enable_virtual_display = true;
1343
1344				if (pciaddname_tmp)
1345					res = kstrtol(pciaddname_tmp, 10,
1346						      &num_crtc);
1347
1348				if (!res) {
1349					if (num_crtc < 1)
1350						num_crtc = 1;
1351					if (num_crtc > 6)
1352						num_crtc = 6;
1353					adev->mode_info.num_crtc = num_crtc;
1354				} else {
1355					adev->mode_info.num_crtc = 1;
1356				}
1357				break;
1358			}
1359		}
1360
1361		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1362			 amdgpu_virtual_display, pci_address_name,
1363			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1364
1365		kfree(pciaddstr);
1366	}
1367}
1368
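/*
 * Illustrative usage note, not part of the driver: per the parser above,
 * the module parameter takes "address,crtcs[;address,crtcs]" entries,
 * where the address is "all" or a PCI BDF, e.g.:
 *
 *	modprobe amdgpu virtual_display=0000:01:00.0,2
 *
 * enables two virtual crtcs on that device only.
 */
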
1369/**
1370 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1371 *
1372 * @adev: amdgpu_device pointer
1373 *
1374 * Parses the asic configuration parameters specified in the gpu info
1375 * firmware and makes them available to the driver for use in configuring
1376 * the asic.
1377 * Returns 0 on success, -EINVAL on failure.
1378 */
1379static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1380{
1381	const char *chip_name;
1382	char fw_name[30];
1383	int err;
1384	const struct gpu_info_firmware_header_v1_0 *hdr;
1385
1386	adev->firmware.gpu_info_fw = NULL;
1387
1388	switch (adev->asic_type) {
1389	case CHIP_TOPAZ:
1390	case CHIP_TONGA:
1391	case CHIP_FIJI:
1392	case CHIP_POLARIS10:
1393	case CHIP_POLARIS11:
1394	case CHIP_POLARIS12:
1395	case CHIP_VEGAM:
1396	case CHIP_CARRIZO:
1397	case CHIP_STONEY:
1398#ifdef CONFIG_DRM_AMDGPU_SI
1399	case CHIP_VERDE:
1400	case CHIP_TAHITI:
1401	case CHIP_PITCAIRN:
1402	case CHIP_OLAND:
1403	case CHIP_HAINAN:
1404#endif
1405#ifdef CONFIG_DRM_AMDGPU_CIK
1406	case CHIP_BONAIRE:
1407	case CHIP_HAWAII:
1408	case CHIP_KAVERI:
1409	case CHIP_KABINI:
1410	case CHIP_MULLINS:
1411#endif
1412	case CHIP_VEGA20:
1413	default:
1414		return 0;
1415	case CHIP_VEGA10:
1416		chip_name = "vega10";
1417		break;
1418	case CHIP_VEGA12:
1419		chip_name = "vega12";
1420		break;
1421	case CHIP_RAVEN:
1422		if (adev->rev_id >= 8)
1423			chip_name = "raven2";
1424		else if (adev->pdev->device == 0x15d8)
1425			chip_name = "picasso";
1426		else
1427			chip_name = "raven";
1428		break;
1429	case CHIP_ARCTURUS:
1430		chip_name = "arcturus";
1431		break;
1432	case CHIP_RENOIR:
1433		chip_name = "renoir";
1434		break;
1435	case CHIP_NAVI10:
1436		chip_name = "navi10";
1437		break;
1438	case CHIP_NAVI14:
1439		chip_name = "navi14";
1440		break;
1441	case CHIP_NAVI12:
1442		chip_name = "navi12";
1443		break;
1444	}
1445
1446	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1447	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1448	if (err) {
1449		dev_err(adev->dev,
1450			"Failed to load gpu_info firmware \"%s\"\n",
1451			fw_name);
1452		goto out;
1453	}
1454	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1455	if (err) {
1456		dev_err(adev->dev,
1457			"Failed to validate gpu_info firmware \"%s\"\n",
1458			fw_name);
1459		goto out;
1460	}
1461
1462	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1463	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1464
1465	switch (hdr->version_major) {
1466	case 1:
1467	{
1468		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1469			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1470								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1471
1472		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1473		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1474		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1475		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1476		adev->gfx.config.max_texture_channel_caches =
1477			le32_to_cpu(gpu_info_fw->gc_num_tccs);
1478		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1479		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1480		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1481		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1482		adev->gfx.config.double_offchip_lds_buf =
1483			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1484		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1485		adev->gfx.cu_info.max_waves_per_simd =
1486			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1487		adev->gfx.cu_info.max_scratch_slots_per_cu =
1488			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1489		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1490		if (hdr->version_minor >= 1) {
1491			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1492				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1493									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1494			adev->gfx.config.num_sc_per_sh =
1495				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1496			adev->gfx.config.num_packer_per_sc =
1497				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1498		}
1499#ifdef CONFIG_DRM_AMD_DC_DCN2_0
1500		if (hdr->version_minor == 2) {
1501			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1502				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1503									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1504			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1505		}
1506#endif
1507		break;
1508	}
1509	default:
1510		dev_err(adev->dev,
1511			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1512		err = -EINVAL;
1513		goto out;
1514	}
1515out:
1516	return err;
1517}
1518
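/*
 * Illustrative note, not part of the driver: with the default firmware
 * search path, the name built above resolves to a file such as
 * /lib/firmware/amdgpu/navi10_gpu_info.bin when request_firmware() runs.
 */
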
1519/**
1520 * amdgpu_device_ip_early_init - run early init for hardware IPs
1521 *
1522 * @adev: amdgpu_device pointer
1523 *
1524 * Early initialization pass for hardware IPs.  The hardware IPs that make
1525 * up each asic are discovered and each IP's early_init callback is run.  This
1526 * is the first stage in initializing the asic.
1527 * Returns 0 on success, negative error code on failure.
1528 */
1529static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1530{
1531	int i, r;
1532
1533	amdgpu_device_enable_virtual_display(adev);
1534
1535	switch (adev->asic_type) {
1536	case CHIP_TOPAZ:
1537	case CHIP_TONGA:
1538	case CHIP_FIJI:
1539	case CHIP_POLARIS10:
1540	case CHIP_POLARIS11:
1541	case CHIP_POLARIS12:
1542	case CHIP_VEGAM:
1543	case CHIP_CARRIZO:
1544	case CHIP_STONEY:
1545		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1546			adev->family = AMDGPU_FAMILY_CZ;
1547		else
1548			adev->family = AMDGPU_FAMILY_VI;
1549
1550		r = vi_set_ip_blocks(adev);
1551		if (r)
1552			return r;
1553		break;
1554#ifdef CONFIG_DRM_AMDGPU_SI
1555	case CHIP_VERDE:
1556	case CHIP_TAHITI:
1557	case CHIP_PITCAIRN:
1558	case CHIP_OLAND:
1559	case CHIP_HAINAN:
1560		adev->family = AMDGPU_FAMILY_SI;
1561		r = si_set_ip_blocks(adev);
1562		if (r)
1563			return r;
1564		break;
1565#endif
1566#ifdef CONFIG_DRM_AMDGPU_CIK
1567	case CHIP_BONAIRE:
1568	case CHIP_HAWAII:
1569	case CHIP_KAVERI:
1570	case CHIP_KABINI:
1571	case CHIP_MULLINS:
1572		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1573			adev->family = AMDGPU_FAMILY_CI;
1574		else
1575			adev->family = AMDGPU_FAMILY_KV;
1576
1577		r = cik_set_ip_blocks(adev);
1578		if (r)
1579			return r;
1580		break;
1581#endif
1582	case CHIP_VEGA10:
1583	case CHIP_VEGA12:
1584	case CHIP_VEGA20:
1585	case CHIP_RAVEN:
1586	case CHIP_ARCTURUS:
1587	case CHIP_RENOIR:
1588		if (adev->asic_type == CHIP_RAVEN ||
1589		    adev->asic_type == CHIP_RENOIR)
1590			adev->family = AMDGPU_FAMILY_RV;
1591		else
1592			adev->family = AMDGPU_FAMILY_AI;
1593
1594		r = soc15_set_ip_blocks(adev);
1595		if (r)
1596			return r;
1597		break;
1598	case  CHIP_NAVI10:
1599	case  CHIP_NAVI14:
1600	case  CHIP_NAVI12:
1601		adev->family = AMDGPU_FAMILY_NV;
1602
1603		r = nv_set_ip_blocks(adev);
1604		if (r)
1605			return r;
1606		break;
1607	default:
1608		/* FIXME: not supported yet */
1609		return -EINVAL;
1610	}
1611
1612	r = amdgpu_device_parse_gpu_info_fw(adev);
1613	if (r)
1614		return r;
1615
1616	amdgpu_amdkfd_device_probe(adev);
1617
1618	if (amdgpu_sriov_vf(adev)) {
1619		r = amdgpu_virt_request_full_gpu(adev, true);
1620		if (r)
1621			return -EAGAIN;
1622	}
1623
1624	adev->pm.pp_feature = amdgpu_pp_feature_mask;
1625	if (amdgpu_sriov_vf(adev))
1626		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1627
1628	for (i = 0; i < adev->num_ip_blocks; i++) {
1629		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1630			DRM_ERROR("disabled ip block: %d <%s>\n",
1631				  i, adev->ip_blocks[i].version->funcs->name);
1632			adev->ip_blocks[i].status.valid = false;
1633		} else {
1634			if (adev->ip_blocks[i].version->funcs->early_init) {
1635				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1636				if (r == -ENOENT) {
1637					adev->ip_blocks[i].status.valid = false;
1638				} else if (r) {
1639					DRM_ERROR("early_init of IP block <%s> failed %d\n",
1640						  adev->ip_blocks[i].version->funcs->name, r);
1641					return r;
1642				} else {
1643					adev->ip_blocks[i].status.valid = true;
1644				}
1645			} else {
1646				adev->ip_blocks[i].status.valid = true;
1647			}
1648		}
1649		/* get the vbios after the asic_funcs are set up */
1650		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
1651			/* Read BIOS */
1652			if (!amdgpu_get_bios(adev))
1653				return -EINVAL;
1654
1655			r = amdgpu_atombios_init(adev);
1656			if (r) {
1657				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
1658				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
1659				return r;
1660			}
1661		}
1662	}
1663
1664	adev->cg_flags &= amdgpu_cg_mask;
1665	adev->pg_flags &= amdgpu_pg_mask;
1666
1667	return 0;
1668}
1669
1670static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
1671{
1672	int i, r;
1673
1674	for (i = 0; i < adev->num_ip_blocks; i++) {
1675		if (!adev->ip_blocks[i].status.sw)
1676			continue;
1677		if (adev->ip_blocks[i].status.hw)
1678			continue;
1679		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1680		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
1681		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
1682			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1683			if (r) {
1684				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1685					  adev->ip_blocks[i].version->funcs->name, r);
1686				return r;
1687			}
1688			adev->ip_blocks[i].status.hw = true;
1689		}
1690	}
1691
1692	return 0;
1693}
1694
1695static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
1696{
1697	int i, r;
1698
1699	for (i = 0; i < adev->num_ip_blocks; i++) {
1700		if (!adev->ip_blocks[i].status.sw)
1701			continue;
1702		if (adev->ip_blocks[i].status.hw)
1703			continue;
1704		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1705		if (r) {
1706			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1707				  adev->ip_blocks[i].version->funcs->name, r);
1708			return r;
1709		}
1710		adev->ip_blocks[i].status.hw = true;
1711	}
1712
1713	return 0;
1714}
1715
1716static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
1717{
1718	int r = 0;
1719	int i;
1720	uint32_t smu_version;
1721
1722	if (adev->asic_type >= CHIP_VEGA10) {
1723		for (i = 0; i < adev->num_ip_blocks; i++) {
1724			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
1725				continue;
1726
1727			/* no need to do the fw loading again if already done */
1728			if (adev->ip_blocks[i].status.hw)
1729				break;
1730
1731			if (adev->in_gpu_reset || adev->in_suspend) {
1732				r = adev->ip_blocks[i].version->funcs->resume(adev);
1733				if (r) {
1734					DRM_ERROR("resume of IP block <%s> failed %d\n",
1735							  adev->ip_blocks[i].version->funcs->name, r);
1736					return r;
1737				}
1738			} else {
1739				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1740				if (r) {
1741					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1742							  adev->ip_blocks[i].version->funcs->name, r);
1743					return r;
1744				}
1745			}
1746
1747			adev->ip_blocks[i].status.hw = true;
1748			break;
1749		}
1750	}
1751
1752	r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
1753
1754	return r;
1755}
1756
1757/**
1758 * amdgpu_device_ip_init - run init for hardware IPs
1759 *
1760 * @adev: amdgpu_device pointer
1761 *
1762 * Main initialization pass for hardware IPs.  The list of all the hardware
1763 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
1764 * are run.  sw_init initializes the software state associated with each IP
1765 * and hw_init initializes the hardware associated with each IP.
1766 * Returns 0 on success, negative error code on failure.
1767 */
1768static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1769{
1770	int i, r;
1771
1772	r = amdgpu_ras_init(adev);
1773	if (r)
1774		return r;
1775
1776	for (i = 0; i < adev->num_ip_blocks; i++) {
1777		if (!adev->ip_blocks[i].status.valid)
1778			continue;
1779		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1780		if (r) {
1781			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1782				  adev->ip_blocks[i].version->funcs->name, r);
1783			goto init_failed;
1784		}
1785		adev->ip_blocks[i].status.sw = true;
1786
1787		/* need to do gmc hw init early so we can allocate gpu mem */
1788		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1789			r = amdgpu_device_vram_scratch_init(adev);
1790			if (r) {
1791				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1792				goto init_failed;
1793			}
1794			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1795			if (r) {
1796				DRM_ERROR("hw_init %d failed %d\n", i, r);
1797				goto init_failed;
1798			}
1799			r = amdgpu_device_wb_init(adev);
1800			if (r) {
1801				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
1802				goto init_failed;
1803			}
1804			adev->ip_blocks[i].status.hw = true;
1805
1806			/* right after GMC hw init, we create CSA */
1807			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
1808				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
1809								AMDGPU_GEM_DOMAIN_VRAM,
1810								AMDGPU_CSA_SIZE);
1811				if (r) {
1812					DRM_ERROR("allocate CSA failed %d\n", r);
1813					goto init_failed;
1814				}
1815			}
1816		}
1817	}
1818
1819	r = amdgpu_ib_pool_init(adev);
1820	if (r) {
1821		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
1822		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
1823		goto init_failed;
1824	}
1825
1826	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
1827	if (r)
1828		goto init_failed;
1829
1830	r = amdgpu_device_ip_hw_init_phase1(adev);
1831	if (r)
1832		goto init_failed;
1833
1834	r = amdgpu_device_fw_loading(adev);
1835	if (r)
1836		goto init_failed;
1837
1838	r = amdgpu_device_ip_hw_init_phase2(adev);
1839	if (r)
1840		goto init_failed;
1841
1842	if (adev->gmc.xgmi.num_physical_nodes > 1)
1843		amdgpu_xgmi_add_device(adev);
1844	amdgpu_amdkfd_device_init(adev);
1845
1846init_failed:
1847	if (amdgpu_sriov_vf(adev)) {
1848		if (!r)
1849			amdgpu_virt_init_data_exchange(adev);
1850		amdgpu_virt_release_full_gpu(adev, true);
1851	}
1852
1853	return r;
1854}
1855
1856/**
1857 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
1858 *
1859 * @adev: amdgpu_device pointer
1860 *
1861 * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
1862 * this function before a GPU reset.  If the value is retained after a
1863 * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
1864 */
1865static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
1866{
1867	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1868}
1869
1870/**
1871 * amdgpu_device_check_vram_lost - check if vram is valid
1872 *
1873 * @adev: amdgpu_device pointer
1874 *
1875 * Checks the reset magic value written to the gart pointer in VRAM.
1876 * The driver calls this after a GPU reset to see if the contents of
1877 * VRAM were lost or not.
1878 * Returns true if VRAM is lost, false if not.
1879 */
1880static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
1881{
1882	return !!memcmp(adev->gart.ptr, adev->reset_magic,
1883			AMDGPU_RESET_MAGIC_NUM);
1884}
1885
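/*
 * Illustrative usage sketch, not part of the driver: the two helpers
 * above are paired around a reset, with the magic written at late init
 * time and compared afterwards:
 *
 *	amdgpu_device_fill_reset_magic(adev);	(before the reset)
 *	...
 *	vram_lost = amdgpu_device_check_vram_lost(adev);
 *	if (vram_lost)
 *		(re-validate VRAM-backed buffers)
 */
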
1886/**
1887 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
1888 *
1889 * @adev: amdgpu_device pointer
 * @state: clockgating state (gate or ungate)
1890 *
1891 * The list of all the hardware IPs that make up the asic is walked and the
1892 * set_clockgating_state callbacks are run.
1893 * Used in the late init pass to enable clockgating and in the fini or
1894 * suspend paths to disable it.
1895 * Returns 0 on success, negative error code on failure.
1896 */
1897
1898static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
1899						enum amd_clockgating_state state)
1900{
1901	int i, j, r;
1902
1903	if (amdgpu_emu_mode == 1)
1904		return 0;
1905
1906	for (j = 0; j < adev->num_ip_blocks; j++) {
1907		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
1908		if (!adev->ip_blocks[i].status.late_initialized)
1909			continue;
1910		/* skip CG for VCE/UVD, it's handled specially */
1911		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1912		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1913		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
1914		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1915			/* enable clockgating to save power */
1916			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1917										     state);
1918			if (r) {
1919				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1920					  adev->ip_blocks[i].version->funcs->name, r);
1921				return r;
1922			}
1923		}
1924	}
1925
1926	return 0;
1927}
1928
1929static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
1930{
1931	int i, j, r;
1932
1933	if (amdgpu_emu_mode == 1)
1934		return 0;
1935
1936	for (j = 0; j < adev->num_ip_blocks; j++) {
1937		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
1938		if (!adev->ip_blocks[i].status.late_initialized)
1939			continue;
1940		/* skip PG for VCE/UVD, it's handled specially */
1941		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1942		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1943		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
1944		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
1945			/* enable powergating to save power */
1946			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
1947											state);
1948			if (r) {
1949				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
1950					  adev->ip_blocks[i].version->funcs->name, r);
1951				return r;
1952			}
1953		}
1954	}
1955	return 0;
1956}
1957
1958static int amdgpu_device_enable_mgpu_fan_boost(void)
1959{
1960	struct amdgpu_gpu_instance *gpu_ins;
1961	struct amdgpu_device *adev;
1962	int i, ret = 0;
1963
1964	mutex_lock(&mgpu_info.mutex);
1965
1966	/*
1967	 * MGPU fan boost feature should be enabled
1968	 * only when there are two or more dGPUs in
1969	 * the system
1970	 */
1971	if (mgpu_info.num_dgpu < 2)
1972		goto out;
1973
1974	for (i = 0; i < mgpu_info.num_dgpu; i++) {
1975		gpu_ins = &(mgpu_info.gpu_ins[i]);
1976		adev = gpu_ins->adev;
1977		if (!(adev->flags & AMD_IS_APU) &&
1978		    !gpu_ins->mgpu_fan_enabled &&
1979		    adev->powerplay.pp_funcs &&
1980		    adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
1981			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
1982			if (ret)
1983				break;
1984
1985			gpu_ins->mgpu_fan_enabled = 1;
1986		}
1987	}
1988
1989out:
1990	mutex_unlock(&mgpu_info.mutex);
1991
1992	return ret;
1993}
1994
1995/**
1996 * amdgpu_device_ip_late_init - run late init for hardware IPs
1997 *
1998 * @adev: amdgpu_device pointer
1999 *
2000 * Late initialization pass for hardware IPs.  The list of all the hardware
2001 * IPs that make up the asic is walked and the late_init callbacks are run.
2002 * late_init covers any special initialization that an IP requires
2003 * after all of the IP blocks have been initialized or something that needs to happen
2004 * late in the init process.
2005 * Returns 0 on success, negative error code on failure.
2006 */
2007static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2008{
2009	int i = 0, r;
2010
2011	for (i = 0; i < adev->num_ip_blocks; i++) {
2012		if (!adev->ip_blocks[i].status.hw)
2013			continue;
2014		if (adev->ip_blocks[i].version->funcs->late_init) {
2015			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2016			if (r) {
2017				DRM_ERROR("late_init of IP block <%s> failed %d\n",
2018					  adev->ip_blocks[i].version->funcs->name, r);
2019				return r;
2020			}
2021		}
2022		adev->ip_blocks[i].status.late_initialized = true;
2023	}
2024
2025	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2026	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2027
2028	amdgpu_device_fill_reset_magic(adev);
2029
2030	r = amdgpu_device_enable_mgpu_fan_boost();
2031	if (r)
2032		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2033
2034	/* set to low pstate by default */
2035	amdgpu_xgmi_set_pstate(adev, 0);
2036
2037	return 0;
2038}
2039
2040/**
2041 * amdgpu_device_ip_fini - run fini for hardware IPs
2042 *
2043 * @adev: amdgpu_device pointer
2044 *
2045 * Main teardown pass for hardware IPs.  The list of all the hardware
2046 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2047 * are run.  hw_fini tears down the hardware associated with each IP
2048 * and sw_fini tears down any software state associated with each IP.
2049 * Returns 0 on success, negative error code on failure.
2050 */
2051static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2052{
2053	int i, r;
2054
2055	amdgpu_ras_pre_fini(adev);
2056
2057	if (adev->gmc.xgmi.num_physical_nodes > 1)
2058		amdgpu_xgmi_remove_device(adev);
2059
2060	amdgpu_amdkfd_device_fini(adev);
2061
2062	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2063	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2064
2065	/* need to disable SMC first */
2066	for (i = 0; i < adev->num_ip_blocks; i++) {
2067		if (!adev->ip_blocks[i].status.hw)
2068			continue;
2069		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2070			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2071			/* XXX handle errors */
2072			if (r) {
2073				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2074					  adev->ip_blocks[i].version->funcs->name, r);
2075			}
2076			adev->ip_blocks[i].status.hw = false;
2077			break;
2078		}
2079	}
2080
2081	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2082		if (!adev->ip_blocks[i].status.hw)
2083			continue;
2084
2085		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2086		/* XXX handle errors */
2087		if (r) {
2088			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2089				  adev->ip_blocks[i].version->funcs->name, r);
2090		}
2091
2092		adev->ip_blocks[i].status.hw = false;
2093	}
2094
2095
2096	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2097		if (!adev->ip_blocks[i].status.sw)
2098			continue;
2099
2100		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2101			amdgpu_ucode_free_bo(adev);
2102			amdgpu_free_static_csa(&adev->virt.csa_obj);
2103			amdgpu_device_wb_fini(adev);
2104			amdgpu_device_vram_scratch_fini(adev);
2105			amdgpu_ib_pool_fini(adev);
2106		}
2107
2108		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2109		/* XXX handle errors */
2110		if (r) {
2111			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2112				  adev->ip_blocks[i].version->funcs->name, r);
2113		}
2114		adev->ip_blocks[i].status.sw = false;
2115		adev->ip_blocks[i].status.valid = false;
2116	}
2117
2118	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2119		if (!adev->ip_blocks[i].status.late_initialized)
2120			continue;
2121		if (adev->ip_blocks[i].version->funcs->late_fini)
2122			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2123		adev->ip_blocks[i].status.late_initialized = false;
2124	}
2125
2126	amdgpu_ras_fini(adev);
2127
2128	if (amdgpu_sriov_vf(adev))
2129		if (amdgpu_virt_release_full_gpu(adev, false))
2130			DRM_ERROR("failed to release exclusive mode on fini\n");
2131
2132	return 0;
2133}
2134
2135/**
2136 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2137 *
2138 * @work: work_struct.
2139 */
2140static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2141{
2142	struct amdgpu_device *adev =
2143		container_of(work, struct amdgpu_device, delayed_init_work.work);
2144	int r;
2145
2146	r = amdgpu_ib_ring_tests(adev);
2147	if (r)
2148		DRM_ERROR("ib ring test failed (%d).\n", r);
2149}
2150
2151static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2152{
2153	struct amdgpu_device *adev =
2154		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2155
2156	mutex_lock(&adev->gfx.gfx_off_mutex);
2157	if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2158		if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2159			adev->gfx.gfx_off_state = true;
2160	}
2161	mutex_unlock(&adev->gfx.gfx_off_mutex);
2162}
2163
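/*
 * Illustrative usage sketch, not part of the driver (it assumes the
 * amdgpu_gfx_off_ctrl() helper): code that needs GFX awake bumps the
 * request count, which defers the delayed work above:
 *
 *	amdgpu_gfx_off_ctrl(adev, false);	(disallow GFXOFF)
 *	... touch GFX registers ...
 *	amdgpu_gfx_off_ctrl(adev, true);	(allow again, re-arms delay)
 */
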
2164/**
2165 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2166 *
2167 * @adev: amdgpu_device pointer
2168 *
2169 * Main suspend function for hardware IPs.  The list of all the hardware
2170 * IPs that make up the asic is walked, clockgating is disabled and the
2171 * suspend callbacks are run.  suspend puts the hardware and software state
2172 * in each IP into a state suitable for suspend.
2173 * Returns 0 on success, negative error code on failure.
2174 */
2175static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2176{
2177	int i, r;
2178
2179	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2180	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2181
2182	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2183		if (!adev->ip_blocks[i].status.valid)
2184			continue;
2185		/* displays are handled separately */
2186		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
2188			r = adev->ip_blocks[i].version->funcs->suspend(adev);
2189			/* XXX handle errors */
2190			if (r) {
2191				DRM_ERROR("suspend of IP block <%s> failed %d\n",
2192					  adev->ip_blocks[i].version->funcs->name, r);
2193				return r;
2194			}
2195			adev->ip_blocks[i].status.hw = false;
2196		}
2197	}
2198
2199	return 0;
2200}
2201
2202/**
2203 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2204 *
2205 * @adev: amdgpu_device pointer
2206 *
2207 * Main suspend function for hardware IPs.  The list of all the hardware
2208 * IPs that make up the asic is walked, clockgating is disabled and the
2209 * suspend callbacks are run.  suspend puts the hardware and software state
2210 * in each IP into a state suitable for suspend.
2211 * Returns 0 on success, negative error code on failure.
2212 */
2213static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2214{
2215	int i, r;
2216
2217	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2218		if (!adev->ip_blocks[i].status.valid)
2219			continue;
2220		/* displays are handled in phase1 */
2221		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2222			continue;
2224		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2225		/* XXX handle errors */
2226		if (r) {
2227			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2228				  adev->ip_blocks[i].version->funcs->name, r);
2229		}
2230		adev->ip_blocks[i].status.hw = false;
2231		/* handle putting the SMC in the appropriate state */
2232		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2233			if (is_support_sw_smu(adev)) {
2234				/* todo */
2235			} else if (adev->powerplay.pp_funcs &&
2236					   adev->powerplay.pp_funcs->set_mp1_state) {
2237				r = adev->powerplay.pp_funcs->set_mp1_state(
2238					adev->powerplay.pp_handle,
2239					adev->mp1_state);
2240				if (r) {
2241					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2242						  adev->mp1_state, r);
2243					return r;
2244				}
2245			}
2246		}
2249	}
2250
2251	return 0;
2252}
2253
2254/**
2255 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2256 *
2257 * @adev: amdgpu_device pointer
2258 *
2259 * Main suspend function for hardware IPs.  The list of all the hardware
2260 * IPs that make up the asic is walked, clockgating is disabled and the
2261 * suspend callbacks are run.  suspend puts the hardware and software state
2262 * in each IP into a state suitable for suspend.
2263 * Returns 0 on success, negative error code on failure.
2264 */
2265int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2266{
2267	int r;
2268
2269	if (amdgpu_sriov_vf(adev))
2270		amdgpu_virt_request_full_gpu(adev, false);
2271
2272	r = amdgpu_device_ip_suspend_phase1(adev);
2273	if (r)
2274		return r;
2275	r = amdgpu_device_ip_suspend_phase2(adev);
2276
2277	if (amdgpu_sriov_vf(adev))
2278		amdgpu_virt_release_full_gpu(adev, false);
2279
2280	return r;
2281}
2282
2283static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2284{
2285	int i, r;
2286
2287	static enum amd_ip_block_type ip_order[] = {
2288		AMD_IP_BLOCK_TYPE_GMC,
2289		AMD_IP_BLOCK_TYPE_COMMON,
2290		AMD_IP_BLOCK_TYPE_PSP,
2291		AMD_IP_BLOCK_TYPE_IH,
2292	};
2293
2294	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2295		int j;
2296		struct amdgpu_ip_block *block;
2297
2298		for (j = 0; j < adev->num_ip_blocks; j++) {
2299			block = &adev->ip_blocks[j];
2300
2301			block->status.hw = false;
2302			if (block->version->type != ip_order[i] ||
2303				!block->status.valid)
2304				continue;
2305
2306			r = block->version->funcs->hw_init(adev);
2307			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2308			if (r)
2309				return r;
2310			block->status.hw = true;
2311		}
2312	}
2313
2314	return 0;
2315}
2316
2317static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2318{
2319	int i, r;
2320
2321	static enum amd_ip_block_type ip_order[] = {
2322		AMD_IP_BLOCK_TYPE_SMC,
2323		AMD_IP_BLOCK_TYPE_DCE,
2324		AMD_IP_BLOCK_TYPE_GFX,
2325		AMD_IP_BLOCK_TYPE_SDMA,
2326		AMD_IP_BLOCK_TYPE_UVD,
2327		AMD_IP_BLOCK_TYPE_VCE
2328	};
2329
2330	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2331		int j;
2332		struct amdgpu_ip_block *block;
2333
2334		for (j = 0; j < adev->num_ip_blocks; j++) {
2335			block = &adev->ip_blocks[j];
2336
2337			if (block->version->type != ip_order[i] ||
2338				!block->status.valid ||
2339				block->status.hw)
2340				continue;
2341
2342			r = block->version->funcs->hw_init(adev);
2343			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2344			if (r)
2345				return r;
2346			block->status.hw = true;
2347		}
2348	}
2349
2350	return 0;
2351}
2352
2353/**
2354 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2355 *
2356 * @adev: amdgpu_device pointer
2357 *
2358 * First resume function for hardware IPs.  The list of all the hardware
2359 * IPs that make up the asic is walked and the resume callbacks are run for
2360 * COMMON, GMC, and IH.  resume puts the hardware into a functional state
2361 * after a suspend and updates the software state as necessary.  This
2362 * function is also used for restoring the GPU after a GPU reset.
2363 * Returns 0 on success, negative error code on failure.
2364 */
2365static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2366{
2367	int i, r;
2368
2369	for (i = 0; i < adev->num_ip_blocks; i++) {
2370		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2371			continue;
2372		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2373		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2374		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2375
2376			r = adev->ip_blocks[i].version->funcs->resume(adev);
2377			if (r) {
2378				DRM_ERROR("resume of IP block <%s> failed %d\n",
2379					  adev->ip_blocks[i].version->funcs->name, r);
2380				return r;
2381			}
2382			adev->ip_blocks[i].status.hw = true;
2383		}
2384	}
2385
2386	return 0;
2387}
2388
2389/**
2390 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2391 *
2392 * @adev: amdgpu_device pointer
2393 *
2394 * Second resume function for hardware IPs.  The list of all the hardware
2395 * IPs that make up the asic is walked and the resume callbacks are run for
2396 * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
2397 * functional state after a suspend and updates the software state as
2398 * necessary.  This function is also used for restoring the GPU after a GPU
2399 * reset.
2400 * Returns 0 on success, negative error code on failure.
2401 */
2402static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2403{
2404	int i, r;
2405
2406	for (i = 0; i < adev->num_ip_blocks; i++) {
2407		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2408			continue;
2409		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2410		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2411		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2412		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2413			continue;
2414		r = adev->ip_blocks[i].version->funcs->resume(adev);
2415		if (r) {
2416			DRM_ERROR("resume of IP block <%s> failed %d\n",
2417				  adev->ip_blocks[i].version->funcs->name, r);
2418			return r;
2419		}
2420		adev->ip_blocks[i].status.hw = true;
2421	}
2422
2423	return 0;
2424}
2425
2426/**
2427 * amdgpu_device_ip_resume - run resume for hardware IPs
2428 *
2429 * @adev: amdgpu_device pointer
2430 *
2431 * Main resume function for hardware IPs.  The hardware IPs
2432 * are split into two resume functions because they are
2433 * also used in recovering from a GPU reset and some additional
2434 * steps need to be taken between them.  In this case (S3/S4) they are
2435 * run sequentially.
2436 * Returns 0 on success, negative error code on failure.
2437 */
2438static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2439{
2440	int r;
2441
2442	r = amdgpu_device_ip_resume_phase1(adev);
2443	if (r)
2444		return r;
2445
2446	r = amdgpu_device_fw_loading(adev);
2447	if (r)
2448		return r;
2449
2450	r = amdgpu_device_ip_resume_phase2(adev);
2451
2452	return r;
2453}
2454
2455/**
2456 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2457 *
2458 * @adev: amdgpu_device pointer
2459 *
2460 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2461 */
2462static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2463{
2464	if (amdgpu_sriov_vf(adev)) {
2465		if (adev->is_atom_fw) {
2466			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2467				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2468		} else {
2469			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2470				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2471		}
2472
2473		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2474			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2475	}
2476}
2477
2478/**
2479 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2480 *
2481 * @asic_type: AMD asic type
2482 *
2483 * Check if there is DC (new modesetting infrastructure) support for an asic.
2484 * Returns true if DC has support, false if not.
2485 */
2486bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2487{
2488	switch (asic_type) {
2489#if defined(CONFIG_DRM_AMD_DC)
2490	case CHIP_BONAIRE:
2491	case CHIP_KAVERI:
2492	case CHIP_KABINI:
2493	case CHIP_MULLINS:
2494		/*
2495		 * We have systems in the wild with these ASICs that require
2496		 * LVDS and VGA support which is not supported with DC.
2497		 *
2498		 * Fallback to the non-DC driver here by default so as not to
2499		 * cause regressions.
2500		 */
2501		return amdgpu_dc > 0;
2502	case CHIP_HAWAII:
2503	case CHIP_CARRIZO:
2504	case CHIP_STONEY:
2505	case CHIP_POLARIS10:
2506	case CHIP_POLARIS11:
2507	case CHIP_POLARIS12:
2508	case CHIP_VEGAM:
2509	case CHIP_TONGA:
2510	case CHIP_FIJI:
2511	case CHIP_VEGA10:
2512	case CHIP_VEGA12:
2513	case CHIP_VEGA20:
2514#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2515	case CHIP_RAVEN:
2516#endif
2517#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2518	case CHIP_NAVI10:
2519	case CHIP_NAVI14:
2520	case CHIP_NAVI12:
2521#endif
2522#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2523	case CHIP_RENOIR:
2524#endif
2525		return amdgpu_dc != 0;
2526#endif
2527	default:
2528		return false;
2529	}
2530}
2531
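/*
 * Illustrative usage note, not part of the driver: per the switch above,
 * DC must be opted into on the listed CIK parts (amdgpu_dc > 0) and can
 * be toggled elsewhere with the module parameter, e.g.:
 *
 *	modprobe amdgpu dc=1
 */
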
2532/**
2533 * amdgpu_device_has_dc_support - check if dc is supported
2534 *
2535 * @adev: amdgpu_device pointer
2536 *
2537 * Returns true for supported, false for not supported
2538 */
2539bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2540{
2541	if (amdgpu_sriov_vf(adev))
2542		return false;
2543
2544	return amdgpu_device_asic_has_dc_support(adev->asic_type);
2545}
2546
2547
2548static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
2549{
2550	struct amdgpu_device *adev =
2551		container_of(__work, struct amdgpu_device, xgmi_reset_work);
2552
2553	adev->asic_reset_res = amdgpu_asic_reset(adev);
2554	if (adev->asic_reset_res)
2555		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
2556			 adev->asic_reset_res, adev->ddev->unique);
2557}
2558
2559
2560/**
2561 * amdgpu_device_init - initialize the driver
2562 *
2563 * @adev: amdgpu_device pointer
2564 * @ddev: drm dev pointer
2565 * @pdev: pci dev pointer
2566 * @flags: driver flags
2567 *
2568 * Initializes the driver info and hw (all asics).
2569 * Returns 0 for success or an error on failure.
2570 * Called at driver startup.
2571 */
2572int amdgpu_device_init(struct amdgpu_device *adev,
2573		       struct drm_device *ddev,
2574		       struct pci_dev *pdev,
2575		       uint32_t flags)
2576{
2577	int r, i;
2578	bool runtime = false;
2579	u32 max_MBps;
2580
2581	adev->shutdown = false;
2582	adev->dev = &pdev->dev;
2583	adev->ddev = ddev;
2584	adev->pdev = pdev;
2585	adev->flags = flags;
2586	adev->asic_type = flags & AMD_ASIC_MASK;
2587	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2588	if (amdgpu_emu_mode == 1)
2589		adev->usec_timeout *= 2;
2590	adev->gmc.gart_size = 512 * 1024 * 1024;
2591	adev->accel_working = false;
2592	adev->num_rings = 0;
2593	adev->mman.buffer_funcs = NULL;
2594	adev->mman.buffer_funcs_ring = NULL;
2595	adev->vm_manager.vm_pte_funcs = NULL;
2596	adev->vm_manager.vm_pte_num_rqs = 0;
2597	adev->gmc.gmc_funcs = NULL;
2598	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2599	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2600
2601	adev->smc_rreg = &amdgpu_invalid_rreg;
2602	adev->smc_wreg = &amdgpu_invalid_wreg;
2603	adev->pcie_rreg = &amdgpu_invalid_rreg;
2604	adev->pcie_wreg = &amdgpu_invalid_wreg;
2605	adev->pciep_rreg = &amdgpu_invalid_rreg;
2606	adev->pciep_wreg = &amdgpu_invalid_wreg;
2607	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
2608	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
2609	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2610	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2611	adev->didt_rreg = &amdgpu_invalid_rreg;
2612	adev->didt_wreg = &amdgpu_invalid_wreg;
2613	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2614	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
2615	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2616	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2617
2618	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2619		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2620		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
2621
2622	/* mutex initializations are all done here so we
2623	 * can call these functions again without locking issues */
2624	atomic_set(&adev->irq.ih.lock, 0);
2625	mutex_init(&adev->firmware.mutex);
2626	mutex_init(&adev->pm.mutex);
2627	mutex_init(&adev->gfx.gpu_clock_mutex);
2628	mutex_init(&adev->srbm_mutex);
2629	mutex_init(&adev->gfx.pipe_reserve_mutex);
2630	mutex_init(&adev->gfx.gfx_off_mutex);
2631	mutex_init(&adev->grbm_idx_mutex);
2632	mutex_init(&adev->mn_lock);
2633	mutex_init(&adev->virt.vf_errors.lock);
2634	hash_init(adev->mn_hash);
2635	mutex_init(&adev->lock_reset);
2636	mutex_init(&adev->virt.dpm_mutex);
2637	mutex_init(&adev->psp.mutex);
2638
2639	r = amdgpu_device_check_arguments(adev);
2640	if (r)
2641		return r;
2642
2643	spin_lock_init(&adev->mmio_idx_lock);
2644	spin_lock_init(&adev->smc_idx_lock);
2645	spin_lock_init(&adev->pcie_idx_lock);
2646	spin_lock_init(&adev->uvd_ctx_idx_lock);
2647	spin_lock_init(&adev->didt_idx_lock);
2648	spin_lock_init(&adev->gc_cac_idx_lock);
2649	spin_lock_init(&adev->se_cac_idx_lock);
2650	spin_lock_init(&adev->audio_endpt_idx_lock);
2651	spin_lock_init(&adev->mm_stats.lock);
2652
2653	INIT_LIST_HEAD(&adev->shadow_list);
2654	mutex_init(&adev->shadow_list_lock);
2655
2656	INIT_LIST_HEAD(&adev->ring_lru_list);
2657	spin_lock_init(&adev->ring_lru_list_lock);
2658
2659	INIT_DELAYED_WORK(&adev->delayed_init_work,
2660			  amdgpu_device_delayed_init_work_handler);
2661	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
2662			  amdgpu_device_delay_enable_gfx_off);
2663
2664	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
2665
2666	adev->gfx.gfx_off_req_count = 1;
2667	adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
2668
2669	/* Registers mapping */
2670	/* TODO: block userspace mapping of io register */
2671	if (adev->asic_type >= CHIP_BONAIRE) {
2672		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2673		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2674	} else {
2675		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2676		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2677	}
2678
2679	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2680	if (adev->rmmio == NULL) {
2681		return -ENOMEM;
2682	}
2683	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2684	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2685
2686	/* io port mapping */
2687	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2688		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2689			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2690			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2691			break;
2692		}
2693	}
2694	if (adev->rio_mem == NULL)
2695		DRM_INFO("PCI I/O BAR is not found.\n");
2696
2697	/* enable PCIE atomic ops */
2698	r = pci_enable_atomic_ops_to_root(adev->pdev,
2699					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
2700					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
2701	if (r) {
2702		adev->have_atomics_support = false;
2703		DRM_INFO("PCIE atomic ops are not supported\n");
2704	} else {
2705		adev->have_atomics_support = true;
2706	}
2707
2708	amdgpu_device_get_pcie_info(adev);
2709
2710	if (amdgpu_mcbp)
2711		DRM_INFO("MCBP is enabled\n");
2712
2713	if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
2714		adev->enable_mes = true;
2715
2716	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
2717		r = amdgpu_discovery_init(adev);
2718		if (r) {
2719			dev_err(adev->dev, "amdgpu_discovery_init failed\n");
2720			return r;
2721		}
2722	}
2723
2724	/* early init functions */
2725	r = amdgpu_device_ip_early_init(adev);
2726	if (r)
2727		return r;
2728
2729	/* doorbell bar mapping and doorbell index init*/
2730	amdgpu_device_doorbell_init(adev);
2731
2732	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2733	/* this will fail for cards that aren't VGA class devices, just
2734	 * ignore it */
2735	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
2736
2737	if (amdgpu_device_is_px(ddev))
2738		runtime = true;
2739	if (!pci_is_thunderbolt_attached(adev->pdev))
2740		vga_switcheroo_register_client(adev->pdev,
2741					       &amdgpu_switcheroo_ops, runtime);
2742	if (runtime)
2743		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2744
2745	if (amdgpu_emu_mode == 1) {
2746		/* post the asic on emulation mode */
2747		emu_soc_asic_init(adev);
2748		goto fence_driver_init;
2749	}
2750
2751	/* detect if we are with an SRIOV vbios */
2752	amdgpu_device_detect_sriov_bios(adev);
2753
2754	/* check if we need to reset the asic
2755	 *  E.g., driver was not cleanly unloaded previously, etc.
2756	 */
2757	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
2758		r = amdgpu_asic_reset(adev);
2759		if (r) {
2760			dev_err(adev->dev, "asic reset on init failed\n");
2761			goto failed;
2762		}
2763	}
2764
2765	/* Post card if necessary */
2766	if (amdgpu_device_need_post(adev)) {
2767		if (!adev->bios) {
2768			dev_err(adev->dev, "no vBIOS found\n");
2769			r = -EINVAL;
2770			goto failed;
2771		}
2772		DRM_INFO("GPU posting now...\n");
2773		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2774		if (r) {
2775			dev_err(adev->dev, "gpu post error!\n");
2776			goto failed;
2777		}
2778	}
2779
2780	if (adev->is_atom_fw) {
2781		/* Initialize clocks */
2782		r = amdgpu_atomfirmware_get_clock_info(adev);
2783		if (r) {
2784			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
2785			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2786			goto failed;
2787		}
2788	} else {
2789		/* Initialize clocks */
2790		r = amdgpu_atombios_get_clock_info(adev);
2791		if (r) {
2792			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
2793			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2794			goto failed;
2795		}
2796		/* init i2c buses */
2797		if (!amdgpu_device_has_dc_support(adev))
2798			amdgpu_atombios_i2c_init(adev);
2799	}
2800
2801fence_driver_init:
2802	/* Fence driver */
2803	r = amdgpu_fence_driver_init(adev);
2804	if (r) {
2805		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
2806		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
2807		goto failed;
2808	}
2809
2810	/* init the mode config */
2811	drm_mode_config_init(adev->ddev);
2812
2813	r = amdgpu_device_ip_init(adev);
2814	if (r) {
2815		/* failed in exclusive mode due to timeout */
2816		if (amdgpu_sriov_vf(adev) &&
2817		    !amdgpu_sriov_runtime(adev) &&
2818		    amdgpu_virt_mmio_blocked(adev) &&
2819		    !amdgpu_virt_wait_reset(adev)) {
2820			dev_err(adev->dev, "VF exclusive mode timeout\n");
2821			/* Don't send request since VF is inactive. */
2822			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2823			adev->virt.ops = NULL;
2824			r = -EAGAIN;
2825			goto failed;
2826		}
2827		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
2828		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
2829		if (amdgpu_virt_request_full_gpu(adev, false))
2830			amdgpu_virt_release_full_gpu(adev, false);
2831		goto failed;
2832	}
2833
2834	adev->accel_working = true;
2835
2836	amdgpu_vm_check_compute_bug(adev);
2837
2838	/* Initialize the buffer migration limit. */
2839	if (amdgpu_moverate >= 0)
2840		max_MBps = amdgpu_moverate;
2841	else
2842		max_MBps = 8; /* Allow 8 MB/s. */
2843	/* Get a log2 for easy divisions. */
2844	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2845
2846	amdgpu_fbdev_init(adev);
2847
2848	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
2849		amdgpu_pm_virt_sysfs_init(adev);
2850
2851	r = amdgpu_pm_sysfs_init(adev);
2852	if (r)
2853		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2854
2855	r = amdgpu_ucode_sysfs_init(adev);
2856	if (r)
2857		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
2858
2859	r = amdgpu_debugfs_gem_init(adev);
2860	if (r)
2861		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
2862
2863	r = amdgpu_debugfs_regs_init(adev);
2864	if (r)
2865		DRM_ERROR("registering register debugfs failed (%d).\n", r);
2866
2867	r = amdgpu_debugfs_firmware_init(adev);
2868	if (r)
2869		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
2870
2871	r = amdgpu_debugfs_init(adev);
2872	if (r)
2873		DRM_ERROR("Creating debugfs files failed (%d).\n", r);
2874
2875	if (amdgpu_testing & 1) {
2876		if (adev->accel_working)
2877			amdgpu_test_moves(adev);
2878		else
2879			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2880	}
2881	if (amdgpu_benchmarking) {
2882		if (adev->accel_working)
2883			amdgpu_benchmark(adev, amdgpu_benchmarking);
2884		else
2885			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2886	}
2887
2888	/*
2889	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
2890	 * Otherwise the mgpu fan boost feature will be skipped due to the
2891	 * gpu instance is counted less.
2892	 */
2893	amdgpu_register_gpu_instance(adev);
2894
2895	/* enable clockgating, etc. after ib tests, etc. since some blocks require
2896	 * explicit gating rather than handling it automatically.
2897	 */
2898	r = amdgpu_device_ip_late_init(adev);
2899	if (r) {
2900		dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
2901		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
2902		goto failed;
2903	}
2904
2905	/* must succeed. */
2906	amdgpu_ras_resume(adev);
2907
2908	queue_delayed_work(system_wq, &adev->delayed_init_work,
2909			   msecs_to_jiffies(AMDGPU_RESUME_MS));
2910
2911	r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
2912	if (r) {
2913		dev_err(adev->dev, "Could not create pcie_replay_count\n");
2914		return r;
2915	}
2916
2917	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
2918		r = amdgpu_pmu_init(adev);
2919		if (r)
2920			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}
2921
2922	return 0;
2923
2924failed:
2925	amdgpu_vf_error_trans_all(adev);
2926	if (runtime)
2927		vga_switcheroo_fini_domain_pm_ops(adev->dev);
2928
2929	return r;
2930}
2931
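/*
 * Illustrative sketch only (not part of this file): amdgpu_device_init()
 * returns -EAGAIN when an SR-IOV VF times out waiting for exclusive mode,
 * and the probe path is expected to retry initialization.  A hypothetical
 * caller could look like the following; the function name, retry count and
 * delay are assumptions, not values taken from this driver.
 *
 *	static int hypothetical_init_with_retry(struct amdgpu_device *adev,
 *						struct drm_device *ddev,
 *						struct pci_dev *pdev,
 *						uint32_t flags)
 *	{
 *		int retries = 2, r;
 *
 *		do {
 *			r = amdgpu_device_init(adev, ddev, pdev, flags);
 *			if (r != -EAGAIN)
 *				break;
 *			msleep(5000);
 *		} while (retries--);
 *
 *		return r;
 *	}
 */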
2932/**
2933 * amdgpu_device_fini - tear down the driver
2934 *
2935 * @adev: amdgpu_device pointer
2936 *
2937 * Tear down the driver info (all asics).
2938 * Called at driver shutdown.
2939 */
2940void amdgpu_device_fini(struct amdgpu_device *adev)
2941{
2942	int r;
2943
2944	DRM_INFO("amdgpu: finishing device.\n");
2945	adev->shutdown = true;
2946	/* disable all interrupts */
2947	amdgpu_irq_disable_all(adev);
2948	if (adev->mode_info.mode_config_initialized) {
2949		if (!amdgpu_device_has_dc_support(adev))
2950			drm_helper_force_disable_all(adev->ddev);
2951		else
2952			drm_atomic_helper_shutdown(adev->ddev);
2953	}
2954	amdgpu_fence_driver_fini(adev);
2955	amdgpu_pm_sysfs_fini(adev);
2956	amdgpu_fbdev_fini(adev);
2957	r = amdgpu_device_ip_fini(adev);
2958	if (adev->firmware.gpu_info_fw) {
2959		release_firmware(adev->firmware.gpu_info_fw);
2960		adev->firmware.gpu_info_fw = NULL;
2961	}
2962	adev->accel_working = false;
2963	cancel_delayed_work_sync(&adev->delayed_init_work);
2964	/* free i2c buses */
2965	if (!amdgpu_device_has_dc_support(adev))
2966		amdgpu_i2c_fini(adev);
2967
2968	if (amdgpu_emu_mode != 1)
2969		amdgpu_atombios_fini(adev);
2970
2971	kfree(adev->bios);
2972	adev->bios = NULL;
2973	if (!pci_is_thunderbolt_attached(adev->pdev))
2974		vga_switcheroo_unregister_client(adev->pdev);
2975	if (adev->flags & AMD_IS_PX)
2976		vga_switcheroo_fini_domain_pm_ops(adev->dev);
2977	vga_client_register(adev->pdev, NULL, NULL, NULL);
2978	if (adev->rio_mem)
2979		pci_iounmap(adev->pdev, adev->rio_mem);
2980	adev->rio_mem = NULL;
2981	iounmap(adev->rmmio);
2982	adev->rmmio = NULL;
2983	amdgpu_device_doorbell_fini(adev);
2984	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
2985		amdgpu_pm_virt_sysfs_fini(adev);
2986
2987	amdgpu_debugfs_regs_cleanup(adev);
2988	device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
2989	amdgpu_ucode_sysfs_fini(adev);
2990	if (IS_ENABLED(CONFIG_PERF_EVENTS))
2991		amdgpu_pmu_fini(adev);
2992	amdgpu_debugfs_preempt_cleanup(adev);
2993	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
2994		amdgpu_discovery_fini(adev);
2995}
2996
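/*
 * Illustrative sketch only: amdgpu_device_fini() is reached from the drm
 * unload path, which in turn is driven by the PCI remove callback.  A
 * minimal, hypothetical remove hook (names assumed, not taken from this
 * file) would be:
 *
 *	static void hypothetical_pci_remove(struct pci_dev *pdev)
 *	{
 *		struct drm_device *ddev = pci_get_drvdata(pdev);
 *
 *		drm_dev_unregister(ddev);
 *		amdgpu_device_fini(ddev->dev_private);
 *		drm_dev_put(ddev);
 *	}
 */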
2997
2998/*
2999 * Suspend & resume.
3000 */
3001/**
3002 * amdgpu_device_suspend - initiate device suspend
3003 *
3004 * @dev: drm dev pointer
3005 * @suspend: suspend state
3006 * @fbcon: notify the fbdev of suspend
3007 *
3008 * Puts the hw in the suspend state (all asics).
3009 * Returns 0 for success or an error on failure.
3010 * Called at driver suspend.
3011 */
3012int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
3013{
3014	struct amdgpu_device *adev;
3015	struct drm_crtc *crtc;
3016	struct drm_connector *connector;
3017	int r;
3018
3019	if (dev == NULL || dev->dev_private == NULL)
3020		return -ENODEV;
3022
3023	adev = dev->dev_private;
3024
3025	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3026		return 0;
3027
3028	adev->in_suspend = true;
3029	drm_kms_helper_poll_disable(dev);
3030
3031	if (fbcon)
3032		amdgpu_fbdev_set_suspend(adev, 1);
3033
3034	cancel_delayed_work_sync(&adev->delayed_init_work);
3035
3036	if (!amdgpu_device_has_dc_support(adev)) {
3037		/* turn off display hw */
3038		drm_modeset_lock_all(dev);
3039		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3040			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
3041		}
3042		drm_modeset_unlock_all(dev);
3043		/* unpin the front buffers and cursors */
3044		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3045			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3046			struct drm_framebuffer *fb = crtc->primary->fb;
3047			struct amdgpu_bo *robj;
3048
3049			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3050				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3051				r = amdgpu_bo_reserve(aobj, true);
3052				if (r == 0) {
3053					amdgpu_bo_unpin(aobj);
3054					amdgpu_bo_unreserve(aobj);
3055				}
3056			}
3057
3058			if (fb == NULL || fb->obj[0] == NULL)
3059				continue;
3061			robj = gem_to_amdgpu_bo(fb->obj[0]);
3062			/* don't unpin kernel fb objects */
3063			if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3064				r = amdgpu_bo_reserve(robj, true);
3065				if (r == 0) {
3066					amdgpu_bo_unpin(robj);
3067					amdgpu_bo_unreserve(robj);
3068				}
3069			}
3070		}
3071	}
3072
3073	amdgpu_amdkfd_suspend(adev);
3074
3075	amdgpu_ras_suspend(adev);
3076
3077	r = amdgpu_device_ip_suspend_phase1(adev);
3078
3079	/* evict vram memory */
3080	amdgpu_bo_evict_vram(adev);
3081
3082	amdgpu_fence_driver_suspend(adev);
3083
3084	r = amdgpu_device_ip_suspend_phase2(adev);
3085
3086	/* evict remaining vram memory
3087	 * This second call to evict vram is to evict the gart page table
3088	 * using the CPU.
3089	 */
3090	amdgpu_bo_evict_vram(adev);
3091
3092	pci_save_state(dev->pdev);
3093	if (suspend) {
3094		/* Shut down the device */
3095		pci_disable_device(dev->pdev);
3096		pci_set_power_state(dev->pdev, PCI_D3hot);
3097	} else {
3098		r = amdgpu_asic_reset(adev);
3099		if (r)
3100			DRM_ERROR("amdgpu asic reset failed\n");
3101	}
3102
3103	return 0;
3104}
3105
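/*
 * Illustrative sketch only: a system sleep path calls
 * amdgpu_device_suspend() with suspend=true so the PCI device ends up in
 * D3hot, while reset-style callers pass suspend=false to keep the device
 * powered and perform an ASIC reset instead.  A hypothetical dev_pm_ops
 * wrapper, assuming drvdata holds the drm_device:
 *
 *	static int hypothetical_pmops_suspend(struct device *dev)
 *	{
 *		struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *		return amdgpu_device_suspend(drm_dev, true, true);
 *	}
 */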
3106/**
3107 * amdgpu_device_resume - initiate device resume
3108 *
3109 * @dev: drm dev pointer
3110 * @resume: resume state
3111 * @fbcon: notify the fbdev of resume
3112 *
3113 * Bring the hw back to operating state (all asics).
3114 * Returns 0 for success or an error on failure.
3115 * Called at driver resume.
3116 */
3117int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
3118{
3119	struct drm_connector *connector;
3120	struct amdgpu_device *adev = dev->dev_private;
3121	struct drm_crtc *crtc;
3122	int r = 0;
3123
3124	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3125		return 0;
3126
3127	if (resume) {
3128		pci_set_power_state(dev->pdev, PCI_D0);
3129		pci_restore_state(dev->pdev);
3130		r = pci_enable_device(dev->pdev);
3131		if (r)
3132			return r;
3133	}
3134
3135	/* post card */
3136	if (amdgpu_device_need_post(adev)) {
3137		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
3138		if (r)
3139			DRM_ERROR("amdgpu asic init failed\n");
3140	}
3141
3142	r = amdgpu_device_ip_resume(adev);
3143	if (r) {
3144		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
3145		return r;
3146	}
3147	amdgpu_fence_driver_resume(adev);
3148
3149
3150	r = amdgpu_device_ip_late_init(adev);
3151	if (r)
3152		return r;
3153
3154	queue_delayed_work(system_wq, &adev->delayed_init_work,
3155			   msecs_to_jiffies(AMDGPU_RESUME_MS));
3156
3157	if (!amdgpu_device_has_dc_support(adev)) {
3158		/* pin cursors */
3159		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3160			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3161
3162			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3163				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3164				r = amdgpu_bo_reserve(aobj, true);
3165				if (r == 0) {
3166					r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3167					if (r != 0)
3168						DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
3169					amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3170					amdgpu_bo_unreserve(aobj);
3171				}
3172			}
3173		}
3174	}
3175	r = amdgpu_amdkfd_resume(adev);
3176	if (r)
3177		return r;
3178
3179	/* Make sure IB tests flushed */
3180	flush_delayed_work(&adev->delayed_init_work);
3181
3182	/* blat the mode back in */
3183	if (fbcon) {
3184		if (!amdgpu_device_has_dc_support(adev)) {
3185			/* pre DCE11 */
3186			drm_helper_resume_force_mode(dev);
3187
3188			/* turn on display hw */
3189			drm_modeset_lock_all(dev);
3190			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3191				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
3192			}
3193			drm_modeset_unlock_all(dev);
3194		}
3195		amdgpu_fbdev_set_suspend(adev, 0);
3196	}
3197
3198	drm_kms_helper_poll_enable(dev);
3199
3200	amdgpu_ras_resume(adev);
3201
3202	/*
3203	 * Most of the connector probing functions try to acquire runtime pm
3204	 * refs to ensure that the GPU is powered on when connector polling is
3205	 * performed. Since we're calling this from a runtime PM callback,
3206	 * trying to acquire rpm refs will cause us to deadlock.
3207	 *
3208	 * Since we're guaranteed to be holding the rpm lock, it's safe to
3209	 * temporarily disable the rpm helpers so this doesn't deadlock us.
3210	 */
3211#ifdef CONFIG_PM
3212	dev->dev->power.disable_depth++;
3213#endif
3214	if (!amdgpu_device_has_dc_support(adev))
3215		drm_helper_hpd_irq_event(dev);
3216	else
3217		drm_kms_helper_hotplug_event(dev);
3218#ifdef CONFIG_PM
3219	dev->dev->power.disable_depth--;
3220#endif
3221	adev->in_suspend = false;
3222
3223	return 0;
3224}
3225
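/*
 * Illustrative sketch only: the matching resume wrapper, again assuming
 * drvdata holds the drm_device.  resume=true re-enables the PCI device
 * after D3hot; fbcon=true unblanks the fbdev console.
 *
 *	static int hypothetical_pmops_resume(struct device *dev)
 *	{
 *		struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *		return amdgpu_device_resume(drm_dev, true, true);
 *	}
 */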
3226/**
3227 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3228 *
3229 * @adev: amdgpu_device pointer
3230 *
3231 * The list of all the hardware IPs that make up the asic is walked and
3232 * the check_soft_reset callbacks are run.  check_soft_reset determines
3233 * if the asic is still hung or not.
3234 * Returns true if any of the IPs are still in a hung state, false if not.
3235 */
3236static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3237{
3238	int i;
3239	bool asic_hang = false;
3240
3241	if (amdgpu_sriov_vf(adev))
3242		return true;
3243
3244	if (amdgpu_asic_need_full_reset(adev))
3245		return true;
3246
3247	for (i = 0; i < adev->num_ip_blocks; i++) {
3248		if (!adev->ip_blocks[i].status.valid)
3249			continue;
3250		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3251			adev->ip_blocks[i].status.hang =
3252				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3253		if (adev->ip_blocks[i].status.hang) {
3254			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3255			asic_hang = true;
3256		}
3257	}
3258	return asic_hang;
3259}
3260
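/*
 * Illustrative sketch only: an IP block opts into the walk above by
 * providing a check_soft_reset callback in its amd_ip_funcs table.  The
 * callback below is hypothetical; the register name and hang criterion
 * are assumptions, not real hardware state.
 *
 *	static bool hypothetical_ip_check_soft_reset(void *handle)
 *	{
 *		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 *
 *		return RREG32(HYPOTHETICAL_STATUS_REG) &
 *		       HYPOTHETICAL_HANG_BIT;
 *	}
 */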
3261/**
3262 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3263 *
3264 * @adev: amdgpu_device pointer
3265 *
3266 * The list of all the hardware IPs that make up the asic is walked and the
3267 * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
3268 * handles any IP specific hardware or software state changes that are
3269 * necessary for a soft reset to succeed.
3270 * Returns 0 on success, negative error code on failure.
3271 */
3272static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3273{
3274	int i, r = 0;
3275
3276	for (i = 0; i < adev->num_ip_blocks; i++) {
3277		if (!adev->ip_blocks[i].status.valid)
3278			continue;
3279		if (adev->ip_blocks[i].status.hang &&
3280		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3281			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3282			if (r)
3283				return r;
3284		}
3285	}
3286
3287	return 0;
3288}
3289
3290/**
3291 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3292 *
3293 * @adev: amdgpu_device pointer
3294 *
3295 * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
3296 * reset is necessary to recover.
3297 * Returns true if a full asic reset is required, false if not.
3298 */
3299static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3300{
3301	int i;
3302
3303	if (amdgpu_asic_need_full_reset(adev))
3304		return true;
3305
3306	for (i = 0; i < adev->num_ip_blocks; i++) {
3307		if (!adev->ip_blocks[i].status.valid)
3308			continue;
3309		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3310		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3311		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3312		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3313		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3314			if (adev->ip_blocks[i].status.hang) {
3315				DRM_INFO("Some blocks need a full reset!\n");
3316				return true;
3317			}
3318		}
3319	}
3320	return false;
3321}
3322
3323/**
3324 * amdgpu_device_ip_soft_reset - do a soft reset
3325 *
3326 * @adev: amdgpu_device pointer
3327 *
3328 * The list of all the hardware IPs that make up the asic is walked and the
3329 * soft_reset callbacks are run if the block is hung.  soft_reset handles any
3330 * IP specific hardware or software state changes that are necessary to soft
3331 * reset the IP.
3332 * Returns 0 on success, negative error code on failure.
3333 */
3334static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3335{
3336	int i, r = 0;
3337
3338	for (i = 0; i < adev->num_ip_blocks; i++) {
3339		if (!adev->ip_blocks[i].status.valid)
3340			continue;
3341		if (adev->ip_blocks[i].status.hang &&
3342		    adev->ip_blocks[i].version->funcs->soft_reset) {
3343			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3344			if (r)
3345				return r;
3346		}
3347	}
3348
3349	return 0;
3350}
3351
3352/**
3353 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3354 *
3355 * @adev: amdgpu_device pointer
3356 *
3357 * The list of all the hardware IPs that make up the asic is walked and the
3358 * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
3359 * handles any IP specific hardware or software state changes that are
3360 * necessary after the IP has been soft reset.
3361 * Returns 0 on success, negative error code on failure.
3362 */
3363static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
3364{
3365	int i, r = 0;
3366
3367	for (i = 0; i < adev->num_ip_blocks; i++) {
3368		if (!adev->ip_blocks[i].status.valid)
3369			continue;
3370		if (adev->ip_blocks[i].status.hang &&
3371		    adev->ip_blocks[i].version->funcs->post_soft_reset)
3372			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
3373		if (r)
3374			return r;
3375	}
3376
3377	return 0;
3378}
3379
3380/**
3381 * amdgpu_device_recover_vram - Recover some VRAM contents
3382 *
3383 * @adev: amdgpu_device pointer
3384 *
3385 * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
3386 * restore things like GPUVM page tables after a GPU reset where
3387 * the contents of VRAM might be lost.
3388 *
3389 * Returns:
3390 * 0 on success, negative error code on failure.
3391 */
3392static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
3393{
3394	struct dma_fence *fence = NULL, *next = NULL;
3395	struct amdgpu_bo *shadow;
3396	long r = 1, tmo;
3397
3398	if (amdgpu_sriov_runtime(adev))
3399		tmo = msecs_to_jiffies(8000);
3400	else
3401		tmo = msecs_to_jiffies(100);
3402
3403	DRM_INFO("recover vram bo from shadow start\n");
3404	mutex_lock(&adev->shadow_list_lock);
3405	list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
3406
3407		/* No need to recover an evicted BO */
3408		if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
3409		    shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
3410		    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
3411			continue;
3412
3413		r = amdgpu_bo_restore_shadow(shadow, &next);
3414		if (r)
3415			break;
3416
3417		if (fence) {
3418			tmo = dma_fence_wait_timeout(fence, false, tmo);
3419			dma_fence_put(fence);
3420			fence = next;
3421			if (tmo == 0) {
3422				r = -ETIMEDOUT;
3423				break;
3424			} else if (tmo < 0) {
3425				r = tmo;
3426				break;
3427			}
3428		} else {
3429			fence = next;
3430		}
3431	}
3432	mutex_unlock(&adev->shadow_list_lock);
3433
3434	if (fence)
3435		tmo = dma_fence_wait_timeout(fence, false, tmo);
3436	dma_fence_put(fence);
3437
3438	if (r < 0 || tmo <= 0) {
3439		DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
3440		return -EIO;
3441	}
3442
3443	DRM_INFO("recover vram bo from shadow done\n");
3444	return 0;
3445}
3446
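/*
 * Illustrative sketch only: the loop above pipelines the restores by
 * always waiting on the previous fence while the next copy is already in
 * flight.  The bare pattern, detached from the TTM/BO specifics (the
 * for_each_item() and issue_async_copy() helpers are hypothetical):
 *
 *	struct dma_fence *fence = NULL, *next = NULL;
 *
 *	for_each_item(item) {
 *		issue_async_copy(item, &next);
 *		if (fence) {
 *			tmo = dma_fence_wait_timeout(fence, false, tmo);
 *			dma_fence_put(fence);
 *		}
 *		fence = next;
 *	}
 *	if (fence)
 *		tmo = dma_fence_wait_timeout(fence, false, tmo);
 *	dma_fence_put(fence);
 */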
3447
3448/**
3449 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
3450 *
3451 * @adev: amdgpu device pointer
3452 * @from_hypervisor: request from hypervisor
3453 *
3454 * Do a VF FLR and reinitialize the ASIC.
3455 * Returns 0 on success, or a negative error code on failure.
3456 */
3457static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3458				     bool from_hypervisor)
3459{
3460	int r;
3461
3462	if (from_hypervisor)
3463		r = amdgpu_virt_request_full_gpu(adev, true);
3464	else
3465		r = amdgpu_virt_reset_gpu(adev);
3466	if (r)
3467		return r;
3468
3469	amdgpu_amdkfd_pre_reset(adev);
3470
3471	/* Resume IP prior to SMC */
3472	r = amdgpu_device_ip_reinit_early_sriov(adev);
3473	if (r)
3474		goto error;
3475
3476	/* we need to recover the gart prior to running SMC/CP/SDMA resume */
3477	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
3478
3479	r = amdgpu_device_fw_loading(adev);
3480	if (r)
3481		goto error;
3482
3483	/* now we are okay to resume SMC/CP/SDMA */
3484	r = amdgpu_device_ip_reinit_late_sriov(adev);
3485	if (r)
3486		goto error;
3487
3488	amdgpu_irq_gpu_reset_resume_helper(adev);
3489	r = amdgpu_ib_ring_tests(adev);
3490	amdgpu_amdkfd_post_reset(adev);
3491
3492error:
3493	amdgpu_virt_init_data_exchange(adev);
3494	amdgpu_virt_release_full_gpu(adev, true);
3495	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3496		amdgpu_inc_vram_lost(adev);
3497		r = amdgpu_device_recover_vram(adev);
3498	}
3499
3500	return r;
3501}
3502
3503/**
3504 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
3505 *
3506 * @adev: amdgpu device pointer
3507 *
3508 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
3509 * a hung GPU.
3510 */
3511bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
3512{
3513	if (!amdgpu_device_ip_check_soft_reset(adev)) {
3514		DRM_INFO("Timeout, but no hardware hang detected.\n");
3515		return false;
3516	}
3517
3518	if (amdgpu_gpu_recovery == 0)
3519		goto disabled;
3520
3521	if (amdgpu_sriov_vf(adev))
3522		return true;
3523
3524	if (amdgpu_gpu_recovery == -1) {
3525		switch (adev->asic_type) {
3526		case CHIP_BONAIRE:
3527		case CHIP_HAWAII:
3528		case CHIP_TOPAZ:
3529		case CHIP_TONGA:
3530		case CHIP_FIJI:
3531		case CHIP_POLARIS10:
3532		case CHIP_POLARIS11:
3533		case CHIP_POLARIS12:
3534		case CHIP_VEGAM:
3535		case CHIP_VEGA20:
3536		case CHIP_VEGA10:
3537		case CHIP_VEGA12:
3538		case CHIP_RAVEN:
3539			break;
3540		default:
3541			goto disabled;
3542		}
3543	}
3544
3545	return true;
3546
3547disabled:
3548	DRM_INFO("GPU recovery disabled.\n");
3549	return false;
3550}
3551
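/*
 * Illustrative sketch only: amdgpu_gpu_recovery is the amdgpu.gpu_recovery
 * module parameter (-1 = auto per-ASIC policy as implemented above,
 * 0 = disabled, 1 = enabled).  A job timeout handler would be expected to
 * gate recovery on this helper, roughly:
 *
 *	if (amdgpu_device_should_recover_gpu(ring->adev))
 *		amdgpu_device_gpu_recover(ring->adev, job);
 *	else
 *		drm_sched_suspend_timeout(&ring->sched);
 */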
3552
3553static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
3554					struct amdgpu_job *job,
3555					bool *need_full_reset_arg)
3556{
3557	int i, r = 0;
3558	bool need_full_reset = *need_full_reset_arg;
3559
3560	/* block all schedulers and reset given job's ring */
3561	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3562		struct amdgpu_ring *ring = adev->rings[i];
3563
3564		if (!ring || !ring->sched.thread)
3565			continue;
3566
3567		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3568		amdgpu_fence_driver_force_completion(ring);
3569	}
3570
3571	if (job)
3572		drm_sched_increase_karma(&job->base);
3573
3574	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
3575	if (!amdgpu_sriov_vf(adev)) {
3576
3577		if (!need_full_reset)
3578			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
3579
3580		if (!need_full_reset) {
3581			amdgpu_device_ip_pre_soft_reset(adev);
3582			r = amdgpu_device_ip_soft_reset(adev);
3583			amdgpu_device_ip_post_soft_reset(adev);
3584			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
3585				DRM_INFO("soft reset failed, will fallback to full reset!\n");
3586				need_full_reset = true;
3587			}
3588		}
3589
3590		if (need_full_reset)
3591			r = amdgpu_device_ip_suspend(adev);
3592
3593		*need_full_reset_arg = need_full_reset;
3594	}
3595
3596	return r;
3597}
3598
3599static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
3600			       struct list_head *device_list_handle,
3601			       bool *need_full_reset_arg)
3602{
3603	struct amdgpu_device *tmp_adev = NULL;
3604	bool need_full_reset = *need_full_reset_arg, vram_lost = false;
3605	int r = 0;
3606
3607	/*
3608	 * ASIC reset has to be done on all XGMI hive nodes ASAP
3609	 * to allow proper link negotiation in FW (within 1 sec)
3610	 */
3611	if (need_full_reset) {
3612		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3613			/* For XGMI run all resets in parallel to speed up the process */
3614			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
3615				if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
3616					r = -EALREADY;
3617			} else {
3618				r = amdgpu_asic_reset(tmp_adev);
			}
3619
3620			if (r) {
3621				DRM_ERROR("ASIC reset failed with error %d for drm dev %s\n",
3622					  r, tmp_adev->ddev->unique);
3623				break;
3624			}
3625		}
3626
3627		/* For XGMI wait for all PSP resets to complete before proceed */
3628		if (!r) {
3629			list_for_each_entry(tmp_adev, device_list_handle,
3630					    gmc.xgmi.head) {
3631				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
3632					flush_work(&tmp_adev->xgmi_reset_work);
3633					r = tmp_adev->asic_reset_res;
3634					if (r)
3635						break;
3636				}
3637			}
3638
3639			list_for_each_entry(tmp_adev, device_list_handle,
3640					gmc.xgmi.head) {
3641				amdgpu_ras_reserve_bad_pages(tmp_adev);
3642			}
3643		}
3644	}
3645
3646
3647	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3648		if (need_full_reset) {
3649			/* post card */
3650			if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
3651				DRM_WARN("asic atom init failed!\n");
3652
3653			if (!r) {
3654				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
3655				r = amdgpu_device_ip_resume_phase1(tmp_adev);
3656				if (r)
3657					goto out;
3658
3659				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
3660				if (vram_lost) {
3661					DRM_INFO("VRAM is lost due to GPU reset!\n");
3662					amdgpu_inc_vram_lost(tmp_adev);
3663				}
3664
3665				r = amdgpu_gtt_mgr_recover(
3666					&tmp_adev->mman.bdev.man[TTM_PL_TT]);
3667				if (r)
3668					goto out;
3669
3670				r = amdgpu_device_fw_loading(tmp_adev);
3671				if (r)
3672					return r;
3673
3674				r = amdgpu_device_ip_resume_phase2(tmp_adev);
3675				if (r)
3676					goto out;
3677
3678				if (vram_lost)
3679					amdgpu_device_fill_reset_magic(tmp_adev);
3680
3681				/*
3682				 * Add this ASIC back as tracked since the reset
3683				 * already completed successfully.
3684				 */
3685				amdgpu_register_gpu_instance(tmp_adev);
3686
3687				r = amdgpu_device_ip_late_init(tmp_adev);
3688				if (r)
3689					goto out;
3690
3691				/* must succeed. */
3692				amdgpu_ras_resume(tmp_adev);
3693
3694				/* Update PSP FW topology after reset */
3695				if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
3696					r = amdgpu_xgmi_update_topology(hive, tmp_adev);
3697			}
3698		}
3699
3700
3701out:
3702		if (!r) {
3703			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
3704			r = amdgpu_ib_ring_tests(tmp_adev);
3705			if (r) {
3706				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
3707				r = amdgpu_device_ip_suspend(tmp_adev);
3708				need_full_reset = true;
3709				r = -EAGAIN;
3710				goto end;
3711			}
3712		}
3713
3714		if (!r)
3715			r = amdgpu_device_recover_vram(tmp_adev);
3716		else
3717			tmp_adev->asic_reset_res = r;
3718	}
3719
3720end:
3721	*need_full_reset_arg = need_full_reset;
3722	return r;
3723}
3724
3725static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
3726{
3727	if (trylock) {
3728		if (!mutex_trylock(&adev->lock_reset))
3729			return false;
3730	} else {
3731		mutex_lock(&adev->lock_reset);
	}
3732
3733	atomic_inc(&adev->gpu_reset_counter);
3734	adev->in_gpu_reset = 1;
3735	switch (amdgpu_asic_reset_method(adev)) {
3736	case AMD_RESET_METHOD_MODE1:
3737		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
3738		break;
3739	case AMD_RESET_METHOD_MODE2:
3740		adev->mp1_state = PP_MP1_STATE_RESET;
3741		break;
3742	default:
3743		adev->mp1_state = PP_MP1_STATE_NONE;
3744		break;
3745	}
3746	/* Block kfd: SRIOV would do it separately */
3747	if (!amdgpu_sriov_vf(adev))
3748		amdgpu_amdkfd_pre_reset(adev);
3749
3750	return true;
3751}
3752
3753static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
3754{
3755	/* unlock kfd: SRIOV would do it separately */
3756	if (!amdgpu_sriov_vf(adev))
3757		amdgpu_amdkfd_post_reset(adev);
3758	amdgpu_vf_error_trans_all(adev);
3759	adev->mp1_state = PP_MP1_STATE_NONE;
3760	adev->in_gpu_reset = 0;
3761	mutex_unlock(&adev->lock_reset);
3762}
3763
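/*
 * Illustrative sketch only: lock_adev/unlock_adev bracket the whole reset
 * critical section.  Trylock is used for the device that triggered the
 * TDR so that concurrent handlers bail out instead of queueing up behind
 * the mutex.  The expected pairing (error handling elided):
 *
 *	if (!amdgpu_device_lock_adev(adev, trylock))
 *		return 0;
 *	...		(suspend IPs, reset the ASIC, resume IPs)
 *	amdgpu_device_unlock_adev(adev);
 */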
3764
3765/**
3766 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
3767 *
3768 * @adev: amdgpu device pointer
3769 * @job: which job triggered the hang
3770 *
3771 * Attempt to reset the GPU if it has hung (all asics).
3772 * Attempts a soft reset or a full reset and reinitializes the ASIC.
3773 * Returns 0 for success or an error on failure.
3774 */
3775
3776int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
3777			      struct amdgpu_job *job)
3778{
3779	struct list_head device_list, *device_list_handle = NULL;
3780	bool need_full_reset, job_signaled;
3781	struct amdgpu_hive_info *hive = NULL;
3782	struct amdgpu_device *tmp_adev = NULL;
3783	int i, r = 0;
3784
3785	need_full_reset = job_signaled = false;
3786	INIT_LIST_HEAD(&device_list);
3787
3788	dev_info(adev->dev, "GPU reset begin!\n");
3789
3790	cancel_delayed_work_sync(&adev->delayed_init_work);
3791
3792	hive = amdgpu_get_xgmi_hive(adev, false);
3793
3794	/*
3795	 * Here we trylock to avoid a chain of resets executing, triggered
3796	 * either by jobs on different adevs in an XGMI hive or by jobs on
3797	 * different schedulers for the same device, while this TO handler is
3798	 * running.  We always reset all schedulers for a device and all
3799	 * devices in an XGMI hive, so that should take care of them too.
3800	 */
3801
3802	if (hive && !mutex_trylock(&hive->reset_lock)) {
3803		DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
3804			  job ? job->base.id : -1, hive->hive_id);
3805		return 0;
3806	}
3807
3808	/* Start with adev pre asic reset first for soft reset check. */
3809	if (!amdgpu_device_lock_adev(adev, !hive)) {
3810		DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
3811			  job ? job->base.id : -1);
3812		return 0;
3813	}
3814
3815	/* Build list of devices to reset */
3816	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3817		if (!hive) {
3818			amdgpu_device_unlock_adev(adev);
3819			return -ENODEV;
3820		}
3821
3822		/*
3823		 * In XGMI hive mode the device reset is done for all nodes in the
3824		 * hive to retrain all XGMI links, hence the reset sequence is
3825		 * executed in a loop on all nodes.
3826		 */
3827		device_list_handle = &hive->device_list;
3828	} else {
3829		list_add_tail(&adev->gmc.xgmi.head, &device_list);
3830		device_list_handle = &device_list;
3831	}
3832
3833	/*
3834	 * Mark these ASICs to be reset as untracked first,
3835	 * and add them back after the reset completes.
3836	 */
3837	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
3838		amdgpu_unregister_gpu_instance(tmp_adev);
3839
3840	/* block all schedulers and reset given job's ring */
3841	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3842		/* disable ras on ALL IPs */
3843		if (amdgpu_device_ip_need_full_reset(tmp_adev))
3844			amdgpu_ras_suspend(tmp_adev);
3845
3846		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3847			struct amdgpu_ring *ring = tmp_adev->rings[i];
3848
3849			if (!ring || !ring->sched.thread)
3850				continue;
3851
3852			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
3853		}
3854	}
3855
3856
3857	/*
3858	 * Must check guilty signal here since after this point all old
3859	 * HW fences are force signaled.
3860	 *
3861	 * job->base holds a reference to parent fence
3862	 */
3863	if (job && job->base.s_fence->parent &&
3864	    dma_fence_is_signaled(job->base.s_fence->parent))
3865		job_signaled = true;
3866
3867	if (!amdgpu_device_ip_need_full_reset(adev))
3868		device_list_handle = &device_list;
3869
3870	if (job_signaled) {
3871		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset\n");
3872		goto skip_hw_reset;
3873	}
3874
3875
3876	/* Guilty job will be freed after this */
3877	r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
3878	if (r) {
3879		/* TODO: Should we stop? */
3880		DRM_ERROR("GPU pre asic reset failed with err %d for drm dev %s\n",
3881			  r, adev->ddev->unique);
3882		adev->asic_reset_res = r;
3883	}
3884
3885retry:	/* Rest of adevs pre asic reset from XGMI hive. */
3886	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3887
3888		if (tmp_adev == adev)
3889			continue;
3890
3891		amdgpu_device_lock_adev(tmp_adev, false);
3892		r = amdgpu_device_pre_asic_reset(tmp_adev,
3893						 NULL,
3894						 &need_full_reset);
3895		/* TODO: Should we stop? */
3896		if (r) {
3897			DRM_ERROR("GPU pre asic reset failed with err %d for drm dev %s\n",
3898				  r, tmp_adev->ddev->unique);
3899			tmp_adev->asic_reset_res = r;
3900		}
3901	}
3902
3903	/* Actual ASIC resets if needed. */
3904	/* TODO: Implement XGMI hive reset logic for SRIOV */
3905	if (amdgpu_sriov_vf(adev)) {
3906		r = amdgpu_device_reset_sriov(adev, !job);
3907		if (r)
3908			adev->asic_reset_res = r;
3909	} else {
3910		r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
3911		if (r == -EAGAIN)
3912			goto retry;
3913	}
3914
3915skip_hw_reset:
3916
3917	/* Post ASIC reset for all devs. */
3918	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3919		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3920			struct amdgpu_ring *ring = tmp_adev->rings[i];
3921
3922			if (!ring || !ring->sched.thread)
3923				continue;
3924
3925			/* No point in resubmitting jobs if we didn't HW reset */
3926			if (!tmp_adev->asic_reset_res && !job_signaled)
3927				drm_sched_resubmit_jobs(&ring->sched);
3928
3929			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
3930		}
3931
3932		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
3933			drm_helper_resume_force_mode(tmp_adev->ddev);
3934		}
3935
3936		tmp_adev->asic_reset_res = 0;
3937
3938		if (r) {
3939			/* bad news, how to tell it to userspace? */
3940			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
3941			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
3942		} else {
3943			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
3944		}
3945
3946		amdgpu_device_unlock_adev(tmp_adev);
3947	}
3948
3949	if (hive)
3950		mutex_unlock(&hive->reset_lock);
3951
3952	if (r)
3953		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
3954	return r;
3955}
3956
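/*
 * Illustrative sketch only: recovery can also be kicked without a guilty
 * job (job == NULL), e.g. from a RAS interrupt or a hypervisor FLR
 * notification.  A hypothetical call site:
 *
 *	static void hypothetical_fatal_error_handler(struct amdgpu_device *adev)
 *	{
 *		if (amdgpu_device_should_recover_gpu(adev))
 *			amdgpu_device_gpu_recover(adev, NULL);
 *	}
 */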
3957/**
3958 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
3959 *
3960 * @adev: amdgpu_device pointer
3961 *
3962 * Fetches and stores in the driver the PCIE capabilities (gen speed
3963 * and lanes) of the slot the device is in. Handles APUs and
3964 * virtualized environments where PCIE config space may not be available.
3965 */
3966static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
3967{
3968	struct pci_dev *pdev;
3969	enum pci_bus_speed speed_cap, platform_speed_cap;
3970	enum pcie_link_width platform_link_width;
3971
3972	if (amdgpu_pcie_gen_cap)
3973		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
3974
3975	if (amdgpu_pcie_lane_cap)
3976		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
3977
3978	/* covers APUs as well */
3979	if (pci_is_root_bus(adev->pdev->bus)) {
3980		if (adev->pm.pcie_gen_mask == 0)
3981			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3982		if (adev->pm.pcie_mlw_mask == 0)
3983			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3984		return;
3985	}
3986
3987	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
3988		return;
3989
3990	pcie_bandwidth_available(adev->pdev, NULL,
3991				 &platform_speed_cap, &platform_link_width);
3992
3993	if (adev->pm.pcie_gen_mask == 0) {
3994		/* asic caps */
3995		pdev = adev->pdev;
3996		speed_cap = pcie_get_speed_cap(pdev);
3997		if (speed_cap == PCI_SPEED_UNKNOWN) {
3998			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3999						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4000						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4001		} else {
4002			if (speed_cap == PCIE_SPEED_16_0GT)
4003				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4004							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4005							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4006							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4007			else if (speed_cap == PCIE_SPEED_8_0GT)
4008				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4009							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4010							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4011			else if (speed_cap == PCIE_SPEED_5_0GT)
4012				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4013							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4014			else
4015				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4016		}
4017		/* platform caps */
4018		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
4019			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4020						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4021		} else {
4022			if (platform_speed_cap == PCIE_SPEED_16_0GT)
4023				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4024							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4025							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4026							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
4027			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
4028				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4029							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4030							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
4031			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
4032				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4033							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4034			else
4035				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4036
4037		}
4038	}
4039	if (adev->pm.pcie_mlw_mask == 0) {
4040		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
4041			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4042		} else {
4043			switch (platform_link_width) {
4044			case PCIE_LNK_X32:
4045				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4046							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4047							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4048							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4049							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4050							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4051							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4052				break;
4053			case PCIE_LNK_X16:
4054				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4055							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4056							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4057							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4058							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4059							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4060				break;
4061			case PCIE_LNK_X12:
4062				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4063							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4064							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4065							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4066							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4067				break;
4068			case PCIE_LNK_X8:
4069				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4070							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4071							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4072							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4073				break;
4074			case PCIE_LNK_X4:
4075				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4076							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4077							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4078				break;
4079			case PCIE_LNK_X2:
4080				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4081							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4082				break;
4083			case PCIE_LNK_X1:
4084				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
4085				break;
4086			default:
4087				break;
4088			}
4089		}
4090	}
4091}
4092
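/*
 * Illustrative sketch only: consumers test the masks computed above
 * instead of re-querying PCI config space.  For example, a helper (name
 * assumed) that asks whether the platform side of the link supports gen3:
 *
 *	static bool hypothetical_platform_has_gen3(struct amdgpu_device *adev)
 *	{
 *		return adev->pm.pcie_gen_mask &
 *		       CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
 *	}
 */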