/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
#include <linux/list_sort.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
#include "amdgpu_psp.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif
static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
	"mca",
	"vcn",
	"jpeg",
	"ih",
	"mpio",
};

const char *ras_mca_block_string[] = {
	"mca_mp0",
	"mca_mp1",
	"mca_mpio",
	"mca_iohc",
};

struct amdgpu_ras_block_list {
	/* ras block link */
	struct list_head node;

	struct amdgpu_ras_block_object *ras_obj;
};

const char *get_ras_block_str(struct ras_common_if *ras_block)
{
	if (!ras_block)
		return "NULL";

	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
	    ras_block->block >= ARRAY_SIZE(ras_block_string))
		return "OUT OF RANGE";

	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
		return ras_mca_block_string[ras_block->sub_block_index];

	return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define	RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)

#define MAX_UMC_POISON_POLLING_TIME_ASYNC  100  //ms

enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
				uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				uint64_t addr);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
	int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
	if (adev && amdgpu_ras_get_context(adev))
		amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
	if (adev && amdgpu_ras_get_context(adev))
		return amdgpu_ras_get_context(adev)->error_query_ready;

	return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
	struct ras_err_data err_data;
	struct eeprom_table_record err_rec;
	int ret;

	if ((address >= adev->gmc.mc_vram_size) ||
	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
		dev_warn(adev->dev,
		         "RAS WARN: input address 0x%llx is invalid.\n",
		         address);
		return -EINVAL;
	}

	if (amdgpu_ras_check_bad_page(adev, address)) {
		dev_warn(adev->dev,
			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
			 address);
		return 0;
	}

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
	err_data.err_addr = &err_rec;
	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					 err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

	amdgpu_ras_error_data_fini(&err_data);

	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
	dev_warn(adev->dev, "Clear EEPROM:\n");
	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

	return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);
	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.write = NULL,
	.llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_string[i]) == 0)
			return 0;
	}
	return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;
	/* default value is 0 if the mask is not set by user */
	u32 instance_mask = 0;

	if (*pos)
		return -EINVAL;
	*pos = size;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (strstr(str, "retire_page") != NULL)
		op = 3;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but commands are not matched. */
		return -EINVAL;

	if (op != -1) {
		if (op == 3) {
			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
			    sscanf(str, "%*s %llu", &address) != 1)
				return -EINVAL;

			data->op = op;
			data->inject.address = address;

			return 0;
		}

		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue, ce and poison errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else if (!memcmp("poison", err, 6))
			data->head.type = AMDGPU_RAS_ERROR__POISON;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
				   &sub_block, &address, &value) != 3 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu",
				   &sub_block, &address, &value) != 3)
				return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
			data->inject.instance_mask = instance_mask;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}

static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
				struct ras_debug_if *data)
{
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
	uint32_t mask, inst_mask = data->inject.instance_mask;

	/* no need to set instance mask if there is only one instance */
	if (num_xcc <= 1 && inst_mask) {
		data->inject.instance_mask = 0;
		dev_dbg(adev->dev,
			"RAS inject mask(0x%x) isn't supported, forcing it to 0.\n",
			inst_mask);

		return;
	}

	switch (data->head.block) {
	case AMDGPU_RAS_BLOCK__GFX:
		mask = GENMASK(num_xcc - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__SDMA:
		mask = GENMASK(adev->sdma.num_instances - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__VCN:
	case AMDGPU_RAS_BLOCK__JPEG:
		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
		break;
	default:
		mask = inst_mask;
		break;
	}

	/* remove invalid bits in instance mask */
	data->inject.instance_mask &= mask;
	if (inst_mask != data->inject.instance_mask)
		dev_dbg(adev->dev,
			"Adjust RAS inject mask 0x%x to 0x%x\n",
			inst_mask, data->inject.instance_mask);
}

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members, they are block, type, sub_block_index, name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, e.g. GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has three more members than head, they are address, value and mask.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
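 * The string command "retire_page <address>" is also accepted; it is
 * parsed as a fourth operation (op 3) and reserves the given page
 * address directly via amdgpu_reserve_page_direct().
 *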
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
 *
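 * As a minimal user-space sketch (not part of the driver), assuming
 * struct ras_debug_if and the RAS enums it uses have been copied
 * verbatim from the driver headers so the binary layout matches the
 * running kernel:
 *
 * .. code-block:: c
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct ras_debug_if data = { 0 };
 *		int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *
 *		data.op = 1;	// 1 == enable RAS on the block
 *		data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *		data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *
 *		if (write(fd, &data, sizeof(data)) != sizeof(data))
 *			return 1;
 *
 *		close(fd);
 *		return 0;
 *	}
 *
 * A write smaller than sizeof(struct ras_debug_if) is treated as one of
 * the textual commands below instead.
 *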
 * From shell
 *
 * .. code-block:: bash
 *
 *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where N is the card which you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *	see ras_block_string[] for details
 *
 * The error type is one of: ue, ce and poison where,
 *	ue is multi-uncorrectable
 *	ce is single-correctable
 *	poison is poison
 *
 * The sub-block is the sub-block index, pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, leading 0x is optional.
 * The mask is the instance mask; it is optional and defaults to 0x1.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * .. note::
 *	Operations are only allowed on blocks which are supported.
 *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *	to see which blocks support RAS on a particular asic.
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
					     const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev, "RAS WARN: error injection "
				"currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size &&
		    adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev, "RAS WARN: input address "
					"0x%llx is invalid.",
					data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
				 "already been marked as bad!\n",
				 data.inject.address);
			break;
		}

		amdgpu_ras_instance_mask_check(adev, &data);

		/* data.inject.address is offset instead of absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experience ECC errors in vram.  This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset the EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
					       const char __user *buf,
					       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
		&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (!ret) {
		/* Something was written to EEPROM.
		 */
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return ret;
	}
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *	ue: 0
 *	ce: 1
 *
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
				"ce", info.ce_count, "de", info.de_count);
	else
		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
				"ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && (--obj->use == 0)) {
		list_del(&obj->node);
		amdgpu_ras_error_data_fini(&obj->err_data);
	}

	if (obj && (obj->use < 0))
		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
			return NULL;

		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
	} else
		obj = &con->objs[head->block];

	/* already exists. return obj? */
	if (alive_obj(obj))
		return NULL;

	if (amdgpu_ras_error_data_init(&obj->err_data))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
				return NULL;

			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
		} else
			obj = &con->objs[head->block];

		if (alive_obj(obj))
			return obj;
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj))
				return obj;
		}
	}

	return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
					 struct ras_common_if *head)
{
	return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware does support ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs
	 * to do the corresponding initialization.
	 * The IP checks con->support to see if it needs to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret;

	if (!con)
		return -EINVAL;

	/* For non-gfx ip, do not enable ras feature if it is not allowed.
	 * For gfx ip, regardless of feature support status, always issue
	 * the enable or disable ras feature commands.
	 */
	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	/* Only enable gfx ras feature from host side */
	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_sriov_vf(adev) &&
	    !amdgpu_ras_intr_triggered()) {
		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		if (!enable) {
			info->disable_features = (struct ta_ras_disable_features_input) {
				.block_id =  amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		} else {
			info->enable_features = (struct ta_ras_enable_features_input) {
				.block_id =  amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		}

		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
				enable ? "enable":"disable",
				get_ras_block_str(head),
				amdgpu_ras_is_poison_mode_supported(adev), ret);
			kfree(info);
			return ret;
		}

		kfree(info);
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);

	return 0;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm in issuing a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing.
			 * But sometimes it requests the driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With an old ras TA, we might fail to enable ras.
			 * Log it and just set up the object.
			 * TODO: remove this WA in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					dev_info(adev->dev,
						"RAS INFO: %s setup object\n",
						get_ras_block_str(head));
			}
		} else {
			/* setup the object then issue a ras TA disable cmd.*/
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			/* gfx block ras disable cmd must be sent to ras-ta */
			if (head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features |= BIT(head->block);

			ret = amdgpu_ras_feature_enable(adev, head, 0);

			/* clean gfx block ras features flag */
			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features &= ~BIT(head->block);
		}
	} else
		ret = amdgpu_ras_feature_enable(adev, head, enable);

	return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		/* bypass psp.
		 * aka just release the obj and corresponding flags
		 */
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int i;
	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = default_ras_type,
			.sub_block_index = 0,
		};

		if (i == AMDGPU_RAS_BLOCK__MCA)
			continue;

		if (bypass) {
			/*
			 * bypass psp. the vbios enables ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = AMDGPU_RAS_BLOCK__MCA,
			.type = default_ras_type,
			.sub_block_index = i,
		};

		if (bypass) {
			/*
			 * bypass psp. the vbios enables ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
		enum amdgpu_ras_block block)
{
	if (!block_obj)
		return -EINVAL;

	if (block_obj->ras_comm.block == block)
		return 0;

	return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
					enum amdgpu_ras_block block, uint32_t sub_block_index)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;

	if (block >= AMDGPU_RAS_BLOCK__LAST)
		return NULL;

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}

		obj = node->ras_obj;
		if (obj->ras_block_match) {
			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
				return obj;
		} else {
			if (amdgpu_ras_block_match_default(obj, block) == 0)
				return obj;
		}
	}

	return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/*
	 * choose the right query method according to
	 * whether the smu supports querying error information
	 */
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
	if (ret == -EOPNOTSUPP) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
	} else if (!ret) {
		if (adev->umc.ras &&
			adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

		if (adev->umc.ras &&
			adev->umc.ras->ecc_info_query_ras_error_address)
			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
	}
}

static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
					      struct ras_manager *ras_mgr,
					      struct ras_err_data *err_data,
					      const char *blk_name,
					      bool is_ue,
					      bool is_de)
{
	struct amdgpu_smuio_mcm_config_info *mcm_info;
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;

	if (is_ue) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			if (err_info->ue_count) {
				dev_info(adev->dev, "socket: %d, die: %d, "
					 "%lld new uncorrectable hardware errors detected in %s block\n",
					 mcm_info->socket_id,
					 mcm_info->die_id,
					 err_info->ue_count,
					 blk_name);
			}
		}

		for_each_ras_error(err_node, &ras_mgr->err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			dev_info(adev->dev, "socket: %d, die: %d, "
				 "%lld uncorrectable hardware errors detected in total in %s block\n",
				 mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
		}

	} else {
		if (is_de) {
			for_each_ras_error(err_node, err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				if (err_info->de_count) {
					dev_info(adev->dev, "socket: %d, die: %d, "
						"%lld new deferred hardware errors detected in %s block\n",
						mcm_info->socket_id,
						mcm_info->die_id,
						err_info->de_count,
						blk_name);
				}
			}

			for_each_ras_error(err_node, &ras_mgr->err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				dev_info(adev->dev, "socket: %d, die: %d, "
					"%lld deferred hardware errors detected in total in %s block\n",
					mcm_info->socket_id, mcm_info->die_id,
					err_info->de_count, blk_name);
			}
		} else {
			for_each_ras_error(err_node, err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				if (err_info->ce_count) {
					dev_info(adev->dev, "socket: %d, die: %d, "
						"%lld new correctable hardware errors detected in %s block\n",
						mcm_info->socket_id,
						mcm_info->die_id,
						err_info->ce_count,
						blk_name);
				}
			}

			for_each_ras_error(err_node, &ras_mgr->err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				dev_info(adev->dev, "socket: %d, die: %d, "
					"%lld correctable hardware errors detected in total in %s block\n",
					mcm_info->socket_id, mcm_info->die_id,
					err_info->ce_count, blk_name);
			}
		}
	}
}

static inline bool err_data_has_source_info(struct ras_err_data *data)
{
	return !list_empty(&data->err_node_list);
}

static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
					     struct ras_query_if *query_if,
					     struct ras_err_data *err_data)
{
	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
	const char *blk_name = get_ras_block_str(&query_if->head);

	if (err_data->ce_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
							  blk_name, false, false);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld correctable hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.ce_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld correctable hardware errors "
				 "detected in %s block\n",
				 ras_mgr->err_data.ce_count,
				 blk_name);
		}
	}

	if (err_data->ue_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
							  blk_name, true, false);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.ue_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 ras_mgr->err_data.ue_count,
				 blk_name);
		}
	}

	if (err_data->de_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
							  blk_name, false, true);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld deferred hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.de_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld deferred hardware errors "
				 "detected in %s block\n",
				 ras_mgr->err_data.de_count,
				 blk_name);
		}
	}
}

static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
{
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;

	if (err_data_has_source_info(err_data)) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			amdgpu_ras_error_statistic_de_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->de_count);
			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->ce_count);
			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->ue_count);
		}
	} else {
		/* for the legacy asic path which doesn't have error source info */
		obj->err_data.ue_count += err_data->ue_count;
		obj->err_data.ce_count += err_data->ce_count;
		obj->err_data.de_count += err_data->de_count;
	}
}

static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
	struct ras_common_if head;

	memset(&head, 0, sizeof(head));
	head.block = blk;

	return amdgpu_ras_find_obj(adev, &head);
}

int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
			const struct aca_info *aca_info, void *data)
{
	struct ras_manager *obj;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
}

int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
	struct ras_manager *obj;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	amdgpu_aca_remove_handle(&obj->aca_handle);

	return 0;
}

static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
					 enum aca_error_type type, struct ras_err_data *err_data)
{
	struct ras_manager *obj;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data);
}

ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
				  struct aca_handle *handle, char *buf, void *data)
{
	struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			  "ce", info.ce_count);
}

static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
						struct ras_query_if *info,
						struct ras_err_data *err_data,
						unsigned int error_query_mode)
{
	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
	struct amdgpu_ras_block_object *block_obj = NULL;
	int ret;

	if (blk == AMDGPU_RAS_BLOCK_COUNT)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
			amdgpu_ras_get_ecc_info(adev, err_data);
		} else {
			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
			if (!block_obj || !block_obj->hw_ops) {
				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
					     get_ras_block_str(&info->head));
				return -EINVAL;
			}

			if (block_obj->hw_ops->query_ras_error_count)
				block_obj->hw_ops->query_ras_error_count(adev, err_data);

			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
				if (block_obj->hw_ops->query_ras_error_status)
					block_obj->hw_ops->query_ras_error_status(adev);
			}
		}
	} else {
		if (amdgpu_aca_is_enabled(adev)) {
			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data);
			if (ret)
				return ret;

			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data);
			if (ret)
				return ret;
		} else {
			/* FIXME: add code to check return value later */
			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data);
			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data);
		}
	}

	return 0;
}

/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data;
	unsigned int error_query_mode;
	int ret;

	if (!obj)
		return -EINVAL;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
		return -EINVAL;

	ret = amdgpu_ras_query_error_status_helper(adev, info,
						   &err_data,
						   error_query_mode);
	if (ret)
		goto out_fini_err_data;

	amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;
	info->de_count = obj->err_data.de_count;

	amdgpu_ras_error_generate_report(adev, info, &err_data);

out_fini_err_data:
	amdgpu_ras_error_data_fini(&err_data);

	return ret;
}

int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
	struct amdgpu_hive_info *hive;
	int hive_ras_recovery = 0;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
				ras_block_str(block));
		return -EOPNOTSUPP;
	}

	if (!amdgpu_ras_is_supported(adev, block) ||
	    !amdgpu_ras_get_aca_debug_mode(adev))
		return -EOPNOTSUPP;

	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		hive_ras_recovery = atomic_read(&hive->ras_recovery);
		amdgpu_put_xgmi_hive(hive);
	}

	/* skip ras error reset in gpu reset */
	if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
	    hive_ras_recovery) &&
	    ((smu_funcs && smu_funcs->set_debug_mode) ||
	     (mca_funcs && mca_funcs->mca_set_debug_mode)))
		return -EOPNOTSUPP;

	if (block_obj->hw_ops->reset_ras_error_count)
		block_obj->hw_ops->reset_ras_error_count(adev);

	return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
		return 0;

	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
		if (block_obj->hw_ops->reset_ras_error_status)
			block_obj->hw_ops->reset_ras_error_status(adev);
	}

	return 0;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id =  amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = -EINVAL;
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
							info->head.block,
							info->head.sub_block_index);

	/* inject on guest isn't allowed, return success directly */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!obj)
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return -EINVAL;
	}

	/* Calculate XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	if (block_obj->hw_ops->ras_error_inject) {
		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
		else /* Special ras_error_inject is defined (e.g: xgmi) */
			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
						info->instance_mask);
	} else {
		/* default path */
		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
	}

	if (ret)
		dev_err(adev->dev, "ras inject %s failed %d\n",
			get_ras_block_str(&info->head), ret);

	return ret;
}

/**
 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 for query success or if there is nothing to do;
 * otherwise return an error on failure
 */
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
					       unsigned long *ce_count,
					       unsigned long *ue_count,
					       struct ras_query_if *query_info)
{
	int ret;

	if (!query_info)
		/* do nothing if query_info is not specified */
		return 0;

	ret = amdgpu_ras_query_error_status(adev, query_info);
	if (ret)
		return ret;

	*ce_count += query_info->ce_count;
	*ue_count += query_info->ue_count;

	/* some hardware/IP supports read to clear;
	 * no need to explicitly reset the err status after the query call */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
			dev_warn(adev->dev,
				 "Failed to reset error counter and error status\n");
	}

	return 0;
}

/**
 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 * @query_info: pointer to ras_query_if if the query request is only for
 * specific ip block; if info is NULL, then the query request is for
 * all the ip blocks that support query ras error counters/status
 *
 * If set, @ce_count or @ue_count, count and return the corresponding
 * error counts in those integer pointers. Return 0 if the device
 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
 */
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
				 unsigned long *ce_count,
				 unsigned long *ue_count,
				 struct ras_query_if *query_info)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	unsigned long ce, ue;
	int ret = 0;

	if (!adev->ras_enabled || !con)
		return -EOPNOTSUPP;

	/* Don't count since no reporting.
	 */
	if (!ce_count && !ue_count)
		return 0;

	ce = 0;
	ue = 0;
	if (!query_info) {
		/* query all the ip blocks that support the ras query interface */
		list_for_each_entry(obj, &con->head, node) {
			struct ras_query_if info = {
				.head = obj->head,
			};

			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
		}
	} else {
		/* query a specific ip block */
		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
	}

	if (ret)
		return ret;

	if (ce_count)
		*ce_count = ce;

	if (ue_count)
		*ue_count = ue;

	return 0;
}
/* query/inject/cure end */

/* sysfs begin */

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
}

/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and not able to be used.
 *
 * P: pending for reserve, this gpu page is marked as bad, and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve. this gpu page can't be reserved for some reason.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P
 *
 */

static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t ppos, size_t count)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, badpages_attr);
	struct amdgpu_device *adev = con->adev;
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
	ssize_t s = 0;
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
		return 0;

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
				"0x%08x : 0x%08x : %1s\n",
				bps[start].bp,
				bps[start].size,
				amdgpu_ras_badpage_flags_str(bps[start].flags));

	kfree(bps);

	return s;
}

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);

	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}

static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, version_attr);
	return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
}

static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, schema_attr);
	return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
}

static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
				&con->badpages_attr.attr,
				RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		&con->version_attr.attr,
		&con->schema_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = RAS_FS_NAME,
		.attrs = attrs,
	};

	if (adev->dev->kobj.sd)
		sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;
}

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || obj->attr_inuse)
		return -EINVAL;

	get_obj(obj);

	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
		"%s_err_count", head->name);

	obj->sysfs_attr = (struct device_attribute){
		.attr = {
			.name = obj->fs_data.sysfs_name,
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_read,
	};
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME)) {
		put_obj(obj);
		return -EINVAL;
	}

	obj->attr_inuse = 1;

	return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME);
	obj->attr_inuse = 0;
	put_obj(obj);

	return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);
	}

	if (amdgpu_bad_page_threshold != 0)
		amdgpu_ras_sysfs_remove_bad_page_node(adev);

	amdgpu_ras_sysfs_remove_dev_attr_node(adev);

	return 0;
}
/* sysfs end */

1806/**
1807 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1808 *
1809 * Normally when there is an uncorrectable error, the driver will reset
1810 * the GPU to recover.  However, in the event of an unrecoverable error,
1811 * the driver provides an interface to reboot the system automatically
1812 * instead.
1813 *
1814 * The following file in debugfs provides that interface:
1815 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1816 *
1817 * Usage:
1818 *
1819 * .. code-block:: bash
1820 *
1821 *	echo true > .../ras/auto_reboot
1822 *
1823 */
1824/* debugfs begin */
1825static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1826{
1827	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1828	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
1829	struct drm_minor  *minor = adev_to_drm(adev)->primary;
1830	struct dentry     *dir;
1831
1832	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1833	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1834			    &amdgpu_ras_debugfs_ctrl_ops);
1835	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1836			    &amdgpu_ras_debugfs_eeprom_ops);
1837	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1838			   &con->bad_page_cnt_threshold);
1839	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
1840	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1841	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1842	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1843			    &amdgpu_ras_debugfs_eeprom_size_ops);
1844	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1845						       S_IRUGO, dir, adev,
1846						       &amdgpu_ras_debugfs_eeprom_table_ops);
1847	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1848
1849	/*
1850	 * After an uncorrectable error happens, GPU recovery is usually
1851	 * scheduled. But because of the known problem of GPU recovery
1852	 * failing to bring the GPU back, the interface below gives the
1853	 * user a direct way to reboot the system automatically when an
1854	 * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal
1855	 * GPU recovery routine is never called.
1856	 */
1857	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1858
1859	/*
1860	 * The user can set this to skip cleaning up the error count
1861	 * registers of RAS IP blocks during ras recovery.
1862	 */
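	/* For example (path shown for illustration):
	 *   echo 1 > /sys/kernel/debug/dri/0/ras/disable_ras_err_cnt_harvest
	 */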
1863	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1864			    &con->disable_ras_err_cnt_harvest);
1865	return dir;
1866}
1867
1868static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1869				      struct ras_fs_if *head,
1870				      struct dentry *dir)
1871{
1872	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1873
1874	if (!obj || !dir)
1875		return;
1876
1877	get_obj(obj);
1878
1879	memcpy(obj->fs_data.debugfs_name,
1880			head->debugfs_name,
1881			sizeof(obj->fs_data.debugfs_name));
1882
1883	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1884			    obj, &amdgpu_ras_debugfs_ops);
1885}
1886
1887void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1888{
1889	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1890	struct dentry *dir;
1891	struct ras_manager *obj;
1892	struct ras_fs_if fs_info;
1893
1894	/*
1895	 * this function is not called in the resume path, so there is
1896	 * no need to check the suspend and gpu reset status
1897	 */
1898	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1899		return;
1900
1901	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1902
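	/* create one <block>_err_inject debugfs node per enabled RAS block */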
1903	list_for_each_entry(obj, &con->head, node) {
1904		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1905			(obj->attr_inuse == 1)) {
1906			sprintf(fs_info.debugfs_name, "%s_err_inject",
1907					get_ras_block_str(&obj->head));
1908			fs_info.head = obj->head;
1909			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1910		}
1911	}
1912
1913	if (amdgpu_aca_is_enabled(adev))
1914		amdgpu_aca_smu_debugfs_init(adev, dir);
1915	else
1916		amdgpu_mca_smu_debugfs_init(adev, dir);
1917}
1918
1919/* debugfs end */
1920
1921/* ras fs */
1922static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
1923		amdgpu_ras_sysfs_badpages_read, NULL, 0);
1924static DEVICE_ATTR(features, S_IRUGO,
1925		amdgpu_ras_sysfs_features_read, NULL);
1926static DEVICE_ATTR(version, 0444,
1927		amdgpu_ras_sysfs_version_show, NULL);
1928static DEVICE_ATTR(schema, 0444,
1929		amdgpu_ras_sysfs_schema_show, NULL);
1930static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
1931{
1932	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1933	struct attribute_group group = {
1934		.name = RAS_FS_NAME,
1935	};
1936	struct attribute *attrs[] = {
1937		&con->features_attr.attr,
1938		&con->version_attr.attr,
1939		&con->schema_attr.attr,
1940		NULL
1941	};
1942	struct bin_attribute *bin_attrs[] = {
1943		NULL,
1944		NULL,
1945	};
1946	int r;
1947
1948	group.attrs = attrs;
1949
1950	/* add features entry */
1951	con->features_attr = dev_attr_features;
1952	sysfs_attr_init(attrs[0]);
1953
1954	/* add version entry */
1955	con->version_attr = dev_attr_version;
1956	sysfs_attr_init(attrs[1]);
1957
1958	/* add schema entry */
1959	con->schema_attr = dev_attr_schema;
1960	sysfs_attr_init(attrs[2]);
1961
1962	if (amdgpu_bad_page_threshold != 0) {
1963		/* add bad_page_features entry */
1964		bin_attr_gpu_vram_bad_pages.private = NULL;
1965		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
1966		bin_attrs[0] = &con->badpages_attr;
1967		group.bin_attrs = bin_attrs;
1968		sysfs_bin_attr_init(bin_attrs[0]);
1969	}
1970
1971	r = sysfs_create_group(&adev->dev->kobj, &group);
1972	if (r)
1973		dev_err(adev->dev, "Failed to create RAS sysfs group!");
1974
1975	return 0;
1976}
1977
1978static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
1979{
1980	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1981	struct ras_manager *con_obj, *ip_obj, *tmp;
1982
1983	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1984		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
1985			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
1986			if (ip_obj)
1987				put_obj(ip_obj);
1988		}
1989	}
1990
1991	amdgpu_ras_sysfs_remove_all(adev);
1992	return 0;
1993}
1994/* ras fs end */
1995
1996/* ih begin */
1997
1998/* For hardware that cannot enable the bif ring for both the ras_controller_irq
1999 * and the ras_err_event_athub_irq ih cookies, the driver has to poll the status
2000 * register to check whether the interrupt has triggered, and properly
2001 * ack the interrupt if it is there.
2002 */
2003void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2004{
2005	/* Fatal error events are handled on host side */
2006	if (amdgpu_sriov_vf(adev))
2007		return;
2008
2009	if (adev->nbio.ras &&
2010	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2011		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2012
2013	if (adev->nbio.ras &&
2014	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2015		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2016}
2017
2018static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2019				struct amdgpu_iv_entry *entry)
2020{
2021	bool poison_stat = false;
2022	struct amdgpu_device *adev = obj->adev;
2023	struct amdgpu_ras_block_object *block_obj =
2024		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2025
2026	if (!block_obj)
2027		return;
2028
2029	/* both query_poison_status and handle_poison_consumption are optional,
2030	 * but at least one of them should be implemented if we need a poison
2031	 * consumption handler
2032	 */
2033	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2034		poison_stat = block_obj->hw_ops->query_poison_status(adev);
2035		if (!poison_stat) {
2036			/* Not poison consumption interrupt, no need to handle it */
2037			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2038					block_obj->ras_comm.name);
2039
2040			return;
2041		}
2042	}
2043
2044	amdgpu_umc_poison_handler(adev, obj->head.block, false);
2045
2046	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
2047		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2048
2049	/* gpu reset is fallback for failed and default cases */
2050	if (poison_stat) {
2051		dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
2052				block_obj->ras_comm.name);
2053		amdgpu_ras_reset_gpu(adev);
2054	} else {
2055		amdgpu_gfx_poison_consumption_handler(adev, entry);
2056	}
2057}
2058
2059static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2060				struct amdgpu_iv_entry *entry)
2061{
2062	dev_info(obj->adev->dev,
2063		"Poison is created\n");
2064}
2065
2066static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2067				struct amdgpu_iv_entry *entry)
2068{
2069	struct ras_ih_data *data = &obj->ih_data;
2070	struct ras_err_data err_data;
2071	int ret;
2072
2073	if (!data->cb)
2074		return;
2075
2076	ret = amdgpu_ras_error_data_init(&err_data);
2077	if (ret)
2078		return;
2079
2080	/* Let the IP handle its data; maybe we need to get the output
2081	 * from the callback to update the error type/count, etc.
2082	 */
2083	ret = data->cb(obj->adev, &err_data, entry);
2084	/* a ue will trigger an interrupt, and in that case
2085	 * we need to do a reset to recover the whole system.
2086	 * But we leave the IP to do that recovery; here we just
2087	 * dispatch the error.
2088	 */
2089	if (ret == AMDGPU_RAS_SUCCESS) {
2090		/* these counts could be left as 0 if
2091		 * some blocks do not count the number of errors
2092		 */
2093		obj->err_data.ue_count += err_data.ue_count;
2094		obj->err_data.ce_count += err_data.ce_count;
2095		obj->err_data.de_count += err_data.de_count;
2096	}
2097
2098	amdgpu_ras_error_data_fini(&err_data);
2099}
2100
2101static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
2102{
2103	struct ras_ih_data *data = &obj->ih_data;
2104	struct amdgpu_iv_entry entry;
2105
2106	while (data->rptr != data->wptr) {
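		/* pairs with the wmb() in amdgpu_ras_interrupt_dispatch():
		 * read the ring entry only after the updated wptr has been
		 * observed
		 */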
2107		rmb();
2108		memcpy(&entry, &data->ring[data->rptr],
2109				data->element_size);
2110
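		/* finish copying the entry out before advancing rptr, so the
		 * writer cannot overwrite a slot that is still being read
		 */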
2111		wmb();
2112		data->rptr = (data->aligned_element_size +
2113				data->rptr) % data->ring_size;
2114
2115		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
2116			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2117				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
2118			else
2119				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
2120		} else {
2121			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2122				amdgpu_ras_interrupt_umc_handler(obj, &entry);
2123			else
2124				dev_warn(obj->adev->dev,
2125					"No RAS interrupt handler for non-UMC block with poison disabled.\n");
2126		}
2127	}
2128}
2129
2130static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
2131{
2132	struct ras_ih_data *data =
2133		container_of(work, struct ras_ih_data, ih_work);
2134	struct ras_manager *obj =
2135		container_of(data, struct ras_manager, ih_data);
2136
2137	amdgpu_ras_interrupt_handler(obj);
2138}
2139
2140int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
2141		struct ras_dispatch_if *info)
2142{
2143	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
2144	struct ras_ih_data *data;
2145
2146	if (!obj)
2147		return -EINVAL;
2148
	data = &obj->ih_data;
2149	if (data->inuse == 0)
2150		return 0;
2151
2152	/* the ring might overflow: if the writer laps the reader, old entries are overwritten */
2153	memcpy(&data->ring[data->wptr], info->entry,
2154			data->element_size);
2155
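	/* publish the entry contents before updating wptr; pairs with the
	 * rmb() in amdgpu_ras_interrupt_handler()
	 */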
2156	wmb();
2157	data->wptr = (data->aligned_element_size +
2158			data->wptr) % data->ring_size;
2159
2160	schedule_work(&data->ih_work);
2161
2162	return 0;
2163}
2164
2165int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2166		struct ras_common_if *head)
2167{
2168	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2169	struct ras_ih_data *data;
2170
2171	if (!obj)
2172		return -EINVAL;
2173
2174	data = &obj->ih_data;
2175	if (data->inuse == 0)
2176		return 0;
2177
2178	cancel_work_sync(&data->ih_work);
2179
2180	kfree(data->ring);
2181	memset(data, 0, sizeof(*data));
2182	put_obj(obj);
2183
2184	return 0;
2185}
2186
2187int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2188		struct ras_common_if *head)
2189{
2190	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2191	struct ras_ih_data *data;
2192	struct amdgpu_ras_block_object *ras_obj;
2193
2194	if (!obj) {
2195		/* in case we register the IH before enabling the ras feature */
2196		obj = amdgpu_ras_create_obj(adev, head);
2197		if (!obj)
2198			return -EINVAL;
2199	} else
2200		get_obj(obj);
2201
2202	ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2203
2204	data = &obj->ih_data;
2205	/* add the callback, etc. */
2206	*data = (struct ras_ih_data) {
2207		.inuse = 0,
2208		.cb = ras_obj->ras_cb,
2209		.element_size = sizeof(struct amdgpu_iv_entry),
2210		.rptr = 0,
2211		.wptr = 0,
2212	};
2213
2214	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2215
2216	data->aligned_element_size = ALIGN(data->element_size, 8);
2217	/* the ring can store 64 iv entries. */
2218	data->ring_size = 64 * data->aligned_element_size;
2219	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2220	if (!data->ring) {
2221		put_obj(obj);
2222		return -ENOMEM;
2223	}
2224
2225	/* IH is ready */
2226	data->inuse = 1;
2227
2228	return 0;
2229}
2230
2231static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2232{
2233	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2234	struct ras_manager *obj, *tmp;
2235
2236	list_for_each_entry_safe(obj, tmp, &con->head, node) {
2237		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2238	}
2239
2240	return 0;
2241}
2242/* ih end */
2243
2244/* traverse all IPs except NBIO to query the error counters */
2245static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
2246{
2247	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2248	struct ras_manager *obj;
2249
2250	if (!adev->ras_enabled || !con)
2251		return;
2252
2253	list_for_each_entry(obj, &con->head, node) {
2254		struct ras_query_if info = {
2255			.head = obj->head,
2256		};
2257
2258		/*
2259		 * The PCIE_BIF IP has a separate isr for the ras
2260		 * controller interrupt, and the block-specific ras
2261		 * counter query is done in that isr. So skip such
2262		 * blocks in the common sync flood interrupt isr path.
2263		 */
2264		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2265			continue;
2266
2267		/*
2268		 * this is a workaround for aldebaran: skip sending the msg
2269		 * to the smu to get the ecc_info table, because the smu
2270		 * currently fails to handle that request.
2271		 * it should be removed once the smu fixes ecc_info handling.
2272		 */
2273		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2274		    (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2275		     IP_VERSION(13, 0, 2)))
2276			continue;
2277
2278		amdgpu_ras_query_error_status(adev, &info);
2279
2280		if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2281			    IP_VERSION(11, 0, 2) &&
2282		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2283			    IP_VERSION(11, 0, 4) &&
2284		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2285			    IP_VERSION(13, 0, 0)) {
2286			if (amdgpu_ras_reset_error_status(adev, info.head.block))
2287				dev_warn(adev->dev, "Failed to reset error counter and error status");
2288		}
2289	}
2290}
2291
2292/* Parse RdRspStatus and WrRspStatus */
2293static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2294					  struct ras_query_if *info)
2295{
2296	struct amdgpu_ras_block_object *block_obj;
2297	/*
2298	 * Only two blocks need to query the read/write
2299	 * RspStatus at the current state
2300	 */
2301	if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2302		(info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2303		return;
2304
2305	block_obj = amdgpu_ras_get_ras_block(adev,
2306					info->head.block,
2307					info->head.sub_block_index);
2308
2309	if (!block_obj || !block_obj->hw_ops) {
2310		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2311			     get_ras_block_str(&info->head));
2312		return;
2313	}
2314
2315	if (block_obj->hw_ops->query_ras_error_status)
2316		block_obj->hw_ops->query_ras_error_status(adev);
2317
2318}
2319
2320static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2321{
2322	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2323	struct ras_manager *obj;
2324
2325	if (!adev->ras_enabled || !con)
2326		return;
2327
2328	list_for_each_entry(obj, &con->head, node) {
2329		struct ras_query_if info = {
2330			.head = obj->head,
2331		};
2332
2333		amdgpu_ras_error_status_query(adev, &info);
2334	}
2335}
2336
2337/* recovery begin */
2338
2339/* return 0 on success.
2340 * the caller needs to free bps.
2341 */
2342static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2343		struct ras_badpage **bps, unsigned int *count)
2344{
2345	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2346	struct ras_err_handler_data *data;
2347	int i = 0;
2348	int ret = 0, status;
2349
2350	if (!con || !con->eh_data || !bps || !count)
2351		return -EINVAL;
2352
2353	mutex_lock(&con->recovery_lock);
2354	data = con->eh_data;
2355	if (!data || data->count == 0) {
2356		*bps = NULL;
2357		ret = -EINVAL;
2358		goto out;
2359	}
2360
2361	*bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
2362	if (!*bps) {
2363		ret = -ENOMEM;
2364		goto out;
2365	}
2366
2367	for (; i < data->count; i++) {
2368		(*bps)[i] = (struct ras_badpage){
2369			.bp = data->bps[i].retired_page,
2370			.size = AMDGPU_GPU_PAGE_SIZE,
2371			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2372		};
2373		status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2374				data->bps[i].retired_page);
2375		if (status == -EBUSY)
2376			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2377		else if (status == -ENOENT)
2378			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2379	}
2380
2381	*count = data->count;
2382out:
2383	mutex_unlock(&con->recovery_lock);
2384	return ret;
2385}
2386
2387static void amdgpu_ras_do_recovery(struct work_struct *work)
2388{
2389	struct amdgpu_ras *ras =
2390		container_of(work, struct amdgpu_ras, recovery_work);
2391	struct amdgpu_device *remote_adev = NULL;
2392	struct amdgpu_device *adev = ras->adev;
2393	struct list_head device_list, *device_list_handle =  NULL;
2394	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2395
2396	if (hive)
2397		atomic_set(&hive->ras_recovery, 1);
2398	if (!ras->disable_ras_err_cnt_harvest) {
2399
2400		/* Build list of devices to query RAS related errors */
2401		if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2402			device_list_handle = &hive->device_list;
2403		} else {
2404			INIT_LIST_HEAD(&device_list);
2405			list_add_tail(&adev->gmc.xgmi.head, &device_list);
2406			device_list_handle = &device_list;
2407		}
2408
2409		list_for_each_entry(remote_adev,
2410				device_list_handle, gmc.xgmi.head) {
2411			amdgpu_ras_query_err_status(remote_adev);
2412			amdgpu_ras_log_on_err_counter(remote_adev);
2413		}
2414
2415	}
2416
2417	if (amdgpu_device_should_recover_gpu(ras->adev)) {
2418		struct amdgpu_reset_context reset_context;
2419		memset(&reset_context, 0, sizeof(reset_context));
2420
2421		reset_context.method = AMD_RESET_METHOD_NONE;
2422		reset_context.reset_req_dev = adev;
2423
2424		/* Perform full reset in fatal error mode */
2425		if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2426			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2427		else {
2428			clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2429
2430			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2431				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2432				reset_context.method = AMD_RESET_METHOD_MODE2;
2433			}
2434
2435			/* When a fatal error occurs in poison mode, a mode1
2436			 * reset is used to recover the gpu.
2437			 */
2438			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2439				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2440				set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2441
2442				/* For any RAS error that needs a full reset to
2443				 * recover, set the fatal error status
2444				 */
2445				if (hive) {
2446					list_for_each_entry(remote_adev,
2447							    &hive->device_list,
2448							    gmc.xgmi.head)
2449						amdgpu_ras_set_fed(remote_adev,
2450								   true);
2451				} else {
2452					amdgpu_ras_set_fed(adev, true);
2453				}
2454				psp_fatal_error_recovery_quirk(&adev->psp);
2455			}
2456		}
2457
2458		amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2459	}
2460	atomic_set(&ras->in_recovery, 0);
2461	if (hive) {
2462		atomic_set(&hive->ras_recovery, 0);
2463		amdgpu_put_xgmi_hive(hive);
2464	}
2465}
2466
2467/* alloc/realloc bps array */
2468static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2469		struct ras_err_handler_data *data, int pages)
2470{
2471	unsigned int old_space = data->count + data->space_left;
2472	unsigned int new_space = old_space + pages;
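	/* round the capacity up to a multiple of 512 records to amortize reallocations */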
2473	unsigned int align_space = ALIGN(new_space, 512);
2474	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2475
2476	if (!bps)
2477		return -ENOMEM;
2479
2480	if (data->bps) {
2481		memcpy(bps, data->bps,
2482				data->count * sizeof(*data->bps));
2483		kfree(data->bps);
2484	}
2485
2486	data->bps = bps;
2487	data->space_left += align_space - old_space;
2488	return 0;
2489}
2490
2491/* it deals with vram only. */
2492int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2493		struct eeprom_table_record *bps, int pages)
2494{
2495	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2496	struct ras_err_handler_data *data;
2497	int ret = 0;
2498	uint32_t i;
2499
2500	if (!con || !con->eh_data || !bps || pages <= 0)
2501		return 0;
2502
2503	mutex_lock(&con->recovery_lock);
2504	data = con->eh_data;
2505	if (!data)
2506		goto out;
2507
2508	for (i = 0; i < pages; i++) {
2509		if (amdgpu_ras_check_bad_page_unlock(con,
2510			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2511			continue;
2512
2513		if (!data->space_left &&
2514			amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2515			ret = -ENOMEM;
2516			goto out;
2517		}
2518
2519		amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
2520			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
2521			AMDGPU_GPU_PAGE_SIZE);
2522
2523		memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2524		data->count++;
2525		data->space_left--;
2526	}
2527out:
2528	mutex_unlock(&con->recovery_lock);
2529
2530	return ret;
2531}
2532
2533/*
2534 * write the error record array to eeprom; the function should be
2535 * protected by recovery_lock
2536 * new_cnt: newly added UE count, excluding reserved bad pages, can be NULL
2537 */
2538int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2539		unsigned long *new_cnt)
2540{
2541	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2542	struct ras_err_handler_data *data;
2543	struct amdgpu_ras_eeprom_control *control;
2544	int save_count;
2545
2546	if (!con || !con->eh_data) {
2547		if (new_cnt)
2548			*new_cnt = 0;
2549
2550		return 0;
2551	}
2552
2553	mutex_lock(&con->recovery_lock);
2554	control = &con->eeprom_control;
2555	data = con->eh_data;
2556	save_count = data->count - control->ras_num_recs;
2557	mutex_unlock(&con->recovery_lock);
2558
2559	if (new_cnt)
2560		*new_cnt = save_count / adev->umc.retire_unit;
2561
2562	/* only new entries are saved */
2563	if (save_count > 0) {
2564		if (amdgpu_ras_eeprom_append(control,
2565					     &data->bps[control->ras_num_recs],
2566					     save_count)) {
2567			dev_err(adev->dev, "Failed to save EEPROM table data!");
2568			return -EIO;
2569		}
2570
2571		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2572	}
2573
2574	return 0;
2575}
2576
2577/*
2578 * read error record array in eeprom and reserve enough space for
2579 * storing new bad pages
2580 */
2581static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2582{
2583	struct amdgpu_ras_eeprom_control *control =
2584		&adev->psp.ras_context.ras->eeprom_control;
2585	struct eeprom_table_record *bps;
2586	int ret;
2587
2588	/* no bad page record, skip eeprom access */
2589	if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2590		return 0;
2591
2592	bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2593	if (!bps)
2594		return -ENOMEM;
2595
2596	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2597	if (ret)
2598		dev_err(adev->dev, "Failed to load EEPROM table records!");
2599	else
2600		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2601
2602	kfree(bps);
2603	return ret;
2604}
2605
2606static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2607				uint64_t addr)
2608{
2609	struct ras_err_handler_data *data = con->eh_data;
2610	int i;
2611
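	/* linear scan of the retired-page list; callers hold recovery_lock */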
2612	addr >>= AMDGPU_GPU_PAGE_SHIFT;
2613	for (i = 0; i < data->count; i++)
2614		if (addr == data->bps[i].retired_page)
2615			return true;
2616
2617	return false;
2618}
2619
2620/*
2621 * check if an address belongs to bad page
2622 *
2623 * Note: this check is only for umc block
2624 */
2625static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2626				uint64_t addr)
2627{
2628	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2629	bool ret = false;
2630
2631	if (!con || !con->eh_data)
2632		return ret;
2633
2634	mutex_lock(&con->recovery_lock);
2635	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2636	mutex_unlock(&con->recovery_lock);
2637	return ret;
2638}
2639
2640static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2641					  uint32_t max_count)
2642{
2643	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2644
2645	/*
2646	 * Justification of value bad_page_cnt_threshold in ras structure
2647	 *
2648	 * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
2649	 * in eeprom or amdgpu_bad_page_threshold == -2, introduce two
2650	 * scenarios accordingly.
2651	 *
2652	 * Bad page retirement enablement:
2653	 *    - If amdgpu_bad_page_threshold = -2,
2654	 *      bad_page_cnt_threshold = typical value by formula.
2655	 *
2656	 *    - When the value from user is 0 < amdgpu_bad_page_threshold <
2657	 *      max record length in eeprom, use it directly.
2658	 *
2659	 * Bad page retirement disablement:
2660	 *    - If amdgpu_bad_page_threshold = 0, bad page retirement
2661	 *      functionality is disabled, and bad_page_cnt_threshold will
2662	 *      take no effect.
2663	 */
2664
2665	if (amdgpu_bad_page_threshold < 0) {
2666		u64 val = adev->gmc.mc_vram_size;
2667
2668		do_div(val, RAS_BAD_PAGE_COVER);
2669		con->bad_page_cnt_threshold = min(lower_32_bits(val),
2670						  max_count);
2671	} else {
2672		con->bad_page_cnt_threshold = min_t(int, max_count,
2673						    amdgpu_bad_page_threshold);
2674	}
2675}
2676
2677static int amdgpu_ras_page_retirement_thread(void *param)
2678{
2679	struct amdgpu_device *adev = (struct amdgpu_device *)param;
2680	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2681
2682	while (!kthread_should_stop()) {
2683
2684		wait_event_interruptible(con->page_retirement_wq,
2685				kthread_should_stop() ||
2686				atomic_read(&con->page_retirement_req_cnt));
2687
2688		if (kthread_should_stop())
2689			break;
2690
2691		dev_info(adev->dev, "Start processing page retirement. request:%d\n",
2692			atomic_read(&con->page_retirement_req_cnt));
2693
2694		atomic_dec(&con->page_retirement_req_cnt);
2695
2696		amdgpu_umc_bad_page_polling_timeout(adev,
2697				false, MAX_UMC_POISON_POLLING_TIME_ASYNC);
2698	}
2699
2700	return 0;
2701}
2702
2703int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
2704{
2705	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2706	struct ras_err_handler_data **data;
2707	u32  max_eeprom_records_count = 0;
2708	bool exc_err_limit = false;
2709	int ret;
2710
2711	if (!con || amdgpu_sriov_vf(adev))
2712		return 0;
2713
2714	/* Allow access to RAS EEPROM via debugfs, when the ASIC
2715	 * supports RAS and debugfs is enabled, but when
2716	 * adev->ras_enabled is unset, i.e. when "ras_enable"
2717	 * module parameter is set to 0.
2718	 */
2719	con->adev = adev;
2720
2721	if (!adev->ras_enabled)
2722		return 0;
2723
2724	data = &con->eh_data;
2725	*data = kzalloc(sizeof(**data), GFP_KERNEL);
2726	if (!*data) {
2727		ret = -ENOMEM;
2728		goto out;
2729	}
2730
2731	mutex_init(&con->recovery_lock);
2732	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2733	atomic_set(&con->in_recovery, 0);
2734	con->eeprom_control.bad_channel_bitmap = 0;
2735
2736	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
2737	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
2738
2739	/* Todo: during testing the SMU might fail to read the eeprom through I2C
2740	 * when the GPU is pending an XGMI reset at probe time
2741	 * (mostly after the second bus reset); skip it for now
2742	 */
2743	if (adev->gmc.xgmi.pending_reset)
2744		return 0;
2745	ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
2746	/*
2747	 * This call fails when exc_err_limit is true or
2748	 * ret != 0.
2749	 */
2750	if (exc_err_limit || ret)
2751		goto free;
2752
2753	if (con->eeprom_control.ras_num_recs) {
2754		ret = amdgpu_ras_load_bad_pages(adev);
2755		if (ret)
2756			goto free;
2757
2758		amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
2759
2760		if (con->update_channel_flag) {
2761			amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
2762			con->update_channel_flag = false;
2763		}
2764	}
2765
2766	mutex_init(&con->page_retirement_lock);
2767	init_waitqueue_head(&con->page_retirement_wq);
2768	atomic_set(&con->page_retirement_req_cnt, 0);
2769	con->page_retirement_thread =
2770		kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
2771	if (IS_ERR(con->page_retirement_thread)) {
2772		con->page_retirement_thread = NULL;
2773		dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
2774	}
2775
2776#ifdef CONFIG_X86_MCE_AMD
2777	if ((adev->asic_type == CHIP_ALDEBARAN) &&
2778	    (adev->gmc.xgmi.connected_to_cpu))
2779		amdgpu_register_bad_pages_mca_notifier(adev);
2780#endif
2781	return 0;
2782
2783free:
2784	kfree((*data)->bps);
2785	kfree(*data);
2786	con->eh_data = NULL;
2787out:
2788	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
2789
2790	/*
2791	 * Except error threshold exceeding case, other failure cases in this
2792	 * function would not fail amdgpu driver init.
2793	 */
2794	if (!exc_err_limit)
2795		ret = 0;
2796	else
2797		ret = -EINVAL;
2798
2799	return ret;
2800}
2801
2802static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2803{
2804	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2805	struct ras_err_handler_data *data = con->eh_data;
2806
2807	/* recovery_init failed to init it, fini is useless */
2808	if (!data)
2809		return 0;
2810
2811	if (con->page_retirement_thread)
2812		kthread_stop(con->page_retirement_thread);
2813
2814	atomic_set(&con->page_retirement_req_cnt, 0);
2815
2816	cancel_work_sync(&con->recovery_work);
2817
2818	mutex_lock(&con->recovery_lock);
2819	con->eh_data = NULL;
2820	kfree(data->bps);
2821	kfree(data);
2822	mutex_unlock(&con->recovery_lock);
2823
2824	return 0;
2825}
2826/* recovery end */
2827
2828static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2829{
2830	if (amdgpu_sriov_vf(adev)) {
2831		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2832		case IP_VERSION(13, 0, 2):
2833		case IP_VERSION(13, 0, 6):
2834			return true;
2835		default:
2836			return false;
2837		}
2838	}
2839
2840	if (adev->asic_type == CHIP_IP_DISCOVERY) {
2841		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2842		case IP_VERSION(13, 0, 0):
2843		case IP_VERSION(13, 0, 6):
2844		case IP_VERSION(13, 0, 10):
2845			return true;
2846		default:
2847			return false;
2848		}
2849	}
2850
2851	return adev->asic_type == CHIP_VEGA10 ||
2852		adev->asic_type == CHIP_VEGA20 ||
2853		adev->asic_type == CHIP_ARCTURUS ||
2854		adev->asic_type == CHIP_ALDEBARAN ||
2855		adev->asic_type == CHIP_SIENNA_CICHLID;
2856}
2857
2858/*
2859 * this is a workaround for the vega20 workstation sku:
2860 * force enable gfx ras and ignore the vbios gfx ras flag,
2861 * because GC EDC can not be written
2862 */
2863static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2864{
2865	struct atom_context *ctx = adev->mode_info.atom_context;
2866
2867	if (!ctx)
2868		return;
2869
2870	if (strnstr(ctx->vbios_pn, "D16406",
2871		    sizeof(ctx->vbios_pn)) ||
2872		strnstr(ctx->vbios_pn, "D36002",
2873			sizeof(ctx->vbios_pn)))
2874		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
2875}
2876
2877/* Query the ras capability via the atomfirmware interface */
2878static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
2879{
2880	/* mem_ecc cap */
2881	if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2882		dev_info(adev->dev, "MEM ECC is active.\n");
2883		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2884					 1 << AMDGPU_RAS_BLOCK__DF);
2885	} else {
2886		dev_info(adev->dev, "MEM ECC is not presented.\n");
2887	}
2888
2889	/* sram_ecc cap */
2890	if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2891		dev_info(adev->dev, "SRAM ECC is active.\n");
2892		if (!amdgpu_sriov_vf(adev))
2893			adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2894						  1 << AMDGPU_RAS_BLOCK__DF);
2895		else
2896			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
2897						 1 << AMDGPU_RAS_BLOCK__SDMA |
2898						 1 << AMDGPU_RAS_BLOCK__GFX);
2899
2900		/*
2901		 * VCN/JPEG RAS can be supported on both bare metal and
2902		 * SRIOV environment
2903		 */
2904		if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
2905		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
2906		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
2907			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
2908						 1 << AMDGPU_RAS_BLOCK__JPEG);
2909		else
2910			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
2911						  1 << AMDGPU_RAS_BLOCK__JPEG);
2912
2913		/*
2914		 * XGMI RAS is not supported if xgmi num physical nodes
2915		 * is zero
2916		 */
2917		if (!adev->gmc.xgmi.num_physical_nodes)
2918			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
2919	} else {
2920		dev_info(adev->dev, "SRAM ECC is not presented.\n");
2921	}
2922}
2923
2924/* Query poison mode from umc/df IP callbacks */
2925static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
2926{
2927	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2928	bool df_poison, umc_poison;
2929
2930	/* poison setting is useless on SRIOV guest */
2931	if (amdgpu_sriov_vf(adev) || !con)
2932		return;
2933
2934	/* Init poison supported flag, the default value is false */
2935	if (adev->gmc.xgmi.connected_to_cpu ||
2936	    adev->gmc.is_app_apu) {
2937		/* enabled by default when GPU is connected to CPU */
2938		con->poison_supported = true;
2939	} else if (adev->df.funcs &&
2940	    adev->df.funcs->query_ras_poison_mode &&
2941	    adev->umc.ras &&
2942	    adev->umc.ras->query_ras_poison_mode) {
2943		df_poison =
2944			adev->df.funcs->query_ras_poison_mode(adev);
2945		umc_poison =
2946			adev->umc.ras->query_ras_poison_mode(adev);
2947
2948		/* Only poison is set in both DF and UMC, we can support it */
2949		if (df_poison && umc_poison)
2950			con->poison_supported = true;
2951		else if (df_poison != umc_poison)
2952			dev_warn(adev->dev,
2953				"Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
2954				df_poison, umc_poison);
2955	}
2956}
2957
2958/*
2959 * check the hardware's ras ability, which will be saved in hw_supported.
2960 * if the hardware does not support ras, we can skip some ras initialization
2961 * and forbid some ras operations from the IPs.
2962 * if software itself, say a boot parameter, limits the ras ability, we still
2963 * need to allow the IPs to do some limited operations, like disable. in such
2964 * a case, we have to initialize ras as normal, but must check in each
2965 * function whether the operation is allowed or not.
2966 */
2967static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2968{
2969	adev->ras_hw_enabled = adev->ras_enabled = 0;
2970
2971	if (!amdgpu_ras_asic_supported(adev))
2972		return;
2973
2974	/* query ras capability from psp */
2975	if (amdgpu_psp_get_ras_capability(&adev->psp))
2976		goto init_ras_enabled_flag;
2977
2978	/* query the ras capability from the vbios */
2979	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
2980		amdgpu_ras_query_ras_capablity_from_vbios(adev);
2981	} else {
2982		/* the driver only manages the RAS feature of a few IP blocks
2983		 * when the GPU is connected to the CPU through XGMI */
2984		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2985					   1 << AMDGPU_RAS_BLOCK__SDMA |
2986					   1 << AMDGPU_RAS_BLOCK__MMHUB);
2987	}
2988
2989	/* apply asic specific settings (vega20 only for now) */
2990	amdgpu_ras_get_quirks(adev);
2991
2992	/* query poison mode from umc/df ip callback */
2993	amdgpu_ras_query_poison_mode(adev);
2994
2995init_ras_enabled_flag:
2996	/* hw_supported needs to be aligned with RAS block mask. */
2997	adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2998
2999	adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
3000		adev->ras_hw_enabled & amdgpu_ras_mask;
3001
3002	/* aca is disabled by default */
3003	adev->aca.is_enabled = false;
3004}
3005
3006static void amdgpu_ras_counte_dw(struct work_struct *work)
3007{
3008	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3009					      ras_counte_delay_work.work);
3010	struct amdgpu_device *adev = con->adev;
3011	struct drm_device *dev = adev_to_drm(adev);
3012	unsigned long ce_count, ue_count;
3013	int res;
3014
3015	res = pm_runtime_get_sync(dev->dev);
3016	if (res < 0)
3017		goto Out;
3018
3019	/* Cache new values.
3020	 */
3021	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
3022		atomic_set(&con->ras_ce_count, ce_count);
3023		atomic_set(&con->ras_ue_count, ue_count);
3024	}
3025
3026	pm_runtime_mark_last_busy(dev->dev);
3027Out:
3028	pm_runtime_put_autosuspend(dev->dev);
3029}
3030
3031static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
3032{
3033	return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
3034			AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
3035			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
3036			AMDGPU_RAS_ERROR__PARITY;
3037}
3038
3039int amdgpu_ras_init(struct amdgpu_device *adev)
3040{
3041	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3042	int r;
3043
3044	if (con)
3045		return 0;
3046
3047	con = kzalloc(sizeof(*con) +
3048			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
3049			sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
3050			GFP_KERNEL);
3051	if (!con)
3052		return -ENOMEM;
3053
3054	con->adev = adev;
3055	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
3056	atomic_set(&con->ras_ce_count, 0);
3057	atomic_set(&con->ras_ue_count, 0);
3058
3059	con->objs = (struct ras_manager *)(con + 1);
3060
3061	amdgpu_ras_set_context(adev, con);
3062
3063	amdgpu_ras_check_supported(adev);
3064
3065	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
3066		/* set gfx block ras context feature for VEGA20 Gaming
3067		 * send ras disable cmd to ras ta during ras late init.
3068		 */
3069		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
3070			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
3071
3072			return 0;
3073		}
3074
3075		r = 0;
3076		goto release_con;
3077	}
3078
3079	con->update_channel_flag = false;
3080	con->features = 0;
3081	con->schema = 0;
3082	INIT_LIST_HEAD(&con->head);
3083	/* Might need get this flag from vbios. */
3084	con->flags = RAS_DEFAULT_FLAGS;
3085
3086	/* initialize nbio ras function ahead of any other
3087	 * ras functions so hardware fatal error interrupt
3088	 * can be enabled as early as possible */
3089	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
3090	case IP_VERSION(7, 4, 0):
3091	case IP_VERSION(7, 4, 1):
3092	case IP_VERSION(7, 4, 4):
3093		if (!adev->gmc.xgmi.connected_to_cpu)
3094			adev->nbio.ras = &nbio_v7_4_ras;
3095		break;
3096	case IP_VERSION(4, 3, 0):
3097		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
3098			/* unlike other generations of nbio ras,
3099			 * nbio v4_3 only supports the fatal error interrupt
3100			 * to inform software that DF is frozen due to a
3101			 * system fatal error event. the driver should not
3102			 * enable nbio ras in such a case. Instead,
3103			 * check DF RAS */
3104			adev->nbio.ras = &nbio_v4_3_ras;
3105		break;
3106	case IP_VERSION(7, 9, 0):
3107		if (!adev->gmc.is_app_apu)
3108			adev->nbio.ras = &nbio_v7_9_ras;
3109		break;
3110	default:
3111		/* nbio ras is not available */
3112		break;
3113	}
3114
3115	/* nbio ras block needs to be enabled ahead of other ras blocks
3116	 * to handle fatal error */
3117	r = amdgpu_nbio_ras_sw_init(adev);
3118	if (r)
3119		return r;
3120
3121	if (adev->nbio.ras &&
3122	    adev->nbio.ras->init_ras_controller_interrupt) {
3123		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
3124		if (r)
3125			goto release_con;
3126	}
3127
3128	if (adev->nbio.ras &&
3129	    adev->nbio.ras->init_ras_err_event_athub_interrupt) {
3130		r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
3131		if (r)
3132			goto release_con;
3133	}
3134
3135	/* Pack the socket_id into ras feature mask bits [31:29] */
3136	if (adev->smuio.funcs &&
3137	    adev->smuio.funcs->get_socket_id)
3138		con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
3139					AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
3140
3141	/* Get RAS schema for particular SOC */
3142	con->schema = amdgpu_get_ras_schema(adev);
3143
3144	if (amdgpu_ras_fs_init(adev)) {
3145		r = -EINVAL;
3146		goto release_con;
3147	}
3148
3149	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
3150		 "hardware ability[%x] ras_mask[%x]\n",
3151		 adev->ras_hw_enabled, adev->ras_enabled);
3152
3153	return 0;
3154release_con:
3155	amdgpu_ras_set_context(adev, NULL);
3156	kfree(con);
3157
3158	return r;
3159}
3160
3161int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
3162{
3163	if (adev->gmc.xgmi.connected_to_cpu ||
3164	    adev->gmc.is_app_apu)
3165		return 1;
3166	return 0;
3167}
3168
3169static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
3170					struct ras_common_if *ras_block)
3171{
3172	struct ras_query_if info = {
3173		.head = *ras_block,
3174	};
3175
3176	if (!amdgpu_persistent_edc_harvesting_supported(adev))
3177		return 0;
3178
3179	if (amdgpu_ras_query_error_status(adev, &info) != 0)
3180		DRM_WARN("RAS init harvest failure");
3181
3182	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
3183		DRM_WARN("RAS init harvest reset failure");
3184
3185	return 0;
3186}
3187
3188bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
3189{
3190	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3191
3192	if (!con)
3193		return false;
3194
3195	return con->poison_supported;
3196}
3197
3198/* helper function to handle common stuff in ip late init phase */
3199int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
3200			 struct ras_common_if *ras_block)
3201{
3202	struct amdgpu_ras_block_object *ras_obj = NULL;
3203	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3204	struct ras_query_if *query_info;
3205	unsigned long ue_count, ce_count;
3206	int r;
3207
3208	/* disable RAS feature per IP block if it is not supported */
3209	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
3210		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
3211		return 0;
3212	}
3213
3214	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
3215	if (r) {
3216		if (adev->in_suspend || amdgpu_in_reset(adev)) {
3217			/* in resume phase, if fail to enable ras,
3218			 * clean up all ras fs nodes, and disable ras */
3219			goto cleanup;
3220		} else
3221			return r;
3222	}
3223
3224	/* check for errors on ASICs that support persistent EDC across a warm reset */
3225	amdgpu_persistent_edc_harvesting(adev, ras_block);
3226
3227	/* in resume phase, no need to create ras fs node */
3228	if (adev->in_suspend || amdgpu_in_reset(adev))
3229		return 0;
3230
3231	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3232	if (ras_obj->ras_cb || (ras_obj->hw_ops &&
3233	    (ras_obj->hw_ops->query_poison_status ||
3234	    ras_obj->hw_ops->handle_poison_consumption))) {
3235		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
3236		if (r)
3237			goto cleanup;
3238	}
3239
3240	if (ras_obj->hw_ops &&
3241	    (ras_obj->hw_ops->query_ras_error_count ||
3242	     ras_obj->hw_ops->query_ras_error_status)) {
3243		r = amdgpu_ras_sysfs_create(adev, ras_block);
3244		if (r)
3245			goto interrupt;
3246
3247		/* Those are the cached values at init.
3248		 */
3249		query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
3250		if (!query_info)
3251			return -ENOMEM;
3252		memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
3253
3254		if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
3255			atomic_set(&con->ras_ce_count, ce_count);
3256			atomic_set(&con->ras_ue_count, ue_count);
3257		}
3258
3259		kfree(query_info);
3260	}
3261
3262	return 0;
3263
3264interrupt:
3265	if (ras_obj->ras_cb)
3266		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3267cleanup:
3268	amdgpu_ras_feature_enable(adev, ras_block, 0);
3269	return r;
3270}
3271
3272static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
3273			 struct ras_common_if *ras_block)
3274{
3275	return amdgpu_ras_block_late_init(adev, ras_block);
3276}
3277
3278/* helper function to remove ras fs node and interrupt handler */
3279void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
3280			  struct ras_common_if *ras_block)
3281{
3282	struct amdgpu_ras_block_object *ras_obj;
3283	if (!ras_block)
3284		return;
3285
3286	amdgpu_ras_sysfs_remove(adev, ras_block);
3287
3288	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3289	if (ras_obj->ras_cb)
3290		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3291}
3292
3293static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
3294			  struct ras_common_if *ras_block)
3295{
3296	return amdgpu_ras_block_late_fini(adev, ras_block);
3297}
3298
3299/* do some init work after IP late init, as a dependency.
3300 * it runs in the resume/gpu reset/boot-up cases.
3301 */
3302void amdgpu_ras_resume(struct amdgpu_device *adev)
3303{
3304	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3305	struct ras_manager *obj, *tmp;
3306
3307	if (!adev->ras_enabled || !con) {
3308		/* clean ras context for VEGA20 Gaming after send ras disable cmd */
3309		amdgpu_release_ras_context(adev);
3310
3311		return;
3312	}
3313
3314	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
3315		/* Set up all other IPs which are not implemented. There is a
3316		 * tricky point: an IP's actual ras error type should be
3317		 * MULTI_UNCORRECTABLE, but since the driver does not handle
3318		 * it, ERROR_NONE makes sense anyway.
3319		 */
3320		amdgpu_ras_enable_all_features(adev, 1);
3321
3322		/* We enable ras on all hw_supported blocks, but a boot
3323		 * parameter might disable some of them, and one or more IPs
3324		 * may not be implemented yet. So we disable those on their behalf.
3325		 */
3326		list_for_each_entry_safe(obj, tmp, &con->head, node) {
3327			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
3328				amdgpu_ras_feature_enable(adev, &obj->head, 0);
3329				/* there should not be any reference. */
3330				WARN_ON(alive_obj(obj));
3331			}
3332		}
3333	}
3334}
3335
3336void amdgpu_ras_suspend(struct amdgpu_device *adev)
3337{
3338	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3339
3340	if (!adev->ras_enabled || !con)
3341		return;
3342
3343	amdgpu_ras_disable_all_features(adev, 0);
3344	/* Make sure all ras objects are disabled. */
3345	if (AMDGPU_RAS_GET_FEATURES(con->features))
3346		amdgpu_ras_disable_all_features(adev, 1);
3347}
3348
3349int amdgpu_ras_late_init(struct amdgpu_device *adev)
3350{
3351	struct amdgpu_ras_block_list *node, *tmp;
3352	struct amdgpu_ras_block_object *obj;
3353	int r;
3354
3355	/* The guest side doesn't need to init the ras feature */
3356	if (amdgpu_sriov_vf(adev))
3357		return 0;
3358
3359	if (amdgpu_aca_is_enabled(adev)) {
3360		if (amdgpu_in_reset(adev))
3361			r = amdgpu_aca_reset(adev);
3362		else
3363			r = amdgpu_aca_init(adev);
3364		if (r)
3365			return r;
3366
3367		amdgpu_ras_set_aca_debug_mode(adev, false);
3368	} else {
3369		amdgpu_ras_set_mca_debug_mode(adev, false);
3370	}
3371
3372	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
3373		obj = node->ras_obj;
3374		if (!obj) {
3375			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
3376			continue;
3377		}
3378
3379		if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
3380			continue;
3381
3382		if (obj->ras_late_init) {
3383			r = obj->ras_late_init(adev, &obj->ras_comm);
3384			if (r) {
3385				dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
3386					obj->ras_comm.name, r);
3387				return r;
3388			}
3389		} else
3390			amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
3391	}
3392
3393	return 0;
3394}
3395
3396/* do some fini work before IP fini, as a dependency */
3397int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
3398{
3399	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3400
3401	if (!adev->ras_enabled || !con)
3402		return 0;
3403
3405	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
3406	if (AMDGPU_RAS_GET_FEATURES(con->features))
3407		amdgpu_ras_disable_all_features(adev, 0);
3408	amdgpu_ras_recovery_fini(adev);
3409	return 0;
3410}
3411
3412int amdgpu_ras_fini(struct amdgpu_device *adev)
3413{
3414	struct amdgpu_ras_block_list *ras_node, *tmp;
3415	struct amdgpu_ras_block_object *obj = NULL;
3416	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3417
3418	if (!adev->ras_enabled || !con)
3419		return 0;
3420
3421	list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
3422		if (ras_node->ras_obj) {
3423			obj = ras_node->ras_obj;
3424			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
3425			    obj->ras_fini)
3426				obj->ras_fini(adev, &obj->ras_comm);
3427			else
3428				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
3429		}
3430
3431		/* Clear ras blocks from ras_list and free ras block list node */
3432		list_del(&ras_node->node);
3433		kfree(ras_node);
3434	}
3435
3436	amdgpu_ras_fs_fini(adev);
3437	amdgpu_ras_interrupt_remove_all(adev);
3438
3439	if (amdgpu_aca_is_enabled(adev))
3440		amdgpu_aca_fini(adev);
3441
3442	WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
3443
3444	if (AMDGPU_RAS_GET_FEATURES(con->features))
3445		amdgpu_ras_disable_all_features(adev, 0);
3446
3447	cancel_delayed_work_sync(&con->ras_counte_delay_work);
3448
3449	amdgpu_ras_set_context(adev, NULL);
3450	kfree(con);
3451
3452	return 0;
3453}
3454
3455bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
3456{
3457	struct amdgpu_ras *ras;
3458
3459	ras = amdgpu_ras_get_context(adev);
3460	if (!ras)
3461		return false;
3462
3463	return atomic_read(&ras->fed);
3464}
3465
3466void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
3467{
3468	struct amdgpu_ras *ras;
3469
3470	ras = amdgpu_ras_get_context(adev);
3471	if (ras)
3472		atomic_set(&ras->fed, !!status);
3473}
3474
3475void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
3476{
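	/* only the first fatal-error interrupt kicks off recovery; later ones are ignored */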
3477	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
3478		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3479
3480		dev_info(adev->dev, "uncorrectable hardware error "
3481			"(ERREVENT_ATHUB_INTERRUPT) detected!\n");
3482
3483		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3484		amdgpu_ras_reset_gpu(adev);
3485	}
3486}
3487
3488bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
3489{
3490	if (adev->asic_type == CHIP_VEGA20 &&
3491	    adev->pm.fw_version <= 0x283400) {
3492		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
3493				amdgpu_ras_intr_triggered();
3494	}
3495
3496	return false;
3497}
3498
3499void amdgpu_release_ras_context(struct amdgpu_device *adev)
3500{
3501	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3502
3503	if (!con)
3504		return;
3505
3506	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
3507		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
3508		amdgpu_ras_set_context(adev, NULL);
3509		kfree(con);
3510	}
3511}
3512
3513#ifdef CONFIG_X86_MCE_AMD
3514static struct amdgpu_device *find_adev(uint32_t node_id)
3515{
3516	int i;
3517	struct amdgpu_device *adev = NULL;
3518
3519	for (i = 0; i < mce_adev_list.num_gpu; i++) {
3520		adev = mce_adev_list.devs[i];
3521
3522		if (adev && adev->gmc.xgmi.connected_to_cpu &&
3523		    adev->gmc.xgmi.physical_node_id == node_id)
3524			break;
3525		adev = NULL;
3526	}
3527
3528	return adev;
3529}
3530
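/* helpers to decode the GPU id, UMC instance and channel index that are
 * packed into the MCA_IPID register value for GPU UMC banks
 */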
3531#define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
3532#define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
3533#define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
3534#define GPU_ID_OFFSET		8
3535
3536static int amdgpu_bad_page_notifier(struct notifier_block *nb,
3537				    unsigned long val, void *data)
3538{
3539	struct mce *m = (struct mce *)data;
3540	struct amdgpu_device *adev = NULL;
3541	uint32_t gpu_id = 0;
3542	uint32_t umc_inst = 0, ch_inst = 0;
3543
3544	/*
3545	 * Only process the error if it was generated in UMC_V2, which
3546	 * belongs to the GPU UMCs, and occurred in DramECC (extended
3547	 * error code = 0); otherwise bail out.
3548	 */
3549	if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
3550		    (XEC(m->status, 0x3f) == 0x0)))
3551		return NOTIFY_DONE;
3552
3553	/*
3554	 * If it is a correctable error, return.
3555	 */
3556	if (mce_is_correctable(m))
3557		return NOTIFY_OK;
3558
3559	/*
3560	 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
3561	 */
3562	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
3563
3564	adev = find_adev(gpu_id);
3565	if (!adev) {
3566		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
3567								gpu_id);
3568		return NOTIFY_DONE;
3569	}
3570
3571	/*
3572	 * If it is uncorrectable error, then find out UMC instance and
3573	 * channel index.
3574	 */
3575	umc_inst = GET_UMC_INST(m->ipid);
3576	ch_inst = GET_CHAN_INDEX(m->ipid);
3577
3578	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
3579			     umc_inst, ch_inst);
3580
3581	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
3582		return NOTIFY_OK;
3583	else
3584		return NOTIFY_DONE;
3585}
3586
3587static struct notifier_block amdgpu_bad_page_nb = {
3588	.notifier_call  = amdgpu_bad_page_notifier,
3589	.priority       = MCE_PRIO_UC,
3590};
3591
3592static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
3593{
3594	/*
3595	 * Add the adev to the mce_adev_list.
3596	 * During mode2 reset, amdgpu device is temporarily
3597	 * removed from the mgpu_info list which can cause
3598	 * page retirement to fail.
3599	 * Use this list instead of mgpu_info to find the amdgpu
3600	 * device on which the UMC error was reported.
3601	 */
3602	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
3603
3604	/*
3605	 * Register the x86 notifier only once
3606	 * with MCE subsystem.
3607	 */
 3608	if (!notifier_registered) {
3609		mce_register_decode_chain(&amdgpu_bad_page_nb);
3610		notifier_registered = true;
3611	}
3612}
3613#endif
3614
3615struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
3616{
3617	if (!adev)
3618		return NULL;
3619
3620	return adev->psp.ras_context.ras;
3621}
3622
3623int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
3624{
3625	if (!adev)
3626		return -EINVAL;
3627
3628	adev->psp.ras_context.ras = ras_con;
3629	return 0;
3630}
3631
3632/* check if ras is supported on block, say, sdma, gfx */
3633int amdgpu_ras_is_supported(struct amdgpu_device *adev,
3634		unsigned int block)
3635{
3636	int ret = 0;
3637	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3638
3639	if (block >= AMDGPU_RAS_BLOCK_COUNT)
3640		return 0;
3641
3642	ret = ras && (adev->ras_enabled & (1 << block));
3643
 3644	/* Special case: on ASICs with memory ECC enabled but SRAM ECC
 3645	 * disabled, a block may be missing from .ras_enabled and still
 3646	 * support RAS. If the ASIC supports poison mode and the block
 3647	 * has a RAS configuration, treat the block as RAS-capable.
 3648	 */
3650	if (!ret &&
3651	    (block == AMDGPU_RAS_BLOCK__GFX ||
3652	     block == AMDGPU_RAS_BLOCK__SDMA ||
3653	     block == AMDGPU_RAS_BLOCK__VCN ||
3654	     block == AMDGPU_RAS_BLOCK__JPEG) &&
 3655	    (amdgpu_ras_mask & (1 << block)) &&
3656	    amdgpu_ras_is_poison_mode_supported(adev) &&
3657	    amdgpu_ras_get_ras_block(adev, block, 0))
3658		ret = 1;
3659
3660	return ret;
3661}
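
/*
 * Editor's sketch: a typical caller gates block-specific RAS setup on
 * amdgpu_ras_is_supported(). The UMC block and function name are only
 * illustrative; real callers pass their own block id.
 */
static void example_setup_umc_ras(struct amdgpu_device *adev)
{
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
		return;	/* nothing to set up on this ASIC */

	/* ... block-specific RAS setup would go here ... */
}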
3662
3663int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
3664{
3665	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3666
3667	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
3668		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
3669	return 0;
3670}
3671
3672int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
3673{
3674	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3675	int ret = 0;
3676
3677	if (con) {
3678		ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
3679		if (!ret)
3680			con->is_aca_debug_mode = enable;
3681	}
3682
3683	return ret;
3684}
3685
3686int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
3687{
3688	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3689	int ret = 0;
3690
3691	if (con) {
3692		if (amdgpu_aca_is_enabled(adev))
3693			ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
3694		else
3695			ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
3696		if (!ret)
3697			con->is_aca_debug_mode = enable;
3698	}
3699
3700	return ret;
3701}
3702
3703bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
3704{
3705	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3706	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
3707	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
3708
3709	if (!con)
3710		return false;
3711
3712	if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
3713	    (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
3714		return con->is_aca_debug_mode;
3715	else
3716		return true;
3717}
3718
3719bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
3720				     unsigned int *error_query_mode)
3721{
3722	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3723	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
3724	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
3725
3726	if (!con) {
3727		*error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
3728		return false;
3729	}
3730
3731	if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode))
3732		*error_query_mode =
3733			(con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
3734	else
3735		*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
3736
3737	return true;
3738}
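
/*
 * Editor's sketch: callers of amdgpu_ras_get_error_query_mode() branch
 * on the returned mode; a false return (and AMDGPU_RAS_INVALID_ERROR_QUERY)
 * means there is no valid query path. The helper name is illustrative.
 */
static bool example_uses_firmware_query(struct amdgpu_device *adev)
{
	unsigned int mode;

	if (!amdgpu_ras_get_error_query_mode(adev, &mode))
		return false;

	return mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
}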
3739
3740/* Register each ip ras block into amdgpu ras */
3741int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
3742		struct amdgpu_ras_block_object *ras_block_obj)
3743{
3744	struct amdgpu_ras_block_list *ras_node;
3745	if (!adev || !ras_block_obj)
3746		return -EINVAL;
3747
3748	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
3749	if (!ras_node)
3750		return -ENOMEM;
3751
3752	INIT_LIST_HEAD(&ras_node->node);
3753	ras_node->ras_obj = ras_block_obj;
3754	list_add_tail(&ras_node->node, &adev->ras_list);
3755
3756	return 0;
3757}
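
/*
 * Editor's sketch: an IP block embeds a struct amdgpu_ras_block_object,
 * fills in the common data, and registers it so it shows up on
 * adev->ras_list for later lookups. The gfx block id and function name
 * are illustrative; the ras_block_match callback is optional.
 */
static int example_register_gfx_ras(struct amdgpu_device *adev,
				    struct amdgpu_ras_block_object *obj)
{
	obj->ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
	obj->ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;

	return amdgpu_ras_register_ras_block(adev, obj);
}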
3758
3759void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
3760{
3761	if (!err_type_name)
3762		return;
3763
3764	switch (err_type) {
3765	case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
3766		sprintf(err_type_name, "correctable");
3767		break;
3768	case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
3769		sprintf(err_type_name, "uncorrectable");
3770		break;
3771	default:
3772		sprintf(err_type_name, "unknown");
3773		break;
3774	}
3775}
3776
3777bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
3778					 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
3779					 uint32_t instance,
3780					 uint32_t *memory_id)
3781{
3782	uint32_t err_status_lo_data, err_status_lo_offset;
3783
3784	if (!reg_entry)
3785		return false;
3786
3787	err_status_lo_offset =
3788		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
3789					    reg_entry->seg_lo, reg_entry->reg_lo);
3790	err_status_lo_data = RREG32(err_status_lo_offset);
3791
3792	if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
3793	    !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
3794		return false;
3795
3796	*memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
3797
3798	return true;
3799}
3800
3801bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
3802				       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
3803				       uint32_t instance,
3804				       unsigned long *err_cnt)
3805{
3806	uint32_t err_status_hi_data, err_status_hi_offset;
3807
3808	if (!reg_entry)
3809		return false;
3810
3811	err_status_hi_offset =
3812		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
3813					    reg_entry->seg_hi, reg_entry->reg_hi);
3814	err_status_hi_data = RREG32(err_status_hi_offset);
3815
3816	if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
3817	    !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
3818		/* keep the check here in case we need to refer to the result later */
3819		dev_dbg(adev->dev, "Invalid err_info field\n");
3820
3821	/* read err count */
3822	*err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
3823
3824	return true;
3825}
3826
3827void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
3828					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
3829					   uint32_t reg_list_size,
3830					   const struct amdgpu_ras_memory_id_entry *mem_list,
3831					   uint32_t mem_list_size,
3832					   uint32_t instance,
3833					   uint32_t err_type,
3834					   unsigned long *err_count)
3835{
3836	uint32_t memory_id;
3837	unsigned long err_cnt;
3838	char err_type_name[16];
3839	uint32_t i, j;
3840
3841	for (i = 0; i < reg_list_size; i++) {
3842		/* query memory_id from err_status_lo */
3843		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
3844							 instance, &memory_id))
3845			continue;
3846
3847		/* query err_cnt from err_status_hi */
3848		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
3849						       instance, &err_cnt) ||
3850		    !err_cnt)
3851			continue;
3852
3853		*err_count += err_cnt;
3854
3855		/* log the errors */
3856		amdgpu_ras_get_error_type_name(err_type, err_type_name);
3857		if (!mem_list) {
3858			/* memory_list is not supported */
3859			dev_info(adev->dev,
3860				 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
3861				 err_cnt, err_type_name,
3862				 reg_list[i].block_name,
3863				 instance, memory_id);
3864		} else {
3865			for (j = 0; j < mem_list_size; j++) {
3866				if (memory_id == mem_list[j].memory_id) {
3867					dev_info(adev->dev,
3868						 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
3869						 err_cnt, err_type_name,
3870						 reg_list[i].block_name,
3871						 instance, mem_list[j].name);
3872					break;
3873				}
3874			}
3875		}
3876	}
3877}
3878
3879void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
3880					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
3881					   uint32_t reg_list_size,
3882					   uint32_t instance)
3883{
3884	uint32_t err_status_lo_offset, err_status_hi_offset;
3885	uint32_t i;
3886
3887	for (i = 0; i < reg_list_size; i++) {
3888		err_status_lo_offset =
3889			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
3890						    reg_list[i].seg_lo, reg_list[i].reg_lo);
3891		err_status_hi_offset =
3892			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
3893						    reg_list[i].seg_hi, reg_list[i].reg_hi);
3894		WREG32(err_status_lo_offset, 0);
3895		WREG32(err_status_hi_offset, 0);
3896	}
3897}
3898
3899int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
3900{
3901	memset(err_data, 0, sizeof(*err_data));
3902
3903	INIT_LIST_HEAD(&err_data->err_node_list);
3904
3905	return 0;
3906}
3907
3908static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
3909{
3910	if (!err_node)
3911		return;
3912
3913	list_del(&err_node->node);
3914	kvfree(err_node);
3915}
3916
3917void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
3918{
3919	struct ras_err_node *err_node, *tmp;
3920
3921	list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
3922		amdgpu_ras_error_node_release(err_node);
3923}
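
/*
 * Editor's sketch: the intended err_data lifecycle around the two
 * helpers above -- init on the stack, fill via a query, then fini to
 * release every node on err_node_list. Function name is illustrative.
 */
static int example_err_data_lifecycle(void)
{
	struct ras_err_data err_data;
	int ret;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	/* ... populate via amdgpu_ras_error_statistic_*_count() ... */

	amdgpu_ras_error_data_fini(&err_data);
	return 0;
}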
3924
3925static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
3926							     struct amdgpu_smuio_mcm_config_info *mcm_info)
3927{
3928	struct ras_err_node *err_node;
3929	struct amdgpu_smuio_mcm_config_info *ref_id;
3930
3931	if (!err_data || !mcm_info)
3932		return NULL;
3933
3934	for_each_ras_error(err_node, err_data) {
3935		ref_id = &err_node->err_info.mcm_info;
3936
3937		if (mcm_info->socket_id == ref_id->socket_id &&
3938		    mcm_info->die_id == ref_id->die_id)
3939			return err_node;
3940	}
3941
3942	return NULL;
3943}
3944
3945static struct ras_err_node *amdgpu_ras_error_node_new(void)
3946{
3947	struct ras_err_node *err_node;
3948
3949	err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
3950	if (!err_node)
3951		return NULL;
3952
3953	INIT_LIST_HEAD(&err_node->node);
3954
3955	return err_node;
3956}
3957
3958static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
3959{
3960	struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
3961	struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
3962	struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
3963	struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
3964
3965	if (unlikely(infoa->socket_id != infob->socket_id))
3966		return infoa->socket_id - infob->socket_id;
3967	else
3968		return infoa->die_id - infob->die_id;
3971}
3972
3973static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
3974				struct amdgpu_smuio_mcm_config_info *mcm_info)
3975{
3976	struct ras_err_node *err_node;
3977
3978	err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
3979	if (err_node)
3980		return &err_node->err_info;
3981
3982	err_node = amdgpu_ras_error_node_new();
3983	if (!err_node)
3984		return NULL;
3985
3986	INIT_LIST_HEAD(&err_node->err_info.err_addr_list);
3987
3988	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
3989
3990	err_data->err_list_count++;
3991	list_add_tail(&err_node->node, &err_data->err_node_list);
3992	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
3993
3994	return &err_node->err_info;
3995}
3996
3997void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *err_addr)
3998{
3999	struct ras_err_addr *mca_err_addr;
4000
4001	mca_err_addr = kzalloc(sizeof(*mca_err_addr), GFP_KERNEL);
4002	if (!mca_err_addr)
4003		return;
4004
4005	INIT_LIST_HEAD(&mca_err_addr->node);
4006
4007	mca_err_addr->err_status = err_addr->err_status;
4008	mca_err_addr->err_ipid = err_addr->err_ipid;
4009	mca_err_addr->err_addr = err_addr->err_addr;
4010
4011	list_add_tail(&mca_err_addr->node, &err_info->err_addr_list);
4012}
4013
4014void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr)
4015{
4016	list_del(&mca_err_addr->node);
4017	kfree(mca_err_addr);
4018}
4019
4020int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
4021		struct amdgpu_smuio_mcm_config_info *mcm_info,
4022		struct ras_err_addr *err_addr, u64 count)
4023{
4024	struct ras_err_info *err_info;
4025
4026	if (!err_data || !mcm_info)
4027		return -EINVAL;
4028
4029	if (!count)
4030		return 0;
4031
4032	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4033	if (!err_info)
4034		return -EINVAL;
4035
4036	if (err_addr && err_addr->err_status)
4037		amdgpu_ras_add_mca_err_addr(err_info, err_addr);
4038
4039	err_info->ue_count += count;
4040	err_data->ue_count += count;
4041
4042	return 0;
4043}
4044
4045int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
4046		struct amdgpu_smuio_mcm_config_info *mcm_info,
4047		struct ras_err_addr *err_addr, u64 count)
4048{
4049	struct ras_err_info *err_info;
4050
4051	if (!err_data || !mcm_info)
4052		return -EINVAL;
4053
4054	if (!count)
4055		return 0;
4056
4057	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4058	if (!err_info)
4059		return -EINVAL;
4060
4061	err_info->ce_count += count;
4062	err_data->ce_count += count;
4063
4064	return 0;
4065}
4066
4067int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
4068		struct amdgpu_smuio_mcm_config_info *mcm_info,
4069		struct ras_err_addr *err_addr, u64 count)
4070{
4071	struct ras_err_info *err_info;
4072
4073	if (!err_data || !mcm_info)
4074		return -EINVAL;
4075
4076	if (!count)
4077		return 0;
4078
4079	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4080	if (!err_info)
4081		return -EINVAL;
4082
4083	if (err_addr && err_addr->err_status)
4084		amdgpu_ras_add_mca_err_addr(err_info, err_addr);
4085
4086	err_info->de_count += count;
4087	err_data->de_count += count;
4088
4089	return 0;
4090}
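
/*
 * Editor's sketch: how a hardware query callback with per-die data
 * feeds the statistic helpers above. The socket/die ids and the count
 * are placeholders; passing a NULL err_addr simply skips the MCA
 * address bookkeeping.
 */
static void example_report_one_ue(struct ras_err_data *err_data)
{
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = 0,
		.die_id = 0,
	};

	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, 1);
}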
4091
4092#define mmMP0_SMN_C2PMSG_92	0x1609C
4093#define mmMP0_SMN_C2PMSG_126	0x160BE
4094static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
4095						 u32 instance, u32 boot_error)
4096{
4097	u32 socket_id, aid_id, hbm_id;
4098	u32 reg_data;
4099	u64 reg_addr;
4100
4101	socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
4102	aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
4103	hbm_id = AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error);
4104
 4105	/* The SMN addressing pattern on other SOCs may differ from the one
 4106	 * used for aqua_vanjaram. Revisit this code if the pattern changes;
 4107	 * in that case, replace the aqua_vanjaram implementation with a more
 4108	 * generic helper. */
4109	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4110		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4111
4112	reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4113	dev_err(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n",
4114		socket_id, aid_id, reg_data);
4115
4116	if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
4117		dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n",
4118			 socket_id, aid_id, hbm_id);
4119
4120	if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
4121		dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n",
4122			 socket_id, aid_id);
4123
4124	if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
4125		dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n",
4126			 socket_id, aid_id);
4127
4128	if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
4129		dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n",
4130			 socket_id, aid_id);
4131
4132	if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
4133		dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n",
4134			 socket_id, aid_id);
4135
4136	if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
4137		dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n",
4138			 socket_id, aid_id);
4139
4140	if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
4141		dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n",
4142			 socket_id, aid_id, hbm_id);
4143
4144	if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
4145		dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n",
4146			 socket_id, aid_id, hbm_id);
4147}
4148
4149static int amdgpu_ras_wait_for_boot_complete(struct amdgpu_device *adev,
4150					     u32 instance, u32 *boot_error)
4151{
4152	u32 reg_addr;
4153	u32 reg_data;
4154	int retry_loop;
4155
4156	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4157		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4158
4159	for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
4160		reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4161		if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS) {
4162			*boot_error = AMDGPU_RAS_BOOT_SUCEESS;
4163			return 0;
4164		}
4165		msleep(1);
4166	}
4167
 4168	/* The SMN addressing pattern on other SOCs may differ from the one
 4169	 * used for aqua_vanjaram. Revisit this code if the pattern changes;
 4170	 * in that case, replace the aqua_vanjaram implementation with a more
 4171	 * generic helper. */
4172	reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
4173		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4174
4175	for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
4176		reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4177		if (AMDGPU_RAS_GPU_ERR_BOOT_STATUS(reg_data)) {
4178			*boot_error = reg_data;
4179			return 0;
4180		}
4181		msleep(1);
4182	}
4183
4184	*boot_error = reg_data;
4185	return -ETIME;
4186}
4187
4188void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
4189{
4190	u32 boot_error = 0;
4191	u32 i;
4192
4193	for (i = 0; i < num_instances; i++) {
4194		if (amdgpu_ras_wait_for_boot_complete(adev, i, &boot_error))
4195			amdgpu_ras_boot_time_error_reporting(adev, i, boot_error);
4196	}
4197}
v6.13.7
 121#define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)
 122
 123#define MAX_UMC_POISON_POLLING_TIME_ASYNC  300  //ms
 124
 125#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100  //ms
 126
 127#define MAX_FLUSH_RETIRE_DWORK_TIMES  100
 128
 129enum amdgpu_ras_retire_page_reservation {
 130	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
 131	AMDGPU_RAS_RETIRE_PAGE_PENDING,
 132	AMDGPU_RAS_RETIRE_PAGE_FAULT,
 133};
 134
 135atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
 136
 137static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
 138				uint64_t addr);
 139static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
 140				uint64_t addr);
 141#ifdef CONFIG_X86_MCE_AMD
 142static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
 143struct mce_notifier_adev_list {
 144	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
 145	int num_gpu;
 146};
 147static struct mce_notifier_adev_list mce_adev_list;
 148#endif
 149
 150void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
 151{
 152	if (adev && amdgpu_ras_get_context(adev))
 153		amdgpu_ras_get_context(adev)->error_query_ready = ready;
 154}
 155
 156static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
 157{
 158	if (adev && amdgpu_ras_get_context(adev))
 159		return amdgpu_ras_get_context(adev)->error_query_ready;
 160
 161	return false;
 162}
 163
 164static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
 165{
 166	struct ras_err_data err_data;
 167	struct eeprom_table_record err_rec;
 168	int ret;
 169
 170	if ((address >= adev->gmc.mc_vram_size) ||
 171	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
 172		dev_warn(adev->dev,
 173		         "RAS WARN: input address 0x%llx is invalid.\n",
 174		         address);
 175		return -EINVAL;
 176	}
 177
 178	if (amdgpu_ras_check_bad_page(adev, address)) {
 179		dev_warn(adev->dev,
 180			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
 181			 address);
 182		return 0;
 183	}
 184
 185	ret = amdgpu_ras_error_data_init(&err_data);
 186	if (ret)
 187		return ret;
 188
 189	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
 190	err_data.err_addr = &err_rec;
 191	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
 192
 193	if (amdgpu_bad_page_threshold != 0) {
 194		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
 195					 err_data.err_addr_cnt);
 196		amdgpu_ras_save_bad_pages(adev, NULL);
 197	}
 198
 199	amdgpu_ras_error_data_fini(&err_data);
 200
 201	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
 202	dev_warn(adev->dev, "Clear EEPROM:\n");
 203	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
 204
 205	return 0;
 206}
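
/*
 * Editor's note: this direct-reserve path is reached from the debugfs
 * control file via the "retire_page" command (op == 3, parsed later in
 * this file), e.g.:
 *
 *	echo "retire_page 0x1000" > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * The address is a VRAM offset, as validated at the top of the function.
 */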
 207
 208static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
 209					size_t size, loff_t *pos)
 210{
 211	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
 212	struct ras_query_if info = {
 213		.head = obj->head,
 214	};
 215	ssize_t s;
 216	char val[128];
 217
 218	if (amdgpu_ras_query_error_status(obj->adev, &info))
 219		return -EINVAL;
 220
 221	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
 222	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
 223	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
 224		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
 225			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
 226	}
 227
 228	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
 229			"ue", info.ue_count,
 230			"ce", info.ce_count);
 231	if (*pos >= s)
 232		return 0;
 233
 234	s -= *pos;
 235	s = min_t(u64, s, size);
  236
 238	if (copy_to_user(buf, &val[*pos], s))
 239		return -EINVAL;
 240
 241	*pos += s;
 242
 243	return s;
 244}
 245
 246static const struct file_operations amdgpu_ras_debugfs_ops = {
 247	.owner = THIS_MODULE,
 248	.read = amdgpu_ras_debugfs_read,
 249	.write = NULL,
 250	.llseek = default_llseek
 251};
 252
 253static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
 254{
 255	int i;
 256
 257	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
 258		*block_id = i;
 259		if (strcmp(name, ras_block_string[i]) == 0)
 260			return 0;
 261	}
 262	return -EINVAL;
 263}
 264
 265static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
 266		const char __user *buf, size_t size,
 267		loff_t *pos, struct ras_debug_if *data)
 268{
 269	ssize_t s = min_t(u64, 64, size);
 270	char str[65];
 271	char block_name[33];
 272	char err[9] = "ue";
 273	int op = -1;
 274	int block_id;
 275	uint32_t sub_block;
 276	u64 address, value;
 277	/* default value is 0 if the mask is not set by user */
 278	u32 instance_mask = 0;
 279
 280	if (*pos)
 281		return -EINVAL;
 282	*pos = size;
 283
 284	memset(str, 0, sizeof(str));
 285	memset(data, 0, sizeof(*data));
 286
 287	if (copy_from_user(str, buf, s))
 288		return -EINVAL;
 289
 290	if (sscanf(str, "disable %32s", block_name) == 1)
 291		op = 0;
 292	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
 293		op = 1;
 294	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
 295		op = 2;
 296	else if (strstr(str, "retire_page") != NULL)
 297		op = 3;
 298	else if (str[0] && str[1] && str[2] && str[3])
 299		/* ascii string, but commands are not matched. */
 300		return -EINVAL;
 301
 302	if (op != -1) {
 303		if (op == 3) {
 304			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
 305			    sscanf(str, "%*s %llu", &address) != 1)
 306				return -EINVAL;
 307
 308			data->op = op;
 309			data->inject.address = address;
 310
 311			return 0;
 312		}
 313
 314		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
 315			return -EINVAL;
 316
 317		data->head.block = block_id;
 318		/* only ue, ce and poison errors are supported */
 319		if (!memcmp("ue", err, 2))
 320			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 321		else if (!memcmp("ce", err, 2))
 322			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
 323		else if (!memcmp("poison", err, 6))
 324			data->head.type = AMDGPU_RAS_ERROR__POISON;
 325		else
 326			return -EINVAL;
 327
 328		data->op = op;
 329
 330		if (op == 2) {
 331			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
 332				   &sub_block, &address, &value, &instance_mask) != 4 &&
 333			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
 334				   &sub_block, &address, &value, &instance_mask) != 4 &&
 335				sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
 336				   &sub_block, &address, &value) != 3 &&
 337			    sscanf(str, "%*s %*s %*s %u %llu %llu",
 338				   &sub_block, &address, &value) != 3)
 339				return -EINVAL;
 340			data->head.sub_block_index = sub_block;
 341			data->inject.address = address;
 342			data->inject.value = value;
 343			data->inject.instance_mask = instance_mask;
 344		}
 345	} else {
 346		if (size < sizeof(*data))
 347			return -EINVAL;
 348
 349		if (copy_from_user(data, buf, sizeof(*data)))
 350			return -EINVAL;
 351	}
 352
 353	return 0;
 354}
 355
 356static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
 357				struct ras_debug_if *data)
 358{
 359	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
 360	uint32_t mask, inst_mask = data->inject.instance_mask;
 361
 362	/* no need to set instance mask if there is only one instance */
 363	if (num_xcc <= 1 && inst_mask) {
 364		data->inject.instance_mask = 0;
 365		dev_dbg(adev->dev,
  366			"RAS inject mask(0x%x) isn't supported, forcing it to 0.\n",
 367			inst_mask);
 368
 369		return;
 370	}
 371
 372	switch (data->head.block) {
 373	case AMDGPU_RAS_BLOCK__GFX:
 374		mask = GENMASK(num_xcc - 1, 0);
 375		break;
 376	case AMDGPU_RAS_BLOCK__SDMA:
 377		mask = GENMASK(adev->sdma.num_instances - 1, 0);
 378		break;
 379	case AMDGPU_RAS_BLOCK__VCN:
 380	case AMDGPU_RAS_BLOCK__JPEG:
 381		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
 382		break;
 383	default:
 384		mask = inst_mask;
 385		break;
 386	}
 387
 388	/* remove invalid bits in instance mask */
 389	data->inject.instance_mask &= mask;
 390	if (inst_mask != data->inject.instance_mask)
 391		dev_dbg(adev->dev,
 392			"Adjust RAS inject mask 0x%x to 0x%x\n",
 393			inst_mask, data->inject.instance_mask);
 394}
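
/*
 * Editor's worked example: with num_xcc == 4 the gfx mask is
 * GENMASK(3, 0) == 0xf, so a user-supplied inject mask of 0x13 is
 * trimmed to 0x13 & 0xf == 0x3 and the adjustment is logged above.
 */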
 395
 396/**
 397 * DOC: AMDGPU RAS debugfs control interface
 398 *
 399 * The control interface accepts struct ras_debug_if which has two members.
 400 *
 401 * First member: ras_debug_if::head or ras_debug_if::inject.
 402 *
 403 * head is used to indicate which IP block will be under control.
 404 *
 405 * head has four members, they are block, type, sub_block_index, name.
 406 * block: which IP will be under control.
 407 * type: what kind of error will be enabled/disabled/injected.
  408 * sub_block_index: some IPs have sub-components, e.g. GFX, SDMA.
 409 * name: the name of IP.
 410 *
  411 * inject has three more members than head: address, value and mask.
  412 * As their names indicate, an inject operation writes the value to
  413 * the address.
 414 *
 415 * The second member: struct ras_debug_if::op.
 416 * It has three kinds of operations.
 417 *
 418 * - 0: disable RAS on the block. Take ::head as its data.
 419 * - 1: enable RAS on the block. Take ::head as its data.
 420 * - 2: inject errors on the block. Take ::inject as its data.
 421 *
 422 * How to use the interface?
 423 *
 424 * In a program
 425 *
 426 * Copy the struct ras_debug_if in your code and initialize it.
 427 * Write the struct to the control interface.
 428 *
 429 * From shell
 430 *
 431 * .. code-block:: bash
 432 *
 433 *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 434 *	echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 435 *	echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 436 *
  437 * Where N is the card you want to affect.
 438 *
 439 * "disable" requires only the block.
 440 * "enable" requires the block and error type.
 441 * "inject" requires the block, error type, address, and value.
 442 *
 443 * The block is one of: umc, sdma, gfx, etc.
 444 *	see ras_block_string[] for details
 445 *
  446 * The error type is one of: ue, ce and poison, where
 447 *	ue is multi-uncorrectable
 448 *	ce is single-correctable
 449 *	poison is poison
 450 *
  451 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
  452 * The address and value are hexadecimal numbers; the leading 0x is optional.
  453 * The mask is the instance mask; it is optional and defaults to 0x1.
 454 *
 455 * For instance,
 456 *
 457 * .. code-block:: bash
 458 *
 459 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 460 *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 461 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 462 *
 463 * How to check the result of the operation?
 464 *
 465 * To check disable/enable, see "ras" features at,
 466 * /sys/class/drm/card[0/1/2...]/device/ras/features
 467 *
 468 * To check inject, see the corresponding error count at,
 469 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 470 *
 471 * .. note::
 472 *	Operations are only allowed on blocks which are supported.
 473 *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 474 *	to see which blocks support RAS on a particular asic.
 475 *
 476 */
 477static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
 478					     const char __user *buf,
 479					     size_t size, loff_t *pos)
 480{
 481	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
 482	struct ras_debug_if data;
 483	int ret = 0;
 484
 485	if (!amdgpu_ras_get_error_query_ready(adev)) {
 486		dev_warn(adev->dev, "RAS WARN: error injection "
 487				"currently inaccessible\n");
 488		return size;
 489	}
 490
 491	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
 492	if (ret)
 493		return ret;
 494
 495	if (data.op == 3) {
 496		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
 497		if (!ret)
 498			return size;
 499		else
 500			return ret;
 501	}
 502
 503	if (!amdgpu_ras_is_supported(adev, data.head.block))
 504		return -EINVAL;
 505
 506	switch (data.op) {
 507	case 0:
 508		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
 509		break;
 510	case 1:
 511		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
 512		break;
 513	case 2:
 514		if ((data.inject.address >= adev->gmc.mc_vram_size &&
 515		    adev->gmc.mc_vram_size) ||
 516		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
 517			dev_warn(adev->dev, "RAS WARN: input address "
 518					"0x%llx is invalid.",
 519					data.inject.address);
 520			ret = -EINVAL;
 521			break;
 522		}
 523
 524		/* umc ce/ue error injection for a bad page is not allowed */
 525		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
 526		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
 527			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
 528				 "already been marked as bad!\n",
 529				 data.inject.address);
 530			break;
 531		}
 532
 533		amdgpu_ras_instance_mask_check(adev, &data);
 534
 535		/* data.inject.address is offset instead of absolute gpu address */
 536		ret = amdgpu_ras_error_inject(adev, &data.inject);
 537		break;
 538	default:
 539		ret = -EINVAL;
 540		break;
 541	}
 542
 543	if (ret)
 544		return ret;
 545
 546	return size;
 547}
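
/*
 * Editor's sketch (userspace, not driver code): driving the control
 * interface documented above with its string commands, which avoids
 * mirroring the binary struct ras_debug_if layout. The debugfs path is
 * the one from the DOC; error handling is deliberately minimal.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int example_ras_ctrl(const char *cmd)
{
	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;

	n = write(fd, cmd, strlen(cmd));	/* e.g. "disable umc" */
	close(fd);
	return n < 0 ? -1 : 0;
}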
 548
 549/**
 550 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 551 *
 552 * Some boards contain an EEPROM which is used to persistently store a list of
  553 * bad pages which have experienced ECC errors in vram.  This interface provides
 554 * a way to reset the EEPROM, e.g., after testing error injection.
 555 *
 556 * Usage:
 557 *
 558 * .. code-block:: bash
 559 *
 560 *	echo 1 > ../ras/ras_eeprom_reset
 561 *
 562 * will reset EEPROM table to 0 entries.
 563 *
 564 */
 565static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
 566					       const char __user *buf,
 567					       size_t size, loff_t *pos)
 568{
 569	struct amdgpu_device *adev =
 570		(struct amdgpu_device *)file_inode(f)->i_private;
 571	int ret;
 572
 573	ret = amdgpu_ras_eeprom_reset_table(
 574		&(amdgpu_ras_get_context(adev)->eeprom_control));
 575
 576	if (!ret) {
 577		/* Something was written to EEPROM.
 578		 */
 579		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
 580		return size;
 581	} else {
 582		return ret;
 583	}
 584}
 585
 586static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
 587	.owner = THIS_MODULE,
 588	.read = NULL,
 589	.write = amdgpu_ras_debugfs_ctrl_write,
 590	.llseek = default_llseek
 591};
 592
 593static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
 594	.owner = THIS_MODULE,
 595	.read = NULL,
 596	.write = amdgpu_ras_debugfs_eeprom_write,
 597	.llseek = default_llseek
 598};
 599
 600/**
 601 * DOC: AMDGPU RAS sysfs Error Count Interface
 602 *
 603 * It allows the user to read the error count for each IP block on the gpu through
 604 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 605 *
  606 * It outputs multiple lines which report the uncorrected (ue) and corrected
 607 * (ce) error counts.
 608 *
 609 * The format of one line is below,
 610 *
 611 * [ce|ue]: count
 612 *
 613 * Example:
 614 *
 615 * .. code-block:: bash
 616 *
 617 *	ue: 0
 618 *	ce: 1
 619 *
 620 */
 621static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
 622		struct device_attribute *attr, char *buf)
 623{
 624	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
 625	struct ras_query_if info = {
 626		.head = obj->head,
 627	};
 628
 629	if (!amdgpu_ras_get_error_query_ready(obj->adev))
 630		return sysfs_emit(buf, "Query currently inaccessible\n");
 631
 632	if (amdgpu_ras_query_error_status(obj->adev, &info))
 633		return -EINVAL;
 634
 635	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
 636	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
 637		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
 638			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
 639	}
 640
 641	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
 642		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
 643				"ce", info.ce_count, "de", info.de_count);
 644	else
 645		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
 646				"ce", info.ce_count);
 647}
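
/*
 * Editor's sketch (userspace, not driver code): parsing the sysfs
 * format documented above ("ue: N" / "ce: N"). The path argument is
 * whichever *_err_count file the caller picked.
 */
#include <stdio.h>

static int example_read_err_count(const char *path,
				  unsigned long *ue, unsigned long *ce)
{
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;

	if (fscanf(f, "ue: %lu ce: %lu", ue, ce) != 2) {
		fclose(f);
		return -1;
	}

	fclose(f);
	return 0;
}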
 648
 649/* obj begin */
 650
 651#define get_obj(obj) do { (obj)->use++; } while (0)
 652#define alive_obj(obj) ((obj)->use)
 653
 654static inline void put_obj(struct ras_manager *obj)
 655{
 656	if (obj && (--obj->use == 0)) {
 657		list_del(&obj->node);
 658		amdgpu_ras_error_data_fini(&obj->err_data);
 659	}
 660
 661	if (obj && (obj->use < 0))
  662		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
 663}
 664
 665/* make one obj and return it. */
 666static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
 667		struct ras_common_if *head)
 668{
 669	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 670	struct ras_manager *obj;
 671
 672	if (!adev->ras_enabled || !con)
 673		return NULL;
 674
 675	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
 676		return NULL;
 677
 678	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
 679		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
 680			return NULL;
 681
 682		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
 683	} else
 684		obj = &con->objs[head->block];
 685
  686	/* already exists. return obj? */
 687	if (alive_obj(obj))
 688		return NULL;
 689
 690	if (amdgpu_ras_error_data_init(&obj->err_data))
 691		return NULL;
 692
 693	obj->head = *head;
 694	obj->adev = adev;
 695	list_add(&obj->node, &con->head);
 696	get_obj(obj);
 697
 698	return obj;
 699}
 700
 701/* return an obj equal to head, or the first when head is NULL */
 702struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
 703		struct ras_common_if *head)
 704{
 705	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 706	struct ras_manager *obj;
 707	int i;
 708
 709	if (!adev->ras_enabled || !con)
 710		return NULL;
 711
 712	if (head) {
 713		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
 714			return NULL;
 715
 716		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
 717			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
 718				return NULL;
 719
 720			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
 721		} else
 722			obj = &con->objs[head->block];
 723
 724		if (alive_obj(obj))
 725			return obj;
 726	} else {
 727		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
 728			obj = &con->objs[i];
 729			if (alive_obj(obj))
 730				return obj;
 731		}
 732	}
 733
 734	return NULL;
 735}
 736/* obj end */
 737
 738/* feature ctl begin */
 739static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
 740					 struct ras_common_if *head)
 741{
 742	return adev->ras_hw_enabled & BIT(head->block);
 743}
 744
 745static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
 746		struct ras_common_if *head)
 747{
 748	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 749
 750	return con->features & BIT(head->block);
 751}
 752
 753/*
  754 * if the obj has not been created yet, create one,
  755 * then set the feature enable flag.
 756 */
 757static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
 758		struct ras_common_if *head, int enable)
 759{
 760	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 761	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
 762
  763	/* If the hardware does not support ras, do not create the obj;
  764	 * if it does, we can. The ras framework checks con->hw_supported
  765	 * to see if it needs to do the corresponding initialization, and
  766	 * each IP checks con->support to see if it needs to disable ras.
  767	 */
 769	if (!amdgpu_ras_is_feature_allowed(adev, head))
 770		return 0;
 771
 772	if (enable) {
 773		if (!obj) {
 774			obj = amdgpu_ras_create_obj(adev, head);
 775			if (!obj)
 776				return -EINVAL;
 777		} else {
 778			/* In case we create obj somewhere else */
 779			get_obj(obj);
 780		}
 781		con->features |= BIT(head->block);
 782	} else {
 783		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
 784			con->features &= ~BIT(head->block);
 785			put_obj(obj);
 786		}
 787	}
 788
 789	return 0;
 790}
 791
 792/* wrapper of psp_ras_enable_features */
 793int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
 794		struct ras_common_if *head, bool enable)
 795{
 796	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 797	union ta_ras_cmd_input *info;
 798	int ret;
 799
 800	if (!con)
 801		return -EINVAL;
 802
  803	/* For non-gfx IPs, do not enable the ras feature if it is not allowed.
  804	 * For the gfx IP, force-issue enable or disable ras feature commands
  805	 * regardless of feature support status. */
 806	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
 807	    !amdgpu_ras_is_feature_allowed(adev, head))
 808		return 0;
 809
 810	/* Only enable gfx ras feature from host side */
 811	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
 812	    !amdgpu_sriov_vf(adev) &&
 813	    !amdgpu_ras_intr_triggered()) {
 814		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
 815		if (!info)
 816			return -ENOMEM;
 817
 818		if (!enable) {
 819			info->disable_features = (struct ta_ras_disable_features_input) {
 820				.block_id =  amdgpu_ras_block_to_ta(head->block),
 821				.error_type = amdgpu_ras_error_to_ta(head->type),
 822			};
 823		} else {
 824			info->enable_features = (struct ta_ras_enable_features_input) {
 825				.block_id =  amdgpu_ras_block_to_ta(head->block),
 826				.error_type = amdgpu_ras_error_to_ta(head->type),
 827			};
 828		}
 829
 830		ret = psp_ras_enable_features(&adev->psp, info, enable);
 831		if (ret) {
 832			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
 833				enable ? "enable":"disable",
 834				get_ras_block_str(head),
 835				amdgpu_ras_is_poison_mode_supported(adev), ret);
 836			kfree(info);
 837			return ret;
 838		}
 839
 840		kfree(info);
 841	}
 842
 843	/* setup the obj */
 844	__amdgpu_ras_feature_enable(adev, head, enable);
 845
 846	return 0;
 847}
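
/*
 * Editor's sketch: enabling RAS for one block through the wrapper
 * above. The head contents mirror what the debugfs parser in this file
 * builds from "enable gfx ue"; the function name is illustrative.
 */
static int example_enable_gfx_ue(struct amdgpu_device *adev)
{
	struct ras_common_if head = {
		.block = AMDGPU_RAS_BLOCK__GFX,
		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
		.sub_block_index = 0,
	};

	return amdgpu_ras_feature_enable(adev, &head, true);
}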
 848
 849/* Only used in device probe stage and called only once. */
 850int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
 851		struct ras_common_if *head, bool enable)
 852{
 853	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 854	int ret;
 855
 856	if (!con)
 857		return -EINVAL;
 858
 859	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
 860		if (enable) {
  861			/* There is no harm in issuing a ras TA cmd regardless of
  862			 * the current ras state: if the current state equals the
  863			 * target state, it does nothing. Sometimes, however, it
  864			 * asks the driver to reset and repost, returning -EAGAIN.
 866			 */
 867			ret = amdgpu_ras_feature_enable(adev, head, 1);
  868			/* With an old ras TA, we might fail to enable ras.
  869			 * Log it and just set up the object.
  870			 * TODO: remove this workaround in the future.
 871			 */
 872			if (ret == -EINVAL) {
 873				ret = __amdgpu_ras_feature_enable(adev, head, 1);
 874				if (!ret)
 875					dev_info(adev->dev,
 876						"RAS INFO: %s setup object\n",
 877						get_ras_block_str(head));
 878			}
 879		} else {
  880			/* set up the object, then issue a ras TA disable cmd. */
 881			ret = __amdgpu_ras_feature_enable(adev, head, 1);
 882			if (ret)
 883				return ret;
 884
  885			/* gfx block ras disable cmd must be sent to the ras TA */
 886			if (head->block == AMDGPU_RAS_BLOCK__GFX)
 887				con->features |= BIT(head->block);
 888
 889			ret = amdgpu_ras_feature_enable(adev, head, 0);
 890
  891			/* clear the gfx block ras features flag */
 892			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
 893				con->features &= ~BIT(head->block);
 894		}
 895	} else
 896		ret = amdgpu_ras_feature_enable(adev, head, enable);
 897
 898	return ret;
 899}
 900
 901static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
 902		bool bypass)
 903{
 904	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 905	struct ras_manager *obj, *tmp;
 906
 907	list_for_each_entry_safe(obj, tmp, &con->head, node) {
  908		/* bypass psp, i.e. just release the obj and the
  909		 * corresponding flags
  910		 */
 911		if (bypass) {
 912			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
 913				break;
 914		} else {
 915			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
 916				break;
 917		}
 918	}
 919
 920	return con->features;
 921}
 922
 923static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
 924		bool bypass)
 925{
 926	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 927	int i;
 928	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
 929
 930	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
 931		struct ras_common_if head = {
 932			.block = i,
 933			.type = default_ras_type,
 934			.sub_block_index = 0,
 935		};
 936
 937		if (i == AMDGPU_RAS_BLOCK__MCA)
 938			continue;
 939
 940		if (bypass) {
 941			/*
  942			 * bypass psp; vbios enables ras for us,
  943			 * so just create the obj
 944			 */
 945			if (__amdgpu_ras_feature_enable(adev, &head, 1))
 946				break;
 947		} else {
 948			if (amdgpu_ras_feature_enable(adev, &head, 1))
 949				break;
 950		}
 951	}
 952
 953	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
 954		struct ras_common_if head = {
 955			.block = AMDGPU_RAS_BLOCK__MCA,
 956			.type = default_ras_type,
 957			.sub_block_index = i,
 958		};
 959
 960		if (bypass) {
 961			/*
  962			 * bypass psp; vbios enables ras for us,
  963			 * so just create the obj
 964			 */
 965			if (__amdgpu_ras_feature_enable(adev, &head, 1))
 966				break;
 967		} else {
 968			if (amdgpu_ras_feature_enable(adev, &head, 1))
 969				break;
 970		}
 971	}
 972
 973	return con->features;
 974}
 975/* feature ctl end */
 976
 977static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
 978		enum amdgpu_ras_block block)
 979{
 980	if (!block_obj)
 981		return -EINVAL;
 982
 983	if (block_obj->ras_comm.block == block)
 984		return 0;
 985
 986	return -EINVAL;
 987}
 988
 989static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
 990					enum amdgpu_ras_block block, uint32_t sub_block_index)
 991{
 992	struct amdgpu_ras_block_list *node, *tmp;
 993	struct amdgpu_ras_block_object *obj;
 994
 995	if (block >= AMDGPU_RAS_BLOCK__LAST)
 996		return NULL;
 997
 998	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
 999		if (!node->ras_obj) {
1000			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
1001			continue;
1002		}
1003
1004		obj = node->ras_obj;
1005		if (obj->ras_block_match) {
1006			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
1007				return obj;
1008		} else {
1009			if (amdgpu_ras_block_match_default(obj, block) == 0)
1010				return obj;
1011		}
1012	}
1013
1014	return NULL;
1015}
1016
1017static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
1018{
1019	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
1020	int ret = 0;
1021
1022	/*
 1023	 * choose the right query method according to whether
 1024	 * the smu supports querying error information
1025	 */
1026	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
1027	if (ret == -EOPNOTSUPP) {
1028		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1029			adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
1030			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1031
1032		/* umc query_ras_error_address is also responsible for clearing
1033		 * error status
1034		 */
1035		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1036		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
1037			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
1038	} else if (!ret) {
1039		if (adev->umc.ras &&
1040			adev->umc.ras->ecc_info_query_ras_error_count)
1041			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
1042
1043		if (adev->umc.ras &&
1044			adev->umc.ras->ecc_info_query_ras_error_address)
1045			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
1046	}
1047}
1048
1049static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
1050					      struct ras_manager *ras_mgr,
1051					      struct ras_err_data *err_data,
1052					      struct ras_query_context *qctx,
1053					      const char *blk_name,
1054					      bool is_ue,
1055					      bool is_de)
1056{
1057	struct amdgpu_smuio_mcm_config_info *mcm_info;
1058	struct ras_err_node *err_node;
1059	struct ras_err_info *err_info;
1060	u64 event_id = qctx->evid.event_id;
1061
1062	if (is_ue) {
1063		for_each_ras_error(err_node, err_data) {
1064			err_info = &err_node->err_info;
1065			mcm_info = &err_info->mcm_info;
1066			if (err_info->ue_count) {
1067				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1068					      "%lld new uncorrectable hardware errors detected in %s block\n",
1069					      mcm_info->socket_id,
1070					      mcm_info->die_id,
1071					      err_info->ue_count,
1072					      blk_name);
1073			}
1074		}
1075
1076		for_each_ras_error(err_node, &ras_mgr->err_data) {
1077			err_info = &err_node->err_info;
1078			mcm_info = &err_info->mcm_info;
1079			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1080				      "%lld uncorrectable hardware errors detected in total in %s block\n",
1081				      mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
1082		}
1083
1084	} else {
1085		if (is_de) {
1086			for_each_ras_error(err_node, err_data) {
1087				err_info = &err_node->err_info;
1088				mcm_info = &err_info->mcm_info;
1089				if (err_info->de_count) {
1090					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1091						      "%lld new deferred hardware errors detected in %s block\n",
1092						      mcm_info->socket_id,
1093						      mcm_info->die_id,
1094						      err_info->de_count,
1095						      blk_name);
1096				}
1097			}
1098
1099			for_each_ras_error(err_node, &ras_mgr->err_data) {
1100				err_info = &err_node->err_info;
1101				mcm_info = &err_info->mcm_info;
1102				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1103					      "%lld deferred hardware errors detected in total in %s block\n",
1104					      mcm_info->socket_id, mcm_info->die_id,
1105					      err_info->de_count, blk_name);
1106			}
1107		} else {
1108			for_each_ras_error(err_node, err_data) {
1109				err_info = &err_node->err_info;
1110				mcm_info = &err_info->mcm_info;
1111				if (err_info->ce_count) {
1112					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1113						      "%lld new correctable hardware errors detected in %s block\n",
1114						      mcm_info->socket_id,
1115						      mcm_info->die_id,
1116						      err_info->ce_count,
1117						      blk_name);
1118				}
1119			}
1120
1121			for_each_ras_error(err_node, &ras_mgr->err_data) {
1122				err_info = &err_node->err_info;
1123				mcm_info = &err_info->mcm_info;
1124				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1125					      "%lld correctable hardware errors detected in total in %s block\n",
1126					      mcm_info->socket_id, mcm_info->die_id,
1127					      err_info->ce_count, blk_name);
1128			}
1129		}
1130	}
1131}
1132
1133static inline bool err_data_has_source_info(struct ras_err_data *data)
1134{
1135	return !list_empty(&data->err_node_list);
1136}
1137
1138static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
1139					     struct ras_query_if *query_if,
1140					     struct ras_err_data *err_data,
1141					     struct ras_query_context *qctx)
1142{
1143	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
1144	const char *blk_name = get_ras_block_str(&query_if->head);
1145	u64 event_id = qctx->evid.event_id;
1146
1147	if (err_data->ce_count) {
1148		if (err_data_has_source_info(err_data)) {
1149			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1150							  blk_name, false, false);
1151		} else if (!adev->aid_mask &&
1152			   adev->smuio.funcs &&
1153			   adev->smuio.funcs->get_socket_id &&
1154			   adev->smuio.funcs->get_die_id) {
1155			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1156				      "%ld correctable hardware errors "
1157				      "detected in %s block\n",
1158				      adev->smuio.funcs->get_socket_id(adev),
1159				      adev->smuio.funcs->get_die_id(adev),
1160				      ras_mgr->err_data.ce_count,
1161				      blk_name);
1162		} else {
1163			RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
1164				      "detected in %s block\n",
1165				      ras_mgr->err_data.ce_count,
1166				      blk_name);
1167		}
1168	}
1169
1170	if (err_data->ue_count) {
1171		if (err_data_has_source_info(err_data)) {
1172			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1173							  blk_name, true, false);
1174		} else if (!adev->aid_mask &&
1175			   adev->smuio.funcs &&
1176			   adev->smuio.funcs->get_socket_id &&
1177			   adev->smuio.funcs->get_die_id) {
1178			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1179				      "%ld uncorrectable hardware errors "
1180				      "detected in %s block\n",
1181				      adev->smuio.funcs->get_socket_id(adev),
1182				      adev->smuio.funcs->get_die_id(adev),
1183				      ras_mgr->err_data.ue_count,
1184				      blk_name);
1185		} else {
1186			RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
1187				      "detected in %s block\n",
1188				      ras_mgr->err_data.ue_count,
1189				      blk_name);
1190		}
1191	}
1192
1193	if (err_data->de_count) {
1194		if (err_data_has_source_info(err_data)) {
1195			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1196							  blk_name, false, true);
1197		} else if (!adev->aid_mask &&
1198			   adev->smuio.funcs &&
1199			   adev->smuio.funcs->get_socket_id &&
1200			   adev->smuio.funcs->get_die_id) {
1201			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1202				      "%ld deferred hardware errors "
1203				      "detected in %s block\n",
1204				      adev->smuio.funcs->get_socket_id(adev),
1205				      adev->smuio.funcs->get_die_id(adev),
1206				      ras_mgr->err_data.de_count,
1207				      blk_name);
1208		} else {
1209			RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
1210				      "detected in %s block\n",
1211				      ras_mgr->err_data.de_count,
1212				      blk_name);
1213		}
1214	}
1215}
1216
1217static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
1218						  struct ras_query_if *query_if,
1219						  struct ras_err_data *err_data,
1220						  struct ras_query_context *qctx)
1221{
1222	unsigned long new_ue, new_ce, new_de;
1223	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
1224	const char *blk_name = get_ras_block_str(&query_if->head);
1225	u64 event_id = qctx->evid.event_id;
1226
1227	new_ce = err_data->ce_count - obj->err_data.ce_count;
1228	new_ue = err_data->ue_count - obj->err_data.ue_count;
1229	new_de = err_data->de_count - obj->err_data.de_count;
1230
1231	if (new_ce) {
1232		RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
1233			      "detected in %s block\n",
1234			      new_ce,
1235			      blk_name);
1236	}
1237
1238	if (new_ue) {
1239		RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
1240			      "detected in %s block\n",
1241			      new_ue,
1242			      blk_name);
1243	}
1244
1245	if (new_de) {
1246		RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
1247			      "detected in %s block\n",
1248			      new_de,
1249			      blk_name);
1250	}
1251}
1252
1253static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
1254{
1255	struct ras_err_node *err_node;
1256	struct ras_err_info *err_info;
1257
1258	if (err_data_has_source_info(err_data)) {
1259		for_each_ras_error(err_node, err_data) {
1260			err_info = &err_node->err_info;
1261			amdgpu_ras_error_statistic_de_count(&obj->err_data,
1262					&err_info->mcm_info, err_info->de_count);
1263			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
1264					&err_info->mcm_info, err_info->ce_count);
1265			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
1266					&err_info->mcm_info, err_info->ue_count);
1267		}
1268	} else {
1269		/* for legacy asic path which doesn't have error source info */
1270		obj->err_data.ue_count += err_data->ue_count;
1271		obj->err_data.ce_count += err_data->ce_count;
1272		obj->err_data.de_count += err_data->de_count;
1273	}
1274}
1275
1276static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
1277							     struct ras_err_data *err_data)
1278{
1279	/* Host reports absolute counts */
1280	obj->err_data.ue_count = err_data->ue_count;
1281	obj->err_data.ce_count = err_data->ce_count;
1282	obj->err_data.de_count = err_data->de_count;
1283}
1284
1285static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1286{
1287	struct ras_common_if head;
1288
1289	memset(&head, 0, sizeof(head));
1290	head.block = blk;
1291
1292	return amdgpu_ras_find_obj(adev, &head);
1293}
1294
1295int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1296			const struct aca_info *aca_info, void *data)
1297{
1298	struct ras_manager *obj;
1299
1300	/* in resume phase, no need to create aca fs node */
1301	if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
1302		return 0;
1303
1304	obj = get_ras_manager(adev, blk);
1305	if (!obj)
1306		return -EINVAL;
1307
1308	return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
1309}
1310
1311int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1312{
1313	struct ras_manager *obj;
1314
1315	obj = get_ras_manager(adev, blk);
1316	if (!obj)
1317		return -EINVAL;
1318
1319	amdgpu_aca_remove_handle(&obj->aca_handle);
1320
1321	return 0;
1322}
1323
1324static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1325					 enum aca_error_type type, struct ras_err_data *err_data,
1326					 struct ras_query_context *qctx)
1327{
1328	struct ras_manager *obj;
1329
1330	obj = get_ras_manager(adev, blk);
1331	if (!obj)
1332		return -EINVAL;
1333
1334	return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
1335}
1336
1337ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
1338				  struct aca_handle *handle, char *buf, void *data)
1339{
1340	struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
1341	struct ras_query_if info = {
1342		.head = obj->head,
1343	};
1344
1345	if (!amdgpu_ras_get_error_query_ready(obj->adev))
1346		return sysfs_emit(buf, "Query currently inaccessible\n");
1347
1348	if (amdgpu_ras_query_error_status(obj->adev, &info))
1349		return -EINVAL;
1350
1351	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
1352			  "ce", info.ce_count, "de", info.de_count);
1353}
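
/*
 * A minimal usage sketch for the read handler above, assuming RAS is
 * enabled; the card index and the block node name are illustrative:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/umc_err_count
 *	ue: 0
 *	ce: 12
 *	de: 0
 *
 * The three values mirror info.ue_count, info.ce_count and
 * info.de_count as emitted by sysfs_emit() above.
 */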
1354
1355static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
1356						struct ras_query_if *info,
1357						struct ras_err_data *err_data,
1358						struct ras_query_context *qctx,
1359						unsigned int error_query_mode)
1360{
1361	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
1362	struct amdgpu_ras_block_object *block_obj = NULL;
1363	int ret;
1364
1365	if (blk == AMDGPU_RAS_BLOCK_COUNT)
1366		return -EINVAL;
1367
1368	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
1369		return -EINVAL;
1370
1371	if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
1372		return amdgpu_virt_req_ras_err_count(adev, blk, err_data);
1373	} else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
1374		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
1375			amdgpu_ras_get_ecc_info(adev, err_data);
1376		} else {
1377			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
1378			if (!block_obj || !block_obj->hw_ops) {
1379				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1380					     get_ras_block_str(&info->head));
1381				return -EINVAL;
1382			}
1383
1384			if (block_obj->hw_ops->query_ras_error_count)
1385				block_obj->hw_ops->query_ras_error_count(adev, err_data);
1386
1387			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
1388			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
1389			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
1390				if (block_obj->hw_ops->query_ras_error_status)
1391					block_obj->hw_ops->query_ras_error_status(adev);
1392			}
1393		}
1394	} else {
1395		if (amdgpu_aca_is_enabled(adev)) {
1396			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
1397			if (ret)
1398				return ret;
1399
1400			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
1401			if (ret)
1402				return ret;
1403
1404			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
1405			if (ret)
1406				return ret;
1407		} else {
1408			/* FIXME: add code to check return value later */
1409			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
1410			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
1411		}
1412	}
1413
1414	return 0;
1415}
1416
1417/* query/inject/cure begin */
1418static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
1419						    struct ras_query_if *info,
1420						    enum ras_event_type type)
1421{
1422	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1423	struct ras_err_data err_data;
1424	struct ras_query_context qctx;
1425	unsigned int error_query_mode;
1426	int ret;
1427
1428	if (!obj)
1429		return -EINVAL;
1430
1431	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
1432		return -EINVAL;
1433
1434	ret = amdgpu_ras_error_data_init(&err_data);
1435	if (ret)
1436		return ret;
1437
1438	memset(&qctx, 0, sizeof(qctx));
1439	qctx.evid.type = type;
1440	qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);
1441
1442	if (!down_read_trylock(&adev->reset_domain->sem)) {
1443		ret = -EIO;
1444		goto out_fini_err_data;
1445	}
1446
1447	ret = amdgpu_ras_query_error_status_helper(adev, info,
1448						   &err_data,
1449						   &qctx,
1450						   error_query_mode);
1451	up_read(&adev->reset_domain->sem);
1452	if (ret)
1453		goto out_fini_err_data;
1454
1455	if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
1456		amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
1457		amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
1458	} else {
1459		/* Host provides absolute error counts. First generate the report
1460		 * using the previous VF internal count against the new host count,
1461		 * then update the VF internal count.
1462		 */
1463		amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx);
1464		amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data);
1465	}
1466
1467	info->ue_count = obj->err_data.ue_count;
1468	info->ce_count = obj->err_data.ce_count;
1469	info->de_count = obj->err_data.de_count;
1470	info->de_count = obj->err_data.de_count;
1471out_fini_err_data:
1472	amdgpu_ras_error_data_fini(&err_data);
1473
1474	return ret;
1475}
1476
1477int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
1478{
1479	return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
1480}
1481
1482int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
1483		enum amdgpu_ras_block block)
1484{
1485	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1486	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
1487	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
1488
1489	if (!block_obj || !block_obj->hw_ops) {
1490		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1491				ras_block_str(block));
1492		return -EOPNOTSUPP;
1493	}
1494
1495	if (!amdgpu_ras_is_supported(adev, block) ||
1496	    !amdgpu_ras_get_aca_debug_mode(adev))
1497		return -EOPNOTSUPP;
1498		return -EOPNOTSUPP;
1499	/* skip ras error reset in gpu reset */
1500	if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
1501	    ((smu_funcs && smu_funcs->set_debug_mode) ||
1502	     (mca_funcs && mca_funcs->mca_set_debug_mode)))
1503		return -EOPNOTSUPP;
1504
1505	if (block_obj->hw_ops->reset_ras_error_count)
1506		block_obj->hw_ops->reset_ras_error_count(adev);
1507
1508	return 0;
1509}
1510
1511int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1512		enum amdgpu_ras_block block)
1513{
1514	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1515
1516	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
1517		return 0;
1518
1519	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1520	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1521		if (block_obj->hw_ops->reset_ras_error_status)
1522			block_obj->hw_ops->reset_ras_error_status(adev);
1523	}
1524
1525	return 0;
1526}
1527
1528/* wrapper of psp_ras_trigger_error */
1529int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1530		struct ras_inject_if *info)
1531{
1532	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1533	struct ta_ras_trigger_error_input block_info = {
1534		.block_id =  amdgpu_ras_block_to_ta(info->head.block),
1535		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1536		.sub_block_index = info->head.sub_block_index,
1537		.address = info->address,
1538		.value = info->value,
1539	};
1540	int ret = -EINVAL;
1541	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1542							info->head.block,
1543							info->head.sub_block_index);
1544
1545	/* inject on guest isn't allowed, return success directly */
1546	if (amdgpu_sriov_vf(adev))
1547		return 0;
1548
1549	if (!obj)
1550		return -EINVAL;
1551
1552	if (!block_obj || !block_obj->hw_ops)	{
1553		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1554			     get_ras_block_str(&info->head));
1555		return -EINVAL;
1556	}
1557
1558	/* Calculate XGMI relative offset */
1559	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1560	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
1561		block_info.address =
1562			amdgpu_xgmi_get_relative_phy_addr(adev,
1563							  block_info.address);
1564	}
1565
1566	if (block_obj->hw_ops->ras_error_inject) {
1567		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
1568			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
1569		else /* A special ras_error_inject is defined (e.g., xgmi) */
1570			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
1571						info->instance_mask);
1572	} else {
1573		/* default path */
1574		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
1575	}
1576
1577	if (ret)
1578		dev_err(adev->dev, "ras inject %s failed %d\n",
1579			get_ras_block_str(&info->head), ret);
1580
1581	return ret;
1582}
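
/*
 * Injection is normally exercised from userspace through the debugfs
 * control node created later in this file. A minimal sketch, assuming a
 * kernel with debugfs and RAS enabled; the block, error type, addresses
 * and minor index are illustrative:
 *
 * .. code-block:: bash
 *
 *	echo "inject umc ue 0x0 0x0 0x0" > /sys/kernel/debug/dri/0/ras/ras_ctrl
 */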
1583
1584/**
1585 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
1586 * @adev: pointer to AMD GPU device
1587 * @ce_count: pointer to an integer to be set to the count of correctable errors.
1588 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
1589 * @query_info: pointer to ras_query_if
1590 *
1591 * Return 0 if the query succeeded or there was nothing to do;
1592 * otherwise return an error on failure.
1593 */
1594static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
1595					       unsigned long *ce_count,
1596					       unsigned long *ue_count,
1597					       struct ras_query_if *query_info)
1598{
1599	int ret;
1600
1601	if (!query_info)
1602		/* do nothing if query_info is not specified */
1603		return 0;
1604
1605	ret = amdgpu_ras_query_error_status(adev, query_info);
1606	if (ret)
1607		return ret;
1608
1609	*ce_count += query_info->ce_count;
1610	*ue_count += query_info->ue_count;
1611
1612	/* some hardware/IPs support read-to-clear, so there is no need to
1613	 * explicitly reset the err status after the query call */
1614	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
1615	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
1616		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
1617			dev_warn(adev->dev,
1618				 "Failed to reset error counter and error status\n");
1619	}
1620
1621	return 0;
1622}
1623
1624/**
1625 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
1626 * @adev: pointer to AMD GPU device
1627 * @ce_count: pointer to an integer to be set to the count of correctable errors.
1628 * @ue_count: pointer to an integer to be set to the count of uncorrectable
1629 * errors.
1630 * @query_info: pointer to ras_query_if if the query request is only for a
1631 * specific ip block; if info is NULL, then the query request is for
1632 * all the ip blocks that support querying ras error counters/status
1633 *
1634 * If set, @ce_count or @ue_count, count and return the corresponding
1635 * error counts in those integer pointers. Return 0 if the device
1636 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
1637 */
1638int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1639				 unsigned long *ce_count,
1640				 unsigned long *ue_count,
1641				 struct ras_query_if *query_info)
1642{
1643	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1644	struct ras_manager *obj;
1645	unsigned long ce, ue;
1646	int ret = 0;
1647
1648	if (!adev->ras_enabled || !con)
1649		return -EOPNOTSUPP;
1650
1651	/* Don't count when there is nowhere to report the counts.
1652	 */
1653	if (!ce_count && !ue_count)
1654		return 0;
1655
1656	ce = 0;
1657	ue = 0;
1658	if (!query_info) {
1659		/* query all the ip blocks that support ras query interface */
1660		list_for_each_entry(obj, &con->head, node) {
1661			struct ras_query_if info = {
1662				.head = obj->head,
1663			};
1664
1665			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1666		}
1667	} else {
1668		/* query specific ip block */
1669		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1670	}
1671
1672	if (ret)
1673		return ret;
1674
1675	if (ce_count)
1676		*ce_count = ce;
1677
1678	if (ue_count)
1679		*ue_count = ue;
1680
1681	return 0;
1682}
1683/* query/inject/cure end */
1684
1685
1686/* sysfs begin */
1687
1688static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1689		struct ras_badpage **bps, unsigned int *count);
1690
1691static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1692{
1693	switch (flags) {
1694	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1695		return "R";
1696	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1697		return "P";
1698	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1699	default:
1700		return "F";
1701	}
1702}
1703
1704/**
1705 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1706 *
1707 * It allows the user to read the bad pages of vram on the gpu through
1708 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1709 *
1710 * It outputs multiple lines, and each line stands for one gpu page.
1711 *
1712 * The format of one line is as follows,
1713 * gpu pfn : gpu page size : flags
1714 *
1715 * gpu pfn and gpu page size are printed in hex format.
1716 * flags can be one of the characters below,
1717 *
1718 * R: reserved, this gpu page is reserved and cannot be used.
1719 *
1720 * P: pending for reserve, this gpu page is marked as bad and will be
1721 * reserved in the next window of page_reserve.
1722 *
1723 * F: unable to reserve. This gpu page can't be reserved.
1724 *
1725 * Examples:
1726 *
1727 * .. code-block:: bash
1728 *
1729 *	0x00000001 : 0x00001000 : R
1730 *	0x00000002 : 0x00001000 : P
1731 *
1732 */
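
/*
 * A minimal read of the interface described above, assuming bad page
 * retirement is enabled (the card index is an example):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 */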
1733
1734static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1735		struct kobject *kobj, struct bin_attribute *attr,
1736		char *buf, loff_t ppos, size_t count)
1737{
1738	struct amdgpu_ras *con =
1739		container_of(attr, struct amdgpu_ras, badpages_attr);
1740	struct amdgpu_device *adev = con->adev;
1741	const unsigned int element_size =
1742		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1743	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1744	unsigned int end = div64_ul(ppos + count - 1, element_size);
1745	ssize_t s = 0;
1746	struct ras_badpage *bps = NULL;
1747	unsigned int bps_count = 0;
1748
1749	memset(buf, 0, count);
1750
1751	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1752		return 0;
1753
1754	for (; start < end && start < bps_count; start++)
1755		s += scnprintf(&buf[s], element_size + 1,
1756				"0x%08x : 0x%08x : %1s\n",
1757				bps[start].bp,
1758				bps[start].size,
1759				amdgpu_ras_badpage_flags_str(bps[start].flags));
1760
1761	kfree(bps);
1762
1763	return s;
1764}
1765
1766static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1767		struct device_attribute *attr, char *buf)
1768{
1769	struct amdgpu_ras *con =
1770		container_of(attr, struct amdgpu_ras, features_attr);
1771
1772	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1773}
1774
1775static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
1776		struct device_attribute *attr, char *buf)
1777{
1778	struct amdgpu_ras *con =
1779		container_of(attr, struct amdgpu_ras, version_attr);
1780	return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
1781}
1782
1783static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
1784		struct device_attribute *attr, char *buf)
1785{
1786	struct amdgpu_ras *con =
1787		container_of(attr, struct amdgpu_ras, schema_attr);
1788	return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
1789}
1790
1791static struct {
1792	enum ras_event_type type;
1793	const char *name;
1794} dump_event[] = {
1795	{RAS_EVENT_TYPE_FATAL, "Fatal Error"},
1796	{RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"},
1797	{RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"},
1798};
1799
1800static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev,
1801						 struct device_attribute *attr, char *buf)
1802{
1803	struct amdgpu_ras *con =
1804		container_of(attr, struct amdgpu_ras, event_state_attr);
1805	struct ras_event_manager *event_mgr = con->event_mgr;
1806	struct ras_event_state *event_state;
1807	int i, size = 0;
1808
1809	if (!event_mgr)
1810		return -EINVAL;
1811
1812	size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno));
1813	for (i = 0; i < ARRAY_SIZE(dump_event); i++) {
1814		event_state = &event_mgr->event_state[dump_event[i].type];
1815		size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n",
1816				      dump_event[i].name,
1817				      atomic64_read(&event_state->count),
1818				      event_state->last_seqno);
1819	}
1820
1821	return (ssize_t)size;
1822}
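
/*
 * The show handlers above all publish into the same "ras" sysfs group,
 * so a quick overview can be taken with plain reads; a sketch, with the
 * card index as an example:
 *
 * .. code-block:: bash
 *
 *	cd /sys/class/drm/card0/device/ras
 *	cat features version schema event_state
 */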
1823
1824static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1825{
1826	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1827
1828	if (adev->dev->kobj.sd)
1829		sysfs_remove_file_from_group(&adev->dev->kobj,
1830				&con->badpages_attr.attr,
1831				RAS_FS_NAME);
1832}
1833
1834static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
1835{
1836	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1837	struct attribute *attrs[] = {
1838		&con->features_attr.attr,
1839		&con->version_attr.attr,
1840		&con->schema_attr.attr,
1841		&con->event_state_attr.attr,
1842		NULL
1843	};
1844	struct attribute_group group = {
1845		.name = RAS_FS_NAME,
1846		.attrs = attrs,
1847	};
1848
1849	if (adev->dev->kobj.sd)
1850		sysfs_remove_group(&adev->dev->kobj, &group);
1851
1852	return 0;
1853}
1854
1855int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1856		struct ras_common_if *head)
1857{
1858	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1859
1860	if (amdgpu_aca_is_enabled(adev))
1861		return 0;
1862
1863	if (!obj || obj->attr_inuse)
1864		return -EINVAL;
1865
1866	get_obj(obj);
1867
1868	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1869		"%s_err_count", head->name);
1870
1871	obj->sysfs_attr = (struct device_attribute){
1872		.attr = {
1873			.name = obj->fs_data.sysfs_name,
1874			.mode = S_IRUGO,
1875		},
1876			.show = amdgpu_ras_sysfs_read,
1877	};
1878	sysfs_attr_init(&obj->sysfs_attr.attr);
1879
1880	if (sysfs_add_file_to_group(&adev->dev->kobj,
1881				&obj->sysfs_attr.attr,
1882				RAS_FS_NAME)) {
1883		put_obj(obj);
1884		return -EINVAL;
1885	}
1886
1887	obj->attr_inuse = 1;
1888
1889	return 0;
1890}
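
/*
 * Once this has run for each enabled block, the "ras" group contains
 * one counter node per block; a sketch (node names depend on the ASIC):
 *
 * .. code-block:: bash
 *
 *	ls /sys/class/drm/card0/device/ras/*_err_count
 */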
1891
1892int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1893		struct ras_common_if *head)
1894{
1895	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1896
1897	if (amdgpu_aca_is_enabled(adev))
1898		return 0;
1899
1900	if (!obj || !obj->attr_inuse)
1901		return -EINVAL;
1902
1903	if (adev->dev->kobj.sd)
1904		sysfs_remove_file_from_group(&adev->dev->kobj,
1905				&obj->sysfs_attr.attr,
1906				RAS_FS_NAME);
1907	obj->attr_inuse = 0;
1908	put_obj(obj);
1909
1910	return 0;
1911}
1912
1913static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1914{
1915	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1916	struct ras_manager *obj, *tmp;
1917
1918	list_for_each_entry_safe(obj, tmp, &con->head, node) {
1919		amdgpu_ras_sysfs_remove(adev, &obj->head);
1920	}
1921
1922	if (amdgpu_bad_page_threshold != 0)
1923		amdgpu_ras_sysfs_remove_bad_page_node(adev);
1924
1925	amdgpu_ras_sysfs_remove_dev_attr_node(adev);
1926
1927	return 0;
1928}
1929/* sysfs end */
1930
1931/**
1932 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1933 *
1934 * Normally when there is an uncorrectable error, the driver will reset
1935 * the GPU to recover.  However, for unrecoverable errors, the driver
1936 * also provides an interface to reboot the system automatically when
1937 * such an error occurs.
1938 *
1939 * The following file in debugfs provides that interface:
1940 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1941 *
1942 * Usage:
1943 *
1944 * .. code-block:: bash
1945 *
1946 *	echo true > .../ras/auto_reboot
1947 *
1948 */
1949/* debugfs begin */
1950static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1951{
1952	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1953	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
1954	struct drm_minor  *minor = adev_to_drm(adev)->primary;
1955	struct dentry     *dir;
1956
1957	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1958	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1959			    &amdgpu_ras_debugfs_ctrl_ops);
1960	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1961			    &amdgpu_ras_debugfs_eeprom_ops);
1962	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1963			   &con->bad_page_cnt_threshold);
1964	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
1965	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1966	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1967	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1968			    &amdgpu_ras_debugfs_eeprom_size_ops);
1969	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1970						       S_IRUGO, dir, adev,
1971						       &amdgpu_ras_debugfs_eeprom_table_ops);
1972	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1973
1974	/*
1975	 * After an uncorrectable error happens, GPU recovery will usually
1976	 * be scheduled. But since GPU recovery is known to sometimes fail
1977	 * to bring the GPU back, the interface below gives the user a
1978	 * direct way to reboot the system automatically when an
1979	 * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal
1980	 * GPU recovery routine will never be called.
1981	 */
1982	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1983
1984	/*
1985	 * The user can set this so that the error count registers of the
1986	 * RAS IPs are not cleaned up during ras recovery.
1987	 */
1988	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1989			    &con->disable_ras_err_cnt_harvest);
1990	return dir;
1991}
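
/*
 * A sketch of inspecting the nodes created above, assuming debugfs is
 * mounted at the usual location (the minor index is an example):
 *
 * .. code-block:: bash
 *
 *	ls /sys/kernel/debug/dri/0/ras
 *	cat /sys/kernel/debug/dri/0/ras/bad_page_cnt_threshold
 */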
1992
1993static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1994				      struct ras_fs_if *head,
1995				      struct dentry *dir)
1996{
1997	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1998
1999	if (!obj || !dir)
2000		return;
2001
2002	get_obj(obj);
2003
2004	memcpy(obj->fs_data.debugfs_name,
2005			head->debugfs_name,
2006			sizeof(obj->fs_data.debugfs_name));
2007
2008	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
2009			    obj, &amdgpu_ras_debugfs_ops);
2010}
2011
2012static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
2013{
2014	bool ret;
2015
2016	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2017	case IP_VERSION(13, 0, 6):
2018	case IP_VERSION(13, 0, 14):
2019		ret = true;
2020		break;
2021	default:
2022		ret = false;
2023		break;
2024	}
2025
2026	return ret;
2027}
2028
2029void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
2030{
2031	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2032	struct dentry *dir;
2033	struct ras_manager *obj;
2034	struct ras_fs_if fs_info;
2035
2036	/*
2037	 * this won't be called in the resume path, so there is no need to
2038	 * check the suspend and gpu reset status
2039	 */
2040	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
2041		return;
2042
2043	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
2044
2045	list_for_each_entry(obj, &con->head, node) {
2046		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
2047			(obj->attr_inuse == 1)) {
2048			sprintf(fs_info.debugfs_name, "%s_err_inject",
2049					get_ras_block_str(&obj->head));
2050			fs_info.head = obj->head;
2051			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
2052		}
2053	}
2054
2055	if (amdgpu_ras_aca_is_supported(adev)) {
2056		if (amdgpu_aca_is_enabled(adev))
2057			amdgpu_aca_smu_debugfs_init(adev, dir);
2058		else
2059			amdgpu_mca_smu_debugfs_init(adev, dir);
2060	}
2061}
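
/*
 * With the above in place, each supported block also gets a
 * "<block>_err_inject" node next to ras_ctrl; for example:
 *
 * .. code-block:: bash
 *
 *	ls /sys/kernel/debug/dri/0/ras/*_err_inject
 */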
2062
2063/* debugfs end */
2064
2065/* ras fs */
2066static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
2067		amdgpu_ras_sysfs_badpages_read, NULL, 0);
2068static DEVICE_ATTR(features, S_IRUGO,
2069		amdgpu_ras_sysfs_features_read, NULL);
2070static DEVICE_ATTR(version, 0444,
2071		amdgpu_ras_sysfs_version_show, NULL);
2072static DEVICE_ATTR(schema, 0444,
2073		amdgpu_ras_sysfs_schema_show, NULL);
2074static DEVICE_ATTR(event_state, 0444,
2075		   amdgpu_ras_sysfs_event_state_show, NULL);
2076static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
2077{
2078	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2079	struct attribute_group group = {
2080		.name = RAS_FS_NAME,
2081	};
2082	struct attribute *attrs[] = {
2083		&con->features_attr.attr,
2084		&con->version_attr.attr,
2085		&con->schema_attr.attr,
2086		&con->event_state_attr.attr,
2087		NULL
2088	};
2089	struct bin_attribute *bin_attrs[] = {
2090		NULL,
2091		NULL,
2092	};
2093	int r;
2094
2095	group.attrs = attrs;
2096
2097	/* add features entry */
2098	con->features_attr = dev_attr_features;
2099	sysfs_attr_init(attrs[0]);
2100
2101	/* add version entry */
2102	con->version_attr = dev_attr_version;
2103	sysfs_attr_init(attrs[1]);
2104
2105	/* add schema entry */
2106	con->schema_attr = dev_attr_schema;
2107	sysfs_attr_init(attrs[2]);
2108
2109	/* add event_state entry */
2110	con->event_state_attr = dev_attr_event_state;
2111	sysfs_attr_init(attrs[3]);
2112
2113	if (amdgpu_bad_page_threshold != 0) {
2114		/* add bad_page_features entry */
2115		bin_attr_gpu_vram_bad_pages.private = NULL;
2116		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
2117		bin_attrs[0] = &con->badpages_attr;
2118		group.bin_attrs = bin_attrs;
2119		sysfs_bin_attr_init(bin_attrs[0]);
2120	}
2121
2122	r = sysfs_create_group(&adev->dev->kobj, &group);
2123	if (r)
2124		dev_err(adev->dev, "Failed to create RAS sysfs group!");
2125
2126	return 0;
2127}
2128
2129static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
2130{
2131	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2132	struct ras_manager *con_obj, *ip_obj, *tmp;
2133
2134	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2135		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
2136			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
2137			if (ip_obj)
2138				put_obj(ip_obj);
2139		}
2140	}
2141
2142	amdgpu_ras_sysfs_remove_all(adev);
2143	return 0;
2144}
2145/* ras fs end */
2146
2147/* ih begin */
2148
2149/* For hardware that cannot enable the bif ring for both the ras_controller_irq
2150 * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
2151 * register to check whether the interrupt has triggered or not, and properly
2152 * ack the interrupt if it is there
2153 */
2154void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2155{
2156	/* Fatal error events are handled on host side */
2157	if (amdgpu_sriov_vf(adev))
2158		return;
2159
2160	if (adev->nbio.ras &&
2161	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2162		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2163
2164	if (adev->nbio.ras &&
2165	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2166		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2167}
2168
2169static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2170				struct amdgpu_iv_entry *entry)
2171{
2172	bool poison_stat = false;
2173	struct amdgpu_device *adev = obj->adev;
2174	struct amdgpu_ras_block_object *block_obj =
2175		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2176	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2177	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
2178	u64 event_id;
2179	int ret;
2180
2181	if (!block_obj || !con)
2182		return;
2183
2184	ret = amdgpu_ras_mark_ras_event(adev, type);
2185	if (ret)
2186		return;
2187
2188	/* both query_poison_status and handle_poison_consumption are optional,
2189	 * but at least one of them should be implemented if the poison
2190	 * consumption handler is needed
2191	 */
2192	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2193		poison_stat = block_obj->hw_ops->query_poison_status(adev);
2194		if (!poison_stat) {
2195			/* Not poison consumption interrupt, no need to handle it */
2196			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2197					block_obj->ras_comm.name);
2198
2199			return;
2200		}
2201	}
2202
2203	amdgpu_umc_poison_handler(adev, obj->head.block, 0);
2204
2205	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
2206		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2207
2208	/* gpu reset is the fallback for the failed and default cases.
2209	 * For the RMA case, amdgpu_umc_poison_handler will handle the gpu reset.
2210	 */
2211	if (poison_stat && !amdgpu_ras_is_rma(adev)) {
2212		event_id = amdgpu_ras_acquire_event_id(adev, type);
2213		RAS_EVENT_LOG(adev, event_id,
2214			      "GPU reset for %s RAS poison consumption is issued!\n",
2215			      block_obj->ras_comm.name);
2216		amdgpu_ras_reset_gpu(adev);
2217	}
2218
2219	if (!poison_stat)
2220		amdgpu_gfx_poison_consumption_handler(adev, entry);
2221}
2222
2223static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2224				struct amdgpu_iv_entry *entry)
2225{
2226	struct amdgpu_device *adev = obj->adev;
2227	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2228	u64 event_id;
2229	int ret;
2230
2231	ret = amdgpu_ras_mark_ras_event(adev, type);
2232	if (ret)
2233		return;
2234
2235	event_id = amdgpu_ras_acquire_event_id(adev, type);
2236	RAS_EVENT_LOG(adev, event_id, "Poison is created\n");
2237
2238	if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
2239		struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
2240
2241		atomic_inc(&con->page_retirement_req_cnt);
2242		atomic_inc(&con->poison_creation_count);
2243
2244		wake_up(&con->page_retirement_wq);
2245	}
2246}
2247
2248static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2249				struct amdgpu_iv_entry *entry)
2250{
2251	struct ras_ih_data *data = &obj->ih_data;
2252	struct ras_err_data err_data;
2253	int ret;
2254
2255	if (!data->cb)
2256		return;
2257
2258	ret = amdgpu_ras_error_data_init(&err_data);
2259	if (ret)
2260		return;
2261
2262	/* Let the IP handle its data; we may need to get the output
2263	 * from the callback to update the error type/count, etc.
2264	 */
2265	amdgpu_ras_set_fed(obj->adev, true);
2266	ret = data->cb(obj->adev, &err_data, entry);
2267	/* a ue will trigger an interrupt, and in that case
2268	 * we need to do a reset to recover the whole system.
2269	 * But leave that recovery to the IP; here we just dispatch
2270	 * the error.
2271	 */
2272	if (ret == AMDGPU_RAS_SUCCESS) {
2273		/* these counts could be left as 0 since
2274		 * some blocks do not count error numbers
2275		 */
2276		obj->err_data.ue_count += err_data.ue_count;
2277		obj->err_data.ce_count += err_data.ce_count;
2278		obj->err_data.de_count += err_data.de_count;
2279	}
2280
2281	amdgpu_ras_error_data_fini(&err_data);
2282}
2283
2284static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
2285{
2286	struct ras_ih_data *data = &obj->ih_data;
2287	struct amdgpu_iv_entry entry;
2288
2289	while (data->rptr != data->wptr) {
2290		rmb();
2291		memcpy(&entry, &data->ring[data->rptr],
2292				data->element_size);
2293
2294		wmb();
2295		data->rptr = (data->aligned_element_size +
2296				data->rptr) % data->ring_size;
2297
2298		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
2299			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2300				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
2301			else
2302				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
2303		} else {
2304			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2305				amdgpu_ras_interrupt_umc_handler(obj, &entry);
2306			else
2307				dev_warn(obj->adev->dev,
2308					"No RAS interrupt handler for non-UMC block with poison disabled.\n");
2309		}
2310	}
2311}
2312
2313static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
2314{
2315	struct ras_ih_data *data =
2316		container_of(work, struct ras_ih_data, ih_work);
2317	struct ras_manager *obj =
2318		container_of(data, struct ras_manager, ih_data);
2319
2320	amdgpu_ras_interrupt_handler(obj);
2321}
2322
2323int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
2324		struct ras_dispatch_if *info)
2325{
2326	struct ras_manager *obj;
2327	struct ras_ih_data *data;
2328
2329	obj = amdgpu_ras_find_obj(adev, &info->head);
2330	if (!obj)
2331		return -EINVAL;
2332
2333	data = &obj->ih_data;
2334
2335	if (data->inuse == 0)
2336		return 0;
2337
2338	/* The ring might overflow here... */
2339	memcpy(&data->ring[data->wptr], info->entry,
2340			data->element_size);
2341
2342	wmb();
2343	data->wptr = (data->aligned_element_size +
2344			data->wptr) % data->ring_size;
2345
2346	schedule_work(&data->ih_work);
2347
2348	return 0;
2349}
2350
2351int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2352		struct ras_common_if *head)
2353{
2354	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2355	struct ras_ih_data *data;
2356
2357	if (!obj)
2358		return -EINVAL;
2359
2360	data = &obj->ih_data;
2361	if (data->inuse == 0)
2362		return 0;
2363
2364	cancel_work_sync(&data->ih_work);
2365
2366	kfree(data->ring);
2367	memset(data, 0, sizeof(*data));
2368	put_obj(obj);
2369
2370	return 0;
2371}
2372
2373int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2374		struct ras_common_if *head)
2375{
2376	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2377	struct ras_ih_data *data;
2378	struct amdgpu_ras_block_object *ras_obj;
2379
2380	if (!obj) {
2381		/* in case we register the IH before enabling the ras feature */
2382		obj = amdgpu_ras_create_obj(adev, head);
2383		if (!obj)
2384			return -EINVAL;
2385	} else
2386		get_obj(obj);
2387
2388	ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2389
2390	data = &obj->ih_data;
2391	/* add the callback, etc. */
2392	*data = (struct ras_ih_data) {
2393		.inuse = 0,
2394		.cb = ras_obj->ras_cb,
2395		.element_size = sizeof(struct amdgpu_iv_entry),
2396		.rptr = 0,
2397		.wptr = 0,
2398	};
2399
2400	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2401
2402	data->aligned_element_size = ALIGN(data->element_size, 8);
2403	/* the ring can store 64 iv entries. */
2404	data->ring_size = 64 * data->aligned_element_size;
2405	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2406	if (!data->ring) {
2407		put_obj(obj);
2408		return -ENOMEM;
2409	}
2410
2411	/* IH is ready */
2412	data->inuse = 1;
2413
2414	return 0;
2415}
2416
2417static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2418{
2419	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2420	struct ras_manager *obj, *tmp;
2421
2422	list_for_each_entry_safe(obj, tmp, &con->head, node) {
2423		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2424	}
2425
2426	return 0;
2427}
2428/* ih end */
2429
2430/* traverse all IPs except NBIO to query the error counter */
2431static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
2432{
2433	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2434	struct ras_manager *obj;
2435
2436	if (!adev->ras_enabled || !con)
2437		return;
2438
2439	list_for_each_entry(obj, &con->head, node) {
2440		struct ras_query_if info = {
2441			.head = obj->head,
2442		};
2443
2444		/*
2445		 * The PCIE_BIF IP has a separate isr for the ras controller
2446		 * interrupt, and the specific ras counter query will be
2447		 * done in that isr. So skip such blocks from the common
2448		 * sync flood interrupt isr path.
2449		 */
2450		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2451			continue;
2452
2453		/*
2454		 * this is a workaround for aldebaran: skip sending the msg to
2455		 * smu to get the ecc_info table, since smu temporarily fails
2456		 * to handle that request.
2457		 * this should be removed once smu handles the ecc_info table.
2458		 */
2459		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2460		    (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2461		     IP_VERSION(13, 0, 2)))
2462			continue;
2463
2464		amdgpu_ras_query_error_status_with_event(adev, &info, type);
2465
2466		if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2467			    IP_VERSION(11, 0, 2) &&
2468		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2469			    IP_VERSION(11, 0, 4) &&
2470		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2471			    IP_VERSION(13, 0, 0)) {
2472			if (amdgpu_ras_reset_error_status(adev, info.head.block))
2473				dev_warn(adev->dev, "Failed to reset error counter and error status");
2474		}
2475	}
2476}
2477
2478/* Parse RdRspStatus and WrRspStatus */
2479static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2480					  struct ras_query_if *info)
2481{
2482	struct amdgpu_ras_block_object *block_obj;
2483	/*
2484	 * Only two blocks need to query the read/write
2485	 * RspStatus in the current state
2486	 */
2487	if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2488		(info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2489		return;
2490
2491	block_obj = amdgpu_ras_get_ras_block(adev,
2492					info->head.block,
2493					info->head.sub_block_index);
2494
2495	if (!block_obj || !block_obj->hw_ops) {
2496		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2497			     get_ras_block_str(&info->head));
2498		return;
2499	}
2500
2501	if (block_obj->hw_ops->query_ras_error_status)
2502		block_obj->hw_ops->query_ras_error_status(adev);
2503
2504}
2505
2506static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2507{
2508	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2509	struct ras_manager *obj;
2510
2511	if (!adev->ras_enabled || !con)
2512		return;
2513
2514	list_for_each_entry(obj, &con->head, node) {
2515		struct ras_query_if info = {
2516			.head = obj->head,
2517		};
2518
2519		amdgpu_ras_error_status_query(adev, &info);
2520	}
2521}
2522
2523/* recovery begin */
2524
2525/* return 0 on success.
2526 * the caller needs to free bps.
2527 */
2528static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2529		struct ras_badpage **bps, unsigned int *count)
2530{
2531	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2532	struct ras_err_handler_data *data;
2533	int i = 0;
2534	int ret = 0, status;
2535
2536	if (!con || !con->eh_data || !bps || !count)
2537		return -EINVAL;
2538
2539	mutex_lock(&con->recovery_lock);
2540	data = con->eh_data;
2541	if (!data || data->count == 0) {
2542		*bps = NULL;
2543		ret = -EINVAL;
2544		goto out;
2545	}
2546
2547	*bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
2548	if (!*bps) {
2549		ret = -ENOMEM;
2550		goto out;
2551	}
2552
2553	for (; i < data->count; i++) {
2554		(*bps)[i] = (struct ras_badpage){
2555			.bp = data->bps[i].retired_page,
2556			.size = AMDGPU_GPU_PAGE_SIZE,
2557			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2558		};
2559		status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2560				data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
2561		if (status == -EBUSY)
2562			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2563		else if (status == -ENOENT)
2564			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2565	}
2566
2567	*count = data->count;
2568out:
2569	mutex_unlock(&con->recovery_lock);
2570	return ret;
2571}
2572
2573static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
2574				   struct amdgpu_hive_info *hive, bool status)
2575{
2576	struct amdgpu_device *tmp_adev;
2577
2578	if (hive) {
2579		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
2580			amdgpu_ras_set_fed(tmp_adev, status);
2581	} else {
2582		amdgpu_ras_set_fed(adev, status);
2583	}
2584}
2585
2586bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
2587{
2588	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2589	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2590	int hive_ras_recovery = 0;
2591
2592	if (hive) {
2593		hive_ras_recovery = atomic_read(&hive->ras_recovery);
2594		amdgpu_put_xgmi_hive(hive);
2595	}
2596
2597	if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
2598		return true;
2599
2600	return false;
2601}
2602
2603static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev)
2604{
2605	if (amdgpu_ras_intr_triggered())
2606		return RAS_EVENT_TYPE_FATAL;
2607	else
2608		return RAS_EVENT_TYPE_POISON_CONSUMPTION;
2609}
2610
2611static void amdgpu_ras_do_recovery(struct work_struct *work)
2612{
2613	struct amdgpu_ras *ras =
2614		container_of(work, struct amdgpu_ras, recovery_work);
2615	struct amdgpu_device *remote_adev = NULL;
2616	struct amdgpu_device *adev = ras->adev;
2617	struct list_head device_list, *device_list_handle =  NULL;
2618	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2619	enum ras_event_type type;
2620
2621	if (hive) {
2622		atomic_set(&hive->ras_recovery, 1);
2623
2624		/* If any device which is part of the hive received RAS fatal
2625		 * error interrupt, set fatal error status on all. This
2626		 * condition will need a recovery, and flag will be cleared
2627		 * as part of recovery.
2628		 */
2629		list_for_each_entry(remote_adev, &hive->device_list,
2630				    gmc.xgmi.head)
2631			if (amdgpu_ras_get_fed_status(remote_adev)) {
2632				amdgpu_ras_set_fed_all(adev, hive, true);
2633				break;
2634			}
2635	}
2636	if (!ras->disable_ras_err_cnt_harvest) {
2637
2638		/* Build list of devices to query RAS related errors */
2639		if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2640			device_list_handle = &hive->device_list;
2641		} else {
2642			INIT_LIST_HEAD(&device_list);
2643			list_add_tail(&adev->gmc.xgmi.head, &device_list);
2644			device_list_handle = &device_list;
2645		}
2646
2647		type = amdgpu_ras_get_fatal_error_event(adev);
2648		list_for_each_entry(remote_adev,
2649				device_list_handle, gmc.xgmi.head) {
2650			amdgpu_ras_query_err_status(remote_adev);
2651			amdgpu_ras_log_on_err_counter(remote_adev, type);
2652		}
2653
2654	}
2655
2656	if (amdgpu_device_should_recover_gpu(ras->adev)) {
2657		struct amdgpu_reset_context reset_context;
2658		memset(&reset_context, 0, sizeof(reset_context));
2659
2660		reset_context.method = AMD_RESET_METHOD_NONE;
2661		reset_context.reset_req_dev = adev;
2662		reset_context.src = AMDGPU_RESET_SRC_RAS;
2663		set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
2664
2665		/* Perform full reset in fatal error mode */
2666		if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2667			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2668		else {
2669			clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2670
2671			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2672				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2673				reset_context.method = AMD_RESET_METHOD_MODE2;
2674			}
2675
2676			/* If a fatal error occurs in poison mode, mode1 reset is
2677			 * used to recover the gpu.
2678			 */
2679			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2680				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2681				set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2682
2683				psp_fatal_error_recovery_quirk(&adev->psp);
2684			}
2685		}
2686
2687		amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2688	}
2689	atomic_set(&ras->in_recovery, 0);
2690	if (hive) {
2691		atomic_set(&hive->ras_recovery, 0);
2692		amdgpu_put_xgmi_hive(hive);
2693	}
2694}
2695
2696/* alloc/realloc bps array */
2697static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2698		struct ras_err_handler_data *data, int pages)
2699{
2700	unsigned int old_space = data->count + data->space_left;
2701	unsigned int new_space = old_space + pages;
2702	unsigned int align_space = ALIGN(new_space, 512);
2703	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2704
2705	if (!bps) {
2706		return -ENOMEM;
2707	}
2708
2709	if (data->bps) {
2710		memcpy(bps, data->bps,
2711				data->count * sizeof(*data->bps));
2712		kfree(data->bps);
2713	}
2714
2715	data->bps = bps;
2716	data->space_left += align_space - old_space;
2717	return 0;
2718}
2719
2720/* it deals with vram only. */
2721int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2722		struct eeprom_table_record *bps, int pages)
2723{
2724	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2725	struct ras_err_handler_data *data;
2726	int ret = 0;
2727	uint32_t i;
2728
2729	if (!con || !con->eh_data || !bps || pages <= 0)
2730		return 0;
2731
2732	mutex_lock(&con->recovery_lock);
2733	data = con->eh_data;
2734	if (!data)
2735		goto out;
2736
2737	for (i = 0; i < pages; i++) {
2738		if (amdgpu_ras_check_bad_page_unlock(con,
2739			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2740			continue;
2741
2742		if (!data->space_left &&
2743			amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2744			ret = -ENOMEM;
2745			goto out;
2746		}
2747
2748		amdgpu_ras_reserve_page(adev, bps[i].retired_page);
2749
2750		memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2751		data->count++;
2752		data->space_left--;
2753	}
2754out:
2755	mutex_unlock(&con->recovery_lock);
2756
2757	return ret;
2758}
2759
2760/*
2761 * write the error record array to eeprom; the function should be
2762 * protected by recovery_lock
2763 * new_cnt: newly added UE count, excluding reserved bad pages, can be NULL
2764 */
2765int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2766		unsigned long *new_cnt)
2767{
2768	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2769	struct ras_err_handler_data *data;
2770	struct amdgpu_ras_eeprom_control *control;
2771	int save_count;
2772
2773	if (!con || !con->eh_data) {
2774		if (new_cnt)
2775			*new_cnt = 0;
2776
2777		return 0;
2778	}
2779
2780	mutex_lock(&con->recovery_lock);
2781	control = &con->eeprom_control;
2782	data = con->eh_data;
2783	save_count = data->count - control->ras_num_recs;
2784	mutex_unlock(&con->recovery_lock);
2785
2786	if (new_cnt)
2787		*new_cnt = save_count / adev->umc.retire_unit;
2788
2789	/* only new entries are saved */
2790	if (save_count > 0) {
2791		if (amdgpu_ras_eeprom_append(control,
2792					     &data->bps[control->ras_num_recs],
2793					     save_count)) {
2794			dev_err(adev->dev, "Failed to save EEPROM table data!");
2795			return -EIO;
2796		}
2797
2798		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2799	}
2800
2801	return 0;
2802}
2803
2804/*
2805 * read error record array in eeprom and reserve enough space for
2806 * storing new bad pages
2807 */
2808static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2809{
2810	struct amdgpu_ras_eeprom_control *control =
2811		&adev->psp.ras_context.ras->eeprom_control;
2812	struct eeprom_table_record *bps;
2813	int ret;
2814
2815	/* no bad page record, skip eeprom access */
2816	if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2817		return 0;
2818
2819	bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2820	if (!bps)
2821		return -ENOMEM;
2822
2823	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2824	if (ret)
2825		dev_err(adev->dev, "Failed to load EEPROM table records!");
2826	else
2827		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2828
2829	kfree(bps);
2830	return ret;
2831}
2832
2833static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2834				uint64_t addr)
2835{
2836	struct ras_err_handler_data *data = con->eh_data;
2837	int i;
2838
2839	addr >>= AMDGPU_GPU_PAGE_SHIFT;
2840	for (i = 0; i < data->count; i++)
2841		if (addr == data->bps[i].retired_page)
2842			return true;
2843
2844	return false;
2845}
2846
2847/*
2848 * check if an address belongs to a bad page
2849 *
2850 * Note: this check is only for the umc block
2851 */
2852static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2853				uint64_t addr)
2854{
2855	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2856	bool ret = false;
2857
2858	if (!con || !con->eh_data)
2859		return ret;
2860
2861	mutex_lock(&con->recovery_lock);
2862	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2863	mutex_unlock(&con->recovery_lock);
2864	return ret;
2865}
2866
2867static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2868					  uint32_t max_count)
2869{
2870	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2871
2872	/*
2873	 * Justification of value bad_page_cnt_threshold in ras structure
2874	 *
2875	 * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
2876	 * in eeprom or amdgpu_bad_page_threshold == -2, which gives the
2877	 * two scenarios below.
2878	 *
2879	 * Bad page retirement enablement:
2880	 *    - If amdgpu_bad_page_threshold = -2,
2881	 *      bad_page_cnt_threshold = typical value by formula.
2882	 *
2883	 *    - When the value from user is 0 < amdgpu_bad_page_threshold <
2884	 *      max record length in eeprom, use it directly.
2885	 *
2886	 * Bad page retirement disablement:
2887	 *    - If amdgpu_bad_page_threshold = 0, bad page retirement
2888	 *      functionality is disabled, and bad_page_cnt_threshold will
2889	 *      have no effect.
2890	 */
2891
2892	if (amdgpu_bad_page_threshold < 0) {
2893		u64 val = adev->gmc.mc_vram_size;
2894
2895		do_div(val, RAS_BAD_PAGE_COVER);
2896		con->bad_page_cnt_threshold = min(lower_32_bits(val),
2897						  max_count);
2898	} else {
2899		con->bad_page_cnt_threshold = min_t(int, max_count,
2900						    amdgpu_bad_page_threshold);
2901	}
2902}
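
/*
 * A worked example of the default (-2) path above, assuming
 * RAS_BAD_PAGE_COVER is 100MB: a 16GB board gives
 * 16384MB / 100MB = 163 (integer division), so bad_page_cnt_threshold
 * defaults to min(163, max_count).
 */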
2903
2904int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
2905		enum amdgpu_ras_block block, uint16_t pasid,
2906		pasid_notify pasid_fn, void *data, uint32_t reset)
2907{
2908	int ret = 0;
2909	struct ras_poison_msg poison_msg;
2910	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2911
2912	memset(&poison_msg, 0, sizeof(poison_msg));
2913	poison_msg.block = block;
2914	poison_msg.pasid = pasid;
2915	poison_msg.reset = reset;
2916	poison_msg.pasid_fn = pasid_fn;
2917	poison_msg.data = data;
2918
2919	ret = kfifo_put(&con->poison_fifo, poison_msg);
2920	if (!ret) {
2921		dev_err(adev->dev, "Poison message fifo is full!\n");
2922		return -ENOSPC;
2923	}
2924
2925	return 0;
2926}
2927
2928static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
2929		struct ras_poison_msg *poison_msg)
2930{
2931	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2932
2933	return kfifo_get(&con->poison_fifo, poison_msg);
2934}
2935
2936static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
2937{
2938	mutex_init(&ecc_log->lock);
2939
2940	INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
2941	ecc_log->de_queried_count = 0;
2942	ecc_log->prev_de_queried_count = 0;
2943}
2944
2945static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
2946{
2947	struct radix_tree_iter iter;
2948	void __rcu **slot;
2949	struct ras_ecc_err *ecc_err;
2950
2951	mutex_lock(&ecc_log->lock);
2952	radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
2953		ecc_err = radix_tree_deref_slot(slot);
2954		kfree(ecc_err->err_pages.pfn);
2955		kfree(ecc_err);
2956		radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
2957	}
2958	mutex_unlock(&ecc_log->lock);
2959
2960	mutex_destroy(&ecc_log->lock);
2961	ecc_log->de_queried_count = 0;
2962	ecc_log->prev_de_queried_count = 0;
2963}
2964
2965static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
2966				uint32_t delayed_ms)
2967{
2968	int ret;
2969
2970	mutex_lock(&con->umc_ecc_log.lock);
2971	ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
2972			UMC_ECC_NEW_DETECTED_TAG);
2973	mutex_unlock(&con->umc_ecc_log.lock);
2974
2975	if (ret)
2976		schedule_delayed_work(&con->page_retirement_dwork,
2977			msecs_to_jiffies(delayed_ms));
2978
2979	return ret ? true : false;
2980}
2981
2982static void amdgpu_ras_do_page_retirement(struct work_struct *work)
2983{
2984	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2985					      page_retirement_dwork.work);
2986	struct amdgpu_device *adev = con->adev;
2987	struct ras_err_data err_data;
2988	unsigned long err_cnt;
2989
2990	/* If gpu reset is ongoing, delay retiring the bad pages */
2991	if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
2992		amdgpu_ras_schedule_retirement_dwork(con,
2993				AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3);
2994		return;
2995	}
2996
2997	amdgpu_ras_error_data_init(&err_data);
2998
2999	amdgpu_umc_handle_bad_pages(adev, &err_data);
3000	err_cnt = err_data.err_addr_cnt;
3001
3002	amdgpu_ras_error_data_fini(&err_data);
3003
3004	if (err_cnt && amdgpu_ras_is_rma(adev))
3005		amdgpu_ras_reset_gpu(adev);
3006
3007	amdgpu_ras_schedule_retirement_dwork(con,
3008			AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
3009}
3010
3011static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
3012				uint32_t poison_creation_count)
3013{
3014	int ret = 0;
3015	struct ras_ecc_log_info *ecc_log;
3016	struct ras_query_if info;
3017	uint32_t timeout = 0;
3018	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3019	uint64_t de_queried_count;
3020	uint32_t new_detect_count, total_detect_count;
3021	uint32_t need_query_count = poison_creation_count;
3022	bool query_data_timeout = false;
3023	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
3024
3025	memset(&info, 0, sizeof(info));
3026	info.head.block = AMDGPU_RAS_BLOCK__UMC;
3027
3028	ecc_log = &ras->umc_ecc_log;
3029	total_detect_count = 0;
3030	do {
3031		ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
3032		if (ret)
3033			return ret;
3034
3035		de_queried_count = ecc_log->de_queried_count;
3036		if (de_queried_count > ecc_log->prev_de_queried_count) {
3037			new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
3038			ecc_log->prev_de_queried_count = de_queried_count;
3039			timeout = 0;
3040		} else {
3041			new_detect_count = 0;
3042		}
3043
3044		if (new_detect_count) {
3045			total_detect_count += new_detect_count;
3046		} else {
3047			if (!timeout && need_query_count)
3048				timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
3049
3050			if (timeout) {
3051				if (!--timeout) {
3052					query_data_timeout = true;
3053					break;
3054				}
3055				msleep(1);
3056			}
3057		}
3058	} while (total_detect_count < need_query_count);
3059
3060	if (query_data_timeout) {
3061		dev_warn(adev->dev, "Can't find deferred error! count: %u\n",
3062			(need_query_count - total_detect_count));
3063		return -ENOENT;
3064	}
3065
3066	if (total_detect_count)
3067		schedule_delayed_work(&ras->page_retirement_dwork, 0);
3068
3069	return 0;
3070}
3071
3072static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev)
3073{
3074	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3075	struct ras_poison_msg msg;
3076	int ret;
3077
3078	do {
3079		ret = kfifo_get(&con->poison_fifo, &msg);
3080	} while (ret);
3081}
3082
3083static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
3084			uint32_t msg_count, uint32_t *gpu_reset)
3085{
3086	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3087	uint32_t reset_flags = 0, reset = 0;
3088	struct ras_poison_msg msg;
3089	int ret, i;
3090
3091	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
3092
3093	for (i = 0; i < msg_count; i++) {
3094		ret = amdgpu_ras_get_poison_req(adev, &msg);
3095		if (!ret)
3096			continue;
3097
3098		if (msg.pasid_fn)
3099			msg.pasid_fn(adev, msg.pasid, msg.data);
3100
3101		reset_flags |= msg.reset;
3102	}
3103
3104	/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
3105	if (reset_flags && !amdgpu_ras_is_rma(adev)) {
3106		if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
3107			reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3108		else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
3109			reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
3110		else
3111			reset = reset_flags;
3112
3113		flush_delayed_work(&con->page_retirement_dwork);
3114
3115		con->gpu_reset_flags |= reset;
3116		amdgpu_ras_reset_gpu(adev);
3117
3118		*gpu_reset = reset;
3119
3120		/* Wait for gpu recovery to complete */
3121		flush_work(&con->recovery_work);
3122	}
3123
3124	return 0;
3125}
3126
3127static int amdgpu_ras_page_retirement_thread(void *param)
3128{
3129	struct amdgpu_device *adev = (struct amdgpu_device *)param;
3130	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3131	uint32_t poison_creation_count, msg_count;
3132	uint32_t gpu_reset;
3133	int ret;
3134
3135	while (!kthread_should_stop()) {
3136
3137		wait_event_interruptible(con->page_retirement_wq,
3138				kthread_should_stop() ||
3139				atomic_read(&con->page_retirement_req_cnt));
3140
3141		if (kthread_should_stop())
3142			break;
3143
3144		gpu_reset = 0;
3145
3146		do {
3147			poison_creation_count = atomic_read(&con->poison_creation_count);
3148			ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count);
3149			if (ret == -EIO)
3150				break;
3151
3152			if (poison_creation_count) {
3153				atomic_sub(poison_creation_count, &con->poison_creation_count);
3154				atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
3155			}
3156		} while (atomic_read(&con->poison_creation_count));
3157
3158		if (ret != -EIO) {
3159			msg_count = kfifo_len(&con->poison_fifo);
3160			if (msg_count) {
3161				ret = amdgpu_ras_poison_consumption_handler(adev,
3162						msg_count, &gpu_reset);
3163				if ((ret != -EIO) &&
3164				    (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET))
3165					atomic_sub(msg_count, &con->page_retirement_req_cnt);
3166			}
3167		}
3168
3169		if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) {
3170			/* a gpu mode-1 reset is ongoing, or a ras mode-1 reset just completed */
3171			/* Clear poison creation request */
3172			atomic_set(&con->poison_creation_count, 0);
3173
3174			/* Clear poison fifo */
3175			amdgpu_ras_clear_poison_fifo(adev);
3176
3177			/* Clear all poison requests */
3178			atomic_set(&con->page_retirement_req_cnt, 0);
3179
3180			if (ret == -EIO) {
3181				/* Wait for mode-1 reset to complete */
3182				down_read(&adev->reset_domain->sem);
3183				up_read(&adev->reset_domain->sem);
3184			}
3185
3186			/* Wake up work to save bad pages to eeprom */
3187			schedule_delayed_work(&con->page_retirement_dwork, 0);
3188		} else if (gpu_reset) {
3189			/* the gpu just completed a mode-2 or other reset */
3190			/* Clear poison consumption messages cached in fifo */
3191			msg_count = kfifo_len(&con->poison_fifo);
3192			if (msg_count) {
3193				amdgpu_ras_clear_poison_fifo(adev);
3194				atomic_sub(msg_count, &con->page_retirement_req_cnt);
3195			}
3196
3197			/* Wake up work to save bad pages to eeprom */
3198			schedule_delayed_work(&con->page_retirement_dwork, 0);
3199		}
3200	}
3201
3202	return 0;
3203}
3204
3205int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
3206{
3207	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3208	int ret;
3209
3210	if (!con || amdgpu_sriov_vf(adev))
3211		return 0;
3212
3213	ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
3214
3215	if (ret)
3216		return ret;
3217
3218	/* HW not usable */
3219	if (amdgpu_ras_is_rma(adev))
3220		return -EHWPOISON;
3221
3222	if (con->eeprom_control.ras_num_recs) {
3223		ret = amdgpu_ras_load_bad_pages(adev);
3224		if (ret)
3225			return ret;
3226
3227		amdgpu_dpm_send_hbm_bad_pages_num(
3228			adev, con->eeprom_control.ras_num_recs);
3229
3230		if (con->update_channel_flag == true) {
3231			amdgpu_dpm_send_hbm_bad_channel_flag(
3232				adev, con->eeprom_control.bad_channel_bitmap);
3233			con->update_channel_flag = false;
3234		}
3235	}
3236
3237	return ret;
3238}
3239
3240int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
3241{
3242	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3243	struct ras_err_handler_data **data;
3244	u32  max_eeprom_records_count = 0;
3245	int ret;
3246
3247	if (!con || amdgpu_sriov_vf(adev))
3248		return 0;
3249
3250	/* Allow access to the RAS EEPROM via debugfs when the ASIC
3251	 * supports RAS and debugfs is enabled, even when
3252	 * adev->ras_enabled is unset, i.e. when the "ras_enable"
3253	 * module parameter is set to 0.
3254	 */
3255	con->adev = adev;
3256
3257	if (!adev->ras_enabled)
3258		return 0;
3259
3260	data = &con->eh_data;
3261	*data = kzalloc(sizeof(**data), GFP_KERNEL);
3262	if (!*data) {
3263		ret = -ENOMEM;
3264		goto out;
3265	}
3266
3267	mutex_init(&con->recovery_lock);
3268	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
3269	atomic_set(&con->in_recovery, 0);
3270	con->eeprom_control.bad_channel_bitmap = 0;
3271
3272	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
3273	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
3274
3275	if (init_bp_info) {
3276		ret = amdgpu_ras_init_badpage_info(adev);
3277		if (ret)
3278			goto free;
3279	}
3280
3281	mutex_init(&con->page_rsv_lock);
3282	INIT_KFIFO(con->poison_fifo);
3283	mutex_init(&con->page_retirement_lock);
3284	init_waitqueue_head(&con->page_retirement_wq);
3285	atomic_set(&con->page_retirement_req_cnt, 0);
3286	atomic_set(&con->poison_creation_count, 0);
3287	con->page_retirement_thread =
3288		kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
3289	if (IS_ERR(con->page_retirement_thread)) {
3290		con->page_retirement_thread = NULL;
3291		dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
3292	}
3293
3294	INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
3295	amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
3296#ifdef CONFIG_X86_MCE_AMD
3297	if ((adev->asic_type == CHIP_ALDEBARAN) &&
3298	    (adev->gmc.xgmi.connected_to_cpu))
3299		amdgpu_register_bad_pages_mca_notifier(adev);
3300#endif
3301	return 0;
3302
3303free:
3304	kfree((*data)->bps);
3305	kfree(*data);
3306	con->eh_data = NULL;
3307out:
3308	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
3309
3310	/*
3311	 * Except for the error-threshold-exceeded case, other failures in this
3312	 * function do not fail amdgpu driver init.
3313	 */
3314	if (!amdgpu_ras_is_rma(adev))
3315		ret = 0;
3316	else
3317		ret = -EINVAL;
3318
3319	return ret;
3320}
3321
3322static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
3323{
3324	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3325	struct ras_err_handler_data *data = con->eh_data;
3326	int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES;
3327	bool ret;
3328
3329	/* recovery_init failed to init it, fini is useless */
3330	if (!data)
3331		return 0;
3332
3333	/* Save all cached bad pages to eeprom */
3334	do {
3335		flush_delayed_work(&con->page_retirement_dwork);
3336		ret = amdgpu_ras_schedule_retirement_dwork(con, 0);
3337	} while (ret && max_flush_timeout--);
3338
3339	if (con->page_retirement_thread)
3340		kthread_stop(con->page_retirement_thread);
3341
3342	atomic_set(&con->page_retirement_req_cnt, 0);
3343	atomic_set(&con->poison_creation_count, 0);
3344
3345	mutex_destroy(&con->page_rsv_lock);
3346
3347	cancel_work_sync(&con->recovery_work);
3348
3349	cancel_delayed_work_sync(&con->page_retirement_dwork);
3350
3351	amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
3352
3353	mutex_lock(&con->recovery_lock);
3354	con->eh_data = NULL;
3355	kfree(data->bps);
3356	kfree(data);
3357	mutex_unlock(&con->recovery_lock);
3358
3359	return 0;
3360}
3361/* recovery end */
3362
3363static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
3364{
3365	if (amdgpu_sriov_vf(adev)) {
3366		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3367		case IP_VERSION(13, 0, 2):
3368		case IP_VERSION(13, 0, 6):
3369		case IP_VERSION(13, 0, 14):
3370			return true;
3371		default:
3372			return false;
3373		}
3374	}
3375
3376	if (adev->asic_type == CHIP_IP_DISCOVERY) {
3377		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3378		case IP_VERSION(13, 0, 0):
3379		case IP_VERSION(13, 0, 6):
3380		case IP_VERSION(13, 0, 10):
3381		case IP_VERSION(13, 0, 14):
3382			return true;
3383		default:
3384			return false;
3385		}
3386	}
3387
3388	return adev->asic_type == CHIP_VEGA10 ||
3389		adev->asic_type == CHIP_VEGA20 ||
3390		adev->asic_type == CHIP_ARCTURUS ||
3391		adev->asic_type == CHIP_ALDEBARAN ||
3392		adev->asic_type == CHIP_SIENNA_CICHLID;
3393}
3394
3395/*
3396 * This is a workaround for the vega20 workstation SKU:
3397 * force enable gfx ras and ignore the vbios gfx ras flag,
3398 * since GC EDC cannot be written.
3399 */
3400static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
3401{
3402	struct atom_context *ctx = adev->mode_info.atom_context;
3403
3404	if (!ctx)
3405		return;
3406
3407	if (strnstr(ctx->vbios_pn, "D16406",
3408		    sizeof(ctx->vbios_pn)) ||
3409		strnstr(ctx->vbios_pn, "D36002",
3410			sizeof(ctx->vbios_pn)))
3411		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
3412}
3413
3414/* Query ras capability via atomfirmware interface */
3415static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
3416{
3417	/* mem_ecc cap */
3418	if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
3419		dev_info(adev->dev, "MEM ECC is active.\n");
3420		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
3421					 1 << AMDGPU_RAS_BLOCK__DF);
3422	} else {
3423		dev_info(adev->dev, "MEM ECC is not present.\n");
3424	}
3425
3426	/* sram_ecc cap */
3427	if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
3428		dev_info(adev->dev, "SRAM ECC is active.\n");
3429		if (!amdgpu_sriov_vf(adev))
3430			adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
3431						  1 << AMDGPU_RAS_BLOCK__DF);
3432		else
3433			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
3434						 1 << AMDGPU_RAS_BLOCK__SDMA |
3435						 1 << AMDGPU_RAS_BLOCK__GFX);
3436
3437		/*
3438		 * VCN/JPEG RAS can be supported on both bare metal and
3439		 * SRIOV environment
3440		 */
3441		if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
3442		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
3443		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
3444			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
3445						 1 << AMDGPU_RAS_BLOCK__JPEG);
3446		else
3447			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
3448						  1 << AMDGPU_RAS_BLOCK__JPEG);
3449
3450		/*
3451		 * XGMI RAS is not supported if xgmi num physical nodes
3452		 * is zero
3453		 */
3454		if (!adev->gmc.xgmi.num_physical_nodes)
3455			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
3456	} else {
3457		dev_info(adev->dev, "SRAM ECC is not present.\n");
3458	}
3459}
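
/*
 * Illustrative sketch (not part of this file): ras_hw_enabled is a plain
 * bitmask indexed by AMDGPU_RAS_BLOCK__*, so once the queries above have
 * run, a block's hardware RAS capability is a single bit test.
 */
static inline bool example_umc_ras_capable(struct amdgpu_device *adev)
{
	return !!(adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__UMC));
}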
3460
3461/* Query poison mode from umc/df IP callbacks */
3462static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
3463{
3464	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3465	bool df_poison, umc_poison;
3466
3467	/* poison setting is useless on SRIOV guest */
3468	if (amdgpu_sriov_vf(adev) || !con)
3469		return;
3470
3471	/* Init poison supported flag, the default value is false */
3472	if (adev->gmc.xgmi.connected_to_cpu ||
3473	    adev->gmc.is_app_apu) {
3474		/* enabled by default when GPU is connected to CPU */
3475		con->poison_supported = true;
3476	} else if (adev->df.funcs &&
3477	    adev->df.funcs->query_ras_poison_mode &&
3478	    adev->umc.ras &&
3479	    adev->umc.ras->query_ras_poison_mode) {
3480		df_poison =
3481			adev->df.funcs->query_ras_poison_mode(adev);
3482		umc_poison =
3483			adev->umc.ras->query_ras_poison_mode(adev);
3484
3485		/* Only when poison is set in both DF and UMC can we support it */
3486		if (df_poison && umc_poison)
3487			con->poison_supported = true;
3488		else if (df_poison != umc_poison)
3489			dev_warn(adev->dev,
3490				"Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
3491				df_poison, umc_poison);
3492	}
3493}
3494
3495/*
3496 * Check the hardware's ras ability, which will be saved in hw_supported.
3497 * If the hardware does not support ras, we can skip some ras initialization
3498 * and forbid some ras operations from IPs.
3499 * If software itself, say a boot parameter, limits the ras ability, we still
3500 * need to allow IPs to do some limited operations, like disable. In such a
3501 * case, we have to initialize ras as normal, but need to check whether the
3502 * operation is allowed in each function.
3503 */
3504static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
3505{
3506	adev->ras_hw_enabled = adev->ras_enabled = 0;
3507
3508	if (!amdgpu_ras_asic_supported(adev))
3509		return;
3510
3511	if (amdgpu_sriov_vf(adev)) {
3512		if (amdgpu_virt_get_ras_capability(adev))
3513			goto init_ras_enabled_flag;
3514	}
3515
3516	/* query ras capability from psp */
3517	if (amdgpu_psp_get_ras_capability(&adev->psp))
3518		goto init_ras_enabled_flag;
3519
3520	/* query ras capability from vbios */
3521	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
3522		amdgpu_ras_query_ras_capablity_from_vbios(adev);
3523	} else {
3524		/* driver only manages the RAS feature of a few IP blocks
3525		 * when the GPU is connected to the cpu through XGMI */
3526		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
3527					   1 << AMDGPU_RAS_BLOCK__SDMA |
3528					   1 << AMDGPU_RAS_BLOCK__MMHUB);
3529	}
3530
3531	/* apply asic specific settings (vega20 only for now) */
3532	amdgpu_ras_get_quirks(adev);
3533
3534	/* query poison mode from umc/df ip callback */
3535	amdgpu_ras_query_poison_mode(adev);
3536
3537init_ras_enabled_flag:
3538	/* hw_supported needs to be aligned with RAS block mask. */
3539	adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
3540
3541	adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
3542		adev->ras_hw_enabled & amdgpu_ras_mask;
3543
3544	/* aca is disabled by default */
3545	adev->aca.is_enabled = false;
3546
3547	/* bad page feature is not applicable to specific app platform */
3548	if (adev->gmc.is_app_apu &&
3549	    amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
3550		amdgpu_bad_page_threshold = 0;
3551}
3552
3553static void amdgpu_ras_counte_dw(struct work_struct *work)
3554{
3555	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3556					      ras_counte_delay_work.work);
3557	struct amdgpu_device *adev = con->adev;
3558	struct drm_device *dev = adev_to_drm(adev);
3559	unsigned long ce_count, ue_count;
3560	int res;
3561
3562	res = pm_runtime_get_sync(dev->dev);
3563	if (res < 0)
3564		goto Out;
3565
3566	/* Cache new values.
3567	 */
3568	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
3569		atomic_set(&con->ras_ce_count, ce_count);
3570		atomic_set(&con->ras_ue_count, ue_count);
3571	}
3572
3573	pm_runtime_mark_last_busy(dev->dev);
3574Out:
3575	pm_runtime_put_autosuspend(dev->dev);
3576}
3577
3578static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
3579{
3580	return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
3581			AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
3582			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
3583			AMDGPU_RAS_ERROR__PARITY;
3584}
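
/*
 * Illustrative sketch (not part of this file): the schema is consumed as
 * a bitmask of AMDGPU_RAS_ERROR__* bits, e.g. to check whether poison is
 * part of the SOC's schema.
 */
static inline bool example_schema_has_poison(struct amdgpu_ras *con)
{
	return !!(con->schema & AMDGPU_RAS_ERROR__POISON);
}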
3585
3586static void ras_event_mgr_init(struct ras_event_manager *mgr)
3587{
3588	struct ras_event_state *event_state;
3589	int i;
3590
3591	memset(mgr, 0, sizeof(*mgr));
3592	atomic64_set(&mgr->seqno, 0);
3593
3594	for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
3595		event_state = &mgr->event_state[i];
3596		event_state->last_seqno = RAS_EVENT_INVALID_ID;
3597		atomic64_set(&event_state->count, 0);
3598	}
3599}
3600
3601static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
3602{
3603	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3604	struct amdgpu_hive_info *hive;
3605
3606	if (!ras)
3607		return;
3608
3609	hive = amdgpu_get_xgmi_hive(adev);
3610	ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
3611
3612	/* init event manager with node 0 on xgmi system */
3613	if (!amdgpu_reset_in_recovery(adev)) {
3614		if (!hive || adev->gmc.xgmi.node_id == 0)
3615			ras_event_mgr_init(ras->event_mgr);
3616	}
3617
3618	if (hive)
3619		amdgpu_put_xgmi_hive(hive);
3620}
3621
3622static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
3623{
3624	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3625
3626	if (!con || (adev->flags & AMD_IS_APU))
3627		return;
3628
3629	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3630	case IP_VERSION(13, 0, 2):
3631	case IP_VERSION(13, 0, 6):
3632	case IP_VERSION(13, 0, 14):
3633		con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE;
3634		break;
3635	default:
3636		break;
3637	}
3638}
3639
3640int amdgpu_ras_init(struct amdgpu_device *adev)
3641{
3642	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3643	int r;
3644
3645	if (con)
3646		return 0;
3647
3648	con = kzalloc(sizeof(*con) +
3649			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
3650			sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
3651			GFP_KERNEL);
3652	if (!con)
3653		return -ENOMEM;
3654
3655	con->adev = adev;
3656	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
3657	atomic_set(&con->ras_ce_count, 0);
3658	atomic_set(&con->ras_ue_count, 0);
3659
3660	con->objs = (struct ras_manager *)(con + 1);
3661
3662	amdgpu_ras_set_context(adev, con);
3663
3664	amdgpu_ras_check_supported(adev);
3665
3666	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
3667		/* set gfx block ras context feature for VEGA20 Gaming, to
3668		 * send the ras disable cmd to the ras ta during ras late init.
3669		 */
3670		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
3671			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
3672
3673			return 0;
3674		}
3675
3676		r = 0;
3677		goto release_con;
3678	}
3679
3680	con->update_channel_flag = false;
3681	con->features = 0;
3682	con->schema = 0;
3683	INIT_LIST_HEAD(&con->head);
3684	/* Might need get this flag from vbios. */
3685	con->flags = RAS_DEFAULT_FLAGS;
3686
3687	/* initialize nbio ras function ahead of any other
3688	 * ras functions so the hardware fatal error interrupt
3689	 * can be enabled as early as possible */
3690	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
3691	case IP_VERSION(7, 4, 0):
3692	case IP_VERSION(7, 4, 1):
3693	case IP_VERSION(7, 4, 4):
3694		if (!adev->gmc.xgmi.connected_to_cpu)
3695			adev->nbio.ras = &nbio_v7_4_ras;
3696		break;
3697	case IP_VERSION(4, 3, 0):
3698		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
3699			/* unlike other generations of nbio ras,
3700			 * nbio v4_3 only supports the fatal error interrupt
3701			 * to inform software that DF is frozen due to a
3702			 * system fatal error event. driver should not
3703			 * enable nbio ras in such a case. Instead,
3704			 * check DF RAS */
3705			adev->nbio.ras = &nbio_v4_3_ras;
3706		break;
3707	case IP_VERSION(7, 9, 0):
3708		if (!adev->gmc.is_app_apu)
3709			adev->nbio.ras = &nbio_v7_9_ras;
3710		break;
3711	default:
3712		/* nbio ras is not available */
3713		break;
3714	}
3715
3716	/* nbio ras block needs to be enabled ahead of other ras blocks
3717	 * to handle fatal errors */
3718	r = amdgpu_nbio_ras_sw_init(adev);
3719	if (r)
3720		return r;
3721
3722	if (adev->nbio.ras &&
3723	    adev->nbio.ras->init_ras_controller_interrupt) {
3724		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
3725		if (r)
3726			goto release_con;
3727	}
3728
3729	if (adev->nbio.ras &&
3730	    adev->nbio.ras->init_ras_err_event_athub_interrupt) {
3731		r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
3732		if (r)
3733			goto release_con;
3734	}
3735
3736	/* Pack the socket_id into ras feature mask bits[31:29] */
3737	if (adev->smuio.funcs &&
3738	    adev->smuio.funcs->get_socket_id)
3739		con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
3740					AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
3741
3742	/* Get RAS schema for particular SOC */
3743	con->schema = amdgpu_get_ras_schema(adev);
3744
3745	amdgpu_ras_init_reserved_vram_size(adev);
3746
3747	if (amdgpu_ras_fs_init(adev)) {
3748		r = -EINVAL;
3749		goto release_con;
3750	}
3751
3752	if (amdgpu_ras_aca_is_supported(adev)) {
3753		if (amdgpu_aca_is_enabled(adev))
3754			r = amdgpu_aca_init(adev);
3755		else
3756			r = amdgpu_mca_init(adev);
3757		if (r)
3758			goto release_con;
3759	}
3760
3761	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
3762		 "hardware ability[%x] ras_mask[%x]\n",
3763		 adev->ras_hw_enabled, adev->ras_enabled);
3764
3765	return 0;
3766release_con:
3767	amdgpu_ras_set_context(adev, NULL);
3768	kfree(con);
3769
3770	return r;
3771}
3772
3773int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
3774{
3775	if (adev->gmc.xgmi.connected_to_cpu ||
3776	    adev->gmc.is_app_apu)
3777		return 1;
3778	return 0;
3779}
3780
3781static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
3782					struct ras_common_if *ras_block)
3783{
3784	struct ras_query_if info = {
3785		.head = *ras_block,
3786	};
3787
3788	if (!amdgpu_persistent_edc_harvesting_supported(adev))
3789		return 0;
3790
3791	if (amdgpu_ras_query_error_status(adev, &info) != 0)
3792		DRM_WARN("RAS init harvest failure");
3793
3794	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
3795		DRM_WARN("RAS init harvest reset failure");
3796
3797	return 0;
3798}
3799
3800bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
3801{
3802	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3803
3804	if (!con)
3805		return false;
3806
3807	return con->poison_supported;
3808}
3809
3810/* helper function to handle common stuff in ip late init phase */
3811int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
3812			 struct ras_common_if *ras_block)
3813{
3814	struct amdgpu_ras_block_object *ras_obj = NULL;
3815	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3816	struct ras_query_if *query_info;
3817	unsigned long ue_count, ce_count;
3818	int r;
3819
3820	/* disable RAS feature per IP block if it is not supported */
3821	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
3822		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
3823		return 0;
3824	}
3825
3826	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
3827	if (r) {
3828		if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) {
3829			/* in resume phase, if we fail to enable ras,
3830			 * clean up all ras fs nodes, and disable ras */
3831			goto cleanup;
3832		} else
3833			return r;
3834	}
3835
3836	/* check for errors on warm reset on ASICs with persistent EDC harvesting support */
3837	amdgpu_persistent_edc_harvesting(adev, ras_block);
3838
3839	/* in resume phase, no need to create ras fs node */
3840	if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
3841		return 0;
3842
3843	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3844	if (ras_obj->ras_cb || (ras_obj->hw_ops &&
3845	    (ras_obj->hw_ops->query_poison_status ||
3846	    ras_obj->hw_ops->handle_poison_consumption))) {
3847		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
3848		if (r)
3849			goto cleanup;
3850	}
3851
3852	if (ras_obj->hw_ops &&
3853	    (ras_obj->hw_ops->query_ras_error_count ||
3854	     ras_obj->hw_ops->query_ras_error_status)) {
3855		r = amdgpu_ras_sysfs_create(adev, ras_block);
3856		if (r)
3857			goto interrupt;
3858
3859		/* Those are the cached values at init.
3860		 */
3861		query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
3862		if (!query_info)
3863			return -ENOMEM;
3864		memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
3865
3866		if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
3867			atomic_set(&con->ras_ce_count, ce_count);
3868			atomic_set(&con->ras_ue_count, ue_count);
3869		}
3870
3871		kfree(query_info);
3872	}
3873
3874	return 0;
3875
3876interrupt:
3877	if (ras_obj->ras_cb)
3878		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3879cleanup:
3880	amdgpu_ras_feature_enable(adev, ras_block, 0);
3881	return r;
3882}
3883
3884static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
3885			 struct ras_common_if *ras_block)
3886{
3887	return amdgpu_ras_block_late_init(adev, ras_block);
3888}
3889
3890/* helper function to remove ras fs node and interrupt handler */
3891void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
3892			  struct ras_common_if *ras_block)
3893{
3894	struct amdgpu_ras_block_object *ras_obj;
3895	if (!ras_block)
3896		return;
3897
3898	amdgpu_ras_sysfs_remove(adev, ras_block);
3899
3900	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3901	if (ras_obj->ras_cb)
3902		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3903}
3904
3905static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
3906			  struct ras_common_if *ras_block)
3907{
3908	return amdgpu_ras_block_late_fini(adev, ras_block);
3909}
3910
3911/* do some init work after IP late init as a dependency;
3912 * it runs in the resume/gpu reset/booting up cases.
3913 */
3914void amdgpu_ras_resume(struct amdgpu_device *adev)
3915{
3916	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3917	struct ras_manager *obj, *tmp;
3918
3919	if (!adev->ras_enabled || !con) {
3920		/* clean ras context for VEGA20 Gaming after send ras disable cmd */
3921		amdgpu_release_ras_context(adev);
3922
3923		return;
3924	}
3925
3926	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
3927		/* Set up all other IPs which are not implemented. One tricky
3928		 * point is that an IP's actual ras error type should be
3929		 * MULTI_UNCORRECTABLE, but since the driver does not handle it,
3930		 * ERROR_NONE makes sense anyway.
3931		 */
3932		amdgpu_ras_enable_all_features(adev, 1);
3933
3934		/* We enable ras on all hw_supported blocks, but the boot
3935		 * parameter might disable some of them, and one or more IPs
3936		 * may not be implemented yet. So we disable those on their behalf.
3937		 */
3938		list_for_each_entry_safe(obj, tmp, &con->head, node) {
3939			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
3940				amdgpu_ras_feature_enable(adev, &obj->head, 0);
3941				/* there should not be any reference. */
3942				WARN_ON(alive_obj(obj));
3943			}
3944		}
3945	}
3946}
3947
3948void amdgpu_ras_suspend(struct amdgpu_device *adev)
3949{
3950	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3951
3952	if (!adev->ras_enabled || !con)
3953		return;
3954
3955	amdgpu_ras_disable_all_features(adev, 0);
3956	/* Make sure all ras objects are disabled. */
3957	if (AMDGPU_RAS_GET_FEATURES(con->features))
3958		amdgpu_ras_disable_all_features(adev, 1);
3959}
3960
3961int amdgpu_ras_late_init(struct amdgpu_device *adev)
3962{
3963	struct amdgpu_ras_block_list *node, *tmp;
3964	struct amdgpu_ras_block_object *obj;
3965	int r;
3966
3967	amdgpu_ras_event_mgr_init(adev);
3968
3969	if (amdgpu_ras_aca_is_supported(adev)) {
3970		if (amdgpu_reset_in_recovery(adev)) {
3971			if (amdgpu_aca_is_enabled(adev))
3972				r = amdgpu_aca_reset(adev);
3973			else
3974				r = amdgpu_mca_reset(adev);
3975			if (r)
3976				return r;
3977		}
3978
3979		if (!amdgpu_sriov_vf(adev)) {
3980			if (amdgpu_aca_is_enabled(adev))
3981				amdgpu_ras_set_aca_debug_mode(adev, false);
3982			else
3983				amdgpu_ras_set_mca_debug_mode(adev, false);
3984		}
3985	}
3986
3987	/* Guest side doesn't need to init the ras feature */
3988	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev))
3989		return 0;
3990
3991	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
3992		obj = node->ras_obj;
3993		if (!obj) {
3994			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
3995			continue;
3996		}
3997
3998		if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
3999			continue;
4000
4001		if (obj->ras_late_init) {
4002			r = obj->ras_late_init(adev, &obj->ras_comm);
4003			if (r) {
4004				dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
4005					obj->ras_comm.name, r);
4006				return r;
4007			}
4008		} else
4009			amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
4010	}
4011
4012	return 0;
4013}
4014
4015/* do some fini work before IP fini as a dependency */
4016int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
4017{
4018	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4019
4020	if (!adev->ras_enabled || !con)
4021		return 0;
4022
4023
4024	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
4025	if (AMDGPU_RAS_GET_FEATURES(con->features))
4026		amdgpu_ras_disable_all_features(adev, 0);
4027	amdgpu_ras_recovery_fini(adev);
4028	return 0;
4029}
4030
4031int amdgpu_ras_fini(struct amdgpu_device *adev)
4032{
4033	struct amdgpu_ras_block_list *ras_node, *tmp;
4034	struct amdgpu_ras_block_object *obj = NULL;
4035	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4036
4037	if (!adev->ras_enabled || !con)
4038		return 0;
4039
4040	list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
4041		if (ras_node->ras_obj) {
4042			obj = ras_node->ras_obj;
4043			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
4044			    obj->ras_fini)
4045				obj->ras_fini(adev, &obj->ras_comm);
4046			else
4047				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
4048		}
4049
4050		/* Clear ras blocks from ras_list and free ras block list node */
4051		list_del(&ras_node->node);
4052		kfree(ras_node);
4053	}
4054
4055	amdgpu_ras_fs_fini(adev);
4056	amdgpu_ras_interrupt_remove_all(adev);
4057
4058	if (amdgpu_ras_aca_is_supported(adev)) {
4059		if (amdgpu_aca_is_enabled(adev))
4060			amdgpu_aca_fini(adev);
4061		else
4062			amdgpu_mca_fini(adev);
4063	}
4064
4065	WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
4066
4067	if (AMDGPU_RAS_GET_FEATURES(con->features))
4068		amdgpu_ras_disable_all_features(adev, 0);
4069
4070	cancel_delayed_work_sync(&con->ras_counte_delay_work);
4071
4072	amdgpu_ras_set_context(adev, NULL);
4073	kfree(con);
4074
4075	return 0;
4076}
4077
4078bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
4079{
4080	struct amdgpu_ras *ras;
4081
4082	ras = amdgpu_ras_get_context(adev);
4083	if (!ras)
4084		return false;
4085
4086	return atomic_read(&ras->fed);
4087}
4088
4089void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
4090{
4091	struct amdgpu_ras *ras;
4092
4093	ras = amdgpu_ras_get_context(adev);
4094	if (ras)
4095		atomic_set(&ras->fed, !!status);
4096}
4097
4098static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
4099{
4100	struct amdgpu_ras *ras;
4101
4102	ras = amdgpu_ras_get_context(adev);
4103	if (!ras)
4104		return NULL;
4105
4106	return ras->event_mgr;
4107}
4108
4109int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
4110				     const void *caller)
4111{
4112	struct ras_event_manager *event_mgr;
4113	struct ras_event_state *event_state;
4114	int ret = 0;
4115
4116	if (type >= RAS_EVENT_TYPE_COUNT) {
4117		ret = -EINVAL;
4118		goto out;
4119	}
4120
4121	event_mgr = __get_ras_event_mgr(adev);
4122	if (!event_mgr) {
4123		ret = -EINVAL;
4124		goto out;
4125	}
4126
4127	event_state = &event_mgr->event_state[type];
4128	event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno);
4129	atomic64_inc(&event_state->count);
4130
4131out:
4132	if (ret && caller)
4133		dev_warn(adev->dev, "failed mark ras event (%d) in %ps, ret:%d\n",
4134			 (int)type, caller, ret);
4135
4136	return ret;
4137}
4138
4139u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
4140{
4141	struct ras_event_manager *event_mgr;
4142	u64 id;
4143
4144	if (type >= RAS_EVENT_TYPE_COUNT)
4145		return RAS_EVENT_INVALID_ID;
4146
4147	switch (type) {
4148	case RAS_EVENT_TYPE_FATAL:
4149	case RAS_EVENT_TYPE_POISON_CREATION:
4150	case RAS_EVENT_TYPE_POISON_CONSUMPTION:
4151		event_mgr = __get_ras_event_mgr(adev);
4152		if (!event_mgr)
4153			return RAS_EVENT_INVALID_ID;
4154
4155		id = event_mgr->event_state[type].last_seqno;
4156		break;
4157	case RAS_EVENT_TYPE_INVALID:
4158	default:
4159		id = RAS_EVENT_INVALID_ID;
4160		break;
4161	}
4162
4163	return id;
4164}
4165
4166void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
4167{
4168	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
4169		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4170		enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
4171		u64 event_id;
4172
4173		if (amdgpu_ras_mark_ras_event(adev, type))
4174			return;
4175
4176		event_id = amdgpu_ras_acquire_event_id(adev, type);
4177
4178		RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error "
4179			      "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
4180
4181		amdgpu_ras_set_fed(adev, true);
4182		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4183		amdgpu_ras_reset_gpu(adev);
4184	}
4185}
4186
4187bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
4188{
4189	if (adev->asic_type == CHIP_VEGA20 &&
4190	    adev->pm.fw_version <= 0x283400) {
4191		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
4192				amdgpu_ras_intr_triggered();
4193	}
4194
4195	return false;
4196}
4197
4198void amdgpu_release_ras_context(struct amdgpu_device *adev)
4199{
4200	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4201
4202	if (!con)
4203		return;
4204
4205	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
4206		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
4207		amdgpu_ras_set_context(adev, NULL);
4208		kfree(con);
4209	}
4210}
4211
4212#ifdef CONFIG_X86_MCE_AMD
4213static struct amdgpu_device *find_adev(uint32_t node_id)
4214{
4215	int i;
4216	struct amdgpu_device *adev = NULL;
4217
4218	for (i = 0; i < mce_adev_list.num_gpu; i++) {
4219		adev = mce_adev_list.devs[i];
4220
4221		if (adev && adev->gmc.xgmi.connected_to_cpu &&
4222		    adev->gmc.xgmi.physical_node_id == node_id)
4223			break;
4224		adev = NULL;
4225	}
4226
4227	return adev;
4228}
4229
4230#define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
4231#define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
4232#define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
4233#define GPU_ID_OFFSET		8
4234
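/*
 * Illustrative sketch (not part of this file): the notifier below derives
 * the physical node id from MCA_IPID as shown here, e.g. an IPID with
 * bits [47:44] = 0x9 decodes to GPU id 9, i.e. physical node id 1 after
 * subtracting GPU_ID_OFFSET.
 */
static inline uint32_t example_decode_node_id(uint64_t ipid)
{
	return GET_MCA_IPID_GPUID(ipid) - GPU_ID_OFFSET;
}
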
4235static int amdgpu_bad_page_notifier(struct notifier_block *nb,
4236				    unsigned long val, void *data)
4237{
4238	struct mce *m = (struct mce *)data;
4239	struct amdgpu_device *adev = NULL;
4240	uint32_t gpu_id = 0;
4241	uint32_t umc_inst = 0, ch_inst = 0;
4242
4243	/*
4244	 * Only process the error if it was generated in UMC_V2, which
4245	 * belongs to GPU UMCs, and it occurred in DramECC (Extended error
4246	 * code = 0); otherwise bail out.
4247	 */
4248	if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
4249		    (XEC(m->status, 0x3f) == 0x0)))
4250		return NOTIFY_DONE;
4251
4252	/*
4253	 * If it is correctable error, return.
4254	 */
4255	if (mce_is_correctable(m))
4256		return NOTIFY_OK;
4257
4258	/*
4259	 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
4260	 */
4261	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
4262
4263	adev = find_adev(gpu_id);
4264	if (!adev) {
4265		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
4266								gpu_id);
4267		return NOTIFY_DONE;
4268	}
4269
4270	/*
4271	 * If it is uncorrectable error, then find out UMC instance and
4272	 * channel index.
4273	 */
4274	umc_inst = GET_UMC_INST(m->ipid);
4275	ch_inst = GET_CHAN_INDEX(m->ipid);
4276
4277	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
4278			     umc_inst, ch_inst);
4279
4280	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
4281		return NOTIFY_OK;
4282	else
4283		return NOTIFY_DONE;
4284}
4285
4286static struct notifier_block amdgpu_bad_page_nb = {
4287	.notifier_call  = amdgpu_bad_page_notifier,
4288	.priority       = MCE_PRIO_UC,
4289};
4290
4291static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
4292{
4293	/*
4294	 * Add the adev to the mce_adev_list.
4295	 * During mode2 reset, amdgpu device is temporarily
4296	 * removed from the mgpu_info list which can cause
4297	 * page retirement to fail.
4298	 * Use this list instead of mgpu_info to find the amdgpu
4299	 * device on which the UMC error was reported.
4300	 */
4301	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
4302
4303	/*
4304	 * Register the x86 notifier only once
4305	 * with MCE subsystem.
4306	 */
4307	if (notifier_registered == false) {
4308		mce_register_decode_chain(&amdgpu_bad_page_nb);
4309		notifier_registered = true;
4310	}
4311}
4312#endif
4313
4314struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
4315{
4316	if (!adev)
4317		return NULL;
4318
4319	return adev->psp.ras_context.ras;
4320}
4321
4322int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
4323{
4324	if (!adev)
4325		return -EINVAL;
4326
4327	adev->psp.ras_context.ras = ras_con;
4328	return 0;
4329}
4330
4331/* check if ras is supported on block, say, sdma, gfx */
4332int amdgpu_ras_is_supported(struct amdgpu_device *adev,
4333		unsigned int block)
4334{
4335	int ret = 0;
4336	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4337
4338	if (block >= AMDGPU_RAS_BLOCK_COUNT)
4339		return 0;
4340
4341	ret = ras && (adev->ras_enabled & (1 << block));
4342
4343	/* For the special asic with mem ecc enabled but sram ecc
4344	 * not enabled, even if the ras block is not marked supported in
4345	 * .ras_enabled, as long as the asic supports poison mode and the
4346	 * ras block has a ras configuration, the ras block can be
4347	 * considered to support the ras function.
4348	 */
4349	if (!ret &&
4350	    (block == AMDGPU_RAS_BLOCK__GFX ||
4351	     block == AMDGPU_RAS_BLOCK__SDMA ||
4352	     block == AMDGPU_RAS_BLOCK__VCN ||
4353	     block == AMDGPU_RAS_BLOCK__JPEG) &&
4354		(amdgpu_ras_mask & (1 << block)) &&
4355	    amdgpu_ras_is_poison_mode_supported(adev) &&
4356	    amdgpu_ras_get_ras_block(adev, block, 0))
4357		ret = 1;
4358
4359	return ret;
4360}
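
/*
 * Illustrative usage (not part of this file): callers typically gate
 * per-block RAS work on this helper before touching block state.
 */
static inline bool example_gfx_ras_enabled(struct amdgpu_device *adev)
{
	return !!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX);
}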
4361
4362int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
4363{
4364	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4365
4366	/* mode1 is the only selection for RMA status */
4367	if (amdgpu_ras_is_rma(adev)) {
4368		ras->gpu_reset_flags = 0;
4369		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4370	}
4371
4372	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
4373		struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
4374		int hive_ras_recovery = 0;
4375
4376		if (hive) {
4377			hive_ras_recovery = atomic_read(&hive->ras_recovery);
4378			amdgpu_put_xgmi_hive(hive);
4379		}
4380		/* In the case of multiple GPUs, after a GPU has started
4381		 * resetting all GPUs on hive, other GPUs do not need to
4382		 * trigger GPU reset again.
4383		 */
4384		if (!hive_ras_recovery)
4385			amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4386		else
4387			atomic_set(&ras->in_recovery, 0);
4388	} else {
4389		flush_work(&ras->recovery_work);
4390		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4391	}
4392
4393	return 0;
4394}
4395
4396int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
4397{
4398	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4399	int ret = 0;
4400
4401	if (con) {
4402		ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4403		if (!ret)
4404			con->is_aca_debug_mode = enable;
4405	}
4406
4407	return ret;
4408}
4409
4410int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
4411{
4412	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4413	int ret = 0;
4414
4415	if (con) {
4416		if (amdgpu_aca_is_enabled(adev))
4417			ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
4418		else
4419			ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4420		if (!ret)
4421			con->is_aca_debug_mode = enable;
4422	}
4423
4424	return ret;
4425}
4426
4427bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
4428{
4429	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4430	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4431	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4432
4433	if (!con)
4434		return false;
4435
4436	if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
4437	    (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
4438		return con->is_aca_debug_mode;
4439	else
4440		return true;
4441}
4442
4443bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
4444				     unsigned int *error_query_mode)
4445{
4446	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4447	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4448	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4449
4450	if (!con) {
4451		*error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
4452		return false;
4453	}
4454
4455	if (amdgpu_sriov_vf(adev)) {
4456		*error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY;
4457	} else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) {
4458		*error_query_mode =
4459			(con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
4460	} else {
4461		*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
4462	}
4463
4464	return true;
4465}
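
/*
 * Illustrative sketch (not part of this file): a caller picks its query
 * path from the reported mode; the constants are the ones consumed by
 * this file's query routines.
 */
static inline bool example_uses_firmware_query(struct amdgpu_device *adev)
{
	unsigned int mode;

	if (!amdgpu_ras_get_error_query_mode(adev, &mode))
		return false;

	return mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
}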
4466
4467/* Register each ip ras block into amdgpu ras */
4468int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
4469		struct amdgpu_ras_block_object *ras_block_obj)
4470{
4471	struct amdgpu_ras_block_list *ras_node;
4472	if (!adev || !ras_block_obj)
4473		return -EINVAL;
4474
4475	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
4476	if (!ras_node)
4477		return -ENOMEM;
4478
4479	INIT_LIST_HEAD(&ras_node->node);
4480	ras_node->ras_obj = ras_block_obj;
4481	list_add_tail(&ras_node->node, &adev->ras_list);
4482
4483	return 0;
4484}
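
/*
 * Illustrative sketch (not part of this file): an IP block registers its
 * ras object once at sw_init time; "example_ras_obj" is hypothetical,
 * real instances live in the per-IP files (e.g. the nbio ras objects
 * selected in amdgpu_ras_init() above).
 */
static inline int example_ip_ras_sw_init(struct amdgpu_device *adev,
		struct amdgpu_ras_block_object *example_ras_obj)
{
	int r;

	r = amdgpu_ras_register_ras_block(adev, example_ras_obj);
	if (r)
		dev_err(adev->dev, "failed to register ras block!\n");

	return r;
}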
4485
4486void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
4487{
4488	if (!err_type_name)
4489		return;
4490
4491	switch (err_type) {
4492	case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
4493		sprintf(err_type_name, "correctable");
4494		break;
4495	case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
4496		sprintf(err_type_name, "uncorrectable");
4497		break;
4498	default:
4499		sprintf(err_type_name, "unknown");
4500		break;
4501	}
4502}
4503
4504bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
4505					 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4506					 uint32_t instance,
4507					 uint32_t *memory_id)
4508{
4509	uint32_t err_status_lo_data, err_status_lo_offset;
4510
4511	if (!reg_entry)
4512		return false;
4513
4514	err_status_lo_offset =
4515		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4516					    reg_entry->seg_lo, reg_entry->reg_lo);
4517	err_status_lo_data = RREG32(err_status_lo_offset);
4518
4519	if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
4520	    !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
4521		return false;
4522
4523	*memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
4524
4525	return true;
4526}
4527
4528bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
4529				       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4530				       uint32_t instance,
4531				       unsigned long *err_cnt)
4532{
4533	uint32_t err_status_hi_data, err_status_hi_offset;
4534
4535	if (!reg_entry)
4536		return false;
4537
4538	err_status_hi_offset =
4539		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4540					    reg_entry->seg_hi, reg_entry->reg_hi);
4541	err_status_hi_data = RREG32(err_status_hi_offset);
4542
4543	if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
4544	    !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
4545		/* keep the check here in case we need to refer to the result later */
4546		dev_dbg(adev->dev, "Invalid err_info field\n");
4547
4548	/* read err count */
4549	*err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
4550
4551	return true;
4552}
4553
4554void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
4555					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
4556					   uint32_t reg_list_size,
4557					   const struct amdgpu_ras_memory_id_entry *mem_list,
4558					   uint32_t mem_list_size,
4559					   uint32_t instance,
4560					   uint32_t err_type,
4561					   unsigned long *err_count)
4562{
4563	uint32_t memory_id;
4564	unsigned long err_cnt;
4565	char err_type_name[16];
4566	uint32_t i, j;
4567
4568	for (i = 0; i < reg_list_size; i++) {
4569		/* query memory_id from err_status_lo */
4570		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
4571							 instance, &memory_id))
4572			continue;
4573
4574		/* query err_cnt from err_status_hi */
4575		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
4576						       instance, &err_cnt) ||
4577		    !err_cnt)
4578			continue;
4579
4580		*err_count += err_cnt;
4581
4582		/* log the errors */
4583		amdgpu_ras_get_error_type_name(err_type, err_type_name);
4584		if (!mem_list) {
4585			/* memory_list is not supported */
4586			dev_info(adev->dev,
4587				 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
4588				 err_cnt, err_type_name,
4589				 reg_list[i].block_name,
4590				 instance, memory_id);
4591		} else {
4592			for (j = 0; j < mem_list_size; j++) {
4593				if (memory_id == mem_list[j].memory_id) {
4594					dev_info(adev->dev,
4595						 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
4596						 err_cnt, err_type_name,
4597						 reg_list[i].block_name,
4598						 instance, mem_list[j].name);
4599					break;
4600				}
4601			}
4602		}
4603	}
4604}
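
/*
 * Illustrative sketch (not part of this file): a per-IP query callback
 * typically walks its (static const) register list once per instance;
 * the reg_list table itself is defined in the IP file and is assumed
 * here. Passing a NULL mem_list is supported, as handled above.
 */
static inline void example_query_block_errors(struct amdgpu_device *adev,
		const struct amdgpu_ras_err_status_reg_entry *reg_list,
		uint32_t reg_list_size, uint32_t num_instances,
		unsigned long *err_count)
{
	uint32_t i;

	for (i = 0; i < num_instances; i++)
		amdgpu_ras_inst_query_ras_error_count(adev, reg_list,
				reg_list_size, NULL, 0, i,
				AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
				err_count);
}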
4605
4606void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
4607					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
4608					   uint32_t reg_list_size,
4609					   uint32_t instance)
4610{
4611	uint32_t err_status_lo_offset, err_status_hi_offset;
4612	uint32_t i;
4613
4614	for (i = 0; i < reg_list_size; i++) {
4615		err_status_lo_offset =
4616			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4617						    reg_list[i].seg_lo, reg_list[i].reg_lo);
4618		err_status_hi_offset =
4619			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4620						    reg_list[i].seg_hi, reg_list[i].reg_hi);
4621		WREG32(err_status_lo_offset, 0);
4622		WREG32(err_status_hi_offset, 0);
4623	}
4624}
4625
4626int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
4627{
4628	memset(err_data, 0, sizeof(*err_data));
4629
4630	INIT_LIST_HEAD(&err_data->err_node_list);
4631
4632	return 0;
4633}
4634
4635static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
4636{
4637	if (!err_node)
4638		return;
4639
4640	list_del(&err_node->node);
4641	kvfree(err_node);
4642}
4643
4644void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
4645{
4646	struct ras_err_node *err_node, *tmp;
4647
4648	list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
4649		amdgpu_ras_error_node_release(err_node);
4650}
4651
4652static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
4653							     struct amdgpu_smuio_mcm_config_info *mcm_info)
4654{
4655	struct ras_err_node *err_node;
4656	struct amdgpu_smuio_mcm_config_info *ref_id;
4657
4658	if (!err_data || !mcm_info)
4659		return NULL;
4660
4661	for_each_ras_error(err_node, err_data) {
4662		ref_id = &err_node->err_info.mcm_info;
4663
4664		if (mcm_info->socket_id == ref_id->socket_id &&
4665		    mcm_info->die_id == ref_id->die_id)
4666			return err_node;
4667	}
4668
4669	return NULL;
4670}
4671
4672static struct ras_err_node *amdgpu_ras_error_node_new(void)
4673{
4674	struct ras_err_node *err_node;
4675
4676	err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
4677	if (!err_node)
4678		return NULL;
4679
4680	INIT_LIST_HEAD(&err_node->node);
4681
4682	return err_node;
4683}
4684
4685static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
4686{
4687	struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
4688	struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
4689	struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
4690	struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
4691
4692	if (unlikely(infoa->socket_id != infob->socket_id))
4693		return infoa->socket_id - infob->socket_id;
4694	else
4695		return infoa->die_id - infob->die_id;
4696
4697	return 0;
4698}
4699
4700static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
4701				struct amdgpu_smuio_mcm_config_info *mcm_info)
4702{
4703	struct ras_err_node *err_node;
4704
4705	err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
4706	if (err_node)
4707		return &err_node->err_info;
4708
4709	err_node = amdgpu_ras_error_node_new();
4710	if (!err_node)
4711		return NULL;
4712
4713	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
4714
4715	err_data->err_list_count++;
4716	list_add_tail(&err_node->node, &err_data->err_node_list);
4717	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
4718
4719	return &err_node->err_info;
4720}
4721
4722int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
4723					struct amdgpu_smuio_mcm_config_info *mcm_info,
4724					u64 count)
4725{
4726	struct ras_err_info *err_info;
4727
4728	if (!err_data || !mcm_info)
4729		return -EINVAL;
4730
4731	if (!count)
4732		return 0;
4733
4734	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4735	if (!err_info)
4736		return -EINVAL;
4737
4738	err_info->ue_count += count;
4739	err_data->ue_count += count;
4740
4741	return 0;
4742}
4743
4744int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
4745					struct amdgpu_smuio_mcm_config_info *mcm_info,
4746					u64 count)
4747{
4748	struct ras_err_info *err_info;
4749
4750	if (!err_data || !mcm_info)
4751		return -EINVAL;
4752
4753	if (!count)
4754		return 0;
4755
4756	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4757	if (!err_info)
4758		return -EINVAL;
4759
4760	err_info->ce_count += count;
4761	err_data->ce_count += count;
4762
4763	return 0;
4764}
4765
4766int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
4767					struct amdgpu_smuio_mcm_config_info *mcm_info,
4768					u64 count)
4769{
4770	struct ras_err_info *err_info;
4771
4772	if (!err_data || !mcm_info)
4773		return -EINVAL;
4774
4775	if (!count)
4776		return 0;
4777
4778	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4779	if (!err_info)
4780		return -EINVAL;
4781
4782	err_info->de_count += count;
4783	err_data->de_count += count;
4784
4785	return 0;
4786}
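
/*
 * Illustrative sketch (not part of this file) of the ras_err_data
 * lifecycle: init the node list, accumulate per-die counts with the
 * statistic helpers above, then release the nodes. The counts shown
 * are placeholders.
 */
static inline void example_collect_errors(struct amdgpu_device *adev)
{
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = 0,
		.die_id = 0,
	};
	struct ras_err_data err_data;

	if (amdgpu_ras_error_data_init(&err_data))
		return;

	amdgpu_ras_error_statistic_ce_count(&err_data, &mcm_info, 1);
	amdgpu_ras_error_statistic_ue_count(&err_data, &mcm_info, 1);

	dev_info(adev->dev, "ce:%lu ue:%lu\n",
		 err_data.ce_count, err_data.ue_count);

	amdgpu_ras_error_data_fini(&err_data);
}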
4787
4788#define mmMP0_SMN_C2PMSG_92	0x1609C
4789#define mmMP0_SMN_C2PMSG_126	0x160BE
4790static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
4791						 u32 instance)
4792{
4793	u32 socket_id, aid_id, hbm_id;
4794	u32 fw_status;
4795	u32 boot_error;
4796	u64 reg_addr;
4797
4798	/* The pattern for smn addressing in other SOCs could differ from
4799	 * the one for aqua_vanjaram. We should revisit the code if the
4800	 * pattern changes. In that case, replace the aqua_vanjaram
4801	 * implementation with a more common helper */
4802	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4803		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4804	fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4805
4806	reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
4807		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4808	boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4809
4810	socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
4811	aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
4812	hbm_id = ((1 == AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error)) ? 0 : 1);
4813
4814	if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
4815		dev_info(adev->dev,
4816			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
4817			 socket_id, aid_id, hbm_id, fw_status);
4818
4819	if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
4820		dev_info(adev->dev,
4821			 "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
4822			 socket_id, aid_id, fw_status);
4823
4824	if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
4825		dev_info(adev->dev,
4826			 "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
4827			 socket_id, aid_id, fw_status);
4828
4829	if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
4830		dev_info(adev->dev,
4831			 "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
4832			 socket_id, aid_id, fw_status);
4833
4834	if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
4835		dev_info(adev->dev,
4836			 "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
4837			 socket_id, aid_id, fw_status);
4838
4839	if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
4840		dev_info(adev->dev,
4841			 "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
4842			 socket_id, aid_id, fw_status);
4843
4844	if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
4845		dev_info(adev->dev,
4846			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
4847			 socket_id, aid_id, hbm_id, fw_status);
4848
4849	if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
4850		dev_info(adev->dev,
4851			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
4852			 socket_id, aid_id, hbm_id, fw_status);
4853
4854	if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
4855		dev_info(adev->dev,
4856			 "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
4857			 socket_id, aid_id, fw_status);
4858
4859	if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error))
4860		dev_info(adev->dev,
4861			 "socket: %d, aid: %d, fw_status: 0x%x, unknown boot time errors\n",
4862			 socket_id, aid_id, fw_status);
4863}
4864
4865static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
4866					   u32 instance)
4867{
4868	u64 reg_addr;
4869	u32 reg_data;
4870	int retry_loop;
4871
4872	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4873		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4874
4875	for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
4876		reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4877		if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
4878			return false;
4879		else
4880			msleep(1);
4881	}
4882
4883	return true;
4884}
4885
4886void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
4887{
4888	u32 i;
4889
4890	for (i = 0; i < num_instances; i++) {
4891		if (amdgpu_ras_boot_error_detected(adev, i))
4892			amdgpu_ras_boot_time_error_reporting(adev, i);
4893	}
4894}
4895
4896int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
4897{
4898	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4899	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
4900	uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
4901	int ret = 0;
4902
4903	mutex_lock(&con->page_rsv_lock);
4904	ret = amdgpu_vram_mgr_query_page_status(mgr, start);
4905	if (ret == -ENOENT)
4906		ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
4907	mutex_unlock(&con->page_rsv_lock);
4908
4909	return ret;
4910}
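
/*
 * Illustrative usage (not part of this file): page retirement paths hand
 * a VRAM pfn to the helper above; -ENOENT from the page status query
 * just means "not reserved yet" and is turned into a fresh reservation.
 */
static inline void example_retire_pfn(struct amdgpu_device *adev, uint64_t pfn)
{
	if (amdgpu_ras_reserve_page(adev, pfn))
		dev_warn(adev->dev, "failed to reserve pfn 0x%llx\n", pfn);
}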
4911
4912void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
4913				const char *fmt, ...)
4914{
4915	struct va_format vaf;
4916	va_list args;
4917
4918	va_start(args, fmt);
4919	vaf.fmt = fmt;
4920	vaf.va = &args;
4921
4922	if (RAS_EVENT_ID_IS_VALID(event_id))
4923		dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
4924	else
4925		dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);
4926
4927	va_end(args);
4928}
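
/*
 * Illustrative usage (not part of this file): callers pass the event id
 * obtained from amdgpu_ras_acquire_event_id() so related log lines share
 * a {seqno} prefix.
 */
static inline void example_log_bad_page(struct amdgpu_device *adev,
					u64 event_id, u64 addr)
{
	amdgpu_ras_event_log_print(adev, event_id,
				   "page at 0x%llx marked bad\n", addr);
}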
4929
4930bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
4931{
4932	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4933
4934	if (!con)
4935		return false;
4936
4937	return con->is_rma;
4938}