/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
#include <linux/list_sort.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
#include "amdgpu_psp.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif

static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
	"mca",
	"vcn",
	"jpeg",
	"ih",
	"mpio",
};

const char *ras_mca_block_string[] = {
	"mca_mp0",
	"mca_mp1",
	"mca_mpio",
	"mca_iohc",
};

struct amdgpu_ras_block_list {
	/* ras block link */
	struct list_head node;

	struct amdgpu_ras_block_object *ras_obj;
};

const char *get_ras_block_str(struct ras_common_if *ras_block)
{
	if (!ras_block)
		return "NULL";

	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
	    ras_block->block >= ARRAY_SIZE(ras_block_string))
		return "OUT OF RANGE";

	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
		return ras_mca_block_string[ras_block->sub_block_index];

	return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER		(100 * 1024 * 1024ULL)

#define MAX_UMC_POISON_POLLING_TIME_ASYNC	100 /* ms */

enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
				uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				uint64_t addr);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
	int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
	if (adev && amdgpu_ras_get_context(adev))
		amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
	if (adev && amdgpu_ras_get_context(adev))
		return amdgpu_ras_get_context(adev)->error_query_ready;

	return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
	struct ras_err_data err_data;
	struct eeprom_table_record err_rec;
	int ret;

	if ((address >= adev->gmc.mc_vram_size) ||
	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
		dev_warn(adev->dev,
			 "RAS WARN: input address 0x%llx is invalid.\n",
			 address);
		return -EINVAL;
	}

	if (amdgpu_ras_check_bad_page(adev, address)) {
		dev_warn(adev->dev,
			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
			 address);
		return 0;
	}

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
	err_data.err_addr = &err_rec;
	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					 err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

	amdgpu_ras_error_data_fini(&err_data);

	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
	dev_warn(adev->dev, "Clear EEPROM:\n");
	dev_warn(adev->dev, "	echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

	return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);
	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.write = NULL,
	.llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_string[i]) == 0)
			return 0;
	}
	return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;
	/* default value is 0 if the mask is not set by user */
	u32 instance_mask = 0;

	if (*pos)
		return -EINVAL;
	*pos = size;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (strstr(str, "retire_page") != NULL)
		op = 3;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but commands are not matched. */
		return -EINVAL;

	if (op != -1) {
		if (op == 3) {
			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
			    sscanf(str, "%*s %llu", &address) != 1)
				return -EINVAL;

			data->op = op;
			data->inject.address = address;

			return 0;
		}

		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue, ce and poison errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else if (!memcmp("poison", err, 6))
			data->head.type = AMDGPU_RAS_ERROR__POISON;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
				   &sub_block, &address, &value) != 3 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu",
				   &sub_block, &address, &value) != 3)
				return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
			data->inject.instance_mask = instance_mask;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}

351
352static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
353 struct ras_debug_if *data)
354{
355 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
356 uint32_t mask, inst_mask = data->inject.instance_mask;
357
358 /* no need to set instance mask if there is only one instance */
359 if (num_xcc <= 1 && inst_mask) {
360 data->inject.instance_mask = 0;
361 dev_dbg(adev->dev,
362 "RAS inject mask(0x%x) isn't supported and force it to 0.\n",
363 inst_mask);
364
365 return;
366 }
367
368 switch (data->head.block) {
369 case AMDGPU_RAS_BLOCK__GFX:
370 mask = GENMASK(num_xcc - 1, 0);
371 break;
372 case AMDGPU_RAS_BLOCK__SDMA:
373 mask = GENMASK(adev->sdma.num_instances - 1, 0);
374 break;
375 case AMDGPU_RAS_BLOCK__VCN:
376 case AMDGPU_RAS_BLOCK__JPEG:
377 mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
378 break;
379 default:
380 mask = inst_mask;
381 break;
382 }
383
384 /* remove invalid bits in instance mask */
385 data->inject.instance_mask &= mask;
386 if (inst_mask != data->inject.instance_mask)
387 dev_dbg(adev->dev,
388 "Adjust RAS inject mask 0x%x to 0x%x\n",
389 inst_mask, data->inject.instance_mask);
390}
391
/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members, they are block, type, sub_block_index, name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, say, GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has three more members than head, they are address, value and mask.
 * As their names indicate, inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where N is the card which you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 * see ras_block_string[] for details
 *
 * The error type is one of: ue, ce and poison where,
 * ue is multi-uncorrectable
 * ce is single-correctable
 * poison is poison
 *
 * The sub-block is the sub-block index, pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, leading 0x is optional.
 * The mask is the instance mask; it is optional and defaults to 0x1.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * .. note::
 *	Operations are only allowed on blocks which are supported.
 *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *	to see which blocks support RAS on a particular asic.
 *
 */
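
/*
 * Illustrative userspace sketch (not part of the driver) of the
 * "in a program" path described above: it assumes card 0 and a
 * struct ras_debug_if already filled in from the driver headers.
 * Binary writes are handled by amdgpu_ras_debugfs_ctrl_parse_data()
 * when the input does not match one of the ASCII commands.
 *
 * .. code-block:: c
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int ras_ctrl_write(const struct ras_debug_if *data)
 *	{
 *		int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *		ssize_t n = -1;
 *
 *		if (fd >= 0) {
 *			n = write(fd, data, sizeof(*data));
 *			close(fd);
 *		}
 *		return n == (ssize_t)sizeof(*data) ? 0 : -1;
 *	}
 */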
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
					     const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev,
			 "RAS WARN: error injection currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size &&
		     adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev,
				 "RAS WARN: input address 0x%llx is invalid.",
				 data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev,
				 "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
				 data.inject.address);
			break;
		}

		amdgpu_ras_instance_mask_check(adev, &data);

		/* data.inject.address is offset instead of absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experienced ECC errors in VRAM. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset EEPROM table to 0 entries.
 *
 */
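
/*
 * One way to confirm the reset took effect is through the neighbouring
 * debugfs entries created in amdgpu_ras_debugfs_create_ctrl_node()
 * below (card 0 assumed); the stored record count should read back as
 * zero:
 *
 * .. code-block:: bash
 *
 *	echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset
 *	cat /sys/kernel/debug/dri/0/ras/ras_num_recs
 */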
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
					       const char __user *buf,
					       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
		&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (!ret) {
		/* Something was written to EEPROM.
		 */
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return ret;
	}
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts. For the UMC block, a third line reports the deferred
 * (de) error count.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *	ue: 0
 *	ce: 1
 *
 */
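
/*
 * For example, reading the UMC counter on card 0 (assuming UMC RAS is
 * supported); the UMC entry additionally reports the deferred (de)
 * line, see amdgpu_ras_sysfs_read() below:
 *
 * .. code-block:: bash
 *
 *	$ cat /sys/class/drm/card0/device/ras/umc_err_count
 *	ue: 0
 *	ce: 1
 *	de: 0
 */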
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
				  "ce", info.ce_count, "de", info.de_count);
	else
		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
				  "ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && (--obj->use == 0)) {
		list_del(&obj->node);
		amdgpu_ras_error_data_fini(&obj->err_data);
	}

	if (obj && (obj->use < 0))
		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
			return NULL;

		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
	} else
		obj = &con->objs[head->block];

	/* already exists, do not create it again */
	if (alive_obj(obj))
		return NULL;

	if (amdgpu_ras_error_data_init(&obj->err_data))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
				return NULL;

			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
		} else
			obj = &con->objs[head->block];

		if (alive_obj(obj))
			return obj;
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj))
				return obj;
		}
	}

	return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware support ras, we can create the obj.
	 * Ras framework checks con->hw_supported to see if it needs to do the
	 * corresponding initialization.
	 * IP checks con->support to see if it needs to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret;

	if (!con)
		return -EINVAL;

	/* For non-gfx ip, do not enable ras feature if it is not allowed */
	/* For gfx ip, regardless of feature support status, */
	/* Force issue enable or disable ras feature commands */
	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	/* Only enable gfx ras feature from host side */
	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_sriov_vf(adev) &&
	    !amdgpu_ras_intr_triggered()) {
		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		if (!enable) {
			info->disable_features = (struct ta_ras_disable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		} else {
			info->enable_features = (struct ta_ras_enable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		}

		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
				enable ? "enable":"disable",
				get_ras_block_str(head),
				amdgpu_ras_is_poison_mode_supported(adev), ret);
			kfree(info);
			return ret;
		}

		kfree(info);
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);

	return 0;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm to issue a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing
			 * But sometimes it requests driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With old ras TA, we might fail to enable ras.
			 * Log it and just setup the object.
			 * TODO need remove this WA in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					dev_info(adev->dev,
						 "RAS INFO: %s setup object\n",
						 get_ras_block_str(head));
			}
		} else {
			/* setup the object then issue a ras TA disable cmd.*/
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			/* gfx block ras disable cmd must send to ras-ta */
			if (head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features |= BIT(head->block);

			ret = amdgpu_ras_feature_enable(adev, head, 0);

			/* clean gfx block ras features flag */
			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features &= ~BIT(head->block);
		}
	} else
		ret = amdgpu_ras_feature_enable(adev, head, enable);

	return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		/* bypass psp.
		 * aka just release the obj and corresponding flags
		 */
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int i;
	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = default_ras_type,
			.sub_block_index = 0,
		};

		if (i == AMDGPU_RAS_BLOCK__MCA)
			continue;

		if (bypass) {
			/*
			 * bypass psp. vbios enable ras for us.
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = AMDGPU_RAS_BLOCK__MCA,
			.type = default_ras_type,
			.sub_block_index = i,
		};

		if (bypass) {
			/*
			 * bypass psp. vbios enable ras for us.
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
		enum amdgpu_ras_block block)
{
	if (!block_obj)
		return -EINVAL;

	if (block_obj->ras_comm.block == block)
		return 0;

	return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
					enum amdgpu_ras_block block, uint32_t sub_block_index)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;

	if (block >= AMDGPU_RAS_BLOCK__LAST)
		return NULL;

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}

		obj = node->ras_obj;
		if (obj->ras_block_match) {
			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
				return obj;
		} else {
			if (amdgpu_ras_block_match_default(obj, block) == 0)
				return obj;
		}
	}

	return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/*
	 * choosing the right query method according to
	 * whether the smu supports querying error information
	 */
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
	if (ret == -EOPNOTSUPP) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
	} else if (!ret) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address)
			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
	}
}

static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
					      struct ras_manager *ras_mgr,
					      struct ras_err_data *err_data,
					      const char *blk_name,
					      bool is_ue,
					      bool is_de)
{
	struct amdgpu_smuio_mcm_config_info *mcm_info;
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;

	if (is_ue) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			if (err_info->ue_count) {
				dev_info(adev->dev, "socket: %d, die: %d, "
					 "%lld new uncorrectable hardware errors detected in %s block\n",
					 mcm_info->socket_id,
					 mcm_info->die_id,
					 err_info->ue_count,
					 blk_name);
			}
		}

		for_each_ras_error(err_node, &ras_mgr->err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			dev_info(adev->dev, "socket: %d, die: %d, "
				 "%lld uncorrectable hardware errors detected in total in %s block\n",
				 mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
		}

	} else {
		if (is_de) {
			for_each_ras_error(err_node, err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				if (err_info->de_count) {
					dev_info(adev->dev, "socket: %d, die: %d, "
						 "%lld new deferred hardware errors detected in %s block\n",
						 mcm_info->socket_id,
						 mcm_info->die_id,
						 err_info->de_count,
						 blk_name);
				}
			}

			for_each_ras_error(err_node, &ras_mgr->err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				dev_info(adev->dev, "socket: %d, die: %d, "
					 "%lld deferred hardware errors detected in total in %s block\n",
					 mcm_info->socket_id, mcm_info->die_id,
					 err_info->de_count, blk_name);
			}
		} else {
			for_each_ras_error(err_node, err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				if (err_info->ce_count) {
					dev_info(adev->dev, "socket: %d, die: %d, "
						 "%lld new correctable hardware errors detected in %s block\n",
						 mcm_info->socket_id,
						 mcm_info->die_id,
						 err_info->ce_count,
						 blk_name);
				}
			}

			for_each_ras_error(err_node, &ras_mgr->err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				dev_info(adev->dev, "socket: %d, die: %d, "
					 "%lld correctable hardware errors detected in total in %s block\n",
					 mcm_info->socket_id, mcm_info->die_id,
					 err_info->ce_count, blk_name);
			}
		}
	}
}

static inline bool err_data_has_source_info(struct ras_err_data *data)
{
	return !list_empty(&data->err_node_list);
}

static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
					     struct ras_query_if *query_if,
					     struct ras_err_data *err_data)
{
	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
	const char *blk_name = get_ras_block_str(&query_if->head);

	if (err_data->ce_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
							  blk_name, false, false);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld correctable hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.ce_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld correctable hardware errors "
				 "detected in %s block\n",
				 ras_mgr->err_data.ce_count,
				 blk_name);
		}
	}

	if (err_data->ue_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
							  blk_name, true, false);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.ue_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 ras_mgr->err_data.ue_count,
				 blk_name);
		}
	}

	if (err_data->de_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
							  blk_name, false, true);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld deferred hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.de_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld deferred hardware errors "
				 "detected in %s block\n",
				 ras_mgr->err_data.de_count,
				 blk_name);
		}
	}
}

static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
{
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;

	if (err_data_has_source_info(err_data)) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			amdgpu_ras_error_statistic_de_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->de_count);
			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->ce_count);
			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->ue_count);
		}
	} else {
		/* for legacy asic path which doesn't have error source info */
		obj->err_data.ue_count += err_data->ue_count;
		obj->err_data.ce_count += err_data->ce_count;
		obj->err_data.de_count += err_data->de_count;
	}
}

static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
	struct ras_common_if head;

	memset(&head, 0, sizeof(head));
	head.block = blk;

	return amdgpu_ras_find_obj(adev, &head);
}

int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
			const struct aca_info *aca_info, void *data)
{
	struct ras_manager *obj;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
}

int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
	struct ras_manager *obj;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	amdgpu_aca_remove_handle(&obj->aca_handle);

	return 0;
}

static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
					 enum aca_error_type type, struct ras_err_data *err_data)
{
	struct ras_manager *obj;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data);
}

ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
				  struct aca_handle *handle, char *buf, void *data)
{
	struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			  "ce", info.ce_count);
}

static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
						struct ras_query_if *info,
						struct ras_err_data *err_data,
						unsigned int error_query_mode)
{
	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
	struct amdgpu_ras_block_object *block_obj = NULL;
	int ret;

	if (blk == AMDGPU_RAS_BLOCK_COUNT)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
			amdgpu_ras_get_ecc_info(adev, err_data);
		} else {
			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
			if (!block_obj || !block_obj->hw_ops) {
				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
					     get_ras_block_str(&info->head));
				return -EINVAL;
			}

			if (block_obj->hw_ops->query_ras_error_count)
				block_obj->hw_ops->query_ras_error_count(adev, err_data);

			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
				if (block_obj->hw_ops->query_ras_error_status)
					block_obj->hw_ops->query_ras_error_status(adev);
			}
		}
	} else {
		if (amdgpu_aca_is_enabled(adev)) {
			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data);
			if (ret)
				return ret;

			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data);
			if (ret)
				return ret;
		} else {
			/* FIXME: add code to check return value later */
			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data);
			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data);
		}
	}

	return 0;
}

/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data;
	unsigned int error_query_mode;
	int ret;

	if (!obj)
		return -EINVAL;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	/* don't leak the just-initialized err_data on the error path */
	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
		ret = -EINVAL;
		goto out_fini_err_data;
	}

	ret = amdgpu_ras_query_error_status_helper(adev, info,
						   &err_data,
						   error_query_mode);
	if (ret)
		goto out_fini_err_data;

	amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;
	info->de_count = obj->err_data.de_count;

	amdgpu_ras_error_generate_report(adev, info, &err_data);

out_fini_err_data:
	amdgpu_ras_error_data_fini(&err_data);

	return ret;
}

int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
	struct amdgpu_hive_info *hive;
	int hive_ras_recovery = 0;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     ras_block_str(block));
		return -EOPNOTSUPP;
	}

	if (!amdgpu_ras_is_supported(adev, block) ||
	    !amdgpu_ras_get_aca_debug_mode(adev))
		return -EOPNOTSUPP;

	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		hive_ras_recovery = atomic_read(&hive->ras_recovery);
		amdgpu_put_xgmi_hive(hive);
	}

	/* skip ras error reset in gpu reset */
	if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
	     hive_ras_recovery) &&
	    ((smu_funcs && smu_funcs->set_debug_mode) ||
	     (mca_funcs && mca_funcs->mca_set_debug_mode)))
		return -EOPNOTSUPP;

	if (block_obj->hw_ops->reset_ras_error_count)
		block_obj->hw_ops->reset_ras_error_count(adev);

	return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
		return 0;

	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
		if (block_obj->hw_ops->reset_ras_error_status)
			block_obj->hw_ops->reset_ras_error_status(adev);
	}

	return 0;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = -EINVAL;
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
							info->head.block,
							info->head.sub_block_index);

	/* inject on guest isn't allowed, return success directly */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!obj)
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return -EINVAL;
	}

	/* Calculate XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	if (block_obj->hw_ops->ras_error_inject) {
		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
		else /* Special ras_error_inject is defined (e.g: xgmi) */
			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
								  info->instance_mask);
	} else {
		/* default path */
		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
	}

	if (ret)
		dev_err(adev->dev, "ras inject %s failed %d\n",
			get_ras_block_str(&info->head), ret);

	return ret;
}

/**
 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 for query success or do nothing, otherwise return an error
 * on failures
 */
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
					       unsigned long *ce_count,
					       unsigned long *ue_count,
					       struct ras_query_if *query_info)
{
	int ret;

	if (!query_info)
		/* do nothing if query_info is not specified */
		return 0;

	ret = amdgpu_ras_query_error_status(adev, query_info);
	if (ret)
		return ret;

	*ce_count += query_info->ce_count;
	*ue_count += query_info->ue_count;

	/* some hardware/IP supports read to clear
	 * no need to explicitly reset the err status after the query call */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
			dev_warn(adev->dev,
				 "Failed to reset error counter and error status\n");
	}

	return 0;
}

/**
 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 * @query_info: pointer to ras_query_if if the query request is only for
 * specific ip block; if info is NULL, then the query request is for
 * all the ip blocks that support query ras error counters/status
 *
 * If set, @ce_count or @ue_count, count and return the corresponding
 * error counts in those integer pointers. Return 0 if the device
 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
 */
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
				 unsigned long *ce_count,
				 unsigned long *ue_count,
				 struct ras_query_if *query_info)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	unsigned long ce, ue;
	int ret = 0;	/* stays 0 if there is nothing to query */

	if (!adev->ras_enabled || !con)
		return -EOPNOTSUPP;

	/* Don't count since no reporting.
	 */
	if (!ce_count && !ue_count)
		return 0;

	ce = 0;
	ue = 0;
	if (!query_info) {
		/* query all the ip blocks that support ras query interface */
		list_for_each_entry(obj, &con->head, node) {
			struct ras_query_if info = {
				.head = obj->head,
			};

			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
		}
	} else {
		/* query specific ip block */
		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
	}

	if (ret)
		return ret;

	if (ce_count)
		*ce_count = ce;

	if (ue_count)
		*ue_count = ue;

	return 0;
}
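
/*
 * Illustrative (hypothetical) caller of amdgpu_ras_query_error_count()
 * above, totalling errors across all RAS-capable IP blocks by passing a
 * NULL query_info:
 *
 * .. code-block:: c
 *
 *	unsigned long ce_count = 0, ue_count = 0;
 *
 *	if (!amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL))
 *		dev_info(adev->dev, "RAS totals: ce %lu, ue %lu\n",
 *			 ce_count, ue_count);
 */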
/* query/inject/cure end */

/* sysfs begin */

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
}

/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of below character,
 *
 * R: reserved, this gpu page is reserved and cannot be used.
 *
 * P: pending for reserve, this gpu page is marked as bad, will be reserved
 * in next window of page_reserve.
 *
 * F: unable to reserve. This gpu page cannot be reserved for some reason.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P
 *
 */
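
/*
 * For example (card 0 assumed, output shown for a board with one
 * reserved bad page):
 *
 * .. code-block:: bash
 *
 *	$ cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 *	0x00000001 : 0x00001000 : R
 */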

static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t ppos, size_t count)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, badpages_attr);
	struct amdgpu_device *adev = con->adev;
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
	ssize_t s = 0;
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
		return 0;

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
			       "0x%08x : 0x%08x : %1s\n",
			       bps[start].bp,
			       bps[start].size,
			       amdgpu_ras_badpage_flags_str(bps[start].flags));

	kfree(bps);

	return s;
}

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);

	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}

static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, version_attr);

	return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
}

static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, schema_attr);

	return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
}

static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
					     &con->badpages_attr.attr,
					     RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		&con->version_attr.attr,
		&con->schema_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = RAS_FS_NAME,
		.attrs = attrs,
	};

	if (adev->dev->kobj.sd)
		sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;
}

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || obj->attr_inuse)
		return -EINVAL;

	get_obj(obj);

	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
		 "%s_err_count", head->name);

	obj->sysfs_attr = (struct device_attribute){
		.attr = {
			.name = obj->fs_data.sysfs_name,
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_read,
	};
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				    &obj->sysfs_attr.attr,
				    RAS_FS_NAME)) {
		put_obj(obj);
		return -EINVAL;
	}

	obj->attr_inuse = 1;

	return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
					     &obj->sysfs_attr.attr,
					     RAS_FS_NAME);
	obj->attr_inuse = 0;
	put_obj(obj);

	return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);
	}

	if (amdgpu_bad_page_threshold != 0)
		amdgpu_ras_sysfs_remove_bad_page_node(adev);

	amdgpu_ras_sysfs_remove_dev_attr_node(adev);

	return 0;
}
/* sysfs end */

/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover. However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo true > .../ras/auto_reboot
 *
 */
/* debugfs begin */
static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *dir;

	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_ctrl_ops);
	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_ops);
	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
			   &con->bad_page_cnt_threshold);
	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_size_ops);
	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
						       S_IRUGO, dir, adev,
						       &amdgpu_ras_debugfs_eeprom_table_ops);
	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);

	/*
	 * After one uncorrectable error happens, usually GPU recovery will
	 * be scheduled. But due to the known problem in GPU recovery failing
	 * to bring GPU back, the interface below provides the user a direct
	 * way to reboot the system automatically when an
	 * ERREVENT_ATHUB_INTERRUPT is generated; the normal GPU recovery
	 * routine will then never be called.
	 */
	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);

	/*
	 * User could set this not to clean up hardware's error count register
	 * of RAS IPs during ras recovery.
	 */
	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
			    &con->disable_ras_err_cnt_harvest);
	return dir;
}
1867
1868static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1869 struct ras_fs_if *head,
1870 struct dentry *dir)
1871{
1872 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1873
1874 if (!obj || !dir)
1875 return;
1876
1877 get_obj(obj);
1878
1879 memcpy(obj->fs_data.debugfs_name,
1880 head->debugfs_name,
1881 sizeof(obj->fs_data.debugfs_name));
1882
1883 debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1884 obj, &amdgpu_ras_debugfs_ops);
1885}
1886
1887void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1888{
1889 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1890 struct dentry *dir;
1891 struct ras_manager *obj;
1892 struct ras_fs_if fs_info;
1893
1894 /*
	 * this won't be called in the resume path, so there is no need to
	 * check suspend and gpu reset status
1897 */
1898 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1899 return;
1900
1901 dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1902
1903 list_for_each_entry(obj, &con->head, node) {
1904 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1905 (obj->attr_inuse == 1)) {
1906 sprintf(fs_info.debugfs_name, "%s_err_inject",
1907 get_ras_block_str(&obj->head));
1908 fs_info.head = obj->head;
1909 amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1910 }
1911 }
1912
1913 if (amdgpu_aca_is_enabled(adev))
1914 amdgpu_aca_smu_debugfs_init(adev, dir);
1915 else
1916 amdgpu_mca_smu_debugfs_init(adev, dir);
1917}
1918
1919/* debugfs end */
1920
1921/* ras fs */
1922static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
1923 amdgpu_ras_sysfs_badpages_read, NULL, 0);
1924static DEVICE_ATTR(features, S_IRUGO,
1925 amdgpu_ras_sysfs_features_read, NULL);
1926static DEVICE_ATTR(version, 0444,
1927 amdgpu_ras_sysfs_version_show, NULL);
1928static DEVICE_ATTR(schema, 0444,
1929 amdgpu_ras_sysfs_schema_show, NULL);
1930static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
1931{
1932 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1933 struct attribute_group group = {
1934 .name = RAS_FS_NAME,
1935 };
1936 struct attribute *attrs[] = {
1937 &con->features_attr.attr,
1938 &con->version_attr.attr,
1939 &con->schema_attr.attr,
1940 NULL
1941 };
1942 struct bin_attribute *bin_attrs[] = {
1943 NULL,
1944 NULL,
1945 };
1946 int r;
1947
1948 group.attrs = attrs;
1949
1950 /* add features entry */
1951 con->features_attr = dev_attr_features;
1952 sysfs_attr_init(attrs[0]);
1953
1954 /* add version entry */
1955 con->version_attr = dev_attr_version;
1956 sysfs_attr_init(attrs[1]);
1957
1958 /* add schema entry */
1959 con->schema_attr = dev_attr_schema;
1960 sysfs_attr_init(attrs[2]);
1961
1962 if (amdgpu_bad_page_threshold != 0) {
1963 /* add bad_page_features entry */
1964 bin_attr_gpu_vram_bad_pages.private = NULL;
1965 con->badpages_attr = bin_attr_gpu_vram_bad_pages;
1966 bin_attrs[0] = &con->badpages_attr;
1967 group.bin_attrs = bin_attrs;
1968 sysfs_bin_attr_init(bin_attrs[0]);
1969 }
1970
1971 r = sysfs_create_group(&adev->dev->kobj, &group);
1972 if (r)
1973 dev_err(adev->dev, "Failed to create RAS sysfs group!");
1974
1975 return 0;
1976}
1977
1978static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
1979{
1980 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1981 struct ras_manager *con_obj, *ip_obj, *tmp;
1982
1983 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1984 list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
1985 ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
1986 if (ip_obj)
1987 put_obj(ip_obj);
1988 }
1989 }
1990
1991 amdgpu_ras_sysfs_remove_all(adev);
1992 return 0;
1993}
1994/* ras fs end */
1995
1996/* ih begin */
1997
1998/* For the hardware that cannot enable bif ring for both ras_controller_irq
 * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
 * register to check whether the interrupt has triggered, and properly
2001 * ack the interrupt if it is there
2002 */
2003void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2004{
2005 /* Fatal error events are handled on host side */
2006 if (amdgpu_sriov_vf(adev))
2007 return;
2008
2009 if (adev->nbio.ras &&
2010 adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2011 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2012
2013 if (adev->nbio.ras &&
2014 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2015 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2016}
2017
2018static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2019 struct amdgpu_iv_entry *entry)
2020{
2021 bool poison_stat = false;
2022 struct amdgpu_device *adev = obj->adev;
2023 struct amdgpu_ras_block_object *block_obj =
2024 amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2025
2026 if (!block_obj)
2027 return;
2028
2029 /* both query_poison_status and handle_poison_consumption are optional,
	 * but at least one of them should be implemented if a poison
	 * consumption handler is needed
2032 */
2033 if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2034 poison_stat = block_obj->hw_ops->query_poison_status(adev);
2035 if (!poison_stat) {
2036 /* Not poison consumption interrupt, no need to handle it */
2037 dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2038 block_obj->ras_comm.name);
2039
2040 return;
2041 }
2042 }
2043
2044 amdgpu_umc_poison_handler(adev, obj->head.block, false);
2045
2046 if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
2047 poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2048
2049 /* gpu reset is fallback for failed and default cases */
2050 if (poison_stat) {
2051 dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
2052 block_obj->ras_comm.name);
2053 amdgpu_ras_reset_gpu(adev);
2054 } else {
2055 amdgpu_gfx_poison_consumption_handler(adev, entry);
2056 }
2057}
2058
2059static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2060 struct amdgpu_iv_entry *entry)
2061{
2062 dev_info(obj->adev->dev,
2063 "Poison is created\n");
2064}
2065
2066static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2067 struct amdgpu_iv_entry *entry)
2068{
2069 struct ras_ih_data *data = &obj->ih_data;
2070 struct ras_err_data err_data;
2071 int ret;
2072
2073 if (!data->cb)
2074 return;
2075
2076 ret = amdgpu_ras_error_data_init(&err_data);
2077 if (ret)
2078 return;
2079
	/* Let the IP handle its data; we may need to get the output
	 * from the callback to update the error type/count, etc.
2082 */
2083 ret = data->cb(obj->adev, &err_data, entry);
	/* A ue will trigger an interrupt, and in that case
	 * we need to do a reset to recover the whole system.
	 * But leave it to the IP to do that recovery; here we
	 * just dispatch the error.
2088 */
2089 if (ret == AMDGPU_RAS_SUCCESS) {
		/* these counts may be left as 0 if
		 * some blocks do not count error numbers
2092 */
2093 obj->err_data.ue_count += err_data.ue_count;
2094 obj->err_data.ce_count += err_data.ce_count;
2095 obj->err_data.de_count += err_data.de_count;
2096 }
2097
2098 amdgpu_ras_error_data_fini(&err_data);
2099}
2100
2101static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
2102{
2103 struct ras_ih_data *data = &obj->ih_data;
2104 struct amdgpu_iv_entry entry;
2105
2106 while (data->rptr != data->wptr) {
2107 rmb();
2108 memcpy(&entry, &data->ring[data->rptr],
2109 data->element_size);
2110
2111 wmb();
2112 data->rptr = (data->aligned_element_size +
2113 data->rptr) % data->ring_size;
2114
2115 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
2116 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2117 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
2118 else
2119 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
2120 } else {
2121 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2122 amdgpu_ras_interrupt_umc_handler(obj, &entry);
2123 else
2124 dev_warn(obj->adev->dev,
2125 "No RAS interrupt handler for non-UMC block with poison disabled.\n");
2126 }
2127 }
2128}
2129
2130static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
2131{
2132 struct ras_ih_data *data =
2133 container_of(work, struct ras_ih_data, ih_work);
2134 struct ras_manager *obj =
2135 container_of(data, struct ras_manager, ih_data);
2136
2137 amdgpu_ras_interrupt_handler(obj);
2138}
2139
2140int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
2141 struct ras_dispatch_if *info)
2142{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data;

	if (!obj)
		return -EINVAL;

	data = &obj->ih_data;
2148
2149 if (data->inuse == 0)
2150 return 0;
2151
	/* Might overflow if entries arrive faster than the worker
	 * drains them...
	 */
2153 memcpy(&data->ring[data->wptr], info->entry,
2154 data->element_size);
2155
2156 wmb();
2157 data->wptr = (data->aligned_element_size +
2158 data->wptr) % data->ring_size;
2159
2160 schedule_work(&data->ih_work);
2161
2162 return 0;
2163}
2164
2165int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2166 struct ras_common_if *head)
2167{
2168 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2169 struct ras_ih_data *data;
2170
2171 if (!obj)
2172 return -EINVAL;
2173
2174 data = &obj->ih_data;
2175 if (data->inuse == 0)
2176 return 0;
2177
2178 cancel_work_sync(&data->ih_work);
2179
2180 kfree(data->ring);
2181 memset(data, 0, sizeof(*data));
2182 put_obj(obj);
2183
2184 return 0;
2185}
2186
2187int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2188 struct ras_common_if *head)
2189{
2190 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2191 struct ras_ih_data *data;
2192 struct amdgpu_ras_block_object *ras_obj;
2193
2194 if (!obj) {
		/* in case we register the IH before enabling the ras feature */
2196 obj = amdgpu_ras_create_obj(adev, head);
2197 if (!obj)
2198 return -EINVAL;
2199 } else
2200 get_obj(obj);
2201
2202 ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2203
2204 data = &obj->ih_data;
	/* add the callback, etc. */
2206 *data = (struct ras_ih_data) {
2207 .inuse = 0,
2208 .cb = ras_obj->ras_cb,
2209 .element_size = sizeof(struct amdgpu_iv_entry),
2210 .rptr = 0,
2211 .wptr = 0,
2212 };
2213
2214 INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2215
2216 data->aligned_element_size = ALIGN(data->element_size, 8);
2217 /* the ring can store 64 iv entries. */
2218 data->ring_size = 64 * data->aligned_element_size;
2219 data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2220 if (!data->ring) {
2221 put_obj(obj);
2222 return -ENOMEM;
2223 }
2224
2225 /* IH is ready */
2226 data->inuse = 1;
2227
2228 return 0;
2229}
2230
2231static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2232{
2233 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2234 struct ras_manager *obj, *tmp;
2235
2236 list_for_each_entry_safe(obj, tmp, &con->head, node) {
2237 amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2238 }
2239
2240 return 0;
2241}
2242/* ih end */
2243
/* traverse all IPs except NBIO to query error counters */
2245static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
2246{
2247 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2248 struct ras_manager *obj;
2249
2250 if (!adev->ras_enabled || !con)
2251 return;
2252
2253 list_for_each_entry(obj, &con->head, node) {
2254 struct ras_query_if info = {
2255 .head = obj->head,
2256 };
2257
2258 /*
		 * The PCIE_BIF IP has a separate isr for the ras controller
		 * interrupt; the block-specific ras counter query is
		 * done in that isr. So skip this block in the common
		 * sync flood interrupt isr path.
2263 */
2264 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2265 continue;
2266
2267 /*
		 * this is a workaround for aldebaran: skip sending the msg
		 * to smu to get the ecc_info table, because smu temporarily
		 * fails to handle it.
		 * it should be removed once smu handles the ecc_info table.
2272 */
2273 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2274 (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2275 IP_VERSION(13, 0, 2)))
2276 continue;
2277
2278 amdgpu_ras_query_error_status(adev, &info);
2279
2280 if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2281 IP_VERSION(11, 0, 2) &&
2282 amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2283 IP_VERSION(11, 0, 4) &&
2284 amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2285 IP_VERSION(13, 0, 0)) {
2286 if (amdgpu_ras_reset_error_status(adev, info.head.block))
2287 dev_warn(adev->dev, "Failed to reset error counter and error status");
2288 }
2289 }
2290}
2291
2292/* Parse RdRspStatus and WrRspStatus */
2293static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2294 struct ras_query_if *info)
2295{
2296 struct amdgpu_ras_block_object *block_obj;
2297 /*
	 * Only two blocks need to query the read/write
	 * RspStatus at the current state
2300 */
2301 if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2302 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2303 return;
2304
2305 block_obj = amdgpu_ras_get_ras_block(adev,
2306 info->head.block,
2307 info->head.sub_block_index);
2308
2309 if (!block_obj || !block_obj->hw_ops) {
2310 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2311 get_ras_block_str(&info->head));
2312 return;
2313 }
2314
2315 if (block_obj->hw_ops->query_ras_error_status)
2316 block_obj->hw_ops->query_ras_error_status(adev);
2317
2318}
2319
2320static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2321{
2322 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2323 struct ras_manager *obj;
2324
2325 if (!adev->ras_enabled || !con)
2326 return;
2327
2328 list_for_each_entry(obj, &con->head, node) {
2329 struct ras_query_if info = {
2330 .head = obj->head,
2331 };
2332
2333 amdgpu_ras_error_status_query(adev, &info);
2334 }
2335}
2336
2337/* recovery begin */
2338
2339/* return 0 on success.
 * the caller needs to free bps.
2341 */
2342static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2343 struct ras_badpage **bps, unsigned int *count)
2344{
2345 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2346 struct ras_err_handler_data *data;
2347 int i = 0;
2348 int ret = 0, status;
2349
2350 if (!con || !con->eh_data || !bps || !count)
2351 return -EINVAL;
2352
2353 mutex_lock(&con->recovery_lock);
2354 data = con->eh_data;
2355 if (!data || data->count == 0) {
2356 *bps = NULL;
2357 ret = -EINVAL;
2358 goto out;
2359 }
2360
2361 *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
2362 if (!*bps) {
2363 ret = -ENOMEM;
2364 goto out;
2365 }
2366
2367 for (; i < data->count; i++) {
2368 (*bps)[i] = (struct ras_badpage){
2369 .bp = data->bps[i].retired_page,
2370 .size = AMDGPU_GPU_PAGE_SIZE,
2371 .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2372 };
2373 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2374 data->bps[i].retired_page);
2375 if (status == -EBUSY)
2376 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2377 else if (status == -ENOENT)
2378 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2379 }
2380
2381 *count = data->count;
2382out:
2383 mutex_unlock(&con->recovery_lock);
2384 return ret;
2385}
2386
2387static void amdgpu_ras_do_recovery(struct work_struct *work)
2388{
2389 struct amdgpu_ras *ras =
2390 container_of(work, struct amdgpu_ras, recovery_work);
2391 struct amdgpu_device *remote_adev = NULL;
2392 struct amdgpu_device *adev = ras->adev;
2393 struct list_head device_list, *device_list_handle = NULL;
2394 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2395
2396 if (hive)
2397 atomic_set(&hive->ras_recovery, 1);
2398 if (!ras->disable_ras_err_cnt_harvest) {
2399
2400 /* Build list of devices to query RAS related errors */
2401 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2402 device_list_handle = &hive->device_list;
2403 } else {
2404 INIT_LIST_HEAD(&device_list);
2405 list_add_tail(&adev->gmc.xgmi.head, &device_list);
2406 device_list_handle = &device_list;
2407 }
2408
2409 list_for_each_entry(remote_adev,
2410 device_list_handle, gmc.xgmi.head) {
2411 amdgpu_ras_query_err_status(remote_adev);
2412 amdgpu_ras_log_on_err_counter(remote_adev);
2413 }
2414
2415 }
2416
2417 if (amdgpu_device_should_recover_gpu(ras->adev)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));
2420
2421 reset_context.method = AMD_RESET_METHOD_NONE;
2422 reset_context.reset_req_dev = adev;
2423
2424 /* Perform full reset in fatal error mode */
2425 if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2426 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2427 else {
2428 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2429
2430 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2431 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2432 reset_context.method = AMD_RESET_METHOD_MODE2;
2433 }
2434
			/* A fatal error occurred in poison mode; mode1 reset
			 * is used to recover the gpu.
2437 */
2438 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2439 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2440 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2441
2442 /* For any RAS error that needs a full reset to
2443 * recover, set the fatal error status
2444 */
2445 if (hive) {
2446 list_for_each_entry(remote_adev,
2447 &hive->device_list,
2448 gmc.xgmi.head)
2449 amdgpu_ras_set_fed(remote_adev,
2450 true);
2451 } else {
2452 amdgpu_ras_set_fed(adev, true);
2453 }
2454 psp_fatal_error_recovery_quirk(&adev->psp);
2455 }
2456 }
2457
2458 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2459 }
2460 atomic_set(&ras->in_recovery, 0);
2461 if (hive) {
2462 atomic_set(&hive->ras_recovery, 0);
2463 amdgpu_put_xgmi_hive(hive);
2464 }
2465}
2466
2467/* alloc/realloc bps array */
2468static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2469 struct ras_err_handler_data *data, int pages)
2470{
2471 unsigned int old_space = data->count + data->space_left;
2472 unsigned int new_space = old_space + pages;
2473 unsigned int align_space = ALIGN(new_space, 512);
2474 void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2475
	if (!bps)
		return -ENOMEM;
2479
2480 if (data->bps) {
2481 memcpy(bps, data->bps,
2482 data->count * sizeof(*data->bps));
2483 kfree(data->bps);
2484 }
2485
2486 data->bps = bps;
2487 data->space_left += align_space - old_space;
2488 return 0;
2489}
2490
/* it deals with vram only. */
2492int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2493 struct eeprom_table_record *bps, int pages)
2494{
2495 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2496 struct ras_err_handler_data *data;
2497 int ret = 0;
2498 uint32_t i;
2499
2500 if (!con || !con->eh_data || !bps || pages <= 0)
2501 return 0;
2502
2503 mutex_lock(&con->recovery_lock);
2504 data = con->eh_data;
2505 if (!data)
2506 goto out;
2507
2508 for (i = 0; i < pages; i++) {
2509 if (amdgpu_ras_check_bad_page_unlock(con,
2510 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2511 continue;
2512
2513 if (!data->space_left &&
2514 amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2515 ret = -ENOMEM;
2516 goto out;
2517 }
2518
2519 amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
2520 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
2521 AMDGPU_GPU_PAGE_SIZE);
2522
2523 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2524 data->count++;
2525 data->space_left--;
2526 }
2527out:
2528 mutex_unlock(&con->recovery_lock);
2529
2530 return ret;
2531}
2532
2533/*
 * write the error record array to eeprom; the function should be
 * protected by recovery_lock
 * new_cnt: newly added UE count, excluding reserved bad pages; can be NULL
2537 */
2538int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2539 unsigned long *new_cnt)
2540{
2541 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2542 struct ras_err_handler_data *data;
2543 struct amdgpu_ras_eeprom_control *control;
2544 int save_count;
2545
2546 if (!con || !con->eh_data) {
2547 if (new_cnt)
2548 *new_cnt = 0;
2549
2550 return 0;
2551 }
2552
2553 mutex_lock(&con->recovery_lock);
2554 control = &con->eeprom_control;
2555 data = con->eh_data;
2556 save_count = data->count - control->ras_num_recs;
2557 mutex_unlock(&con->recovery_lock);
2558
2559 if (new_cnt)
2560 *new_cnt = save_count / adev->umc.retire_unit;
2561
2562 /* only new entries are saved */
2563 if (save_count > 0) {
2564 if (amdgpu_ras_eeprom_append(control,
2565 &data->bps[control->ras_num_recs],
2566 save_count)) {
2567 dev_err(adev->dev, "Failed to save EEPROM table data!");
2568 return -EIO;
2569 }
2570
2571 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2572 }
2573
2574 return 0;
2575}
2576
2577/*
 * read the error record array in eeprom and reserve enough space for
 * storing new bad pages
2580 */
2581static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2582{
2583 struct amdgpu_ras_eeprom_control *control =
2584 &adev->psp.ras_context.ras->eeprom_control;
2585 struct eeprom_table_record *bps;
2586 int ret;
2587
2588 /* no bad page record, skip eeprom access */
2589 if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2590 return 0;
2591
2592 bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2593 if (!bps)
2594 return -ENOMEM;
2595
2596 ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2597 if (ret)
2598 dev_err(adev->dev, "Failed to load EEPROM table records!");
2599 else
2600 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2601
2602 kfree(bps);
2603 return ret;
2604}
2605
2606static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2607 uint64_t addr)
2608{
2609 struct ras_err_handler_data *data = con->eh_data;
2610 int i;
2611
2612 addr >>= AMDGPU_GPU_PAGE_SHIFT;
2613 for (i = 0; i < data->count; i++)
2614 if (addr == data->bps[i].retired_page)
2615 return true;
2616
2617 return false;
2618}
2619
2620/*
2621 * check if an address belongs to bad page
2622 *
2623 * Note: this check is only for umc block
2624 */
2625static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2626 uint64_t addr)
2627{
2628 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2629 bool ret = false;
2630
2631 if (!con || !con->eh_data)
2632 return ret;
2633
2634 mutex_lock(&con->recovery_lock);
2635 ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2636 mutex_unlock(&con->recovery_lock);
2637 return ret;
2638}
2639
2640static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2641 uint32_t max_count)
2642{
2643 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2644
2645 /*
2646 * Justification of value bad_page_cnt_threshold in ras structure
2647 *
2648 * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
2649 * in eeprom or amdgpu_bad_page_threshold == -2, introduce two
2650 * scenarios accordingly.
2651 *
2652 * Bad page retirement enablement:
2653 * - If amdgpu_bad_page_threshold = -2,
2654 * bad_page_cnt_threshold = typical value by formula.
2655 *
2656 * - When the value from user is 0 < amdgpu_bad_page_threshold <
2657 * max record length in eeprom, use it directly.
2658 *
2659 * Bad page retirement disablement:
2660 * - If amdgpu_bad_page_threshold = 0, bad page retirement
2661 * functionality is disabled, and bad_page_cnt_threshold will
2662 * take no effect.
2663 */
2664
2665 if (amdgpu_bad_page_threshold < 0) {
2666 u64 val = adev->gmc.mc_vram_size;
2667
2668 do_div(val, RAS_BAD_PAGE_COVER);
2669 con->bad_page_cnt_threshold = min(lower_32_bits(val),
2670 max_count);
2671 } else {
2672 con->bad_page_cnt_threshold = min_t(int, max_count,
2673 amdgpu_bad_page_threshold);
2674 }
2675}
2676
2677static int amdgpu_ras_page_retirement_thread(void *param)
2678{
2679 struct amdgpu_device *adev = (struct amdgpu_device *)param;
2680 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2681
2682 while (!kthread_should_stop()) {
2683
2684 wait_event_interruptible(con->page_retirement_wq,
2685 kthread_should_stop() ||
2686 atomic_read(&con->page_retirement_req_cnt));
2687
2688 if (kthread_should_stop())
2689 break;
2690
2691 dev_info(adev->dev, "Start processing page retirement. request:%d\n",
2692 atomic_read(&con->page_retirement_req_cnt));
2693
2694 atomic_dec(&con->page_retirement_req_cnt);
2695
2696 amdgpu_umc_bad_page_polling_timeout(adev,
2697 false, MAX_UMC_POISON_POLLING_TIME_ASYNC);
2698 }
2699
2700 return 0;
2701}
2702
2703int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
2704{
2705 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2706 struct ras_err_handler_data **data;
2707 u32 max_eeprom_records_count = 0;
2708 bool exc_err_limit = false;
2709 int ret;
2710
2711 if (!con || amdgpu_sriov_vf(adev))
2712 return 0;
2713
	/* Allow access to the RAS EEPROM via debugfs when the ASIC
	 * supports RAS and debugfs is enabled, but
	 * adev->ras_enabled is unset, i.e. when the "ras_enable"
	 * module parameter is set to 0.
2718 */
2719 con->adev = adev;
2720
2721 if (!adev->ras_enabled)
2722 return 0;
2723
2724 data = &con->eh_data;
2725 *data = kzalloc(sizeof(**data), GFP_KERNEL);
2726 if (!*data) {
2727 ret = -ENOMEM;
2728 goto out;
2729 }
2730
2731 mutex_init(&con->recovery_lock);
2732 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2733 atomic_set(&con->in_recovery, 0);
2734 con->eeprom_control.bad_channel_bitmap = 0;
2735
2736 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
2737 amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
2738
	/* Todo: during testing the SMU might fail to read the eeprom through I2C
	 * when the GPU is pending an XGMI reset at probe time
	 * (mostly after a second bus reset); skip it for now
2742 */
2743 if (adev->gmc.xgmi.pending_reset)
2744 return 0;
2745 ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
2746 /*
	 * This call fails when exc_err_limit is true or
2748 * ret != 0.
2749 */
2750 if (exc_err_limit || ret)
2751 goto free;
2752
2753 if (con->eeprom_control.ras_num_recs) {
2754 ret = amdgpu_ras_load_bad_pages(adev);
2755 if (ret)
2756 goto free;
2757
2758 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
2759
2760 if (con->update_channel_flag == true) {
2761 amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
2762 con->update_channel_flag = false;
2763 }
2764 }
2765
2766 mutex_init(&con->page_retirement_lock);
2767 init_waitqueue_head(&con->page_retirement_wq);
2768 atomic_set(&con->page_retirement_req_cnt, 0);
2769 con->page_retirement_thread =
2770 kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
2771 if (IS_ERR(con->page_retirement_thread)) {
2772 con->page_retirement_thread = NULL;
2773 dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
2774 }
2775
2776#ifdef CONFIG_X86_MCE_AMD
2777 if ((adev->asic_type == CHIP_ALDEBARAN) &&
2778 (adev->gmc.xgmi.connected_to_cpu))
2779 amdgpu_register_bad_pages_mca_notifier(adev);
2780#endif
2781 return 0;
2782
2783free:
2784 kfree((*data)->bps);
2785 kfree(*data);
2786 con->eh_data = NULL;
2787out:
2788 dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
2789
2790 /*
	 * Except for the error-threshold-exceeded case, other failure cases
	 * in this function do not fail amdgpu driver init.
2793 */
2794 if (!exc_err_limit)
2795 ret = 0;
2796 else
2797 ret = -EINVAL;
2798
2799 return ret;
2800}
2801
2802static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2803{
2804 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2805 struct ras_err_handler_data *data = con->eh_data;
2806
2807 /* recovery_init failed to init it, fini is useless */
2808 if (!data)
2809 return 0;
2810
2811 if (con->page_retirement_thread)
2812 kthread_stop(con->page_retirement_thread);
2813
2814 atomic_set(&con->page_retirement_req_cnt, 0);
2815
2816 cancel_work_sync(&con->recovery_work);
2817
2818 mutex_lock(&con->recovery_lock);
2819 con->eh_data = NULL;
2820 kfree(data->bps);
2821 kfree(data);
2822 mutex_unlock(&con->recovery_lock);
2823
2824 return 0;
2825}
2826/* recovery end */
2827
2828static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2829{
2830 if (amdgpu_sriov_vf(adev)) {
2831 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2832 case IP_VERSION(13, 0, 2):
2833 case IP_VERSION(13, 0, 6):
2834 return true;
2835 default:
2836 return false;
2837 }
2838 }
2839
2840 if (adev->asic_type == CHIP_IP_DISCOVERY) {
2841 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2842 case IP_VERSION(13, 0, 0):
2843 case IP_VERSION(13, 0, 6):
2844 case IP_VERSION(13, 0, 10):
2845 return true;
2846 default:
2847 return false;
2848 }
2849 }
2850
2851 return adev->asic_type == CHIP_VEGA10 ||
2852 adev->asic_type == CHIP_VEGA20 ||
2853 adev->asic_type == CHIP_ARCTURUS ||
2854 adev->asic_type == CHIP_ALDEBARAN ||
2855 adev->asic_type == CHIP_SIENNA_CICHLID;
2856}
2857
2858/*
 * this is a workaround for the vega20 workstation sku:
 * force enable gfx ras and ignore the vbios gfx ras flag,
 * because GC EDC cannot be written
2862 */
2863static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2864{
2865 struct atom_context *ctx = adev->mode_info.atom_context;
2866
2867 if (!ctx)
2868 return;
2869
2870 if (strnstr(ctx->vbios_pn, "D16406",
2871 sizeof(ctx->vbios_pn)) ||
2872 strnstr(ctx->vbios_pn, "D36002",
2873 sizeof(ctx->vbios_pn)))
2874 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
2875}
2876
/* Query ras capability via the atomfirmware interface */
static void amdgpu_ras_query_ras_capability_from_vbios(struct amdgpu_device *adev)
2879{
2880 /* mem_ecc cap */
2881 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2882 dev_info(adev->dev, "MEM ECC is active.\n");
2883 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2884 1 << AMDGPU_RAS_BLOCK__DF);
2885 } else {
2886 dev_info(adev->dev, "MEM ECC is not presented.\n");
2887 }
2888
2889 /* sram_ecc cap */
2890 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2891 dev_info(adev->dev, "SRAM ECC is active.\n");
2892 if (!amdgpu_sriov_vf(adev))
2893 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2894 1 << AMDGPU_RAS_BLOCK__DF);
2895 else
2896 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
2897 1 << AMDGPU_RAS_BLOCK__SDMA |
2898 1 << AMDGPU_RAS_BLOCK__GFX);
2899
2900 /*
2901 * VCN/JPEG RAS can be supported on both bare metal and
		 * SRIOV environments
2903 */
2904 if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
2905 amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
2906 amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
2907 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
2908 1 << AMDGPU_RAS_BLOCK__JPEG);
2909 else
2910 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
2911 1 << AMDGPU_RAS_BLOCK__JPEG);
2912
2913 /*
		 * XGMI RAS is not supported if the number of xgmi physical
		 * nodes is zero
2916 */
2917 if (!adev->gmc.xgmi.num_physical_nodes)
2918 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
2919 } else {
2920 dev_info(adev->dev, "SRAM ECC is not presented.\n");
2921 }
2922}
2923
2924/* Query poison mode from umc/df IP callbacks */
2925static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
2926{
2927 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2928 bool df_poison, umc_poison;
2929
	/* the poison setting is useless on an SRIOV guest */
2931 if (amdgpu_sriov_vf(adev) || !con)
2932 return;
2933
2934 /* Init poison supported flag, the default value is false */
2935 if (adev->gmc.xgmi.connected_to_cpu ||
2936 adev->gmc.is_app_apu) {
2937 /* enabled by default when GPU is connected to CPU */
2938 con->poison_supported = true;
2939 } else if (adev->df.funcs &&
2940 adev->df.funcs->query_ras_poison_mode &&
2941 adev->umc.ras &&
2942 adev->umc.ras->query_ras_poison_mode) {
2943 df_poison =
2944 adev->df.funcs->query_ras_poison_mode(adev);
2945 umc_poison =
2946 adev->umc.ras->query_ras_poison_mode(adev);
2947
		/* Only if poison is set in both DF and UMC can we support it */
2949 if (df_poison && umc_poison)
2950 con->poison_supported = true;
2951 else if (df_poison != umc_poison)
2952 dev_warn(adev->dev,
2953 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
2954 df_poison, umc_poison);
2955 }
2956}
2957
2958/*
 * check the hardware's ras ability, which will be saved in hw_supported.
 * if the hardware does not support ras, we can skip some ras initialization
 * and forbid some ras operations from IPs.
 * if software itself, say a boot parameter, limits the ras ability, we still
 * need to allow IPs to do some limited operations, like disable. In such a
 * case, we have to initialize ras as normal, but need to check in each
 * function whether the operation is allowed.
2966 */
2967static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2968{
2969 adev->ras_hw_enabled = adev->ras_enabled = 0;
2970
2971 if (!amdgpu_ras_asic_supported(adev))
2972 return;
2973
2974 /* query ras capability from psp */
2975 if (amdgpu_psp_get_ras_capability(&adev->psp))
2976 goto init_ras_enabled_flag;
2977
	/* query ras capability from bios */
2979 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
		amdgpu_ras_query_ras_capability_from_vbios(adev);
2981 } else {
		/* the driver only manages a few IP blocks' RAS features
		 * when the GPU is connected to the cpu through XGMI */
2984 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2985 1 << AMDGPU_RAS_BLOCK__SDMA |
2986 1 << AMDGPU_RAS_BLOCK__MMHUB);
2987 }
2988
2989 /* apply asic specific settings (vega20 only for now) */
2990 amdgpu_ras_get_quirks(adev);
2991
2992 /* query poison mode from umc/df ip callback */
2993 amdgpu_ras_query_poison_mode(adev);
2994
2995init_ras_enabled_flag:
2996 /* hw_supported needs to be aligned with RAS block mask. */
2997 adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2998
2999 adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
3000 adev->ras_hw_enabled & amdgpu_ras_mask;
3001
3002 /* aca is disabled by default */
3003 adev->aca.is_enabled = false;
3004}
3005
3006static void amdgpu_ras_counte_dw(struct work_struct *work)
3007{
3008 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3009 ras_counte_delay_work.work);
3010 struct amdgpu_device *adev = con->adev;
3011 struct drm_device *dev = adev_to_drm(adev);
3012 unsigned long ce_count, ue_count;
3013 int res;
3014
3015 res = pm_runtime_get_sync(dev->dev);
3016 if (res < 0)
3017 goto Out;
3018
3019 /* Cache new values.
3020 */
3021 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
3022 atomic_set(&con->ras_ce_count, ce_count);
3023 atomic_set(&con->ras_ue_count, ue_count);
3024 }
3025
3026 pm_runtime_mark_last_busy(dev->dev);
3027Out:
3028 pm_runtime_put_autosuspend(dev->dev);
3029}
3030
3031static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
3032{
	return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
3034 AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
3035 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
3036 AMDGPU_RAS_ERROR__PARITY;
3037}
3038
3039int amdgpu_ras_init(struct amdgpu_device *adev)
3040{
3041 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3042 int r;
3043
3044 if (con)
3045 return 0;
3046
3047 con = kzalloc(sizeof(*con) +
3048 sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
3049 sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
3050 GFP_KERNEL);
3051 if (!con)
3052 return -ENOMEM;
3053
3054 con->adev = adev;
3055 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
3056 atomic_set(&con->ras_ce_count, 0);
3057 atomic_set(&con->ras_ue_count, 0);
3058
3059 con->objs = (struct ras_manager *)(con + 1);
3060
3061 amdgpu_ras_set_context(adev, con);
3062
3063 amdgpu_ras_check_supported(adev);
3064
3065 if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
3066 /* set gfx block ras context feature for VEGA20 Gaming
3067 * send ras disable cmd to ras ta during ras late init.
3068 */
3069 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
3070 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
3071
3072 return 0;
3073 }
3074
3075 r = 0;
3076 goto release_con;
3077 }
3078
3079 con->update_channel_flag = false;
3080 con->features = 0;
3081 con->schema = 0;
3082 INIT_LIST_HEAD(&con->head);
	/* Might need to get this flag from vbios. */
3084 con->flags = RAS_DEFAULT_FLAGS;
3085
	/* initialize the nbio ras function ahead of any other
	 * ras functions, so the hardware fatal error interrupt
	 * can be enabled as early as possible */
3089 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
3090 case IP_VERSION(7, 4, 0):
3091 case IP_VERSION(7, 4, 1):
3092 case IP_VERSION(7, 4, 4):
3093 if (!adev->gmc.xgmi.connected_to_cpu)
3094 adev->nbio.ras = &nbio_v7_4_ras;
3095 break;
3096 case IP_VERSION(4, 3, 0):
3097 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
			/* unlike other generations of nbio ras,
			 * nbio v4_3 only supports the fatal error interrupt
			 * to inform software that DF is frozen due to a
			 * system fatal error event. the driver should not
			 * enable nbio ras in such a case. Instead,
			 * check DF RAS */
3104 adev->nbio.ras = &nbio_v4_3_ras;
3105 break;
3106 case IP_VERSION(7, 9, 0):
3107 if (!adev->gmc.is_app_apu)
3108 adev->nbio.ras = &nbio_v7_9_ras;
3109 break;
3110 default:
3111 /* nbio ras is not available */
3112 break;
3113 }
3114
3115 /* nbio ras block needs to be enabled ahead of other ras blocks
	 * to handle fatal errors */
3117 r = amdgpu_nbio_ras_sw_init(adev);
3118 if (r)
3119 return r;
3120
3121 if (adev->nbio.ras &&
3122 adev->nbio.ras->init_ras_controller_interrupt) {
3123 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
3124 if (r)
3125 goto release_con;
3126 }
3127
3128 if (adev->nbio.ras &&
3129 adev->nbio.ras->init_ras_err_event_athub_interrupt) {
3130 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
3131 if (r)
3132 goto release_con;
3133 }
3134
	/* Pack the socket_id into ras feature mask bits [31:29] */
3136 if (adev->smuio.funcs &&
3137 adev->smuio.funcs->get_socket_id)
3138 con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
3139 AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
3140
3141 /* Get RAS schema for particular SOC */
3142 con->schema = amdgpu_get_ras_schema(adev);
3143
3144 if (amdgpu_ras_fs_init(adev)) {
3145 r = -EINVAL;
3146 goto release_con;
3147 }
3148
3149 dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
3150 "hardware ability[%x] ras_mask[%x]\n",
3151 adev->ras_hw_enabled, adev->ras_enabled);
3152
3153 return 0;
3154release_con:
3155 amdgpu_ras_set_context(adev, NULL);
3156 kfree(con);
3157
3158 return r;
3159}
3160
3161int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
3162{
3163 if (adev->gmc.xgmi.connected_to_cpu ||
3164 adev->gmc.is_app_apu)
3165 return 1;
3166 return 0;
3167}
3168
3169static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
3170 struct ras_common_if *ras_block)
3171{
3172 struct ras_query_if info = {
3173 .head = *ras_block,
3174 };
3175
3176 if (!amdgpu_persistent_edc_harvesting_supported(adev))
3177 return 0;
3178
3179 if (amdgpu_ras_query_error_status(adev, &info) != 0)
3180 DRM_WARN("RAS init harvest failure");
3181
3182 if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
3183 DRM_WARN("RAS init harvest reset failure");
3184
3185 return 0;
3186}
3187
3188bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
3189{
3190 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3191
3192 if (!con)
3193 return false;
3194
3195 return con->poison_supported;
3196}
3197
3198/* helper function to handle common stuff in ip late init phase */
3199int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
3200 struct ras_common_if *ras_block)
3201{
3202 struct amdgpu_ras_block_object *ras_obj = NULL;
3203 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3204 struct ras_query_if *query_info;
3205 unsigned long ue_count, ce_count;
3206 int r;
3207
3208 /* disable RAS feature per IP block if it is not supported */
3209 if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
3210 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
3211 return 0;
3212 }
3213
3214 r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
3215 if (r) {
3216 if (adev->in_suspend || amdgpu_in_reset(adev)) {
			/* in the resume phase, if we fail to enable ras,
			 * clean up all ras fs nodes and disable ras */
3219 goto cleanup;
3220 } else
3221 return r;
3222 }
3223
	/* check for errors on ASICs with persistent edc harvesting support */
3225 amdgpu_persistent_edc_harvesting(adev, ras_block);
3226
	/* in the resume phase, there is no need to create ras fs nodes */
3228 if (adev->in_suspend || amdgpu_in_reset(adev))
3229 return 0;
3230
3231 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3232 if (ras_obj->ras_cb || (ras_obj->hw_ops &&
3233 (ras_obj->hw_ops->query_poison_status ||
3234 ras_obj->hw_ops->handle_poison_consumption))) {
3235 r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
3236 if (r)
3237 goto cleanup;
3238 }
3239
3240 if (ras_obj->hw_ops &&
3241 (ras_obj->hw_ops->query_ras_error_count ||
3242 ras_obj->hw_ops->query_ras_error_status)) {
3243 r = amdgpu_ras_sysfs_create(adev, ras_block);
3244 if (r)
3245 goto interrupt;
3246
3247 /* Those are the cached values at init.
3248 */
3249 query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
3250 if (!query_info)
3251 return -ENOMEM;
3252 memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
3253
3254 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
3255 atomic_set(&con->ras_ce_count, ce_count);
3256 atomic_set(&con->ras_ue_count, ue_count);
3257 }
3258
3259 kfree(query_info);
3260 }
3261
3262 return 0;
3263
3264interrupt:
3265 if (ras_obj->ras_cb)
3266 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3267cleanup:
3268 amdgpu_ras_feature_enable(adev, ras_block, 0);
3269 return r;
3270}
3271
3272static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
3273 struct ras_common_if *ras_block)
3274{
3275 return amdgpu_ras_block_late_init(adev, ras_block);
3276}
3277
3278/* helper function to remove ras fs node and interrupt handler */
3279void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
3280 struct ras_common_if *ras_block)
3281{
	struct amdgpu_ras_block_object *ras_obj;

	if (!ras_block)
3284 return;
3285
3286 amdgpu_ras_sysfs_remove(adev, ras_block);
3287
3288 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3289 if (ras_obj->ras_cb)
3290 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3291}
3292
3293static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
3294 struct ras_common_if *ras_block)
3295{
3296 return amdgpu_ras_block_late_fini(adev, ras_block);
3297}
3298
/* do some init work after IP late init as a dependency.
 * it runs in the resume/gpu reset/boot-up cases.
3301 */
3302void amdgpu_ras_resume(struct amdgpu_device *adev)
3303{
3304 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3305 struct ras_manager *obj, *tmp;
3306
3307 if (!adev->ras_enabled || !con) {
3308 /* clean ras context for VEGA20 Gaming after send ras disable cmd */
3309 amdgpu_release_ras_context(adev);
3310
3311 return;
3312 }
3313
3314 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		/* Set up all other IPs which are not implemented. There is a
		 * tricky point: an IP's actual ras error type should be
		 * MULTI_UNCORRECTABLE, but since the driver does not handle
		 * it, ERROR_NONE makes sense anyway.
3319 */
3320 amdgpu_ras_enable_all_features(adev, 1);
3321
		/* We enable ras on all hw_supported blocks, but the boot
		 * parameter might disable some of them, and one or more IPs
		 * may not be implemented yet. So we disable those on their
		 * behalf.
3325 */
3326 list_for_each_entry_safe(obj, tmp, &con->head, node) {
3327 if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
3328 amdgpu_ras_feature_enable(adev, &obj->head, 0);
				/* there should not be any reference. */
3330 WARN_ON(alive_obj(obj));
3331 }
3332 }
3333 }
3334}
3335
3336void amdgpu_ras_suspend(struct amdgpu_device *adev)
3337{
3338 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3339
3340 if (!adev->ras_enabled || !con)
3341 return;
3342
3343 amdgpu_ras_disable_all_features(adev, 0);
3344 /* Make sure all ras objects are disabled. */
3345 if (AMDGPU_RAS_GET_FEATURES(con->features))
3346 amdgpu_ras_disable_all_features(adev, 1);
3347}
3348
3349int amdgpu_ras_late_init(struct amdgpu_device *adev)
3350{
3351 struct amdgpu_ras_block_list *node, *tmp;
3352 struct amdgpu_ras_block_object *obj;
3353 int r;
3354
	/* The guest side doesn't need to init the ras feature */
3356 if (amdgpu_sriov_vf(adev))
3357 return 0;
3358
3359 if (amdgpu_aca_is_enabled(adev)) {
3360 if (amdgpu_in_reset(adev))
3361 r = amdgpu_aca_reset(adev);
3362 else
3363 r = amdgpu_aca_init(adev);
3364 if (r)
3365 return r;
3366
3367 amdgpu_ras_set_aca_debug_mode(adev, false);
3368 } else {
3369 amdgpu_ras_set_mca_debug_mode(adev, false);
3370 }
3371
3372 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
3373 obj = node->ras_obj;
3374 if (!obj) {
3375 dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
3376 continue;
3377 }
3378
3379 if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
3380 continue;
3381
3382 if (obj->ras_late_init) {
3383 r = obj->ras_late_init(adev, &obj->ras_comm);
3384 if (r) {
3385 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
3386 obj->ras_comm.name, r);
3387 return r;
3388 }
3389 } else
3390 amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
3391 }
3392
3393 return 0;
3394}
3395
/* do some fini work before IP fini as a dependency */
3397int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
3398{
3399 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3400
3401 if (!adev->ras_enabled || !con)
3402 return 0;
3403
3404
	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
3406 if (AMDGPU_RAS_GET_FEATURES(con->features))
3407 amdgpu_ras_disable_all_features(adev, 0);
3408 amdgpu_ras_recovery_fini(adev);
3409 return 0;
3410}
3411
3412int amdgpu_ras_fini(struct amdgpu_device *adev)
3413{
3414 struct amdgpu_ras_block_list *ras_node, *tmp;
3415 struct amdgpu_ras_block_object *obj = NULL;
3416 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3417
3418 if (!adev->ras_enabled || !con)
3419 return 0;
3420
3421 list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
3422 if (ras_node->ras_obj) {
3423 obj = ras_node->ras_obj;
3424 if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
3425 obj->ras_fini)
3426 obj->ras_fini(adev, &obj->ras_comm);
3427 else
3428 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
3429 }
3430
3431 /* Clear ras blocks from ras_list and free ras block list node */
3432 list_del(&ras_node->node);
3433 kfree(ras_node);
3434 }
3435
3436 amdgpu_ras_fs_fini(adev);
3437 amdgpu_ras_interrupt_remove_all(adev);
3438
3439 if (amdgpu_aca_is_enabled(adev))
3440 amdgpu_aca_fini(adev);
3441
3442 WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
3443
3444 if (AMDGPU_RAS_GET_FEATURES(con->features))
3445 amdgpu_ras_disable_all_features(adev, 0);
3446
3447 cancel_delayed_work_sync(&con->ras_counte_delay_work);
3448
3449 amdgpu_ras_set_context(adev, NULL);
3450 kfree(con);
3451
3452 return 0;
3453}
3454
3455bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
3456{
3457 struct amdgpu_ras *ras;
3458
3459 ras = amdgpu_ras_get_context(adev);
3460 if (!ras)
3461 return false;
3462
3463 return atomic_read(&ras->fed);
3464}
3465
3466void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
3467{
3468 struct amdgpu_ras *ras;
3469
3470 ras = amdgpu_ras_get_context(adev);
3471 if (ras)
3472 atomic_set(&ras->fed, !!status);
3473}
3474
3475void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
3476{
3477 if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
3478 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3479
3480 dev_info(adev->dev, "uncorrectable hardware error"
3481 "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
3482
3483 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3484 amdgpu_ras_reset_gpu(adev);
3485 }
3486}
3487
3488bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
3489{
3490 if (adev->asic_type == CHIP_VEGA20 &&
3491 adev->pm.fw_version <= 0x283400) {
3492 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
3493 amdgpu_ras_intr_triggered();
3494 }
3495
3496 return false;
3497}
3498
3499void amdgpu_release_ras_context(struct amdgpu_device *adev)
3500{
3501 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3502
3503 if (!con)
3504 return;
3505
3506 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
3507 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
3508 amdgpu_ras_set_context(adev, NULL);
3509 kfree(con);
3510 }
3511}
3512
3513#ifdef CONFIG_X86_MCE_AMD
3514static struct amdgpu_device *find_adev(uint32_t node_id)
3515{
3516 int i;
3517 struct amdgpu_device *adev = NULL;
3518
3519 for (i = 0; i < mce_adev_list.num_gpu; i++) {
3520 adev = mce_adev_list.devs[i];
3521
3522 if (adev && adev->gmc.xgmi.connected_to_cpu &&
3523 adev->gmc.xgmi.physical_node_id == node_id)
3524 break;
3525 adev = NULL;
3526 }
3527
3528 return adev;
3529}
3530
3531#define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF)
3532#define GET_UMC_INST(m) (((m) >> 21) & 0x7)
3533#define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
3534#define GPU_ID_OFFSET 8
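
/*
 * Worked example (values are hypothetical): for an MCA_IPID value whose
 * bits [47:44] are 0x9, GET_MCA_IPID_GPUID() yields 9 and, after
 * subtracting GPU_ID_OFFSET, the notifier below looks up gpu_id 1.
 */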
3535
3536static int amdgpu_bad_page_notifier(struct notifier_block *nb,
3537 unsigned long val, void *data)
3538{
3539 struct mce *m = (struct mce *)data;
3540 struct amdgpu_device *adev = NULL;
3541 uint32_t gpu_id = 0;
3542 uint32_t umc_inst = 0, ch_inst = 0;
3543
3544 /*
3545 * If the error was generated in UMC_V2, which belongs to GPU UMCs,
	 * and the error occurred in DramECC (Extended error code = 0), then
	 * process the error; otherwise bail out.
3548 */
3549 if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
3550 (XEC(m->status, 0x3f) == 0x0)))
3551 return NOTIFY_DONE;
3552
3553 /*
	 * If it is a correctable error, return.
3555 */
3556 if (mce_is_correctable(m))
3557 return NOTIFY_OK;
3558
3559 /*
3560 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
3561 */
3562 gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
3563
3564 adev = find_adev(gpu_id);
3565 if (!adev) {
3566 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
3567 gpu_id);
3568 return NOTIFY_DONE;
3569 }
3570
3571 /*
	 * If it is an uncorrectable error, then find out the UMC instance and
3573 * channel index.
3574 */
3575 umc_inst = GET_UMC_INST(m->ipid);
3576 ch_inst = GET_CHAN_INDEX(m->ipid);
3577
3578 dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
3579 umc_inst, ch_inst);
3580
3581 if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
3582 return NOTIFY_OK;
3583 else
3584 return NOTIFY_DONE;
3585}
3586
3587static struct notifier_block amdgpu_bad_page_nb = {
3588 .notifier_call = amdgpu_bad_page_notifier,
3589 .priority = MCE_PRIO_UC,
3590};
3591
3592static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
3593{
3594 /*
3595 * Add the adev to the mce_adev_list.
	 * During mode2 reset, the amdgpu device is temporarily
	 * removed from the mgpu_info list, which can cause
3598 * page retirement to fail.
3599 * Use this list instead of mgpu_info to find the amdgpu
3600 * device on which the UMC error was reported.
3601 */
3602 mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
3603
3604 /*
3605 * Register the x86 notifier only once
3606 * with MCE subsystem.
3607 */
	if (!notifier_registered) {
3609 mce_register_decode_chain(&amdgpu_bad_page_nb);
3610 notifier_registered = true;
3611 }
3612}
3613#endif
3614
3615struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
3616{
3617 if (!adev)
3618 return NULL;
3619
3620 return adev->psp.ras_context.ras;
3621}
3622
3623int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
3624{
3625 if (!adev)
3626 return -EINVAL;
3627
3628 adev->psp.ras_context.ras = ras_con;
3629 return 0;
3630}
3631
3632/* check if ras is supported on block, say, sdma, gfx */
3633int amdgpu_ras_is_supported(struct amdgpu_device *adev,
3634 unsigned int block)
3635{
3636 int ret = 0;
3637 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3638
3639 if (block >= AMDGPU_RAS_BLOCK_COUNT)
3640 return 0;
3641
3642 ret = ras && (adev->ras_enabled & (1 << block));
3643
	/* For the special asics with mem ecc enabled but sram ecc
	 * not enabled, even if the ras block is not supported in
	 * .ras_enabled, if the asic supports poison mode and the
	 * ras block has a ras configuration, the ras block can be
	 * considered to support the ras function.
3649 */
3650 if (!ret &&
3651 (block == AMDGPU_RAS_BLOCK__GFX ||
3652 block == AMDGPU_RAS_BLOCK__SDMA ||
3653 block == AMDGPU_RAS_BLOCK__VCN ||
3654 block == AMDGPU_RAS_BLOCK__JPEG) &&
3655 (amdgpu_ras_mask & (1 << block)) &&
3656 amdgpu_ras_is_poison_mode_supported(adev) &&
3657 amdgpu_ras_get_ras_block(adev, block, 0))
3658 ret = 1;
3659
3660 return ret;
3661}
3662
3663int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
3664{
3665 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3666
3667 if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
3668 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
3669 return 0;
3670}
3671
3672int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
3673{
3674 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3675 int ret = 0;
3676
3677 if (con) {
3678 ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
3679 if (!ret)
3680 con->is_aca_debug_mode = enable;
3681 }
3682
3683 return ret;
3684}
3685
3686int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
3687{
3688 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3689 int ret = 0;
3690
3691 if (con) {
3692 if (amdgpu_aca_is_enabled(adev))
3693 ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
3694 else
3695 ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
3696 if (!ret)
3697 con->is_aca_debug_mode = enable;
3698 }
3699
3700 return ret;
3701}
3702
3703bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
3704{
3705 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3706 const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
3707 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
3708
3709 if (!con)
3710 return false;
3711
3712 if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
3713 (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
3714 return con->is_aca_debug_mode;
3715 else
3716 return true;
3717}
3718
3719bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
3720 unsigned int *error_query_mode)
3721{
3722 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3723 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
3724 const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
3725
3726 if (!con) {
3727 *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
3728 return false;
3729 }
3730
3731 if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode))
3732 *error_query_mode =
3733 (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
3734 else
3735 *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
3736
3737 return true;
3738}
3739
3740/* Register each ip ras block into amdgpu ras */
3741int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
3742 struct amdgpu_ras_block_object *ras_block_obj)
3743{
	struct amdgpu_ras_block_list *ras_node;

	if (!adev || !ras_block_obj)
3746 return -EINVAL;
3747
3748 ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
3749 if (!ras_node)
3750 return -ENOMEM;
3751
3752 INIT_LIST_HEAD(&ras_node->node);
3753 ras_node->ras_obj = ras_block_obj;
3754 list_add_tail(&ras_node->node, &adev->ras_list);
3755
3756 return 0;
3757}
3758
3759void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
3760{
3761 if (!err_type_name)
3762 return;
3763
3764 switch (err_type) {
3765 case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
3766 sprintf(err_type_name, "correctable");
3767 break;
3768 case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
3769 sprintf(err_type_name, "uncorrectable");
3770 break;
3771 default:
3772 sprintf(err_type_name, "unknown");
3773 break;
3774 }
3775}
3776
3777bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
3778 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
3779 uint32_t instance,
3780 uint32_t *memory_id)
3781{
3782 uint32_t err_status_lo_data, err_status_lo_offset;
3783
3784 if (!reg_entry)
3785 return false;
3786
3787 err_status_lo_offset =
3788 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
3789 reg_entry->seg_lo, reg_entry->reg_lo);
3790 err_status_lo_data = RREG32(err_status_lo_offset);
3791
3792 if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
3793 !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
3794 return false;
3795
3796 *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
3797
3798 return true;
3799}
3800
3801bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
3802 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
3803 uint32_t instance,
3804 unsigned long *err_cnt)
3805{
3806 uint32_t err_status_hi_data, err_status_hi_offset;
3807
3808 if (!reg_entry)
3809 return false;
3810
3811 err_status_hi_offset =
3812 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
3813 reg_entry->seg_hi, reg_entry->reg_hi);
3814 err_status_hi_data = RREG32(err_status_hi_offset);
3815
3816 if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
3817 !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
3818 /* keep the check here in case we need to refer to the result later */
3819 dev_dbg(adev->dev, "Invalid err_info field\n");
3820
3821 /* read err count */
3822 *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
3823
3824 return true;
3825}
3826
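/*
 * Walk a block's error status register list for one instance, summing
 * valid error counts into *err_count and logging each hit, resolving
 * memory_id to a memory block name when a memory id table is provided.
 */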
3827void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
3828 const struct amdgpu_ras_err_status_reg_entry *reg_list,
3829 uint32_t reg_list_size,
3830 const struct amdgpu_ras_memory_id_entry *mem_list,
3831 uint32_t mem_list_size,
3832 uint32_t instance,
3833 uint32_t err_type,
3834 unsigned long *err_count)
3835{
3836 uint32_t memory_id;
3837 unsigned long err_cnt;
3838 char err_type_name[16];
3839 uint32_t i, j;
3840
3841 for (i = 0; i < reg_list_size; i++) {
3842 /* query memory_id from err_status_lo */
3843 		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
3844 instance, &memory_id))
3845 continue;
3846
3847 /* query err_cnt from err_status_hi */
3848 		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
3849 instance, &err_cnt) ||
3850 !err_cnt)
3851 continue;
3852
3853 *err_count += err_cnt;
3854
3855 /* log the errors */
3856 amdgpu_ras_get_error_type_name(err_type, err_type_name);
3857 if (!mem_list) {
3858 /* memory_list is not supported */
3859 dev_info(adev->dev,
3860 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
3861 err_cnt, err_type_name,
3862 reg_list[i].block_name,
3863 instance, memory_id);
3864 } else {
3865 for (j = 0; j < mem_list_size; j++) {
3866 if (memory_id == mem_list[j].memory_id) {
3867 dev_info(adev->dev,
3868 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
3869 err_cnt, err_type_name,
3870 reg_list[i].block_name,
3871 instance, mem_list[j].name);
3872 break;
3873 }
3874 }
3875 }
3876 }
3877}
3878
3879void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
3880 const struct amdgpu_ras_err_status_reg_entry *reg_list,
3881 uint32_t reg_list_size,
3882 uint32_t instance)
3883{
3884 uint32_t err_status_lo_offset, err_status_hi_offset;
3885 uint32_t i;
3886
3887 for (i = 0; i < reg_list_size; i++) {
3888 err_status_lo_offset =
3889 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
3890 reg_list[i].seg_lo, reg_list[i].reg_lo);
3891 err_status_hi_offset =
3892 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
3893 reg_list[i].seg_hi, reg_list[i].reg_hi);
3894 WREG32(err_status_lo_offset, 0);
3895 WREG32(err_status_hi_offset, 0);
3896 }
3897}
3898
3899int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
3900{
3901 memset(err_data, 0, sizeof(*err_data));
3902
3903 INIT_LIST_HEAD(&err_data->err_node_list);
3904
3905 return 0;
3906}
3907
3908static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
3909{
3910 if (!err_node)
3911 return;
3912
3913 list_del(&err_node->node);
3914 kvfree(err_node);
3915}
3916
3917void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
3918{
3919 struct ras_err_node *err_node, *tmp;
3920
3921 list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
3922 amdgpu_ras_error_node_release(err_node);
3923}
3924
3925static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
3926 struct amdgpu_smuio_mcm_config_info *mcm_info)
3927{
3928 struct ras_err_node *err_node;
3929 struct amdgpu_smuio_mcm_config_info *ref_id;
3930
3931 if (!err_data || !mcm_info)
3932 return NULL;
3933
3934 for_each_ras_error(err_node, err_data) {
3935 ref_id = &err_node->err_info.mcm_info;
3936
3937 if (mcm_info->socket_id == ref_id->socket_id &&
3938 mcm_info->die_id == ref_id->die_id)
3939 return err_node;
3940 }
3941
3942 return NULL;
3943}
3944
3945static struct ras_err_node *amdgpu_ras_error_node_new(void)
3946{
3947 struct ras_err_node *err_node;
3948
3949 err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
3950 if (!err_node)
3951 return NULL;
3952
3953 INIT_LIST_HEAD(&err_node->node);
3954
3955 return err_node;
3956}
3957
3958static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
3959{
3960 struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
3961 struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
3962 struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
3963 struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
3964
3965 	if (unlikely(infoa->socket_id != infob->socket_id))
3966 		return infoa->socket_id - infob->socket_id;
3967
3968 	return infoa->die_id - infob->die_id;
3971}
3972
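/*
 * Look up the err_info entry keyed by (socket_id, die_id), creating,
 * inserting and re-sorting a new node when no match exists. The sort
 * keeps per-die reporting ordered by socket first, then die.
 */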
3973static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
3974 struct amdgpu_smuio_mcm_config_info *mcm_info)
3975{
3976 struct ras_err_node *err_node;
3977
3978 err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
3979 if (err_node)
3980 return &err_node->err_info;
3981
3982 err_node = amdgpu_ras_error_node_new();
3983 if (!err_node)
3984 return NULL;
3985
3986 INIT_LIST_HEAD(&err_node->err_info.err_addr_list);
3987
3988 memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
3989
3990 err_data->err_list_count++;
3991 list_add_tail(&err_node->node, &err_data->err_node_list);
3992 list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
3993
3994 return &err_node->err_info;
3995}
3996
3997void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *err_addr)
3998{
3999 struct ras_err_addr *mca_err_addr;
4000
4001 mca_err_addr = kzalloc(sizeof(*mca_err_addr), GFP_KERNEL);
4002 if (!mca_err_addr)
4003 return;
4004
4005 INIT_LIST_HEAD(&mca_err_addr->node);
4006
4007 mca_err_addr->err_status = err_addr->err_status;
4008 mca_err_addr->err_ipid = err_addr->err_ipid;
4009 mca_err_addr->err_addr = err_addr->err_addr;
4010
4011 list_add_tail(&mca_err_addr->node, &err_info->err_addr_list);
4012}
4013
4014void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr)
4015{
4016 list_del(&mca_err_addr->node);
4017 kfree(mca_err_addr);
4018}
4019
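/*
 * The statistic helpers below fold a freshly queried error count into
 * both the matching per-(socket, die) ras_err_info and the aggregate
 * totals in ras_err_data, recording the MCA error address for UE/DE
 * errors when one is supplied. A minimal caller sketch (hypothetical
 * IDs and count):
 *
 *	struct amdgpu_smuio_mcm_config_info mcm = { .socket_id = 0, .die_id = 1 };
 *
 *	amdgpu_ras_error_statistic_ue_count(err_data, &mcm, NULL, 2);
 */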
4020int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
4021 struct amdgpu_smuio_mcm_config_info *mcm_info,
4022 struct ras_err_addr *err_addr, u64 count)
4023{
4024 struct ras_err_info *err_info;
4025
4026 if (!err_data || !mcm_info)
4027 return -EINVAL;
4028
4029 if (!count)
4030 return 0;
4031
4032 err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4033 if (!err_info)
4034 return -EINVAL;
4035
4036 if (err_addr && err_addr->err_status)
4037 amdgpu_ras_add_mca_err_addr(err_info, err_addr);
4038
4039 err_info->ue_count += count;
4040 err_data->ue_count += count;
4041
4042 return 0;
4043}
4044
4045int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
4046 struct amdgpu_smuio_mcm_config_info *mcm_info,
4047 struct ras_err_addr *err_addr, u64 count)
4048{
4049 struct ras_err_info *err_info;
4050
4051 if (!err_data || !mcm_info)
4052 return -EINVAL;
4053
4054 if (!count)
4055 return 0;
4056
4057 err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4058 if (!err_info)
4059 return -EINVAL;
4060
4061 err_info->ce_count += count;
4062 err_data->ce_count += count;
4063
4064 return 0;
4065}
4066
4067int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
4068 struct amdgpu_smuio_mcm_config_info *mcm_info,
4069 struct ras_err_addr *err_addr, u64 count)
4070{
4071 struct ras_err_info *err_info;
4072
4073 if (!err_data || !mcm_info)
4074 return -EINVAL;
4075
4076 if (!count)
4077 return 0;
4078
4079 err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4080 if (!err_info)
4081 return -EINVAL;
4082
4083 if (err_addr && err_addr->err_status)
4084 amdgpu_ras_add_mca_err_addr(err_info, err_addr);
4085
4086 err_info->de_count += count;
4087 err_data->de_count += count;
4088
4089 return 0;
4090}
4091
4092#define mmMP0_SMN_C2PMSG_92 0x1609C
4093#define mmMP0_SMN_C2PMSG_126 0x160BE
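/*
 * Decode a boot-time error word: the socket/AID/HBM ids identify the
 * failing component, and the remaining bits identify which boot stage
 * (memory training, firmware load, link training, HBM tests) failed.
 */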
4094static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
4095 u32 instance, u32 boot_error)
4096{
4097 u32 socket_id, aid_id, hbm_id;
4098 u32 reg_data;
4099 u64 reg_addr;
4100
4101 socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
4102 aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
4103 hbm_id = AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error);
4104
4105 	/* The SMN addressing pattern on other SOCs may differ from the one
4106 	 * used by aqua_vanjaram. Revisit this code if the pattern changes;
4107 	 * in that case, replace the aqua_vanjaram implementation with a
4108 	 * more generic helper. */
4109 reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4110 aqua_vanjaram_encode_ext_smn_addressing(instance);
4111
4112 reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4113 dev_err(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n",
4114 socket_id, aid_id, reg_data);
4115
4116 if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
4117 dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n",
4118 socket_id, aid_id, hbm_id);
4119
4120 if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
4121 dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n",
4122 socket_id, aid_id);
4123
4124 if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
4125 dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n",
4126 socket_id, aid_id);
4127
4128 if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
4129 dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n",
4130 socket_id, aid_id);
4131
4132 if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
4133 dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n",
4134 socket_id, aid_id);
4135
4136 if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
4137 dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n",
4138 socket_id, aid_id);
4139
4140 if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
4141 dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n",
4142 socket_id, aid_id, hbm_id);
4143
4144 if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
4145 dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n",
4146 socket_id, aid_id, hbm_id);
4147}
4148
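/*
 * Poll C2PMSG_92 until the boot status reaches its steady value; on
 * timeout, fall back to polling C2PMSG_126 for a boot error word.
 * Returns -ETIME with the last raw register value when neither poll
 * succeeds within AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT iterations.
 */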
4149static int amdgpu_ras_wait_for_boot_complete(struct amdgpu_device *adev,
4150 u32 instance, u32 *boot_error)
4151{
4152 u32 reg_addr;
4153 u32 reg_data;
4154 int retry_loop;
4155
4156 reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4157 aqua_vanjaram_encode_ext_smn_addressing(instance);
4158
4159 for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
4160 reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4161 if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS) {
4162 *boot_error = AMDGPU_RAS_BOOT_SUCEESS;
4163 return 0;
4164 }
4165 msleep(1);
4166 }
4167
4168 	/* The SMN addressing pattern on other SOCs may differ from the one
4169 	 * used by aqua_vanjaram. Revisit this code if the pattern changes;
4170 	 * in that case, replace the aqua_vanjaram implementation with a
4171 	 * more generic helper. */
4172 reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
4173 aqua_vanjaram_encode_ext_smn_addressing(instance);
4174
4175 for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
4176 reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4177 if (AMDGPU_RAS_GPU_ERR_BOOT_STATUS(reg_data)) {
4178 *boot_error = reg_data;
4179 return 0;
4180 }
4181 msleep(1);
4182 }
4183
4184 *boot_error = reg_data;
4185 return -ETIME;
4186}
4187
4188void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
4189{
4190 u32 boot_error = 0;
4191 u32 i;
4192
4193 for (i = 0; i < num_instances; i++) {
4194 if (amdgpu_ras_wait_for_boot_complete(adev, i, &boot_error))
4195 amdgpu_ras_boot_time_error_reporting(adev, i, boot_error);
4196 }
4197}
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 *
23 */
24#include <linux/debugfs.h>
25#include <linux/list.h>
26#include <linux/module.h>
27#include <linux/uaccess.h>
28#include <linux/reboot.h>
29#include <linux/syscalls.h>
30#include <linux/pm_runtime.h>
31
32#include "amdgpu.h"
33#include "amdgpu_ras.h"
34#include "amdgpu_atomfirmware.h"
35#include "amdgpu_xgmi.h"
36#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
37#include "atom.h"
38#include "amdgpu_reset.h"
39
40#ifdef CONFIG_X86_MCE_AMD
41#include <asm/mce.h>
42
43static bool notifier_registered;
44#endif
45static const char *RAS_FS_NAME = "ras";
46
47const char *ras_error_string[] = {
48 "none",
49 "parity",
50 "single_correctable",
51 "multi_uncorrectable",
52 "poison",
53};
54
55const char *ras_block_string[] = {
56 "umc",
57 "sdma",
58 "gfx",
59 "mmhub",
60 "athub",
61 "pcie_bif",
62 "hdp",
63 "xgmi_wafl",
64 "df",
65 "smn",
66 "sem",
67 "mp0",
68 "mp1",
69 "fuse",
70 "mca",
71 "vcn",
72 "jpeg",
73};
74
75const char *ras_mca_block_string[] = {
76 "mca_mp0",
77 "mca_mp1",
78 "mca_mpio",
79 "mca_iohc",
80};
81
82struct amdgpu_ras_block_list {
83 /* ras block link */
84 struct list_head node;
85
86 struct amdgpu_ras_block_object *ras_obj;
87};
88
89const char *get_ras_block_str(struct ras_common_if *ras_block)
90{
91 if (!ras_block)
92 return "NULL";
93
94 if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
95 return "OUT OF RANGE";
96
97 if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
98 return ras_mca_block_string[ras_block->sub_block_index];
99
100 return ras_block_string[ras_block->block];
101}
102
103#define ras_block_str(_BLOCK_) \
104 (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
105
106#define ras_err_str(i) (ras_error_string[ffs(i)])
107
108#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
109
110/* inject address is 52 bits */
111#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
112
113/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
114#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
115
116enum amdgpu_ras_retire_page_reservation {
117 AMDGPU_RAS_RETIRE_PAGE_RESERVED,
118 AMDGPU_RAS_RETIRE_PAGE_PENDING,
119 AMDGPU_RAS_RETIRE_PAGE_FAULT,
120};
121
122atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
123
124static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
125 uint64_t addr);
126static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
127 uint64_t addr);
128#ifdef CONFIG_X86_MCE_AMD
129static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
130struct mce_notifier_adev_list {
131 struct amdgpu_device *devs[MAX_GPU_INSTANCE];
132 int num_gpu;
133};
134static struct mce_notifier_adev_list mce_adev_list;
135#endif
136
137void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
138{
139 if (adev && amdgpu_ras_get_context(adev))
140 amdgpu_ras_get_context(adev)->error_query_ready = ready;
141}
142
143static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
144{
145 if (adev && amdgpu_ras_get_context(adev))
146 return amdgpu_ras_get_context(adev)->error_query_ready;
147
148 return false;
149}
150
151static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
152{
153 struct ras_err_data err_data = {0, 0, 0, NULL};
154 struct eeprom_table_record err_rec;
155
156 if ((address >= adev->gmc.mc_vram_size) ||
157 (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
158 dev_warn(adev->dev,
159 "RAS WARN: input address 0x%llx is invalid.\n",
160 address);
161 return -EINVAL;
162 }
163
164 if (amdgpu_ras_check_bad_page(adev, address)) {
165 dev_warn(adev->dev,
166 "RAS WARN: 0x%llx has already been marked as bad page!\n",
167 address);
168 return 0;
169 }
170
171 memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
172 err_data.err_addr = &err_rec;
173 amdgpu_umc_fill_error_record(&err_data, address,
174 (address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0);
175
176 if (amdgpu_bad_page_threshold != 0) {
177 amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
178 err_data.err_addr_cnt);
179 amdgpu_ras_save_bad_pages(adev);
180 }
181
182 dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
183 dev_warn(adev->dev, "Clear EEPROM:\n");
184 dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
185
186 return 0;
187}
188
189static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
190 size_t size, loff_t *pos)
191{
192 struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
193 struct ras_query_if info = {
194 .head = obj->head,
195 };
196 ssize_t s;
197 char val[128];
198
199 if (amdgpu_ras_query_error_status(obj->adev, &info))
200 return -EINVAL;
201
202 /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
203 if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
204 obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
205 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
206 dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
207 }
208
209 s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
210 "ue", info.ue_count,
211 "ce", info.ce_count);
212 if (*pos >= s)
213 return 0;
214
215 s -= *pos;
216 s = min_t(u64, s, size);
217
218
219 if (copy_to_user(buf, &val[*pos], s))
220 return -EINVAL;
221
222 *pos += s;
223
224 return s;
225}
226
227static const struct file_operations amdgpu_ras_debugfs_ops = {
228 .owner = THIS_MODULE,
229 .read = amdgpu_ras_debugfs_read,
230 .write = NULL,
231 .llseek = default_llseek
232};
233
234static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
235{
236 int i;
237
238 for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
239 *block_id = i;
240 if (strcmp(name, ras_block_string[i]) == 0)
241 return 0;
242 }
243 return -EINVAL;
244}
245
246static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
247 const char __user *buf, size_t size,
248 loff_t *pos, struct ras_debug_if *data)
249{
250 ssize_t s = min_t(u64, 64, size);
251 char str[65];
252 char block_name[33];
253 char err[9] = "ue";
254 int op = -1;
255 int block_id;
256 uint32_t sub_block;
257 u64 address, value;
258
259 if (*pos)
260 return -EINVAL;
261 *pos = size;
262
263 memset(str, 0, sizeof(str));
264 memset(data, 0, sizeof(*data));
265
266 if (copy_from_user(str, buf, s))
267 return -EINVAL;
268
269 if (sscanf(str, "disable %32s", block_name) == 1)
270 op = 0;
271 else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
272 op = 1;
273 else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
274 op = 2;
275 else if (strstr(str, "retire_page") != NULL)
276 op = 3;
277 else if (str[0] && str[1] && str[2] && str[3])
278 /* ascii string, but commands are not matched. */
279 return -EINVAL;
280
281 if (op != -1) {
282 if (op == 3) {
283 if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
284 sscanf(str, "%*s %llu", &address) != 1)
285 return -EINVAL;
286
287 data->op = op;
288 data->inject.address = address;
289
290 return 0;
291 }
292
293 if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
294 return -EINVAL;
295
296 data->head.block = block_id;
297 /* only ue and ce errors are supported */
298 if (!memcmp("ue", err, 2))
299 data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
300 else if (!memcmp("ce", err, 2))
301 data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
302 else
303 return -EINVAL;
304
305 data->op = op;
306
307 if (op == 2) {
308 if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
309 &sub_block, &address, &value) != 3 &&
310 sscanf(str, "%*s %*s %*s %u %llu %llu",
311 &sub_block, &address, &value) != 3)
312 return -EINVAL;
313 data->head.sub_block_index = sub_block;
314 data->inject.address = address;
315 data->inject.value = value;
316 }
317 } else {
318 if (size < sizeof(*data))
319 return -EINVAL;
320
321 if (copy_from_user(data, buf, sizeof(*data)))
322 return -EINVAL;
323 }
324
325 return 0;
326}
327
328/**
329 * DOC: AMDGPU RAS debugfs control interface
330 *
331 * The control interface accepts struct ras_debug_if which has two members.
332 *
333 * First member: ras_debug_if::head or ras_debug_if::inject.
334 *
335 * head is used to indicate which IP block will be under control.
336 *
337 * head has four members, they are block, type, sub_block_index, name.
338 * block: which IP will be under control.
339 * type: what kind of error will be enabled/disabled/injected.
340 * sub_block_index: some IPs have subcomponets. say, GFX, sDMA.
341 * name: the name of IP.
342 *
343 * inject has two more members than head, they are address, value.
344 * As their names indicate, inject operation will write the
345 * value to the address.
346 *
347 * The second member: struct ras_debug_if::op.
348 * It has three kinds of operations.
349 *
350 * - 0: disable RAS on the block. Take ::head as its data.
351 * - 1: enable RAS on the block. Take ::head as its data.
352 * - 2: inject errors on the block. Take ::inject as its data.
353 *
354 * How to use the interface?
355 *
356 * In a program
357 *
358 * Copy the struct ras_debug_if in your code and initialize it.
359 * Write the struct to the control interface.
360 *
361 * From shell
362 *
363 * .. code-block:: bash
364 *
365 * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
366 * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
367 * echo "inject <block> <error> <sub-block> <address> <value> > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
368 *
369 * Where N, is the card which you want to affect.
370 *
371 * "disable" requires only the block.
372 * "enable" requires the block and error type.
373 * "inject" requires the block, error type, address, and value.
374 *
375 * The block is one of: umc, sdma, gfx, etc.
376 * see ras_block_string[] for details
377 *
378 * The error type is one of: ue, ce, where,
379 * ue is multi-uncorrectable
380 * ce is single-correctable
381 *
382 * The sub-block is a the sub-block index, pass 0 if there is no sub-block.
383 * The address and value are hexadecimal numbers, leading 0x is optional.
384 *
385 * For instance,
386 *
387 * .. code-block:: bash
388 *
389 * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
390 * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
391 * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
392 *
393 * How to check the result of the operation?
394 *
395 * To check disable/enable, see "ras" features at,
396 * /sys/class/drm/card[0/1/2...]/device/ras/features
397 *
398 * To check inject, see the corresponding error count at,
399 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
400 *
401 * .. note::
402 * Operations are only allowed on blocks which are supported.
403 * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
404 * to see which blocks support RAS on a particular asic.
405 *
406 */
407static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
408 const char __user *buf,
409 size_t size, loff_t *pos)
410{
411 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
412 struct ras_debug_if data;
413 int ret = 0;
414
415 if (!amdgpu_ras_get_error_query_ready(adev)) {
416 dev_warn(adev->dev, "RAS WARN: error injection "
417 "currently inaccessible\n");
418 return size;
419 }
420
421 ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
422 if (ret)
423 return ret;
424
425 if (data.op == 3) {
426 ret = amdgpu_reserve_page_direct(adev, data.inject.address);
427 if (!ret)
428 return size;
429 else
430 return ret;
431 }
432
433 if (!amdgpu_ras_is_supported(adev, data.head.block))
434 return -EINVAL;
435
436 switch (data.op) {
437 case 0:
438 ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
439 break;
440 case 1:
441 ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
442 break;
443 case 2:
444 if ((data.inject.address >= adev->gmc.mc_vram_size) ||
445 (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
446 dev_warn(adev->dev, "RAS WARN: input address "
447 "0x%llx is invalid.",
448 data.inject.address);
449 ret = -EINVAL;
450 break;
451 }
452
453 /* umc ce/ue error injection for a bad page is not allowed */
454 if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
455 amdgpu_ras_check_bad_page(adev, data.inject.address)) {
456 dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
457 "already been marked as bad!\n",
458 data.inject.address);
459 break;
460 }
461
462 /* data.inject.address is offset instead of absolute gpu address */
463 ret = amdgpu_ras_error_inject(adev, &data.inject);
464 break;
465 default:
466 ret = -EINVAL;
467 break;
468 }
469
470 if (ret)
471 return ret;
472
473 return size;
474}
475
476/**
477 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
478 *
479 * Some boards contain an EEPROM which is used to persistently store a list of
480 * bad pages which experiences ECC errors in vram. This interface provides
481 * a way to reset the EEPROM, e.g., after testing error injection.
482 *
483 * Usage:
484 *
485 * .. code-block:: bash
486 *
487 * echo 1 > ../ras/ras_eeprom_reset
488 *
489 * will reset EEPROM table to 0 entries.
490 *
491 */
492static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
493 const char __user *buf,
494 size_t size, loff_t *pos)
495{
496 struct amdgpu_device *adev =
497 (struct amdgpu_device *)file_inode(f)->i_private;
498 int ret;
499
500 ret = amdgpu_ras_eeprom_reset_table(
501 &(amdgpu_ras_get_context(adev)->eeprom_control));
502
503 if (!ret) {
504 /* Something was written to EEPROM.
505 */
506 amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
507 return size;
508 } else {
509 return ret;
510 }
511}
512
513static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
514 .owner = THIS_MODULE,
515 .read = NULL,
516 .write = amdgpu_ras_debugfs_ctrl_write,
517 .llseek = default_llseek
518};
519
520static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
521 .owner = THIS_MODULE,
522 .read = NULL,
523 .write = amdgpu_ras_debugfs_eeprom_write,
524 .llseek = default_llseek
525};
526
527/**
528 * DOC: AMDGPU RAS sysfs Error Count Interface
529 *
530 * It allows the user to read the error count for each IP block on the gpu through
531 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
532 *
533 * It outputs the multiple lines which report the uncorrected (ue) and corrected
534 * (ce) error counts.
535 *
536 * The format of one line is below,
537 *
538 * [ce|ue]: count
539 *
540 * Example:
541 *
542 * .. code-block:: bash
543 *
544 * ue: 0
545 * ce: 1
546 *
547 */
548static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
549 struct device_attribute *attr, char *buf)
550{
551 struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
552 struct ras_query_if info = {
553 .head = obj->head,
554 };
555
556 if (!amdgpu_ras_get_error_query_ready(obj->adev))
557 return sysfs_emit(buf, "Query currently inaccessible\n");
558
559 if (amdgpu_ras_query_error_status(obj->adev, &info))
560 return -EINVAL;
561
562 if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
563 obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
564 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
565 dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
566 }
567
568 return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
569 "ce", info.ce_count);
570}
571
572/* obj begin */
573
574#define get_obj(obj) do { (obj)->use++; } while (0)
575#define alive_obj(obj) ((obj)->use)
576
577static inline void put_obj(struct ras_manager *obj)
578{
579 if (obj && (--obj->use == 0))
580 list_del(&obj->node);
581 if (obj && (obj->use < 0))
582 DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head));
583}
584
585/* make one obj and return it. */
586static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
587 struct ras_common_if *head)
588{
589 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
590 struct ras_manager *obj;
591
592 if (!adev->ras_enabled || !con)
593 return NULL;
594
595 if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
596 return NULL;
597
598 if (head->block == AMDGPU_RAS_BLOCK__MCA) {
599 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
600 return NULL;
601
602 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
603 } else
604 obj = &con->objs[head->block];
605
606 /* already exist. return obj? */
607 if (alive_obj(obj))
608 return NULL;
609
610 obj->head = *head;
611 obj->adev = adev;
612 list_add(&obj->node, &con->head);
613 get_obj(obj);
614
615 return obj;
616}
617
618/* return an obj equal to head, or the first when head is NULL */
619struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
620 struct ras_common_if *head)
621{
622 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
623 struct ras_manager *obj;
624 int i;
625
626 if (!adev->ras_enabled || !con)
627 return NULL;
628
629 if (head) {
630 if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
631 return NULL;
632
633 if (head->block == AMDGPU_RAS_BLOCK__MCA) {
634 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
635 return NULL;
636
637 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
638 } else
639 obj = &con->objs[head->block];
640
641 if (alive_obj(obj))
642 return obj;
643 } else {
644 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
645 obj = &con->objs[i];
646 if (alive_obj(obj))
647 return obj;
648 }
649 }
650
651 return NULL;
652}
653/* obj end */
654
655/* feature ctl begin */
656static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
657 struct ras_common_if *head)
658{
659 return adev->ras_hw_enabled & BIT(head->block);
660}
661
662static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
663 struct ras_common_if *head)
664{
665 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
666
667 return con->features & BIT(head->block);
668}
669
670/*
671 * if obj is not created, then create one.
672 * set feature enable flag.
673 */
674static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
675 struct ras_common_if *head, int enable)
676{
677 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
678 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
679
680 /* If hardware does not support ras, then do not create obj.
681 * But if hardware support ras, we can create the obj.
682 * Ras framework checks con->hw_supported to see if it need do
683 * corresponding initialization.
684 * IP checks con->support to see if it need disable ras.
685 */
686 if (!amdgpu_ras_is_feature_allowed(adev, head))
687 return 0;
688
689 if (enable) {
690 if (!obj) {
691 obj = amdgpu_ras_create_obj(adev, head);
692 if (!obj)
693 return -EINVAL;
694 } else {
695 /* In case we create obj somewhere else */
696 get_obj(obj);
697 }
698 con->features |= BIT(head->block);
699 } else {
700 if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
701 con->features &= ~BIT(head->block);
702 put_obj(obj);
703 }
704 }
705
706 return 0;
707}
708
709/* wrapper of psp_ras_enable_features */
710int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
711 struct ras_common_if *head, bool enable)
712{
713 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
714 union ta_ras_cmd_input *info;
715 int ret;
716
717 if (!con)
718 return -EINVAL;
719
720 if (head->block == AMDGPU_RAS_BLOCK__GFX) {
721 info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
722 if (!info)
723 return -ENOMEM;
724
725 if (!enable) {
726 info->disable_features = (struct ta_ras_disable_features_input) {
727 .block_id = amdgpu_ras_block_to_ta(head->block),
728 .error_type = amdgpu_ras_error_to_ta(head->type),
729 };
730 } else {
731 info->enable_features = (struct ta_ras_enable_features_input) {
732 .block_id = amdgpu_ras_block_to_ta(head->block),
733 .error_type = amdgpu_ras_error_to_ta(head->type),
734 };
735 }
736 }
737
738 /* Do not enable if it is not allowed. */
739 WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
740
741 /* Only enable ras feature operation handle on host side */
742 if (head->block == AMDGPU_RAS_BLOCK__GFX &&
743 !amdgpu_sriov_vf(adev) &&
744 !amdgpu_ras_intr_triggered()) {
745 ret = psp_ras_enable_features(&adev->psp, info, enable);
746 if (ret) {
747 dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
748 enable ? "enable":"disable",
749 get_ras_block_str(head),
750 amdgpu_ras_is_poison_mode_supported(adev), ret);
751 goto out;
752 }
753 }
754
755 /* setup the obj */
756 __amdgpu_ras_feature_enable(adev, head, enable);
757 ret = 0;
758out:
759 if (head->block == AMDGPU_RAS_BLOCK__GFX)
760 kfree(info);
761 return ret;
762}
763
764/* Only used in device probe stage and called only once. */
765int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
766 struct ras_common_if *head, bool enable)
767{
768 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
769 int ret;
770
771 if (!con)
772 return -EINVAL;
773
774 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
775 if (enable) {
776 /* There is no harm to issue a ras TA cmd regardless of
777 * the currecnt ras state.
778 * If current state == target state, it will do nothing
779 * But sometimes it requests driver to reset and repost
780 * with error code -EAGAIN.
781 */
782 ret = amdgpu_ras_feature_enable(adev, head, 1);
783 /* With old ras TA, we might fail to enable ras.
784 * Log it and just setup the object.
785 * TODO need remove this WA in the future.
786 */
787 if (ret == -EINVAL) {
788 ret = __amdgpu_ras_feature_enable(adev, head, 1);
789 if (!ret)
790 dev_info(adev->dev,
791 "RAS INFO: %s setup object\n",
792 get_ras_block_str(head));
793 }
794 } else {
795 /* setup the object then issue a ras TA disable cmd.*/
796 ret = __amdgpu_ras_feature_enable(adev, head, 1);
797 if (ret)
798 return ret;
799
800 /* gfx block ras dsiable cmd must send to ras-ta */
801 if (head->block == AMDGPU_RAS_BLOCK__GFX)
802 con->features |= BIT(head->block);
803
804 ret = amdgpu_ras_feature_enable(adev, head, 0);
805
806 /* clean gfx block ras features flag */
807 if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
808 con->features &= ~BIT(head->block);
809 }
810 } else
811 ret = amdgpu_ras_feature_enable(adev, head, enable);
812
813 return ret;
814}
815
816static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
817 bool bypass)
818{
819 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
820 struct ras_manager *obj, *tmp;
821
822 list_for_each_entry_safe(obj, tmp, &con->head, node) {
823 /* bypass psp.
824 * aka just release the obj and corresponding flags
825 */
826 if (bypass) {
827 if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
828 break;
829 } else {
830 if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
831 break;
832 }
833 }
834
835 return con->features;
836}
837
838static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
839 bool bypass)
840{
841 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
842 int i;
843 const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
844
845 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
846 struct ras_common_if head = {
847 .block = i,
848 .type = default_ras_type,
849 .sub_block_index = 0,
850 };
851
852 if (i == AMDGPU_RAS_BLOCK__MCA)
853 continue;
854
855 if (bypass) {
856 /*
857 * bypass psp. vbios enable ras for us.
858 * so just create the obj
859 */
860 if (__amdgpu_ras_feature_enable(adev, &head, 1))
861 break;
862 } else {
863 if (amdgpu_ras_feature_enable(adev, &head, 1))
864 break;
865 }
866 }
867
868 for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
869 struct ras_common_if head = {
870 .block = AMDGPU_RAS_BLOCK__MCA,
871 .type = default_ras_type,
872 .sub_block_index = i,
873 };
874
875 if (bypass) {
876 /*
877 * bypass psp. vbios enable ras for us.
878 * so just create the obj
879 */
880 if (__amdgpu_ras_feature_enable(adev, &head, 1))
881 break;
882 } else {
883 if (amdgpu_ras_feature_enable(adev, &head, 1))
884 break;
885 }
886 }
887
888 return con->features;
889}
890/* feature ctl end */
891
892static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
893 enum amdgpu_ras_block block)
894{
895 if (!block_obj)
896 return -EINVAL;
897
898 if (block_obj->ras_comm.block == block)
899 return 0;
900
901 return -EINVAL;
902}
903
904static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
905 enum amdgpu_ras_block block, uint32_t sub_block_index)
906{
907 struct amdgpu_ras_block_list *node, *tmp;
908 struct amdgpu_ras_block_object *obj;
909
910 if (block >= AMDGPU_RAS_BLOCK__LAST)
911 return NULL;
912
913 if (!amdgpu_ras_is_supported(adev, block))
914 return NULL;
915
916 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
917 if (!node->ras_obj) {
918 dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
919 continue;
920 }
921
922 obj = node->ras_obj;
923 if (obj->ras_block_match) {
924 if (obj->ras_block_match(obj, block, sub_block_index) == 0)
925 return obj;
926 } else {
927 if (amdgpu_ras_block_match_default(obj, block) == 0)
928 return obj;
929 }
930 }
931
932 return NULL;
933}
934
935static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
936{
937 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
938 int ret = 0;
939
940 /*
941 * choosing right query method according to
942 * whether smu support query error information
943 */
944 ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
945 if (ret == -EOPNOTSUPP) {
946 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
947 adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
948 adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
949
950 /* umc query_ras_error_address is also responsible for clearing
951 * error status
952 */
953 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
954 adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
955 adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
956 } else if (!ret) {
957 if (adev->umc.ras &&
958 adev->umc.ras->ecc_info_query_ras_error_count)
959 adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
960
961 if (adev->umc.ras &&
962 adev->umc.ras->ecc_info_query_ras_error_address)
963 adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
964 }
965}
966
967/* query/inject/cure begin */
968int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
969 struct ras_query_if *info)
970{
971 struct amdgpu_ras_block_object *block_obj = NULL;
972 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
973 struct ras_err_data err_data = {0, 0, 0, NULL};
974
975 if (!obj)
976 return -EINVAL;
977
978 if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
979 amdgpu_ras_get_ecc_info(adev, &err_data);
980 } else {
981 block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
982 if (!block_obj || !block_obj->hw_ops) {
983 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
984 get_ras_block_str(&info->head));
985 return -EINVAL;
986 }
987
988 if (block_obj->hw_ops->query_ras_error_count)
989 block_obj->hw_ops->query_ras_error_count(adev, &err_data);
990
991 if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
992 (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
993 (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
994 if (block_obj->hw_ops->query_ras_error_status)
995 block_obj->hw_ops->query_ras_error_status(adev);
996 }
997 }
998
999 obj->err_data.ue_count += err_data.ue_count;
1000 obj->err_data.ce_count += err_data.ce_count;
1001
1002 info->ue_count = obj->err_data.ue_count;
1003 info->ce_count = obj->err_data.ce_count;
1004
1005 if (err_data.ce_count) {
1006 if (adev->smuio.funcs &&
1007 adev->smuio.funcs->get_socket_id &&
1008 adev->smuio.funcs->get_die_id) {
1009 dev_info(adev->dev, "socket: %d, die: %d "
1010 "%ld correctable hardware errors "
1011 "detected in %s block, no user "
1012 "action is needed.\n",
1013 adev->smuio.funcs->get_socket_id(adev),
1014 adev->smuio.funcs->get_die_id(adev),
1015 obj->err_data.ce_count,
1016 get_ras_block_str(&info->head));
1017 } else {
1018 dev_info(adev->dev, "%ld correctable hardware errors "
1019 "detected in %s block, no user "
1020 "action is needed.\n",
1021 obj->err_data.ce_count,
1022 get_ras_block_str(&info->head));
1023 }
1024 }
1025 if (err_data.ue_count) {
1026 if (adev->smuio.funcs &&
1027 adev->smuio.funcs->get_socket_id &&
1028 adev->smuio.funcs->get_die_id) {
1029 dev_info(adev->dev, "socket: %d, die: %d "
1030 "%ld uncorrectable hardware errors "
1031 "detected in %s block\n",
1032 adev->smuio.funcs->get_socket_id(adev),
1033 adev->smuio.funcs->get_die_id(adev),
1034 obj->err_data.ue_count,
1035 get_ras_block_str(&info->head));
1036 } else {
1037 dev_info(adev->dev, "%ld uncorrectable hardware errors "
1038 "detected in %s block\n",
1039 obj->err_data.ue_count,
1040 get_ras_block_str(&info->head));
1041 }
1042 }
1043
1044 return 0;
1045}
1046
1047int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1048 enum amdgpu_ras_block block)
1049{
1050 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1051
1052 if (!amdgpu_ras_is_supported(adev, block))
1053 return -EINVAL;
1054
1055 if (!block_obj || !block_obj->hw_ops) {
1056 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1057 ras_block_str(block));
1058 return -EINVAL;
1059 }
1060
1061 if (block_obj->hw_ops->reset_ras_error_count)
1062 block_obj->hw_ops->reset_ras_error_count(adev);
1063
1064 if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1065 (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1066 if (block_obj->hw_ops->reset_ras_error_status)
1067 block_obj->hw_ops->reset_ras_error_status(adev);
1068 }
1069
1070 return 0;
1071}
1072
1073/* wrapper of psp_ras_trigger_error */
1074int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1075 struct ras_inject_if *info)
1076{
1077 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1078 struct ta_ras_trigger_error_input block_info = {
1079 .block_id = amdgpu_ras_block_to_ta(info->head.block),
1080 .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1081 .sub_block_index = info->head.sub_block_index,
1082 .address = info->address,
1083 .value = info->value,
1084 };
1085 int ret = -EINVAL;
1086 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1087 info->head.block,
1088 info->head.sub_block_index);
1089
1090 if (!obj)
1091 return -EINVAL;
1092
1093 if (!block_obj || !block_obj->hw_ops) {
1094 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1095 get_ras_block_str(&info->head));
1096 return -EINVAL;
1097 }
1098
1099 /* Calculate XGMI relative offset */
1100 if (adev->gmc.xgmi.num_physical_nodes > 1) {
1101 block_info.address =
1102 amdgpu_xgmi_get_relative_phy_addr(adev,
1103 block_info.address);
1104 }
1105
1106 if (info->head.block == AMDGPU_RAS_BLOCK__GFX) {
1107 if (block_obj->hw_ops->ras_error_inject)
1108 ret = block_obj->hw_ops->ras_error_inject(adev, info);
1109 } else {
1110 /* If defined special ras_error_inject(e.g: xgmi), implement special ras_error_inject */
1111 if (block_obj->hw_ops->ras_error_inject)
1112 ret = block_obj->hw_ops->ras_error_inject(adev, &block_info);
1113 else /*If not defined .ras_error_inject, use default ras_error_inject*/
1114 ret = psp_ras_trigger_error(&adev->psp, &block_info);
1115 }
1116
1117 if (ret)
1118 dev_err(adev->dev, "ras inject %s failed %d\n",
1119 get_ras_block_str(&info->head), ret);
1120
1121 return ret;
1122}
1123
1124/**
1125 * amdgpu_ras_query_error_count -- Get error counts of all IPs
1126 * @adev: pointer to AMD GPU device
1127 * @ce_count: pointer to an integer to be set to the count of correctible errors.
1128 * @ue_count: pointer to an integer to be set to the count of uncorrectible
1129 * errors.
1130 *
1131 * If set, @ce_count or @ue_count, count and return the corresponding
1132 * error counts in those integer pointers. Return 0 if the device
1133 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
1134 */
1135int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1136 unsigned long *ce_count,
1137 unsigned long *ue_count)
1138{
1139 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1140 struct ras_manager *obj;
1141 unsigned long ce, ue;
1142
1143 if (!adev->ras_enabled || !con)
1144 return -EOPNOTSUPP;
1145
1146 /* Don't count since no reporting.
1147 */
1148 if (!ce_count && !ue_count)
1149 return 0;
1150
1151 ce = 0;
1152 ue = 0;
1153 list_for_each_entry(obj, &con->head, node) {
1154 struct ras_query_if info = {
1155 .head = obj->head,
1156 };
1157 int res;
1158
1159 res = amdgpu_ras_query_error_status(adev, &info);
1160 if (res)
1161 return res;
1162
1163 if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
1164 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
1165 if (amdgpu_ras_reset_error_status(adev, info.head.block))
1166 dev_warn(adev->dev, "Failed to reset error counter and error status");
1167 }
1168
1169 ce += info.ce_count;
1170 ue += info.ue_count;
1171 }
1172
1173 if (ce_count)
1174 *ce_count = ce;
1175
1176 if (ue_count)
1177 *ue_count = ue;
1178
1179 return 0;
1180}
1181/* query/inject/cure end */
1182
1183
1184/* sysfs begin */
1185
1186static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1187 struct ras_badpage **bps, unsigned int *count);
1188
1189static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1190{
1191 switch (flags) {
1192 case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1193 return "R";
1194 case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1195 return "P";
1196 case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1197 default:
1198 return "F";
1199 }
1200}
1201
1202/**
1203 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1204 *
1205 * It allows user to read the bad pages of vram on the gpu through
1206 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1207 *
1208 * It outputs multiple lines, and each line stands for one gpu page.
1209 *
1210 * The format of one line is below,
1211 * gpu pfn : gpu page size : flags
1212 *
1213 * gpu pfn and gpu page size are printed in hex format.
1214 * flags can be one of below character,
1215 *
1216 * R: reserved, this gpu page is reserved and not able to use.
1217 *
1218 * P: pending for reserve, this gpu page is marked as bad, will be reserved
1219 * in next window of page_reserve.
1220 *
1221 * F: unable to reserve. this gpu page can't be reserved due to some reasons.
1222 *
1223 * Examples:
1224 *
1225 * .. code-block:: bash
1226 *
1227 * 0x00000001 : 0x00001000 : R
1228 * 0x00000002 : 0x00001000 : P
1229 *
1230 */
1231
1232static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1233 struct kobject *kobj, struct bin_attribute *attr,
1234 char *buf, loff_t ppos, size_t count)
1235{
1236 struct amdgpu_ras *con =
1237 container_of(attr, struct amdgpu_ras, badpages_attr);
1238 struct amdgpu_device *adev = con->adev;
1239 const unsigned int element_size =
1240 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1241 unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1242 unsigned int end = div64_ul(ppos + count - 1, element_size);
1243 ssize_t s = 0;
1244 struct ras_badpage *bps = NULL;
1245 unsigned int bps_count = 0;
1246
1247 memset(buf, 0, count);
1248
1249 if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1250 return 0;
1251
1252 for (; start < end && start < bps_count; start++)
1253 s += scnprintf(&buf[s], element_size + 1,
1254 "0x%08x : 0x%08x : %1s\n",
1255 bps[start].bp,
1256 bps[start].size,
1257 amdgpu_ras_badpage_flags_str(bps[start].flags));
1258
1259 kfree(bps);
1260
1261 return s;
1262}
1263
1264static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1265 struct device_attribute *attr, char *buf)
1266{
1267 struct amdgpu_ras *con =
1268 container_of(attr, struct amdgpu_ras, features_attr);
1269
1270 return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1271}
1272
1273static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1274{
1275 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1276
1277 sysfs_remove_file_from_group(&adev->dev->kobj,
1278 &con->badpages_attr.attr,
1279 RAS_FS_NAME);
1280}
1281
1282static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
1283{
1284 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1285 struct attribute *attrs[] = {
1286 &con->features_attr.attr,
1287 NULL
1288 };
1289 struct attribute_group group = {
1290 .name = RAS_FS_NAME,
1291 .attrs = attrs,
1292 };
1293
1294 sysfs_remove_group(&adev->dev->kobj, &group);
1295
1296 return 0;
1297}
1298
1299int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1300 struct ras_common_if *head)
1301{
1302 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1303
1304 if (!obj || obj->attr_inuse)
1305 return -EINVAL;
1306
1307 get_obj(obj);
1308
1309 snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1310 "%s_err_count", head->name);
1311
1312 obj->sysfs_attr = (struct device_attribute){
1313 .attr = {
1314 .name = obj->fs_data.sysfs_name,
1315 .mode = S_IRUGO,
1316 },
1317 .show = amdgpu_ras_sysfs_read,
1318 };
1319 sysfs_attr_init(&obj->sysfs_attr.attr);
1320
1321 if (sysfs_add_file_to_group(&adev->dev->kobj,
1322 &obj->sysfs_attr.attr,
1323 RAS_FS_NAME)) {
1324 put_obj(obj);
1325 return -EINVAL;
1326 }
1327
1328 obj->attr_inuse = 1;
1329
1330 return 0;
1331}
1332
1333int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1334 struct ras_common_if *head)
1335{
1336 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1337
1338 if (!obj || !obj->attr_inuse)
1339 return -EINVAL;
1340
1341 sysfs_remove_file_from_group(&adev->dev->kobj,
1342 &obj->sysfs_attr.attr,
1343 RAS_FS_NAME);
1344 obj->attr_inuse = 0;
1345 put_obj(obj);
1346
1347 return 0;
1348}
1349
1350static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1351{
1352 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1353 struct ras_manager *obj, *tmp;
1354
1355 list_for_each_entry_safe(obj, tmp, &con->head, node) {
1356 amdgpu_ras_sysfs_remove(adev, &obj->head);
1357 }
1358
1359 if (amdgpu_bad_page_threshold != 0)
1360 amdgpu_ras_sysfs_remove_bad_page_node(adev);
1361
1362 amdgpu_ras_sysfs_remove_feature_node(adev);
1363
1364 return 0;
1365}
1366/* sysfs end */
1367
1368/**
1369 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1370 *
1371 * Normally when there is an uncorrectable error, the driver will reset
1372 * the GPU to recover. However, in the event of an unrecoverable error,
1373 * the driver provides an interface to reboot the system automatically
1374 * in that event.
1375 *
1376 * The following file in debugfs provides that interface:
1377 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1378 *
1379 * Usage:
1380 *
1381 * .. code-block:: bash
1382 *
1383 * echo true > .../ras/auto_reboot
1384 *
1385 */
1386/* debugfs begin */
1387static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1388{
1389 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1390 struct drm_minor *minor = adev_to_drm(adev)->primary;
1391 struct dentry *dir;
1392
1393 dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1394 debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1395 &amdgpu_ras_debugfs_ctrl_ops);
1396 debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1397 &amdgpu_ras_debugfs_eeprom_ops);
1398 debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1399 &con->bad_page_cnt_threshold);
1400 debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1401 debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1402 debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1403 &amdgpu_ras_debugfs_eeprom_size_ops);
1404 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1405 S_IRUGO, dir, adev,
1406 &amdgpu_ras_debugfs_eeprom_table_ops);
1407 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1408
1409 /*
1410 * After one uncorrectable error happens, usually GPU recovery will
1411 * be scheduled. But due to the known problem in GPU recovery failing
1412 * to bring GPU back, below interface provides one direct way to
1413 * user to reboot system automatically in such case within
1414 * ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine
1415 * will never be called.
1416 */
1417 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1418
1419 /*
1420 * User could set this not to clean up hardware's error count register
1421 * of RAS IPs during ras recovery.
1422 */
1423 debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1424 &con->disable_ras_err_cnt_harvest);
1425 return dir;
1426}
1427
1428static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1429 struct ras_fs_if *head,
1430 struct dentry *dir)
1431{
1432 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1433
1434 if (!obj || !dir)
1435 return;
1436
1437 get_obj(obj);
1438
1439 memcpy(obj->fs_data.debugfs_name,
1440 head->debugfs_name,
1441 sizeof(obj->fs_data.debugfs_name));
1442
1443 debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1444 obj, &amdgpu_ras_debugfs_ops);
1445}
1446
1447void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1448{
1449 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1450 struct dentry *dir;
1451 struct ras_manager *obj;
1452 struct ras_fs_if fs_info;
1453
1454 /*
1455 * it won't be called in resume path, no need to check
1456 * suspend and gpu reset status
1457 */
1458 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1459 return;
1460
1461 dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1462
1463 list_for_each_entry(obj, &con->head, node) {
1464 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1465 (obj->attr_inuse == 1)) {
1466 sprintf(fs_info.debugfs_name, "%s_err_inject",
1467 get_ras_block_str(&obj->head));
1468 fs_info.head = obj->head;
1469 amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1470 }
1471 }
1472}
1473
1474/* debugfs end */
1475
1476/* ras fs */
1477static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
1478 amdgpu_ras_sysfs_badpages_read, NULL, 0);
1479static DEVICE_ATTR(features, S_IRUGO,
1480 amdgpu_ras_sysfs_features_read, NULL);
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute_group group = {
		.name = RAS_FS_NAME,
	};
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct bin_attribute *bin_attrs[] = {
		NULL,
		NULL,
	};
	int r;

	/* add features entry */
	con->features_attr = dev_attr_features;
	group.attrs = attrs;
	sysfs_attr_init(attrs[0]);

	if (amdgpu_bad_page_threshold != 0) {
		/* add bad_page_features entry */
		bin_attr_gpu_vram_bad_pages.private = NULL;
		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
		bin_attrs[0] = &con->badpages_attr;
		group.bin_attrs = bin_attrs;
		sysfs_bin_attr_init(bin_attrs[0]);
	}

	r = sysfs_create_group(&adev->dev->kobj, &group);
	if (r)
		dev_err(adev->dev, "Failed to create RAS sysfs group!");

	return 0;
}
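
/*
 * A sketch of the resulting sysfs layout under the device directory (the
 * "card0" path below is an assumption; gpu_vram_bad_pages only exists
 * when amdgpu_bad_page_threshold != 0):
 *
 *	/sys/class/drm/card0/device/ras/features
 *	/sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 */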

static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *con_obj, *ip_obj, *tmp;

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
			if (ip_obj)
				put_obj(ip_obj);
		}
	}

	amdgpu_ras_sysfs_remove_all(adev);
	return 0;
}
/* ras fs end */

/* ih begin */

/* For hardware that cannot enable the bif ring for both ras_controller_irq
 * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
 * register to check whether the interrupt was triggered, and properly ack
 * it if it is there.
 */
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
{
	/* Fatal error events are handled on the host side */
	if (amdgpu_sriov_vf(adev) ||
	    !amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF))
		return;

	if (adev->nbio.ras &&
	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);

	if (adev->nbio.ras &&
	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
}

static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	bool poison_stat = false;
	struct amdgpu_device *adev = obj->adev;
	struct amdgpu_ras_block_object *block_obj =
		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);

	if (!block_obj || !block_obj->hw_ops)
		return;

	/* both query_poison_status and handle_poison_consumption are optional,
	 * but at least one of them must be implemented if a poison consumption
	 * handler is needed
	 */
	if (block_obj->hw_ops->query_poison_status) {
		poison_stat = block_obj->hw_ops->query_poison_status(adev);
		if (!poison_stat) {
			/* Not a poison consumption interrupt, no need to handle it */
			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
				 block_obj->ras_comm.name);

			return;
		}
	}

	if (!adev->gmc.xgmi.connected_to_cpu)
		amdgpu_umc_poison_handler(adev, false);

	if (block_obj->hw_ops->handle_poison_consumption)
		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);

	/* gpu reset is the fallback for failed and default cases */
	if (poison_stat) {
		dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
			 block_obj->ras_comm.name);
		amdgpu_ras_reset_gpu(adev);
	}
}

static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	dev_info(obj->adev->dev,
		 "Poison is created, no user action is needed.\n");
}

static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct ras_err_data err_data = {0, 0, 0, NULL};
	int ret;

	if (!data->cb)
		return;

	/* Let the IP handle its data; we may need the output from the
	 * callback to update the error type/count, etc.
	 */
	ret = data->cb(obj->adev, &err_data, entry);
	/* A UE will trigger an interrupt, and in that case we need a reset
	 * to recover the whole system. But leave that recovery to the IP;
	 * here we just dispatch the error.
	 */
	if (ret == AMDGPU_RAS_SUCCESS) {
		/* these counts could be left as 0 if
		 * some blocks do not count error numbers
		 */
		obj->err_data.ue_count += err_data.ue_count;
		obj->err_data.ce_count += err_data.ce_count;
	}
}

static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct amdgpu_iv_entry entry;

	while (data->rptr != data->wptr) {
		rmb();
		memcpy(&entry, &data->ring[data->rptr],
		       data->element_size);

		wmb();
		data->rptr = (data->aligned_element_size +
			      data->rptr) % data->ring_size;

		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
			else
				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
		} else {
			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
				amdgpu_ras_interrupt_umc_handler(obj, &entry);
			else
				dev_warn(obj->adev->dev,
					 "No RAS interrupt handler for non-UMC block with poison disabled.\n");
		}
	}
}

static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
{
	struct ras_ih_data *data =
		container_of(work, struct ras_ih_data, ih_work);
	struct ras_manager *obj =
		container_of(data, struct ras_manager, ih_data);

	amdgpu_ras_interrupt_handler(obj);
}

int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
		struct ras_dispatch_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data;

	if (!obj)
		return -EINVAL;

	data = &obj->ih_data;
	if (data->inuse == 0)
		return 0;

	/* The ring may overflow: entries are silently overwritten if the
	 * producer outruns the consumer.
	 */
	memcpy(&data->ring[data->wptr], info->entry,
	       data->element_size);

	wmb();
	data->wptr = (data->aligned_element_size +
		      data->wptr) % data->ring_size;

	schedule_work(&data->ih_work);

	return 0;
}
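
/*
 * A minimal usage sketch: an IP block's interrupt handler typically
 * forwards a RAS IV entry here. The ras_if pointer below is an
 * assumption about the caller's state; the actual field depends on the
 * IP block.
 *
 *	struct ras_dispatch_if ih_data = {
 *		.head = *ras_if,
 *		.entry = entry,
 *	};
 *
 *	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
 */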

int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
	struct ras_ih_data *data;

	if (!obj)
		return -EINVAL;

	data = &obj->ih_data;
	if (data->inuse == 0)
		return 0;

	cancel_work_sync(&data->ih_work);

	kfree(data->ring);
	memset(data, 0, sizeof(*data));
	put_obj(obj);

	return 0;
}

int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
	struct ras_ih_data *data;
	struct amdgpu_ras_block_object *ras_obj;

	if (!obj) {
		/* in case we register the IH before enabling the ras feature */
		obj = amdgpu_ras_create_obj(adev, head);
		if (!obj)
			return -EINVAL;
	} else
		get_obj(obj);

	ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);

	data = &obj->ih_data;
	/* add the callback, etc. */
	*data = (struct ras_ih_data) {
		.inuse = 0,
		.cb = ras_obj->ras_cb,
		.element_size = sizeof(struct amdgpu_iv_entry),
		.rptr = 0,
		.wptr = 0,
	};

	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);

	data->aligned_element_size = ALIGN(data->element_size, 8);
	/* the ring can store 64 iv entries. */
	data->ring_size = 64 * data->aligned_element_size;
	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
	if (!data->ring) {
		put_obj(obj);
		return -ENOMEM;
	}

	/* IH is ready */
	data->inuse = 1;

	return 0;
}

static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
	}

	return 0;
}
/* ih end */

/* traverse all IPs except NBIO to query the error counter */
static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return;

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {
			.head = obj->head,
		};

		/*
		 * The PCIE_BIF IP has a separate isr for the ras controller
		 * interrupt; the block-specific ras counter query is done
		 * in that isr. So skip this block in the common sync flood
		 * interrupt isr.
		 */
		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
			continue;

		/*
		 * This is a workaround for aldebaran: skip sending the
		 * message to the smu to get the ecc_info table, because the
		 * smu currently fails to handle that request. It should be
		 * removed once the smu fixes its ecc_info table handling.
		 */
		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))
			continue;

		amdgpu_ras_query_error_status(adev, &info);

		if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
		    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
		    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
			if (amdgpu_ras_reset_error_status(adev, info.head.block))
				dev_warn(adev->dev, "Failed to reset error counter and error status");
		}
	}
}

/* Parse RdRspStatus and WrRspStatus */
static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
					  struct ras_query_if *info)
{
	struct amdgpu_ras_block_object *block_obj;
	/*
	 * Only two blocks need to query read/write
	 * RspStatus at the current state
	 */
	if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
	    (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
		return;

	block_obj = amdgpu_ras_get_ras_block(adev,
					     info->head.block,
					     info->head.sub_block_index);

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return;
	}

	if (block_obj->hw_ops->query_ras_error_status)
		block_obj->hw_ops->query_ras_error_status(adev);
}

static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return;

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {
			.head = obj->head,
		};

		amdgpu_ras_error_status_query(adev, &info);
	}
}

/* recovery begin */

/* return 0 on success.
 * the caller must free bps.
 */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	int i = 0;
	int ret = 0, status;

	if (!con || !con->eh_data || !bps || !count)
		return -EINVAL;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data || data->count == 0) {
		*bps = NULL;
		ret = -EINVAL;
		goto out;
	}

	*bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL);
	if (!*bps) {
		ret = -ENOMEM;
		goto out;
	}

	for (; i < data->count; i++) {
		(*bps)[i] = (struct ras_badpage){
			.bp = data->bps[i].retired_page,
			.size = AMDGPU_GPU_PAGE_SIZE,
			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
		};
		status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
				data->bps[i].retired_page);
		if (status == -EBUSY)
			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
		else if (status == -ENOENT)
			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
	}

	*count = data->count;
out:
	mutex_unlock(&con->recovery_lock);
	return ret;
}
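
/*
 * A sketch of how the flags above surface in the gpu_vram_bad_pages
 * sysfs node (the exact formatting lives in
 * amdgpu_ras_sysfs_badpages_read): RESERVED means the page has been
 * reserved and retired, PENDING means it is pending for reservation,
 * and FAULT means the reservation failed.
 */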

static void amdgpu_ras_do_recovery(struct work_struct *work)
{
	struct amdgpu_ras *ras =
		container_of(work, struct amdgpu_ras, recovery_work);
	struct amdgpu_device *remote_adev = NULL;
	struct amdgpu_device *adev = ras->adev;
	struct list_head device_list, *device_list_handle = NULL;

	if (!ras->disable_ras_err_cnt_harvest) {
		struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);

		/* Build the list of devices to query RAS related errors */
		if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
			device_list_handle = &hive->device_list;
		} else {
			INIT_LIST_HEAD(&device_list);
			list_add_tail(&adev->gmc.xgmi.head, &device_list);
			device_list_handle = &device_list;
		}

		list_for_each_entry(remote_adev,
				device_list_handle, gmc.xgmi.head) {
			amdgpu_ras_query_err_status(remote_adev);
			amdgpu_ras_log_on_err_counter(remote_adev);
		}

		amdgpu_put_xgmi_hive(hive);
	}

	if (amdgpu_device_should_recover_gpu(ras->adev)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;

		/* Perform a full reset in fatal error mode */
		if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
		else
			clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
	}
	atomic_set(&ras->in_recovery, 0);
}

/* alloc/realloc bps array */
static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
		struct ras_err_handler_data *data, int pages)
{
	unsigned int old_space = data->count + data->space_left;
	unsigned int new_space = old_space + pages;
	unsigned int align_space = ALIGN(new_space, 512);
	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);

	if (!bps)
		return -ENOMEM;

	if (data->bps) {
		memcpy(bps, data->bps,
		       data->count * sizeof(*data->bps));
		kfree(data->bps);
	}

	data->bps = bps;
	data->space_left += align_space - old_space;
	return 0;
}
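
/*
 * A worked example of the sizing above: with count + space_left = 600
 * and pages = 256, new_space is 856 and align_space rounds up to 1024,
 * so space_left grows by 1024 - 600 = 424 entries.
 */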

/* it deals with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
		struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	int ret = 0;
	uint32_t i;

	if (!con || !con->eh_data || !bps || pages <= 0)
		return 0;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data)
		goto out;

	for (i = 0; i < pages; i++) {
		if (amdgpu_ras_check_bad_page_unlock(con,
			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
			continue;

		if (!data->space_left &&
			amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
			ret = -ENOMEM;
			goto out;
		}

		amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
			AMDGPU_GPU_PAGE_SIZE);

		memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
		data->count++;
		data->space_left--;
	}
out:
	mutex_unlock(&con->recovery_lock);

	return ret;
}
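
/*
 * A minimal caller sketch: page retirement code that has translated an
 * error address into a retired page frame would queue it here and then
 * persist it to EEPROM. "pfn" below is a hypothetical, already
 * translated page frame number.
 *
 *	struct eeprom_table_record bp = {
 *		.retired_page = pfn,
 *	};
 *
 *	amdgpu_ras_add_bad_pages(adev, &bp, 1);
 *	amdgpu_ras_save_bad_pages(adev);
 */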

/*
 * write the error record array to eeprom; access to the array is
 * protected by recovery_lock
 */
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	struct amdgpu_ras_eeprom_control *control;
	int save_count;

	if (!con || !con->eh_data)
		return 0;

	mutex_lock(&con->recovery_lock);
	control = &con->eeprom_control;
	data = con->eh_data;
	save_count = data->count - control->ras_num_recs;
	mutex_unlock(&con->recovery_lock);
	/* only new entries are saved */
	if (save_count > 0) {
		if (amdgpu_ras_eeprom_append(control,
					     &data->bps[control->ras_num_recs],
					     save_count)) {
			dev_err(adev->dev, "Failed to save EEPROM table data!");
			return -EIO;
		}

		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
	}

	return 0;
}

/*
 * read the error record array from eeprom and reserve enough space for
 * storing new bad pages
 */
static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras_eeprom_control *control =
		&adev->psp.ras_context.ras->eeprom_control;
	struct eeprom_table_record *bps;
	int ret;

	/* no bad page record, skip eeprom access */
	if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
		return 0;

	bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
	if (!bps)
		return -ENOMEM;

	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
	if (ret)
		dev_err(adev->dev, "Failed to load EEPROM table records!");
	else
		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);

	kfree(bps);
	return ret;
}

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
		uint64_t addr)
{
	struct ras_err_handler_data *data = con->eh_data;
	int i;

	addr >>= AMDGPU_GPU_PAGE_SHIFT;
	for (i = 0; i < data->count; i++)
		if (addr == data->bps[i].retired_page)
			return true;

	return false;
}

/*
 * check if an address belongs to a bad page
 *
 * Note: this check is only for the umc block
 */
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
		uint64_t addr)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	bool ret = false;

	if (!con || !con->eh_data)
		return ret;

	mutex_lock(&con->recovery_lock);
	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
	mutex_unlock(&con->recovery_lock);
	return ret;
}

static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
					  uint32_t max_count)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	/*
	 * Justification of the bad_page_cnt_threshold value in the ras
	 * structure.
	 *
	 * Generally, -1 <= amdgpu_bad_page_threshold <= max record length
	 * in eeprom, which introduces two scenarios:
	 *
	 * Bad page retirement enabled:
	 * - If amdgpu_bad_page_threshold = -1, bad_page_cnt_threshold is
	 *   set to a typical value by the formula below.
	 *
	 * - If the user-supplied value satisfies
	 *   0 < amdgpu_bad_page_threshold < max record length in eeprom,
	 *   use it directly.
	 *
	 * Bad page retirement disabled:
	 * - If amdgpu_bad_page_threshold = 0, bad page retirement is
	 *   disabled and bad_page_cnt_threshold has no effect.
	 */

	if (amdgpu_bad_page_threshold < 0) {
		u64 val = adev->gmc.mc_vram_size;

		do_div(val, RAS_BAD_PAGE_COVER);
		con->bad_page_cnt_threshold = min(lower_32_bits(val),
						  max_count);
	} else {
		con->bad_page_cnt_threshold = min_t(int, max_count,
						    amdgpu_bad_page_threshold);
	}
}
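
/*
 * A worked example of the default formula: with 16 GiB of VRAM and
 * RAS_BAD_PAGE_COVER at 100 MiB per bad page, the computed threshold is
 * 17179869184 / 104857600 = 163 pages, further capped by the EEPROM
 * record capacity passed in as max_count.
 */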

int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data **data;
	u32 max_eeprom_records_count = 0;
	bool exc_err_limit = false;
	int ret;

	if (!con || amdgpu_sriov_vf(adev))
		return 0;

	/* Allow access to RAS EEPROM via debugfs, when the ASIC
	 * supports RAS and debugfs is enabled, but when
	 * adev->ras_enabled is unset, i.e. when the "ras_enable"
	 * module parameter is set to 0.
	 */
	con->adev = adev;

	if (!adev->ras_enabled)
		return 0;

	data = &con->eh_data;
	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!*data) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_init(&con->recovery_lock);
	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
	atomic_set(&con->in_recovery, 0);
	con->eeprom_control.bad_channel_bitmap = 0;

	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count();
	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);

	/* Todo: during tests the SMU might fail to read the eeprom through
	 * I2C when the GPU is pending an XGMI reset during probe time
	 * (mostly after a second bus reset), so skip it for now
	 */
	if (adev->gmc.xgmi.pending_reset)
		return 0;
	ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
	/*
	 * This call fails when exc_err_limit is true or
	 * ret != 0.
	 */
	if (exc_err_limit || ret)
		goto free;

	if (con->eeprom_control.ras_num_recs) {
		ret = amdgpu_ras_load_bad_pages(adev);
		if (ret)
			goto free;

		amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);

		if (con->update_channel_flag == true) {
			amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
			con->update_channel_flag = false;
		}
	}

#ifdef CONFIG_X86_MCE_AMD
	if ((adev->asic_type == CHIP_ALDEBARAN) &&
	    (adev->gmc.xgmi.connected_to_cpu))
		amdgpu_register_bad_pages_mca_notifier(adev);
#endif
	return 0;

free:
	kfree((*data)->bps);
	kfree(*data);
	con->eh_data = NULL;
out:
	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);

	/*
	 * Except for the error-threshold-exceeded case, other failures in
	 * this function do not fail amdgpu driver init.
	 */
	if (!exc_err_limit)
		ret = 0;
	else
		ret = -EINVAL;

	return ret;
}

static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data = con->eh_data;

	/* recovery_init failed to init it, fini is useless */
	if (!data)
		return 0;

	cancel_work_sync(&con->recovery_work);

	mutex_lock(&con->recovery_lock);
	con->eh_data = NULL;
	kfree(data->bps);
	kfree(data);
	mutex_unlock(&con->recovery_lock);

	return 0;
}
/* recovery end */

static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		switch (adev->ip_versions[MP0_HWIP][0]) {
		case IP_VERSION(13, 0, 2):
			return true;
		default:
			return false;
		}
	}

	if (adev->asic_type == CHIP_IP_DISCOVERY) {
		switch (adev->ip_versions[MP0_HWIP][0]) {
		case IP_VERSION(13, 0, 0):
		case IP_VERSION(13, 0, 10):
			return true;
		default:
			return false;
		}
	}

	return adev->asic_type == CHIP_VEGA10 ||
		adev->asic_type == CHIP_VEGA20 ||
		adev->asic_type == CHIP_ARCTURUS ||
		adev->asic_type == CHIP_ALDEBARAN ||
		adev->asic_type == CHIP_SIENNA_CICHLID;
}

/*
 * this is a workaround for the vega20 workstation sku: force enable gfx
 * ras and ignore the vbios gfx ras flag, because GC EDC cannot be
 * written on those boards
 */
static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;

	if (!ctx)
		return;

	if (strnstr(ctx->vbios_version, "D16406",
		    sizeof(ctx->vbios_version)) ||
	    strnstr(ctx->vbios_version, "D36002",
		    sizeof(ctx->vbios_version)))
		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
}

/*
 * check the hardware's ras ability, which is saved in hw_supported.
 * if the hardware does not support ras, we can skip some ras
 * initialization and forbid ras operations from IPs.
 * if software itself, say a boot parameter, limits the ras ability, we
 * still need to allow IPs to do some limited operations, like disable.
 * In that case, we have to initialize ras as normal, but need to check
 * in each function whether the operation is allowed.
 */
static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
{
	adev->ras_hw_enabled = adev->ras_enabled = 0;

	if (!adev->is_atom_fw ||
	    !amdgpu_ras_asic_supported(adev))
		return;

	if (!adev->gmc.xgmi.connected_to_cpu) {
		if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
			dev_info(adev->dev, "MEM ECC is active.\n");
			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
						 1 << AMDGPU_RAS_BLOCK__DF);
		} else {
			dev_info(adev->dev, "MEM ECC is not present.\n");
		}

		if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
			dev_info(adev->dev, "SRAM ECC is active.\n");
			if (!amdgpu_sriov_vf(adev)) {
				adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
							  1 << AMDGPU_RAS_BLOCK__DF);

				if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
				    adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
					adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
								 1 << AMDGPU_RAS_BLOCK__JPEG);
				else
					adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
								  1 << AMDGPU_RAS_BLOCK__JPEG);
			} else {
				adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
							 1 << AMDGPU_RAS_BLOCK__SDMA |
							 1 << AMDGPU_RAS_BLOCK__GFX);
			}
		} else {
			dev_info(adev->dev, "SRAM ECC is not present.\n");
		}
	} else {
		/* the driver only manages a few IP blocks' RAS features
		 * when the GPU is connected to the cpu through XGMI */
		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
					 1 << AMDGPU_RAS_BLOCK__SDMA |
					 1 << AMDGPU_RAS_BLOCK__MMHUB);
	}

	amdgpu_ras_get_quirks(adev);

	/* hw_supported needs to be aligned with RAS block mask. */
	adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;

	adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
		adev->ras_hw_enabled & amdgpu_ras_mask;
}

static void amdgpu_ras_counte_dw(struct work_struct *work)
{
	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
					      ras_counte_delay_work.work);
	struct amdgpu_device *adev = con->adev;
	struct drm_device *dev = adev_to_drm(adev);
	unsigned long ce_count, ue_count;
	int res;

	res = pm_runtime_get_sync(dev->dev);
	if (res < 0)
		goto Out;

	/* Cache new values.
	 */
	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
		atomic_set(&con->ras_ce_count, ce_count);
		atomic_set(&con->ras_ue_count, ue_count);
	}

	pm_runtime_mark_last_busy(dev->dev);
Out:
	pm_runtime_put_autosuspend(dev->dev);
}

int amdgpu_ras_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int r;
	bool df_poison, umc_poison;

	if (con)
		return 0;

	con = kzalloc(sizeof(struct amdgpu_ras) +
		      sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
		      sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
		      GFP_KERNEL);
	if (!con)
		return -ENOMEM;

	con->adev = adev;
	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
	atomic_set(&con->ras_ce_count, 0);
	atomic_set(&con->ras_ue_count, 0);

	con->objs = (struct ras_manager *)(con + 1);

	amdgpu_ras_set_context(adev, con);

	amdgpu_ras_check_supported(adev);

	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
		/* set the gfx block ras context feature for VEGA20 Gaming;
		 * the ras disable cmd is sent to the ras ta during ras late init.
		 */
		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);

			return 0;
		}

		r = 0;
		goto release_con;
	}

	con->update_channel_flag = false;
	con->features = 0;
	INIT_LIST_HEAD(&con->head);
	/* Might need to get this flag from the vbios. */
	con->flags = RAS_DEFAULT_FLAGS;

	/* initialize the nbio ras function ahead of any other
	 * ras functions so the hardware fatal error interrupt
	 * can be enabled as early as possible */
	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		if (!adev->gmc.xgmi.connected_to_cpu) {
			adev->nbio.ras = &nbio_v7_4_ras;
			amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block);
			adev->nbio.ras_if = &adev->nbio.ras->ras_block.ras_comm;
		}
		break;
	default:
		/* nbio ras is not available */
		break;
	}

	if (adev->nbio.ras &&
	    adev->nbio.ras->init_ras_controller_interrupt) {
		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
		if (r)
			goto release_con;
	}

	if (adev->nbio.ras &&
	    adev->nbio.ras->init_ras_err_event_athub_interrupt) {
		r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
		if (r)
			goto release_con;
	}

	/* Init the poison supported flag; the default value is false */
	if (adev->gmc.xgmi.connected_to_cpu) {
		/* enabled by default when the GPU is connected to the CPU */
		con->poison_supported = true;
	} else if (adev->df.funcs &&
		   adev->df.funcs->query_ras_poison_mode &&
		   adev->umc.ras &&
		   adev->umc.ras->query_ras_poison_mode) {
		df_poison =
			adev->df.funcs->query_ras_poison_mode(adev);
		umc_poison =
			adev->umc.ras->query_ras_poison_mode(adev);
		/* Only if poison is set in both DF and UMC can we support it */
		if (df_poison && umc_poison)
			con->poison_supported = true;
		else if (df_poison != umc_poison)
			dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
				 df_poison, umc_poison);
	}

	if (amdgpu_ras_fs_init(adev)) {
		r = -EINVAL;
		goto release_con;
	}

	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
		 "hardware ability[%x] ras_mask[%x]\n",
		 adev->ras_hw_enabled, adev->ras_enabled);

	return 0;
release_con:
	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return r;
}

int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
{
	if (adev->gmc.xgmi.connected_to_cpu)
		return 1;
	return 0;
}

static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
					    struct ras_common_if *ras_block)
{
	struct ras_query_if info = {
		.head = *ras_block,
	};

	if (!amdgpu_persistent_edc_harvesting_supported(adev))
		return 0;

	if (amdgpu_ras_query_error_status(adev, &info) != 0)
		DRM_WARN("RAS init harvest failure");

	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
		DRM_WARN("RAS init harvest reset failure");

	return 0;
}

bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return false;

	return con->poison_supported;
}

/* helper function to handle common stuff in the ip late init phase */
int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
			       struct ras_common_if *ras_block)
{
	struct amdgpu_ras_block_object *ras_obj = NULL;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	unsigned long ue_count, ce_count;
	int r;

	/* disable the RAS feature per IP block if it is not supported */
	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
		return 0;
	}

	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
	if (r) {
		if (adev->in_suspend || amdgpu_in_reset(adev)) {
			/* in the resume phase, if ras cannot be enabled,
			 * clean up all ras fs nodes and disable ras */
			goto cleanup;
		} else
			return r;
	}

	/* check for errors on warm reset for ASICs that support EDC persistence */
	amdgpu_persistent_edc_harvesting(adev, ras_block);

	/* in the resume phase, no need to create ras fs nodes */
	if (adev->in_suspend || amdgpu_in_reset(adev))
		return 0;

	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
	if (ras_obj->ras_cb || (ras_obj->hw_ops &&
	    (ras_obj->hw_ops->query_poison_status ||
	     ras_obj->hw_ops->handle_poison_consumption))) {
		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
		if (r)
			goto cleanup;
	}

	r = amdgpu_ras_sysfs_create(adev, ras_block);
	if (r)
		goto interrupt;

	/* Those are the cached values at init.
	 */
	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
		atomic_set(&con->ras_ce_count, ce_count);
		atomic_set(&con->ras_ue_count, ue_count);
	}

	return 0;

interrupt:
	if (ras_obj->ras_cb)
		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
cleanup:
	amdgpu_ras_feature_enable(adev, ras_block, 0);
	return r;
}
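
/*
 * A minimal sketch of how an IP block hooks into this late init path.
 * The field values below are assumptions for illustration; a real block
 * fills in its own hw_ops and interrupt callback, and can leave
 * ras_late_init pointing at the common helper above.
 *
 *	struct amdgpu_ras_block_object my_ip_ras = {
 *		.ras_comm = {
 *			.name = "my_ip",
 *			.block = AMDGPU_RAS_BLOCK__SDMA,
 *		},
 *		.hw_ops = &my_ip_ras_hw_ops,
 *		.ras_late_init = amdgpu_ras_block_late_init,
 *	};
 */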

static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
					      struct ras_common_if *ras_block)
{
	return amdgpu_ras_block_late_init(adev, ras_block);
}

/* helper function to remove the ras fs node and interrupt handler */
void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
				struct ras_common_if *ras_block)
{
	struct amdgpu_ras_block_object *ras_obj;

	if (!ras_block)
		return;

	amdgpu_ras_sysfs_remove(adev, ras_block);

	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
	if (ras_obj->ras_cb)
		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
}

static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
					       struct ras_common_if *ras_block)
{
	amdgpu_ras_block_late_fini(adev, ras_block);
}

/* do some init work after IP late init as dependence.
 * it runs in the resume/gpu reset/booting up cases.
 */
void amdgpu_ras_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	if (!adev->ras_enabled || !con) {
		/* clean the ras context for VEGA20 Gaming after sending the ras disable cmd */
		amdgpu_release_ras_context(adev);

		return;
	}

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		/* Set up all other IPs which are not implemented. There is a
		 * tricky thing: the IP's actual ras error type should be
		 * MULTI_UNCORRECTABLE, but as the driver does not handle it,
		 * ERROR_NONE makes sense anyway.
		 */
		amdgpu_ras_enable_all_features(adev, 1);

		/* We enable ras on all hw_supported blocks, but the boot
		 * parameter might disable some of them and one or more IPs
		 * may not be implemented yet. So we disable them on their
		 * behalf.
		 */
		list_for_each_entry_safe(obj, tmp, &con->head, node) {
			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
				amdgpu_ras_feature_enable(adev, &obj->head, 0);
				/* there should not be any reference. */
				WARN_ON(alive_obj(obj));
			}
		}
	}
}

void amdgpu_ras_suspend(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_enabled || !con)
		return;

	amdgpu_ras_disable_all_features(adev, 0);
	/* Make sure all ras objects are disabled. */
	if (con->features)
		amdgpu_ras_disable_all_features(adev, 1);
}

int amdgpu_ras_late_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;
	int r;

	/* The guest side doesn't need to init the ras feature */
	if (amdgpu_sriov_vf(adev))
		return 0;

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}

		obj = node->ras_obj;
		if (obj->ras_late_init) {
			r = obj->ras_late_init(adev, &obj->ras_comm);
			if (r) {
				dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
					obj->ras_comm.name, r);
				return r;
			}
		} else
			amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
	}

	return 0;
}

/* do some fini work before IP fini as dependence */
int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_enabled || !con)
		return 0;

	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
	if (con->features)
		amdgpu_ras_disable_all_features(adev, 0);
	amdgpu_ras_recovery_fini(adev);
	return 0;
}

int amdgpu_ras_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras_block_list *ras_node, *tmp;
	struct amdgpu_ras_block_object *obj = NULL;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_enabled || !con)
		return 0;

	list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
		if (ras_node->ras_obj) {
			obj = ras_node->ras_obj;
			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
			    obj->ras_fini)
				obj->ras_fini(adev, &obj->ras_comm);
			else
				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
		}

		/* Clear ras blocks from ras_list and free the ras block list node */
		list_del(&ras_node->node);
		kfree(ras_node);
	}

	amdgpu_ras_fs_fini(adev);
	amdgpu_ras_interrupt_remove_all(adev);

	WARN(con->features, "Feature mask is not cleared");

	if (con->features)
		amdgpu_ras_disable_all_features(adev, 1);

	cancel_delayed_work_sync(&con->ras_counte_delay_work);

	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return 0;
}

void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
	amdgpu_ras_check_supported(adev);
	if (!adev->ras_hw_enabled)
		return;

	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
		dev_info(adev->dev, "uncorrectable hardware error "
			 "(ERREVENT_ATHUB_INTERRUPT) detected!\n");

		amdgpu_ras_reset_gpu(adev);
	}
}

bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20 &&
	    adev->pm.fw_version <= 0x283400) {
		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
		       amdgpu_ras_intr_triggered();
	}

	return false;
}

void amdgpu_release_ras_context(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return;

	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
		amdgpu_ras_set_context(adev, NULL);
		kfree(con);
	}
}

#ifdef CONFIG_X86_MCE_AMD
static struct amdgpu_device *find_adev(uint32_t node_id)
{
	int i;
	struct amdgpu_device *adev = NULL;

	for (i = 0; i < mce_adev_list.num_gpu; i++) {
		adev = mce_adev_list.devs[i];

		if (adev && adev->gmc.xgmi.connected_to_cpu &&
		    adev->gmc.xgmi.physical_node_id == node_id)
			break;
		adev = NULL;
	}

	return adev;
}

#define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
#define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
#define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
#define GPU_ID_OFFSET		8

static int amdgpu_bad_page_notifier(struct notifier_block *nb,
				    unsigned long val, void *data)
{
	struct mce *m = (struct mce *)data;
	struct amdgpu_device *adev = NULL;
	uint32_t gpu_id = 0;
	uint32_t umc_inst = 0, ch_inst = 0;

	/*
	 * Only process the error if it was generated in UMC_V2, which
	 * belongs to the GPU UMCs, and occurred in DramECC (extended error
	 * code = 0); otherwise bail out.
	 */
	if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
		    (XEC(m->status, 0x3f) == 0x0)))
		return NOTIFY_DONE;

	/*
	 * If it is a correctable error, return.
	 */
	if (mce_is_correctable(m))
		return NOTIFY_OK;

	/*
	 * The GPU Id is offset by GPU_ID_OFFSET in the MCA_IPID_UMC register.
	 */
	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;

	adev = find_adev(gpu_id);
	if (!adev) {
		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
			 gpu_id);
		return NOTIFY_DONE;
	}

	/*
	 * For an uncorrectable error, find out the UMC instance and
	 * channel index.
	 */
	umc_inst = GET_UMC_INST(m->ipid);
	ch_inst = GET_CHAN_INDEX(m->ipid);

	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
		 umc_inst, ch_inst);

	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
		return NOTIFY_OK;
	else
		return NOTIFY_DONE;
}

static struct notifier_block amdgpu_bad_page_nb = {
	.notifier_call = amdgpu_bad_page_notifier,
	.priority = MCE_PRIO_UC,
};

static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
{
	/*
	 * Add the adev to the mce_adev_list.
	 * During mode2 reset, the amdgpu device is temporarily
	 * removed from the mgpu_info list, which can cause
	 * page retirement to fail.
	 * Use this list instead of mgpu_info to find the amdgpu
	 * device on which the UMC error was reported.
	 */
	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;

	/*
	 * Register the x86 notifier only once
	 * with the MCE subsystem.
	 */
	if (!notifier_registered) {
		mce_register_decode_chain(&amdgpu_bad_page_nb);
		notifier_registered = true;
	}
}
#endif

struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
{
	if (!adev)
		return NULL;

	return adev->psp.ras_context.ras;
}

int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
{
	if (!adev)
		return -EINVAL;

	adev->psp.ras_context.ras = ras_con;
	return 0;
}

/* check if ras is supported on block, say, sdma, gfx */
int amdgpu_ras_is_supported(struct amdgpu_device *adev,
			    unsigned int block)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (block >= AMDGPU_RAS_BLOCK_COUNT)
		return 0;
	return ras && (adev->ras_enabled & (1 << block));
}

int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
	return 0;
}

/* Register each ip ras block into amdgpu ras */
int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
				  struct amdgpu_ras_block_object *ras_block_obj)
{
	struct amdgpu_ras_block_list *ras_node;

	if (!adev || !ras_block_obj)
		return -EINVAL;

	if (!amdgpu_ras_asic_supported(adev))
		return 0;

	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
	if (!ras_node)
		return -ENOMEM;

	INIT_LIST_HEAD(&ras_node->node);
	ras_node->ras_obj = ras_block_obj;
	list_add_tail(&ras_node->node, &adev->ras_list);

	return 0;
}
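
/*
 * A minimal usage sketch, mirroring how the nbio block is wired up in
 * amdgpu_ras_init() above (the gfx field names are assumptions for
 * illustration; each IP keeps its own block object and common interface
 * pointer):
 *
 *	amdgpu_ras_register_ras_block(adev, &adev->gfx.ras->ras_block);
 *	adev->gfx.ras_if = &adev->gfx.ras->ras_block.ras_comm;
 */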