/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "soc15.h"
#include "df/df_3_6_offset.h"
#include "xgmi/xgmi_4_0_0_smn.h"
#include "xgmi/xgmi_4_0_0_sh_mask.h"
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"

static DEFINE_MUTEX(xgmi_mutex);

#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4

static LIST_HEAD(xgmi_hive_list);

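/*
 * Per-link PCS error status registers. Each entry below appears to address
 * the same PCS_ERROR_STATUS register for a different link instance via a
 * fixed SMN aperture stride (note the 0x100000 steps between offsets).
 */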
static const int xgmi_pcs_err_status_reg_vg20[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
};

static const int wafl_pcs_err_status_reg_vg20[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const int xgmi_pcs_err_status_reg_arct[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
};

/* same as vg20 */
static const int wafl_pcs_err_status_reg_arct[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
	{"XGMI PCS DataLossErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
	{"XGMI PCS TrainingErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
	{"XGMI PCS CRCErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
	{"XGMI PCS BERExceededErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
	{"XGMI PCS TxMetaDataErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"XGMI PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"XGMI PCS DataParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
	{"XGMI PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"XGMI PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"XGMI PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"XGMI PCS DeskewErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
	{"XGMI PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"XGMI PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"XGMI PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"XGMI PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"XGMI PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"XGMI PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"XGMI PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
	{"WAFL PCS DataLossErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
	{"WAFL PCS TrainingErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
	{"WAFL PCS CRCErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
	{"WAFL PCS BERExceededErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
	{"WAFL PCS TxMetaDataErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"WAFL PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"WAFL PCS DataParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
	{"WAFL PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"WAFL PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"WAFL PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"WAFL PCS DeskewErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
	{"WAFL PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"WAFL PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"WAFL PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"WAFL PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"WAFL PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"WAFL PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"WAFL PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

/**
 * DOC: AMDGPU XGMI Support
 *
 * XGMI is a high speed interconnect that joins multiple GPU cards
 * into a homogeneous memory space that is organized by a collective
 * hive ID and individual node IDs, both of which are 64-bit numbers.
 *
 * The file xgmi_device_id contains the unique per GPU device ID and
 * is stored in the /sys/class/drm/card${cardno}/device/ directory.
 *
 * Inside the device directory a sub-directory 'xgmi_hive_info' is
 * created which contains the hive ID and the list of nodes.
 *
 * The hive ID is stored in:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/xgmi_hive_id
 *
 * The node information is stored in numbered directories:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/node${nodeno}/xgmi_device_id
 *
 * Each device has its own xgmi_hive_info directory with a mirror
 * set of node sub-directories.
 *
 * The XGMI memory space is built by contiguously adding the
 * power-of-two padded VRAM space from each node to each other.
 *
 */
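
/*
 * For example, on a hypothetical two-card hive exposed through card0 and
 * card1, the layout would look roughly like this (card and node numbers
 * are illustrative only):
 *
 *   /sys/class/drm/card0/device/xgmi_device_id
 *   /sys/class/drm/card0/device/xgmi_hive_info/xgmi_hive_id
 *   /sys/class/drm/card0/device/xgmi_hive_info/node1/xgmi_device_id
 *   /sys/class/drm/card0/device/xgmi_hive_info/node2/xgmi_device_id
 *   /sys/class/drm/card1/device/xgmi_hive_info  (symlink to the hive
 *                                                owner's hive directory)
 */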

static struct attribute amdgpu_xgmi_hive_id = {
	.name = "xgmi_hive_id",
	.mode = S_IRUGO
};

static struct attribute *amdgpu_xgmi_hive_attrs[] = {
	&amdgpu_xgmi_hive_id,
	NULL
};

static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	if (attr == &amdgpu_xgmi_hive_id)
		return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);

	return 0;
}

static void amdgpu_xgmi_hive_release(struct kobject *kobj)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	mutex_destroy(&hive->hive_lock);
	kfree(hive);
}

static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
	.show = amdgpu_xgmi_show_attrs,
};

struct kobj_type amdgpu_xgmi_hive_type = {
	.release = amdgpu_xgmi_hive_release,
	.sysfs_ops = &amdgpu_xgmi_hive_ops,
	.default_attrs = amdgpu_xgmi_hive_attrs,
};

static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id);
}

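/*
 * Compose a DF indirect-access (FICAA) value for the given register offset.
 * The OR-ed constant presumably selects the access mode/instance bits
 * consumed by the DF 3.6 get_fica()/set_fica() helpers; the exact bit
 * layout is an implementation detail of the data fabric.
 */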
#define AMDGPU_XGMI_SET_FICAA(o)	((o) | 0x456801)

static ssize_t amdgpu_xgmi_show_error(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
	uint64_t fica_out;
	unsigned int error_count = 0;

	ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
	ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
	if (fica_out != 0x1f)
		pr_err("xGMI error counters not enabled!\n");

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);

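	/*
	 * A status word whose low 16 bits read as 2 appears to mark a valid
	 * snapshot; bits 62 and 63 each latch an error event, so their sum
	 * yields an error count of 0, 1 or 2.
	 */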
	if ((fica_out & 0xffff) == 2)
		error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);

	adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);

	return sysfs_emit(buf, "%u\n", error_count);
}

static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);

static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
					  struct amdgpu_hive_info *hive)
{
	int ret = 0;
	char node[10] = { 0 };

	/* Create xgmi device id file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
		return ret;
	}

	/* Create xgmi error file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_error);
	if (ret)
		pr_err("failed to create xgmi_error\n");

	/* Create sysfs link to hive info folder on the first device */
	if (hive->kobj.parent != (&adev->dev->kobj)) {
		ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
					"xgmi_hive_info");
		if (ret) {
			dev_err(adev->dev, "XGMI: Failed to create link to hive info");
			goto remove_file;
		}
	}

	sprintf(node, "node%d", atomic_read(&hive->number_devices));
	/* Create sysfs link from the hive folder to yourself */
	ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create link from hive info");
		goto remove_link;
	}

	goto success;

remove_link:
	sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");

remove_file:
	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);

success:
	return ret;
}

static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
					   struct amdgpu_hive_info *hive)
{
	char node[10];
	memset(node, 0, sizeof(node));

	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_error);

	if (hive->kobj.parent != (&adev->dev->kobj))
		sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");

	sprintf(node, "node%d", atomic_read(&hive->number_devices));
	sysfs_remove_link(&hive->kobj, node);
}

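/*
 * Look up (or create) the hive this device belongs to. The returned hive
 * carries a kobject reference taken on behalf of the caller, which must
 * be dropped with amdgpu_put_xgmi_hive() once the caller is done with it.
 */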
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive = NULL;
	int ret;

	if (!adev->gmc.xgmi.hive_id)
		return NULL;

	if (adev->hive) {
		kobject_get(&adev->hive->kobj);
		return adev->hive;
	}

	mutex_lock(&xgmi_mutex);

	list_for_each_entry(hive, &xgmi_hive_list, node) {
		if (hive->hive_id == adev->gmc.xgmi.hive_id)
			goto pro_end;
	}

	hive = kzalloc(sizeof(*hive), GFP_KERNEL);
	if (!hive) {
		dev_err(adev->dev, "XGMI: allocation failed\n");
		goto pro_end;
	}

	/* initialize a new hive if it does not exist */
	ret = kobject_init_and_add(&hive->kobj,
				   &amdgpu_xgmi_hive_type,
				   &adev->dev->kobj,
				   "%s", "xgmi_hive_info");
	if (ret) {
		dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
		kfree(hive);
		hive = NULL;
		goto pro_end;
	}

	hive->hive_id = adev->gmc.xgmi.hive_id;
	INIT_LIST_HEAD(&hive->device_list);
	INIT_LIST_HEAD(&hive->node);
	mutex_init(&hive->hive_lock);
	atomic_set(&hive->in_reset, 0);
	atomic_set(&hive->number_devices, 0);
	task_barrier_init(&hive->tb);
	hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
	hive->hi_req_gpu = NULL;
	/*
	 * The hive pstate at boot is high on vega20, so we have to go
	 * to low pstate after boot.
	 */
	hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
	list_add_tail(&hive->node, &xgmi_hive_list);

pro_end:
	if (hive)
		kobject_get(&hive->kobj);
	mutex_unlock(&xgmi_mutex);
	return hive;
}

void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive)
{
	if (hive)
		kobject_put(&hive->kobj);
}

int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
	int ret = 0;
	struct amdgpu_hive_info *hive;
	struct amdgpu_device *request_adev;
	bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
	bool init_low;

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive)
		return 0;

	request_adev = hive->hi_req_gpu ? hive->hi_req_gpu : adev;
	init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
	amdgpu_put_xgmi_hive(hive);
	/* fw bug so temporarily disable pstate switching */
	return 0;

	if (!hive || adev->asic_type != CHIP_VEGA20)
		return 0;

	mutex_lock(&hive->hive_lock);

	if (is_hi_req)
		hive->hi_req_count++;
	else
		hive->hi_req_count--;

	/*
	 * Vega20 only needs a single peer to request pstate high for the hive
	 * to go high, but all peers must request pstate low for the hive to go
	 * low.
	 */
	if (hive->pstate == pstate ||
			(!is_hi_req && hive->hi_req_count && !init_low))
		goto out;

	dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);

	ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
	if (ret) {
		dev_err(request_adev->dev,
			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
			request_adev->gmc.xgmi.node_id,
			request_adev->gmc.xgmi.hive_id, ret);
		goto out;
	}

	if (init_low)
		hive->pstate = hive->hi_req_count ?
					hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
	else {
		hive->pstate = pstate;
		hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
							adev : NULL;
	}
out:
	mutex_unlock(&hive->hive_lock);
	return ret;
}

int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
	int ret;

	/* Each psp needs to set the latest topology */
	ret = psp_xgmi_set_topology_info(&adev->psp,
					 atomic_read(&hive->number_devices),
					 &adev->psp.xgmi_context.top_info);
	if (ret)
		dev_err(adev->dev,
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);

	return ret;
}

/*
 * NOTE psp_xgmi_node_info.num_hops layout is as follows:
 * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
 * num_hops[5:3] = reserved
 * num_hops[2:0] = number of hops
 */
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
			       struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	uint8_t num_hops_mask = 0x7;
	int i;

	for (i = 0; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return top->nodes[i].num_hops & num_hops_mask;
	return -EINVAL;
}

int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
	struct psp_xgmi_topology_info *top_info;
	struct amdgpu_hive_info *hive;
	struct amdgpu_xgmi *entry;
	struct amdgpu_device *tmp_adev = NULL;

	int count = 0, ret = 0;

	if (!adev->gmc.xgmi.supported)
		return 0;

	if (!adev->gmc.xgmi.pending_reset &&
	    amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		ret = psp_xgmi_initialize(&adev->psp);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to initialize xgmi session\n");
			return ret;
		}

		ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get hive id\n");
			return ret;
		}

		ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get node id\n");
			return ret;
		}
	} else {
		adev->gmc.xgmi.hive_id = 16;
		adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
	}

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive) {
		ret = -EINVAL;
		dev_err(adev->dev,
			"XGMI: node 0x%llx, cannot match hive 0x%llx in the hive list.\n",
			adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
		goto exit;
	}
	mutex_lock(&hive->hive_lock);

	top_info = &adev->psp.xgmi_context.top_info;

	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
	list_for_each_entry(entry, &hive->device_list, head)
		top_info->nodes[count++].node_id = entry->node_id;
	top_info->num_nodes = count;
	atomic_set(&hive->number_devices, count);

	task_barrier_add_task(&hive->tb);

	if (!adev->gmc.xgmi.pending_reset &&
	    amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			/* update the node list for the other devices in the hive */
			if (tmp_adev != adev) {
				top_info = &tmp_adev->psp.xgmi_context.top_info;
				top_info->nodes[count - 1].node_id =
					adev->gmc.xgmi.node_id;
				top_info->num_nodes = count;
			}
			ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
			if (ret)
				goto exit_unlock;
		}

		/* get the latest topology info for each device from psp */
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
				&tmp_adev->psp.xgmi_context.top_info);
			if (ret) {
				dev_err(tmp_adev->dev,
					"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
					tmp_adev->gmc.xgmi.node_id,
					tmp_adev->gmc.xgmi.hive_id, ret);
				/* TODO: continue with some node failed or disable the whole hive */
				goto exit_unlock;
			}
		}
	}

	if (!ret && !adev->gmc.xgmi.pending_reset)
		ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);

exit_unlock:
	mutex_unlock(&hive->hive_lock);
exit:
	if (!ret) {
		adev->hive = hive;
		dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
			 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
	} else {
		amdgpu_put_xgmi_hive(hive);
		dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
			adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
			ret);
	}

	return ret;
}

int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive = adev->hive;

	if (!adev->gmc.xgmi.supported)
		return -EINVAL;

	if (!hive)
		return -EINVAL;

	mutex_lock(&hive->hive_lock);
	task_barrier_rem_task(&hive->tb);
	amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
	if (hive->hi_req_gpu == adev)
		hive->hi_req_gpu = NULL;
	list_del(&adev->gmc.xgmi.head);
	mutex_unlock(&hive->hive_lock);

	amdgpu_put_xgmi_hive(hive);
	adev->hive = NULL;

	if (atomic_dec_return(&hive->number_devices) == 0) {
		/* Remove the hive from global hive list */
		mutex_lock(&xgmi_mutex);
		list_del(&hive->node);
		mutex_unlock(&xgmi_mutex);

		amdgpu_put_xgmi_hive(hive);
	}

	return psp_xgmi_terminate(&adev->psp);
}

static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
{
	int r;
	struct ras_ih_if ih_info = {
		.cb = NULL,
	};
	struct ras_fs_if fs_info = {
		.sysfs_name = "xgmi_wafl_err_count",
	};

	if (!adev->gmc.xgmi.supported ||
	    adev->gmc.xgmi.num_physical_nodes == 0)
		return 0;

	adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);

	if (!adev->gmc.xgmi.ras_if) {
		adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->gmc.xgmi.ras_if)
			return -ENOMEM;
		adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
		adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->gmc.xgmi.ras_if->sub_block_index = 0;
		strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
	}
	ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
	r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
				 &fs_info, &ih_info);
	if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
		kfree(adev->gmc.xgmi.ras_if);
		adev->gmc.xgmi.ras_if = NULL;
	}

	return r;
}

static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
	    adev->gmc.xgmi.ras_if) {
		struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
		struct ras_ih_if ih_info = {
			.cb = NULL,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}

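/*
 * Convert a node-local physical address into the hive-wide address space
 * by adding this node's segment base. Purely illustrative example: with a
 * node_segment_size of 0x100000000 (4 GB), physical node 2 would map local
 * address A to A + 0x200000000.
 */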
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
					   uint64_t addr)
{
	struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;

	return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
}

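/*
 * Clear a latched PCS error status register. Writing all ones first is
 * presumably a write-1-to-clear acknowledge of every status bit, followed
 * by an explicit zero write to leave the register in a clean state.
 */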
static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
{
	WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
	WREG32_PCIE(pcs_status_reg, 0);
}

static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_arct[i]);
		break;
	case CHIP_VEGA20:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_vg20[i]);
		break;
	default:
		break;
	}
}

static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
					      uint32_t value,
					      uint32_t *ue_count,
					      uint32_t *ce_count,
					      bool is_xgmi_pcs)
{
	int i;
	int ue_cnt;

	if (is_xgmi_pcs) {
		/* query xgmi pcs error status,
		 * only ue is supported */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i++) {
			ue_cnt = (value &
				  xgmi_pcs_ras_fields[i].pcs_err_mask) >>
				 xgmi_pcs_ras_fields[i].pcs_err_shift;
			if (ue_cnt) {
				dev_info(adev->dev, "%s detected\n",
					 xgmi_pcs_ras_fields[i].err_name);
				*ue_count += ue_cnt;
			}
		}
	} else {
		/* query wafl pcs error status,
		 * only ue is supported */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
			ue_cnt = (value &
				  wafl_pcs_ras_fields[i].pcs_err_mask) >>
				 wafl_pcs_ras_fields[i].pcs_err_shift;
			if (ue_cnt) {
				dev_info(adev->dev, "%s detected\n",
					 wafl_pcs_ras_fields[i].err_name);
				*ue_count += ue_cnt;
			}
		}
	}

	return 0;
}

static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	int i;
	uint32_t data;
	uint32_t ue_cnt = 0, ce_cnt = 0;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
		return -EINVAL;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, false);
		}
		break;
	case CHIP_VEGA20:
	default:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, false);
		}
		break;
	}

	adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);

	err_data->ue_count += ue_cnt;
	err_data->ce_count += ce_cnt;

	return 0;
}

const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs = {
	.ras_late_init = amdgpu_xgmi_ras_late_init,
	.ras_fini = amdgpu_xgmi_ras_fini,
	.query_ras_error_count = amdgpu_xgmi_query_ras_error_count,
	.reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count,
};