drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c (v6.9.4)
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "umc_v6_7.h"
#define MAX_UMC_POISON_POLLING_TIME_SYNC   20  //ms

static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev,
				    struct ras_err_data *err_data, uint64_t err_addr,
				    uint32_t ch_inst, uint32_t umc_inst)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(6, 7, 0):
		umc_v6_7_convert_error_address(adev,
				err_data, err_addr, ch_inst, umc_inst);
		break;
	default:
		dev_warn(adev->dev,
			 "UMC address to Physical address translation is not supported\n");
		return AMDGPU_RAS_FAIL;
	}

	return AMDGPU_RAS_SUCCESS;
}

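/* Page retirement entry point for the MCA notifier path: allocate a
 * temporary eeprom_table_record buffer, translate the reported UMC
 * channel address to a physical page, and retire it through the RAS
 * bad page list when the bad-page threshold is enabled.
 */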
int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
			uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst)
{
	struct ras_err_data err_data;
	int ret;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	err_data.err_addr =
		kcalloc(adev->umc.max_ras_err_cnt_per_query,
			sizeof(struct eeprom_table_record), GFP_KERNEL);
	if (!err_data.err_addr) {
		dev_warn(adev->dev,
			"Failed to alloc memory for umc error record in MCA notifier!\n");
		ret = AMDGPU_RAS_FAIL;
		goto out_fini_err_data;
	}

	/*
	 * Translate UMC channel address to Physical address
	 */
	ret = amdgpu_umc_convert_error_address(adev, &err_data, err_addr,
					ch_inst, umc_inst);
	if (ret)
		goto out_free_err_addr;

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
						err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

out_free_err_addr:
	kfree(err_data.err_addr);

out_fini_err_data:
	amdgpu_ras_error_data_fini(&err_data);

	return ret;
}

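/* Query UMC error counts and addresses and retire the reported pages.
 * Depending on amdgpu_dpm_get_ecc_info() support and the configured
 * error query mode, the counts and addresses come either from the
 * firmware ECC info table (ecc_info_query_* callbacks) or directly
 * from the hardware (ras_block.hw_ops callbacks).
 */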
static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
			void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	unsigned int error_query_mode;
	int ret = 0;
	unsigned long err_count;

	amdgpu_ras_get_error_query_mode(adev, &error_query_mode);

	mutex_lock(&con->page_retirement_lock);
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
	if (ret == -EOPNOTSUPP &&
	    error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);

		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
		    adev->umc.max_ras_err_cnt_per_query) {
			err_data->err_addr =
				kcalloc(adev->umc.max_ras_err_cnt_per_query,
					sizeof(struct eeprom_table_record), GFP_KERNEL);

			/* still call query_ras_error_address to clear error status
			 * even NOMEM error is encountered
			 */
			if(!err_data->err_addr)
				dev_warn(adev->dev, "Failed to alloc memory for "
						"umc error address record!\n");

			/* umc query_ras_error_address is also responsible for clearing
			 * error status
			 */
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
		}
	} else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
	    (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
		    adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address &&
		    adev->umc.max_ras_err_cnt_per_query) {
			err_data->err_addr =
				kcalloc(adev->umc.max_ras_err_cnt_per_query,
					sizeof(struct eeprom_table_record), GFP_KERNEL);

			/* still call query_ras_error_address to clear error status
			 * even NOMEM error is encountered
			 */
			if(!err_data->err_addr)
				dev_warn(adev->dev, "Failed to alloc memory for "
						"umc error address record!\n");

			/* umc query_ras_error_address is also responsible for clearing
			 * error status
			 */
			adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status);
		}
	}

	/* only uncorrectable error needs gpu reset */
	if (err_data->ue_count || err_data->de_count) {
		err_count = err_data->ue_count + err_data->de_count;
		if ((amdgpu_bad_page_threshold != 0) &&
			err_data->err_addr_cnt) {
			amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
						err_data->err_addr_cnt);
			amdgpu_ras_save_bad_pages(adev, &err_count);

			amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);

			if (con->update_channel_flag == true) {
				amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
				con->update_channel_flag = false;
			}
		}
	}

	kfree(err_data->err_addr);

	mutex_unlock(&con->page_retirement_lock);
}

static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry,
		bool reset)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	amdgpu_umc_handle_bad_pages(adev, ras_error_status);

	if (err_data->ue_count && reset) {
		/* use mode-2 reset for poison consumption */
		if (!entry)
			con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
		amdgpu_ras_reset_gpu(adev);
	}

	return AMDGPU_RAS_SUCCESS;
}

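/* Repeatedly run the bad page handler, sleeping 1 ms between attempts,
 * until a deferred (de_count) error has been picked up or timeout_ms
 * expires, then fold the counts into the UMC ras_manager object and
 * optionally request a mode-2 reset for poison consumption.
 */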
int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
			bool reset, uint32_t timeout_ms)
{
	struct ras_err_data err_data;
	struct ras_common_if head = {
		.block = AMDGPU_RAS_BLOCK__UMC,
	};
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
	uint32_t timeout = timeout_ms;

	memset(&err_data, 0, sizeof(err_data));
	amdgpu_ras_error_data_init(&err_data);

	do {

		amdgpu_umc_handle_bad_pages(adev, &err_data);

		if (timeout && !err_data.de_count) {
			msleep(1);
			timeout--;
		}

	} while (timeout && !err_data.de_count);

	if (!timeout)
		dev_warn(adev->dev, "Can't find bad pages\n");

	if (err_data.de_count)
		dev_info(adev->dev, "%ld new deferred hardware errors detected\n", err_data.de_count);

	if (obj) {
		obj->err_data.ue_count += err_data.ue_count;
		obj->err_data.ce_count += err_data.ce_count;
		obj->err_data.de_count += err_data.de_count;
	}

	amdgpu_ras_error_data_fini(&err_data);

	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);

	if (reset) {
		struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

		/* use mode-2 reset for poison consumption */
		con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
		amdgpu_ras_reset_gpu(adev);
	}

	return 0;
}

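/* Top-level poison handler. Three paths are visible below:
 *  - APUs / GPUs connected to the CPU: only request a GPU reset and let
 *    the MCA notifier do page retirement.
 *  - Bare-metal dGPUs: retire pages synchronously for UMC IP versions
 *    below 12.0.0, otherwise either poll for bad pages (reset case) or
 *    wake the page retirement wait queue.
 *  - SRIOV guests: forward the event to the host via virt.ops.
 */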
int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, bool reset)
{
	int ret = AMDGPU_RAS_SUCCESS;

	if (adev->gmc.xgmi.connected_to_cpu ||
		adev->gmc.is_app_apu) {
		if (reset) {
			/* MCA poison handler is only responsible for GPU reset,
			 * let MCA notifier do page retirement.
			 */
			kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
			amdgpu_ras_reset_gpu(adev);
		}
		return ret;
	}

	if (!amdgpu_sriov_vf(adev)) {
		if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
			struct ras_err_data err_data;
			struct ras_common_if head = {
				.block = AMDGPU_RAS_BLOCK__UMC,
			};
			struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);

			ret = amdgpu_ras_error_data_init(&err_data);
			if (ret)
				return ret;

			ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);

			if (ret == AMDGPU_RAS_SUCCESS && obj) {
				obj->err_data.ue_count += err_data.ue_count;
				obj->err_data.ce_count += err_data.ce_count;
				obj->err_data.de_count += err_data.de_count;
			}

			amdgpu_ras_error_data_fini(&err_data);
		} else {
			if (reset) {
				amdgpu_umc_bad_page_polling_timeout(adev,
							reset, MAX_UMC_POISON_POLLING_TIME_SYNC);
			} else {
				struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

				atomic_inc(&con->page_retirement_req_cnt);

				wake_up(&con->page_retirement_wq);
			}
		}
	} else {
		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
			adev->virt.ops->ras_poison_handler(adev, block);
		else
			dev_warn(adev->dev,
				"No ras_poison_handler interface in SRIOV!\n");
	}

	return ret;
}

int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry)
{
	return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
}

int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_umc_ras *ras;

	if (!adev->umc.ras)
		return 0;

	ras = adev->umc.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register umc ras block!\n");
		return err;
	}

	strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->umc.ras_if = &ras->ras_block.ras_comm;

	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

	if (!ras->ras_block.ras_cb)
		ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;

	return 0;
}

int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
		if (r)
			goto late_fini;
	}

	/* ras init of specific umc version */
	if (adev->umc.ras &&
	    adev->umc.ras->err_cnt_init)
		adev->umc.ras->err_cnt_init(adev);

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->umc.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

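/* Append one retired-page record to err_data->err_addr. Note that this
 * version does no bounds checking; the err_addr buffer is sized for
 * adev->umc.max_ras_err_cnt_per_query entries at the allocation sites
 * earlier in this file.
 */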
void amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
		uint64_t err_addr,
		uint64_t retired_page,
		uint32_t channel_index,
		uint32_t umc_inst)
{
	struct eeprom_table_record *err_rec =
		&err_data->err_addr[err_data->err_addr_cnt];

	err_rec->address = err_addr;
	/* page frame address is saved */
	err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
	err_rec->ts = (uint64_t)ktime_get_real_seconds();
	err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
	err_rec->cu = 0;
	err_rec->mem_channel = channel_index;
	err_rec->mcumc_id = umc_inst;

	err_data->err_addr_cnt++;
}

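/* Invoke func once per UMC channel. Devices that report node_inst_num
 * are walked per node/umc/channel instance, otherwise node 0 is assumed.
 */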
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
			umc_func func, void *data)
{
	uint32_t node_inst       = 0;
	uint32_t umc_inst        = 0;
	uint32_t ch_inst         = 0;
	int ret = 0;

	if (adev->umc.node_inst_num) {
		LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
			ret = func(adev, node_inst, umc_inst, ch_inst, data);
			if (ret) {
				dev_err(adev->dev, "Node %d umc %d ch %d func returns %d\n",
					node_inst, umc_inst, ch_inst, ret);
				return ret;
			}
		}
	} else {
		LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
			ret = func(adev, 0, umc_inst, ch_inst, data);
			if (ret) {
				dev_err(adev->dev, "Umc %d ch %d func returns %d\n",
					umc_inst, ch_inst, ret);
				return ret;
			}
		}
	}

	return 0;
}
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c (v6.13.7)
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/sort.h>
#include "amdgpu.h"
#include "umc_v6_7.h"
#define MAX_UMC_POISON_POLLING_TIME_SYNC   20  //ms

#define MAX_UMC_HASH_STRING_SIZE  256

static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev,
				    struct ras_err_data *err_data, uint64_t err_addr,
				    uint32_t ch_inst, uint32_t umc_inst)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(6, 7, 0):
		umc_v6_7_convert_error_address(adev,
				err_data, err_addr, ch_inst, umc_inst);
		break;
	default:
		dev_warn(adev->dev,
			 "UMC address to Physical address translation is not supported\n");
		return AMDGPU_RAS_FAIL;
	}

	return AMDGPU_RAS_SUCCESS;
}

int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
			uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst)
{
	struct ras_err_data err_data;
	int ret;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	err_data.err_addr =
		kcalloc(adev->umc.max_ras_err_cnt_per_query,
			sizeof(struct eeprom_table_record), GFP_KERNEL);
	if (!err_data.err_addr) {
		dev_warn(adev->dev,
			"Failed to alloc memory for umc error record in MCA notifier!\n");
		ret = AMDGPU_RAS_FAIL;
		goto out_fini_err_data;
	}

	err_data.err_addr_len = adev->umc.max_ras_err_cnt_per_query;

	/*
	 * Translate UMC channel address to Physical address
	 */
	ret = amdgpu_umc_convert_error_address(adev, &err_data, err_addr,
					ch_inst, umc_inst);
	if (ret)
		goto out_free_err_addr;

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
						err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

out_free_err_addr:
	kfree(err_data.err_addr);

out_fini_err_data:
	amdgpu_ras_error_data_fini(&err_data);

	return ret;
}

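/* Same bad-page handling flow as in the v6.9.4 copy above, but this
 * version also records err_addr_len for the bounds-checked
 * amdgpu_umc_fill_error_record(), clears err_data->err_addr after
 * freeing it, and is no longer static.
 */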
void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
			void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	unsigned int error_query_mode;
	int ret = 0;
	unsigned long err_count;

	amdgpu_ras_get_error_query_mode(adev, &error_query_mode);

	mutex_lock(&con->page_retirement_lock);
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
	if (ret == -EOPNOTSUPP &&
	    error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);

		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
		    adev->umc.max_ras_err_cnt_per_query) {
			err_data->err_addr =
				kcalloc(adev->umc.max_ras_err_cnt_per_query,
					sizeof(struct eeprom_table_record), GFP_KERNEL);

			/* still call query_ras_error_address to clear error status
			 * even NOMEM error is encountered
			 */
			if(!err_data->err_addr)
				dev_warn(adev->dev, "Failed to alloc memory for "
						"umc error address record!\n");
			else
				err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;

			/* umc query_ras_error_address is also responsible for clearing
			 * error status
			 */
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
		}
	} else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
	    (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
		    adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address &&
		    adev->umc.max_ras_err_cnt_per_query) {
			err_data->err_addr =
				kcalloc(adev->umc.max_ras_err_cnt_per_query,
					sizeof(struct eeprom_table_record), GFP_KERNEL);

			/* still call query_ras_error_address to clear error status
			 * even NOMEM error is encountered
			 */
			if(!err_data->err_addr)
				dev_warn(adev->dev, "Failed to alloc memory for "
						"umc error address record!\n");
			else
				err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;

			/* umc query_ras_error_address is also responsible for clearing
			 * error status
			 */
			adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status);
		}
	}

	/* only uncorrectable error needs gpu reset */
	if (err_data->ue_count || err_data->de_count) {
		err_count = err_data->ue_count + err_data->de_count;
		if ((amdgpu_bad_page_threshold != 0) &&
			err_data->err_addr_cnt) {
			amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
						err_data->err_addr_cnt);
			amdgpu_ras_save_bad_pages(adev, &err_count);

			amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);

			if (con->update_channel_flag == true) {
				amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
				con->update_channel_flag = false;
			}
		}
	}

	kfree(err_data->err_addr);
	err_data->err_addr = NULL;

	mutex_unlock(&con->page_retirement_lock);
}

static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry,
		uint32_t reset)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	amdgpu_umc_handle_bad_pages(adev, ras_error_status);

	if ((err_data->ue_count || err_data->de_count) &&
	    (reset || amdgpu_ras_is_rma(adev))) {
		con->gpu_reset_flags |= reset;
		amdgpu_ras_reset_gpu(adev);
	}

	return AMDGPU_RAS_SUCCESS;
}

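/* Poison handler carrying the PASID of the offending process. On bare
 * metal with UMC IP 12.0.0 or newer, the request (block, pasid, callback,
 * reset flags) is queued with amdgpu_ras_put_poison_req() and the page
 * retirement wait queue is woken; older IPs retire pages synchronously.
 */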
int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint16_t pasid,
			pasid_notify pasid_fn, void *data, uint32_t reset)
{
	int ret = AMDGPU_RAS_SUCCESS;

	if (adev->gmc.xgmi.connected_to_cpu ||
		adev->gmc.is_app_apu) {
		if (reset) {
			/* MCA poison handler is only responsible for GPU reset,
			 * let MCA notifier do page retirement.
			 */
			kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
			amdgpu_ras_reset_gpu(adev);
		}
		return ret;
	}

	if (!amdgpu_sriov_vf(adev)) {
		if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
			struct ras_err_data err_data;
			struct ras_common_if head = {
				.block = AMDGPU_RAS_BLOCK__UMC,
			};
			struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);

			ret = amdgpu_ras_error_data_init(&err_data);
			if (ret)
				return ret;

			ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);

			if (ret == AMDGPU_RAS_SUCCESS && obj) {
				obj->err_data.ue_count += err_data.ue_count;
				obj->err_data.ce_count += err_data.ce_count;
				obj->err_data.de_count += err_data.de_count;
			}

			amdgpu_ras_error_data_fini(&err_data);
		} else {
			struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
			int ret;

			ret = amdgpu_ras_put_poison_req(adev,
				block, pasid, pasid_fn, data, reset);
			if (!ret) {
				atomic_inc(&con->page_retirement_req_cnt);
				wake_up(&con->page_retirement_wq);
			}
		}
	} else {
		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
			adev->virt.ops->ras_poison_handler(adev, block);
		else
			dev_warn(adev->dev,
				"No ras_poison_handler interface in SRIOV!\n");
	}

	return ret;
}

int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint32_t reset)
{
	return amdgpu_umc_pasid_poison_handler(adev,
				block, 0, NULL, NULL, reset);
}

int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry)
{
	return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry,
				AMDGPU_RAS_GPU_RESET_MODE1_RESET);
}

int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_umc_ras *ras;

	if (!adev->umc.ras)
		return 0;

	ras = adev->umc.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register umc ras block!\n");
		return err;
	}

	strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->umc.ras_if = &ras->ras_block.ras_comm;

	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

	if (!ras->ras_block.ras_cb)
		ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;

	return 0;
}

int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev))
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
		if (r)
			goto late_fini;
	}

	/* ras init of specific umc version */
	if (adev->umc.ras &&
	    adev->umc.ras->err_cnt_init)
		adev->umc.ras->err_cnt_init(adev);

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->umc.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

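/* Bounds-checked variant of the record helper: the caller must have set
 * err_data->err_addr and err_addr_len, otherwise -EINVAL is returned
 * instead of writing past the end of the buffer.
 */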
int amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
		uint64_t err_addr,
		uint64_t retired_page,
		uint32_t channel_index,
		uint32_t umc_inst)
{
	struct eeprom_table_record *err_rec;

	if (!err_data ||
	    !err_data->err_addr ||
	    (err_data->err_addr_cnt >= err_data->err_addr_len))
		return -EINVAL;

	err_rec = &err_data->err_addr[err_data->err_addr_cnt];

	err_rec->address = err_addr;
	/* page frame address is saved */
	err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
	err_rec->ts = (uint64_t)ktime_get_real_seconds();
	err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
	err_rec->cu = 0;
	err_rec->mem_channel = channel_index;
	err_rec->mcumc_id = umc_inst;

	err_data->err_addr_cnt++;

	return 0;
}

int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
			umc_func func, void *data)
{
	uint32_t node_inst       = 0;
	uint32_t umc_inst        = 0;
	uint32_t ch_inst         = 0;
	int ret = 0;

	if (adev->umc.node_inst_num) {
		LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
			ret = func(adev, node_inst, umc_inst, ch_inst, data);
			if (ret) {
				dev_err(adev->dev, "Node %d umc %d ch %d func returns %d\n",
					node_inst, umc_inst, ch_inst, ret);
				return ret;
			}
		}
	} else {
		LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
			ret = func(adev, 0, umc_inst, ch_inst, data);
			if (ret) {
				dev_err(adev->dev, "Umc %d ch %d func returns %d\n",
					umc_inst, ch_inst, ret);
				return ret;
			}
		}
	}

	return 0;
}

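/* Forward an MCA status/ipid/addr triple to the UMC version specific
 * update_ecc_status callback when one is registered.
 */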
int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
				uint64_t status, uint64_t ipid, uint64_t addr)
{
	if (adev->umc.ras->update_ecc_status)
		return adev->umc.ras->update_ecc_status(adev,
					status, ipid, addr);
	return 0;
}

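/* Log a newly seen ECC error: insert it into the given radix tree keyed
 * by physical page frame number and tag it UMC_ECC_NEW_DETECTED_TAG,
 * all under the umc_ecc_log lock.
 */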
int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
		struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_ecc_log_info *ecc_log;
	int ret;

	ecc_log = &con->umc_ecc_log;

	mutex_lock(&ecc_log->lock);
	ret = radix_tree_insert(ecc_tree, ecc_err->pa_pfn, ecc_err);
	if (!ret)
		radix_tree_tag_set(ecc_tree,
			ecc_err->pa_pfn, UMC_ECC_NEW_DETECTED_TAG);
	mutex_unlock(&ecc_log->lock);

	return ret;
}