Linux Audio

Check our new training course

Buildroot integration, development and maintenance

Need a Buildroot system for your embedded project?
Loading...
Note: File does not exist in v4.6.
  1/*
  2 * Copyright 2015 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23
 24
 25#include "pp_debug.h"
 26#include "smumgr.h"
 27#include "smu_ucode_xfer_vi.h"
 28#include "ppatomctrl.h"
 29#include "cgs_common.h"
 30#include "smu7_ppsmc.h"
 31#include "smu7_smumgr.h"
 32#include "smu7_common.h"
 33
 34#include "polaris10_pwrvirus.h"
 35
 36#define SMU7_SMC_SIZE 0x20000
 37
/*
 * Program the indirect-access index register with a dword-aligned SMC SRAM
 * address so a following read/write of mmSMC_IND_DATA_11 hits that address.
 *
 * @smc_addr: byte address in SMC RAM; must be 4-byte aligned.
 * @limit:    exclusive upper bound of the accessible SMC RAM window.
 *
 * Returns 0 on success, -EINVAL if the address is unaligned or out of range.
 */
static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t limit)
{
	PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, smc_addr);
	/* Disable auto-increment: each data access stays at this address. */
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
	return 0;
}
 47
 48
 49int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
 50{
 51	uint32_t data;
 52	uint32_t addr;
 53	uint8_t *dest_byte;
 54	uint8_t i, data_byte[4] = {0};
 55	uint32_t *pdata = (uint32_t *)&data_byte;
 56
 57	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
 58	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
 59
 60	addr = smc_start_address;
 61
 62	while (byte_count >= 4) {
 63		smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
 64
 65		*dest = PP_SMC_TO_HOST_UL(data);
 66
 67		dest += 1;
 68		byte_count -= 4;
 69		addr += 4;
 70	}
 71
 72	if (byte_count) {
 73		smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
 74		*pdata = PP_SMC_TO_HOST_UL(data);
 75	/* Cast dest into byte type in dest_byte.  This way, we don't overflow if the allocated memory is not 4-byte aligned. */
 76		dest_byte = (uint8_t *)dest;
 77		for (i = 0; i < byte_count; i++)
 78			dest_byte[i] = data_byte[i];
 79	}
 80
 81	return 0;
 82}
 83
 84
 85int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
 86				const uint8_t *src, uint32_t byte_count, uint32_t limit)
 87{
 88	int result;
 89	uint32_t data = 0;
 90	uint32_t original_data;
 91	uint32_t addr = 0;
 92	uint32_t extra_shift;
 93
 94	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
 95	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
 96
 97	addr = smc_start_address;
 98
 99	while (byte_count >= 4) {
100	/* Bytes are written into the SMC addres space with the MSB first. */
101		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
102
103		result = smu7_set_smc_sram_address(hwmgr, addr, limit);
104
105		if (0 != result)
106			return result;
107
108		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);
109
110		src += 4;
111		byte_count -= 4;
112		addr += 4;
113	}
114
115	if (0 != byte_count) {
116
117		data = 0;
118
119		result = smu7_set_smc_sram_address(hwmgr, addr, limit);
120
121		if (0 != result)
122			return result;
123
124
125		original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
126
127		extra_shift = 8 * (4 - byte_count);
128
129		while (byte_count > 0) {
130			/* Bytes are written into the SMC addres space with the MSB first. */
131			data = (0x100 * data) + *src++;
132			byte_count--;
133		}
134
135		data <<= extra_shift;
136
137		data |= (original_data & ~((~0UL) << extra_shift));
138
139		result = smu7_set_smc_sram_address(hwmgr, addr, limit);
140
141		if (0 != result)
142			return result;
143
144		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);
145	}
146
147	return 0;
148}
149
150
/*
 * Write a 4-byte jump instruction at SMC address 0 so the SMC starts
 * executing from the uploaded firmware.
 *
 * Returns the result of the copy (the original silently discarded it and
 * always returned 0).
 */
int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };

	/* limit must be strictly greater than start + size, hence sizeof + 1. */
	return smu7_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data) + 1);
}
159
160bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr)
161{
162	return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
163	&& (0x20100 <= cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
164}
165
/*
 * Send a message to the SMC and wait for its acknowledgement.
 *
 * NOTE(review): failures are only logged via pr_info — the function always
 * returns 0 once the SMC is confirmed running, so callers cannot detect a
 * NACKed message from the return value.
 *
 * Returns 0, or -EINVAL when the SMC is not running.
 */
int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	int ret;

	if (!smu7_is_smc_ram_running(hwmgr))
		return -EINVAL;


	/* Drain any previous message: wait until SMC_RESP is non-zero. */
	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret != 1)
		pr_info("\n failed to send pre message %x ret is %d \n",  msg, ret);

	/* Writing the message register triggers the SMC. */
	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

	/* Wait for the SMC to acknowledge this message. */
	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret != 1)
		pr_info("\n failed to send message %x ret is %d \n",  msg, ret);

	return 0;
}
192
/*
 * Fire-and-forget variant: write the message register without checking that
 * the SMC is running or waiting for a response.  Always returns 0.
 */
int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

	return 0;
}
199
200int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
201{
202	if (!smu7_is_smc_ram_running(hwmgr)) {
203		return -EINVAL;
204	}
205
206	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
207
208	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
209
210	return smu7_send_msg_to_smc(hwmgr, msg);
211}
212
/*
 * Stage a 32-bit argument, then send @msg without waiting for the SMC to
 * respond.  Returns the result of the non-waiting send (always 0).
 */
int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);

	return smu7_send_msg_to_smc_without_waiting(hwmgr, msg);
}
219
220int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
221{
222	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
223
224	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
225
226	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
227
228	if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP))
229		pr_info("Failed to send Message.\n");
230
231	return 0;
232}
233
/*
 * Block until the SMC reports its clock enable (cken) field as 0.
 * Returns -EINVAL if the SMC is not running at all, otherwise 0.
 */
int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr)
{
	if (!smu7_is_smc_ram_running(hwmgr))
		return -EINVAL;

	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
	return 0;
}
242
243
244enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
245{
246	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
247
248	switch (fw_type) {
249	case UCODE_ID_SMU:
250		result = CGS_UCODE_ID_SMU;
251		break;
252	case UCODE_ID_SMU_SK:
253		result = CGS_UCODE_ID_SMU_SK;
254		break;
255	case UCODE_ID_SDMA0:
256		result = CGS_UCODE_ID_SDMA0;
257		break;
258	case UCODE_ID_SDMA1:
259		result = CGS_UCODE_ID_SDMA1;
260		break;
261	case UCODE_ID_CP_CE:
262		result = CGS_UCODE_ID_CP_CE;
263		break;
264	case UCODE_ID_CP_PFP:
265		result = CGS_UCODE_ID_CP_PFP;
266		break;
267	case UCODE_ID_CP_ME:
268		result = CGS_UCODE_ID_CP_ME;
269		break;
270	case UCODE_ID_CP_MEC:
271		result = CGS_UCODE_ID_CP_MEC;
272		break;
273	case UCODE_ID_CP_MEC_JT1:
274		result = CGS_UCODE_ID_CP_MEC_JT1;
275		break;
276	case UCODE_ID_CP_MEC_JT2:
277		result = CGS_UCODE_ID_CP_MEC_JT2;
278		break;
279	case UCODE_ID_RLC_G:
280		result = CGS_UCODE_ID_RLC_G;
281		break;
282	case UCODE_ID_MEC_STORAGE:
283		result = CGS_UCODE_ID_STORAGE;
284		break;
285	default:
286		break;
287	}
288
289	return result;
290}
291
292
293int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
294{
295	int result;
296
297	result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);
298
299	if (result)
300		return result;
301
302	*value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
303	return 0;
304}
305
306int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
307{
308	int result;
309
310	result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);
311
312	if (result)
313		return result;
314
315	cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, value);
316
317	return 0;
318}
319
320/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
321
322static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
323{
324	uint32_t result = 0;
325
326	switch (fw_type) {
327	case UCODE_ID_SDMA0:
328		result = UCODE_ID_SDMA0_MASK;
329		break;
330	case UCODE_ID_SDMA1:
331		result = UCODE_ID_SDMA1_MASK;
332		break;
333	case UCODE_ID_CP_CE:
334		result = UCODE_ID_CP_CE_MASK;
335		break;
336	case UCODE_ID_CP_PFP:
337		result = UCODE_ID_CP_PFP_MASK;
338		break;
339	case UCODE_ID_CP_ME:
340		result = UCODE_ID_CP_ME_MASK;
341		break;
342	case UCODE_ID_CP_MEC:
343	case UCODE_ID_CP_MEC_JT1:
344	case UCODE_ID_CP_MEC_JT2:
345		result = UCODE_ID_CP_MEC_MASK;
346		break;
347	case UCODE_ID_RLC_G:
348		result = UCODE_ID_RLC_G_MASK;
349		break;
350	default:
351		pr_info("UCode type is out of range! \n");
352		result = 0;
353	}
354
355	return result;
356}
357
358static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
359						uint32_t fw_type,
360						struct SMU_Entry *entry)
361{
362	int result = 0;
363	struct cgs_firmware_info info = {0};
364
365	result = cgs_get_firmware_info(hwmgr->device,
366				smu7_convert_fw_type_to_cgs(fw_type),
367				&info);
368
369	if (!result) {
370		entry->version = info.fw_version;
371		entry->id = (uint16_t)fw_type;
372		entry->image_addr_high = upper_32_bits(info.mc_addr);
373		entry->image_addr_low = lower_32_bits(info.mc_addr);
374		entry->meta_data_addr_high = 0;
375		entry->meta_data_addr_low = 0;
376
377		/* digest need be excluded out */
378		if (cgs_is_virtualization_enabled(hwmgr->device))
379			info.image_size -= 20;
380		entry->data_size_byte = info.image_size;
381		entry->num_register_entries = 0;
382	}
383
384	if ((fw_type == UCODE_ID_RLC_G)
385		|| (fw_type == UCODE_ID_CP_MEC))
386		entry->flags = 1;
387	else
388		entry->flags = 0;
389
390	return 0;
391}
392
/*
 * Build the firmware table of contents in the header buffer, point the SMU
 * at it (and at the scratch DRAM buffer when not virtualized), then ask the
 * SMU to load the selected ucode set.
 *
 * Returns 0 on success, -EINVAL if a TOC entry could not be populated.
 * NOTE(review): a failed PPSMC_MSG_LoadUcodes is only logged, not returned.
 */
int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
	uint32_t fw_to_load;
	int result = 0;
	struct SMU_DRAMData_TOC *toc;

	if (!hwmgr->reload_fw) {
		pr_info("skip reloading...\n");
		return 0;
	}

	/* Clear the SMU's ucode-load status register before requesting a load. */
	if (smu_data->soft_regs_start)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, UcodeLoadStatus),
					0x0);

	if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
		if (!cgs_is_virtualization_enabled(hwmgr->device)) {
			/* Hand the SMU its scratch DRAM buffer address. */
			smu7_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SMU_DRAM_ADDR_HI,
						upper_32_bits(smu_data->smu_buffer.mc_addr));
			smu7_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SMU_DRAM_ADDR_LO,
						lower_32_bits(smu_data->smu_buffer.mc_addr));
		}
		fw_to_load = UCODE_ID_RLC_G_MASK
			   + UCODE_ID_SDMA0_MASK
			   + UCODE_ID_SDMA1_MASK
			   + UCODE_ID_CP_CE_MASK
			   + UCODE_ID_CP_ME_MASK
			   + UCODE_ID_CP_PFP_MASK
			   + UCODE_ID_CP_MEC_MASK;
	} else {
		/* Topaz additionally loads the MEC jump tables. */
		fw_to_load = UCODE_ID_RLC_G_MASK
			   + UCODE_ID_SDMA0_MASK
			   + UCODE_ID_SDMA1_MASK
			   + UCODE_ID_CP_CE_MASK
			   + UCODE_ID_CP_ME_MASK
			   + UCODE_ID_CP_PFP_MASK
			   + UCODE_ID_CP_MEC_MASK
			   + UCODE_ID_CP_MEC_JT1_MASK
			   + UCODE_ID_CP_MEC_JT2_MASK;
	}

	/* The TOC lives in the header buffer shared with the SMU. */
	toc = (struct SMU_DRAMData_TOC *)smu_data->header;
	toc->num_entries = 0;
	toc->structure_version = 1;

	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	if (cgs_is_virtualization_enabled(hwmgr->device))
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);

	/* Tell the SMU where the TOC lives, then trigger the load. */
	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));

	if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
		pr_err("Fail to Request SMU Load uCode");

	return result;
}
483
484/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
485int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
486{
487	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
488	uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
489	uint32_t ret;
490
491	ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
492					smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
493					SMU_SoftRegisters, UcodeLoadStatus),
494					fw_mask, fw_mask);
495	return ret;
496}
497
/* Restart the SMU via the backend's start_smu hook, re-uploading firmware. */
int smu7_reload_firmware(struct pp_hwmgr *hwmgr)
{
	return hwmgr->smumgr_funcs->start_smu(hwmgr);
}
502
503static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, uint32_t *src, uint32_t limit)
504{
505	uint32_t byte_count = length;
506
507	PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
508
509	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, 0x20000);
510	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
511
512	for (; byte_count >= 4; byte_count -= 4)
513		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, *src++);
514
515	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
516
517	PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);
518
519	return 0;
520}
521
522
523int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr)
524{
525	int result = 0;
526	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
527
528	struct cgs_firmware_info info = {0};
529
530	if (smu_data->security_hard_key == 1)
531		cgs_get_firmware_info(hwmgr->device,
532			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
533	else
534		cgs_get_firmware_info(hwmgr->device,
535			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
536
537	hwmgr->is_kicker = info.is_kicker;
538	hwmgr->smu_version = info.version;
539	result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
540
541	return result;
542}
543
544static void execute_pwr_table(struct pp_hwmgr *hwmgr, const PWR_Command_Table *pvirus, int size)
545{
546	int i;
547	uint32_t reg, data;
548
549	for (i = 0; i < size; i++) {
550		reg  = pvirus->reg;
551		data = pvirus->data;
552		if (reg != 0xffffffff)
553			cgs_write_register(hwmgr->device, reg, data);
554		else
555			break;
556		pvirus++;
557	}
558}
559
560static void execute_pwr_dfy_table(struct pp_hwmgr *hwmgr, const PWR_DFY_Section *section)
561{
562	int i;
563
564	cgs_write_register(hwmgr->device, mmCP_DFY_CNTL, section->dfy_cntl);
565	cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
566	cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
567	for (i = 0; i < section->dfy_size; i++)
568		cgs_write_register(hwmgr->device, mmCP_DFY_DATA_0, section->dfy_data[i]);
569}
570
/*
 * Run the power-virus programming sequence from polaris10_pwrvirus.h:
 * pre-table, six DFY sections, then the post-table.  The order is fixed
 * by the tables themselves.  Always returns 0.
 */
int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
{
	execute_pwr_table(hwmgr, pwr_virus_table_pre, ARRAY_SIZE(pwr_virus_table_pre));
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section1);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section2);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section3);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section4);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section5);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section6);
	execute_pwr_table(hwmgr, pwr_virus_table_post, ARRAY_SIZE(pwr_virus_table_post));

	return 0;
}
584
/*
 * Allocate the VRAM buffers this backend needs: the header buffer holding
 * the firmware TOC, and (when not virtualized) a scratch buffer for the SMU.
 * On a second-allocation failure the header buffer is released again.
 *
 * Returns 0 on success, -EINVAL on allocation failure.
 */
int smu7_init(struct pp_hwmgr *hwmgr)
{
	struct smu7_smumgr *smu_data;
	uint64_t mc_addr = 0;
	int r;
	/* Allocate memory for backend private data */
	smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
	/* Round the TOC size up to a whole number of 4K pages. */
	smu_data->header_buffer.data_size =
			((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;

/* Allocate FW image data structure and header buffer and
 * send the header buffer address to SMU */
	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
		smu_data->header_buffer.data_size,
		PAGE_SIZE,
		AMDGPU_GEM_DOMAIN_VRAM,
		&smu_data->header_buffer.handle,
		&mc_addr,
		&smu_data->header_buffer.kaddr);

	if (r)
		return -EINVAL;

	smu_data->header = smu_data->header_buffer.kaddr;
	smu_data->header_buffer.mc_addr = mc_addr;

	/* Virtualized setups do not use the SMU scratch buffer. */
	if (cgs_is_virtualization_enabled(hwmgr->device))
		return 0;

	smu_data->smu_buffer.data_size = 200*4096;
	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
		smu_data->smu_buffer.data_size,
		PAGE_SIZE,
		AMDGPU_GEM_DOMAIN_VRAM,
		&smu_data->smu_buffer.handle,
		&mc_addr,
		&smu_data->smu_buffer.kaddr);

	if (r) {
		/* Roll back the header buffer so we don't leak it. */
		amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
					&smu_data->header_buffer.mc_addr,
					&smu_data->header_buffer.kaddr);
		return -EINVAL;
	}
	smu_data->smu_buffer.mc_addr = mc_addr;

	if (smum_is_hw_avfs_present(hwmgr))
		hwmgr->avfs_supported = true;

	return 0;
}
636
637
/*
 * Tear down the backend: free the header buffer, the scratch buffer when it
 * was allocated (non-virtualized only, mirroring smu7_init), the backend
 * private data, and release the SMU firmware reference.  Always returns 0.
 */
int smu7_smu_fini(struct pp_hwmgr *hwmgr)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);

	amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
					&smu_data->header_buffer.mc_addr,
					&smu_data->header_buffer.kaddr);

	/* The scratch buffer only exists when virtualization is disabled. */
	if (!cgs_is_virtualization_enabled(hwmgr->device))
		amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle,
					&smu_data->smu_buffer.mc_addr,
					&smu_data->smu_buffer.kaddr);

	kfree(hwmgr->smu_backend);
	hwmgr->smu_backend = NULL;
	cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
	return 0;
}