Linux Audio

Check our new training course

Loading...
Note: File does not exist in v6.9.4.
  1/*
  2 * Copyright 2014 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23
 24#include <linux/firmware.h>
 25#include "drmP.h"
 26#include "amdgpu.h"
 27#include "fiji_ppsmc.h"
 28#include "fiji_smum.h"
 29#include "smu_ucode_xfer_vi.h"
 30#include "amdgpu_ucode.h"
 31
 32#include "smu/smu_7_1_3_d.h"
 33#include "smu/smu_7_1_3_sh_mask.h"
 34
 35#define FIJI_SMC_SIZE 0x20000
 36
 37static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
 38{
 39	uint32_t val;
 40
 41	if (smc_address & 3)
 42		return -EINVAL;
 43
 44	if ((smc_address + 3) > limit)
 45		return -EINVAL;
 46
 47	WREG32(mmSMC_IND_INDEX_0, smc_address);
 48
 49	val = RREG32(mmSMC_IND_ACCESS_CNTL);
 50	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
 51	WREG32(mmSMC_IND_ACCESS_CNTL, val);
 52
 53	return 0;
 54}
 55
/* Copy @byte_count bytes from @src into SMC SRAM starting at
 * @smc_start_address (must be dword aligned; the range must not run
 * past @limit).  The whole transfer happens under adev->smc_idx_lock
 * because it drives the shared indirect index/data register pair.
 * Returns 0 on success, -EINVAL on a bad address/range.
 */
static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	uint32_t addr;
	uint32_t data, orig_data;
	int result = 0;
	uint32_t extra_shift;
	unsigned long flags;

	if (smc_start_address & 3)
		return -EINVAL;

	if ((smc_start_address + byte_count) > limit)
		return -EINVAL;

	addr = smc_start_address;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	while (byte_count >= 4) {
		/* Bytes are written into the SMC address space with the MSB first */
		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];

		result = fiji_set_smc_sram_address(adev, addr, limit);

		if (result)
			goto out;

		WREG32(mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {
		/* Now write odd bytes left, do a read modify write cycle */
		data = 0;

		result = fiji_set_smc_sram_address(adev, addr, limit);
		if (result)
			goto out;

		/* Read the dword we are about to partially overwrite. */
		orig_data = RREG32(mmSMC_IND_DATA_0);
		/* Low bits (8 * missing bytes) keep their original value. */
		extra_shift = 8 * (4 - byte_count);

		/* Pack the remaining 1-3 bytes MSB-first. */
		while (byte_count > 0) {
			data = (data << 8) + *src++;
			byte_count--;
		}

		/* New bytes in the high part, original low bits preserved. */
		data <<= extra_shift;
		data |= (orig_data & ~((~0UL) << extra_shift));

		/* NOTE(review): auto-increment is off, so re-selecting the
		 * address after the read above may be redundant; kept as in
		 * the AMD reference code.
		 */
		result = fiji_set_smc_sram_address(adev, addr, limit);
		if (result)
			goto out;

		WREG32(mmSMC_IND_DATA_0, data);
	}

out:
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return result;
}
119
/* Write the 4-byte start sequence at SMC address 0 so the freshly
 * uploaded firmware is entered when the SMC leaves reset (used by the
 * non-protection start path).  Returns 0 on success or the error from
 * fiji_copy_bytes_to_smc().
 */
static int fiji_program_jump_on_start(struct amdgpu_device *adev)
{
	/* Opaque byte sequence placed at address 0; presumably a jump to
	 * the firmware entry point -- value inherited from AMD reference
	 * code.  TODO confirm against SMC ISA docs.
	 */
	static const unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};

	/* Propagate the copy result instead of silently discarding it. */
	return fiji_copy_bytes_to_smc(adev, 0x0, data, sizeof(data),
				      sizeof(data) + 1);
}
127
128static bool fiji_is_smc_ram_running(struct amdgpu_device *adev)
129{
130	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
131	val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
132
133	return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
134}
135
136static int wait_smu_response(struct amdgpu_device *adev)
137{
138	int i;
139	uint32_t val;
140
141	for (i = 0; i < adev->usec_timeout; i++) {
142		val = RREG32(mmSMC_RESP_0);
143		if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
144			break;
145		udelay(1);
146	}
147
148	if (i == adev->usec_timeout)
149		return -EINVAL;
150
151	return 0;
152}
153
154static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev)
155{
156	if (wait_smu_response(adev)) {
157		DRM_ERROR("Failed to send previous message\n");
158		return -EINVAL;
159	}
160
161	WREG32(mmSMC_MSG_ARG_0, 0x20000);
162	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
163
164	if (wait_smu_response(adev)) {
165		DRM_ERROR("Failed to send message\n");
166		return -EINVAL;
167	}
168
169	return 0;
170}
171
172static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
173{
174	if (!fiji_is_smc_ram_running(adev))
175	{
176		return -EINVAL;;
177	}
178
179	if (wait_smu_response(adev)) {
180		DRM_ERROR("Failed to send previous message\n");
181		return -EINVAL;
182	}
183
184	WREG32(mmSMC_MESSAGE_0, msg);
185
186	if (wait_smu_response(adev)) {
187		DRM_ERROR("Failed to send message\n");
188		return -EINVAL;
189	}
190
191	return 0;
192}
193
194static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
195						PPSMC_Msg msg)
196{
197	if (wait_smu_response(adev)) {
198		DRM_ERROR("Failed to send previous message\n");
199		return -EINVAL;
200	}
201
202	WREG32(mmSMC_MESSAGE_0, msg);
203
204	return 0;
205}
206
207static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
208						PPSMC_Msg msg,
209						uint32_t parameter)
210{
211	if (!fiji_is_smc_ram_running(adev))
212		return -EINVAL;
213
214	if (wait_smu_response(adev)) {
215		DRM_ERROR("Failed to send previous message\n");
216		return -EINVAL;
217	}
218
219	WREG32(mmSMC_MSG_ARG_0, parameter);
220
221	return fiji_send_msg_to_smc(adev, msg);
222}
223
224static int fiji_send_msg_to_smc_with_parameter_without_waiting(
225					struct amdgpu_device *adev,
226					PPSMC_Msg msg, uint32_t parameter)
227{
228	if (wait_smu_response(adev)) {
229		DRM_ERROR("Failed to send previous message\n");
230		return -EINVAL;
231	}
232
233	WREG32(mmSMC_MSG_ARG_0, parameter);
234
235	return fiji_send_msg_to_smc_without_waiting(adev, msg);
236}
237
238#if 0 /* not used yet */
/* Wait for the SMC clock-enable bit (cken) to drop, i.e. for the SMC to
 * go idle.  Returns 0 once idle, -EINVAL if the SMC is not running or
 * the usec timeout expires.  (Compiled out; currently unused.)
 */
static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev)
{
	int i;
	uint32_t val;

	if (!fiji_is_smc_ram_running(adev))
		return -EINVAL;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
		if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -EINVAL;

	return 0;
}
259#endif
260
/* Upload the SMC firmware image from adev->pm.fw into SMC SRAM through
 * the auto-incrementing indirect register pair.  Skipped on SR-IOV
 * boards, where the vbios loads the SMC ucode.  Returns 0 on success,
 * -EINVAL for a missing, unaligned, or oversized image.
 */
static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
{
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t ucode_size;
	uint32_t ucode_start_address;
	const uint8_t *src;
	uint32_t val;
	uint32_t byte_count;
	uint32_t *data;
	unsigned long flags;

	if (!adev->pm.fw)
		return -EINVAL;

	/* Skip SMC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 */
	if (adev->virtualization.supports_sr_iov)
		return 0;

	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);

	/* Header fields are little-endian on disk. */
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
	src = (const uint8_t *)
		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	if (ucode_size & 3) {
		DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
		return -EINVAL;
	}

	if (ucode_size > FIJI_SMC_SIZE) {
		DRM_ERROR("SMC address is beyond the SMC RAM area\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, ucode_start_address);

	/* Enable auto-increment: each data write advances the index. */
	val = RREG32(mmSMC_IND_ACCESS_CNTL);
	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
	WREG32(mmSMC_IND_ACCESS_CNTL, val);

	/* NOTE(review): casts away const and streams the image as raw
	 * dwords in its native byte order -- kept as in the reference
	 * code.
	 */
	byte_count = ucode_size;
	data = (uint32_t *)src;
	for (; byte_count >= 4; data++, byte_count -= 4)
		WREG32(mmSMC_IND_DATA_0, data[0]);

	/* Restore non-auto-increment behaviour for other users. */
	val = RREG32(mmSMC_IND_ACCESS_CNTL);
	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
	WREG32(mmSMC_IND_ACCESS_CNTL, val);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return 0;
}
319
320#if 0 /* not used yet */
321static int fiji_read_smc_sram_dword(struct amdgpu_device *adev,
322				uint32_t smc_address,
323				uint32_t *value,
324				uint32_t limit)
325{
326	int result;
327	unsigned long flags;
328
329	spin_lock_irqsave(&adev->smc_idx_lock, flags);
330	result = fiji_set_smc_sram_address(adev, smc_address, limit);
331	if (result == 0)
332		*value = RREG32(mmSMC_IND_DATA_0);
333	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
334	return result;
335}
336
337static int fiji_write_smc_sram_dword(struct amdgpu_device *adev,
338				uint32_t smc_address,
339				uint32_t value,
340				uint32_t limit)
341{
342	int result;
343	unsigned long flags;
344
345	spin_lock_irqsave(&adev->smc_idx_lock, flags);
346	result = fiji_set_smc_sram_address(adev, smc_address, limit);
347	if (result == 0)
348		WREG32(mmSMC_IND_DATA_0, value);
349	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
350	return result;
351}
352
/* Halt the SMC: assert its reset, then gate its clock.  Always returns
 * 0.  (Compiled out; currently unused.)
 */
static int fiji_smu_stop_smc(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);

	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);

	return 0;
}
365#endif
366
367static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type)
368{
369	switch (fw_type) {
370		case UCODE_ID_SDMA0:
371			return AMDGPU_UCODE_ID_SDMA0;
372		case UCODE_ID_SDMA1:
373			return AMDGPU_UCODE_ID_SDMA1;
374		case UCODE_ID_CP_CE:
375			return AMDGPU_UCODE_ID_CP_CE;
376		case UCODE_ID_CP_PFP:
377			return AMDGPU_UCODE_ID_CP_PFP;
378		case UCODE_ID_CP_ME:
379			return AMDGPU_UCODE_ID_CP_ME;
380		case UCODE_ID_CP_MEC:
381		case UCODE_ID_CP_MEC_JT1:
382		case UCODE_ID_CP_MEC_JT2:
383			return AMDGPU_UCODE_ID_CP_MEC1;
384		case UCODE_ID_RLC_G:
385			return AMDGPU_UCODE_ID_RLC_G;
386		default:
387			DRM_ERROR("ucode type is out of range!\n");
388			return AMDGPU_UCODE_ID_MAXIMUM;
389	}
390}
391
/* Fill one SMU_Entry TOC slot for @fw_type from the corresponding
 * amdgpu firmware image.  For the MEC jump tables, only the JT region
 * of the MEC image (jt_offset/jt_size, counted in dwords) is described.
 * Returns 0 on success, -EINVAL if that firmware was never loaded.
 */
static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
						uint32_t fw_type,
						struct SMU_Entry *entry)
{
	enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type);
	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
	const struct gfx_firmware_header_v1_0 *header = NULL;
	uint64_t gpu_addr;
	uint32_t data_size;

	if (ucode->fw == NULL)
		return -EINVAL;
	gpu_addr  = ucode->mc_addr;
	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	data_size = le32_to_cpu(header->header.ucode_size_bytes);

	/* The jump tables live inside the MEC image: narrow the entry to
	 * that sub-range (offsets/sizes stored as dword counts).
	 */
	if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
		(fw_type == UCODE_ID_CP_MEC_JT2)) {
		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
		data_size = le32_to_cpu(header->jt_size) << 2;
	}

	entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
	entry->id = (uint16_t)fw_type;
	entry->image_addr_high = upper_32_bits(gpu_addr);
	entry->image_addr_low = lower_32_bits(gpu_addr);
	entry->meta_data_addr_high = 0;
	entry->meta_data_addr_low = 0;
	entry->data_size_byte = data_size;
	entry->num_register_entries = 0;

	/* NOTE(review): flags==1 appears to mark the RLC entry for the
	 * SMU -- value inherited from AMD reference code.
	 */
	if (fw_type == UCODE_ID_RLC_G)
		entry->flags = 1;
	else
		entry->flags = 0;

	return 0;
}
430
431static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
432{
433	struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv;
434	struct SMU_DRAMData_TOC *toc;
435	uint32_t fw_to_load;
436
437	WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
438
439	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
440	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
441
442	toc = (struct SMU_DRAMData_TOC *)private->header;
443	toc->num_entries = 0;
444	toc->structure_version = 1;
445
446	if (!adev->firmware.smu_load)
447		return 0;
448
449	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
450			&toc->entry[toc->num_entries++])) {
451		DRM_ERROR("Failed to get firmware entry for RLC\n");
452		return -EINVAL;
453	}
454
455	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
456			&toc->entry[toc->num_entries++])) {
457		DRM_ERROR("Failed to get firmware entry for CE\n");
458		return -EINVAL;
459	}
460
461	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
462			&toc->entry[toc->num_entries++])) {
463		DRM_ERROR("Failed to get firmware entry for PFP\n");
464		return -EINVAL;
465	}
466
467	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
468			&toc->entry[toc->num_entries++])) {
469		DRM_ERROR("Failed to get firmware entry for ME\n");
470		return -EINVAL;
471	}
472
473	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
474			&toc->entry[toc->num_entries++])) {
475		DRM_ERROR("Failed to get firmware entry for MEC\n");
476		return -EINVAL;
477	}
478
479	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
480			&toc->entry[toc->num_entries++])) {
481		DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
482		return -EINVAL;
483	}
484
485	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
486			&toc->entry[toc->num_entries++])) {
487		DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
488		return -EINVAL;
489	}
490
491	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
492			&toc->entry[toc->num_entries++])) {
493		DRM_ERROR("Failed to get firmware entry for SDMA0\n");
494		return -EINVAL;
495	}
496
497	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
498			&toc->entry[toc->num_entries++])) {
499		DRM_ERROR("Failed to get firmware entry for SDMA1\n");
500		return -EINVAL;
501	}
502
503	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
504	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
505
506	fw_to_load = UCODE_ID_RLC_G_MASK |
507			UCODE_ID_SDMA0_MASK |
508			UCODE_ID_SDMA1_MASK |
509			UCODE_ID_CP_CE_MASK |
510			UCODE_ID_CP_ME_MASK |
511			UCODE_ID_CP_PFP_MASK |
512			UCODE_ID_CP_MEC_MASK;
513
514	if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
515		DRM_ERROR("Fail to request SMU load ucode\n");
516		return -EINVAL;
517	}
518
519	return 0;
520}
521
522static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
523{
524	switch (fw_type) {
525		case AMDGPU_UCODE_ID_SDMA0:
526			return UCODE_ID_SDMA0_MASK;
527		case AMDGPU_UCODE_ID_SDMA1:
528			return UCODE_ID_SDMA1_MASK;
529		case AMDGPU_UCODE_ID_CP_CE:
530			return UCODE_ID_CP_CE_MASK;
531		case AMDGPU_UCODE_ID_CP_PFP:
532			return UCODE_ID_CP_PFP_MASK;
533		case AMDGPU_UCODE_ID_CP_ME:
534			return UCODE_ID_CP_ME_MASK;
535		case AMDGPU_UCODE_ID_CP_MEC1:
536			return UCODE_ID_CP_MEC_MASK;
537		case AMDGPU_UCODE_ID_CP_MEC2:
538			return UCODE_ID_CP_MEC_MASK;
539		case AMDGPU_UCODE_ID_RLC_G:
540			return UCODE_ID_RLC_G_MASK;
541		default:
542			DRM_ERROR("ucode type is out of range!\n");
543			return 0;
544	}
545}
546
547static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
548					uint32_t fw_type)
549{
550	uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
551	int i;
552
553	for (i = 0; i < adev->usec_timeout; i++) {
554		if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
555			break;
556		udelay(1);
557	}
558
559	if (i == adev->usec_timeout) {
560		DRM_ERROR("check firmware loading failed\n");
561		return -EINVAL;
562	}
563
564	return 0;
565}
566
/* Start the SMC when the firmware runs in protection mode: upload the
 * image while the SMC is held in reset, let the hardware auto-start it,
 * then poll the status/flag registers until the firmware reports ready.
 * Returns 0 on success, a negative error on upload failure or timeout.
 */
static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev)
{
	int result;
	uint32_t val;
	int i;

	/* Assert reset */
	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);

	result = fiji_smu_upload_firmware_image(adev);
	if (result)
		return result;

	/* Clear status */
	WREG32_SMC(ixSMU_STATUS, 0);

	/* Enable clock */
	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);

	/* De-assert reset */
	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);

	/* Set SMU Auto Start */
	val = RREG32_SMC(ixSMU_INPUT_DATA);
	val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
	WREG32_SMC(ixSMU_INPUT_DATA, val);

	/* Clear firmware interrupt enable flag */
	WREG32_SMC(ixFIRMWARE_FLAGS, 0);

	/* Wait for the RCU to report that interrupts are enabled. */
	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixRCU_UC_EVENTS);
		if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout) {
		DRM_ERROR("Interrupt is not enabled by firmware\n");
		return -EINVAL;
	}

	/* Call Test SMU message with 0x20000 offset
	 * to trigger SMU start
	 */
	/* NOTE(review): return value ignored; the SMU_STATUS polls below
	 * catch a start failure.
	 */
	fiji_send_msg_to_smc_offset(adev);
	DRM_INFO("[FM]try triger smu start\n");
	/* Wait for done bit to be set */
	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixSMU_STATUS);
		if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout) {
		DRM_ERROR("Timeout for SMU start\n");
		return -EINVAL;
	}

	/* Check pass/failed indicator */
	val = RREG32_SMC(ixSMU_STATUS);
	if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
		DRM_ERROR("SMU Firmware start failed\n");
		return -EINVAL;
	}
	DRM_INFO("[FM]smu started\n");
	/* Wait for firmware to initialize */
	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixFIRMWARE_FLAGS);
		if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout) {
		DRM_ERROR("SMU firmware initialization failed\n");
		return -EINVAL;
	}
	DRM_INFO("[FM]smu initialized\n");

	return 0;
}
656
/* Start the SMC when the firmware is not protected: wait for the boot
 * sequence to finish, upload the image with the SMC held in reset,
 * patch the start vector at address 0, then release reset and wait for
 * the firmware ready flag.  Returns 0 on success or a negative error.
 */
static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
{
	int i, result;
	uint32_t val;

	/* wait for smc boot up */
	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixRCU_UC_EVENTS);
		val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
		if (val)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout) {
		DRM_ERROR("SMC boot sequence is not completed\n");
		return -EINVAL;
	}

	/* Clear firmware interrupt enable flag */
	WREG32_SMC(ixFIRMWARE_FLAGS, 0);

	/* Assert reset */
	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);

	result = fiji_smu_upload_firmware_image(adev);
	if (result)
		return result;

	/* Set smc instruct start point at 0x0 */
	/* NOTE(review): return value ignored; the FIRMWARE_FLAGS poll
	 * below catches a start failure.
	 */
	fiji_program_jump_on_start(adev);

	/* Enable clock */
	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);

	/* De-assert reset */
	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);

	/* Wait for firmware to initialize */
	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixFIRMWARE_FLAGS);
		if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout) {
		DRM_ERROR("Timeout for SMC firmware initialization\n");
		return -EINVAL;
	}

	return 0;
}
716
717int fiji_smu_start(struct amdgpu_device *adev)
718{
719	int result;
720	uint32_t val;
721
722	if (!fiji_is_smc_ram_running(adev)) {
723		val = RREG32_SMC(ixSMU_FIRMWARE);
724		if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
725			DRM_INFO("[FM]start smu in nonprotection mode\n");
726			result = fiji_smu_start_in_non_protection_mode(adev);
727			if (result)
728				return result;
729		} else {
730			DRM_INFO("[FM]start smu in protection mode\n");
731			result = fiji_smu_start_in_protection_mode(adev);
732			if (result)
733				return result;
734		}
735	}
736
737	return fiji_smu_request_load_fw(adev);
738}
739
/* Minimal smumgr interface: only load-completion polling is exposed;
 * firmware loading itself is driven directly from fiji_smu_start().
 */
static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = {
	.check_fw_load_finish = fiji_smu_check_fw_load_finish,
	.request_smu_load_fw = NULL,
	.request_smu_specific_fw = NULL,
};
745
746int fiji_smu_init(struct amdgpu_device *adev)
747{
748	struct fiji_smu_private_data *private;
749	uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
750	uint32_t smu_internal_buffer_size = 200*4096;
751	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
752	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
753	uint64_t mc_addr;
754	void *toc_buf_ptr;
755	void *smu_buf_ptr;
756	int ret;
757
758	private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL);
759	if (NULL == private)
760		return -ENOMEM;
761
762	/* allocate firmware buffers */
763	if (adev->firmware.smu_load)
764		amdgpu_ucode_init_bo(adev);
765
766	adev->smu.priv = private;
767	adev->smu.fw_flags = 0;
768
769	/* Allocate FW image data structure and header buffer */
770	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
771			       true, AMDGPU_GEM_DOMAIN_VRAM,
772			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
773			       NULL, NULL, toc_buf);
774	if (ret) {
775		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
776		return -ENOMEM;
777	}
778
779	/* Allocate buffer for SMU internal buffer */
780	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
781			       true, AMDGPU_GEM_DOMAIN_VRAM,
782			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
783			       NULL, NULL, smu_buf);
784	if (ret) {
785		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
786		return -ENOMEM;
787	}
788
789	/* Retrieve GPU address for header buffer and internal buffer */
790	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
791	if (ret) {
792		amdgpu_bo_unref(&adev->smu.toc_buf);
793		DRM_ERROR("Failed to reserve the TOC buffer\n");
794		return -EINVAL;
795	}
796
797	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
798	if (ret) {
799		amdgpu_bo_unreserve(adev->smu.toc_buf);
800		amdgpu_bo_unref(&adev->smu.toc_buf);
801		DRM_ERROR("Failed to pin the TOC buffer\n");
802		return -EINVAL;
803	}
804
805	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
806	if (ret) {
807		amdgpu_bo_unreserve(adev->smu.toc_buf);
808		amdgpu_bo_unref(&adev->smu.toc_buf);
809		DRM_ERROR("Failed to map the TOC buffer\n");
810		return -EINVAL;
811	}
812
813	amdgpu_bo_unreserve(adev->smu.toc_buf);
814	private->header_addr_low = lower_32_bits(mc_addr);
815	private->header_addr_high = upper_32_bits(mc_addr);
816	private->header = toc_buf_ptr;
817
818	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
819	if (ret) {
820		amdgpu_bo_unref(&adev->smu.smu_buf);
821		amdgpu_bo_unref(&adev->smu.toc_buf);
822		DRM_ERROR("Failed to reserve the SMU internal buffer\n");
823		return -EINVAL;
824	}
825
826	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
827	if (ret) {
828		amdgpu_bo_unreserve(adev->smu.smu_buf);
829		amdgpu_bo_unref(&adev->smu.smu_buf);
830		amdgpu_bo_unref(&adev->smu.toc_buf);
831		DRM_ERROR("Failed to pin the SMU internal buffer\n");
832		return -EINVAL;
833	}
834
835	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
836	if (ret) {
837		amdgpu_bo_unreserve(adev->smu.smu_buf);
838		amdgpu_bo_unref(&adev->smu.smu_buf);
839		amdgpu_bo_unref(&adev->smu.toc_buf);
840		DRM_ERROR("Failed to map the SMU internal buffer\n");
841		return -EINVAL;
842	}
843
844	amdgpu_bo_unreserve(adev->smu.smu_buf);
845	private->smu_buffer_addr_low = lower_32_bits(mc_addr);
846	private->smu_buffer_addr_high = upper_32_bits(mc_addr);
847
848	adev->smu.smumgr_funcs = &fiji_smumgr_funcs;
849
850	return 0;
851}
852
/* Tear down what fiji_smu_init() created: drop the TOC and SMU internal
 * buffer references, free the private data, and release the shared
 * ucode buffer if it was set up.  Always returns 0.
 */
int fiji_smu_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_unref(&adev->smu.toc_buf);
	amdgpu_bo_unref(&adev->smu.smu_buf);
	kfree(adev->smu.priv);
	adev->smu.priv = NULL;
	if (adev->firmware.fw_buf)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}