/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "smu8.h"
#include "smu8_fusion.h"
#include "cz_ppsmc.h"
#include "cz_smumgr.h"
#include "smu_ucode_xfer_cz.h"
#include "amdgpu_ucode.h"

#include "smu/smu_8_0_d.h"
#include "smu/smu_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

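/*
 * Read back the 32-bit argument register of the SMU message interface;
 * the SMU also uses this register to return values to the driver.
 */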
uint32_t cz_get_argument(struct amdgpu_device *adev)
{
	return RREG32(mmSMU_MP1_SRBM2P_ARG_0);
}

static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv =
		(struct cz_smu_private_data *)(adev->smu.priv);

	return priv;
}

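/*
 * Post a message to the SMU without waiting for it to be processed:
 * poll until the response register goes nonzero (the previous message
 * has been answered), clear it, then write the new message ID.
 */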
int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
{
	int i;
	u32 content = 0, tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
				    SMU_MP1_SRBM2P_RESP_0, CONTENT);
		if (content != tmp)
			break;
		udelay(1);
	}

	/* a timeout means the message handshake logic went wrong */
	if (i == adev->usec_timeout)
		return -EINVAL;

	WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0);
	WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg);

	return 0;
}

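/*
 * Send a message to the SMU and wait for its response; any result
 * other than PPSMC_Result_OK is treated as a failure.
 */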
int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
{
	int i;
	u32 content = 0, tmp = 0;

	if (cz_send_msg_to_smc_async(adev, msg))
		return -EINVAL;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
				    SMU_MP1_SRBM2P_RESP_0, CONTENT);
		if (content != tmp)
			break;
		udelay(1);
	}

	/* a timeout means the message handshake logic went wrong */
	if (i == adev->usec_timeout)
		return -EINVAL;

	if (tmp != PPSMC_Result_OK) {
		dev_err(adev->dev, "SMC failed to send message\n");
		return -EINVAL;
	}

	return 0;
}

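/*
 * The _with_parameter variants latch a 32-bit argument in
 * SMU_MP1_SRBM2P_ARG_0 before posting the message itself.
 */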
int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
					    u16 msg, u32 parameter)
{
	WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
	return cz_send_msg_to_smc_async(adev, msg);
}

int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
				      u16 msg, u32 parameter)
{
	WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
	return cz_send_msg_to_smc(adev, msg);
}

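/*
 * SMU SRAM is accessed through the MP0PUB indirect index/data pair.
 * Addresses must be dword aligned and must not run past the caller's
 * limit.
 */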
static int cz_set_smc_sram_address(struct amdgpu_device *adev,
				   u32 smc_address, u32 limit)
{
	if (smc_address & 3)
		return -EINVAL;
	if ((smc_address + 3) > limit)
		return -EINVAL;

	WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address);

	return 0;
}

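/* Read or write a single dword of SMU SRAM through the indirect pair. */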
int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
			   u32 *value, u32 limit)
{
	int ret;

	ret = cz_set_smc_sram_address(adev, smc_address, limit);
	if (ret)
		return ret;

	*value = RREG32(mmMP0PUB_IND_DATA_0);

	return 0;
}

int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
			    u32 value, u32 limit)
{
	int ret;

	ret = cz_set_smc_sram_address(adev, smc_address, limit);
	if (ret)
		return ret;

	WREG32(mmMP0PUB_IND_DATA_0, value);

	return 0;
}

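/*
 * Kick off SMU-driven firmware loading: clear UcodeLoadStatus in the
 * SMU firmware header, hand the TOC buffer address to the SMU, then
 * execute the ARAM, power-profiling and initialization jobs.
 */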
static int cz_smu_request_load_fw(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION +
		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4);

	/* prepare toc buffers */
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_DriverDramAddrHi,
					  priv->toc_buffer.mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_DriverDramAddrLo,
					  priv->toc_buffer.mc_addr_low);
	cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs);

	/* execute jobs */
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_ExecuteJob,
					  priv->toc_entry_aram);

	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_ExecuteJob,
					  priv->toc_entry_power_profiling_index);

	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_ExecuteJob,
					  priv->toc_entry_initialize_index);

	return 0;
}

/*
 * Check whether the firmware has been loaded; the SMU does not report
 * completion until loading has actually finished.
 */
static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev,
				       uint32_t fw_mask)
{
	int i;
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			 SMU8_FIRMWARE_HEADER_LOCATION +
			 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	WREG32(mmMP0PUB_IND_INDEX, index);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		dev_err(adev->dev,
			"SMU firmware load check failed, expected 0x%x, got 0x%x\n",
			fw_mask, RREG32(mmMP0PUB_IND_DATA));
		return -EINVAL;
	}

	return 0;
}

/*
 * Interface for the IP blocks to check firmware loading status:
 * returns 0 on success, nonzero otherwise.
 */
static int cz_smu_check_finished(struct amdgpu_device *adev,
				 enum AMDGPU_UCODE_ID id)
{
	switch (id) {
	case AMDGPU_UCODE_ID_SDMA0:
		if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_SDMA1:
		if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_CP_PFP:
		if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2:
		if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_RLC_G:
		if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED)
			return 0;
		break;
	case AMDGPU_UCODE_ID_MAXIMUM:
	default:
		break;
	}

	return 1;
}

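/*
 * Load the MEC firmware by hand: halt both MEC micro engines and point
 * the compute instruction cache at the ucode image.
 */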
static int cz_load_mec_firmware(struct amdgpu_device *adev)
{
	struct amdgpu_firmware_info *ucode =
		&adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
	uint32_t reg_data;
	uint32_t tmp;

	if (ucode->fw == NULL)
		return -EINVAL;

	/* Disable MEC parsing/prefetching */
	tmp = RREG32(mmCP_MEC_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	WREG32(mmCP_MEC_CNTL, tmp);

	tmp = RREG32(mmCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
	WREG32(mmCP_CPC_IC_BASE_CNTL, tmp);

	reg_data = lower_32_bits(ucode->mc_addr) &
		   REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
	WREG32(mmCP_CPC_IC_BASE_LO, reg_data);

	reg_data = upper_32_bits(ucode->mc_addr) &
		   REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
	WREG32(mmCP_CPC_IC_BASE_HI, reg_data);

	return 0;
}

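/*
 * Start SMU-managed firmware loading and wait until every expected
 * image reports ready; on Stoney the SDMA1 and MEC JT2 images do not
 * exist, so their masks are dropped.
 */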
int cz_smu_start(struct amdgpu_device *adev)
{
	int ret = 0;

	uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
			       UCODE_ID_SDMA0_MASK |
			       UCODE_ID_SDMA1_MASK |
			       UCODE_ID_CP_CE_MASK |
			       UCODE_ID_CP_ME_MASK |
			       UCODE_ID_CP_PFP_MASK |
			       UCODE_ID_CP_MEC_JT1_MASK |
			       UCODE_ID_CP_MEC_JT2_MASK;

	if (adev->asic_type == CHIP_STONEY)
		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);

	cz_smu_request_load_fw(adev);
	ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
	if (ret)
		return ret;

	/* manually load MEC firmware for CZ */
	if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
		ret = cz_load_mec_firmware(adev);
		if (ret) {
			dev_err(adev->dev, "(%d) MEC firmware load failed\n", ret);
			return ret;
		}
	}

	/* set up the firmware-loaded flags */
	adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED |
			     AMDGPU_SDMA1_UCODE_LOADED |
			     AMDGPU_CPCE_UCODE_LOADED |
			     AMDGPU_CPPFP_UCODE_LOADED |
			     AMDGPU_CPME_UCODE_LOADED |
			     AMDGPU_CPMEC1_UCODE_LOADED |
			     AMDGPU_CPMEC2_UCODE_LOADED |
			     AMDGPU_CPRLC_UCODE_LOADED;

	if (adev->asic_type == CHIP_STONEY)
		adev->smu.fw_flags &= ~(AMDGPU_SDMA1_UCODE_LOADED | AMDGPU_CPMEC2_UCODE_LOADED);

	return ret;
}

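/*
 * Map an SMU ucode ID onto the common amdgpu ucode ID.  Both MEC jump
 * tables live in the MEC1 image.
 */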
static uint32_t cz_convert_fw_type(uint32_t fw_type)
{
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case UCODE_ID_CP_MEC_JT1:
	case UCODE_ID_CP_MEC_JT2:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	default:
		DRM_ERROR("UCode type is out of range!");
	}

	return result;
}

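/*
 * Translate a scratch-entry enum into the task argument the SMU
 * firmware expects in the TOC.
 */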
static uint8_t cz_smu_translate_firmware_enum_to_arg(
			enum cz_scratch_entry firmware_enum)
{
	uint8_t ret = 0;

	switch (firmware_enum) {
	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
		ret = UCODE_ID_SDMA0;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
		ret = UCODE_ID_SDMA1;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
		ret = UCODE_ID_CP_CE;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
		ret = UCODE_ID_CP_PFP;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
		ret = UCODE_ID_CP_ME;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
		ret = UCODE_ID_CP_MEC_JT1;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
		ret = UCODE_ID_CP_MEC_JT2;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
		ret = UCODE_ID_GMCON_RENG;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
		ret = UCODE_ID_RLC_G;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
		ret = UCODE_ID_RLC_SCRATCH;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
		ret = UCODE_ID_RLC_SRM_ARAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
		ret = UCODE_ID_RLC_SRM_DRAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
		ret = UCODE_ID_DMCU_ERAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
		ret = UCODE_ID_DMCU_IRAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
		ret = TASK_ARG_INIT_MM_PWR_LOG;
		break;
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
	case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
	case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
	case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
		ret = TASK_ARG_REG_MMIO;
		break;
	case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
		ret = TASK_ARG_INIT_CLK_TABLE;
		break;
	}

	return ret;
}

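/*
 * Describe one firmware image for the TOC: record its GPU address and
 * size.  For the MEC jump tables, point at the jump table within the
 * MEC image instead of the whole binary.
 */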
static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
						 enum cz_scratch_entry firmware_enum,
						 struct cz_buffer_entry *entry)
{
	uint64_t gpu_addr;
	uint32_t data_size;
	uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id);
	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
	const struct gfx_firmware_header_v1_0 *header;

	if (ucode->fw == NULL)
		return -EINVAL;

	gpu_addr = ucode->mc_addr;
	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	data_size = le32_to_cpu(header->header.ucode_size_bytes);

	if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) ||
	    (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) {
		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
		data_size = le32_to_cpu(header->jt_size) << 2;
	}

	entry->mc_addr_low = lower_32_bits(gpu_addr);
	entry->mc_addr_high = upper_32_bits(gpu_addr);
	entry->data_size = data_size;
	entry->firmware_ID = firmware_enum;

	return 0;
}

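/*
 * Carve a scratch region out of the SMU buffer and describe it in a
 * buffer entry.
 */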
static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev,
						enum cz_scratch_entry scratch_type,
						uint32_t size_in_byte,
						struct cz_buffer_entry *entry)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) |
			   priv->smu_buffer.mc_addr_low;

	mc_addr += size_in_byte;

	priv->smu_buffer_used_bytes += size_in_byte;
	entry->data_size = size_in_byte;
	entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes;
	entry->mc_addr_low = lower_32_bits(mc_addr);
	entry->mc_addr_high = upper_32_bits(mc_addr);
	entry->firmware_ID = scratch_type;

	return 0;
}

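/*
 * Append a ucode-load task to the TOC, pointing at the driver buffer
 * entry that describes the image; tasks are chained through ->next
 * unless this is the last one in the job.
 */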
static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev,
						  enum cz_scratch_entry firmware_enum,
						  bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	for (i = 0; i < priv->driver_buffer_length; i++)
		if (priv->driver_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->driver_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->driver_buffer[i].mc_addr_low;
	task->addr.high = priv->driver_buffer[i].mc_addr_high;
	task->size_bytes = priv->driver_buffer[i].data_size;

	return 0;
}

static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev,
					       enum cz_scratch_entry firmware_enum,
					       uint8_t type, bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = type;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->scratch_buffer[i].mc_addr_low;
	task->addr.high = priv->scratch_buffer[i].mc_addr_high;
	task->size_bytes = priv->scratch_buffer[i].data_size;

	if (firmware_enum == CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS) {
		struct cz_ih_meta_data *pIHReg_restore =
			(struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr;
		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}

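/*
 * The helpers below build the TOC job lists that the SMU walks for RLC
 * ARAM save, VDDGFX enter/exit, power profiling, boot-up firmware
 * loading and clock-table transfers.
 */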
static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_aram = priv->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(adev,
					    CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
					    TASK_TYPE_UCODE_SAVE, true);

	return 0;
}

static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(adev,
					    CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
					    TASK_TYPE_UCODE_SAVE, false);
	cz_smu_populate_single_scratch_task(adev,
					    CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
					    TASK_TYPE_UCODE_SAVE, true);

	return 0;
}

static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count;

	/* populate ucode */
	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
	}

	/* populate scratch */
	cz_smu_populate_single_scratch_task(adev,
					    CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
					    TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
					    CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
					    TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
					    CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
					    TASK_TYPE_UCODE_LOAD, true);

	return 0;
}

static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_power_profiling_index = priv->toc_entry_used_count;

	cz_smu_populate_single_scratch_task(adev,
					    CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
					    TASK_TYPE_INITIALIZE, true);

	return 0;
}

static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_initialize_index = priv->toc_entry_used_count;

	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
	}

	return 0;
}

static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_clock_table = priv->toc_entry_used_count;

	cz_smu_populate_single_scratch_task(adev,
					    CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
					    TASK_TYPE_INITIALIZE, true);

	return 0;
}

static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev)
{
	int i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
		toc->JobList[i] = (uint8_t)IGNORE_JOB;

	return 0;
}

/*
 * cz smu uninitialization
 */
int cz_smu_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_unref(&adev->smu.toc_buf);
	amdgpu_bo_unref(&adev->smu.smu_buf);
	kfree(adev->smu.priv);
	adev->smu.priv = NULL;
	if (adev->firmware.smu_load)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}

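/*
 * Have the SMU copy the clock table into the driver's scratch buffer
 * so it can be inspected (and, after editing, uploaded again).
 */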
int cz_smu_download_pptable(struct amdgpu_device *adev, void **table)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
		    CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	*table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr;

	/* prepare buffer for pptable */
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_SetClkTableAddrHi,
					  priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_SetClkTableAddrLo,
					  priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_ExecuteJob,
					  priv->toc_entry_clock_table);

	/* actual downloading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram);

	return 0;
}

int cz_smu_upload_pptable(struct amdgpu_device *adev)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
		    CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	/* prepare SMU */
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_SetClkTableAddrHi,
					  priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_SetClkTableAddrLo,
					  priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
					  PPSMC_MSG_ExecuteJob,
					  priv->toc_entry_clock_table);

	/* actual uploading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu);

	return 0;
}

/*
 * cz smumgr functions initialization
 */
static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = {
	.check_fw_load_finish = cz_smu_check_finished,
	.request_smu_load_fw = NULL,
	.request_smu_specific_fw = NULL,
};

/*
 * cz smu initialization
 */
int cz_smu_init(struct amdgpu_device *adev)
{
	int ret = -EINVAL;
	uint64_t mc_addr = 0;
	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
	void *toc_buf_ptr = NULL;
	void *smu_buf_ptr = NULL;

	struct cz_smu_private_data *priv =
		kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	/* allocate firmware buffers */
	if (adev->firmware.smu_load)
		amdgpu_ucode_init_bo(adev);

	adev->smu.priv = priv;
	adev->smu.fw_flags = 0;
	priv->toc_buffer.data_size = 4096;

	priv->smu_buffer.data_size =
		ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
		ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
		ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);

	/* prepare toc buffer and smu buffer:
	 * 1. create amdgpu_bo for toc buffer and smu buffer
	 * 2. pin mc address
	 * 3. map kernel virtual address
	 */
	ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
			       toc_buf);
	if (ret) {
		dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
			       smu_buf);
	if (ret) {
		dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
		return ret;
	}

	/* toc buffer reserve/pin/map */
	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.toc_buf);
		dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.toc_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
	if (ret)
		goto smu_init_failed;

	amdgpu_bo_unreserve(adev->smu.toc_buf);

	priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr);
	priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr);
	priv->toc_buffer.kaddr = toc_buf_ptr;

	/* smu buffer reserve/pin/map */
	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.smu_buf);
		dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.smu_buf);
		amdgpu_bo_unref(&adev->smu.smu_buf);
		dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
	if (ret)
		goto smu_init_failed;

	amdgpu_bo_unreserve(adev->smu.smu_buf);

	priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr);
	priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr);
	priv->smu_buffer.kaddr = smu_buf_ptr;

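	/* describe every firmware image the SMU will load on our behalf */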
	if (adev->firmware.smu_load) {
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;

		if (adev->asic_type == CHIP_STONEY) {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		} else {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		}
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (adev->asic_type == CHIP_STONEY) {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		} else {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		}
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
	}

	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
			UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
			UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
			UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
			sizeof(struct SMU8_MultimediaPowerLogData),
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
			sizeof(struct SMU8_Fusion_ClkTable),
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;

	cz_smu_initialize_toc_empty_job_list(adev);
	cz_smu_construct_toc_for_rlc_aram_save(adev);
	cz_smu_construct_toc_for_vddgfx_enter(adev);
	cz_smu_construct_toc_for_vddgfx_exit(adev);
	cz_smu_construct_toc_for_power_profiling(adev);
	cz_smu_construct_toc_for_bootup(adev);
	cz_smu_construct_toc_for_clock_table(adev);

	/* init the smumgr functions */
	adev->smu.smumgr_funcs = &cz_smumgr_funcs;

	return 0;

smu_init_failed:
	amdgpu_bo_unref(toc_buf);
	amdgpu_bo_unref(smu_buf);

	return ret;
}