/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
22
23#define SWSMU_CODE_LAYER_L3
24
25#include <linux/firmware.h>
26#include "amdgpu.h"
27#include "amdgpu_smu.h"
28#include "atomfirmware.h"
29#include "amdgpu_atomfirmware.h"
30#include "smu_v12_0.h"
31#include "soc15_common.h"
32#include "atom.h"
33#include "smu_cmn.h"
34
35#include "asic_reg/mp/mp_12_0_0_offset.h"
36#include "asic_reg/mp/mp_12_0_0_sh_mask.h"
37#include "asic_reg/smuio/smuio_12_0_0_offset.h"
38#include "asic_reg/smuio/smuio_12_0_0_sh_mask.h"
39
40/*
41 * DO NOT use these for err/warn/info/debug messages.
42 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
43 * They are more MGPU friendly.
44 */
45#undef pr_err
46#undef pr_warn
47#undef pr_info
48#undef pr_debug
49
50// because some SMU12 based ASICs use older ip offset tables
51// we should undefine this register from the smuio12 header
52// to prevent confusion down the road
53#undef mmPWR_MISC_CNTL_STATUS
54
55#define smnMP1_FIRMWARE_FLAGS 0x3010024
56
57int smu_v12_0_check_fw_status(struct smu_context *smu)
58{
59 struct amdgpu_device *adev = smu->adev;
60 uint32_t mp1_fw_flags;
61
62 mp1_fw_flags = RREG32_PCIE(MP1_Public |
63 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
64
65 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
66 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
67 return 0;
68
69 return -EIO;
70}
71
72int smu_v12_0_check_fw_version(struct smu_context *smu)
73{
74 uint32_t if_version = 0xff, smu_version = 0xff;
75 uint16_t smu_major;
76 uint8_t smu_minor, smu_debug;
77 int ret = 0;
78
79 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
80 if (ret)
81 return ret;
82
83 smu_major = (smu_version >> 16) & 0xffff;
84 smu_minor = (smu_version >> 8) & 0xff;
85 smu_debug = (smu_version >> 0) & 0xff;
86
87 /*
88 * 1. if_version mismatch is not critical as our fw is designed
89 * to be backward compatible.
90 * 2. New fw usually brings some optimizations. But that's visible
91 * only on the paired driver.
92 * Considering above, we just leave user a warning message instead
93 * of halt driver loading.
94 */
95 if (if_version != smu->smc_driver_if_version) {
96 dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
97 "smu fw version = 0x%08x (%d.%d.%d)\n",
98 smu->smc_driver_if_version, if_version,
99 smu_version, smu_major, smu_minor, smu_debug);
100 dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
101 }
102
103 return ret;
104}
105
106int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
107{
108 if (!smu->is_apu)
109 return 0;
110
111 if (gate)
112 return smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
113 else
114 return smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
115}
116
117int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
118{
119 if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
120 return 0;
121
122 return smu_cmn_send_smc_msg_with_param(smu,
123 SMU_MSG_SetGfxCGPG,
124 enable ? 1 : 0,
125 NULL);
126}
127
128/**
129 * smu_v12_0_get_gfxoff_status - get gfxoff status
130 *
131 * @smu: amdgpu_device pointer
132 *
133 * This function will be used to get gfxoff status
134 *
135 * Returns 0=GFXOFF(default).
136 * Returns 1=Transition out of GFX State.
137 * Returns 2=Not in GFXOFF.
138 * Returns 3=Transition into GFXOFF.
139 */
140uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
141{
142 uint32_t reg;
143 uint32_t gfxOff_Status = 0;
144 struct amdgpu_device *adev = smu->adev;
145
146 reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
147 gfxOff_Status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
148 >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;
149
150 return gfxOff_Status;
151}
152
153int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
154{
155 int ret = 0, timeout = 500;
156
157 if (enable) {
158 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
159
160 } else {
161 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
162
163 /* confirm gfx is back to "on" state, timeout is 0.5 second */
164 while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) {
165 msleep(1);
166 timeout--;
167 if (timeout == 0) {
168 DRM_ERROR("disable gfxoff timeout and failed!\n");
169 break;
170 }
171 }
172 }
173
174 return ret;
175}
176
177int smu_v12_0_fini_smc_tables(struct smu_context *smu)
178{
179 struct smu_table_context *smu_table = &smu->smu_table;
180
181 kfree(smu_table->clocks_table);
182 smu_table->clocks_table = NULL;
183
184 kfree(smu_table->metrics_table);
185 smu_table->metrics_table = NULL;
186
187 kfree(smu_table->watermarks_table);
188 smu_table->watermarks_table = NULL;
189
190 return 0;
191}
192
193int smu_v12_0_set_default_dpm_tables(struct smu_context *smu)
194{
195 struct smu_table_context *smu_table = &smu->smu_table;
196
197 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
198}
199
200int smu_v12_0_mode2_reset(struct smu_context *smu){
201 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
202}
203
204int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
205 uint32_t min, uint32_t max)
206{
207 int ret = 0;
208
209 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
210 return 0;
211
212 switch (clk_type) {
213 case SMU_GFXCLK:
214 case SMU_SCLK:
215 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
216 if (ret)
217 return ret;
218
219 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
220 if (ret)
221 return ret;
222 break;
223 case SMU_FCLK:
224 case SMU_MCLK:
225 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
226 if (ret)
227 return ret;
228
229 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
230 if (ret)
231 return ret;
232 break;
233 case SMU_SOCCLK:
234 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
235 if (ret)
236 return ret;
237
238 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
239 if (ret)
240 return ret;
241 break;
242 case SMU_VCLK:
243 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
244 if (ret)
245 return ret;
246
247 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
248 if (ret)
249 return ret;
250 break;
251 default:
252 return -EINVAL;
253 }
254
255 return ret;
256}
257
258int smu_v12_0_set_driver_table_location(struct smu_context *smu)
259{
260 struct smu_table *driver_table = &smu->smu_table.driver_table;
261 int ret = 0;
262
263 if (driver_table->mc_address) {
264 ret = smu_cmn_send_smc_msg_with_param(smu,
265 SMU_MSG_SetDriverDramAddrHigh,
266 upper_32_bits(driver_table->mc_address),
267 NULL);
268 if (!ret)
269 ret = smu_cmn_send_smc_msg_with_param(smu,
270 SMU_MSG_SetDriverDramAddrLow,
271 lower_32_bits(driver_table->mc_address),
272 NULL);
273 }
274
275 return ret;
276}