/* Extraction note: drivers/platform/x86/amd/pmf/core.c — this file is not present in v5.14.15. */
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * AMD Platform Management Framework Driver
4 *
5 * Copyright (c) 2022, Advanced Micro Devices, Inc.
6 * All Rights Reserved.
7 *
8 * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
9 */
10
11#include <linux/debugfs.h>
12#include <linux/iopoll.h>
13#include <linux/module.h>
14#include <linux/pci.h>
15#include <linux/platform_device.h>
16#include <linux/power_supply.h>
17#include "pmf.h"
18
19/* PMF-SMU communication registers */
20#define AMD_PMF_REGISTER_MESSAGE 0xA18
21#define AMD_PMF_REGISTER_RESPONSE 0xA78
22#define AMD_PMF_REGISTER_ARGUMENT 0xA58
23
24/* Base address of SMU for mapping physical address to virtual address */
25#define AMD_PMF_SMU_INDEX_ADDRESS 0xB8
26#define AMD_PMF_SMU_INDEX_DATA 0xBC
27#define AMD_PMF_MAPPING_SIZE 0x01000
28#define AMD_PMF_BASE_ADDR_OFFSET 0x10000
29#define AMD_PMF_BASE_ADDR_LO 0x13B102E8
30#define AMD_PMF_BASE_ADDR_HI 0x13B102EC
31#define AMD_PMF_BASE_ADDR_LO_MASK GENMASK(15, 0)
32#define AMD_PMF_BASE_ADDR_HI_MASK GENMASK(31, 20)
33
34/* SMU Response Codes */
35#define AMD_PMF_RESULT_OK 0x01
36#define AMD_PMF_RESULT_CMD_REJECT_BUSY 0xFC
37#define AMD_PMF_RESULT_CMD_REJECT_PREREQ 0xFD
38#define AMD_PMF_RESULT_CMD_UNKNOWN 0xFE
39#define AMD_PMF_RESULT_FAILED 0xFF
40
41/* List of supported CPU ids */
42#define AMD_CPU_ID_RMB 0x14b5
43#define AMD_CPU_ID_PS 0x14e8
44
45#define PMF_MSG_DELAY_MIN_US 50
46#define RESPONSE_REGISTER_LOOP_MAX 20000
47
48#define DELAY_MIN_US 2000
49#define DELAY_MAX_US 3000
50
/*
 * Metrics Table sampling period (ms); tunable at runtime via sysfs (0644).
 * Drives the re-arm interval of the delayed work in amd_pmf_get_metrics().
 */
static int metrics_table_loop_ms = 1000;
module_param(metrics_table_loop_ms, int, 0644);
MODULE_PARM_DESC(metrics_table_loop_ms, "Metrics Table sample size time (default = 1000ms)");

/*
 * Opt-in load on older platforms (ACPI id with driver_data 0x100).
 * Read-only after load (0444): checked once in amd_pmf_probe().
 */
static bool force_load;
module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");
60
61static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
62{
63 struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);
64
65 if (event != PSY_EVENT_PROP_CHANGED)
66 return NOTIFY_OK;
67
68 if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
69 is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
70 is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
71 if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
72 return NOTIFY_DONE;
73 }
74
75 amd_pmf_set_sps_power_limits(pmf);
76
77 return NOTIFY_OK;
78}
79
/*
 * debugfs show handler: dump the slider power limits currently in effect
 * for the active power source (AC/DC) and platform-profile mode.
 */
static int current_power_limits_show(struct seq_file *seq, void *unused)
{
	struct amd_pmf_dev *dev = seq->private;
	struct amd_pmf_static_slider_granular table;
	int mode, src = 0;

	/* Negative return is an error code from the profile lookup */
	mode = amd_pmf_get_pprof_modes(dev);
	if (mode < 0)
		return mode;

	src = amd_pmf_get_power_source();
	/* SLIDER_OP_GET fills @table; no return value to check here */
	amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table);
	seq_printf(seq, "spl:%u fppt:%u sppt:%u sppt_apu_only:%u stt_min:%u stt[APU]:%u stt[HS2]: %u\n",
		   table.prop[src][mode].spl,
		   table.prop[src][mode].fppt,
		   table.prop[src][mode].sppt,
		   table.prop[src][mode].sppt_apu_only,
		   table.prop[src][mode].stt_min,
		   table.prop[src][mode].stt_skin_temp[STT_TEMP_APU],
		   table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]);
	return 0;
}
/* Generates current_power_limits_fops for debugfs_create_file() below */
DEFINE_SHOW_ATTRIBUTE(current_power_limits);
103
/* Remove the amd_pmf debugfs directory and everything inside it */
static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
{
	debugfs_remove_recursive(dev->dbgfs_dir);
}
108
109static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
110{
111 dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
112 debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
113 ¤t_power_limits_fops);
114}
115
116int amd_pmf_get_power_source(void)
117{
118 if (power_supply_is_system_supplied() > 0)
119 return POWER_SOURCE_AC;
120 else
121 return POWER_SOURCE_DC;
122}
123
/*
 * Periodic worker: pull a fresh Metrics Table snapshot from the PMFW,
 * feed it to whichever dynamic feature is active (Auto Mode or CnQF),
 * then re-arm itself. Runs entirely under update_mutex.
 */
static void amd_pmf_get_metrics(struct work_struct *work)
{
	struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work);
	ktime_t time_elapsed_ms;
	int socket_power;

	mutex_lock(&dev->update_mutex);
	/* Transfer table contents: PMFW writes into dev->buf (DRAM address
	 * registered in amd_pmf_init_metrics_table()); snapshot into m_table.
	 * NOTE(review): the send_cmd return value is ignored here — on SMU
	 * failure m_table is refreshed from a zeroed buffer.
	 */
	memset(dev->buf, 0, sizeof(dev->m_table));
	amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
	memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));

	/* Milliseconds since the previous sample (start_time set below) */
	time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
	/* Calculate the avg SoC power consumption (APU + dGPU) */
	socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;

	if (dev->amt_enabled) {
		/* Apply the Auto Mode transition */
		amd_pmf_trans_automode(dev, socket_power, time_elapsed_ms);
	}

	if (dev->cnqf_enabled) {
		/* Apply the CnQF transition */
		amd_pmf_trans_cnqf(dev, socket_power, time_elapsed_ms);
	}

	/* Stamp this sample and re-arm for the next period */
	dev->start_time = ktime_to_ms(ktime_get());
	schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
	mutex_unlock(&dev->update_mutex);
}
154
/* MMIO read of a PMF-SMU communication register (offset from regbase) */
static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
{
	return ioread32(dev->regbase + reg_offset);
}
159
/* MMIO write of a PMF-SMU communication register (offset from regbase) */
static inline void amd_pmf_reg_write(struct amd_pmf_dev *dev, int reg_offset, u32 val)
{
	iowrite32(val, dev->regbase + reg_offset);
}
164
165static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
166{
167 u32 value;
168
169 value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_RESPONSE);
170 dev_dbg(dev->dev, "AMD_PMF_REGISTER_RESPONSE:%x\n", value);
171
172 value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
173 dev_dbg(dev->dev, "AMD_PMF_REGISTER_ARGUMENT:%d\n", value);
174
175 value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_MESSAGE);
176 dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
177}
178
/*
 * amd_pmf_send_cmd - issue one mailbox command to the PMFW/SMU.
 * @dev:     PMF context (regbase + mailbox serialization lock)
 * @message: command ID for the message register
 * @get:     when true, read the command result back into @data
 * @arg:     32-bit argument written before the message
 * @data:    out-pointer for the result; written only when @get is true
 *
 * Protocol: wait for the response register to be non-zero (previous
 * transaction finished), clear it, write argument then message, then
 * poll the response register for this command's completion code.
 *
 * Returns 0 on success, -ETIMEDOUT/-EBUSY/-EINVAL/-EIO on failure.
 */
int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
{
	int rc;
	u32 val;

	mutex_lock(&dev->lock);

	/* Wait until any previous transaction has completed (reg != 0) */
	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
				val, val != 0, PMF_MSG_DELAY_MIN_US,
				PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "failed to talk to SMU\n");
		goto out_unlock;
	}

	/* Write zero to response register (arms completion detection) */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_RESPONSE, 0);

	/* Write argument into argument register */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_ARGUMENT, arg);

	/* Write message ID to message ID register; this kicks off the command */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_MESSAGE, message);

	/* Wait until the SMU posts a completion code for this command */
	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
				val, val != 0, PMF_MSG_DELAY_MIN_US,
				PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "SMU response timed out\n");
		goto out_unlock;
	}

	switch (val) {
	case AMD_PMF_RESULT_OK:
		if (get) {
			/* PMFW may take longer time to return back the data */
			usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
			*data = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
		}
		break;
	case AMD_PMF_RESULT_CMD_REJECT_BUSY:
		dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
		rc = -EBUSY;
		goto out_unlock;
	case AMD_PMF_RESULT_CMD_UNKNOWN:
		dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
		rc = -EINVAL;
		goto out_unlock;
	case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
	case AMD_PMF_RESULT_FAILED:
	default:
		dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
		rc = -EIO;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&dev->lock);
	/*
	 * NOTE(review): registers are dumped after dropping the lock, so a
	 * concurrent command may already have overwritten them; dbg-only,
	 * but confirm this ordering is intentional.
	 */
	amd_pmf_dump_registers(dev);
	return rc;
}
242
/*
 * Root-complex IDs of supported SoCs. Used only via pci_match_id() in
 * probe to gate the driver on known silicon — not a PCI binding table
 * (no MODULE_DEVICE_TABLE; binding is via ACPI).
 */
static const struct pci_device_id pmf_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
	{ }
};
248
/*
 * Allocate the shared Metrics Table buffer, hand its physical address to
 * the PMFW (DRAM_ADDR_HIGH/LOW), and start the periodic sampling worker.
 * Returns 0 or -ENOMEM. The buffer is freed in amd_pmf_remove().
 */
int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
{
	u64 phys_addr;
	u32 hi, low;

	INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);

	/* Get Metrics Table Address */
	dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
	if (!dev->buf)
		return -ENOMEM;

	/*
	 * PMFW is handed a raw physical address and writes the table into it.
	 * NOTE(review): assumes the firmware can reach this address without a
	 * DMA mapping — confirm against the platform contract.
	 */
	phys_addr = virt_to_phys(dev->buf);
	hi = phys_addr >> 32;
	low = phys_addr & GENMASK(31, 0);

	/* NOTE(review): send_cmd results ignored; failures surface later as
	 * an all-zero metrics table rather than an error here. */
	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);

	/*
	 * Start collecting the metrics data after a small delay
	 * or else, we might end up getting stale values from PMFW.
	 */
	schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms * 3));

	return 0;
}
276
277static void amd_pmf_init_features(struct amd_pmf_dev *dev)
278{
279 int ret;
280
281 /* Enable Static Slider */
282 if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
283 amd_pmf_init_sps(dev);
284 dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
285 }
286
287 /* Enable Auto Mode */
288 if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
289 amd_pmf_init_auto_mode(dev);
290 dev_dbg(dev->dev, "Auto Mode Init done\n");
291 } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
292 is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
293 /* Enable Cool n Quiet Framework (CnQF) */
294 ret = amd_pmf_init_cnqf(dev);
295 if (ret)
296 dev_warn(dev->dev, "CnQF Init failed\n");
297 }
298}
299
300static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
301{
302 if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
303 amd_pmf_deinit_sps(dev);
304
305 if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
306 amd_pmf_deinit_auto_mode(dev);
307 } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
308 is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
309 amd_pmf_deinit_cnqf(dev);
310 }
311}
312
/*
 * ACPI binding table. driver_data 0x100 marks an older platform that
 * only loads when the force_load module parameter is set (see probe).
 */
static const struct acpi_device_id amd_pmf_acpi_ids[] = {
	{"AMDI0100", 0x100},
	{"AMDI0102", 0},
	{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
319
/*
 * Probe: verify the SoC is supported, discover the SMU MMIO base via the
 * root complex's SMU index/data config-space pair, map the mailbox
 * registers, then bring up ACPI glue, features, debugfs and the
 * power-source notifier.
 */
static int amd_pmf_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *id;
	struct amd_pmf_dev *dev;
	struct pci_dev *rdev;
	u32 base_addr_lo;
	u32 base_addr_hi;
	u64 base_addr;
	u32 val;
	int err;

	id = acpi_match_device(amd_pmf_acpi_ids, &pdev->dev);
	if (!id)
		return -ENODEV;

	/* Older platforms (driver_data 0x100) load only with force_load */
	if (id->driver_data == 0x100 && !force_load)
		return -ENODEV;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->dev = &pdev->dev;

	/* Root complex at 0000:00:00.0 identifies the SoC */
	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (!rdev || !pci_match_id(pmf_pci_ids, rdev)) {
		pci_dev_put(rdev);
		return -ENODEV;
	}

	dev->cpu_id = rdev->device;

	/* Select BASE_ADDR_LO through the SMU index register ... */
	err = pci_write_config_dword(rdev, AMD_PMF_SMU_INDEX_ADDRESS, AMD_PMF_BASE_ADDR_LO);
	if (err) {
		dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMF_SMU_INDEX_ADDRESS);
		pci_dev_put(rdev);
		return pcibios_err_to_errno(err);
	}

	/* ... and read it back through the data register */
	err = pci_read_config_dword(rdev, AMD_PMF_SMU_INDEX_DATA, &val);
	if (err) {
		pci_dev_put(rdev);
		return pcibios_err_to_errno(err);
	}

	/*
	 * NOTE(review): _lo is masked with the _HI mask and vice versa below;
	 * this mirrors sibling AMD drivers but confirm the masks are named
	 * for the address bits they extract, not the register they apply to.
	 */
	base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;

	err = pci_write_config_dword(rdev, AMD_PMF_SMU_INDEX_ADDRESS, AMD_PMF_BASE_ADDR_HI);
	if (err) {
		dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMF_SMU_INDEX_ADDRESS);
		pci_dev_put(rdev);
		return pcibios_err_to_errno(err);
	}

	err = pci_read_config_dword(rdev, AMD_PMF_SMU_INDEX_DATA, &val);
	if (err) {
		pci_dev_put(rdev);
		return pcibios_err_to_errno(err);
	}

	base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK;
	pci_dev_put(rdev);
	base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);

	/* Map only the mailbox window at the fixed offset into SMU space */
	dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMF_BASE_ADDR_OFFSET,
				    AMD_PMF_MAPPING_SIZE);
	if (!dev->regbase)
		return -ENOMEM;

	/* lock serializes SMU mailbox; update_mutex guards metrics state */
	mutex_init(&dev->lock);
	mutex_init(&dev->update_mutex);

	apmf_acpi_init(dev);
	platform_set_drvdata(pdev, dev);
	amd_pmf_init_features(dev);
	apmf_install_handler(dev);
	amd_pmf_dbgfs_register(dev);

	/* React to AC/DC changes from here on */
	dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
	power_supply_reg_notifier(&dev->pwr_src_notifier);

	dev_info(dev->dev, "registered PMF device successfully\n");

	return 0;
}
404
/*
 * Remove: tear down in reverse-ish order of probe. The notifier goes
 * first so no power-source event races the feature teardown.
 */
static int amd_pmf_remove(struct platform_device *pdev)
{
	struct amd_pmf_dev *dev = platform_get_drvdata(pdev);

	power_supply_unreg_notifier(&dev->pwr_src_notifier);
	/*
	 * NOTE(review): assumes the feature deinit paths cancel the
	 * self-rearming metrics delayed work before dev->buf is freed
	 * below — confirm in the auto-mode/CnQF deinit code.
	 */
	amd_pmf_deinit_features(dev);
	apmf_acpi_deinit(dev);
	amd_pmf_dbgfs_unregister(dev);
	mutex_destroy(&dev->lock);
	mutex_destroy(&dev->update_mutex);
	/* Metrics buffer from amd_pmf_init_metrics_table(); kfree(NULL) is a no-op */
	kfree(dev->buf);
	return 0;
}
418
/* sysfs attribute groups attached to every bound device (CnQF toggle) */
static const struct attribute_group *amd_pmf_driver_groups[] = {
	&cnqf_feature_attribute_group,
	NULL,
};
423
/* Platform driver bound via the ACPI IDs above */
static struct platform_driver amd_pmf_driver = {
	.driver = {
		.name = "amd-pmf",
		.acpi_match_table = amd_pmf_acpi_ids,
		.dev_groups = amd_pmf_driver_groups,
	},
	.probe = amd_pmf_probe,
	.remove = amd_pmf_remove,
};
module_platform_driver(amd_pmf_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AMD Platform Management Framework Driver");