// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) based CPUFreq Interface driver
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 * Sudeep Holla <sudeep.holla@arm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>
#include <linux/units.h>

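/**
 * struct scmi_data - Per-policy private data
 * @domain_id: SCMI performance domain backing this policy's CPUs
 * @nr_opp: Number of OPPs; non-zero only on the policy that added the OPPs
 *          and is therefore responsible for registering the energy model
 * @cpu_dev: Device of the CPU that initialised the policy
 * @opp_shared_cpus: CPUs sharing an OPP table, possibly spanning policies
 */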
struct scmi_data {
	int domain_id;
	int nr_opp;
	struct device *cpu_dev;
	cpumask_var_t opp_shared_cpus;
};

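/*
 * Protocol handle and perf protocol operations, obtained once from the SCMI
 * core at probe time and shared by all policies.
 */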
static struct scmi_protocol_handle *ph;
static const struct scmi_perf_proto_ops *perf_ops;

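/* Report the current frequency in kHz; SCMI firmware reports rates in Hz. */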
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
	struct scmi_data *priv = policy->driver_data;
	unsigned long rate;
	int ret;

	ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
	if (ret)
		return 0;
	return rate / 1000;
}

/*
 * perf_ops->freq_set is not synchronous: the actual OPP change happens
 * asynchronously, and completion can be notified if the corresponding
 * SCMI firmware events are subscribed to.
 */
static int
scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
	struct scmi_data *priv = policy->driver_data;
	u64 freq = policy->freq_table[index].frequency;

	return perf_ops->freq_set(ph, priv->domain_id, freq * 1000, false);
}

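/*
 * ->fast_switch() contract: return the frequency actually set (here the
 * requested target) on success, or 0 on failure.
 */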
static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct scmi_data *priv = policy->driver_data;

	if (!perf_ops->freq_set(ph, priv->domain_id,
				target_freq * 1000, true))
		return target_freq;

	return 0;
}

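/*
 * Resolve the SCMI performance domain for a CPU from its DT node: try the
 * "clocks" phandle first, then fall back to the "power-domains" entry named
 * "perf".
 */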
static int scmi_cpu_domain_id(struct device *cpu_dev)
{
	struct device_node *np = cpu_dev->of_node;
	struct of_phandle_args domain_id;
	int index;

	if (of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
				       &domain_id)) {
		/* Find the corresponding index for power-domain "perf". */
		index = of_property_match_string(np, "power-domain-names",
						 "perf");
		if (index < 0)
			return -EINVAL;

		if (of_parse_phandle_with_args(np, "power-domains",
					       "#power-domain-cells", index,
					       &domain_id))
			return -EINVAL;
	}

	return domain_id.args[0];
}

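/*
 * Build the mask of CPUs that belong to the same SCMI performance domain as
 * cpu_dev, i.e. CPUs whose performance levels are controlled together.
 */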
static int
scmi_get_sharing_cpus(struct device *cpu_dev, int domain,
		      struct cpumask *cpumask)
{
	int cpu, tdomain;
	struct device *tcpu_dev;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		tcpu_dev = get_cpu_device(cpu);
		if (!tcpu_dev)
			continue;

		tdomain = scmi_cpu_domain_id(tcpu_dev);
		if (tdomain == domain)
			cpumask_set_cpu(cpu, cpumask);
	}

	return 0;
}

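/*
 * Energy Model callback (wired up via EM_DATA_CB() in
 * scmi_cpufreq_register_em()): ask the firmware for the estimated power cost
 * of running at (at least) *KHz and report back the matching frequency and
 * power at that performance level.
 */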
static int __maybe_unused
scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power,
		   unsigned long *KHz)
{
	enum scmi_power_scale power_scale = perf_ops->power_scale_get(ph);
	unsigned long Hz;
	int ret, domain;

	domain = scmi_cpu_domain_id(cpu_dev);
	if (domain < 0)
		return domain;

	/* Get the power cost of the performance domain. */
	Hz = *KHz * 1000;
	ret = perf_ops->est_power_get(ph, domain, &Hz, power);
	if (ret)
		return ret;

	/* Convert the power to uW if it is mW (ignore bogoW) */
	if (power_scale == SCMI_POWER_MILLIWATTS)
		*power *= MICROWATT_PER_MILLIWATT;

	/* The EM framework specifies the frequency in KHz. */
	*KHz = Hz / 1000;

	return 0;
}

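/*
 * Per-policy initialisation: resolve the perf domain, populate the OPP table
 * from firmware when DT does not already provide one, and build the cpufreq
 * frequency table from it.
 */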
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret, nr_opp, domain;
	unsigned int latency;
	struct device *cpu_dev;
	struct scmi_data *priv;
	struct cpufreq_frequency_table *freq_table;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", policy->cpu);
		return -ENODEV;
	}

	domain = scmi_cpu_domain_id(cpu_dev);
	if (domain < 0)
		return domain;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_free_priv;
	}

	/* Obtain CPUs that share SCMI performance controls */
	ret = scmi_get_sharing_cpus(cpu_dev, domain, policy->cpus);
	if (ret) {
		dev_warn(cpu_dev, "failed to get sharing cpumask\n");
		goto out_free_cpumask;
	}

	/*
	 * Obtain CPUs that share performance levels.
	 * The OPP 'sharing cpus' info may come from DT through an empty opp
	 * table and opp-shared.
	 */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
	if (ret || cpumask_empty(priv->opp_shared_cpus)) {
		/*
		 * Either opp-table is not set or no opp-shared was found.
		 * Use the CPU mask from SCMI to designate CPUs sharing an OPP
		 * table.
		 */
		cpumask_copy(priv->opp_shared_cpus, policy->cpus);
	}

	/*
	 * A previous CPU may have marked OPPs as shared for a few CPUs, based
	 * on what OPP core provided. If the current CPU is part of those few,
	 * then there is no need to add OPPs again.
	 */
	nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
	if (nr_opp <= 0) {
		ret = perf_ops->device_opps_add(ph, cpu_dev, domain);
		if (ret) {
			dev_warn(cpu_dev, "failed to add opps to the device\n");
			goto out_free_cpumask;
		}

		nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
		if (nr_opp <= 0) {
			dev_err(cpu_dev, "%s: No OPPs for this device: %d\n",
				__func__, nr_opp);

			ret = -ENODEV;
			goto out_free_opp;
		}

		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
		if (ret) {
			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
				__func__, ret);

			goto out_free_opp;
		}

		priv->nr_opp = nr_opp;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out_free_opp;
	}

	priv->cpu_dev = cpu_dev;
	priv->domain_id = domain;

	policy->driver_data = priv;
	policy->freq_table = freq_table;

	/* SCMI allows DVFS request for any domain from any CPU */
	policy->dvfs_possible_from_any_cpu = true;

	latency = perf_ops->transition_latency_get(ph, domain);
	if (!latency)
		latency = CPUFREQ_ETERNAL;

	policy->cpuinfo.transition_latency = latency;

	policy->fast_switch_possible =
		perf_ops->fast_switch_possible(ph, domain);

	return 0;

out_free_opp:
	dev_pm_opp_remove_all_dynamic(cpu_dev);

out_free_cpumask:
	free_cpumask_var(priv->opp_shared_cpus);

out_free_priv:
	kfree(priv);

	return ret;
}

static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct scmi_data *priv = policy->driver_data;

	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
	free_cpumask_var(priv->opp_shared_cpus);
	kfree(priv);

	return 0;
}

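/*
 * cpufreq ->register_em() callback: register an Energy Model perf domain on
 * behalf of all CPUs sharing this OPP table, using absolute power units only
 * when the firmware reports milliwatts or microwatts.
 */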
static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
{
	struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
	enum scmi_power_scale power_scale = perf_ops->power_scale_get(ph);
	struct scmi_data *priv = policy->driver_data;
	bool em_power_scale = false;

	/*
	 * This callback will be called for each policy, but we don't need to
	 * register with EM every time. Despite not being part of the same
	 * policy, some CPUs may still share their perf-domains, and a CPU from
	 * another policy may already have registered with EM on behalf of CPUs
	 * of this policy.
	 */
	if (!priv->nr_opp)
		return;

	if (power_scale == SCMI_POWER_MILLIWATTS
			|| power_scale == SCMI_POWER_MICROWATTS)
		em_power_scale = true;

	em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp,
				    &em_cb, priv->opp_shared_cpus,
				    em_power_scale);
}

static struct cpufreq_driver scmi_cpufreq_driver = {
	.name	= "scmi",
	.flags	= CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
		  CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		  CPUFREQ_IS_COOLING_DEV,
	.verify	= cpufreq_generic_frequency_table_verify,
	.attr	= cpufreq_generic_attr,
	.target_index	= scmi_cpufreq_set_target,
	.fast_switch	= scmi_cpufreq_fast_switch,
	.get	= scmi_cpufreq_get_rate,
	.init	= scmi_cpufreq_init,
	.exit	= scmi_cpufreq_exit,
	.register_em	= scmi_cpufreq_register_em,
};

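/*
 * Probe acquires the SCMI performance protocol operations from the SCMI core
 * and registers the cpufreq driver; the protocol reference is devres-managed
 * and released automatically when the scmi_device goes away.
 */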
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
	int ret;
	struct device *dev = &sdev->dev;
	const struct scmi_handle *handle;

	handle = sdev->handle;

	if (!handle)
		return -ENODEV;

	perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
	if (IS_ERR(perf_ops))
		return PTR_ERR(perf_ops);

#ifdef CONFIG_COMMON_CLK
	/* Dummy clock provider, as needed by OPP if the "clocks" property is used */
	if (of_property_present(dev->of_node, "#clock-cells")) {
		ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
		if (ret)
			return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__);
	}
#endif

	ret = cpufreq_register_driver(&scmi_cpufreq_driver);
	if (ret) {
		dev_err(dev, "%s: registering cpufreq failed, err: %d\n",
			__func__, ret);
	}

	return ret;
}

static void scmi_cpufreq_remove(struct scmi_device *sdev)
{
	cpufreq_unregister_driver(&scmi_cpufreq_driver);
}

static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_PERF, "cpufreq" },
	{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_cpufreq_drv = {
	.name		= "scmi-cpufreq",
	.probe		= scmi_cpufreq_probe,
	.remove		= scmi_cpufreq_remove,
	.id_table	= scmi_id_table,
};
module_scmi_driver(scmi_cpufreq_drv);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI CPUFreq interface driver");
MODULE_LICENSE("GPL v2");