v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2012 Freescale Semiconductor, Inc.
  4 *
  5 * Copyright (C) 2014 Linaro.
  6 * Viresh Kumar <viresh.kumar@linaro.org>
  7 */
  8
  9#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 10
 11#include <linux/clk.h>
 12#include <linux/cpu.h>
 13#include <linux/cpufreq.h>
 14#include <linux/cpumask.h>
 15#include <linux/err.h>
 16#include <linux/list.h>
 17#include <linux/module.h>
 18#include <linux/of.h>
 19#include <linux/pm_opp.h>
 20#include <linux/platform_device.h>
 21#include <linux/regulator/consumer.h>
 22#include <linux/slab.h>
 23#include <linux/thermal.h>
 24
 25#include "cpufreq-dt.h"
 26
 27struct private_data {
 28	struct list_head node;
 29
 30	cpumask_var_t cpus;
 31	struct device *cpu_dev;
 32	struct cpufreq_frequency_table *freq_table;
 33	bool have_static_opps;
 34	int opp_token;
 35};
 36
 37static LIST_HEAD(priv_list);
 38
 39static struct freq_attr *cpufreq_dt_attr[] = {
 40	&cpufreq_freq_attr_scaling_available_freqs,
 41	NULL,   /* Extra space for boost-attr if required */
 42	NULL,
 43};
 44
 45static struct private_data *cpufreq_dt_find_data(int cpu)
 46{
 47	struct private_data *priv;
 48
 49	list_for_each_entry(priv, &priv_list, node) {
 50		if (cpumask_test_cpu(cpu, priv->cpus))
 51			return priv;
 52	}
 53
 54	return NULL;
 55}
 56
 57static int set_target(struct cpufreq_policy *policy, unsigned int index)
 58{
 59	struct private_data *priv = policy->driver_data;
 60	unsigned long freq = policy->freq_table[index].frequency;
 61
 62	return dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
 63}
 64
 65/*
 66 * An earlier version of opp-v1 bindings used to name the regulator
 67 * "cpu0-supply"; we still need to handle that for backwards compatibility.
 68 */
 69static const char *find_supply_name(struct device *dev)
 70{
 71	struct device_node *np;
 72	struct property *pp;
 73	int cpu = dev->id;
 74	const char *name = NULL;
 75
 76	np = of_node_get(dev->of_node);
 77
 78	/* This must be valid for sure */
 79	if (WARN_ON(!np))
 80		return NULL;
 81
 82	/* Try "cpu0" for older DTs */
 83	if (!cpu) {
 84		pp = of_find_property(np, "cpu0-supply", NULL);
 85		if (pp) {
 86			name = "cpu0";
 87			goto node_put;
 88		}
 89	}
 90
 91	pp = of_find_property(np, "cpu-supply", NULL);
 92	if (pp) {
 93		name = "cpu";
 94		goto node_put;
 95	}
 96
 97	dev_dbg(dev, "no regulator for cpu%d\n", cpu);
 98node_put:
 99	of_node_put(np);
100	return name;
101}
102
103static int cpufreq_init(struct cpufreq_policy *policy)
104{
105	struct private_data *priv;
106	struct device *cpu_dev;
107	struct clk *cpu_clk;
108	unsigned int transition_latency;
109	int ret;
110
111	priv = cpufreq_dt_find_data(policy->cpu);
112	if (!priv) {
113		pr_err("failed to find data for cpu%d\n", policy->cpu);
114		return -ENODEV;
115	}
116	cpu_dev = priv->cpu_dev;
117
118	cpu_clk = clk_get(cpu_dev, NULL);
119	if (IS_ERR(cpu_clk)) {
120		ret = PTR_ERR(cpu_clk);
121		dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
122		return ret;
123	}
124
125	transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
126	if (!transition_latency)
127		transition_latency = CPUFREQ_ETERNAL;
128
129	cpumask_copy(policy->cpus, priv->cpus);
130	policy->driver_data = priv;
131	policy->clk = cpu_clk;
132	policy->freq_table = priv->freq_table;
133	policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;
134	policy->cpuinfo.transition_latency = transition_latency;
135	policy->dvfs_possible_from_any_cpu = true;
136
137	/* Support turbo/boost mode */
138	if (policy_has_boost_freq(policy)) {
139		/* This gets disabled by core on driver unregister */
140		ret = cpufreq_enable_boost_support();
141		if (ret)
142			goto out_clk_put;
143		cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
144	}
145
146	return 0;
147
148out_clk_put:
149	clk_put(cpu_clk);
150
151	return ret;
152}
153
154static int cpufreq_online(struct cpufreq_policy *policy)
155{
156	/* We did light-weight tear down earlier, nothing to do here */
157	return 0;
158}
159
160static int cpufreq_offline(struct cpufreq_policy *policy)
161{
162	/*
163	 * Preserve policy->driver_data and don't free resources on light-weight
164	 * tear down.
165	 */
166	return 0;
167}
168
169static int cpufreq_exit(struct cpufreq_policy *policy)
170{
171	clk_put(policy->clk);
172	return 0;
173}
174
175static struct cpufreq_driver dt_cpufreq_driver = {
176	.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
177		 CPUFREQ_IS_COOLING_DEV,
178	.verify = cpufreq_generic_frequency_table_verify,
179	.target_index = set_target,
180	.get = cpufreq_generic_get,
181	.init = cpufreq_init,
182	.exit = cpufreq_exit,
183	.online = cpufreq_online,
184	.offline = cpufreq_offline,
185	.register_em = cpufreq_register_em_with_opp,
186	.name = "cpufreq-dt",
187	.attr = cpufreq_dt_attr,
188	.suspend = cpufreq_generic_suspend,
189};
190
191static int dt_cpufreq_early_init(struct device *dev, int cpu)
192{
193	struct private_data *priv;
194	struct device *cpu_dev;
195	bool fallback = false;
196	const char *reg_name[] = { NULL, NULL };
197	int ret;
198
199	/* Check if this CPU is already covered by some other policy */
200	if (cpufreq_dt_find_data(cpu))
201		return 0;
202
203	cpu_dev = get_cpu_device(cpu);
204	if (!cpu_dev)
205		return -EPROBE_DEFER;
206
207	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
208	if (!priv)
209		return -ENOMEM;
210
211	if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
212		return -ENOMEM;
213
214	cpumask_set_cpu(cpu, priv->cpus);
215	priv->cpu_dev = cpu_dev;
216
217	/*
218	 * OPP layer will be taking care of regulators now, but it needs to know
219	 * the name of the regulator first.
220	 */
221	reg_name[0] = find_supply_name(cpu_dev);
222	if (reg_name[0]) {
223		priv->opp_token = dev_pm_opp_set_regulators(cpu_dev, reg_name);
224		if (priv->opp_token < 0) {
225			ret = dev_err_probe(cpu_dev, priv->opp_token,
226					    "failed to set regulators\n");
227			goto free_cpumask;
228		}
229	}
230
231	/* Get OPP-sharing information from "operating-points-v2" bindings */
232	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
233	if (ret) {
234		if (ret != -ENOENT)
235			goto out;
236
237		/*
238		 * operating-points-v2 not supported; fall back to all CPUs sharing
239		 * the OPP table for backward compatibility if the platform hasn't
240		 * set sharing CPUs.
241		 */
242		if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus))
243			fallback = true;
244	}
245
246	/*
247	 * Initialize OPP tables for all priv->cpus. They will be shared by
248	 * all CPUs which have marked their CPUs shared with OPP bindings.
249	 *
250	 * For platforms not using operating-points-v2 bindings, we do this
251	 * before updating priv->cpus. Otherwise, we will end up creating
252	 * duplicate OPPs for the CPUs.
253	 *
254	 * OPPs might be populated at runtime, don't fail for error here unless
255	 * it is -EPROBE_DEFER.
256	 */
257	ret = dev_pm_opp_of_cpumask_add_table(priv->cpus);
258	if (!ret) {
259		priv->have_static_opps = true;
260	} else if (ret == -EPROBE_DEFER) {
261		goto out;
262	}
263
264	/*
265	 * The OPP table must be initialized, statically or dynamically, by this
266	 * point.
267	 */
268	ret = dev_pm_opp_get_opp_count(cpu_dev);
269	if (ret <= 0) {
270		dev_err(cpu_dev, "OPP table can't be empty\n");
271		ret = -ENODEV;
272		goto out;
273	}
274
275	if (fallback) {
276		cpumask_setall(priv->cpus);
277		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
278		if (ret)
279			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
280				__func__, ret);
281	}
282
283	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table);
284	if (ret) {
285		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
286		goto out;
287	}
288
289	list_add(&priv->node, &priv_list);
290	return 0;
291
292out:
293	if (priv->have_static_opps)
294		dev_pm_opp_of_cpumask_remove_table(priv->cpus);
295	dev_pm_opp_put_regulators(priv->opp_token);
296free_cpumask:
297	free_cpumask_var(priv->cpus);
298	return ret;
299}
300
301static void dt_cpufreq_release(void)
302{
303	struct private_data *priv, *tmp;
304
305	list_for_each_entry_safe(priv, tmp, &priv_list, node) {
306		dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table);
307		if (priv->have_static_opps)
308			dev_pm_opp_of_cpumask_remove_table(priv->cpus);
309		dev_pm_opp_put_regulators(priv->opp_token);
310		free_cpumask_var(priv->cpus);
311		list_del(&priv->node);
312	}
313}
314
315static int dt_cpufreq_probe(struct platform_device *pdev)
316{
317	struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
318	int ret, cpu;
319
320	/* Request resources early so we can return in case of -EPROBE_DEFER */
321	for_each_possible_cpu(cpu) {
322		ret = dt_cpufreq_early_init(&pdev->dev, cpu);
323		if (ret)
324			goto err;
325	}
326
327	if (data) {
328		if (data->have_governor_per_policy)
329			dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
330
331		dt_cpufreq_driver.resume = data->resume;
332		if (data->suspend)
333			dt_cpufreq_driver.suspend = data->suspend;
334		if (data->get_intermediate) {
335			dt_cpufreq_driver.target_intermediate = data->target_intermediate;
336			dt_cpufreq_driver.get_intermediate = data->get_intermediate;
337		}
338	}
339
340	ret = cpufreq_register_driver(&dt_cpufreq_driver);
341	if (ret) {
342		dev_err(&pdev->dev, "failed register driver: %d\n", ret);
343		goto err;
344	}
345
346	return 0;
347err:
348	dt_cpufreq_release();
349	return ret;
350}
351
352static void dt_cpufreq_remove(struct platform_device *pdev)
353{
354	cpufreq_unregister_driver(&dt_cpufreq_driver);
355	dt_cpufreq_release();
356}
357
358static struct platform_driver dt_cpufreq_platdrv = {
359	.driver = {
360		.name	= "cpufreq-dt",
361	},
362	.probe		= dt_cpufreq_probe,
363	.remove_new	= dt_cpufreq_remove,
364};
365module_platform_driver(dt_cpufreq_platdrv);
366
367MODULE_ALIAS("platform:cpufreq-dt");
368MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
369MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
370MODULE_DESCRIPTION("Generic cpufreq driver");
371MODULE_LICENSE("GPL");
v4.17
  1/*
  2 * Copyright (C) 2012 Freescale Semiconductor, Inc.
  3 *
  4 * Copyright (C) 2014 Linaro.
  5 * Viresh Kumar <viresh.kumar@linaro.org>
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 */
 11
 12#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 13
 14#include <linux/clk.h>
 15#include <linux/cpu.h>
 16#include <linux/cpu_cooling.h>
 17#include <linux/cpufreq.h>
 18#include <linux/cpumask.h>
 19#include <linux/err.h>
 20#include <linux/module.h>
 21#include <linux/of.h>
 22#include <linux/pm_opp.h>
 23#include <linux/platform_device.h>
 24#include <linux/regulator/consumer.h>
 25#include <linux/slab.h>
 26#include <linux/thermal.h>
 27
 28#include "cpufreq-dt.h"
 29
 30struct private_data {
 31	struct opp_table *opp_table;
 32	struct device *cpu_dev;
 33	struct thermal_cooling_device *cdev;
 34	const char *reg_name;
 35};
 36
 37static struct freq_attr *cpufreq_dt_attr[] = {
 38	&cpufreq_freq_attr_scaling_available_freqs,
 39	NULL,   /* Extra space for boost-attr if required */
 40	NULL,
 41};
 42
 43static int set_target(struct cpufreq_policy *policy, unsigned int index)
 44{
 45	struct private_data *priv = policy->driver_data;
 46	unsigned long freq = policy->freq_table[index].frequency;
 47	int ret;
 48
 49	ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
 50
 51	if (!ret) {
 52		arch_set_freq_scale(policy->related_cpus, freq,
 53				    policy->cpuinfo.max_freq);
 54	}
 55
 56	return ret;
 57}
 58
 59/*
 60 * An earlier version of opp-v1 bindings used to name the regulator
 61 * "cpu0-supply"; we still need to handle that for backwards compatibility.
 62 */
 63static const char *find_supply_name(struct device *dev)
 64{
 65	struct device_node *np;
 66	struct property *pp;
 67	int cpu = dev->id;
 68	const char *name = NULL;
 69
 70	np = of_node_get(dev->of_node);
 71
 72	/* This must be valid for sure */
 73	if (WARN_ON(!np))
 74		return NULL;
 75
 76	/* Try "cpu0" for older DTs */
 77	if (!cpu) {
 78		pp = of_find_property(np, "cpu0-supply", NULL);
 79		if (pp) {
 80			name = "cpu0";
 81			goto node_put;
 82		}
 83	}
 84
 85	pp = of_find_property(np, "cpu-supply", NULL);
 86	if (pp) {
 87		name = "cpu";
 88		goto node_put;
 89	}
 90
 91	dev_dbg(dev, "no regulator for cpu%d\n", cpu);
 92node_put:
 93	of_node_put(np);
 94	return name;
 95}
 96
 97static int resources_available(void)
 98{
 99	struct device *cpu_dev;
100	struct regulator *cpu_reg;
101	struct clk *cpu_clk;
102	int ret = 0;
103	const char *name;
104
105	cpu_dev = get_cpu_device(0);
106	if (!cpu_dev) {
107		pr_err("failed to get cpu0 device\n");
108		return -ENODEV;
109	}
110
111	cpu_clk = clk_get(cpu_dev, NULL);
112	ret = PTR_ERR_OR_ZERO(cpu_clk);
113	if (ret) {
114		/*
115		 * If cpu's clk node is present, but clock is not yet
116		 * registered, we should try deferring probe.
117		 */
118		if (ret == -EPROBE_DEFER)
119			dev_dbg(cpu_dev, "clock not ready, retry\n");
120		else
121			dev_err(cpu_dev, "failed to get clock: %d\n", ret);
122
123		return ret;
124	}
125
126	clk_put(cpu_clk);
127
128	name = find_supply_name(cpu_dev);
129	/* Platform doesn't require regulator */
130	if (!name)
131		return 0;
132
133	cpu_reg = regulator_get_optional(cpu_dev, name);
134	ret = PTR_ERR_OR_ZERO(cpu_reg);
135	if (ret) {
136		/*
137		 * If cpu's regulator supply node is present, but regulator is
138		 * not yet registered, we should try deferring probe.
139		 */
140		if (ret == -EPROBE_DEFER)
141			dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
142		else
143			dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret);
144
145		return ret;
146	}
147
148	regulator_put(cpu_reg);
149	return 0;
150}
151
152static int cpufreq_init(struct cpufreq_policy *policy)
153{
154	struct cpufreq_frequency_table *freq_table;
155	struct opp_table *opp_table = NULL;
156	struct private_data *priv;
157	struct device *cpu_dev;
158	struct clk *cpu_clk;
159	unsigned int transition_latency;
160	bool fallback = false;
161	const char *name;
162	int ret;
163
164	cpu_dev = get_cpu_device(policy->cpu);
165	if (!cpu_dev) {
166		pr_err("failed to get cpu%d device\n", policy->cpu);
167		return -ENODEV;
168	}
169
170	cpu_clk = clk_get(cpu_dev, NULL);
171	if (IS_ERR(cpu_clk)) {
172		ret = PTR_ERR(cpu_clk);
173		dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
174		return ret;
175	}
176
177	/* Get OPP-sharing information from "operating-points-v2" bindings */
178	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
179	if (ret) {
180		if (ret != -ENOENT)
181			goto out_put_clk;
182
183		/*
184		 * operating-points-v2 not supported; fall back to the old method of
185		 * finding shared OPPs for backward compatibility if the
186		 * platform hasn't set sharing CPUs.
187		 */
188		if (dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus))
189			fallback = true;
190	}
191
192	/*
193	 * OPP layer will be taking care of regulators now, but it needs to know
194	 * the name of the regulator first.
195	 */
196	name = find_supply_name(cpu_dev);
197	if (name) {
198		opp_table = dev_pm_opp_set_regulators(cpu_dev, &name, 1);
199		if (IS_ERR(opp_table)) {
200			ret = PTR_ERR(opp_table);
201			dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
202				policy->cpu, ret);
203			goto out_put_clk;
204		}
205	}
206
207	/*
208	 * Initialize OPP tables for all policy->cpus. They will be shared by
209	 * all CPUs which have marked their CPUs shared with OPP bindings.
210	 *
211	 * For platforms not using operating-points-v2 bindings, we do this
212	 * before updating policy->cpus. Otherwise, we will end up creating
213	 * duplicate OPPs for policy->cpus.
214	 *
215	 * OPPs might be populated at runtime, don't check for error here
216	 */
217	dev_pm_opp_of_cpumask_add_table(policy->cpus);
218
219	/*
220	 * But we need the OPP table to function, so if it is not there let's
221	 * give platform code a chance to provide it for us.
222	 */
223	ret = dev_pm_opp_get_opp_count(cpu_dev);
224	if (ret <= 0) {
225		dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
226		ret = -EPROBE_DEFER;
227		goto out_free_opp;
228	}
229
230	if (fallback) {
231		cpumask_setall(policy->cpus);
232
233		/*
234		 * OPP tables are initialized only for policy->cpu, do it for
235		 * others as well.
236		 */
237		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
238		if (ret)
239			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
240				__func__, ret);
241	}
242
243	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
244	if (!priv) {
245		ret = -ENOMEM;
246		goto out_free_opp;
247	}
248
249	priv->reg_name = name;
250	priv->opp_table = opp_table;
251
252	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
253	if (ret) {
254		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
255		goto out_free_priv;
256	}
257
258	priv->cpu_dev = cpu_dev;
259	policy->driver_data = priv;
260	policy->clk = cpu_clk;
261	policy->freq_table = freq_table;
262
263	policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;
264
265	/* Support turbo/boost mode */
266	if (policy_has_boost_freq(policy)) {
267		/* This gets disabled by core on driver unregister */
268		ret = cpufreq_enable_boost_support();
269		if (ret)
270			goto out_free_cpufreq_table;
271		cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
272	}
273
274	transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
275	if (!transition_latency)
276		transition_latency = CPUFREQ_ETERNAL;
277
278	policy->cpuinfo.transition_latency = transition_latency;
279	policy->dvfs_possible_from_any_cpu = true;
280
281	return 0;
282
283out_free_cpufreq_table:
284	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
285out_free_priv:
286	kfree(priv);
287out_free_opp:
288	dev_pm_opp_of_cpumask_remove_table(policy->cpus);
289	if (name)
290		dev_pm_opp_put_regulators(opp_table);
291out_put_clk:
292	clk_put(cpu_clk);
293
294	return ret;
295}
296
297static int cpufreq_exit(struct cpufreq_policy *policy)
298{
299	struct private_data *priv = policy->driver_data;
300
301	cpufreq_cooling_unregister(priv->cdev);
302	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
303	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
304	if (priv->reg_name)
305		dev_pm_opp_put_regulators(priv->opp_table);
306
307	clk_put(policy->clk);
308	kfree(priv);
309
310	return 0;
311}
312
313static void cpufreq_ready(struct cpufreq_policy *policy)
314{
315	struct private_data *priv = policy->driver_data;
316
317	priv->cdev = of_cpufreq_cooling_register(policy);
318}
319
320static struct cpufreq_driver dt_cpufreq_driver = {
321	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
322	.verify = cpufreq_generic_frequency_table_verify,
323	.target_index = set_target,
324	.get = cpufreq_generic_get,
325	.init = cpufreq_init,
326	.exit = cpufreq_exit,
327	.ready = cpufreq_ready,
328	.name = "cpufreq-dt",
329	.attr = cpufreq_dt_attr,
330	.suspend = cpufreq_generic_suspend,
331};
332
333static int dt_cpufreq_probe(struct platform_device *pdev)
334{
335	struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
336	int ret;
337
338	/*
339	 * All per-cluster (CPUs sharing clock/voltages) initialization is done
340	 * from ->init(). In probe(), we just need to make sure that clk and
341	 * regulators are available. Else defer probe and retry.
342	 *
343	 * FIXME: Is checking this only for CPU0 sufficient ?
344	 */
345	ret = resources_available();
346	if (ret)
347		return ret;
348
349	if (data && data->have_governor_per_policy)
350		dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
351
352	ret = cpufreq_register_driver(&dt_cpufreq_driver);
353	if (ret)
354		dev_err(&pdev->dev, "failed register driver: %d\n", ret);
355
356	return ret;
357}
358
359static int dt_cpufreq_remove(struct platform_device *pdev)
360{
361	cpufreq_unregister_driver(&dt_cpufreq_driver);
362	return 0;
363}
364
365static struct platform_driver dt_cpufreq_platdrv = {
366	.driver = {
367		.name	= "cpufreq-dt",
368	},
369	.probe		= dt_cpufreq_probe,
370	.remove		= dt_cpufreq_remove,
371};
372module_platform_driver(dt_cpufreq_platdrv);
373
374MODULE_ALIAS("platform:cpufreq-dt");
375MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
376MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
377MODULE_DESCRIPTION("Generic cpufreq driver");
378MODULE_LICENSE("GPL");