v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * devfreq_cooling: Thermal cooling device implementation for devices using
  4 *                  devfreq
  5 *
  6 * Copyright (C) 2014-2015 ARM Limited
  7 *
  8 * TODO:
  9 *    - If OPPs are added or removed after devfreq cooling has
 10 *      registered, the devfreq cooling won't react to it.
 11 */
 12
 13#include <linux/devfreq.h>
 14#include <linux/devfreq_cooling.h>
 15#include <linux/energy_model.h>
 16#include <linux/export.h>
 17#include <linux/slab.h>
 18#include <linux/pm_opp.h>
 19#include <linux/pm_qos.h>
 20#include <linux/thermal.h>
 21#include <linux/units.h>
 22
 23#include "thermal_trace.h"
 24
 25#define SCALE_ERROR_MITIGATION	100
 26
 27/**
 28 * struct devfreq_cooling_device - Devfreq cooling device
 30 * @cdev:	Pointer to associated thermal cooling device.
 31 * @cooling_ops: devfreq callbacks to thermal cooling device ops
 32 * @devfreq:	Pointer to associated devfreq device.
 33 * @cooling_state:	Current cooling state.
 34 * @freq_table:	Pointer to a table with the frequencies sorted in descending
 35 *		order.  You can index the table by cooling device state
 36 * @max_state:	It is the last index, that is, one less than the number of
 37 *		OPPs.
 38 * @power_ops:	Pointer to devfreq_cooling_power, a more precise power model.
 39 * @res_util:	Resource utilization scaling factor for the power.
 40 *		It is multiplied by 100 to minimize the error. It is used
 41 *		for estimation of the power budget instead of using
 42 *		'utilization' (which is	'busy_time' / 'total_time').
 43 *		The 'res_util' range is from 100 to power * 100	for the
 44 *		corresponding 'state'.
 45 * @capped_state:	index of the cooling state within the dynamic power budget
 46 * @req_max_freq:	PM QoS request for limiting the maximum frequency
 47 *			of the devfreq device.
 48 * @em_pd:		Energy Model for the associated Devfreq device
 49 */
 50struct devfreq_cooling_device {
 51	struct thermal_cooling_device *cdev;
 52	struct thermal_cooling_device_ops cooling_ops;
 53	struct devfreq *devfreq;
 54	unsigned long cooling_state;
 55	u32 *freq_table;
 56	size_t max_state;
 57	struct devfreq_cooling_power *power_ops;
 58	u32 res_util;
 59	int capped_state;
 60	struct dev_pm_qos_request req_max_freq;
 61	struct em_perf_domain *em_pd;
 62};
 63
 64static int devfreq_cooling_get_max_state(struct thermal_cooling_device *cdev,
 65					 unsigned long *state)
 66{
 67	struct devfreq_cooling_device *dfc = cdev->devdata;
 68
 69	*state = dfc->max_state;
 70
 71	return 0;
 72}
 73
 74static int devfreq_cooling_get_cur_state(struct thermal_cooling_device *cdev,
 75					 unsigned long *state)
 76{
 77	struct devfreq_cooling_device *dfc = cdev->devdata;
 78
 79	*state = dfc->cooling_state;
 80
 81	return 0;
 82}
 83
 84static int devfreq_cooling_set_cur_state(struct thermal_cooling_device *cdev,
 85					 unsigned long state)
 86{
 87	struct devfreq_cooling_device *dfc = cdev->devdata;
 88	struct devfreq *df = dfc->devfreq;
 89	struct device *dev = df->dev.parent;
 90	struct em_perf_state *table;
 91	unsigned long freq;
 92	int perf_idx;
 93
 94	if (state == dfc->cooling_state)
 95		return 0;
 96
 97	dev_dbg(dev, "Setting cooling state %lu\n", state);
 98
 99	if (state > dfc->max_state)
100		return -EINVAL;
101
102	if (dfc->em_pd) {
103		perf_idx = dfc->max_state - state;
104
105		rcu_read_lock();
106		table = em_perf_state_from_pd(dfc->em_pd);
107		freq = table[perf_idx].frequency * 1000;
108		rcu_read_unlock();
109	} else {
110		freq = dfc->freq_table[state];
111	}
112
113	dev_pm_qos_update_request(&dfc->req_max_freq,
114				  DIV_ROUND_UP(freq, HZ_PER_KHZ));
115
116	dfc->cooling_state = state;
117
118	return 0;
119}
120
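A short illustration of the state handling above (hypothetical OPP set, not taken from a real platform):

/*
 * Hedged example, assuming an EM with nr_perf_states == 5 (so max_state == 4)
 * and frequencies {200, 400, 600, 800, 1000} MHz sorted ascending:
 *
 *   cooling state 0 -> perf_idx 4 -> QoS max frequency 1000000 kHz (no limit)
 *   cooling state 1 -> perf_idx 3 -> QoS max frequency  800000 kHz
 *   cooling state 4 -> perf_idx 0 -> QoS max frequency  200000 kHz
 *
 * table[].frequency is in kHz, hence the * 1000 to get Hz and the
 * DIV_ROUND_UP(freq, HZ_PER_KHZ) to convert back for the PM QoS request.
 */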
121/**
122 * get_perf_idx() - get the performance index corresponding to a frequency
123 * @em_pd:	Pointer to device's Energy Model
124 * @freq:	frequency in kHz
125 *
126 * Return: the performance index associated with the @freq, or
127 * -EINVAL if it wasn't found.
128 */
129static int get_perf_idx(struct em_perf_domain *em_pd, unsigned long freq)
130{
131	struct em_perf_state *table;
132	int i, idx = -EINVAL;
133
134	rcu_read_lock();
135	table = em_perf_state_from_pd(em_pd);
136	for (i = 0; i < em_pd->nr_perf_states; i++) {
137		if (table[i].frequency != freq)
138			continue;
139
140		idx = i;
141		break;
142	}
143	rcu_read_unlock();
144
145	return idx;
146}
147
148static unsigned long get_voltage(struct devfreq *df, unsigned long freq)
149{
150	struct device *dev = df->dev.parent;
151	unsigned long voltage;
152	struct dev_pm_opp *opp;
153
154	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
155	if (PTR_ERR(opp) == -ERANGE)
156		opp = dev_pm_opp_find_freq_exact(dev, freq, false);
157
158	if (IS_ERR(opp)) {
159		dev_err_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
160				    freq, PTR_ERR(opp));
161		return 0;
162	}
163
164	voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
165	dev_pm_opp_put(opp);
166
167	if (voltage == 0) {
168		dev_err_ratelimited(dev,
169				    "Failed to get voltage for frequency %lu\n",
170				    freq);
171	}
172
173	return voltage;
174}
175
176static void _normalize_load(struct devfreq_dev_status *status)
177{
178	if (status->total_time > 0xfffff) {
179		status->total_time >>= 10;
180		status->busy_time >>= 10;
181	}
182
183	status->busy_time <<= 10;
184	status->busy_time /= status->total_time ? : 1;
185
186	status->busy_time = status->busy_time ? : 1;
187	status->total_time = 1024;
188}
189
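A worked example of the normalization above (hypothetical sample values):

/*
 * Hypothetical devfreq sample: busy_time = 30 ms, total_time = 100 ms.
 * The function rescales the pair so that total_time becomes 1024 and
 * busy_time becomes the load as a fraction of 1024:
 *
 *   busy_time  = (30 << 10) / 100 = 307      (~30% load)
 *   total_time = 1024
 *
 * The initial >> 10 only triggers when total_time exceeds 0xfffff, to keep
 * the subsequent << 10 from overflowing.
 */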
190static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cdev,
191					       u32 *power)
192{
193	struct devfreq_cooling_device *dfc = cdev->devdata;
194	struct devfreq *df = dfc->devfreq;
195	struct devfreq_dev_status status;
196	struct em_perf_state *table;
197	unsigned long state;
198	unsigned long freq;
199	unsigned long voltage;
200	int res, perf_idx;
201
202	mutex_lock(&df->lock);
203	status = df->last_status;
204	mutex_unlock(&df->lock);
205
206	freq = status.current_frequency;
207
208	if (dfc->power_ops && dfc->power_ops->get_real_power) {
209		voltage = get_voltage(df, freq);
210		if (voltage == 0) {
211			res = -EINVAL;
212			goto fail;
213		}
214
215		res = dfc->power_ops->get_real_power(df, power, freq, voltage);
216		if (!res) {
217			state = dfc->max_state - dfc->capped_state;
218
219			/* Convert EM power into milli-Watts first */
220			rcu_read_lock();
221			table = em_perf_state_from_pd(dfc->em_pd);
222			dfc->res_util = table[state].power;
223			rcu_read_unlock();
224
225			dfc->res_util /= MICROWATT_PER_MILLIWATT;
226
227			dfc->res_util *= SCALE_ERROR_MITIGATION;
228
229			if (*power > 1)
230				dfc->res_util /= *power;
231		} else {
232			goto fail;
233		}
234	} else {
235		/* Energy Model frequencies are in kHz */
236		perf_idx = get_perf_idx(dfc->em_pd, freq / 1000);
237		if (perf_idx < 0) {
238			res = -EAGAIN;
239			goto fail;
240		}
241
242		_normalize_load(&status);
243
244		/* Convert EM power into milli-Watts first */
245		rcu_read_lock();
246		table = em_perf_state_from_pd(dfc->em_pd);
247		*power = table[perf_idx].power;
248		rcu_read_unlock();
249
250		*power /= MICROWATT_PER_MILLIWATT;
251		/* Scale power for utilization */
252		*power *= status.busy_time;
253		*power >>= 10;
254	}
255
256	trace_thermal_power_devfreq_get_power(cdev, &status, freq, *power);
257
258	return 0;
259fail:
260	/* It is safe to set max in this case */
261	dfc->res_util = SCALE_ERROR_MITIGATION;
262	return res;
263}
264
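The res_util factor computed above rescales a thermal power budget from the driver's measured scale to the Energy Model scale; a sketch with hypothetical numbers:

/*
 * Hypothetical numbers: at the currently capped state the EM predicts
 * 1500000 uW (1500 mW) while get_real_power() reports 1000 mW, so
 *
 *   res_util = 1500 * SCALE_ERROR_MITIGATION / 1000 = 150
 *
 * devfreq_cooling_power2state() then maps a 500 mW budget to
 * est_power = 500 * 150 / 100 = 750 mW on the EM scale before walking
 * the EM table.
 */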
265static int devfreq_cooling_state2power(struct thermal_cooling_device *cdev,
266				       unsigned long state, u32 *power)
267{
268	struct devfreq_cooling_device *dfc = cdev->devdata;
269	struct em_perf_state *table;
270	int perf_idx;
271
272	if (state > dfc->max_state)
273		return -EINVAL;
274
275	perf_idx = dfc->max_state - state;
276
277	rcu_read_lock();
278	table = em_perf_state_from_pd(dfc->em_pd);
279	*power = table[perf_idx].power;
280	rcu_read_unlock();
281
282	*power /= MICROWATT_PER_MILLIWATT;
283
284	return 0;
285}
286
287static int devfreq_cooling_power2state(struct thermal_cooling_device *cdev,
288				       u32 power, unsigned long *state)
289{
290	struct devfreq_cooling_device *dfc = cdev->devdata;
291	struct devfreq *df = dfc->devfreq;
292	struct devfreq_dev_status status;
293	unsigned long freq, em_power_mw;
294	struct em_perf_state *table;
295	s32 est_power;
296	int i;
297
298	mutex_lock(&df->lock);
299	status = df->last_status;
300	mutex_unlock(&df->lock);
301
302	freq = status.current_frequency;
303
304	if (dfc->power_ops && dfc->power_ops->get_real_power) {
305		/* Scale for resource utilization */
306		est_power = power * dfc->res_util;
307		est_power /= SCALE_ERROR_MITIGATION;
308	} else {
309		/* Scale dynamic power for utilization */
310		_normalize_load(&status);
311		est_power = power << 10;
312		est_power /= status.busy_time;
313	}
314
315	/*
316	 * Find the first cooling state that is within the power
317	 * budget. The EM power table is sorted ascending.
318	 */
319	rcu_read_lock();
320	table = em_perf_state_from_pd(dfc->em_pd);
321	for (i = dfc->max_state; i > 0; i--) {
322		/* Convert EM power to milli-Watts to make safe comparison */
323		em_power_mw = table[i].power;
324		em_power_mw /= MICROWATT_PER_MILLIWATT;
325		if (est_power >= em_power_mw)
326			break;
327	}
328	rcu_read_unlock();
329
330	*state = dfc->max_state - i;
331	dfc->capped_state = *state;
332
333	trace_thermal_power_devfreq_limit(cdev, freq, *state, power);
334	return 0;
335}
336
337/**
338 * devfreq_cooling_gen_tables() - Generate frequency table.
339 * @dfc:	Pointer to devfreq cooling device.
340 * @num_opps:	Number of OPPs
341 *
342 * Generate a frequency table which holds the frequencies in descending
343 * order.  That way it is indexed by cooling device state. This is for
344 * compatibility with drivers which do not register an Energy Model.
345 *
346 * Return: 0 on success, negative error code on failure.
347 */
348static int devfreq_cooling_gen_tables(struct devfreq_cooling_device *dfc,
349				      int num_opps)
350{
351	struct devfreq *df = dfc->devfreq;
352	struct device *dev = df->dev.parent;
353	unsigned long freq;
354	int i;
355
356	dfc->freq_table = kcalloc(num_opps, sizeof(*dfc->freq_table),
357			     GFP_KERNEL);
358	if (!dfc->freq_table)
359		return -ENOMEM;
360
361	for (i = 0, freq = ULONG_MAX; i < num_opps; i++, freq--) {
362		struct dev_pm_opp *opp;
363
364		opp = dev_pm_opp_find_freq_floor(dev, &freq);
365		if (IS_ERR(opp)) {
366			kfree(dfc->freq_table);
367			return PTR_ERR(opp);
368		}
369
370		dev_pm_opp_put(opp);
371		dfc->freq_table[i] = freq;
372	}
373
374	return 0;
375}
376
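An example of the resulting table layout (hypothetical OPP set):

/*
 * With OPPs at 200, 400, 600 and 800 MHz, the loop above walks
 * dev_pm_opp_find_freq_floor() down from ULONG_MAX and produces
 *
 *   freq_table = { 800000000, 600000000, 400000000, 200000000 }
 *
 * so cooling state 0 selects the highest OPP, state 3 the lowest, and the
 * caller sets max_state = 3.
 */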
377/**
378 * of_devfreq_cooling_register_power() - Register devfreq cooling device,
379 *                                      with OF and power information.
380 * @np:	Pointer to OF device_node.
381 * @df:	Pointer to devfreq device.
382 * @dfc_power:	Pointer to devfreq_cooling_power.
383 *
384 * Register a devfreq cooling device.  The available OPPs must be
385 * registered on the device.
386 *
387 * If @dfc_power is provided, the cooling device is registered with the
388 * power extensions.  For the power extensions to work correctly,
389 * devfreq should use the simple_ondemand governor, other governors
390 * are not currently supported.
391 */
392struct thermal_cooling_device *
393of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
394				  struct devfreq_cooling_power *dfc_power)
395{
396	struct thermal_cooling_device *cdev;
397	struct device *dev = df->dev.parent;
398	struct devfreq_cooling_device *dfc;
399	struct em_perf_domain *em;
400	struct thermal_cooling_device_ops *ops;
401	char *name;
402	int err, num_opps;
403
404
405	dfc = kzalloc(sizeof(*dfc), GFP_KERNEL);
406	if (!dfc)
407		return ERR_PTR(-ENOMEM);
408
409	dfc->devfreq = df;
410
411	ops = &dfc->cooling_ops;
412	ops->get_max_state = devfreq_cooling_get_max_state;
413	ops->get_cur_state = devfreq_cooling_get_cur_state;
414	ops->set_cur_state = devfreq_cooling_set_cur_state;
415
416	em = em_pd_get(dev);
417	if (em && !em_is_artificial(em)) {
418		dfc->em_pd = em;
419		ops->get_requested_power =
420			devfreq_cooling_get_requested_power;
421		ops->state2power = devfreq_cooling_state2power;
422		ops->power2state = devfreq_cooling_power2state;
423
424		dfc->power_ops = dfc_power;
425
426		num_opps = em_pd_nr_perf_states(dfc->em_pd);
427	} else {
428		/* Backward compatibility for drivers which do not use IPA */
429		dev_dbg(dev, "missing proper EM for cooling device\n");
430
431		num_opps = dev_pm_opp_get_opp_count(dev);
432
433		err = devfreq_cooling_gen_tables(dfc, num_opps);
434		if (err)
435			goto free_dfc;
436	}
437
438	if (num_opps <= 0) {
439		err = -EINVAL;
440		goto free_dfc;
441	}
442
443	/* max_state is an index, not a counter */
444	dfc->max_state = num_opps - 1;
445
446	err = dev_pm_qos_add_request(dev, &dfc->req_max_freq,
447				     DEV_PM_QOS_MAX_FREQUENCY,
448				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
449	if (err < 0)
450		goto free_table;
451
452	err = -ENOMEM;
453	name = kasprintf(GFP_KERNEL, "devfreq-%s", dev_name(dev));
454	if (!name)
455		goto remove_qos_req;
456
457	cdev = thermal_of_cooling_device_register(np, name, dfc, ops);
458	kfree(name);
459
460	if (IS_ERR(cdev)) {
461		err = PTR_ERR(cdev);
462		dev_err(dev,
463			"Failed to register devfreq cooling device (%d)\n",
464			err);
465		goto remove_qos_req;
466	}
467
468	dfc->cdev = cdev;
469
470	return cdev;
471
472remove_qos_req:
473	dev_pm_qos_remove_request(&dfc->req_max_freq);
474free_table:
475	kfree(dfc->freq_table);
476free_dfc:
477	kfree(dfc);
478
479	return ERR_PTR(err);
480}
481EXPORT_SYMBOL_GPL(of_devfreq_cooling_register_power);
482
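A minimal registration sketch, assuming a hypothetical driver (my_profile, my_get_real_power, my_power and my_probe are illustrative names, not part of this file) whose OPPs are already registered:

static struct devfreq_cooling_power my_power = {
	.get_real_power = my_get_real_power,	/* optional driver hook */
};

static int my_probe(struct platform_device *pdev)
{
	struct thermal_cooling_device *cdev;
	struct devfreq *df;

	/* The power extensions currently assume the simple_ondemand governor. */
	df = devm_devfreq_add_device(&pdev->dev, &my_profile,
				     DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
	if (IS_ERR(df))
		return PTR_ERR(df);

	cdev = of_devfreq_cooling_register_power(pdev->dev.of_node, df,
						 &my_power);
	return PTR_ERR_OR_ZERO(cdev);
}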
483/**
484 * of_devfreq_cooling_register() - Register devfreq cooling device,
485 *                                with OF information.
486 * @np: Pointer to OF device_node.
487 * @df: Pointer to devfreq device.
488 */
489struct thermal_cooling_device *
490of_devfreq_cooling_register(struct device_node *np, struct devfreq *df)
491{
492	return of_devfreq_cooling_register_power(np, df, NULL);
493}
494EXPORT_SYMBOL_GPL(of_devfreq_cooling_register);
495
496/**
497 * devfreq_cooling_register() - Register devfreq cooling device.
498 * @df: Pointer to devfreq device.
499 */
500struct thermal_cooling_device *devfreq_cooling_register(struct devfreq *df)
501{
502	return of_devfreq_cooling_register(NULL, df);
503}
504EXPORT_SYMBOL_GPL(devfreq_cooling_register);
505
506/**
507 * devfreq_cooling_em_register() - Register devfreq cooling device with
508 *		power information and automatically register Energy Model (EM)
509 * @df:		Pointer to devfreq device.
510 * @dfc_power:	Pointer to devfreq_cooling_power.
511 *
512 * Register a devfreq cooling device and automatically register EM. The
513 * available OPPs must be registered for the device.
514 *
515 * If @dfc_power is provided, the cooling device is registered with the
516 * power extensions. It uses the simple Energy Model, which requires the
517 * "dynamic-power-coefficient" devicetree property. To avoid breaking drivers
518 * which lack that DT property, the function won't bail out when the EM
519 * registration fails. The cooling device will still be registered if everything
520 * else is OK.
521 */
522struct thermal_cooling_device *
523devfreq_cooling_em_register(struct devfreq *df,
524			    struct devfreq_cooling_power *dfc_power)
525{
526	struct thermal_cooling_device *cdev;
527	struct device *dev;
528	int ret;
529
530	if (IS_ERR_OR_NULL(df))
531		return ERR_PTR(-EINVAL);
532
533	dev = df->dev.parent;
534
535	ret = dev_pm_opp_of_register_em(dev, NULL);
536	if (ret)
537		dev_dbg(dev, "Unable to register EM for devfreq cooling device (%d)\n",
538			ret);
539
540	cdev = of_devfreq_cooling_register_power(dev->of_node, df, dfc_power);
541
542	if (IS_ERR_OR_NULL(cdev))
543		em_dev_unregister_perf_domain(dev);
544
545	return cdev;
546}
547EXPORT_SYMBOL_GPL(devfreq_cooling_em_register);
548
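A sketch of how registration and teardown pair up (hypothetical helper names; assumes the device tree carries "dynamic-power-coefficient" so the simple EM can be built):

static struct thermal_cooling_device *my_cdev;	/* hypothetical */

static int my_cooling_init(struct devfreq *df)
{
	my_cdev = devfreq_cooling_em_register(df, &my_power);
	return PTR_ERR_OR_ZERO(my_cdev);
}

static void my_cooling_exit(void)
{
	/* Also drops the Energy Model registered by devfreq_cooling_em_register(). */
	devfreq_cooling_unregister(my_cdev);
}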
549/**
550 * devfreq_cooling_unregister() - Unregister devfreq cooling device.
551 * @cdev: Pointer to devfreq cooling device to unregister.
552 *
553 * Unregisters devfreq cooling device and related Energy Model if it was
554 * present.
555 */
556void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
557{
558	struct devfreq_cooling_device *dfc;
559	struct device *dev;
560
561	if (IS_ERR_OR_NULL(cdev))
562		return;
563
564	dfc = cdev->devdata;
565	dev = dfc->devfreq->dev.parent;
566
567	thermal_cooling_device_unregister(dfc->cdev);
568	dev_pm_qos_remove_request(&dfc->req_max_freq);
569
570	em_dev_unregister_perf_domain(dev);
571
572	kfree(dfc->freq_table);
573	kfree(dfc);
574}
575EXPORT_SYMBOL_GPL(devfreq_cooling_unregister);
v4.10.11
 
  1/*
  2 * devfreq_cooling: Thermal cooling device implementation for devices using
  3 *                  devfreq
  4 *
  5 * Copyright (C) 2014-2015 ARM Limited
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 *
 11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 12 * kind, whether express or implied; without even the implied warranty
 13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 14 * GNU General Public License for more details.
 15 *
 16 * TODO:
 17 *    - If OPPs are added or removed after devfreq cooling has
 18 *      registered, the devfreq cooling won't react to it.
 19 */
 20
 21#include <linux/devfreq.h>
 22#include <linux/devfreq_cooling.h>
 23#include <linux/export.h>
 24#include <linux/slab.h>
 25#include <linux/pm_opp.h>
 26#include <linux/thermal.h>
 27
 28#include <trace/events/thermal.h>
 29
 30static DEFINE_MUTEX(devfreq_lock);
 31static DEFINE_IDR(devfreq_idr);
 32
 33/**
 34 * struct devfreq_cooling_device - Devfreq cooling device
 35 * @id:		unique integer value corresponding to each
 36 *		devfreq_cooling_device registered.
 37 * @cdev:	Pointer to associated thermal cooling device.
 38 * @devfreq:	Pointer to associated devfreq device.
 39 * @cooling_state:	Current cooling state.
 40 * @power_table:	Pointer to table with maximum power draw for each
 41 *			cooling state. State is the index into the table, and
 42 *			the power is in mW.
 43 * @freq_table:	Pointer to a table with the frequencies sorted in descending
 44 *		order.  You can index the table by cooling device state
 45 * @freq_table_size:	Size of the @freq_table and @power_table
 46 * @power_ops:	Pointer to devfreq_cooling_power, used to generate the
 47 *		@power_table.
 48 */
 49struct devfreq_cooling_device {
 50	int id;
 51	struct thermal_cooling_device *cdev;
 52	struct devfreq *devfreq;
 53	unsigned long cooling_state;
 54	u32 *power_table;
 55	u32 *freq_table;
 56	size_t freq_table_size;
 57	struct devfreq_cooling_power *power_ops;
 58};
 59
 60/**
 61 * get_idr - function to get a unique id.
 62 * @idr: struct idr * handle used to create a id.
 63 * @id: int * value generated by this function.
 64 *
 65 * This function will populate @id with a unique
 66 * id, using the idr API.
 67 *
 68 * Return: 0 on success, an error code on failure.
 69 */
 70static int get_idr(struct idr *idr, int *id)
 71{
 72	int ret;
 73
 74	mutex_lock(&devfreq_lock);
 75	ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
 76	mutex_unlock(&devfreq_lock);
 77	if (unlikely(ret < 0))
 78		return ret;
 79	*id = ret;
 80
 81	return 0;
 82}
 83
 84/**
 85 * release_idr - function to free the unique id.
 86 * @idr: struct idr * handle used for creating the id.
 87 * @id: int value representing the unique id.
 88 */
 89static void release_idr(struct idr *idr, int id)
 90{
 91	mutex_lock(&devfreq_lock);
 92	idr_remove(idr, id);
 93	mutex_unlock(&devfreq_lock);
 94}
 95
 96/**
 97 * partition_enable_opps() - disable all OPPs above a given state
 98 * @dfc:	Pointer to devfreq we are operating on
 99 * @cdev_state:	cooling device state we're setting
100 *
101 * Go through the OPPs of the device, enabling all OPPs until
102 * @cdev_state and disabling those frequencies above it.
103 */
104static int partition_enable_opps(struct devfreq_cooling_device *dfc,
105				 unsigned long cdev_state)
106{
107	int i;
108	struct device *dev = dfc->devfreq->dev.parent;
109
110	for (i = 0; i < dfc->freq_table_size; i++) {
111		struct dev_pm_opp *opp;
112		int ret = 0;
113		unsigned int freq = dfc->freq_table[i];
114		bool want_enable = i >= cdev_state ? true : false;
115
116		rcu_read_lock();
117		opp = dev_pm_opp_find_freq_exact(dev, freq, !want_enable);
118		rcu_read_unlock();
119
120		if (PTR_ERR(opp) == -ERANGE)
121			continue;
122		else if (IS_ERR(opp))
123			return PTR_ERR(opp);
124
125		if (want_enable)
126			ret = dev_pm_opp_enable(dev, freq);
127		else
128			ret = dev_pm_opp_disable(dev, freq);
129
130		if (ret)
131			return ret;
132	}
133
134	return 0;
135}
136
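A worked example of the partitioning above (hypothetical OPP set):

/*
 * With freq_table holding { 800, 600, 400, 200 } MHz and cdev_state == 2,
 * indices 0 and 1 (800 and 600 MHz) are disabled while indices 2 and 3
 * (400 and 200 MHz) stay enabled, so devfreq can no longer select the two
 * fastest OPPs.
 */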
137static int devfreq_cooling_get_max_state(struct thermal_cooling_device *cdev,
138					 unsigned long *state)
139{
140	struct devfreq_cooling_device *dfc = cdev->devdata;
141
142	*state = dfc->freq_table_size - 1;
143
144	return 0;
145}
146
147static int devfreq_cooling_get_cur_state(struct thermal_cooling_device *cdev,
148					 unsigned long *state)
149{
150	struct devfreq_cooling_device *dfc = cdev->devdata;
151
152	*state = dfc->cooling_state;
153
154	return 0;
155}
156
157static int devfreq_cooling_set_cur_state(struct thermal_cooling_device *cdev,
158					 unsigned long state)
159{
160	struct devfreq_cooling_device *dfc = cdev->devdata;
161	struct devfreq *df = dfc->devfreq;
162	struct device *dev = df->dev.parent;
163	int ret;
164
165	if (state == dfc->cooling_state)
166		return 0;
167
168	dev_dbg(dev, "Setting cooling state %lu\n", state);
169
170	if (state >= dfc->freq_table_size)
171		return -EINVAL;
172
173	ret = partition_enable_opps(dfc, state);
174	if (ret)
175		return ret;
176
177	dfc->cooling_state = state;
178
179	return 0;
180}
181
182/**
183 * freq_get_state() - get the cooling state corresponding to a frequency
184 * @dfc:	Pointer to devfreq cooling device
185 * @freq:	frequency in Hz
186 *
187 * Return: the cooling state associated with the @freq, or
188 * THERMAL_CSTATE_INVALID if it wasn't found.
189 */
190static unsigned long
191freq_get_state(struct devfreq_cooling_device *dfc, unsigned long freq)
192{
193	int i;
194
195	for (i = 0; i < dfc->freq_table_size; i++) {
196		if (dfc->freq_table[i] == freq)
197			return i;
198	}
199
200	return THERMAL_CSTATE_INVALID;
201}
202
203/**
204 * get_static_power() - calculate the static power
205 * @dfc:	Pointer to devfreq cooling device
206 * @freq:	Frequency in Hz
207 *
208 * Calculate the static power in milliwatts using the supplied
209 * get_static_power().  The current voltage is calculated using the
210 * OPP library.  If no get_static_power() was supplied, assume the
211 * static power is negligible.
212 */
213static unsigned long
214get_static_power(struct devfreq_cooling_device *dfc, unsigned long freq)
215{
216	struct devfreq *df = dfc->devfreq;
217	struct device *dev = df->dev.parent;
218	unsigned long voltage;
219	struct dev_pm_opp *opp;
220
221	if (!dfc->power_ops->get_static_power)
222		return 0;
223
224	rcu_read_lock();
225
226	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
227	if (IS_ERR(opp) && (PTR_ERR(opp) == -ERANGE))
228		opp = dev_pm_opp_find_freq_exact(dev, freq, false);
229
230	voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
231
232	rcu_read_unlock();
233
234	if (voltage == 0) {
235		dev_warn_ratelimited(dev,
236				     "Failed to get voltage for frequency %lu: %ld\n",
237				     freq, IS_ERR(opp) ? PTR_ERR(opp) : 0);
238		return 0;
239	}
240
241	return dfc->power_ops->get_static_power(df, voltage);
242}
243
244/**
245 * get_dynamic_power - calculate the dynamic power
246 * @dfc:	Pointer to devfreq cooling device
247 * @freq:	Frequency in Hz
248 * @voltage:	Voltage in millivolts
249 *
250 * Calculate the dynamic power in milliwatts consumed by the device at
251 * frequency @freq and voltage @voltage.  If the get_dynamic_power()
252 * was supplied as part of the devfreq_cooling_power struct, then that
253 * function is used.  Otherwise, a simple power model (Pdyn = Coeff *
254 * Voltage^2 * Frequency) is used.
255 */
256static unsigned long
257get_dynamic_power(struct devfreq_cooling_device *dfc, unsigned long freq,
258		  unsigned long voltage)
259{
260	u64 power;
261	u32 freq_mhz;
262	struct devfreq_cooling_power *dfc_power = dfc->power_ops;
263
264	if (dfc_power->get_dynamic_power)
265		return dfc_power->get_dynamic_power(dfc->devfreq, freq,
266						    voltage);
267
268	freq_mhz = freq / 1000000;
269	power = (u64)dfc_power->dyn_power_coeff * freq_mhz * voltage * voltage;
270	do_div(power, 1000000000);
271
272	return power;
273}
274
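A worked example of the fallback dynamic power model (hypothetical coefficient):

/*
 * With dyn_power_coeff == 500, freq == 800000000 Hz (800 MHz) and
 * voltage == 900 mV:
 *
 *   power = 500 * 800 * 900 * 900 / 1000000000 = 324 mW
 */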
275static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cdev,
276					       struct thermal_zone_device *tz,
277					       u32 *power)
278{
279	struct devfreq_cooling_device *dfc = cdev->devdata;
280	struct devfreq *df = dfc->devfreq;
281	struct devfreq_dev_status *status = &df->last_status;
282	unsigned long state;
283	unsigned long freq = status->current_frequency;
284	u32 dyn_power, static_power;
285
286	/* Get dynamic power for state */
287	state = freq_get_state(dfc, freq);
288	if (state == THERMAL_CSTATE_INVALID)
289		return -EAGAIN;
290
291	dyn_power = dfc->power_table[state];
292
293	/* Scale dynamic power for utilization */
294	dyn_power = (dyn_power * status->busy_time) / status->total_time;
295
296	/* Get static power */
297	static_power = get_static_power(dfc, freq);
298
299	trace_thermal_power_devfreq_get_power(cdev, status, freq, dyn_power,
300					      static_power);
301
302	*power = dyn_power + static_power;
303
304	return 0;
305}
306
307static int devfreq_cooling_state2power(struct thermal_cooling_device *cdev,
308				       struct thermal_zone_device *tz,
309				       unsigned long state,
310				       u32 *power)
311{
312	struct devfreq_cooling_device *dfc = cdev->devdata;
313	unsigned long freq;
314	u32 static_power;
315
316	if (state >= dfc->freq_table_size)
317		return -EINVAL;
318
319	freq = dfc->freq_table[state];
320	static_power = get_static_power(dfc, freq);
321
322	*power = dfc->power_table[state] + static_power;
323	return 0;
324}
325
326static int devfreq_cooling_power2state(struct thermal_cooling_device *cdev,
327				       struct thermal_zone_device *tz,
328				       u32 power, unsigned long *state)
329{
330	struct devfreq_cooling_device *dfc = cdev->devdata;
331	struct devfreq *df = dfc->devfreq;
332	struct devfreq_dev_status *status = &df->last_status;
333	unsigned long freq = status->current_frequency;
334	unsigned long busy_time;
335	s32 dyn_power;
336	u32 static_power;
337	int i;
338
339	static_power = get_static_power(dfc, freq);
340
341	dyn_power = power - static_power;
342	dyn_power = dyn_power > 0 ? dyn_power : 0;
343
344	/* Scale dynamic power for utilization */
345	busy_time = status->busy_time ?: 1;
346	dyn_power = (dyn_power * status->total_time) / busy_time;
347
348	/*
349	 * Find the first cooling state that is within the power
350	 * budget for dynamic power.
351	 */
352	for (i = 0; i < dfc->freq_table_size - 1; i++)
353		if (dyn_power >= dfc->power_table[i])
354			break;
355
356	*state = i;
357	trace_thermal_power_devfreq_limit(cdev, freq, *state, power);
358	return 0;
359}
360
361static struct thermal_cooling_device_ops devfreq_cooling_ops = {
362	.get_max_state = devfreq_cooling_get_max_state,
363	.get_cur_state = devfreq_cooling_get_cur_state,
364	.set_cur_state = devfreq_cooling_set_cur_state,
365};
366
367/**
368 * devfreq_cooling_gen_tables() - Generate power and freq tables.
369 * @dfc: Pointer to devfreq cooling device.
370 *
371 * Generate power and frequency tables: the power table holds the
372 * device's maximum power usage at each cooling state (OPP).  The
373 * static and dynamic power, using the appropriate voltage and
374 * frequency for the state, are acquired from the struct
375 * devfreq_cooling_power and summed to make the maximum power draw.
376 *
377 * The frequency table holds the frequencies in descending order.
378 * That way it is indexed by cooling device state.
379 *
380 * The tables are allocated, and the pointers are stored in @dfc.  They must
381 * be freed when unregistering the devfreq cooling device.
382 *
383 * Return: 0 on success, negative error code on failure.
384 */
385static int devfreq_cooling_gen_tables(struct devfreq_cooling_device *dfc)
386{
387	struct devfreq *df = dfc->devfreq;
388	struct device *dev = df->dev.parent;
389	int ret, num_opps;
390	unsigned long freq;
391	u32 *power_table = NULL;
392	u32 *freq_table;
393	int i;
394
395	num_opps = dev_pm_opp_get_opp_count(dev);
396
397	if (dfc->power_ops) {
398		power_table = kcalloc(num_opps, sizeof(*power_table),
399				      GFP_KERNEL);
400		if (!power_table)
401			return -ENOMEM;
402	}
403
404	freq_table = kcalloc(num_opps, sizeof(*freq_table),
405			     GFP_KERNEL);
406	if (!freq_table) {
407		ret = -ENOMEM;
408		goto free_power_table;
409	}
410
411	for (i = 0, freq = ULONG_MAX; i < num_opps; i++, freq--) {
412		unsigned long power_dyn, voltage;
413		struct dev_pm_opp *opp;
414
415		rcu_read_lock();
416
417		opp = dev_pm_opp_find_freq_floor(dev, &freq);
418		if (IS_ERR(opp)) {
419			rcu_read_unlock();
420			ret = PTR_ERR(opp);
421			goto free_tables;
422		}
423
424		voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
425
426		rcu_read_unlock();
427
428		if (dfc->power_ops) {
429			power_dyn = get_dynamic_power(dfc, freq, voltage);
430
431			dev_dbg(dev, "Dynamic power table: %lu MHz @ %lu mV: %lu = %lu mW\n",
432				freq / 1000000, voltage, power_dyn, power_dyn);
433
434			power_table[i] = power_dyn;
435		}
436
437		freq_table[i] = freq;
438	}
439
440	if (dfc->power_ops)
441		dfc->power_table = power_table;
442
443	dfc->freq_table = freq_table;
444	dfc->freq_table_size = num_opps;
445
446	return 0;
447
448free_tables:
449	kfree(freq_table);
450free_power_table:
451	kfree(power_table);
452
453	return ret;
454}
455
456/**
457 * of_devfreq_cooling_register_power() - Register devfreq cooling device,
458 *                                      with OF and power information.
459 * @np:	Pointer to OF device_node.
460 * @df:	Pointer to devfreq device.
461 * @dfc_power:	Pointer to devfreq_cooling_power.
462 *
463 * Register a devfreq cooling device.  The available OPPs must be
464 * registered on the device.
465 *
466 * If @dfc_power is provided, the cooling device is registered with the
467 * power extensions.  For the power extensions to work correctly,
468 * devfreq should use the simple_ondemand governor, other governors
469 * are not currently supported.
470 */
471struct thermal_cooling_device *
472of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
473				  struct devfreq_cooling_power *dfc_power)
474{
475	struct thermal_cooling_device *cdev;
476	struct devfreq_cooling_device *dfc;
477	char dev_name[THERMAL_NAME_LENGTH];
478	int err;
479
480	dfc = kzalloc(sizeof(*dfc), GFP_KERNEL);
481	if (!dfc)
482		return ERR_PTR(-ENOMEM);
483
484	dfc->devfreq = df;
485
486	if (dfc_power) {
487		dfc->power_ops = dfc_power;
488
489		devfreq_cooling_ops.get_requested_power =
490			devfreq_cooling_get_requested_power;
491		devfreq_cooling_ops.state2power = devfreq_cooling_state2power;
492		devfreq_cooling_ops.power2state = devfreq_cooling_power2state;
493	}
494
495	err = devfreq_cooling_gen_tables(dfc);
496	if (err)
497		goto free_dfc;
498
499	err = get_idr(&devfreq_idr, &dfc->id);
500	if (err)
501		goto free_tables;
502
503	snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d", dfc->id);
504
505	cdev = thermal_of_cooling_device_register(np, dev_name, dfc,
506						  &devfreq_cooling_ops);
507	if (IS_ERR(cdev)) {
508		err = PTR_ERR(cdev);
509		dev_err(df->dev.parent,
510			"Failed to register devfreq cooling device (%d)\n",
511			err);
512		goto release_idr;
513	}
514
515	dfc->cdev = cdev;
516
517	return cdev;
518
519release_idr:
520	release_idr(&devfreq_idr, dfc->id);
521free_tables:
522	kfree(dfc->power_table);
523	kfree(dfc->freq_table);
524free_dfc:
525	kfree(dfc);
526
527	return ERR_PTR(err);
528}
529EXPORT_SYMBOL_GPL(of_devfreq_cooling_register_power);
530
531/**
532 * of_devfreq_cooling_register() - Register devfreq cooling device,
533 *                                with OF information.
534 * @np: Pointer to OF device_node.
535 * @df: Pointer to devfreq device.
536 */
537struct thermal_cooling_device *
538of_devfreq_cooling_register(struct device_node *np, struct devfreq *df)
539{
540	return of_devfreq_cooling_register_power(np, df, NULL);
541}
542EXPORT_SYMBOL_GPL(of_devfreq_cooling_register);
543
544/**
545 * devfreq_cooling_register() - Register devfreq cooling device.
546 * @df: Pointer to devfreq device.
547 */
548struct thermal_cooling_device *devfreq_cooling_register(struct devfreq *df)
549{
550	return of_devfreq_cooling_register(NULL, df);
551}
552EXPORT_SYMBOL_GPL(devfreq_cooling_register);
553
554/**
555 * devfreq_cooling_unregister() - Unregister devfreq cooling device.
556 * @cdev: Pointer to devfreq cooling device to unregister.
557 */
558void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
559{
560	struct devfreq_cooling_device *dfc;
561
562	if (!cdev)
563		return;
564
565	dfc = cdev->devdata;
566
567	thermal_cooling_device_unregister(dfc->cdev);
568	release_idr(&devfreq_idr, dfc->id);
569	kfree(dfc->power_table);
570	kfree(dfc->freq_table);
571
572	kfree(dfc);
573}
574EXPORT_SYMBOL_GPL(devfreq_cooling_unregister);