/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/pm_opp.h>
#include <linux/of.h>
#include <linux/export.h>

/*
 * Internal data structure organization within the OPP layer library is as
 * follows:
 * dev_opp_list (root)
 *	|- device 1 (represents voltage domain 1)
 *	|	|- opp 1 (availability, freq, voltage)
 *	|	|- opp 2 ..
 *	...	...
 *	|	`- opp n ..
 *	|- device 2 (represents the next voltage domain)
 *	...
 *	`- device m (represents mth voltage domain)
 * device 1, 2.. are represented by the device_opp structure while each opp
 * is represented by the dev_pm_opp structure.
 */

/**
 * struct dev_pm_opp - Generic OPP description structure
 * @node:	opp list node. The nodes are maintained throughout the lifetime
 *		of boot. It is expected that only an optimal set of OPPs is
 *		added to the library by the SoC framework.
 *		RCU usage: opp list is traversed with RCU locks. node
 *		modification is possible at runtime, hence the modifications
 *		are protected by the dev_opp_list_lock for integrity.
 *		IMPORTANT: the opp nodes should be maintained in increasing
 *		order.
 * @available:	true/false - marks if this OPP is available or not
 * @rate:	Frequency in hertz
 * @u_volt:	Nominal voltage in microvolts corresponding to this OPP
 * @dev_opp:	points back to the device_opp struct this opp belongs to
 * @head:	RCU callback head used for deferred freeing
 *
 * This structure stores the OPP information for a given device.
 */
struct dev_pm_opp {
	struct list_head node;

	bool available;
	unsigned long rate;
	unsigned long u_volt;

	struct device_opp *dev_opp;
	struct rcu_head head;
};

/**
 * struct device_opp - Device opp structure
 * @node:	list node - contains the devices with OPPs that
 *		have been registered. Nodes once added are not modified in this
 *		list.
 *		RCU usage: nodes are not modified in the list of device_opp,
 *		however addition is possible and is secured by dev_opp_list_lock.
 * @dev:	device pointer
 * @head:	notifier head to notify the OPP availability changes.
 * @opp_list:	list of opps
 *
 * This is an internal data structure maintaining the link to opps attached to
 * a device. This structure is not meant to be shared with users as it is
 * meant for bookkeeping and is private to the OPP library.
 */
struct device_opp {
	struct list_head node;

	struct device *dev;
	struct srcu_notifier_head head;
	struct list_head opp_list;
};

/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opps it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);

/**
 * find_device_opp() - find device_opp struct using device pointer
 * @dev:	device pointer used to lookup device OPPs
 *
 * Search list of device OPPs for one containing matching device. Does an RCU
 * reader operation to grab the pointer needed.
 *
 * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: This function must be called under rcu_read_lock(). device_opp
 * is an RCU protected pointer. This means that device_opp is valid as long
 * as we are under RCU lock.
 */
static struct device_opp *find_device_opp(struct device *dev)
{
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
		if (tmp_dev_opp->dev == dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}

	return dev_opp;
}

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
 * @opp:	opp for which the voltage has to be returned
 *
 * Return voltage in micro volt corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long v = 0;

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		v = tmp_opp->u_volt;

	return v;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which the frequency has to be returned
 *
 * Return frequency in hertz corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long f = 0;

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
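
/*
 * Example: the accessors above are only safe while the RCU read-side section
 * that produced the opp pointer is still held.  A typical caller therefore
 * looks roughly like this (illustrative sketch; target_hz is hypothetical):
 *
 *	unsigned long freq = target_hz, volt;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *	if (!IS_ERR(opp)) {
 *		volt = dev_pm_opp_get_voltage(opp);
 *		freq = dev_pm_opp_get_freq(opp);
 *	}
 *	rcu_read_unlock();
 *
 * The opp pointer must not be dereferenced after rcu_read_unlock().
 */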

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
 * @dev:	device for which we do this operation
 *
 * This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 *
 * Locking: This function must be called under rcu_read_lock(). This function
 * internally references two RCU protected structures: device_opp and opp which
 * are safe as long as we are under a common RCU locked section.
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp;
	int count = 0;

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return r;
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available)
			count++;
	}

	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
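
/*
 * Example: a driver sizing a private table from the number of usable OPPs;
 * the count must be read under RCU protection (illustrative sketch):
 *
 *	int count;
 *
 *	rcu_read_lock();
 *	count = dev_pm_opp_get_opp_count(dev);
 *	rcu_read_unlock();
 *
 *	if (count <= 0)
 *		return count ? count : -ENODEV;
 */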

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev:		device for which we do this operation
 * @freq:		frequency to search for
 * @available:		true/false - match for available opp
 *
 * Searches for exact match in the opp list and returns pointer to the matching
 * opp if found, else returns ERR_PTR in case of error and should be handled
 * using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available=true, then the
 * match is for an exact frequency which is available in the stored OPP table.
 * If false, the match is for an exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
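
/*
 * Example: using the available modifier to check that a currently disabled
 * frequency is actually known before re-enabling it (illustrative sketch;
 * boost_freq is a value the caller obtained elsewhere):
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_exact(dev, boost_freq, false);
 *	rcu_read_unlock();
 *
 *	if (!IS_ERR(opp))
 *		dev_pm_opp_enable(dev, boost_freq);
 *
 * Note that dev_pm_opp_enable() is called only after rcu_read_unlock(),
 * since it may sleep.
 */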

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Returns matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
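
/*
 * Example: rounding a requested rate up to the nearest supported OPP and
 * reading back what is needed to program the hardware (illustrative sketch;
 * requested_hz is hypothetical and the regulator/clock steps are only
 * indicated):
 *
 *	unsigned long freq = requested_hz, volt;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *	if (IS_ERR(opp)) {
 *		rcu_read_unlock();
 *		return PTR_ERR(opp);
 *	}
 *	volt = dev_pm_opp_get_voltage(opp);
 *	rcu_read_unlock();
 *
 *	... raise the regulator to volt, then set the clock to freq ...
 */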

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Returns matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
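
/*
 * Example: iterating over all available OPPs from the highest frequency
 * down, a common way to populate driver-private tables (illustrative sketch):
 *
 *	unsigned long freq = ULONG_MAX;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	while (!IS_ERR(opp = dev_pm_opp_find_freq_floor(dev, &freq))) {
 *		... record freq and dev_pm_opp_get_voltage(opp) here ...
 *		freq--;		(continue below the OPP just found)
 *	}
 *	rcu_read_unlock();
 */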

/**
 * dev_pm_opp_add() - Add an OPP for a device
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	struct device_opp *dev_opp = NULL;
	struct dev_pm_opp *opp, *new_opp;
	struct list_head *head;

	/* allocate new OPP node */
	new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
		return -ENOMEM;
	}

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Check for existing list for 'dev' */
	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		/*
		 * Allocate a new device OPP table. In the infrequent case
		 * where a new device needs to be added, we pay this
		 * penalty.
		 */
		dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
		if (!dev_opp) {
			mutex_unlock(&dev_opp_list_lock);
			kfree(new_opp);
			dev_warn(dev,
				"%s: Unable to create device OPP structure\n",
				__func__);
			return -ENOMEM;
		}

		dev_opp->dev = dev;
		srcu_init_notifier_head(&dev_opp->head);
		INIT_LIST_HEAD(&dev_opp->opp_list);

		/* Secure the device list modification */
		list_add_rcu(&dev_opp->node, &dev_opp_list);
	}

	/* populate the opp table */
	new_opp->dev_opp = dev_opp;
	new_opp->rate = freq;
	new_opp->u_volt = u_volt;
	new_opp->available = true;

	/* Insert new OPP in order of increasing frequency */
	head = &dev_opp->opp_list;
	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
		if (new_opp->rate < opp->rate)
			break;
		else
			head = &opp->node;
	}

	list_add_rcu(&new_opp->node, head);
	mutex_unlock(&dev_opp_list_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
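
/*
 * Example: a platform or driver registering a small, statically known OPP
 * table at probe time (illustrative sketch; the numbers are made up):
 *
 *	dev_pm_opp_add(dev,  300000000,  950000);
 *	dev_pm_opp_add(dev,  600000000, 1050000);
 *	dev_pm_opp_add(dev, 1000000000, 1200000);
 *
 * Each call can fail individually (e.g. with -ENOMEM), so real code checks
 * the return values.  The calls must not be made under rcu_read_lock().
 */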

/**
 * opp_set_availability() - helper to set the availability of an opp
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation; opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int opp_set_availability(struct device *dev, unsigned long freq,
		bool availability_req)
{
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create OPP\n", __func__);
		return -ENOMEM;
	}

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
		if (dev == tmp_dev_opp->dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	kfree_rcu(opp, head);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
					 new_opp);

	return 0;

unlock:
	mutex_unlock(&dev_opp_list_lock);
	kfree(new_opp);
	return r;
}

/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it was temporarily made unavailable with
 * dev_pm_opp_disable.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);

/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
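
/*
 * Example: temporarily dropping the highest OPP, e.g. on a thermal event,
 * and restoring it later (illustrative sketch; max_freq is whatever the
 * caller tracked when the table was built):
 *
 *	dev_pm_opp_disable(dev, max_freq);
 *	...
 *	dev_pm_opp_enable(dev, max_freq);
 *
 * Both calls take dev_opp_list_lock and may sleep, so they must not be made
 * from atomic context or under rcu_read_lock().
 */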

#ifdef CONFIG_CPU_FREQ
/**
 * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	Cpufreq table returned back to caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * opp list is already initialized and ready for usage.
 *
 * This function allocates required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance such as freeing
 * the table as required.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
 * if no memory is available for the operation (table is not populated), returns
 * 0 if successful and table is populated.
 *
 * WARNING: It is important for callers to refresh their copy of the table if
 * any of the OPP modifying functions (dev_pm_opp_{add,enable,disable}) have
 * been invoked in the interim.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * To simplify the logic, we pretend we are an updater and hold the relevant
 * mutex here. Callers should ensure that this function is *NOT* called under
 * RCU protection or in contexts where mutex locking cannot be used.
 */
int dev_pm_opp_init_cpufreq_table(struct device *dev,
			    struct cpufreq_frequency_table **table)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *opp;
	struct cpufreq_frequency_table *freq_table;
	int i = 0;

	/* Pretend as if I am an updater */
	mutex_lock(&dev_opp_list_lock);

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		mutex_unlock(&dev_opp_list_lock);
		dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
			     (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
	if (!freq_table) {
		mutex_unlock(&dev_opp_list_lock);
		dev_warn(dev, "%s: Unable to allocate frequency table\n",
			__func__);
		return -ENOMEM;
	}

	list_for_each_entry(opp, &dev_opp->opp_list, node) {
		if (opp->available) {
			freq_table[i].driver_data = i;
			freq_table[i].frequency = opp->rate / 1000;
			i++;
		}
	}
	mutex_unlock(&dev_opp_list_lock);

	freq_table[i].driver_data = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*table = &freq_table[0];

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
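
/*
 * Example: a cpufreq driver building and releasing its frequency table from
 * the OPP library (illustrative sketch; cpu_dev is the CPU device the OPPs
 * were registered against):
 *
 *	struct cpufreq_frequency_table *freq_table;
 *	int ret;
 *
 *	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
 *	if (ret)
 *		return ret;
 *
 *	... hand freq_table to the cpufreq core ...
 *
 *	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 *
 * The table reflects only the OPPs available at the time of the call; it has
 * to be rebuilt if OPPs are added, enabled or disabled afterwards.
 */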

/**
 * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
 * @dev:	device for which we do this operation
 * @table:	table to free
 *
 * Free up the table allocated by dev_pm_opp_init_cpufreq_table
 */
void dev_pm_opp_free_cpufreq_table(struct device *dev,
				struct cpufreq_frequency_table **table)
{
	if (!table)
		return;

	kfree(*table);
	*table = NULL;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif		/* CONFIG_CPU_FREQ */

/**
 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
 * @dev:	device pointer used to lookup device OPPs.
 */
struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
	struct device_opp *dev_opp = find_device_opp(dev);

	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp); /* matching type */

	return &dev_opp->head;
}
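
/*
 * Example: subscribing to OPP_EVENT_ADD/ENABLE/DISABLE notifications
 * (illustrative sketch; my_opp_notify and my_opp_nb are hypothetical):
 *
 *	static int my_opp_notify(struct notifier_block *nb,
 *				 unsigned long event, void *data)
 *	{
 *		... data is the struct dev_pm_opp that changed ...
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_opp_nb = {
 *		.notifier_call = my_opp_notify,
 *	};
 *
 *	rcu_read_lock();
 *	nh = dev_pm_opp_get_notifier(dev);
 *	rcu_read_unlock();
 *	if (IS_ERR(nh))
 *		return PTR_ERR(nh);
 *	srcu_notifier_chain_register(nh, &my_opp_nb);
 */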

#ifdef CONFIG_OF
/**
 * of_init_opp_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup device OPPs.
 *
 * Register the initial OPP table with the OPP library for given device.
 */
int of_init_opp_table(struct device *dev)
{
	const struct property *prop;
	const __be32 *val;
	int nr;

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop)
		return -ENODEV;
	if (!prop->value)
		return -ENODATA;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP list\n", __func__);
		return -EINVAL;
	}

	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000;
		unsigned long volt = be32_to_cpup(val++);

		/*
		 * A failed add is not fatal, but we must still consume the
		 * tuple and decrement nr so the loop terminates.
		 */
		if (dev_pm_opp_add(dev, freq, volt))
			dev_warn(dev, "%s: Failed to add OPP %ld\n",
				 __func__, freq);
		nr -= 2;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(of_init_opp_table);
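
/*
 * Example: the matching device tree fragment, with each pair being
 * <frequency-kHz voltage-uV> as parsed above (illustrative sketch; the node
 * name and values are made up):
 *
 *	cpu@0 {
 *		...
 *		operating-points = <
 *			 300000  950000
 *			 600000 1050000
 *			1000000 1200000
 *		>;
 *	};
 *
 * and the corresponding probe-time call:
 *
 *	ret = of_init_opp_table(dev);
 *	if (ret)
 *		dev_err(dev, "failed to init OPP table: %d\n", ret);
 */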
#endif