Note: historical snapshot of the Linux OPP core (drivers/base/power/opp/core.c, circa v4.10); this file no longer exists in v6.2 after the OPP core rework.
   1/*
   2 * Generic OPP Interface
   3 *
   4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
   5 *	Nishanth Menon
   6 *	Romit Dasgupta
   7 *	Kevin Hilman
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 */
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/clk.h>
  17#include <linux/errno.h>
  18#include <linux/err.h>
  19#include <linux/slab.h>
  20#include <linux/device.h>
  21#include <linux/export.h>
  22#include <linux/regulator/consumer.h>
  23
  24#include "opp.h"
  25
/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);

/*
 * Assert (via lockdep) that the caller holds either the RCU read lock
 * (readers) or opp_table_lock (writers); one of the two is required to
 * safely traverse opp_tables and the per-table lists.
 */
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&opp_table_lock),		\
			 "Missing rcu_read_lock() or "			\
			 "opp_table_lock protection");			\
} while (0)
  42
  43static struct opp_device *_find_opp_dev(const struct device *dev,
  44					struct opp_table *opp_table)
  45{
  46	struct opp_device *opp_dev;
  47
  48	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
  49		if (opp_dev->dev == dev)
  50			return opp_dev;
  51
  52	return NULL;
  53}
  54
  55/**
  56 * _find_opp_table() - find opp_table struct using device pointer
  57 * @dev:	device pointer used to lookup OPP table
  58 *
  59 * Search OPP table for one containing matching device. Does a RCU reader
  60 * operation to grab the pointer needed.
  61 *
  62 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
  63 * -EINVAL based on type of error.
  64 *
  65 * Locking: For readers, this function must be called under rcu_read_lock().
  66 * opp_table is a RCU protected pointer, which means that opp_table is valid
  67 * as long as we are under RCU lock.
  68 *
  69 * For Writers, this function must be called with opp_table_lock held.
  70 */
  71struct opp_table *_find_opp_table(struct device *dev)
  72{
  73	struct opp_table *opp_table;
  74
  75	opp_rcu_lockdep_assert();
  76
  77	if (IS_ERR_OR_NULL(dev)) {
  78		pr_err("%s: Invalid parameters\n", __func__);
  79		return ERR_PTR(-EINVAL);
  80	}
  81
  82	list_for_each_entry_rcu(opp_table, &opp_tables, node)
  83		if (_find_opp_dev(dev, opp_table))
  84			return opp_table;
  85
  86	return ERR_PTR(-ENODEV);
  87}
  88
  89/**
  90 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
  91 * @opp:	opp for which voltage has to be returned for
  92 *
  93 * Return: voltage in micro volt corresponding to the opp, else
  94 * return 0
  95 *
  96 * This is useful only for devices with single power supply.
  97 *
  98 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  99 * protected pointer. This means that opp which could have been fetched by
 100 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 101 * under RCU lock. The pointer returned by the opp_find_freq family must be
 102 * used in the same section as the usage of this function with the pointer
 103 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 104 * pointer.
 105 */
 106unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
 107{
 108	struct dev_pm_opp *tmp_opp;
 109	unsigned long v = 0;
 110
 111	opp_rcu_lockdep_assert();
 112
 113	tmp_opp = rcu_dereference(opp);
 114	if (IS_ERR_OR_NULL(tmp_opp))
 115		pr_err("%s: Invalid parameters\n", __func__);
 116	else
 117		v = tmp_opp->supplies[0].u_volt;
 118
 119	return v;
 120}
 121EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
 122
 123/**
 124 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 125 * @opp:	opp for which frequency has to be returned for
 126 *
 127 * Return: frequency in hertz corresponding to the opp, else
 128 * return 0
 129 *
 130 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 131 * protected pointer. This means that opp which could have been fetched by
 132 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 133 * under RCU lock. The pointer returned by the opp_find_freq family must be
 134 * used in the same section as the usage of this function with the pointer
 135 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 136 * pointer.
 137 */
 138unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
 139{
 140	struct dev_pm_opp *tmp_opp;
 141	unsigned long f = 0;
 142
 143	opp_rcu_lockdep_assert();
 144
 145	tmp_opp = rcu_dereference(opp);
 146	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
 147		pr_err("%s: Invalid parameters\n", __func__);
 148	else
 149		f = tmp_opp->rate;
 150
 151	return f;
 152}
 153EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
 154
 155/**
 156 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 157 * @opp: opp for which turbo mode is being verified
 158 *
 159 * Turbo OPPs are not for normal use, and can be enabled (under certain
 160 * conditions) for short duration of times to finish high throughput work
 161 * quickly. Running on them for longer times may overheat the chip.
 162 *
 163 * Return: true if opp is turbo opp, else false.
 164 *
 165 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 166 * protected pointer. This means that opp which could have been fetched by
 167 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 168 * under RCU lock. The pointer returned by the opp_find_freq family must be
 169 * used in the same section as the usage of this function with the pointer
 170 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 171 * pointer.
 172 */
 173bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
 174{
 175	struct dev_pm_opp *tmp_opp;
 176
 177	opp_rcu_lockdep_assert();
 178
 179	tmp_opp = rcu_dereference(opp);
 180	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
 181		pr_err("%s: Invalid parameters\n", __func__);
 182		return false;
 183	}
 184
 185	return tmp_opp->turbo;
 186}
 187EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
 188
 189/**
 190 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 191 * @dev:	device for which we do this operation
 192 *
 193 * Return: This function returns the max clock latency in nanoseconds.
 194 *
 195 * Locking: This function takes rcu_read_lock().
 196 */
 197unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
 198{
 199	struct opp_table *opp_table;
 200	unsigned long clock_latency_ns;
 201
 202	rcu_read_lock();
 203
 204	opp_table = _find_opp_table(dev);
 205	if (IS_ERR(opp_table))
 206		clock_latency_ns = 0;
 207	else
 208		clock_latency_ns = opp_table->clock_latency_ns_max;
 209
 210	rcu_read_unlock();
 211	return clock_latency_ns;
 212}
 213EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
 214
 215static int _get_regulator_count(struct device *dev)
 216{
 217	struct opp_table *opp_table;
 218	int count;
 219
 220	rcu_read_lock();
 221
 222	opp_table = _find_opp_table(dev);
 223	if (!IS_ERR(opp_table))
 224		count = opp_table->regulator_count;
 225	else
 226		count = 0;
 227
 228	rcu_read_unlock();
 229
 230	return count;
 231}
 232
 233/**
 234 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 235 * @dev: device for which we do this operation
 236 *
 237 * Return: This function returns the max voltage latency in nanoseconds.
 238 *
 239 * Locking: This function takes rcu_read_lock().
 240 */
 241unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
 242{
 243	struct opp_table *opp_table;
 244	struct dev_pm_opp *opp;
 245	struct regulator *reg, **regulators;
 246	unsigned long latency_ns = 0;
 247	int ret, i, count;
 248	struct {
 249		unsigned long min;
 250		unsigned long max;
 251	} *uV;
 252
 253	count = _get_regulator_count(dev);
 254
 255	/* Regulator may not be required for the device */
 256	if (!count)
 257		return 0;
 258
 259	regulators = kmalloc_array(count, sizeof(*regulators), GFP_KERNEL);
 260	if (!regulators)
 261		return 0;
 262
 263	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
 264	if (!uV)
 265		goto free_regulators;
 266
 267	rcu_read_lock();
 268
 269	opp_table = _find_opp_table(dev);
 270	if (IS_ERR(opp_table)) {
 271		rcu_read_unlock();
 272		goto free_uV;
 273	}
 274
 275	memcpy(regulators, opp_table->regulators, count * sizeof(*regulators));
 276
 277	for (i = 0; i < count; i++) {
 278		uV[i].min = ~0;
 279		uV[i].max = 0;
 280
 281		list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
 282			if (!opp->available)
 283				continue;
 284
 285			if (opp->supplies[i].u_volt_min < uV[i].min)
 286				uV[i].min = opp->supplies[i].u_volt_min;
 287			if (opp->supplies[i].u_volt_max > uV[i].max)
 288				uV[i].max = opp->supplies[i].u_volt_max;
 289		}
 290	}
 291
 292	rcu_read_unlock();
 293
 294	/*
 295	 * The caller needs to ensure that opp_table (and hence the regulator)
 296	 * isn't freed, while we are executing this routine.
 297	 */
 298	for (i = 0; reg = regulators[i], i < count; i++) {
 299		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
 300		if (ret > 0)
 301			latency_ns += ret * 1000;
 302	}
 303
 304free_uV:
 305	kfree(uV);
 306free_regulators:
 307	kfree(regulators);
 308
 309	return latency_ns;
 310}
 311EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
 312
 313/**
 314 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 315 *					     nanoseconds
 316 * @dev: device for which we do this operation
 317 *
 318 * Return: This function returns the max transition latency, in nanoseconds, to
 319 * switch from one OPP to other.
 320 *
 321 * Locking: This function takes rcu_read_lock().
 322 */
 323unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
 324{
 325	return dev_pm_opp_get_max_volt_latency(dev) +
 326		dev_pm_opp_get_max_clock_latency(dev);
 327}
 328EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
 329
 330/**
 331 * dev_pm_opp_get_suspend_opp() - Get suspend opp
 332 * @dev:	device for which we do this operation
 333 *
 334 * Return: This function returns pointer to the suspend opp if it is
 335 * defined and available, otherwise it returns NULL.
 336 *
 337 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 338 * protected pointer. The reason for the same is that the opp pointer which is
 339 * returned will remain valid for use with opp_get_{voltage, freq} only while
 340 * under the locked area. The pointer returned must be used prior to unlocking
 341 * with rcu_read_unlock() to maintain the integrity of the pointer.
 342 */
 343struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
 344{
 345	struct opp_table *opp_table;
 346
 347	opp_rcu_lockdep_assert();
 348
 349	opp_table = _find_opp_table(dev);
 350	if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
 351	    !opp_table->suspend_opp->available)
 352		return NULL;
 353
 354	return opp_table->suspend_opp;
 355}
 356EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
 357
 358/**
 359 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 360 * @dev:	device for which we do this operation
 361 *
 362 * Return: This function returns the number of available opps if there are any,
 363 * else returns 0 if none or the corresponding error value.
 364 *
 365 * Locking: This function takes rcu_read_lock().
 366 */
 367int dev_pm_opp_get_opp_count(struct device *dev)
 368{
 369	struct opp_table *opp_table;
 370	struct dev_pm_opp *temp_opp;
 371	int count = 0;
 372
 373	rcu_read_lock();
 374
 375	opp_table = _find_opp_table(dev);
 376	if (IS_ERR(opp_table)) {
 377		count = PTR_ERR(opp_table);
 378		dev_err(dev, "%s: OPP table not found (%d)\n",
 379			__func__, count);
 380		goto out_unlock;
 381	}
 382
 383	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
 384		if (temp_opp->available)
 385			count++;
 386	}
 387
 388out_unlock:
 389	rcu_read_unlock();
 390	return count;
 391}
 392EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
 393
 394/**
 395 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 396 * @dev:		device for which we do this operation
 397 * @freq:		frequency to search for
 398 * @available:		true/false - match for available opp
 399 *
 400 * Return: Searches for exact match in the opp table and returns pointer to the
 401 * matching opp if found, else returns ERR_PTR in case of error and should
 402 * be handled using IS_ERR. Error return values can be:
 403 * EINVAL:	for bad pointer
 404 * ERANGE:	no match found for search
 405 * ENODEV:	if device not found in list of registered devices
 406 *
 407 * Note: available is a modifier for the search. if available=true, then the
 408 * match is for exact matching frequency and is available in the stored OPP
 409 * table. if false, the match is for exact frequency which is not available.
 410 *
 411 * This provides a mechanism to enable an opp which is not available currently
 412 * or the opposite as well.
 413 *
 414 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 415 * protected pointer. The reason for the same is that the opp pointer which is
 416 * returned will remain valid for use with opp_get_{voltage, freq} only while
 417 * under the locked area. The pointer returned must be used prior to unlocking
 418 * with rcu_read_unlock() to maintain the integrity of the pointer.
 419 */
 420struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 421					      unsigned long freq,
 422					      bool available)
 423{
 424	struct opp_table *opp_table;
 425	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 426
 427	opp_rcu_lockdep_assert();
 428
 429	opp_table = _find_opp_table(dev);
 430	if (IS_ERR(opp_table)) {
 431		int r = PTR_ERR(opp_table);
 432
 433		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
 434		return ERR_PTR(r);
 435	}
 436
 437	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
 438		if (temp_opp->available == available &&
 439				temp_opp->rate == freq) {
 440			opp = temp_opp;
 441			break;
 442		}
 443	}
 444
 445	return opp;
 446}
 447EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
 448
 449static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
 450						   unsigned long *freq)
 451{
 452	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 453
 454	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
 455		if (temp_opp->available && temp_opp->rate >= *freq) {
 456			opp = temp_opp;
 457			*freq = opp->rate;
 458			break;
 459		}
 460	}
 461
 462	return opp;
 463}
 464
 465/**
 466 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
 467 * @dev:	device for which we do this operation
 468 * @freq:	Start frequency
 469 *
 470 * Search for the matching ceil *available* OPP from a starting freq
 471 * for a device.
 472 *
 473 * Return: matching *opp and refreshes *freq accordingly, else returns
 474 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 475 * values can be:
 476 * EINVAL:	for bad pointer
 477 * ERANGE:	no match found for search
 478 * ENODEV:	if device not found in list of registered devices
 479 *
 480 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 481 * protected pointer. The reason for the same is that the opp pointer which is
 482 * returned will remain valid for use with opp_get_{voltage, freq} only while
 483 * under the locked area. The pointer returned must be used prior to unlocking
 484 * with rcu_read_unlock() to maintain the integrity of the pointer.
 485 */
 486struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
 487					     unsigned long *freq)
 488{
 489	struct opp_table *opp_table;
 490
 491	opp_rcu_lockdep_assert();
 492
 493	if (!dev || !freq) {
 494		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
 495		return ERR_PTR(-EINVAL);
 496	}
 497
 498	opp_table = _find_opp_table(dev);
 499	if (IS_ERR(opp_table))
 500		return ERR_CAST(opp_table);
 501
 502	return _find_freq_ceil(opp_table, freq);
 503}
 504EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
 505
 506/**
 507 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 508 * @dev:	device for which we do this operation
 509 * @freq:	Start frequency
 510 *
 511 * Search for the matching floor *available* OPP from a starting freq
 512 * for a device.
 513 *
 514 * Return: matching *opp and refreshes *freq accordingly, else returns
 515 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 516 * values can be:
 517 * EINVAL:	for bad pointer
 518 * ERANGE:	no match found for search
 519 * ENODEV:	if device not found in list of registered devices
 520 *
 521 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 522 * protected pointer. The reason for the same is that the opp pointer which is
 523 * returned will remain valid for use with opp_get_{voltage, freq} only while
 524 * under the locked area. The pointer returned must be used prior to unlocking
 525 * with rcu_read_unlock() to maintain the integrity of the pointer.
 526 */
 527struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
 528					      unsigned long *freq)
 529{
 530	struct opp_table *opp_table;
 531	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 532
 533	opp_rcu_lockdep_assert();
 534
 535	if (!dev || !freq) {
 536		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
 537		return ERR_PTR(-EINVAL);
 538	}
 539
 540	opp_table = _find_opp_table(dev);
 541	if (IS_ERR(opp_table))
 542		return ERR_CAST(opp_table);
 543
 544	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
 545		if (temp_opp->available) {
 546			/* go to the next node, before choosing prev */
 547			if (temp_opp->rate > *freq)
 548				break;
 549			else
 550				opp = temp_opp;
 551		}
 552	}
 553	if (!IS_ERR(opp))
 554		*freq = opp->rate;
 555
 556	return opp;
 557}
 558EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
 559
 560/*
 561 * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
 562 * while clk returned here is used.
 563 */
 564static struct clk *_get_opp_clk(struct device *dev)
 565{
 566	struct opp_table *opp_table;
 567	struct clk *clk;
 568
 569	rcu_read_lock();
 570
 571	opp_table = _find_opp_table(dev);
 572	if (IS_ERR(opp_table)) {
 573		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
 574		clk = ERR_CAST(opp_table);
 575		goto unlock;
 576	}
 577
 578	clk = opp_table->clk;
 579	if (IS_ERR(clk))
 580		dev_err(dev, "%s: No clock available for the device\n",
 581			__func__);
 582
 583unlock:
 584	rcu_read_unlock();
 585	return clk;
 586}
 587
 588static int _set_opp_voltage(struct device *dev, struct regulator *reg,
 589			    struct dev_pm_opp_supply *supply)
 590{
 591	int ret;
 592
 593	/* Regulator not available for device */
 594	if (IS_ERR(reg)) {
 595		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
 596			PTR_ERR(reg));
 597		return 0;
 598	}
 599
 600	dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
 601		supply->u_volt_min, supply->u_volt, supply->u_volt_max);
 602
 603	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
 604					    supply->u_volt, supply->u_volt_max);
 605	if (ret)
 606		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
 607			__func__, supply->u_volt_min, supply->u_volt,
 608			supply->u_volt_max, ret);
 609
 610	return ret;
 611}
 612
 613static inline int
 614_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
 615			  unsigned long old_freq, unsigned long freq)
 616{
 617	int ret;
 618
 619	ret = clk_set_rate(clk, freq);
 620	if (ret) {
 621		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
 622			ret);
 623	}
 624
 625	return ret;
 626}
 627
 628static int _generic_set_opp(struct dev_pm_set_opp_data *data)
 629{
 630	struct dev_pm_opp_supply *old_supply = data->old_opp.supplies;
 631	struct dev_pm_opp_supply *new_supply = data->new_opp.supplies;
 632	unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
 633	struct regulator *reg = data->regulators[0];
 634	struct device *dev= data->dev;
 635	int ret;
 636
 637	/* This function only supports single regulator per device */
 638	if (WARN_ON(data->regulator_count > 1)) {
 639		dev_err(dev, "multiple regulators are not supported\n");
 640		return -EINVAL;
 641	}
 642
 643	/* Scaling up? Scale voltage before frequency */
 644	if (freq > old_freq) {
 645		ret = _set_opp_voltage(dev, reg, new_supply);
 646		if (ret)
 647			goto restore_voltage;
 648	}
 649
 650	/* Change frequency */
 651	ret = _generic_set_opp_clk_only(dev, data->clk, old_freq, freq);
 652	if (ret)
 653		goto restore_voltage;
 654
 655	/* Scaling down? Scale voltage after frequency */
 656	if (freq < old_freq) {
 657		ret = _set_opp_voltage(dev, reg, new_supply);
 658		if (ret)
 659			goto restore_freq;
 660	}
 661
 662	return 0;
 663
 664restore_freq:
 665	if (_generic_set_opp_clk_only(dev, data->clk, freq, old_freq))
 666		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
 667			__func__, old_freq);
 668restore_voltage:
 669	/* This shouldn't harm even if the voltages weren't updated earlier */
 670	if (old_supply->u_volt)
 671		_set_opp_voltage(dev, reg, old_supply);
 672
 673	return ret;
 674}
 675
/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev:	 device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies and clock source to the levels specified
 * by the OPP corresponding to the target_freq.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq, old_freq;
	int (*set_opp)(struct dev_pm_set_opp_data *data);
	struct dev_pm_opp *old_opp, *opp;
	struct regulator **regulators;
	struct dev_pm_set_opp_data *data;
	struct clk *clk;
	int ret, size;

	if (unlikely(!target_freq)) {
		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
			target_freq);
		return -EINVAL;
	}

	clk = _get_opp_clk(dev);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Let the clk framework round to a rate it can provide; fall back to
	 * the raw target on error or zero. */
	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		return 0;
	}

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		rcu_read_unlock();
		return PTR_ERR(opp_table);
	}

	/* Failure to identify the current OPP is non-fatal: the old supply
	 * values are zeroed further below and the transition proceeds. */
	old_opp = _find_freq_ceil(opp_table, &old_freq);
	if (IS_ERR(old_opp)) {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	opp = _find_freq_ceil(opp_table, &freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		rcu_read_unlock();
		return ret;
	}

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
		old_freq, freq);

	regulators = opp_table->regulators;

	/* Only frequency scaling */
	if (!regulators) {
		rcu_read_unlock();
		return _generic_set_opp_clk_only(dev, clk, old_freq, freq);
	}

	/* Prefer the platform-specific transition helper when registered */
	if (opp_table->set_opp)
		set_opp = opp_table->set_opp;
	else
		set_opp = _generic_set_opp;

	/* Copy everything the helper needs out of the RCU-protected table */
	data = opp_table->set_opp_data;
	data->regulators = regulators;
	data->regulator_count = opp_table->regulator_count;
	data->clk = clk;
	data->dev = dev;

	data->old_opp.rate = old_freq;
	size = sizeof(*opp->supplies) * opp_table->regulator_count;
	if (IS_ERR(old_opp))
		memset(data->old_opp.supplies, 0, size);
	else
		memcpy(data->old_opp.supplies, old_opp->supplies, size);

	data->new_opp.rate = freq;
	memcpy(data->new_opp.supplies, opp->supplies, size);

	rcu_read_unlock();

	/*
	 * NOTE(review): 'data' points into opp_table and is dereferenced
	 * after rcu_read_unlock(); presumably the caller must keep the OPP
	 * table alive across the transition — confirm.
	 */
	return set_opp(data);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
 781
/* OPP-dev Helpers */
/* SRCU callback: free the opp_device once its grace period has elapsed. */
static void _kfree_opp_dev_rcu(struct rcu_head *head)
{
	struct opp_device *opp_dev;

	opp_dev = container_of(head, struct opp_device, rcu_head);
	/*
	 * NOTE(review): kfree_rcu() from inside an RCU/SRCU callback defers
	 * the actual kfree() by another grace period; looks intentional for
	 * chaining SRCU and RCU readers here — confirm.
	 */
	kfree_rcu(opp_dev, rcu_head);
}
 790
/*
 * Unlink @opp_dev from @opp_table: drop its debugfs entries, remove it from
 * the table's device list, then free it after an SRCU grace period so
 * concurrent readers can finish.
 */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
		  _kfree_opp_dev_rcu);
}
 799
 800struct opp_device *_add_opp_dev(const struct device *dev,
 801				struct opp_table *opp_table)
 802{
 803	struct opp_device *opp_dev;
 804	int ret;
 805
 806	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
 807	if (!opp_dev)
 808		return NULL;
 809
 810	/* Initialize opp-dev */
 811	opp_dev->dev = dev;
 812	list_add_rcu(&opp_dev->node, &opp_table->dev_list);
 813
 814	/* Create debugfs entries for the opp_table */
 815	ret = opp_debug_register(opp_dev, opp_table);
 816	if (ret)
 817		dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
 818			__func__, ret);
 819
 820	return opp_dev;
 821}
 822
/**
 * _add_opp_table() - Find OPP table or allocate a new one
 * @dev:	device for which we do this operation
 *
 * It tries to find an existing table first, if it couldn't find one, it
 * allocates a new OPP table and returns that.
 *
 * Return: valid opp_table pointer if success, else NULL.
 */
static struct opp_table *_add_opp_table(struct device *dev)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (!IS_ERR(opp_table))
		return opp_table;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device is needed to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return NULL;

	INIT_LIST_HEAD(&opp_table->dev_list);

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		/* Table is not yet published on opp_tables: plain kfree is safe */
		kfree(opp_table);
		return NULL;
	}

	_of_init_opp_table(opp_table, dev);

	/* Find clk for the device; a missing clock is tolerated (the table
	 * can still be used for voltage-only / informational purposes). */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		if (ret != -EPROBE_DEFER)
			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
	}

	srcu_init_notifier_head(&opp_table->srcu_head);
	INIT_LIST_HEAD(&opp_table->opp_list);

	/* Publish last: the table must be fully initialized before RCU
	 * readers can observe it on the global list. */
	list_add_rcu(&opp_table->node, &opp_tables);
	return opp_table;
}
 877
/**
 * _kfree_device_rcu() - Free opp_table RCU handler
 * @head:	RCU head embedded in the opp_table being freed
 */
static void _kfree_device_rcu(struct rcu_head *head)
{
	struct opp_table *opp_table = container_of(head, struct opp_table,
						   rcu_head);

	/*
	 * NOTE(review): kfree_rcu() from inside an SRCU callback defers the
	 * actual kfree() by another RCU grace period — mirrors
	 * _kfree_opp_dev_rcu(); confirm intent.
	 */
	kfree_rcu(opp_table, rcu_head);
}
 889
/**
 * _remove_opp_table() - Removes a OPP table
 * @opp_table: OPP table to be removed.
 *
 * Removes/frees OPP table if it doesn't contain any OPPs.
 */
static void _remove_opp_table(struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	/* Table still holds OPPs: keep it */
	if (!list_empty(&opp_table->opp_list))
		return;

	/*
	 * Resources below are installed by separate set/put APIs; while any
	 * of them is still attached, its owner is responsible for the table.
	 */
	if (opp_table->supported_hw)
		return;

	if (opp_table->prop_name)
		return;

	if (opp_table->regulators)
		return;

	if (opp_table->set_opp)
		return;

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
				   node);

	_remove_opp_dev(opp_dev, opp_table);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&opp_table->dev_list));

	/* Unpublish, then free after readers are done (SRCU grace period) */
	list_del_rcu(&opp_table->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
		  _kfree_device_rcu);
}
 931
/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head:	RCU head embedded in the dev_pm_opp being freed
 */
static void _kfree_opp_rcu(struct rcu_head *head)
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	/* Same deferred-free pattern as _kfree_opp_dev_rcu() above */
	kfree_rcu(opp, rcu_head);
}
 942
/**
 * _opp_remove()  - Remove an OPP from a table definition
 * @opp_table:	points back to the opp_table struct this opp belongs to
 * @opp:	pointer to the OPP to remove
 * @notify:	OPP_EVENT_REMOVE notification should be sent or not
 *
 * This function removes an opp definition from the opp table.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
		 bool notify)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	if (notify)
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_REMOVE, opp);
	opp_debug_remove_one(opp);
	/* Unlink first; the OPP is freed only after an SRCU grace period */
	list_del_rcu(&opp->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Drop the table itself if this was its last OPP and nothing else
	 * (regulators, prop-name, ...) pins it. */
	_remove_opp_table(opp_table);
}
 971
 972/**
 973 * dev_pm_opp_remove()  - Remove an OPP from OPP table
 974 * @dev:	device for which we do this operation
 975 * @freq:	OPP to remove with matching 'freq'
 976 *
 977 * This function removes an opp from the opp table.
 978 *
 979 * Locking: The internal opp_table and opp structures are RCU protected.
 980 * Hence this function internally uses RCU updater strategy with mutex locks
 981 * to keep the integrity of the internal data structures. Callers should ensure
 982 * that this function is *NOT* called under RCU protection or in contexts where
 983 * mutex cannot be locked.
 984 */
 985void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 986{
 987	struct dev_pm_opp *opp;
 988	struct opp_table *opp_table;
 989	bool found = false;
 990
 991	/* Hold our table modification lock here */
 992	mutex_lock(&opp_table_lock);
 993
 994	opp_table = _find_opp_table(dev);
 995	if (IS_ERR(opp_table))
 996		goto unlock;
 997
 998	list_for_each_entry(opp, &opp_table->opp_list, node) {
 999		if (opp->rate == freq) {
1000			found = true;
1001			break;
1002		}
1003	}
1004
1005	if (!found) {
1006		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
1007			 __func__, freq);
1008		goto unlock;
1009	}
1010
1011	_opp_remove(opp_table, opp, true);
1012unlock:
1013	mutex_unlock(&opp_table_lock);
1014}
1015EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
1016
1017struct dev_pm_opp *_allocate_opp(struct device *dev,
1018				 struct opp_table **opp_table)
1019{
1020	struct dev_pm_opp *opp;
1021	int count, supply_size;
1022	struct opp_table *table;
1023
1024	table = _add_opp_table(dev);
1025	if (!table)
1026		return NULL;
1027
1028	/* Allocate space for at least one supply */
1029	count = table->regulator_count ? table->regulator_count : 1;
1030	supply_size = sizeof(*opp->supplies) * count;
1031
1032	/* allocate new OPP node and supplies structures */
1033	opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
1034	if (!opp) {
1035		kfree(table);
1036		return NULL;
1037	}
1038
1039	/* Put the supplies at the end of the OPP structure as an empty array */
1040	opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
1041	INIT_LIST_HEAD(&opp->node);
1042
1043	*opp_table = table;
1044
1045	return opp;
1046}
1047
1048static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
1049					 struct opp_table *opp_table)
1050{
1051	struct regulator *reg;
1052	int i;
1053
1054	for (i = 0; i < opp_table->regulator_count; i++) {
1055		reg = opp_table->regulators[i];
1056
1057		if (!regulator_is_supported_voltage(reg,
1058					opp->supplies[i].u_volt_min,
1059					opp->supplies[i].u_volt_max)) {
1060			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
1061				__func__, opp->supplies[i].u_volt_min,
1062				opp->supplies[i].u_volt_max);
1063			return false;
1064		}
1065	}
1066
1067	return true;
1068}
1069
/*
 * _opp_add() - Link @new_opp into @opp_table's frequency-sorted OPP list.
 *
 * Must be called with opp_table_lock held. Returns 0 on success, or when an
 * equivalent OPP (same rate, same first-supply voltage, and available)
 * already exists; returns -EEXIST for a conflicting duplicate rate. On
 * success the OPP is also registered in debugfs and is marked unavailable
 * when its voltage ranges are not supported by the table's regulators.
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	struct list_head *head = &opp_table->opp_list;
	int ret;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			/* Remember the last node with a lower rate */
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->supplies[0].u_volt,
			 opp->available, new_opp->rate,
			 new_opp->supplies[0].u_volt, new_opp->available);

		/* Should we compare voltages for all regulators here ? */
		return opp->available &&
		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? 0 : -EEXIST;
	}

	new_opp->opp_table = opp_table;
	/* 'head' points at the predecessor, so insertion keeps the sort order */
	list_add_rcu(&new_opp->node, head);

	ret = opp_debug_create_one(new_opp, opp_table);
	if (ret)
		/* debugfs failure is non-fatal; the OPP remains usable */
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
			__func__, ret);

	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}
1121
1122/**
1123 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
1124 * @dev:	device for which we do this operation
1125 * @freq:	Frequency in Hz for this OPP
1126 * @u_volt:	Voltage in uVolts for this OPP
1127 * @dynamic:	Dynamically added OPPs.
1128 *
1129 * This function adds an opp definition to the opp table and returns status.
1130 * The opp is made available by default and it can be controlled using
1131 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
1132 *
1133 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
1134 * and freed by dev_pm_opp_of_remove_table.
1135 *
1136 * Locking: The internal opp_table and opp structures are RCU protected.
1137 * Hence this function internally uses RCU updater strategy with mutex locks
1138 * to keep the integrity of the internal data structures. Callers should ensure
1139 * that this function is *NOT* called under RCU protection or in contexts where
1140 * mutex cannot be locked.
1141 *
1142 * Return:
1143 * 0		On success OR
1144 *		Duplicate OPPs (both freq and volt are same) and opp->available
1145 * -EEXIST	Freq are same and volt are different OR
1146 *		Duplicate OPPs (both freq and volt are same) and !opp->available
1147 * -ENOMEM	Memory allocation failure
1148 */
1149int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
1150		bool dynamic)
1151{
1152	struct opp_table *opp_table;
1153	struct dev_pm_opp *new_opp;
1154	unsigned long tol;
1155	int ret;
1156
1157	/* Hold our table modification lock here */
1158	mutex_lock(&opp_table_lock);
1159
1160	new_opp = _allocate_opp(dev, &opp_table);
1161	if (!new_opp) {
1162		ret = -ENOMEM;
1163		goto unlock;
1164	}
1165
1166	/* populate the opp table */
1167	new_opp->rate = freq;
1168	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
1169	new_opp->supplies[0].u_volt = u_volt;
1170	new_opp->supplies[0].u_volt_min = u_volt - tol;
1171	new_opp->supplies[0].u_volt_max = u_volt + tol;
1172	new_opp->available = true;
1173	new_opp->dynamic = dynamic;
1174
1175	ret = _opp_add(dev, new_opp, opp_table);
1176	if (ret)
1177		goto free_opp;
1178
1179	mutex_unlock(&opp_table_lock);
1180
1181	/*
1182	 * Notify the changes in the availability of the operable
1183	 * frequency/voltage list.
1184	 */
1185	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
1186	return 0;
1187
1188free_opp:
1189	_opp_remove(opp_table, new_opp, false);
1190unlock:
1191	mutex_unlock(&opp_table_lock);
1192	return ret;
1193}
1194
1195/**
1196 * dev_pm_opp_set_supported_hw() - Set supported platforms
1197 * @dev: Device for which supported-hw has to be set.
1198 * @versions: Array of hierarchy of versions to match.
1199 * @count: Number of elements in the array.
1200 *
1201 * This is required only for the V2 bindings, and it enables a platform to
1202 * specify the hierarchy of versions it supports. OPP layer will then enable
1203 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
1204 * property.
1205 *
1206 * Locking: The internal opp_table and opp structures are RCU protected.
1207 * Hence this function internally uses RCU updater strategy with mutex locks
1208 * to keep the integrity of the internal data structures. Callers should ensure
1209 * that this function is *NOT* called under RCU protection or in contexts where
1210 * mutex cannot be locked.
1211 */
1212int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
1213				unsigned int count)
1214{
1215	struct opp_table *opp_table;
1216	int ret = 0;
1217
1218	/* Hold our table modification lock here */
1219	mutex_lock(&opp_table_lock);
1220
1221	opp_table = _add_opp_table(dev);
1222	if (!opp_table) {
1223		ret = -ENOMEM;
1224		goto unlock;
1225	}
1226
1227	/* Make sure there are no concurrent readers while updating opp_table */
1228	WARN_ON(!list_empty(&opp_table->opp_list));
1229
1230	/* Do we already have a version hierarchy associated with opp_table? */
1231	if (opp_table->supported_hw) {
1232		dev_err(dev, "%s: Already have supported hardware list\n",
1233			__func__);
1234		ret = -EBUSY;
1235		goto err;
1236	}
1237
1238	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
1239					GFP_KERNEL);
1240	if (!opp_table->supported_hw) {
1241		ret = -ENOMEM;
1242		goto err;
1243	}
1244
1245	opp_table->supported_hw_count = count;
1246	mutex_unlock(&opp_table_lock);
1247	return 0;
1248
1249err:
1250	_remove_opp_table(opp_table);
1251unlock:
1252	mutex_unlock(&opp_table_lock);
1253
1254	return ret;
1255}
1256EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
1257
1258/**
1259 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
1260 * @dev: Device for which supported-hw has to be put.
1261 *
1262 * This is required only for the V2 bindings, and is called for a matching
1263 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
1264 * will not be freed.
1265 *
1266 * Locking: The internal opp_table and opp structures are RCU protected.
1267 * Hence this function internally uses RCU updater strategy with mutex locks
1268 * to keep the integrity of the internal data structures. Callers should ensure
1269 * that this function is *NOT* called under RCU protection or in contexts where
1270 * mutex cannot be locked.
1271 */
1272void dev_pm_opp_put_supported_hw(struct device *dev)
1273{
1274	struct opp_table *opp_table;
1275
1276	/* Hold our table modification lock here */
1277	mutex_lock(&opp_table_lock);
1278
1279	/* Check for existing table for 'dev' first */
1280	opp_table = _find_opp_table(dev);
1281	if (IS_ERR(opp_table)) {
1282		dev_err(dev, "Failed to find opp_table: %ld\n",
1283			PTR_ERR(opp_table));
1284		goto unlock;
1285	}
1286
1287	/* Make sure there are no concurrent readers while updating opp_table */
1288	WARN_ON(!list_empty(&opp_table->opp_list));
1289
1290	if (!opp_table->supported_hw) {
1291		dev_err(dev, "%s: Doesn't have supported hardware list\n",
1292			__func__);
1293		goto unlock;
1294	}
1295
1296	kfree(opp_table->supported_hw);
1297	opp_table->supported_hw = NULL;
1298	opp_table->supported_hw_count = 0;
1299
1300	/* Try freeing opp_table if this was the last blocking resource */
1301	_remove_opp_table(opp_table);
1302
1303unlock:
1304	mutex_unlock(&opp_table_lock);
1305}
1306EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1307
1308/**
1309 * dev_pm_opp_set_prop_name() - Set prop-extn name
1310 * @dev: Device for which the prop-name has to be set.
1311 * @name: name to postfix to properties.
1312 *
1313 * This is required only for the V2 bindings, and it enables a platform to
1314 * specify the extn to be used for certain property names. The properties to
1315 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
1316 * should postfix the property name with -<name> while looking for them.
1317 *
1318 * Locking: The internal opp_table and opp structures are RCU protected.
1319 * Hence this function internally uses RCU updater strategy with mutex locks
1320 * to keep the integrity of the internal data structures. Callers should ensure
1321 * that this function is *NOT* called under RCU protection or in contexts where
1322 * mutex cannot be locked.
1323 */
1324int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1325{
1326	struct opp_table *opp_table;
1327	int ret = 0;
1328
1329	/* Hold our table modification lock here */
1330	mutex_lock(&opp_table_lock);
1331
1332	opp_table = _add_opp_table(dev);
1333	if (!opp_table) {
1334		ret = -ENOMEM;
1335		goto unlock;
1336	}
1337
1338	/* Make sure there are no concurrent readers while updating opp_table */
1339	WARN_ON(!list_empty(&opp_table->opp_list));
1340
1341	/* Do we already have a prop-name associated with opp_table? */
1342	if (opp_table->prop_name) {
1343		dev_err(dev, "%s: Already have prop-name %s\n", __func__,
1344			opp_table->prop_name);
1345		ret = -EBUSY;
1346		goto err;
1347	}
1348
1349	opp_table->prop_name = kstrdup(name, GFP_KERNEL);
1350	if (!opp_table->prop_name) {
1351		ret = -ENOMEM;
1352		goto err;
1353	}
1354
1355	mutex_unlock(&opp_table_lock);
1356	return 0;
1357
1358err:
1359	_remove_opp_table(opp_table);
1360unlock:
1361	mutex_unlock(&opp_table_lock);
1362
1363	return ret;
1364}
1365EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
1366
1367/**
1368 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1369 * @dev: Device for which the prop-name has to be put.
1370 *
1371 * This is required only for the V2 bindings, and is called for a matching
1372 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
1373 * will not be freed.
1374 *
1375 * Locking: The internal opp_table and opp structures are RCU protected.
1376 * Hence this function internally uses RCU updater strategy with mutex locks
1377 * to keep the integrity of the internal data structures. Callers should ensure
1378 * that this function is *NOT* called under RCU protection or in contexts where
1379 * mutex cannot be locked.
1380 */
1381void dev_pm_opp_put_prop_name(struct device *dev)
1382{
1383	struct opp_table *opp_table;
1384
1385	/* Hold our table modification lock here */
1386	mutex_lock(&opp_table_lock);
1387
1388	/* Check for existing table for 'dev' first */
1389	opp_table = _find_opp_table(dev);
1390	if (IS_ERR(opp_table)) {
1391		dev_err(dev, "Failed to find opp_table: %ld\n",
1392			PTR_ERR(opp_table));
1393		goto unlock;
1394	}
1395
1396	/* Make sure there are no concurrent readers while updating opp_table */
1397	WARN_ON(!list_empty(&opp_table->opp_list));
1398
1399	if (!opp_table->prop_name) {
1400		dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
1401		goto unlock;
1402	}
1403
1404	kfree(opp_table->prop_name);
1405	opp_table->prop_name = NULL;
1406
1407	/* Try freeing opp_table if this was the last blocking resource */
1408	_remove_opp_table(opp_table);
1409
1410unlock:
1411	mutex_unlock(&opp_table_lock);
1412}
1413EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1414
1415static int _allocate_set_opp_data(struct opp_table *opp_table)
1416{
1417	struct dev_pm_set_opp_data *data;
1418	int len, count = opp_table->regulator_count;
1419
1420	if (WARN_ON(!count))
1421		return -EINVAL;
1422
1423	/* space for set_opp_data */
1424	len = sizeof(*data);
1425
1426	/* space for old_opp.supplies and new_opp.supplies */
1427	len += 2 * sizeof(struct dev_pm_opp_supply) * count;
1428
1429	data = kzalloc(len, GFP_KERNEL);
1430	if (!data)
1431		return -ENOMEM;
1432
1433	data->old_opp.supplies = (void *)(data + 1);
1434	data->new_opp.supplies = data->old_opp.supplies + count;
1435
1436	opp_table->set_opp_data = data;
1437
1438	return 0;
1439}
1440
/* Release the scratch block allocated by _allocate_set_opp_data(). */
static void _free_set_opp_data(struct opp_table *opp_table)
{
	kfree(opp_table->set_opp_data);
	opp_table->set_opp_data = NULL;
}
1446
1447/**
1448 * dev_pm_opp_set_regulators() - Set regulator names for the device
1449 * @dev: Device for which regulator name is being set.
1450 * @names: Array of pointers to the names of the regulator.
1451 * @count: Number of regulators.
1452 *
1453 * In order to support OPP switching, OPP layer needs to know the name of the
1454 * device's regulators, as the core would be required to switch voltages as
1455 * well.
1456 *
1457 * This must be called before any OPPs are initialized for the device.
1458 *
1459 * Locking: The internal opp_table and opp structures are RCU protected.
1460 * Hence this function internally uses RCU updater strategy with mutex locks
1461 * to keep the integrity of the internal data structures. Callers should ensure
1462 * that this function is *NOT* called under RCU protection or in contexts where
1463 * mutex cannot be locked.
1464 */
1465struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
1466					    const char * const names[],
1467					    unsigned int count)
1468{
1469	struct opp_table *opp_table;
1470	struct regulator *reg;
1471	int ret, i;
1472
1473	mutex_lock(&opp_table_lock);
1474
1475	opp_table = _add_opp_table(dev);
1476	if (!opp_table) {
1477		ret = -ENOMEM;
1478		goto unlock;
1479	}
1480
1481	/* This should be called before OPPs are initialized */
1482	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
1483		ret = -EBUSY;
1484		goto err;
1485	}
1486
1487	/* Already have regulators set */
1488	if (opp_table->regulators) {
1489		ret = -EBUSY;
1490		goto err;
1491	}
1492
1493	opp_table->regulators = kmalloc_array(count,
1494					      sizeof(*opp_table->regulators),
1495					      GFP_KERNEL);
1496	if (!opp_table->regulators) {
1497		ret = -ENOMEM;
1498		goto err;
1499	}
1500
1501	for (i = 0; i < count; i++) {
1502		reg = regulator_get_optional(dev, names[i]);
1503		if (IS_ERR(reg)) {
1504			ret = PTR_ERR(reg);
1505			if (ret != -EPROBE_DEFER)
1506				dev_err(dev, "%s: no regulator (%s) found: %d\n",
1507					__func__, names[i], ret);
1508			goto free_regulators;
1509		}
1510
1511		opp_table->regulators[i] = reg;
1512	}
1513
1514	opp_table->regulator_count = count;
1515
1516	/* Allocate block only once to pass to set_opp() routines */
1517	ret = _allocate_set_opp_data(opp_table);
1518	if (ret)
1519		goto free_regulators;
1520
1521	mutex_unlock(&opp_table_lock);
1522	return opp_table;
1523
1524free_regulators:
1525	while (i != 0)
1526		regulator_put(opp_table->regulators[--i]);
1527
1528	kfree(opp_table->regulators);
1529	opp_table->regulators = NULL;
1530	opp_table->regulator_count = 0;
1531err:
1532	_remove_opp_table(opp_table);
1533unlock:
1534	mutex_unlock(&opp_table_lock);
1535
1536	return ERR_PTR(ret);
1537}
1538EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
1539
1540/**
1541 * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
1542 * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
1543 *
1544 * Locking: The internal opp_table and opp structures are RCU protected.
1545 * Hence this function internally uses RCU updater strategy with mutex locks
1546 * to keep the integrity of the internal data structures. Callers should ensure
1547 * that this function is *NOT* called under RCU protection or in contexts where
1548 * mutex cannot be locked.
1549 */
1550void dev_pm_opp_put_regulators(struct opp_table *opp_table)
1551{
1552	int i;
1553
1554	mutex_lock(&opp_table_lock);
1555
1556	if (!opp_table->regulators) {
1557		pr_err("%s: Doesn't have regulators set\n", __func__);
1558		goto unlock;
1559	}
1560
1561	/* Make sure there are no concurrent readers while updating opp_table */
1562	WARN_ON(!list_empty(&opp_table->opp_list));
1563
1564	for (i = opp_table->regulator_count - 1; i >= 0; i--)
1565		regulator_put(opp_table->regulators[i]);
1566
1567	_free_set_opp_data(opp_table);
1568
1569	kfree(opp_table->regulators);
1570	opp_table->regulators = NULL;
1571	opp_table->regulator_count = 0;
1572
1573	/* Try freeing opp_table if this was the last blocking resource */
1574	_remove_opp_table(opp_table);
1575
1576unlock:
1577	mutex_unlock(&opp_table_lock);
1578}
1579EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
1580
1581/**
1582 * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
1583 * @dev: Device for which the helper is getting registered.
1584 * @set_opp: Custom set OPP helper.
1585 *
1586 * This is useful to support complex platforms (like platforms with multiple
1587 * regulators per device), instead of the generic OPP set rate helper.
1588 *
1589 * This must be called before any OPPs are initialized for the device.
1590 *
1591 * Locking: The internal opp_table and opp structures are RCU protected.
1592 * Hence this function internally uses RCU updater strategy with mutex locks
1593 * to keep the integrity of the internal data structures. Callers should ensure
1594 * that this function is *NOT* called under RCU protection or in contexts where
1595 * mutex cannot be locked.
1596 */
1597int dev_pm_opp_register_set_opp_helper(struct device *dev,
1598			int (*set_opp)(struct dev_pm_set_opp_data *data))
1599{
1600	struct opp_table *opp_table;
1601	int ret;
1602
1603	if (!set_opp)
1604		return -EINVAL;
1605
1606	mutex_lock(&opp_table_lock);
1607
1608	opp_table = _add_opp_table(dev);
1609	if (!opp_table) {
1610		ret = -ENOMEM;
1611		goto unlock;
1612	}
1613
1614	/* This should be called before OPPs are initialized */
1615	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
1616		ret = -EBUSY;
1617		goto err;
1618	}
1619
1620	/* Already have custom set_opp helper */
1621	if (WARN_ON(opp_table->set_opp)) {
1622		ret = -EBUSY;
1623		goto err;
1624	}
1625
1626	opp_table->set_opp = set_opp;
1627
1628	mutex_unlock(&opp_table_lock);
1629	return 0;
1630
1631err:
1632	_remove_opp_table(opp_table);
1633unlock:
1634	mutex_unlock(&opp_table_lock);
1635
1636	return ret;
1637}
1638EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
1639
1640/**
1641 * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for
1642 *					   set_opp helper
1643 * @dev: Device for which custom set_opp helper has to be cleared.
1644 *
1645 * Locking: The internal opp_table and opp structures are RCU protected.
1646 * Hence this function internally uses RCU updater strategy with mutex locks
1647 * to keep the integrity of the internal data structures. Callers should ensure
1648 * that this function is *NOT* called under RCU protection or in contexts where
1649 * mutex cannot be locked.
1650 */
1651void dev_pm_opp_register_put_opp_helper(struct device *dev)
1652{
1653	struct opp_table *opp_table;
1654
1655	mutex_lock(&opp_table_lock);
1656
1657	/* Check for existing table for 'dev' first */
1658	opp_table = _find_opp_table(dev);
1659	if (IS_ERR(opp_table)) {
1660		dev_err(dev, "Failed to find opp_table: %ld\n",
1661			PTR_ERR(opp_table));
1662		goto unlock;
1663	}
1664
1665	if (!opp_table->set_opp) {
1666		dev_err(dev, "%s: Doesn't have custom set_opp helper set\n",
1667			__func__);
1668		goto unlock;
1669	}
1670
1671	/* Make sure there are no concurrent readers while updating opp_table */
1672	WARN_ON(!list_empty(&opp_table->opp_list));
1673
1674	opp_table->set_opp = NULL;
1675
1676	/* Try freeing opp_table if this was the last blocking resource */
1677	_remove_opp_table(opp_table);
1678
1679unlock:
1680	mutex_unlock(&opp_table_lock);
1681}
1682EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper);
1683
1684/**
1685 * dev_pm_opp_add()  - Add an OPP table from a table definitions
1686 * @dev:	device for which we do this operation
1687 * @freq:	Frequency in Hz for this OPP
1688 * @u_volt:	Voltage in uVolts for this OPP
1689 *
1690 * This function adds an opp definition to the opp table and returns status.
1691 * The opp is made available by default and it can be controlled using
1692 * dev_pm_opp_enable/disable functions.
1693 *
1694 * Locking: The internal opp_table and opp structures are RCU protected.
1695 * Hence this function internally uses RCU updater strategy with mutex locks
1696 * to keep the integrity of the internal data structures. Callers should ensure
1697 * that this function is *NOT* called under RCU protection or in contexts where
1698 * mutex cannot be locked.
1699 *
1700 * Return:
1701 * 0		On success OR
1702 *		Duplicate OPPs (both freq and volt are same) and opp->available
1703 * -EEXIST	Freq are same and volt are different OR
1704 *		Duplicate OPPs (both freq and volt are same) and !opp->available
1705 * -ENOMEM	Memory allocation failure
1706 */
1707int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
1708{
1709	return _opp_add_v1(dev, freq, u_volt, true);
1710}
1711EXPORT_SYMBOL_GPL(dev_pm_opp_add);
1712
1713/**
1714 * _opp_set_availability() - helper to set the availability of an opp
1715 * @dev:		device for which we do this operation
1716 * @freq:		OPP frequency to modify availability
1717 * @availability_req:	availability status requested for this opp
1718 *
1719 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
1720 * share a common logic which is isolated here.
1721 *
1722 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1723 * copy operation, returns 0 if no modification was done OR modification was
1724 * successful.
1725 *
1726 * Locking: The internal opp_table and opp structures are RCU protected.
1727 * Hence this function internally uses RCU updater strategy with mutex locks to
1728 * keep the integrity of the internal data structures. Callers should ensure
1729 * that this function is *NOT* called under RCU protection or in contexts where
1730 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1731 */
1732static int _opp_set_availability(struct device *dev, unsigned long freq,
1733				 bool availability_req)
1734{
1735	struct opp_table *opp_table;
1736	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
1737	int r = 0;
1738
1739	/* keep the node allocated */
1740	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
1741	if (!new_opp)
1742		return -ENOMEM;
1743
1744	mutex_lock(&opp_table_lock);
1745
1746	/* Find the opp_table */
1747	opp_table = _find_opp_table(dev);
1748	if (IS_ERR(opp_table)) {
1749		r = PTR_ERR(opp_table);
1750		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
1751		goto unlock;
1752	}
1753
1754	/* Do we have the frequency? */
1755	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
1756		if (tmp_opp->rate == freq) {
1757			opp = tmp_opp;
1758			break;
1759		}
1760	}
1761	if (IS_ERR(opp)) {
1762		r = PTR_ERR(opp);
1763		goto unlock;
1764	}
1765
1766	/* Is update really needed? */
1767	if (opp->available == availability_req)
1768		goto unlock;
1769	/* copy the old data over */
1770	*new_opp = *opp;
1771
1772	/* plug in new node */
1773	new_opp->available = availability_req;
1774
1775	list_replace_rcu(&opp->node, &new_opp->node);
1776	mutex_unlock(&opp_table_lock);
1777	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
1778
1779	/* Notify the change of the OPP availability */
1780	if (availability_req)
1781		srcu_notifier_call_chain(&opp_table->srcu_head,
1782					 OPP_EVENT_ENABLE, new_opp);
1783	else
1784		srcu_notifier_call_chain(&opp_table->srcu_head,
1785					 OPP_EVENT_DISABLE, new_opp);
1786
1787	return 0;
1788
1789unlock:
1790	mutex_unlock(&opp_table_lock);
1791	kfree(new_opp);
1792	return r;
1793}
1794
1795/**
1796 * dev_pm_opp_enable() - Enable a specific OPP
1797 * @dev:	device for which we do this operation
1798 * @freq:	OPP frequency to enable
1799 *
1800 * Enables a provided opp. If the operation is valid, this returns 0, else the
1801 * corresponding error value. It is meant to be used for users an OPP available
1802 * after being temporarily made unavailable with dev_pm_opp_disable.
1803 *
1804 * Locking: The internal opp_table and opp structures are RCU protected.
1805 * Hence this function indirectly uses RCU and mutex locks to keep the
1806 * integrity of the internal data structures. Callers should ensure that
1807 * this function is *NOT* called under RCU protection or in contexts where
1808 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1809 *
1810 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1811 * copy operation, returns 0 if no modification was done OR modification was
1812 * successful.
1813 */
1814int dev_pm_opp_enable(struct device *dev, unsigned long freq)
1815{
1816	return _opp_set_availability(dev, freq, true);
1817}
1818EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1819
1820/**
1821 * dev_pm_opp_disable() - Disable a specific OPP
1822 * @dev:	device for which we do this operation
1823 * @freq:	OPP frequency to disable
1824 *
1825 * Disables a provided opp. If the operation is valid, this returns
1826 * 0, else the corresponding error value. It is meant to be a temporary
1827 * control by users to make this OPP not available until the circumstances are
1828 * right to make it available again (with a call to dev_pm_opp_enable).
1829 *
1830 * Locking: The internal opp_table and opp structures are RCU protected.
1831 * Hence this function indirectly uses RCU and mutex locks to keep the
1832 * integrity of the internal data structures. Callers should ensure that
1833 * this function is *NOT* called under RCU protection or in contexts where
1834 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1835 *
1836 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1837 * copy operation, returns 0 if no modification was done OR modification was
1838 * successful.
1839 */
1840int dev_pm_opp_disable(struct device *dev, unsigned long freq)
1841{
1842	return _opp_set_availability(dev, freq, false);
1843}
1844EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
1845
1846/**
1847 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
1848 * @dev:	device pointer used to lookup OPP table.
1849 *
1850 * Return: pointer to  notifier head if found, otherwise -ENODEV or
1851 * -EINVAL based on type of error casted as pointer. value must be checked
1852 *  with IS_ERR to determine valid pointer or error result.
1853 *
1854 * Locking: This function must be called under rcu_read_lock(). opp_table is a
1855 * RCU protected pointer. The reason for the same is that the opp pointer which
1856 * is returned will remain valid for use with opp_get_{voltage, freq} only while
1857 * under the locked area. The pointer returned must be used prior to unlocking
1858 * with rcu_read_unlock() to maintain the integrity of the pointer.
1859 */
1860struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
1861{
1862	struct opp_table *opp_table = _find_opp_table(dev);
1863
1864	if (IS_ERR(opp_table))
1865		return ERR_CAST(opp_table); /* matching type */
1866
1867	return &opp_table->srcu_head;
1868}
1869EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
1870
1871/*
1872 * Free OPPs either created using static entries present in DT or even the
1873 * dynamically added entries based on remove_all param.
1874 */
1875void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
1876{
1877	struct opp_table *opp_table;
1878	struct dev_pm_opp *opp, *tmp;
1879
1880	/* Hold our table modification lock here */
1881	mutex_lock(&opp_table_lock);
1882
1883	/* Check for existing table for 'dev' */
1884	opp_table = _find_opp_table(dev);
1885	if (IS_ERR(opp_table)) {
1886		int error = PTR_ERR(opp_table);
1887
1888		if (error != -ENODEV)
1889			WARN(1, "%s: opp_table: %d\n",
1890			     IS_ERR_OR_NULL(dev) ?
1891					"Invalid device" : dev_name(dev),
1892			     error);
1893		goto unlock;
1894	}
1895
1896	/* Find if opp_table manages a single device */
1897	if (list_is_singular(&opp_table->dev_list)) {
1898		/* Free static OPPs */
1899		list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
1900			if (remove_all || !opp->dynamic)
1901				_opp_remove(opp_table, opp, true);
1902		}
1903	} else {
1904		_remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
1905	}
1906
1907unlock:
1908	mutex_unlock(&opp_table_lock);
1909}
1910
1911/**
1912 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
1913 * @dev:	device pointer used to lookup OPP table.
1914 *
1915 * Free both OPPs created using static entries present in DT and the
1916 * dynamically added entries.
1917 *
1918 * Locking: The internal opp_table and opp structures are RCU protected.
1919 * Hence this function indirectly uses RCU updater strategy with mutex locks
1920 * to keep the integrity of the internal data structures. Callers should ensure
1921 * that this function is *NOT* called under RCU protection or in contexts where
1922 * mutex cannot be locked.
1923 */
1924void dev_pm_opp_remove_table(struct device *dev)
1925{
1926	_dev_pm_opp_remove_table(dev, true);
1927}
1928EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);