   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/domain.c - Common code related to device power domains.
   4 *
   5 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
   6 */
   7#define pr_fmt(fmt) "PM: " fmt
   8
   9#include <linux/delay.h>
  10#include <linux/kernel.h>
  11#include <linux/io.h>
  12#include <linux/platform_device.h>
  13#include <linux/pm_opp.h>
  14#include <linux/pm_runtime.h>
  15#include <linux/pm_domain.h>
  16#include <linux/pm_qos.h>
  17#include <linux/pm_clock.h>
  18#include <linux/slab.h>
  19#include <linux/err.h>
  20#include <linux/sched.h>
  21#include <linux/suspend.h>
  22#include <linux/export.h>
  23#include <linux/cpu.h>
  24
  25#include "power.h"
  26
  27#define GENPD_RETRY_MAX_MS	250		/* Approximate */
  28
  29#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
  30({								\
  31	type (*__routine)(struct device *__d); 			\
  32	type __ret = (type)0;					\
  33								\
  34	__routine = genpd->dev_ops.callback; 			\
  35	if (__routine) {					\
  36		__ret = __routine(dev); 			\
  37	}							\
  38	__ret;							\
  39})
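/*
 * As a rough illustration of the helper above: a call such as
 * GENPD_DEV_CALLBACK(genpd, int, stop, dev) expands to roughly
 *
 *	int (*__routine)(struct device *__d);
 *	int __ret = 0;
 *
 *	__routine = genpd->dev_ops.stop;
 *	if (__routine)
 *		__ret = __routine(dev);
 *	__ret;
 *
 * i.e. the per-domain device callback is invoked only if the provider set it,
 * and a missing callback is treated as success (0).
 */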
  40
  41static LIST_HEAD(gpd_list);
  42static DEFINE_MUTEX(gpd_list_lock);
  43
  44struct genpd_lock_ops {
  45	void (*lock)(struct generic_pm_domain *genpd);
  46	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
  47	int (*lock_interruptible)(struct generic_pm_domain *genpd);
  48	void (*unlock)(struct generic_pm_domain *genpd);
  49};
  50
  51static void genpd_lock_mtx(struct generic_pm_domain *genpd)
  52{
  53	mutex_lock(&genpd->mlock);
  54}
  55
  56static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
  57					int depth)
  58{
  59	mutex_lock_nested(&genpd->mlock, depth);
  60}
  61
  62static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
  63{
  64	return mutex_lock_interruptible(&genpd->mlock);
  65}
  66
  67static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
  68{
  69	return mutex_unlock(&genpd->mlock);
  70}
  71
  72static const struct genpd_lock_ops genpd_mtx_ops = {
  73	.lock = genpd_lock_mtx,
  74	.lock_nested = genpd_lock_nested_mtx,
  75	.lock_interruptible = genpd_lock_interruptible_mtx,
  76	.unlock = genpd_unlock_mtx,
  77};
  78
  79static void genpd_lock_spin(struct generic_pm_domain *genpd)
  80	__acquires(&genpd->slock)
  81{
  82	unsigned long flags;
  83
  84	spin_lock_irqsave(&genpd->slock, flags);
  85	genpd->lock_flags = flags;
  86}
  87
  88static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
  89					int depth)
  90	__acquires(&genpd->slock)
  91{
  92	unsigned long flags;
  93
  94	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
  95	genpd->lock_flags = flags;
  96}
  97
  98static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
  99	__acquires(&genpd->slock)
 100{
 101	unsigned long flags;
 102
 103	spin_lock_irqsave(&genpd->slock, flags);
 104	genpd->lock_flags = flags;
 105	return 0;
 106}
 107
 108static void genpd_unlock_spin(struct generic_pm_domain *genpd)
 109	__releases(&genpd->slock)
 110{
 111	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
 112}
 113
 114static const struct genpd_lock_ops genpd_spin_ops = {
 115	.lock = genpd_lock_spin,
 116	.lock_nested = genpd_lock_nested_spin,
 117	.lock_interruptible = genpd_lock_interruptible_spin,
 118	.unlock = genpd_unlock_spin,
 119};
 120
 121#define genpd_lock(p)			p->lock_ops->lock(p)
 122#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
 123#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
 124#define genpd_unlock(p)			p->lock_ops->unlock(p)
 125
 126#define genpd_status_on(genpd)		(genpd->status == GPD_STATE_ACTIVE)
 127#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
 128#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
 129#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
 130#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
 131#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
 132
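/*
 * The GENPD_FLAG_* bits tested above are set by the provider before calling
 * pm_genpd_init(). A minimal, hypothetical sketch of an IRQ-safe domain
 * (all names below are illustrative, not part of this file):
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.flags = GENPD_FLAG_IRQ_SAFE,
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 * GENPD_FLAG_IRQ_SAFE makes genpd_lock_init() pick the spinlock based
 * genpd_spin_ops instead of the mutex based genpd_mtx_ops.
 */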
 133static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
 134		const struct generic_pm_domain *genpd)
 135{
 136	bool ret;
 137
 138	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
 139
 140	/*
  141	 * Warn once if an IRQ safe device is attached to a no sleep domain, so
  142	 * as to indicate a suboptimal configuration for PM. For an always on
  143	 * domain this isn't the case, thus don't warn.
 144	 */
 145	if (ret && !genpd_is_always_on(genpd))
 146		dev_warn_once(dev, "PM domain %s will not be powered off\n",
 147				genpd->name);
 148
 149	return ret;
 150}
 151
 152static int genpd_runtime_suspend(struct device *dev);
 153
 154/*
 155 * Get the generic PM domain for a particular struct device.
 156 * This validates the struct device pointer, the PM domain pointer,
 157 * and checks that the PM domain pointer is a real generic PM domain.
 158 * Any failure results in NULL being returned.
 159 */
 160static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
 161{
 162	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
 163		return NULL;
 164
  165	/* A genpd always has its ->runtime_suspend() callback assigned. */
 166	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
 167		return pd_to_genpd(dev->pm_domain);
 168
 169	return NULL;
 170}
 171
 172/*
 173 * This should only be used where we are certain that the pm_domain
 174 * attached to the device is a genpd domain.
 175 */
 176static struct generic_pm_domain *dev_to_genpd(struct device *dev)
 177{
 178	if (IS_ERR_OR_NULL(dev->pm_domain))
 179		return ERR_PTR(-EINVAL);
 180
 181	return pd_to_genpd(dev->pm_domain);
 182}
 183
 184static int genpd_stop_dev(const struct generic_pm_domain *genpd,
 185			  struct device *dev)
 186{
 187	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
 188}
 189
 190static int genpd_start_dev(const struct generic_pm_domain *genpd,
 191			   struct device *dev)
 192{
 193	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
 194}
 195
 196static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 197{
 198	bool ret = false;
 199
 200	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
 201		ret = !!atomic_dec_and_test(&genpd->sd_count);
 202
 203	return ret;
 204}
 205
 206static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
 207{
 208	atomic_inc(&genpd->sd_count);
 209	smp_mb__after_atomic();
 210}
 211
 212#ifdef CONFIG_DEBUG_FS
 213static void genpd_update_accounting(struct generic_pm_domain *genpd)
 214{
 215	ktime_t delta, now;
 216
 217	now = ktime_get();
 218	delta = ktime_sub(now, genpd->accounting_time);
 219
 220	/*
 221	 * If genpd->status is active, it means we are just
 222	 * out of off and so update the idle time and vice
 223	 * versa.
 224	 */
 225	if (genpd->status == GPD_STATE_ACTIVE) {
 226		int state_idx = genpd->state_idx;
 227
 228		genpd->states[state_idx].idle_time =
 229			ktime_add(genpd->states[state_idx].idle_time, delta);
 230	} else {
 231		genpd->on_time = ktime_add(genpd->on_time, delta);
 232	}
 233
 234	genpd->accounting_time = now;
 235}
 236#else
 237static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
 238#endif
 239
 240static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
 241					   unsigned int state)
 242{
 243	struct generic_pm_domain_data *pd_data;
 244	struct pm_domain_data *pdd;
 245	struct gpd_link *link;
 246
 247	/* New requested state is same as Max requested state */
 248	if (state == genpd->performance_state)
 249		return state;
 250
 251	/* New requested state is higher than Max requested state */
 252	if (state > genpd->performance_state)
 253		return state;
 254
 255	/* Traverse all devices within the domain */
 256	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
 257		pd_data = to_gpd_data(pdd);
 258
 259		if (pd_data->performance_state > state)
 260			state = pd_data->performance_state;
 261	}
 262
 263	/*
 264	 * Traverse all sub-domains within the domain. This can be
 265	 * done without any additional locking as the link->performance_state
 266	 * field is protected by the parent genpd->lock, which is already taken.
 267	 *
 268	 * Also note that link->performance_state (subdomain's performance state
 269	 * requirement to parent domain) is different from
 270	 * link->child->performance_state (current performance state requirement
 271	 * of the devices/sub-domains of the subdomain) and so can have a
 272	 * different value.
 273	 *
  274	 * Note that we also take the votes from powered-off sub-domains into
  275	 * account, as the same is done for devices right now.
 276	 */
 277	list_for_each_entry(link, &genpd->parent_links, parent_node) {
 278		if (link->performance_state > state)
 279			state = link->performance_state;
 280	}
 281
 282	return state;
 283}
 284
 285static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
 286					unsigned int state, int depth)
 287{
 288	struct generic_pm_domain *parent;
 289	struct gpd_link *link;
 290	int parent_state, ret;
 291
 292	if (state == genpd->performance_state)
 293		return 0;
 294
 295	/* Propagate to parents of genpd */
 296	list_for_each_entry(link, &genpd->child_links, child_node) {
 297		parent = link->parent;
 298
 299		if (!parent->set_performance_state)
 300			continue;
 301
 302		/* Find parent's performance state */
 303		ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
 304							 parent->opp_table,
 305							 state);
 306		if (unlikely(ret < 0))
 307			goto err;
 308
 309		parent_state = ret;
 310
 311		genpd_lock_nested(parent, depth + 1);
 312
 313		link->prev_performance_state = link->performance_state;
 314		link->performance_state = parent_state;
 315		parent_state = _genpd_reeval_performance_state(parent,
 316						parent_state);
 317		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
 318		if (ret)
 319			link->performance_state = link->prev_performance_state;
 320
 321		genpd_unlock(parent);
 322
 323		if (ret)
 324			goto err;
 325	}
 326
 327	ret = genpd->set_performance_state(genpd, state);
 328	if (ret)
 329		goto err;
 330
 331	genpd->performance_state = state;
 332	return 0;
 333
 334err:
  335	/* Encountered an error, let's roll back */
 336	list_for_each_entry_continue_reverse(link, &genpd->child_links,
 337					     child_node) {
 338		parent = link->parent;
 339
 340		if (!parent->set_performance_state)
 341			continue;
 342
 343		genpd_lock_nested(parent, depth + 1);
 344
 345		parent_state = link->prev_performance_state;
 346		link->performance_state = parent_state;
 347
 348		parent_state = _genpd_reeval_performance_state(parent,
 349						parent_state);
 350		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
 351			pr_err("%s: Failed to roll back to %d performance state\n",
 352			       parent->name, parent_state);
 353		}
 354
 355		genpd_unlock(parent);
 356	}
 357
 358	return ret;
 359}
 360
 361/**
  362 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 363 * domain.
 364 *
 365 * @dev: Device for which the performance-state needs to be set.
  366 * @state: Target performance state of the device. This can be set as 0 when the
  367 *	   device doesn't have any performance state constraints left (and so
  368 *	   the device no longer takes part in determining the target
  369 *	   performance state of the genpd).
 370 *
  371 * It is assumed that the caller guarantees that the genpd won't be detached
  372 * while this routine is running.
 373 *
 374 * Returns 0 on success and negative error values on failures.
 375 */
 376int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
 377{
 378	struct generic_pm_domain *genpd;
 379	struct generic_pm_domain_data *gpd_data;
 380	unsigned int prev;
 381	int ret;
 382
 383	genpd = dev_to_genpd_safe(dev);
 384	if (!genpd)
 385		return -ENODEV;
 386
 387	if (unlikely(!genpd->set_performance_state))
 388		return -EINVAL;
 389
 390	if (WARN_ON(!dev->power.subsys_data ||
 391		     !dev->power.subsys_data->domain_data))
 392		return -EINVAL;
 393
 394	genpd_lock(genpd);
 395
 396	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
 397	prev = gpd_data->performance_state;
 398	gpd_data->performance_state = state;
 399
 400	state = _genpd_reeval_performance_state(genpd, state);
 401	ret = _genpd_set_performance_state(genpd, state, 0);
 402	if (ret)
 403		gpd_data->performance_state = prev;
 404
 405	genpd_unlock(genpd);
 406
 407	return ret;
 408}
 409EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
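/*
 * A hedged usage sketch for the API above, assuming the device is already
 * attached to a genpd that implements ->set_performance_state(); the state
 * values are provider specific and the numbers below are illustrative only:
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, 2);
 *	if (ret)
 *		return ret;
 *
 *	... do work that needs the higher performance level ...
 *
 *	dev_pm_genpd_set_performance_state(dev, 0);	// drop the constraint
 */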
 410
 411static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 412{
 413	unsigned int state_idx = genpd->state_idx;
 414	ktime_t time_start;
 415	s64 elapsed_ns;
 416	int ret;
 417
 418	if (!genpd->power_on)
 419		return 0;
 420
 421	if (!timed)
 422		return genpd->power_on(genpd);
 423
 424	time_start = ktime_get();
 425	ret = genpd->power_on(genpd);
 426	if (ret)
 427		return ret;
 428
 429	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 430	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
 431		return ret;
 432
 433	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
 434	genpd->max_off_time_changed = true;
 435	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
 436		 genpd->name, "on", elapsed_ns);
 437
 438	return ret;
 439}
 440
 441static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 442{
 443	unsigned int state_idx = genpd->state_idx;
 444	ktime_t time_start;
 445	s64 elapsed_ns;
 446	int ret;
 447
 448	if (!genpd->power_off)
 449		return 0;
 450
 451	if (!timed)
 452		return genpd->power_off(genpd);
 453
 454	time_start = ktime_get();
 455	ret = genpd->power_off(genpd);
 456	if (ret)
 457		return ret;
 458
 459	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 460	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
 461		return 0;
 462
 463	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
 464	genpd->max_off_time_changed = true;
 465	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
 466		 genpd->name, "off", elapsed_ns);
 467
 468	return 0;
 469}
 470
 471/**
 472 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 473 * @genpd: PM domain to power off.
 474 *
 475 * Queue up the execution of genpd_power_off() unless it's already been done
 476 * before.
 477 */
 478static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 479{
 480	queue_work(pm_wq, &genpd->power_off_work);
 481}
 482
 483/**
 484 * genpd_power_off - Remove power from a given PM domain.
 485 * @genpd: PM domain to power down.
 486 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
  487 * RPM status of the related device is in an intermediate state, not yet turned
 488 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 489 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 490 *
 491 * If all of the @genpd's devices have been suspended and all of its subdomains
 492 * have been powered down, remove power from @genpd.
 493 */
 494static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
 495			   unsigned int depth)
 496{
 497	struct pm_domain_data *pdd;
 498	struct gpd_link *link;
 499	unsigned int not_suspended = 0;
 500
 501	/*
 502	 * Do not try to power off the domain in the following situations:
 503	 * (1) The domain is already in the "power off" state.
 504	 * (2) System suspend is in progress.
 505	 */
 506	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
 507		return 0;
 508
 509	/*
 510	 * Abort power off for the PM domain in the following situations:
 511	 * (1) The domain is configured as always on.
 512	 * (2) When the domain has a subdomain being powered on.
 513	 */
 514	if (genpd_is_always_on(genpd) ||
 515			genpd_is_rpm_always_on(genpd) ||
 516			atomic_read(&genpd->sd_count) > 0)
 517		return -EBUSY;
 518
 519	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
 520		enum pm_qos_flags_status stat;
 521
 522		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
 523		if (stat > PM_QOS_FLAGS_NONE)
 524			return -EBUSY;
 525
 526		/*
 527		 * Do not allow PM domain to be powered off, when an IRQ safe
 528		 * device is part of a non-IRQ safe domain.
 529		 */
 530		if (!pm_runtime_suspended(pdd->dev) ||
 531			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
 532			not_suspended++;
 533	}
 534
 535	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
 536		return -EBUSY;
 537
 538	if (genpd->gov && genpd->gov->power_down_ok) {
 539		if (!genpd->gov->power_down_ok(&genpd->domain))
 540			return -EAGAIN;
 541	}
 542
 543	/* Default to shallowest state. */
 544	if (!genpd->gov)
 545		genpd->state_idx = 0;
 546
 547	if (genpd->power_off) {
 548		int ret;
 549
 550		if (atomic_read(&genpd->sd_count) > 0)
 551			return -EBUSY;
 552
 553		/*
 554		 * If sd_count > 0 at this point, one of the subdomains hasn't
 555		 * managed to call genpd_power_on() for the parent yet after
 556		 * incrementing it.  In that case genpd_power_on() will wait
 557		 * for us to drop the lock, so we can call .power_off() and let
 558		 * the genpd_power_on() restore power for us (this shouldn't
 559		 * happen very often).
 560		 */
 561		ret = _genpd_power_off(genpd, true);
 562		if (ret)
 563			return ret;
 564	}
 565
 566	genpd->status = GPD_STATE_POWER_OFF;
 567	genpd_update_accounting(genpd);
 568
 569	list_for_each_entry(link, &genpd->child_links, child_node) {
 570		genpd_sd_counter_dec(link->parent);
 571		genpd_lock_nested(link->parent, depth + 1);
 572		genpd_power_off(link->parent, false, depth + 1);
 573		genpd_unlock(link->parent);
 574	}
 575
 576	return 0;
 577}
 578
 579/**
 580 * genpd_power_on - Restore power to a given PM domain and its parents.
 581 * @genpd: PM domain to power up.
 582 * @depth: nesting count for lockdep.
 583 *
 584 * Restore power to @genpd and all of its parents so that it is possible to
 585 * resume a device belonging to it.
 586 */
 587static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
 588{
 589	struct gpd_link *link;
 590	int ret = 0;
 591
 592	if (genpd_status_on(genpd))
 593		return 0;
 594
 595	/*
 596	 * The list is guaranteed not to change while the loop below is being
 597	 * executed, unless one of the parents' .power_on() callbacks fiddles
 598	 * with it.
 599	 */
 600	list_for_each_entry(link, &genpd->child_links, child_node) {
 601		struct generic_pm_domain *parent = link->parent;
 602
 603		genpd_sd_counter_inc(parent);
 604
 605		genpd_lock_nested(parent, depth + 1);
 606		ret = genpd_power_on(parent, depth + 1);
 607		genpd_unlock(parent);
 608
 609		if (ret) {
 610			genpd_sd_counter_dec(parent);
 611			goto err;
 612		}
 613	}
 614
 615	ret = _genpd_power_on(genpd, true);
 616	if (ret)
 617		goto err;
 618
 619	genpd->status = GPD_STATE_ACTIVE;
 620	genpd_update_accounting(genpd);
 621
 622	return 0;
 623
 624 err:
 625	list_for_each_entry_continue_reverse(link,
 626					&genpd->child_links,
 627					child_node) {
 628		genpd_sd_counter_dec(link->parent);
 629		genpd_lock_nested(link->parent, depth + 1);
 630		genpd_power_off(link->parent, false, depth + 1);
 631		genpd_unlock(link->parent);
 632	}
 633
 634	return ret;
 635}
 636
 637static int genpd_dev_pm_start(struct device *dev)
 638{
 639	struct generic_pm_domain *genpd = dev_to_genpd(dev);
 640
 641	return genpd_start_dev(genpd, dev);
 642}
 643
 644static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 645				     unsigned long val, void *ptr)
 646{
 647	struct generic_pm_domain_data *gpd_data;
 648	struct device *dev;
 649
 650	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
 651	dev = gpd_data->base.dev;
 652
 653	for (;;) {
 654		struct generic_pm_domain *genpd;
 655		struct pm_domain_data *pdd;
 656
 657		spin_lock_irq(&dev->power.lock);
 658
 659		pdd = dev->power.subsys_data ?
 660				dev->power.subsys_data->domain_data : NULL;
 661		if (pdd) {
 662			to_gpd_data(pdd)->td.constraint_changed = true;
 663			genpd = dev_to_genpd(dev);
 664		} else {
 665			genpd = ERR_PTR(-ENODATA);
 666		}
 667
 668		spin_unlock_irq(&dev->power.lock);
 669
 670		if (!IS_ERR(genpd)) {
 671			genpd_lock(genpd);
 672			genpd->max_off_time_changed = true;
 673			genpd_unlock(genpd);
 674		}
 675
 676		dev = dev->parent;
 677		if (!dev || dev->power.ignore_children)
 678			break;
 679	}
 680
 681	return NOTIFY_DONE;
 682}
 683
 684/**
 685 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 686 * @work: Work structure used for scheduling the execution of this function.
 687 */
 688static void genpd_power_off_work_fn(struct work_struct *work)
 689{
 690	struct generic_pm_domain *genpd;
 691
 692	genpd = container_of(work, struct generic_pm_domain, power_off_work);
 693
 694	genpd_lock(genpd);
 695	genpd_power_off(genpd, false, 0);
 696	genpd_unlock(genpd);
 697}
 698
 699/**
 700 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 701 * @dev: Device to handle.
 702 */
 703static int __genpd_runtime_suspend(struct device *dev)
 704{
 705	int (*cb)(struct device *__dev);
 706
 707	if (dev->type && dev->type->pm)
 708		cb = dev->type->pm->runtime_suspend;
 709	else if (dev->class && dev->class->pm)
 710		cb = dev->class->pm->runtime_suspend;
 711	else if (dev->bus && dev->bus->pm)
 712		cb = dev->bus->pm->runtime_suspend;
 713	else
 714		cb = NULL;
 715
 716	if (!cb && dev->driver && dev->driver->pm)
 717		cb = dev->driver->pm->runtime_suspend;
 718
 719	return cb ? cb(dev) : 0;
 720}
 721
 722/**
 723 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 724 * @dev: Device to handle.
 725 */
 726static int __genpd_runtime_resume(struct device *dev)
 727{
 728	int (*cb)(struct device *__dev);
 729
 730	if (dev->type && dev->type->pm)
 731		cb = dev->type->pm->runtime_resume;
 732	else if (dev->class && dev->class->pm)
 733		cb = dev->class->pm->runtime_resume;
 734	else if (dev->bus && dev->bus->pm)
 735		cb = dev->bus->pm->runtime_resume;
 736	else
 737		cb = NULL;
 738
 739	if (!cb && dev->driver && dev->driver->pm)
 740		cb = dev->driver->pm->runtime_resume;
 741
 742	return cb ? cb(dev) : 0;
 743}
 744
 745/**
 746 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 747 * @dev: Device to suspend.
 748 *
 749 * Carry out a runtime suspend of a device under the assumption that its
 750 * pm_domain field points to the domain member of an object of type
 751 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 752 */
 753static int genpd_runtime_suspend(struct device *dev)
 754{
 755	struct generic_pm_domain *genpd;
 756	bool (*suspend_ok)(struct device *__dev);
 757	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
 758	bool runtime_pm = pm_runtime_enabled(dev);
 759	ktime_t time_start;
 760	s64 elapsed_ns;
 761	int ret;
 762
 763	dev_dbg(dev, "%s()\n", __func__);
 764
 765	genpd = dev_to_genpd(dev);
 766	if (IS_ERR(genpd))
 767		return -EINVAL;
 768
 769	/*
 770	 * A runtime PM centric subsystem/driver may re-use the runtime PM
 771	 * callbacks for other purposes than runtime PM. In those scenarios
 772	 * runtime PM is disabled. Under these circumstances, we shall skip
 773	 * validating/measuring the PM QoS latency.
 774	 */
 775	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
 776	if (runtime_pm && suspend_ok && !suspend_ok(dev))
 777		return -EBUSY;
 778
 779	/* Measure suspend latency. */
 780	time_start = 0;
 781	if (runtime_pm)
 782		time_start = ktime_get();
 783
 784	ret = __genpd_runtime_suspend(dev);
 785	if (ret)
 786		return ret;
 787
 788	ret = genpd_stop_dev(genpd, dev);
 789	if (ret) {
 790		__genpd_runtime_resume(dev);
 791		return ret;
 792	}
 793
 794	/* Update suspend latency value if the measured time exceeds it. */
 795	if (runtime_pm) {
 796		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 797		if (elapsed_ns > td->suspend_latency_ns) {
 798			td->suspend_latency_ns = elapsed_ns;
 799			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
 800				elapsed_ns);
 801			genpd->max_off_time_changed = true;
 802			td->constraint_changed = true;
 803		}
 804	}
 805
 806	/*
 807	 * If power.irq_safe is set, this routine may be run with
 808	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
 809	 */
 810	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
 811		return 0;
 812
 813	genpd_lock(genpd);
 814	genpd_power_off(genpd, true, 0);
 815	genpd_unlock(genpd);
 816
 817	return 0;
 818}
 819
 820/**
 821 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 822 * @dev: Device to resume.
 823 *
 824 * Carry out a runtime resume of a device under the assumption that its
 825 * pm_domain field points to the domain member of an object of type
 826 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 827 */
 828static int genpd_runtime_resume(struct device *dev)
 829{
 830	struct generic_pm_domain *genpd;
 831	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
 832	bool runtime_pm = pm_runtime_enabled(dev);
 833	ktime_t time_start;
 834	s64 elapsed_ns;
 835	int ret;
 836	bool timed = true;
 837
 838	dev_dbg(dev, "%s()\n", __func__);
 839
 840	genpd = dev_to_genpd(dev);
 841	if (IS_ERR(genpd))
 842		return -EINVAL;
 843
 844	/*
  845	 * As we don't power off a non-IRQ safe domain that holds
  846	 * an IRQ safe device, we don't need to restore power to it.
 847	 */
 848	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
 849		timed = false;
 850		goto out;
 851	}
 852
 853	genpd_lock(genpd);
 854	ret = genpd_power_on(genpd, 0);
 855	genpd_unlock(genpd);
 856
 857	if (ret)
 858		return ret;
 859
 860 out:
 861	/* Measure resume latency. */
 862	time_start = 0;
 863	if (timed && runtime_pm)
 864		time_start = ktime_get();
 865
 866	ret = genpd_start_dev(genpd, dev);
 867	if (ret)
 868		goto err_poweroff;
 869
 870	ret = __genpd_runtime_resume(dev);
 871	if (ret)
 872		goto err_stop;
 873
 874	/* Update resume latency value if the measured time exceeds it. */
 875	if (timed && runtime_pm) {
 876		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 877		if (elapsed_ns > td->resume_latency_ns) {
 878			td->resume_latency_ns = elapsed_ns;
 879			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
 880				elapsed_ns);
 881			genpd->max_off_time_changed = true;
 882			td->constraint_changed = true;
 883		}
 884	}
 885
 886	return 0;
 887
 888err_stop:
 889	genpd_stop_dev(genpd, dev);
 890err_poweroff:
 891	if (!pm_runtime_is_irq_safe(dev) ||
 892		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
 893		genpd_lock(genpd);
 894		genpd_power_off(genpd, true, 0);
 895		genpd_unlock(genpd);
 896	}
 897
 898	return ret;
 899}
 900
 901static bool pd_ignore_unused;
 902static int __init pd_ignore_unused_setup(char *__unused)
 903{
 904	pd_ignore_unused = true;
 905	return 1;
 906}
 907__setup("pd_ignore_unused", pd_ignore_unused_setup);
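/*
 * In other words, booting with "pd_ignore_unused" on the kernel command line
 * makes genpd_power_off_unused() below bail out early, so power domains left
 * enabled by the bootloader but not used by any driver stay powered (handy
 * when debugging power-off related problems).
 */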
 908
 909/**
 910 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 911 */
 912static int __init genpd_power_off_unused(void)
 913{
 914	struct generic_pm_domain *genpd;
 915
 916	if (pd_ignore_unused) {
 917		pr_warn("genpd: Not disabling unused power domains\n");
 918		return 0;
 919	}
 920
 921	mutex_lock(&gpd_list_lock);
 922
 923	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
 924		genpd_queue_power_off_work(genpd);
 925
 926	mutex_unlock(&gpd_list_lock);
 927
 928	return 0;
 929}
 930late_initcall(genpd_power_off_unused);
 931
 932#ifdef CONFIG_PM_SLEEP
 933
 934/**
 935 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 936 * @genpd: PM domain to power off, if possible.
 937 * @use_lock: use the lock.
 938 * @depth: nesting count for lockdep.
 939 *
 940 * Check if the given PM domain can be powered off (during system suspend or
 941 * hibernation) and do that if so.  Also, in that case propagate to its parents.
 942 *
 943 * This function is only called in "noirq" and "syscore" stages of system power
 944 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 945 * these cases the lock must be held.
 946 */
 947static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
 948				 unsigned int depth)
 949{
 950	struct gpd_link *link;
 951
 952	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
 953		return;
 954
 955	if (genpd->suspended_count != genpd->device_count
 956	    || atomic_read(&genpd->sd_count) > 0)
 957		return;
 958
 959	/* Choose the deepest state when suspending */
 960	genpd->state_idx = genpd->state_count - 1;
 961	if (_genpd_power_off(genpd, false))
 962		return;
 963
 964	genpd->status = GPD_STATE_POWER_OFF;
 965
 966	list_for_each_entry(link, &genpd->child_links, child_node) {
 967		genpd_sd_counter_dec(link->parent);
 968
 969		if (use_lock)
 970			genpd_lock_nested(link->parent, depth + 1);
 971
 972		genpd_sync_power_off(link->parent, use_lock, depth + 1);
 973
 974		if (use_lock)
 975			genpd_unlock(link->parent);
 976	}
 977}
 978
 979/**
 980 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 981 * @genpd: PM domain to power on.
 982 * @use_lock: use the lock.
 983 * @depth: nesting count for lockdep.
 984 *
 985 * This function is only called in "noirq" and "syscore" stages of system power
 986 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 987 * these cases the lock must be held.
 988 */
 989static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
 990				unsigned int depth)
 991{
 992	struct gpd_link *link;
 993
 994	if (genpd_status_on(genpd))
 995		return;
 996
 997	list_for_each_entry(link, &genpd->child_links, child_node) {
 998		genpd_sd_counter_inc(link->parent);
 999
1000		if (use_lock)
1001			genpd_lock_nested(link->parent, depth + 1);
1002
1003		genpd_sync_power_on(link->parent, use_lock, depth + 1);
1004
1005		if (use_lock)
1006			genpd_unlock(link->parent);
1007	}
1008
1009	_genpd_power_on(genpd, false);
1010
1011	genpd->status = GPD_STATE_ACTIVE;
1012}
1013
1014/**
1015 * resume_needed - Check whether to resume a device before system suspend.
1016 * @dev: Device to check.
1017 * @genpd: PM domain the device belongs to.
1018 *
1019 * There are two cases in which a device that can wake up the system from sleep
1020 * states should be resumed by genpd_prepare(): (1) if the device is enabled
1021 * to wake up the system and it has to remain active for this purpose while the
1022 * system is in the sleep state and (2) if the device is not enabled to wake up
1023 * the system from sleep states and it generally doesn't generate wakeup signals
1024 * by itself (those signals are generated on its behalf by other parts of the
1025 * system).  In the latter case it may be necessary to reconfigure the device's
1026 * wakeup settings during system suspend, because it may have been set up to
1027 * signal remote wakeup from the system's working state as needed by runtime PM.
1028 * Return 'true' in either of the above cases.
1029 */
1030static bool resume_needed(struct device *dev,
1031			  const struct generic_pm_domain *genpd)
1032{
1033	bool active_wakeup;
1034
1035	if (!device_can_wakeup(dev))
1036		return false;
1037
1038	active_wakeup = genpd_is_active_wakeup(genpd);
1039	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
1040}
1041
1042/**
1043 * genpd_prepare - Start power transition of a device in a PM domain.
1044 * @dev: Device to start the transition of.
1045 *
1046 * Start a power transition of a device (during a system-wide power transition)
1047 * under the assumption that its pm_domain field points to the domain member of
1048 * an object of type struct generic_pm_domain representing a PM domain
1049 * consisting of I/O devices.
1050 */
1051static int genpd_prepare(struct device *dev)
1052{
1053	struct generic_pm_domain *genpd;
1054	int ret;
1055
1056	dev_dbg(dev, "%s()\n", __func__);
1057
1058	genpd = dev_to_genpd(dev);
1059	if (IS_ERR(genpd))
1060		return -EINVAL;
1061
1062	/*
1063	 * If a wakeup request is pending for the device, it should be woken up
1064	 * at this point and a system wakeup event should be reported if it's
1065	 * set up to wake up the system from sleep states.
1066	 */
1067	if (resume_needed(dev, genpd))
1068		pm_runtime_resume(dev);
1069
1070	genpd_lock(genpd);
1071
1072	if (genpd->prepared_count++ == 0)
1073		genpd->suspended_count = 0;
1074
1075	genpd_unlock(genpd);
1076
1077	ret = pm_generic_prepare(dev);
1078	if (ret < 0) {
1079		genpd_lock(genpd);
1080
1081		genpd->prepared_count--;
1082
1083		genpd_unlock(genpd);
1084	}
1085
 1086	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
1087	return ret >= 0 ? 0 : ret;
1088}
1089
1090/**
1091 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1092 *   I/O pm domain.
1093 * @dev: Device to suspend.
1094 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
1095 *
1096 * Stop the device and remove power from the domain if all devices in it have
1097 * been stopped.
1098 */
1099static int genpd_finish_suspend(struct device *dev, bool poweroff)
1100{
1101	struct generic_pm_domain *genpd;
1102	int ret = 0;
1103
1104	genpd = dev_to_genpd(dev);
1105	if (IS_ERR(genpd))
1106		return -EINVAL;
1107
1108	if (poweroff)
1109		ret = pm_generic_poweroff_noirq(dev);
1110	else
1111		ret = pm_generic_suspend_noirq(dev);
1112	if (ret)
1113		return ret;
1114
1115	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
1116		return 0;
1117
1118	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1119	    !pm_runtime_status_suspended(dev)) {
1120		ret = genpd_stop_dev(genpd, dev);
1121		if (ret) {
1122			if (poweroff)
1123				pm_generic_restore_noirq(dev);
1124			else
1125				pm_generic_resume_noirq(dev);
1126			return ret;
1127		}
1128	}
1129
1130	genpd_lock(genpd);
1131	genpd->suspended_count++;
1132	genpd_sync_power_off(genpd, true, 0);
1133	genpd_unlock(genpd);
1134
1135	return 0;
1136}
1137
1138/**
1139 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1140 * @dev: Device to suspend.
1141 *
1142 * Stop the device and remove power from the domain if all devices in it have
1143 * been stopped.
1144 */
1145static int genpd_suspend_noirq(struct device *dev)
1146{
1147	dev_dbg(dev, "%s()\n", __func__);
1148
1149	return genpd_finish_suspend(dev, false);
1150}
1151
1152/**
1153 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1154 * @dev: Device to resume.
1155 *
1156 * Restore power to the device's PM domain, if necessary, and start the device.
1157 */
1158static int genpd_resume_noirq(struct device *dev)
1159{
1160	struct generic_pm_domain *genpd;
1161	int ret;
1162
1163	dev_dbg(dev, "%s()\n", __func__);
1164
1165	genpd = dev_to_genpd(dev);
1166	if (IS_ERR(genpd))
1167		return -EINVAL;
1168
1169	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
1170		return pm_generic_resume_noirq(dev);
1171
1172	genpd_lock(genpd);
1173	genpd_sync_power_on(genpd, true, 0);
1174	genpd->suspended_count--;
1175	genpd_unlock(genpd);
1176
1177	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1178	    !pm_runtime_status_suspended(dev)) {
1179		ret = genpd_start_dev(genpd, dev);
1180		if (ret)
1181			return ret;
1182	}
1183
1184	return pm_generic_resume_noirq(dev);
1185}
1186
1187/**
1188 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1189 * @dev: Device to freeze.
1190 *
1191 * Carry out a late freeze of a device under the assumption that its
1192 * pm_domain field points to the domain member of an object of type
1193 * struct generic_pm_domain representing a power domain consisting of I/O
1194 * devices.
1195 */
1196static int genpd_freeze_noirq(struct device *dev)
1197{
1198	const struct generic_pm_domain *genpd;
1199	int ret = 0;
1200
1201	dev_dbg(dev, "%s()\n", __func__);
1202
1203	genpd = dev_to_genpd(dev);
1204	if (IS_ERR(genpd))
1205		return -EINVAL;
1206
1207	ret = pm_generic_freeze_noirq(dev);
1208	if (ret)
1209		return ret;
1210
1211	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1212	    !pm_runtime_status_suspended(dev))
1213		ret = genpd_stop_dev(genpd, dev);
1214
1215	return ret;
1216}
1217
1218/**
1219 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1220 * @dev: Device to thaw.
1221 *
1222 * Start the device, unless power has been removed from the domain already
1223 * before the system transition.
1224 */
1225static int genpd_thaw_noirq(struct device *dev)
1226{
1227	const struct generic_pm_domain *genpd;
1228	int ret = 0;
1229
1230	dev_dbg(dev, "%s()\n", __func__);
1231
1232	genpd = dev_to_genpd(dev);
1233	if (IS_ERR(genpd))
1234		return -EINVAL;
1235
1236	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1237	    !pm_runtime_status_suspended(dev)) {
1238		ret = genpd_start_dev(genpd, dev);
1239		if (ret)
1240			return ret;
1241	}
1242
1243	return pm_generic_thaw_noirq(dev);
1244}
1245
1246/**
1247 * genpd_poweroff_noirq - Completion of hibernation of device in an
1248 *   I/O PM domain.
1249 * @dev: Device to poweroff.
1250 *
1251 * Stop the device and remove power from the domain if all devices in it have
1252 * been stopped.
1253 */
1254static int genpd_poweroff_noirq(struct device *dev)
1255{
1256	dev_dbg(dev, "%s()\n", __func__);
1257
1258	return genpd_finish_suspend(dev, true);
1259}
1260
1261/**
1262 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1263 * @dev: Device to resume.
1264 *
1265 * Make sure the domain will be in the same power state as before the
1266 * hibernation the system is resuming from and start the device if necessary.
1267 */
1268static int genpd_restore_noirq(struct device *dev)
1269{
1270	struct generic_pm_domain *genpd;
1271	int ret = 0;
1272
1273	dev_dbg(dev, "%s()\n", __func__);
1274
1275	genpd = dev_to_genpd(dev);
1276	if (IS_ERR(genpd))
1277		return -EINVAL;
1278
1279	/*
1280	 * At this point suspended_count == 0 means we are being run for the
1281	 * first time for the given domain in the present cycle.
1282	 */
1283	genpd_lock(genpd);
1284	if (genpd->suspended_count++ == 0)
1285		/*
 1286		 * The boot kernel might put the domain into an arbitrary state,
1287		 * so make it appear as powered off to genpd_sync_power_on(),
1288		 * so that it tries to power it on in case it was really off.
1289		 */
1290		genpd->status = GPD_STATE_POWER_OFF;
1291
1292	genpd_sync_power_on(genpd, true, 0);
1293	genpd_unlock(genpd);
1294
1295	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1296	    !pm_runtime_status_suspended(dev)) {
1297		ret = genpd_start_dev(genpd, dev);
1298		if (ret)
1299			return ret;
1300	}
1301
1302	return pm_generic_restore_noirq(dev);
1303}
1304
1305/**
1306 * genpd_complete - Complete power transition of a device in a power domain.
1307 * @dev: Device to complete the transition of.
1308 *
1309 * Complete a power transition of a device (during a system-wide power
1310 * transition) under the assumption that its pm_domain field points to the
1311 * domain member of an object of type struct generic_pm_domain representing
1312 * a power domain consisting of I/O devices.
1313 */
1314static void genpd_complete(struct device *dev)
1315{
1316	struct generic_pm_domain *genpd;
1317
1318	dev_dbg(dev, "%s()\n", __func__);
1319
1320	genpd = dev_to_genpd(dev);
1321	if (IS_ERR(genpd))
1322		return;
1323
1324	pm_generic_complete(dev);
1325
1326	genpd_lock(genpd);
1327
1328	genpd->prepared_count--;
1329	if (!genpd->prepared_count)
1330		genpd_queue_power_off_work(genpd);
1331
1332	genpd_unlock(genpd);
1333}
1334
1335/**
1336 * genpd_syscore_switch - Switch power during system core suspend or resume.
1337 * @dev: Device that normally is marked as "always on" to switch power for.
1338 *
1339 * This routine may only be called during the system core (syscore) suspend or
1340 * resume phase for devices whose "always on" flags are set.
1341 */
1342static void genpd_syscore_switch(struct device *dev, bool suspend)
1343{
1344	struct generic_pm_domain *genpd;
1345
1346	genpd = dev_to_genpd_safe(dev);
1347	if (!genpd)
1348		return;
1349
1350	if (suspend) {
1351		genpd->suspended_count++;
1352		genpd_sync_power_off(genpd, false, 0);
1353	} else {
1354		genpd_sync_power_on(genpd, false, 0);
1355		genpd->suspended_count--;
1356	}
1357}
1358
1359void pm_genpd_syscore_poweroff(struct device *dev)
1360{
1361	genpd_syscore_switch(dev, true);
1362}
1363EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1364
1365void pm_genpd_syscore_poweron(struct device *dev)
1366{
1367	genpd_syscore_switch(dev, false);
1368}
1369EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
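/*
 * A hypothetical sketch of a syscore-stage user of the two helpers above,
 * e.g. a timer whose device sits in an "always on" domain; all names below
 * are illustrative, not part of this file:
 *
 *	#include <linux/syscore_ops.h>
 *
 *	static struct device *my_timer_dev;
 *
 *	static int my_timer_syscore_suspend(void)
 *	{
 *		pm_genpd_syscore_poweroff(my_timer_dev);
 *		return 0;
 *	}
 *
 *	static void my_timer_syscore_resume(void)
 *	{
 *		pm_genpd_syscore_poweron(my_timer_dev);
 *	}
 *
 *	static struct syscore_ops my_timer_syscore_ops = {
 *		.suspend = my_timer_syscore_suspend,
 *		.resume = my_timer_syscore_resume,
 *	};
 *
 *	register_syscore_ops(&my_timer_syscore_ops);
 */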
1370
1371#else /* !CONFIG_PM_SLEEP */
1372
1373#define genpd_prepare		NULL
1374#define genpd_suspend_noirq	NULL
1375#define genpd_resume_noirq	NULL
1376#define genpd_freeze_noirq	NULL
1377#define genpd_thaw_noirq	NULL
1378#define genpd_poweroff_noirq	NULL
1379#define genpd_restore_noirq	NULL
1380#define genpd_complete		NULL
1381
1382#endif /* CONFIG_PM_SLEEP */
1383
1384static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
1385{
1386	struct generic_pm_domain_data *gpd_data;
1387	int ret;
1388
1389	ret = dev_pm_get_subsys_data(dev);
1390	if (ret)
1391		return ERR_PTR(ret);
1392
1393	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1394	if (!gpd_data) {
1395		ret = -ENOMEM;
1396		goto err_put;
1397	}
1398
1399	gpd_data->base.dev = dev;
1400	gpd_data->td.constraint_changed = true;
1401	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1402	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1403
1404	spin_lock_irq(&dev->power.lock);
1405
1406	if (dev->power.subsys_data->domain_data) {
1407		ret = -EINVAL;
1408		goto err_free;
1409	}
1410
1411	dev->power.subsys_data->domain_data = &gpd_data->base;
1412
1413	spin_unlock_irq(&dev->power.lock);
1414
1415	return gpd_data;
1416
1417 err_free:
1418	spin_unlock_irq(&dev->power.lock);
1419	kfree(gpd_data);
1420 err_put:
1421	dev_pm_put_subsys_data(dev);
1422	return ERR_PTR(ret);
1423}
1424
1425static void genpd_free_dev_data(struct device *dev,
1426				struct generic_pm_domain_data *gpd_data)
1427{
1428	spin_lock_irq(&dev->power.lock);
1429
1430	dev->power.subsys_data->domain_data = NULL;
1431
1432	spin_unlock_irq(&dev->power.lock);
1433
1434	kfree(gpd_data);
1435	dev_pm_put_subsys_data(dev);
1436}
1437
1438static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1439				 int cpu, bool set, unsigned int depth)
1440{
1441	struct gpd_link *link;
1442
1443	if (!genpd_is_cpu_domain(genpd))
1444		return;
1445
1446	list_for_each_entry(link, &genpd->child_links, child_node) {
1447		struct generic_pm_domain *parent = link->parent;
1448
1449		genpd_lock_nested(parent, depth + 1);
1450		genpd_update_cpumask(parent, cpu, set, depth + 1);
1451		genpd_unlock(parent);
1452	}
1453
1454	if (set)
1455		cpumask_set_cpu(cpu, genpd->cpus);
1456	else
1457		cpumask_clear_cpu(cpu, genpd->cpus);
1458}
1459
1460static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1461{
1462	if (cpu >= 0)
1463		genpd_update_cpumask(genpd, cpu, true, 0);
1464}
1465
1466static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1467{
1468	if (cpu >= 0)
1469		genpd_update_cpumask(genpd, cpu, false, 0);
1470}
1471
1472static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1473{
1474	int cpu;
1475
1476	if (!genpd_is_cpu_domain(genpd))
1477		return -1;
1478
1479	for_each_possible_cpu(cpu) {
1480		if (get_cpu_device(cpu) == dev)
1481			return cpu;
1482	}
1483
1484	return -1;
1485}
1486
1487static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1488			    struct device *base_dev)
1489{
1490	struct generic_pm_domain_data *gpd_data;
1491	int ret;
1492
1493	dev_dbg(dev, "%s()\n", __func__);
1494
1495	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1496		return -EINVAL;
1497
1498	gpd_data = genpd_alloc_dev_data(dev);
1499	if (IS_ERR(gpd_data))
1500		return PTR_ERR(gpd_data);
1501
1502	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1503
1504	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1505	if (ret)
1506		goto out;
1507
1508	genpd_lock(genpd);
1509
1510	genpd_set_cpumask(genpd, gpd_data->cpu);
1511	dev_pm_domain_set(dev, &genpd->domain);
1512
1513	genpd->device_count++;
1514	genpd->max_off_time_changed = true;
1515
1516	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1517
1518	genpd_unlock(genpd);
1519 out:
1520	if (ret)
1521		genpd_free_dev_data(dev, gpd_data);
1522	else
1523		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1524					DEV_PM_QOS_RESUME_LATENCY);
1525
1526	return ret;
1527}
1528
1529/**
1530 * pm_genpd_add_device - Add a device to an I/O PM domain.
1531 * @genpd: PM domain to add the device to.
1532 * @dev: Device to be added.
1533 */
1534int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1535{
1536	int ret;
1537
1538	mutex_lock(&gpd_list_lock);
1539	ret = genpd_add_device(genpd, dev, dev);
1540	mutex_unlock(&gpd_list_lock);
1541
1542	return ret;
1543}
1544EXPORT_SYMBOL_GPL(pm_genpd_add_device);
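/*
 * A minimal, hypothetical usage sketch for pm_genpd_add_device(), typically
 * run from SoC/provider code after pm_genpd_init() ("my_pd" and "pdev" are
 * illustrative names):
 *
 *	ret = pm_genpd_add_device(&my_pd, &pdev->dev);
 *	if (ret)
 *		dev_err(&pdev->dev, "failed to join PM domain: %d\n", ret);
 *
 * Once attached, runtime PM of the device drives genpd_runtime_suspend() and
 * genpd_runtime_resume() above, and thereby the power state of the domain.
 */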
1545
1546static int genpd_remove_device(struct generic_pm_domain *genpd,
1547			       struct device *dev)
1548{
1549	struct generic_pm_domain_data *gpd_data;
1550	struct pm_domain_data *pdd;
1551	int ret = 0;
1552
1553	dev_dbg(dev, "%s()\n", __func__);
1554
1555	pdd = dev->power.subsys_data->domain_data;
1556	gpd_data = to_gpd_data(pdd);
1557	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
1558				   DEV_PM_QOS_RESUME_LATENCY);
1559
1560	genpd_lock(genpd);
1561
1562	if (genpd->prepared_count > 0) {
1563		ret = -EAGAIN;
1564		goto out;
1565	}
1566
1567	genpd->device_count--;
1568	genpd->max_off_time_changed = true;
1569
1570	genpd_clear_cpumask(genpd, gpd_data->cpu);
1571	dev_pm_domain_set(dev, NULL);
1572
1573	list_del_init(&pdd->list_node);
1574
1575	genpd_unlock(genpd);
1576
1577	if (genpd->detach_dev)
1578		genpd->detach_dev(genpd, dev);
1579
1580	genpd_free_dev_data(dev, gpd_data);
1581
1582	return 0;
1583
1584 out:
1585	genpd_unlock(genpd);
1586	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
1587
1588	return ret;
1589}
1590
1591/**
1592 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1593 * @dev: Device to be removed.
1594 */
1595int pm_genpd_remove_device(struct device *dev)
1596{
1597	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
1598
1599	if (!genpd)
1600		return -EINVAL;
1601
1602	return genpd_remove_device(genpd, dev);
1603}
1604EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1605
1606static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1607			       struct generic_pm_domain *subdomain)
1608{
1609	struct gpd_link *link, *itr;
1610	int ret = 0;
1611
1612	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1613	    || genpd == subdomain)
1614		return -EINVAL;
1615
1616	/*
1617	 * If the domain can be powered on/off in an IRQ safe
1618	 * context, ensure that the subdomain can also be
1619	 * powered on/off in that context.
1620	 */
1621	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1622		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1623				genpd->name, subdomain->name);
1624		return -EINVAL;
1625	}
1626
1627	link = kzalloc(sizeof(*link), GFP_KERNEL);
1628	if (!link)
1629		return -ENOMEM;
1630
1631	genpd_lock(subdomain);
1632	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1633
1634	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1635		ret = -EINVAL;
1636		goto out;
1637	}
1638
1639	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
1640		if (itr->child == subdomain && itr->parent == genpd) {
1641			ret = -EINVAL;
1642			goto out;
1643		}
1644	}
1645
1646	link->parent = genpd;
1647	list_add_tail(&link->parent_node, &genpd->parent_links);
1648	link->child = subdomain;
1649	list_add_tail(&link->child_node, &subdomain->child_links);
1650	if (genpd_status_on(subdomain))
1651		genpd_sd_counter_inc(genpd);
1652
1653 out:
1654	genpd_unlock(genpd);
1655	genpd_unlock(subdomain);
1656	if (ret)
1657		kfree(link);
1658	return ret;
1659}
1660
1661/**
1662 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1663 * @genpd: Leader PM domain to add the subdomain to.
1664 * @subdomain: Subdomain to be added.
1665 */
1666int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1667			   struct generic_pm_domain *subdomain)
1668{
1669	int ret;
1670
1671	mutex_lock(&gpd_list_lock);
1672	ret = genpd_add_subdomain(genpd, subdomain);
1673	mutex_unlock(&gpd_list_lock);
1674
1675	return ret;
1676}
1677EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
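/*
 * A hedged sketch of building a two-level hierarchy with the helper above
 * (domain names are illustrative, both domains already initialized with
 * pm_genpd_init()):
 *
 *	ret = pm_genpd_add_subdomain(&soc_pd, &gpu_pd);
 *	if (ret)
 *		pr_err("failed to link gpu_pd under soc_pd: %d\n", ret);
 *
 * After this, powering on gpu_pd pins soc_pd via its sd_count, and soc_pd can
 * only be powered off once all of its subdomains are off again.
 */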
1678
1679/**
1680 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1681 * @genpd: Leader PM domain to remove the subdomain from.
1682 * @subdomain: Subdomain to be removed.
1683 */
1684int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1685			      struct generic_pm_domain *subdomain)
1686{
1687	struct gpd_link *l, *link;
1688	int ret = -EINVAL;
1689
1690	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1691		return -EINVAL;
1692
1693	genpd_lock(subdomain);
1694	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1695
1696	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
1697		pr_warn("%s: unable to remove subdomain %s\n",
1698			genpd->name, subdomain->name);
1699		ret = -EBUSY;
1700		goto out;
1701	}
1702
1703	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1704		if (link->child != subdomain)
1705			continue;
1706
1707		list_del(&link->parent_node);
1708		list_del(&link->child_node);
1709		kfree(link);
1710		if (genpd_status_on(subdomain))
1711			genpd_sd_counter_dec(genpd);
1712
1713		ret = 0;
1714		break;
1715	}
1716
1717out:
1718	genpd_unlock(genpd);
1719	genpd_unlock(subdomain);
1720
1721	return ret;
1722}
1723EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1724
1725static void genpd_free_default_power_state(struct genpd_power_state *states,
1726					   unsigned int state_count)
1727{
1728	kfree(states);
1729}
1730
1731static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1732{
1733	struct genpd_power_state *state;
1734
1735	state = kzalloc(sizeof(*state), GFP_KERNEL);
1736	if (!state)
1737		return -ENOMEM;
1738
1739	genpd->states = state;
1740	genpd->state_count = 1;
1741	genpd->free_states = genpd_free_default_power_state;
1742
1743	return 0;
1744}
1745
1746static void genpd_lock_init(struct generic_pm_domain *genpd)
1747{
1748	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1749		spin_lock_init(&genpd->slock);
1750		genpd->lock_ops = &genpd_spin_ops;
1751	} else {
1752		mutex_init(&genpd->mlock);
1753		genpd->lock_ops = &genpd_mtx_ops;
1754	}
1755}
1756
1757/**
1758 * pm_genpd_init - Initialize a generic I/O PM domain object.
1759 * @genpd: PM domain object to initialize.
1760 * @gov: PM domain governor to associate with the domain (may be NULL).
1761 * @is_off: Initial value of the domain's power_is_off field.
1762 *
1763 * Returns 0 on successful initialization, else a negative error code.
1764 */
1765int pm_genpd_init(struct generic_pm_domain *genpd,
1766		  struct dev_power_governor *gov, bool is_off)
1767{
1768	int ret;
1769
1770	if (IS_ERR_OR_NULL(genpd))
1771		return -EINVAL;
1772
1773	INIT_LIST_HEAD(&genpd->parent_links);
1774	INIT_LIST_HEAD(&genpd->child_links);
1775	INIT_LIST_HEAD(&genpd->dev_list);
1776	genpd_lock_init(genpd);
1777	genpd->gov = gov;
1778	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1779	atomic_set(&genpd->sd_count, 0);
1780	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1781	genpd->device_count = 0;
1782	genpd->max_off_time_ns = -1;
1783	genpd->max_off_time_changed = true;
1784	genpd->provider = NULL;
1785	genpd->has_provider = false;
1786	genpd->accounting_time = ktime_get();
1787	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1788	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1789	genpd->domain.ops.prepare = genpd_prepare;
1790	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
1791	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
1792	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
1793	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
1794	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
1795	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
1796	genpd->domain.ops.complete = genpd_complete;
1797	genpd->domain.start = genpd_dev_pm_start;
1798
1799	if (genpd->flags & GENPD_FLAG_PM_CLK) {
1800		genpd->dev_ops.stop = pm_clk_suspend;
1801		genpd->dev_ops.start = pm_clk_resume;
1802	}
1803
1804	/* Always-on domains must be powered on at initialization. */
1805	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
1806			!genpd_status_on(genpd))
1807		return -EINVAL;
1808
1809	if (genpd_is_cpu_domain(genpd) &&
1810	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
1811		return -ENOMEM;
1812
1813	/* Use only one "off" state if there were no states declared */
1814	if (genpd->state_count == 0) {
1815		ret = genpd_set_default_power_state(genpd);
1816		if (ret) {
1817			if (genpd_is_cpu_domain(genpd))
1818				free_cpumask_var(genpd->cpus);
1819			return ret;
1820		}
1821	} else if (!gov && genpd->state_count > 1) {
1822		pr_warn("%s: no governor for states\n", genpd->name);
1823	}
1824
1825	device_initialize(&genpd->dev);
1826	dev_set_name(&genpd->dev, "%s", genpd->name);
1827
1828	mutex_lock(&gpd_list_lock);
1829	list_add(&genpd->gpd_list_node, &gpd_list);
1830	mutex_unlock(&gpd_list_lock);
1831
1832	return 0;
1833}
1834EXPORT_SYMBOL_GPL(pm_genpd_init);
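/*
 * A minimal, hypothetical provider sketch for pm_genpd_init(); the callbacks
 * and names below are placeholders, not part of this driver:
 *
 *	static int my_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		// enable the power island, e.g. write a PMU register
 *		return 0;
 *	}
 *
 *	static int my_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		// disable the power island
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	// register the domain, initially powered off, without a governor
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 */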
1835
1836static int genpd_remove(struct generic_pm_domain *genpd)
1837{
1838	struct gpd_link *l, *link;
1839
1840	if (IS_ERR_OR_NULL(genpd))
1841		return -EINVAL;
1842
1843	genpd_lock(genpd);
1844
1845	if (genpd->has_provider) {
1846		genpd_unlock(genpd);
1847		pr_err("Provider present, unable to remove %s\n", genpd->name);
1848		return -EBUSY;
1849	}
1850
1851	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
1852		genpd_unlock(genpd);
1853		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
1854		return -EBUSY;
1855	}
1856
1857	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
1858		list_del(&link->parent_node);
1859		list_del(&link->child_node);
1860		kfree(link);
1861	}
1862
1863	list_del(&genpd->gpd_list_node);
1864	genpd_unlock(genpd);
1865	cancel_work_sync(&genpd->power_off_work);
1866	if (genpd_is_cpu_domain(genpd))
1867		free_cpumask_var(genpd->cpus);
1868	if (genpd->free_states)
1869		genpd->free_states(genpd->states, genpd->state_count);
1870
1871	pr_debug("%s: removed %s\n", __func__, genpd->name);
1872
1873	return 0;
1874}
1875
1876/**
1877 * pm_genpd_remove - Remove a generic I/O PM domain
1878 * @genpd: Pointer to PM domain that is to be removed.
1879 *
1880 * To remove the PM domain, this function:
1881 *  - Removes the PM domain as a subdomain to any parent domains,
1882 *    if it was added.
1883 *  - Removes the PM domain from the list of registered PM domains.
1884 *
 1885 * The PM domain will only be removed if the associated provider has
 1886 * been removed, it is not a parent to any other PM domain, and it has
 1887 * no devices associated with it.
1888 */
1889int pm_genpd_remove(struct generic_pm_domain *genpd)
1890{
1891	int ret;
1892
1893	mutex_lock(&gpd_list_lock);
1894	ret = genpd_remove(genpd);
1895	mutex_unlock(&gpd_list_lock);
1896
1897	return ret;
1898}
1899EXPORT_SYMBOL_GPL(pm_genpd_remove);
1900
1901#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1902
1903/*
1904 * Device Tree based PM domain providers.
1905 *
1906 * The code below implements generic device tree based PM domain providers that
1907 * bind device tree nodes with generic PM domains registered in the system.
1908 *
1909 * Any driver that registers generic PM domains and needs to support binding of
1910 * devices to these domains is supposed to register a PM domain provider, which
1911 * maps a PM domain specifier retrieved from the device tree to a PM domain.
1912 *
1913 * Two simple mapping functions have been provided for convenience:
1914 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1915 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
1916 *    index.
1917 */
1918
1919/**
1920 * struct of_genpd_provider - PM domain provider registration structure
1921 * @link: Entry in global list of PM domain providers
1922 * @node: Pointer to device tree node of PM domain provider
1923 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1924 *         into a PM domain.
1925 * @data: context pointer to be passed into @xlate callback
1926 */
1927struct of_genpd_provider {
1928	struct list_head link;
1929	struct device_node *node;
1930	genpd_xlate_t xlate;
1931	void *data;
1932};
1933
1934/* List of registered PM domain providers. */
1935static LIST_HEAD(of_genpd_providers);
1936/* Mutex to protect the list above. */
1937static DEFINE_MUTEX(of_genpd_mutex);
1938
1939/**
1940 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
1941 * @genpdspec: OF phandle args to map into a PM domain
1942 * @data: xlate function private data - pointer to struct generic_pm_domain
1943 *
1944 * This is a generic xlate function that can be used to model PM domains that
1945 * have their own device tree nodes. The private data of xlate function needs
1946 * to be a valid pointer to struct generic_pm_domain.
1947 */
1948static struct generic_pm_domain *genpd_xlate_simple(
1949					struct of_phandle_args *genpdspec,
1950					void *data)
1951{
1952	return data;
1953}
1954
1955/**
1956 * genpd_xlate_onecell() - Xlate function using a single index.
1957 * @genpdspec: OF phandle args to map into a PM domain
1958 * @data: xlate function private data - pointer to struct genpd_onecell_data
1959 *
1960 * This is a generic xlate function that can be used to model simple PM domain
1961 * controllers that have one device tree node and provide multiple PM domains.
1962 * A single cell is used as an index into an array of PM domains specified in
1963 * the genpd_onecell_data struct when registering the provider.
1964 */
1965static struct generic_pm_domain *genpd_xlate_onecell(
1966					struct of_phandle_args *genpdspec,
1967					void *data)
1968{
1969	struct genpd_onecell_data *genpd_data = data;
1970	unsigned int idx = genpdspec->args[0];
1971
1972	if (genpdspec->args_count != 1)
1973		return ERR_PTR(-EINVAL);
1974
1975	if (idx >= genpd_data->num_domains) {
1976		pr_err("%s: invalid domain index %u\n", __func__, idx);
1977		return ERR_PTR(-EINVAL);
1978	}
1979
1980	if (!genpd_data->domains[idx])
1981		return ERR_PTR(-ENOENT);
1982
1983	return genpd_data->domains[idx];
1984}
1985
1986/**
1987 * genpd_add_provider() - Register a PM domain provider for a node
1988 * @np: Device node pointer associated with the PM domain provider.
1989 * @xlate: Callback for decoding PM domain from phandle arguments.
1990 * @data: Context pointer for @xlate callback.
1991 */
1992static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
1993			      void *data)
1994{
1995	struct of_genpd_provider *cp;
1996
1997	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
1998	if (!cp)
1999		return -ENOMEM;
2000
2001	cp->node = of_node_get(np);
2002	cp->data = data;
2003	cp->xlate = xlate;
2004
2005	mutex_lock(&of_genpd_mutex);
2006	list_add(&cp->link, &of_genpd_providers);
2007	mutex_unlock(&of_genpd_mutex);
2008	pr_debug("Added domain provider from %pOF\n", np);
2009
2010	return 0;
2011}
2012
2013static bool genpd_present(const struct generic_pm_domain *genpd)
2014{
2015	const struct generic_pm_domain *gpd;
2016
2017	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
2018		if (gpd == genpd)
2019			return true;
2020	return false;
2021}
2022
2023/**
2024 * of_genpd_add_provider_simple() - Register a simple PM domain provider
2025 * @np: Device node pointer associated with the PM domain provider.
2026 * @genpd: Pointer to PM domain associated with the PM domain provider.
2027 */
2028int of_genpd_add_provider_simple(struct device_node *np,
2029				 struct generic_pm_domain *genpd)
2030{
2031	int ret = -EINVAL;
2032
2033	if (!np || !genpd)
2034		return -EINVAL;
2035
2036	mutex_lock(&gpd_list_lock);
2037
2038	if (!genpd_present(genpd))
2039		goto unlock;
2040
2041	genpd->dev.of_node = np;
2042
2043	/* Parse genpd OPP table */
2044	if (genpd->set_performance_state) {
2045		ret = dev_pm_opp_of_add_table(&genpd->dev);
2046		if (ret) {
2047			dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
2048				ret);
2049			goto unlock;
2050		}
2051
2052		/*
2053		 * Save table for faster processing while setting performance
2054		 * state.
2055		 */
2056		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2057		WARN_ON(!genpd->opp_table);
2058	}
2059
2060	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2061	if (ret) {
2062		if (genpd->set_performance_state) {
2063			dev_pm_opp_put_opp_table(genpd->opp_table);
2064			dev_pm_opp_of_remove_table(&genpd->dev);
2065		}
2066
2067		goto unlock;
2068	}
2069
2070	genpd->provider = &np->fwnode;
2071	genpd->has_provider = true;
2072
2073unlock:
2074	mutex_unlock(&gpd_list_lock);
2075
2076	return ret;
2077}
2078EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
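
/*
 * Illustrative sketch (not part of this file): how a platform driver might
 * register a single PM domain and expose it as a simple DT provider. The
 * domain name, the callbacks and the device-tree node "foo_pd_np" are
 * hypothetical; error handling is reduced to the essentials.
 */
static int foo_pd_power_on(struct generic_pm_domain *domain)
{
	/* Hypothetical: program the power controller here. */
	return 0;
}

static int foo_pd_power_off(struct generic_pm_domain *domain)
{
	return 0;
}

static struct generic_pm_domain foo_pd = {
	.name = "foo-power-domain",
	.power_on = foo_pd_power_on,
	.power_off = foo_pd_power_off,
};

static int foo_pd_setup(struct device_node *foo_pd_np)
{
	int ret;

	/* Register the domain with genpd, starting in the "off" state. */
	ret = pm_genpd_init(&foo_pd, NULL, true);
	if (ret)
		return ret;

	/* Let "power-domains = <&foo_pd_np>;" consumers find this domain. */
	ret = of_genpd_add_provider_simple(foo_pd_np, &foo_pd);
	if (ret)
		pm_genpd_remove(&foo_pd);

	return ret;
}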
2079
2080/**
2081 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2082 * @np: Device node pointer associated with the PM domain provider.
2083 * @data: Pointer to the data associated with the PM domain provider.
2084 */
2085int of_genpd_add_provider_onecell(struct device_node *np,
2086				  struct genpd_onecell_data *data)
2087{
2088	struct generic_pm_domain *genpd;
2089	unsigned int i;
2090	int ret = -EINVAL;
2091
2092	if (!np || !data)
2093		return -EINVAL;
2094
2095	mutex_lock(&gpd_list_lock);
2096
2097	if (!data->xlate)
2098		data->xlate = genpd_xlate_onecell;
2099
2100	for (i = 0; i < data->num_domains; i++) {
2101		genpd = data->domains[i];
2102
2103		if (!genpd)
2104			continue;
2105		if (!genpd_present(genpd))
2106			goto error;
2107
2108		genpd->dev.of_node = np;
2109
2110		/* Parse genpd OPP table */
2111		if (genpd->set_performance_state) {
2112			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2113			if (ret) {
2114				dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
2115					i, ret);
2116				goto error;
2117			}
2118
2119			/*
2120			 * Save table for faster processing while setting
2121			 * performance state.
2122			 */
2123			genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
2124			WARN_ON(!genpd->opp_table);
2125		}
2126
2127		genpd->provider = &np->fwnode;
2128		genpd->has_provider = true;
2129	}
2130
2131	ret = genpd_add_provider(np, data->xlate, data);
2132	if (ret < 0)
2133		goto error;
2134
2135	mutex_unlock(&gpd_list_lock);
2136
2137	return 0;
2138
2139error:
2140	while (i--) {
2141		genpd = data->domains[i];
2142
2143		if (!genpd)
2144			continue;
2145
2146		genpd->provider = NULL;
2147		genpd->has_provider = false;
2148
2149		if (genpd->set_performance_state) {
2150			dev_pm_opp_put_opp_table(genpd->opp_table);
2151			dev_pm_opp_of_remove_table(&genpd->dev);
2152		}
2153	}
2154
2155	mutex_unlock(&gpd_list_lock);
2156
2157	return ret;
2158}
2159EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
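
/*
 * Illustrative sketch (not part of this file): registering several domains
 * behind one provider node, so consumers can reference them by index with
 * "power-domains = <&foo_pd_np N>;". The "foo_domains" array and the node
 * pointer are hypothetical; each non-NULL entry must already have been
 * initialized with pm_genpd_init() before the provider is registered.
 */
static struct generic_pm_domain *foo_domains[2];

static struct genpd_onecell_data foo_onecell_data = {
	.domains = foo_domains,
	.num_domains = ARRAY_SIZE(foo_domains),
	/* .xlate left NULL: genpd_xlate_onecell() is used by default. */
};

static int foo_pd_register_provider(struct device_node *foo_pd_np)
{
	return of_genpd_add_provider_onecell(foo_pd_np, &foo_onecell_data);
}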
2160
2161/**
2162 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2163 * @np: Device node pointer associated with the PM domain provider
2164 */
2165void of_genpd_del_provider(struct device_node *np)
2166{
2167	struct of_genpd_provider *cp, *tmp;
2168	struct generic_pm_domain *gpd;
2169
2170	mutex_lock(&gpd_list_lock);
2171	mutex_lock(&of_genpd_mutex);
2172	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2173		if (cp->node == np) {
2174			/*
2175			 * For each PM domain associated with the
2176			 * provider, set the 'has_provider' to false
2177			 * so that the PM domain can be safely removed.
2178			 */
2179			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2180				if (gpd->provider == &np->fwnode) {
2181					gpd->has_provider = false;
2182
2183					if (!gpd->set_performance_state)
2184						continue;
2185
2186					dev_pm_opp_put_opp_table(gpd->opp_table);
2187					dev_pm_opp_of_remove_table(&gpd->dev);
2188				}
2189			}
2190
2191			list_del(&cp->link);
2192			of_node_put(cp->node);
2193			kfree(cp);
2194			break;
2195		}
2196	}
2197	mutex_unlock(&of_genpd_mutex);
2198	mutex_unlock(&gpd_list_lock);
2199}
2200EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2201
2202/**
2203 * genpd_get_from_provider() - Look-up PM domain
2204 * @genpdspec: OF phandle args to use for look-up
2205 *
2206 * Looks for a PM domain provider under the node specified by @genpdspec and if
2207 * found, uses xlate function of the provider to map phandle args to a PM
2208 * domain.
2209 *
2210 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2211 * on failure.
2212 */
2213static struct generic_pm_domain *genpd_get_from_provider(
2214					struct of_phandle_args *genpdspec)
2215{
2216	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2217	struct of_genpd_provider *provider;
2218
2219	if (!genpdspec)
2220		return ERR_PTR(-EINVAL);
2221
2222	mutex_lock(&of_genpd_mutex);
2223
2224	/* Check if we have such a provider in our array */
2225	list_for_each_entry(provider, &of_genpd_providers, link) {
2226		if (provider->node == genpdspec->np)
2227			genpd = provider->xlate(genpdspec, provider->data);
2228		if (!IS_ERR(genpd))
2229			break;
2230	}
2231
2232	mutex_unlock(&of_genpd_mutex);
2233
2234	return genpd;
2235}
2236
2237/**
2238 * of_genpd_add_device() - Add a device to an I/O PM domain
2239 * @genpdspec: OF phandle args to use for look-up PM domain
2240 * @dev: Device to be added.
2241 *
2242 * Looks-up an I/O PM domain based upon phandle args provided and adds
2243 * the device to the PM domain. Returns a negative error code on failure.
2244 */
2245int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2246{
2247	struct generic_pm_domain *genpd;
2248	int ret;
2249
2250	mutex_lock(&gpd_list_lock);
2251
2252	genpd = genpd_get_from_provider(genpdspec);
2253	if (IS_ERR(genpd)) {
2254		ret = PTR_ERR(genpd);
2255		goto out;
2256	}
2257
2258	ret = genpd_add_device(genpd, dev, dev);
2259
2260out:
2261	mutex_unlock(&gpd_list_lock);
2262
2263	return ret;
2264}
2265EXPORT_SYMBOL_GPL(of_genpd_add_device);
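
/*
 * Illustrative sketch (not part of this file): resolving the "power-domains"
 * specifier of a consumer node by hand and adding the device to the domain.
 * Most drivers never do this directly; the driver core normally goes through
 * genpd_dev_pm_attach() below. All names are hypothetical.
 */
static int foo_add_dev_to_domain(struct device *dev)
{
	struct of_phandle_args pd_args;
	int ret;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					 "#power-domain-cells", 0, &pd_args);
	if (ret)
		return ret;

	ret = of_genpd_add_device(&pd_args, dev);
	of_node_put(pd_args.np);

	return ret;
}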
2266
2267/**
2268 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2269 * @parent_spec: OF phandle args to use for parent PM domain look-up
2270 * @subdomain_spec: OF phandle args to use for subdomain look-up
2271 *
2272 * Looks-up a parent PM domain and subdomain based upon phandle args
2273 * provided and adds the subdomain to the parent PM domain. Returns a
2274 * negative error code on failure.
2275 */
2276int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2277			   struct of_phandle_args *subdomain_spec)
2278{
2279	struct generic_pm_domain *parent, *subdomain;
2280	int ret;
2281
2282	mutex_lock(&gpd_list_lock);
2283
2284	parent = genpd_get_from_provider(parent_spec);
2285	if (IS_ERR(parent)) {
2286		ret = PTR_ERR(parent);
2287		goto out;
2288	}
2289
2290	subdomain = genpd_get_from_provider(subdomain_spec);
2291	if (IS_ERR(subdomain)) {
2292		ret = PTR_ERR(subdomain);
2293		goto out;
2294	}
2295
2296	ret = genpd_add_subdomain(parent, subdomain);
2297
2298out:
2299	mutex_unlock(&gpd_list_lock);
2300
2301	return ret;
2302}
2303EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
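
/*
 * Illustrative sketch (not part of this file): expressing a parent/child
 * relationship between two domains owned by different providers, using their
 * DT specifiers. Both of_phandle_args structures are assumed to have been
 * parsed already (e.g. with of_parse_phandle_with_args()); names are
 * hypothetical.
 */
static int foo_link_domains(struct of_phandle_args *parent_spec,
			    struct of_phandle_args *child_spec)
{
	/* The child must be powered off before its parent can power off. */
	return of_genpd_add_subdomain(parent_spec, child_spec);
}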
2304
2305/**
2306 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2307 * @parent_spec: OF phandle args to use for parent PM domain look-up
2308 * @subdomain_spec: OF phandle args to use for subdomain look-up
2309 *
2310 * Looks-up a parent PM domain and subdomain based upon phandle args
2311 * provided and removes the subdomain from the parent PM domain. Returns a
2312 * negative error code on failure.
2313 */
2314int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
2315			      struct of_phandle_args *subdomain_spec)
2316{
2317	struct generic_pm_domain *parent, *subdomain;
2318	int ret;
2319
2320	mutex_lock(&gpd_list_lock);
2321
2322	parent = genpd_get_from_provider(parent_spec);
2323	if (IS_ERR(parent)) {
2324		ret = PTR_ERR(parent);
2325		goto out;
2326	}
2327
2328	subdomain = genpd_get_from_provider(subdomain_spec);
2329	if (IS_ERR(subdomain)) {
2330		ret = PTR_ERR(subdomain);
2331		goto out;
2332	}
2333
2334	ret = pm_genpd_remove_subdomain(parent, subdomain);
2335
2336out:
2337	mutex_unlock(&gpd_list_lock);
2338
2339	return ret;
2340}
2341EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2342
2343/**
2344 * of_genpd_remove_last - Remove the last PM domain registered for a provider
2345 * @np: Device node pointer associated with the PM domain provider
2346 *
2347 * Find the last PM domain that was added by a particular provider and
2348 * remove this PM domain from the list of PM domains. The provider is
2349 * identified by the device node that is passed. The PM domain will only
2350 * be removed if the provider associated with the domain has been
2351 * removed.
2352 *
2353 * Returns a valid pointer to struct generic_pm_domain on success or
2354 * ERR_PTR() on failure.
2355 */
2356struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2357{
2358	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2359	int ret;
2360
2361	if (IS_ERR_OR_NULL(np))
2362		return ERR_PTR(-EINVAL);
2363
2364	mutex_lock(&gpd_list_lock);
2365	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2366		if (gpd->provider == &np->fwnode) {
2367			ret = genpd_remove(gpd);
2368			genpd = ret ? ERR_PTR(ret) : gpd;
2369			break;
2370		}
2371	}
2372	mutex_unlock(&gpd_list_lock);
2373
2374	return genpd;
2375}
2376EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2377
2378static void genpd_release_dev(struct device *dev)
2379{
2380	of_node_put(dev->of_node);
2381	kfree(dev);
2382}
2383
2384static struct bus_type genpd_bus_type = {
2385	.name		= "genpd",
2386};
2387
2388/**
2389 * genpd_dev_pm_detach - Detach a device from its PM domain.
2390 * @dev: Device to detach.
2391 * @power_off: Currently not used
2392 *
2393 * Try to locate a corresponding generic PM domain, which the device was
2394 * attached to previously. If such is found, the device is detached from it.
2395 */
2396static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2397{
2398	struct generic_pm_domain *pd;
2399	unsigned int i;
2400	int ret = 0;
2401
2402	pd = dev_to_genpd(dev);
2403	if (IS_ERR(pd))
2404		return;
2405
2406	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2407
2408	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2409		ret = genpd_remove_device(pd, dev);
2410		if (ret != -EAGAIN)
2411			break;
2412
2413		mdelay(i);
2414		cond_resched();
2415	}
2416
2417	if (ret < 0) {
2418		dev_err(dev, "failed to remove from PM domain %s: %d",
2419			pd->name, ret);
2420		return;
2421	}
2422
2423	/* Check if PM domain can be powered off after removing this device. */
2424	genpd_queue_power_off_work(pd);
2425
2426	/* Unregister the device if it was created by genpd. */
2427	if (dev->bus == &genpd_bus_type)
2428		device_unregister(dev);
2429}
2430
2431static void genpd_dev_pm_sync(struct device *dev)
2432{
2433	struct generic_pm_domain *pd;
2434
2435	pd = dev_to_genpd(dev);
2436	if (IS_ERR(pd))
2437		return;
2438
2439	genpd_queue_power_off_work(pd);
2440}
2441
2442static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2443				 unsigned int index, bool power_on)
2444{
2445	struct of_phandle_args pd_args;
2446	struct generic_pm_domain *pd;
2447	int ret;
2448
2449	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2450				"#power-domain-cells", index, &pd_args);
2451	if (ret < 0)
2452		return ret;
2453
2454	mutex_lock(&gpd_list_lock);
2455	pd = genpd_get_from_provider(&pd_args);
2456	of_node_put(pd_args.np);
2457	if (IS_ERR(pd)) {
2458		mutex_unlock(&gpd_list_lock);
2459		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2460			__func__, PTR_ERR(pd));
2461		return driver_deferred_probe_check_state(base_dev);
2462	}
2463
2464	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2465
2466	ret = genpd_add_device(pd, dev, base_dev);
2467	mutex_unlock(&gpd_list_lock);
2468
2469	if (ret < 0) {
2470		if (ret != -EPROBE_DEFER)
2471			dev_err(dev, "failed to add to PM domain %s: %d",
2472				pd->name, ret);
2473		return ret;
2474	}
2475
2476	dev->pm_domain->detach = genpd_dev_pm_detach;
2477	dev->pm_domain->sync = genpd_dev_pm_sync;
2478
2479	if (power_on) {
2480		genpd_lock(pd);
2481		ret = genpd_power_on(pd, 0);
2482		genpd_unlock(pd);
2483	}
2484
2485	if (ret)
2486		genpd_remove_device(pd, dev);
2487
2488	return ret ? -EPROBE_DEFER : 1;
2489}
2490
2491/**
2492 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2493 * @dev: Device to attach.
2494 *
2495 * Parse the device's OF node to find a PM domain specifier. If one is found,
2496 * attach the device to the retrieved pm_domain ops.
2497 *
2498 * Returns 1 when a PM domain has been attached successfully, 0 when the device
2499 * doesn't need a PM domain or when multiple power-domains exist for it, else a
2500 * negative error code. Note that if a power-domain exists for the device but
2501 * cannot be found or turned on, -EPROBE_DEFER is returned so that the device is
2502 * not probed and probing can be retried later.
2503 */
2504int genpd_dev_pm_attach(struct device *dev)
2505{
2506	if (!dev->of_node)
2507		return 0;
2508
2509	/*
2510	 * Devices with multiple PM domains must be attached separately, as we
2511	 * can only attach one PM domain per device.
2512	 */
2513	if (of_count_phandle_with_args(dev->of_node, "power-domains",
2514				       "#power-domain-cells") != 1)
2515		return 0;
2516
2517	return __genpd_dev_pm_attach(dev, dev, 0, true);
2518}
2519EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2520
2521/**
2522 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2523 * @dev: The device used to lookup the PM domain.
2524 * @index: The index of the PM domain.
2525 *
2526 * Parse device's OF node to find a PM domain specifier at the provided @index.
2527 * If such is found, creates a virtual device and attaches it to the retrieved
2528 * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
2529 * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
2530 *
2531 * Returns the created virtual device if a PM domain was attached successfully,
2532 * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
2533 * failures. If a power-domain exists for the device, but cannot be found or
2534 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
2535 * is not probed and to re-try again later.
2536 */
2537struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2538					 unsigned int index)
2539{
2540	struct device *virt_dev;
2541	int num_domains;
2542	int ret;
2543
2544	if (!dev->of_node)
2545		return NULL;
2546
2547	/* Verify that the index is within a valid range. */
2548	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2549						 "#power-domain-cells");
2550	if (index >= num_domains)
2551		return NULL;
2552
2553	/* Allocate and register device on the genpd bus. */
2554	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2555	if (!virt_dev)
2556		return ERR_PTR(-ENOMEM);
2557
2558	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2559	virt_dev->bus = &genpd_bus_type;
2560	virt_dev->release = genpd_release_dev;
2561	virt_dev->of_node = of_node_get(dev->of_node);
2562
2563	ret = device_register(virt_dev);
2564	if (ret) {
2565		put_device(virt_dev);
2566		return ERR_PTR(ret);
2567	}
2568
2569	/* Try to attach the device to the PM domain at the specified index. */
2570	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2571	if (ret < 1) {
2572		device_unregister(virt_dev);
2573		return ret ? ERR_PTR(ret) : NULL;
2574	}
2575
2576	pm_runtime_enable(virt_dev);
2577	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2578
2579	return virt_dev;
2580}
2581EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
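
/*
 * Illustrative sketch (not part of this file): a consumer driver with more
 * than one entry in its "power-domains" property attaches each domain
 * explicitly and ties its runtime PM state to the returned virtual devices
 * with device links. The two-domain layout and all names are hypothetical.
 */
static int foo_attach_domains(struct device *dev)
{
	struct device *pd_core, *pd_mem;

	pd_core = genpd_dev_pm_attach_by_id(dev, 0);
	if (IS_ERR_OR_NULL(pd_core))
		return pd_core ? PTR_ERR(pd_core) : -ENODEV;

	pd_mem = genpd_dev_pm_attach_by_id(dev, 1);
	if (IS_ERR_OR_NULL(pd_mem)) {
		dev_pm_domain_detach(pd_core, true);
		return pd_mem ? PTR_ERR(pd_mem) : -ENODEV;
	}

	/* Power the domains up and down together with the consumer. */
	device_link_add(dev, pd_core, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
	device_link_add(dev, pd_mem, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);

	return 0;
}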
2582
2583/**
2584 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2585 * @dev: The device used to lookup the PM domain.
2586 * @name: The name of the PM domain.
2587 *
2588 * Parse device's OF node to find a PM domain specifier using the
2589 * power-domain-names DT property. For further description see
2590 * genpd_dev_pm_attach_by_id().
2591 */
2592struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2593{
2594	int index;
2595
2596	if (!dev->of_node)
2597		return NULL;
2598
2599	index = of_property_match_string(dev->of_node, "power-domain-names",
2600					 name);
2601	if (index < 0)
2602		return NULL;
2603
2604	return genpd_dev_pm_attach_by_id(dev, index);
2605}
2606
2607static const struct of_device_id idle_state_match[] = {
2608	{ .compatible = "domain-idle-state", },
2609	{ }
2610};
2611
2612static int genpd_parse_state(struct genpd_power_state *genpd_state,
2613				    struct device_node *state_node)
2614{
2615	int err;
2616	u32 residency;
2617	u32 entry_latency, exit_latency;
2618
2619	err = of_property_read_u32(state_node, "entry-latency-us",
2620						&entry_latency);
2621	if (err) {
2622		pr_debug(" * %pOF missing entry-latency-us property\n",
2623			 state_node);
2624		return -EINVAL;
2625	}
2626
2627	err = of_property_read_u32(state_node, "exit-latency-us",
2628						&exit_latency);
2629	if (err) {
2630		pr_debug(" * %pOF missing exit-latency-us property\n",
2631			 state_node);
2632		return -EINVAL;
2633	}
2634
2635	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2636	if (!err)
2637		genpd_state->residency_ns = 1000 * residency;
2638
2639	genpd_state->power_on_latency_ns = 1000 * exit_latency;
2640	genpd_state->power_off_latency_ns = 1000 * entry_latency;
2641	genpd_state->fwnode = &state_node->fwnode;
2642
2643	return 0;
2644}
2645
2646static int genpd_iterate_idle_states(struct device_node *dn,
2647				     struct genpd_power_state *states)
2648{
2649	int ret;
2650	struct of_phandle_iterator it;
2651	struct device_node *np;
2652	int i = 0;
2653
2654	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2655	if (ret <= 0)
2656		return ret == -ENOENT ? 0 : ret;
2657
2658	/* Loop over the phandles until all the requested entries are found */
2659	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2660		np = it.node;
2661		if (!of_match_node(idle_state_match, np))
2662			continue;
2663		if (states) {
2664			ret = genpd_parse_state(&states[i], np);
2665			if (ret) {
2666				pr_err("Parsing idle state node %pOF failed with err %d\n",
2667				       np, ret);
2668				of_node_put(np);
2669				return ret;
2670			}
2671		}
2672		i++;
2673	}
2674
2675	return i;
2676}
2677
2678/**
2679 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2680 *
2681 * @dn: The genpd device node
2682 * @states: The pointer to which the state array will be saved.
2683 * @n: The count of elements in the array returned from this function.
2684 *
2685 * Returns the device states parsed from the OF node. The memory for the states
2686 * is allocated by this function, and it is the responsibility of the caller to
2687 * free it after use. If any or zero compatible domain idle states are found,
2688 * it returns 0; in case of errors, a negative error code is returned.
2689 */
2690int of_genpd_parse_idle_states(struct device_node *dn,
2691			struct genpd_power_state **states, int *n)
2692{
2693	struct genpd_power_state *st;
2694	int ret;
2695
2696	ret = genpd_iterate_idle_states(dn, NULL);
2697	if (ret < 0)
2698		return ret;
2699
2700	if (!ret) {
2701		*states = NULL;
2702		*n = 0;
2703		return 0;
2704	}
2705
2706	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
2707	if (!st)
2708		return -ENOMEM;
2709
2710	ret = genpd_iterate_idle_states(dn, st);
2711	if (ret <= 0) {
2712		kfree(st);
2713		return ret < 0 ? ret : -EINVAL;
2714	}
2715
2716	*states = st;
2717	*n = ret;
2718
2719	return 0;
2720}
2721EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
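
/*
 * Illustrative sketch (not part of this file): a provider driver pulling the
 * "domain-idle-states" described in DT into its generic_pm_domain before
 * registering it. "foo_pd" and "foo_pd_np" are hypothetical; the states
 * memory stays owned by the caller and must be freed when the domain goes.
 */
static int foo_pd_init_states(struct generic_pm_domain *foo_pd,
			      struct device_node *foo_pd_np)
{
	struct genpd_power_state *states;
	int nr_states, ret;

	ret = of_genpd_parse_idle_states(foo_pd_np, &states, &nr_states);
	if (ret)
		return ret;

	/* With no states in DT, pm_genpd_init() falls back to one "off" state. */
	if (nr_states) {
		foo_pd->states = states;
		foo_pd->state_count = nr_states;
	}

	return pm_genpd_init(foo_pd, NULL, true);
}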
2722
2723/**
2724 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
2725 *
2726 * @genpd_dev: Genpd's device for which the performance-state needs to be found.
2727 * @opp: struct dev_pm_opp of the OPP for which we need to find performance
2728 *	state.
2729 *
2730 * Returns the performance state encoded in the OPP of the genpd. This calls
2731 * the platform-specific genpd->opp_to_performance_state() callback to translate
2732 * the power domain OPP to a performance state.
2733 *
2734 * Returns performance state on success and 0 on failure.
2735 */
2736unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
2737					       struct dev_pm_opp *opp)
2738{
2739	struct generic_pm_domain *genpd = NULL;
2740	int state;
2741
2742	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
2743
2744	if (unlikely(!genpd->opp_to_performance_state))
2745		return 0;
2746
2747	genpd_lock(genpd);
2748	state = genpd->opp_to_performance_state(genpd, opp);
2749	genpd_unlock(genpd);
2750
2751	return state;
2752}
2753EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
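
/*
 * Illustrative sketch (not part of this file): a provider's
 * ->opp_to_performance_state() callback. When the genpd OPP table describes
 * its entries with "opp-level" in DT, the level can simply be returned via
 * dev_pm_opp_get_level(). The callback name is hypothetical.
 */
static unsigned int foo_pd_opp_to_performance_state(struct generic_pm_domain *genpd,
						    struct dev_pm_opp *opp)
{
	return dev_pm_opp_get_level(opp);
}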
2754
2755static int __init genpd_bus_init(void)
2756{
2757	return bus_register(&genpd_bus_type);
2758}
2759core_initcall(genpd_bus_init);
2760
2761#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2762
2763
2764/***        debugfs support        ***/
2765
2766#ifdef CONFIG_DEBUG_FS
2767#include <linux/pm.h>
2768#include <linux/device.h>
2769#include <linux/debugfs.h>
2770#include <linux/seq_file.h>
2771#include <linux/init.h>
2772#include <linux/kobject.h>
2773static struct dentry *genpd_debugfs_dir;
2774
2775/*
2776 * TODO: This function is a slightly modified version of rtpm_status_show
2777 * from sysfs.c, so generalize it.
2778 */
2779static void rtpm_status_str(struct seq_file *s, struct device *dev)
2780{
2781	static const char * const status_lookup[] = {
2782		[RPM_ACTIVE] = "active",
2783		[RPM_RESUMING] = "resuming",
2784		[RPM_SUSPENDED] = "suspended",
2785		[RPM_SUSPENDING] = "suspending"
2786	};
2787	const char *p = "";
2788
2789	if (dev->power.runtime_error)
2790		p = "error";
2791	else if (dev->power.disable_depth)
2792		p = "unsupported";
2793	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2794		p = status_lookup[dev->power.runtime_status];
2795	else
2796		WARN_ON(1);
2797
2798	seq_puts(s, p);
2799}
2800
2801static int genpd_summary_one(struct seq_file *s,
2802			struct generic_pm_domain *genpd)
2803{
2804	static const char * const status_lookup[] = {
2805		[GPD_STATE_ACTIVE] = "on",
2806		[GPD_STATE_POWER_OFF] = "off"
2807	};
2808	struct pm_domain_data *pm_data;
2809	const char *kobj_path;
2810	struct gpd_link *link;
2811	char state[16];
2812	int ret;
2813
2814	ret = genpd_lock_interruptible(genpd);
2815	if (ret)
2816		return -ERESTARTSYS;
2817
2818	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2819		goto exit;
2820	if (!genpd_status_on(genpd))
2821		snprintf(state, sizeof(state), "%s-%u",
2822			 status_lookup[genpd->status], genpd->state_idx);
2823	else
2824		snprintf(state, sizeof(state), "%s",
2825			 status_lookup[genpd->status]);
2826	seq_printf(s, "%-30s  %-15s ", genpd->name, state);
2827
2828	/*
2829	 * Modifications on the list require holding locks on both
2830	 * parent and child, so we are safe.
2831	 * Also genpd->name is immutable.
2832	 */
2833	list_for_each_entry(link, &genpd->parent_links, parent_node) {
2834		seq_printf(s, "%s", link->child->name);
2835		if (!list_is_last(&link->parent_node, &genpd->parent_links))
2836			seq_puts(s, ", ");
2837	}
2838
2839	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2840		kobj_path = kobject_get_path(&pm_data->dev->kobj,
2841				genpd_is_irq_safe(genpd) ?
2842				GFP_ATOMIC : GFP_KERNEL);
2843		if (kobj_path == NULL)
2844			continue;
2845
2846		seq_printf(s, "\n    %-50s  ", kobj_path);
2847		rtpm_status_str(s, pm_data->dev);
2848		kfree(kobj_path);
2849	}
2850
2851	seq_puts(s, "\n");
2852exit:
2853	genpd_unlock(genpd);
2854
2855	return 0;
2856}
2857
2858static int summary_show(struct seq_file *s, void *data)
2859{
2860	struct generic_pm_domain *genpd;
2861	int ret = 0;
2862
2863	seq_puts(s, "domain                          status          children\n");
2864	seq_puts(s, "    /device                                             runtime status\n");
2865	seq_puts(s, "----------------------------------------------------------------------\n");
2866
2867	ret = mutex_lock_interruptible(&gpd_list_lock);
2868	if (ret)
2869		return -ERESTARTSYS;
2870
2871	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2872		ret = genpd_summary_one(s, genpd);
2873		if (ret)
2874			break;
2875	}
2876	mutex_unlock(&gpd_list_lock);
2877
2878	return ret;
2879}
2880
2881static int status_show(struct seq_file *s, void *data)
2882{
2883	static const char * const status_lookup[] = {
2884		[GPD_STATE_ACTIVE] = "on",
2885		[GPD_STATE_POWER_OFF] = "off"
2886	};
2887
2888	struct generic_pm_domain *genpd = s->private;
2889	int ret = 0;
2890
2891	ret = genpd_lock_interruptible(genpd);
2892	if (ret)
2893		return -ERESTARTSYS;
2894
2895	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
2896		goto exit;
2897
2898	if (genpd->status == GPD_STATE_POWER_OFF)
2899		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
2900			genpd->state_idx);
2901	else
2902		seq_printf(s, "%s\n", status_lookup[genpd->status]);
2903exit:
2904	genpd_unlock(genpd);
2905	return ret;
2906}
2907
2908static int sub_domains_show(struct seq_file *s, void *data)
2909{
2910	struct generic_pm_domain *genpd = s->private;
2911	struct gpd_link *link;
2912	int ret = 0;
2913
2914	ret = genpd_lock_interruptible(genpd);
2915	if (ret)
2916		return -ERESTARTSYS;
2917
2918	list_for_each_entry(link, &genpd->parent_links, parent_node)
2919		seq_printf(s, "%s\n", link->child->name);
2920
2921	genpd_unlock(genpd);
2922	return ret;
2923}
2924
2925static int idle_states_show(struct seq_file *s, void *data)
2926{
2927	struct generic_pm_domain *genpd = s->private;
2928	unsigned int i;
2929	int ret = 0;
2930
2931	ret = genpd_lock_interruptible(genpd);
2932	if (ret)
2933		return -ERESTARTSYS;
2934
2935	seq_puts(s, "State          Time Spent(ms)\n");
2936
2937	for (i = 0; i < genpd->state_count; i++) {
2938		ktime_t delta = 0;
2939		s64 msecs;
2940
2941		if ((genpd->status == GPD_STATE_POWER_OFF) &&
2942				(genpd->state_idx == i))
2943			delta = ktime_sub(ktime_get(), genpd->accounting_time);
2944
2945		msecs = ktime_to_ms(
2946			ktime_add(genpd->states[i].idle_time, delta));
2947		seq_printf(s, "S%-13i %lld\n", i, msecs);
2948	}
2949
2950	genpd_unlock(genpd);
2951	return ret;
2952}
2953
2954static int active_time_show(struct seq_file *s, void *data)
2955{
2956	struct generic_pm_domain *genpd = s->private;
2957	ktime_t delta = 0;
2958	int ret = 0;
2959
2960	ret = genpd_lock_interruptible(genpd);
2961	if (ret)
2962		return -ERESTARTSYS;
2963
2964	if (genpd->status == GPD_STATE_ACTIVE)
2965		delta = ktime_sub(ktime_get(), genpd->accounting_time);
2966
2967	seq_printf(s, "%lld ms\n", ktime_to_ms(
2968				ktime_add(genpd->on_time, delta)));
2969
2970	genpd_unlock(genpd);
2971	return ret;
2972}
2973
2974static int total_idle_time_show(struct seq_file *s, void *data)
2975{
2976	struct generic_pm_domain *genpd = s->private;
2977	ktime_t delta = 0, total = 0;
2978	unsigned int i;
2979	int ret = 0;
2980
2981	ret = genpd_lock_interruptible(genpd);
2982	if (ret)
2983		return -ERESTARTSYS;
2984
2985	for (i = 0; i < genpd->state_count; i++) {
2986
2987		if ((genpd->status == GPD_STATE_POWER_OFF) &&
2988				(genpd->state_idx == i))
2989			delta = ktime_sub(ktime_get(), genpd->accounting_time);
2990
2991		total = ktime_add(total, genpd->states[i].idle_time);
2992	}
2993	total = ktime_add(total, delta);
2994
2995	seq_printf(s, "%lld ms\n", ktime_to_ms(total));
2996
2997	genpd_unlock(genpd);
2998	return ret;
2999}
3000
3001
3002static int devices_show(struct seq_file *s, void *data)
3003{
3004	struct generic_pm_domain *genpd = s->private;
3005	struct pm_domain_data *pm_data;
3006	const char *kobj_path;
3007	int ret = 0;
3008
3009	ret = genpd_lock_interruptible(genpd);
3010	if (ret)
3011		return -ERESTARTSYS;
3012
3013	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3014		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3015				genpd_is_irq_safe(genpd) ?
3016				GFP_ATOMIC : GFP_KERNEL);
3017		if (kobj_path == NULL)
3018			continue;
3019
3020		seq_printf(s, "%s\n", kobj_path);
3021		kfree(kobj_path);
3022	}
3023
3024	genpd_unlock(genpd);
3025	return ret;
3026}
3027
3028static int perf_state_show(struct seq_file *s, void *data)
3029{
3030	struct generic_pm_domain *genpd = s->private;
3031
3032	if (genpd_lock_interruptible(genpd))
3033		return -ERESTARTSYS;
3034
3035	seq_printf(s, "%u\n", genpd->performance_state);
3036
3037	genpd_unlock(genpd);
3038	return 0;
3039}
3040
3041DEFINE_SHOW_ATTRIBUTE(summary);
3042DEFINE_SHOW_ATTRIBUTE(status);
3043DEFINE_SHOW_ATTRIBUTE(sub_domains);
3044DEFINE_SHOW_ATTRIBUTE(idle_states);
3045DEFINE_SHOW_ATTRIBUTE(active_time);
3046DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3047DEFINE_SHOW_ATTRIBUTE(devices);
3048DEFINE_SHOW_ATTRIBUTE(perf_state);
3049
3050static int __init genpd_debug_init(void)
3051{
3052	struct dentry *d;
3053	struct generic_pm_domain *genpd;
3054
3055	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3056
3057	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3058			    NULL, &summary_fops);
3059
3060	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3061		d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3062
3063		debugfs_create_file("current_state", 0444,
3064				d, genpd, &status_fops);
3065		debugfs_create_file("sub_domains", 0444,
3066				d, genpd, &sub_domains_fops);
3067		debugfs_create_file("idle_states", 0444,
3068				d, genpd, &idle_states_fops);
3069		debugfs_create_file("active_time", 0444,
3070				d, genpd, &active_time_fops);
3071		debugfs_create_file("total_idle_time", 0444,
3072				d, genpd, &total_idle_time_fops);
3073		debugfs_create_file("devices", 0444,
3074				d, genpd, &devices_fops);
3075		if (genpd->set_performance_state)
3076			debugfs_create_file("perf_state", 0444,
3077					    d, genpd, &perf_state_fops);
3078	}
3079
3080	return 0;
3081}
3082late_initcall(genpd_debug_init);
3083
3084static void __exit genpd_debug_exit(void)
3085{
3086	debugfs_remove_recursive(genpd_debugfs_dir);
3087}
3088__exitcall(genpd_debug_exit);
3089#endif /* CONFIG_DEBUG_FS */
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/domain.c - Common code related to device power domains.
   4 *
   5 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
   6 */
   7#define pr_fmt(fmt) "PM: " fmt
   8
   9#include <linux/delay.h>
  10#include <linux/kernel.h>
  11#include <linux/io.h>
  12#include <linux/platform_device.h>
  13#include <linux/pm_opp.h>
  14#include <linux/pm_runtime.h>
  15#include <linux/pm_domain.h>
  16#include <linux/pm_qos.h>
  17#include <linux/pm_clock.h>
  18#include <linux/slab.h>
  19#include <linux/err.h>
  20#include <linux/sched.h>
  21#include <linux/suspend.h>
  22#include <linux/export.h>
  23#include <linux/cpu.h>
  24#include <linux/debugfs.h>
  25
  26#include "power.h"
  27
  28#define GENPD_RETRY_MAX_MS	250		/* Approximate */
  29
  30#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
  31({								\
  32	type (*__routine)(struct device *__d); 			\
  33	type __ret = (type)0;					\
  34								\
  35	__routine = genpd->dev_ops.callback; 			\
  36	if (__routine) {					\
  37		__ret = __routine(dev); 			\
  38	}							\
  39	__ret;							\
  40})
  41
  42static LIST_HEAD(gpd_list);
  43static DEFINE_MUTEX(gpd_list_lock);
  44
  45struct genpd_lock_ops {
  46	void (*lock)(struct generic_pm_domain *genpd);
  47	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
  48	int (*lock_interruptible)(struct generic_pm_domain *genpd);
  49	void (*unlock)(struct generic_pm_domain *genpd);
  50};
  51
  52static void genpd_lock_mtx(struct generic_pm_domain *genpd)
  53{
  54	mutex_lock(&genpd->mlock);
  55}
  56
  57static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
  58					int depth)
  59{
  60	mutex_lock_nested(&genpd->mlock, depth);
  61}
  62
  63static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
  64{
  65	return mutex_lock_interruptible(&genpd->mlock);
  66}
  67
  68static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
  69{
  70	return mutex_unlock(&genpd->mlock);
  71}
  72
  73static const struct genpd_lock_ops genpd_mtx_ops = {
  74	.lock = genpd_lock_mtx,
  75	.lock_nested = genpd_lock_nested_mtx,
  76	.lock_interruptible = genpd_lock_interruptible_mtx,
  77	.unlock = genpd_unlock_mtx,
  78};
  79
  80static void genpd_lock_spin(struct generic_pm_domain *genpd)
  81	__acquires(&genpd->slock)
  82{
  83	unsigned long flags;
  84
  85	spin_lock_irqsave(&genpd->slock, flags);
  86	genpd->lock_flags = flags;
  87}
  88
  89static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
  90					int depth)
  91	__acquires(&genpd->slock)
  92{
  93	unsigned long flags;
  94
  95	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
  96	genpd->lock_flags = flags;
  97}
  98
  99static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
 100	__acquires(&genpd->slock)
 101{
 102	unsigned long flags;
 103
 104	spin_lock_irqsave(&genpd->slock, flags);
 105	genpd->lock_flags = flags;
 106	return 0;
 107}
 108
 109static void genpd_unlock_spin(struct generic_pm_domain *genpd)
 110	__releases(&genpd->slock)
 111{
 112	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
 113}
 114
 115static const struct genpd_lock_ops genpd_spin_ops = {
 116	.lock = genpd_lock_spin,
 117	.lock_nested = genpd_lock_nested_spin,
 118	.lock_interruptible = genpd_lock_interruptible_spin,
 119	.unlock = genpd_unlock_spin,
 120};
 121
 122#define genpd_lock(p)			p->lock_ops->lock(p)
 123#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
 124#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
 125#define genpd_unlock(p)			p->lock_ops->unlock(p)
 126
 127#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
 128#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
 129#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
 130#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
 131#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
 132#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
 133
 134static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
 135		const struct generic_pm_domain *genpd)
 136{
 137	bool ret;
 138
 139	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
 140
 141	/*
  142	 * Warn once if an IRQ safe device is attached to a domain whose
  143	 * callbacks are allowed to sleep. This indicates a suboptimal
 144	 * configuration for PM, but it doesn't matter for an always on domain.
 145	 */
 146	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
 147		return ret;
 148
 149	if (ret)
 150		dev_warn_once(dev, "PM domain %s will not be powered off\n",
 151				genpd->name);
 152
 153	return ret;
 154}
 155
 156static int genpd_runtime_suspend(struct device *dev);
 157
 158/*
 159 * Get the generic PM domain for a particular struct device.
 160 * This validates the struct device pointer, the PM domain pointer,
 161 * and checks that the PM domain pointer is a real generic PM domain.
 162 * Any failure results in NULL being returned.
 163 */
 164static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
 165{
 166	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
 167		return NULL;
 168
  169	/* A genpd always has its ->runtime_suspend() callback assigned. */
 170	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
 171		return pd_to_genpd(dev->pm_domain);
 172
 173	return NULL;
 174}
 175
 176/*
 177 * This should only be used where we are certain that the pm_domain
 178 * attached to the device is a genpd domain.
 179 */
 180static struct generic_pm_domain *dev_to_genpd(struct device *dev)
 181{
 182	if (IS_ERR_OR_NULL(dev->pm_domain))
 183		return ERR_PTR(-EINVAL);
 184
 185	return pd_to_genpd(dev->pm_domain);
 186}
 187
 188static int genpd_stop_dev(const struct generic_pm_domain *genpd,
 189			  struct device *dev)
 190{
 191	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
 192}
 193
 194static int genpd_start_dev(const struct generic_pm_domain *genpd,
 195			   struct device *dev)
 196{
 197	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
 198}
 199
 200static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 201{
 202	bool ret = false;
 203
 204	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
 205		ret = !!atomic_dec_and_test(&genpd->sd_count);
 206
 207	return ret;
 208}
 209
 210static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
 211{
 212	atomic_inc(&genpd->sd_count);
 213	smp_mb__after_atomic();
 214}
 215
 216#ifdef CONFIG_DEBUG_FS
 217static struct dentry *genpd_debugfs_dir;
 218
 219static void genpd_debug_add(struct generic_pm_domain *genpd);
 220
 221static void genpd_debug_remove(struct generic_pm_domain *genpd)
 222{
 223	struct dentry *d;
 224
 225	if (!genpd_debugfs_dir)
 226		return;
 227
 228	d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
 229	debugfs_remove(d);
 230}
 231
 232static void genpd_update_accounting(struct generic_pm_domain *genpd)
 233{
 234	u64 delta, now;
 235
 236	now = ktime_get_mono_fast_ns();
 237	if (now <= genpd->accounting_time)
 238		return;
 239
 240	delta = now - genpd->accounting_time;
 241
 242	/*
  243	 * If genpd->status is active, we have just come out of the off
  244	 * state, so account the elapsed time as idle time; otherwise
  245	 * account it as on time.
 246	 */
 247	if (genpd->status == GENPD_STATE_ON)
 248		genpd->states[genpd->state_idx].idle_time += delta;
 249	else
 250		genpd->on_time += delta;
 251
 252	genpd->accounting_time = now;
 253}
 254#else
 255static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
 256static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
 257static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
 258#endif
 259
 260static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
 261					   unsigned int state)
 262{
 263	struct generic_pm_domain_data *pd_data;
 264	struct pm_domain_data *pdd;
 265	struct gpd_link *link;
 266
 267	/* New requested state is same as Max requested state */
 268	if (state == genpd->performance_state)
 269		return state;
 270
 271	/* New requested state is higher than Max requested state */
 272	if (state > genpd->performance_state)
 273		return state;
 274
 275	/* Traverse all devices within the domain */
 276	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
 277		pd_data = to_gpd_data(pdd);
 278
 279		if (pd_data->performance_state > state)
 280			state = pd_data->performance_state;
 281	}
 282
 283	/*
 284	 * Traverse all sub-domains within the domain. This can be
 285	 * done without any additional locking as the link->performance_state
 286	 * field is protected by the parent genpd->lock, which is already taken.
 287	 *
 288	 * Also note that link->performance_state (subdomain's performance state
 289	 * requirement to parent domain) is different from
 290	 * link->child->performance_state (current performance state requirement
 291	 * of the devices/sub-domains of the subdomain) and so can have a
 292	 * different value.
 293	 *
 294	 * Note that we also take vote from powered-off sub-domains into account
 295	 * as the same is done for devices right now.
 296	 */
 297	list_for_each_entry(link, &genpd->parent_links, parent_node) {
 298		if (link->performance_state > state)
 299			state = link->performance_state;
 300	}
 301
 302	return state;
 303}
 304
 305static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
 306					 struct generic_pm_domain *parent,
 307					 unsigned int pstate)
 308{
 309	if (!parent->set_performance_state)
 310		return pstate;
 311
 312	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
 313						  parent->opp_table,
 314						  pstate);
 315}
 316
 317static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
 318					unsigned int state, int depth)
 319{
 320	struct generic_pm_domain *parent;
 321	struct gpd_link *link;
 322	int parent_state, ret;
 323
 324	if (state == genpd->performance_state)
 325		return 0;
 326
 327	/* Propagate to parents of genpd */
 328	list_for_each_entry(link, &genpd->child_links, child_node) {
 329		parent = link->parent;
 330
 331		/* Find parent's performance state */
 332		ret = genpd_xlate_performance_state(genpd, parent, state);
 333		if (unlikely(ret < 0))
 334			goto err;
 335
 336		parent_state = ret;
 337
 338		genpd_lock_nested(parent, depth + 1);
 339
 340		link->prev_performance_state = link->performance_state;
 341		link->performance_state = parent_state;
 342		parent_state = _genpd_reeval_performance_state(parent,
 343						parent_state);
 344		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
 345		if (ret)
 346			link->performance_state = link->prev_performance_state;
 347
 348		genpd_unlock(parent);
 349
 350		if (ret)
 351			goto err;
 352	}
 353
 354	if (genpd->set_performance_state) {
 355		ret = genpd->set_performance_state(genpd, state);
 356		if (ret)
 357			goto err;
 358	}
 359
 360	genpd->performance_state = state;
 361	return 0;
 362
 363err:
  364	/* Encountered an error, let's roll back */
 365	list_for_each_entry_continue_reverse(link, &genpd->child_links,
 366					     child_node) {
 367		parent = link->parent;
 368
 369		genpd_lock_nested(parent, depth + 1);
 370
 371		parent_state = link->prev_performance_state;
 372		link->performance_state = parent_state;
 373
 374		parent_state = _genpd_reeval_performance_state(parent,
 375						parent_state);
 376		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
 377			pr_err("%s: Failed to roll back to %d performance state\n",
 378			       parent->name, parent_state);
 379		}
 380
 381		genpd_unlock(parent);
 382	}
 383
 384	return ret;
 385}
 386
 387static int genpd_set_performance_state(struct device *dev, unsigned int state)
 388{
 389	struct generic_pm_domain *genpd = dev_to_genpd(dev);
 390	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
 391	unsigned int prev_state;
 392	int ret;
 393
 394	prev_state = gpd_data->performance_state;
 395	if (prev_state == state)
 396		return 0;
 397
 398	gpd_data->performance_state = state;
 399	state = _genpd_reeval_performance_state(genpd, state);
 400
 401	ret = _genpd_set_performance_state(genpd, state, 0);
 402	if (ret)
 403		gpd_data->performance_state = prev_state;
 404
 405	return ret;
 406}
 407
 408static int genpd_drop_performance_state(struct device *dev)
 409{
 410	unsigned int prev_state = dev_gpd_data(dev)->performance_state;
 411
 412	if (!genpd_set_performance_state(dev, 0))
 413		return prev_state;
 414
 415	return 0;
 416}
 417
 418static void genpd_restore_performance_state(struct device *dev,
 419					    unsigned int state)
 420{
 421	if (state)
 422		genpd_set_performance_state(dev, state);
 423}
 424
 425/**
 426 * dev_pm_genpd_set_performance_state- Set performance state of device's power
 427 * domain.
 428 *
 429 * @dev: Device for which the performance-state needs to be set.
 430 * @state: Target performance state of the device. This can be set as 0 when the
  431 *	   device doesn't have any performance state constraints left (and so
  432 *	   the device no longer participates in determining the target
  433 *	   performance state of the genpd).
 434 *
 435 * It is assumed that the users guarantee that the genpd wouldn't be detached
 436 * while this routine is getting called.
 437 *
 438 * Returns 0 on success and negative error values on failures.
 439 */
 440int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
 441{
 442	struct generic_pm_domain *genpd;
 443	int ret = 0;
 444
 445	genpd = dev_to_genpd_safe(dev);
 446	if (!genpd)
 447		return -ENODEV;
 448
 449	if (WARN_ON(!dev->power.subsys_data ||
 450		     !dev->power.subsys_data->domain_data))
 451		return -EINVAL;
 452
 453	genpd_lock(genpd);
 454	if (pm_runtime_suspended(dev)) {
 455		dev_gpd_data(dev)->rpm_pstate = state;
 456	} else {
 457		ret = genpd_set_performance_state(dev, state);
 458		if (!ret)
 459			dev_gpd_data(dev)->rpm_pstate = 0;
 460	}
 461	genpd_unlock(genpd);
 462
 463	return ret;
 464}
 465EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
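
/*
 * Illustrative sketch (not part of this file): a consumer device raising and
 * dropping a performance-state request around a high-load phase. The state
 * value "3" is hypothetical and would normally come from an OPP/required-opps
 * translation rather than being hard-coded.
 */
static int foo_run_fast(struct device *dev)
{
	int ret;

	ret = dev_pm_genpd_set_performance_state(dev, 3);
	if (ret)
		return ret;

	/* ... perform the work that needs the higher domain state ... */

	/* Drop the request so the domain can scale back down. */
	return dev_pm_genpd_set_performance_state(dev, 0);
}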
 466
 467/**
 468 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 469 *
 470 * @dev: Device to handle
 471 * @next: impending interrupt/wakeup for the device
 472 *
 473 *
  474 * Allow devices to inform the PM framework of the next wakeup. It's assumed
  475 * that the users guarantee that the genpd wouldn't be detached while this
  476 * routine is getting called. Additionally, it's also assumed that @dev isn't
  477 * runtime suspended (RPM_SUSPENDED).
 478 * Although devices are expected to update the next_wakeup after the end of
 479 * their usecase as well, it is possible the devices themselves may not know
 480 * about that, so stale @next will be ignored when powering off the domain.
 481 */
 482void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
 483{
 484	struct generic_pm_domain *genpd;
 485	struct gpd_timing_data *td;
 486
 487	genpd = dev_to_genpd_safe(dev);
 488	if (!genpd)
 489		return;
 490
 491	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
 492	if (td)
 493		td->next_wakeup = next;
 494}
 495EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
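
/*
 * Illustrative sketch (not part of this file): hinting to the governor that
 * the device will need to be powered again shortly, so that overly deep
 * domain idle states can be avoided. The 2 ms horizon is hypothetical.
 */
static void foo_note_next_wakeup(struct device *dev)
{
	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 2));
}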
 496
 497/**
 498 * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
 499 * @dev: A device that is attached to the genpd.
 500 *
 501 * This routine should typically be called for a device, at the point of when a
 502 * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
 503 *
 504 * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
  505 * valid value has been set.
 506 */
 507ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
 508{
 509	struct generic_pm_domain *genpd;
 510
 511	genpd = dev_to_genpd_safe(dev);
 512	if (!genpd)
 513		return KTIME_MAX;
 514
 515	if (genpd->gd)
 516		return genpd->gd->next_hrtimer;
 517
 518	return KTIME_MAX;
 519}
 520EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
 521
 522static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 523{
 524	unsigned int state_idx = genpd->state_idx;
 525	ktime_t time_start;
 526	s64 elapsed_ns;
 527	int ret;
 528
 529	/* Notify consumers that we are about to power on. */
 530	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
 531					     GENPD_NOTIFY_PRE_ON,
 532					     GENPD_NOTIFY_OFF, NULL);
 533	ret = notifier_to_errno(ret);
 534	if (ret)
 535		return ret;
 536
 537	if (!genpd->power_on)
 538		goto out;
 539
 540	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
 541	if (!timed) {
 542		ret = genpd->power_on(genpd);
 543		if (ret)
 544			goto err;
 545
 546		goto out;
 547	}
 548
 549	time_start = ktime_get();
 550	ret = genpd->power_on(genpd);
 551	if (ret)
 552		goto err;
 553
 554	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 555	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
 556		goto out;
 557
 558	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
 559	genpd->gd->max_off_time_changed = true;
 560	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
 561		 genpd->name, "on", elapsed_ns);
 562
 563out:
 564	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
 565	return 0;
 566err:
 567	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
 568				NULL);
 569	return ret;
 570}
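
/*
 * Illustrative sketch (not part of this file): how a device attached to a
 * genpd can react to the notifications sent above, e.g. to save and restore
 * context that is lost when the domain powers off. dev_pm_genpd_add_notifier()
 * registers the block for the device's domain; all names are hypothetical.
 */
static int foo_genpd_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	switch (action) {
	case GENPD_NOTIFY_PRE_OFF:
		/* Save hardware context before the domain is cut. */
		break;
	case GENPD_NOTIFY_ON:
		/* Restore context now that power is back. */
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block foo_genpd_nb = {
	.notifier_call = foo_genpd_notifier,
};

static int foo_register_pd_notifier(struct device *dev)
{
	return dev_pm_genpd_add_notifier(dev, &foo_genpd_nb);
}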
 571
 572static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 573{
 574	unsigned int state_idx = genpd->state_idx;
 575	ktime_t time_start;
 576	s64 elapsed_ns;
 577	int ret;
 578
 579	/* Notify consumers that we are about to power off. */
 580	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
 581					     GENPD_NOTIFY_PRE_OFF,
 582					     GENPD_NOTIFY_ON, NULL);
 583	ret = notifier_to_errno(ret);
 584	if (ret)
 585		return ret;
 586
 587	if (!genpd->power_off)
 588		goto out;
 589
 590	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
 591	if (!timed) {
 592		ret = genpd->power_off(genpd);
 593		if (ret)
 594			goto busy;
 595
 596		goto out;
 597	}
 598
 599	time_start = ktime_get();
 600	ret = genpd->power_off(genpd);
 601	if (ret)
 602		goto busy;
 603
 604	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 605	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
 606		goto out;
 607
 608	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
 609	genpd->gd->max_off_time_changed = true;
 610	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
 611		 genpd->name, "off", elapsed_ns);
 612
 613out:
 614	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
 615				NULL);
 616	return 0;
 617busy:
 618	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
 619	return ret;
 620}
 621
 622/**
 623 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 624 * @genpd: PM domain to power off.
 625 *
 626 * Queue up the execution of genpd_power_off() unless it's already been done
 627 * before.
 628 */
 629static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 630{
 631	queue_work(pm_wq, &genpd->power_off_work);
 632}
 633
 634/**
 635 * genpd_power_off - Remove power from a given PM domain.
 636 * @genpd: PM domain to power down.
 637 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
  638 * RPM status of the related device is in an intermediate state, not yet turned
 639 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 640 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 641 * @depth: nesting count for lockdep.
 642 *
 643 * If all of the @genpd's devices have been suspended and all of its subdomains
 644 * have been powered down, remove power from @genpd.
 645 */
 646static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
 647			   unsigned int depth)
 648{
 649	struct pm_domain_data *pdd;
 650	struct gpd_link *link;
 651	unsigned int not_suspended = 0;
 652	int ret;
 653
 654	/*
 655	 * Do not try to power off the domain in the following situations:
 656	 * (1) The domain is already in the "power off" state.
 657	 * (2) System suspend is in progress.
 658	 */
 659	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
 660		return 0;
 661
 662	/*
 663	 * Abort power off for the PM domain in the following situations:
 664	 * (1) The domain is configured as always on.
 665	 * (2) When the domain has a subdomain being powered on.
 666	 */
 667	if (genpd_is_always_on(genpd) ||
 668			genpd_is_rpm_always_on(genpd) ||
 669			atomic_read(&genpd->sd_count) > 0)
 670		return -EBUSY;
 671
 672	/*
 673	 * The children must be in their deepest (powered-off) states to allow
  674	 * the parent to be powered off. Note that there's no need for
  675	 * additional locking, as powering on a child requires the parent's
  676	 * lock to be acquired first.
 677	 */
 678	list_for_each_entry(link, &genpd->parent_links, parent_node) {
 679		struct generic_pm_domain *child = link->child;
 680		if (child->state_idx < child->state_count - 1)
 681			return -EBUSY;
 682	}
 683
 684	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
 685		/*
 686		 * Do not allow PM domain to be powered off, when an IRQ safe
 687		 * device is part of a non-IRQ safe domain.
 688		 */
 689		if (!pm_runtime_suspended(pdd->dev) ||
 690			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
 691			not_suspended++;
 692	}
 693
 694	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
 695		return -EBUSY;
 696
 697	if (genpd->gov && genpd->gov->power_down_ok) {
 698		if (!genpd->gov->power_down_ok(&genpd->domain))
 699			return -EAGAIN;
 700	}
 701
 702	/* Default to shallowest state. */
 703	if (!genpd->gov)
 704		genpd->state_idx = 0;
 705
 706	/* Don't power off if a child domain is waiting to power on. */
 707	if (atomic_read(&genpd->sd_count) > 0)
 708		return -EBUSY;
 709
 710	ret = _genpd_power_off(genpd, true);
 711	if (ret) {
 712		genpd->states[genpd->state_idx].rejected++;
 713		return ret;
 714	}
 715
 716	genpd->status = GENPD_STATE_OFF;
 717	genpd_update_accounting(genpd);
 718	genpd->states[genpd->state_idx].usage++;
 719
 720	list_for_each_entry(link, &genpd->child_links, child_node) {
 721		genpd_sd_counter_dec(link->parent);
 722		genpd_lock_nested(link->parent, depth + 1);
 723		genpd_power_off(link->parent, false, depth + 1);
 724		genpd_unlock(link->parent);
 725	}
 726
 727	return 0;
 728}
 729
 730/**
 731 * genpd_power_on - Restore power to a given PM domain and its parents.
 732 * @genpd: PM domain to power up.
 733 * @depth: nesting count for lockdep.
 734 *
 735 * Restore power to @genpd and all of its parents so that it is possible to
 736 * resume a device belonging to it.
 737 */
 738static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
 739{
 740	struct gpd_link *link;
 741	int ret = 0;
 742
 743	if (genpd_status_on(genpd))
 744		return 0;
 745
 746	/*
 747	 * The list is guaranteed not to change while the loop below is being
 748	 * executed, unless one of the parents' .power_on() callbacks fiddles
 749	 * with it.
 750	 */
 751	list_for_each_entry(link, &genpd->child_links, child_node) {
 752		struct generic_pm_domain *parent = link->parent;
 753
 754		genpd_sd_counter_inc(parent);
 755
 756		genpd_lock_nested(parent, depth + 1);
 757		ret = genpd_power_on(parent, depth + 1);
 758		genpd_unlock(parent);
 759
 760		if (ret) {
 761			genpd_sd_counter_dec(parent);
 762			goto err;
 763		}
 764	}
 765
 766	ret = _genpd_power_on(genpd, true);
 767	if (ret)
 768		goto err;
 769
 770	genpd->status = GENPD_STATE_ON;
 771	genpd_update_accounting(genpd);
 772
 773	return 0;
 774
 775 err:
 776	list_for_each_entry_continue_reverse(link,
 777					&genpd->child_links,
 778					child_node) {
 779		genpd_sd_counter_dec(link->parent);
 780		genpd_lock_nested(link->parent, depth + 1);
 781		genpd_power_off(link->parent, false, depth + 1);
 782		genpd_unlock(link->parent);
 783	}
 784
 785	return ret;
 786}
 787
 788static int genpd_dev_pm_start(struct device *dev)
 789{
 790	struct generic_pm_domain *genpd = dev_to_genpd(dev);
 791
 792	return genpd_start_dev(genpd, dev);
 793}
 794
 795static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 796				     unsigned long val, void *ptr)
 797{
 798	struct generic_pm_domain_data *gpd_data;
 799	struct device *dev;
 800
 801	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
 802	dev = gpd_data->base.dev;
 803
 804	for (;;) {
 805		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
 806		struct pm_domain_data *pdd;
 807		struct gpd_timing_data *td;
 808
 809		spin_lock_irq(&dev->power.lock);
 810
 811		pdd = dev->power.subsys_data ?
 812				dev->power.subsys_data->domain_data : NULL;
 813		if (pdd) {
 814			td = to_gpd_data(pdd)->td;
 815			if (td) {
 816				td->constraint_changed = true;
 817				genpd = dev_to_genpd(dev);
 818			}
 819		}
 820
 821		spin_unlock_irq(&dev->power.lock);
 822
 823		if (!IS_ERR(genpd)) {
 824			genpd_lock(genpd);
 825			genpd->gd->max_off_time_changed = true;
 826			genpd_unlock(genpd);
 827		}
 828
 829		dev = dev->parent;
 830		if (!dev || dev->power.ignore_children)
 831			break;
 832	}
 833
 834	return NOTIFY_DONE;
 835}
 836
 837/**
 838 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 839 * @work: Work structure used for scheduling the execution of this function.
 840 */
 841static void genpd_power_off_work_fn(struct work_struct *work)
 842{
 843	struct generic_pm_domain *genpd;
 844
 845	genpd = container_of(work, struct generic_pm_domain, power_off_work);
 846
 847	genpd_lock(genpd);
 848	genpd_power_off(genpd, false, 0);
 849	genpd_unlock(genpd);
 850}
 851
 852/**
 853 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 854 * @dev: Device to handle.
 855 */
 856static int __genpd_runtime_suspend(struct device *dev)
 857{
 858	int (*cb)(struct device *__dev);
 859
 860	if (dev->type && dev->type->pm)
 861		cb = dev->type->pm->runtime_suspend;
 862	else if (dev->class && dev->class->pm)
 863		cb = dev->class->pm->runtime_suspend;
 864	else if (dev->bus && dev->bus->pm)
 865		cb = dev->bus->pm->runtime_suspend;
 866	else
 867		cb = NULL;
 868
 869	if (!cb && dev->driver && dev->driver->pm)
 870		cb = dev->driver->pm->runtime_suspend;
 871
 872	return cb ? cb(dev) : 0;
 873}
 874
 875/**
 876 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 877 * @dev: Device to handle.
 878 */
 879static int __genpd_runtime_resume(struct device *dev)
 880{
 881	int (*cb)(struct device *__dev);
 882
 883	if (dev->type && dev->type->pm)
 884		cb = dev->type->pm->runtime_resume;
 885	else if (dev->class && dev->class->pm)
 886		cb = dev->class->pm->runtime_resume;
 887	else if (dev->bus && dev->bus->pm)
 888		cb = dev->bus->pm->runtime_resume;
 889	else
 890		cb = NULL;
 891
 892	if (!cb && dev->driver && dev->driver->pm)
 893		cb = dev->driver->pm->runtime_resume;
 894
 895	return cb ? cb(dev) : 0;
 896}
 897
 898/**
 899 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 900 * @dev: Device to suspend.
 901 *
 902 * Carry out a runtime suspend of a device under the assumption that its
 903 * pm_domain field points to the domain member of an object of type
 904 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 905 */
 906static int genpd_runtime_suspend(struct device *dev)
 907{
 908	struct generic_pm_domain *genpd;
 909	bool (*suspend_ok)(struct device *__dev);
 910	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
 911	struct gpd_timing_data *td = gpd_data->td;
 912	bool runtime_pm = pm_runtime_enabled(dev);
 913	ktime_t time_start = 0;
 914	s64 elapsed_ns;
 915	int ret;
 916
 917	dev_dbg(dev, "%s()\n", __func__);
 918
 919	genpd = dev_to_genpd(dev);
 920	if (IS_ERR(genpd))
 921		return -EINVAL;
 922
 923	/*
 924	 * A runtime PM centric subsystem/driver may re-use the runtime PM
 925	 * callbacks for purposes other than runtime PM. In those scenarios
 926	 * runtime PM is disabled. Under these circumstances, we shall skip
 927	 * validating/measuring the PM QoS latency.
 928	 */
 929	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
 930	if (runtime_pm && suspend_ok && !suspend_ok(dev))
 931		return -EBUSY;
 932
 933	/* Measure suspend latency. */
 934	if (td && runtime_pm)
 935		time_start = ktime_get();
 936
 937	ret = __genpd_runtime_suspend(dev);
 938	if (ret)
 939		return ret;
 940
 941	ret = genpd_stop_dev(genpd, dev);
 942	if (ret) {
 943		__genpd_runtime_resume(dev);
 944		return ret;
 945	}
 946
 947	/* Update suspend latency value if the measured time exceeds it. */
 948	if (td && runtime_pm) {
 949		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 950		if (elapsed_ns > td->suspend_latency_ns) {
 951			td->suspend_latency_ns = elapsed_ns;
 952			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
 953				elapsed_ns);
 954			genpd->gd->max_off_time_changed = true;
 955			td->constraint_changed = true;
 956		}
 957	}
 958
 959	/*
 960	 * If power.irq_safe is set, this routine may be run with
 961	 * IRQs disabled, so suspend only if the PM domain is also irq_safe.
 962	 */
 963	if (irq_safe_dev_in_sleep_domain(dev, genpd))
 964		return 0;
 965
 966	genpd_lock(genpd);
 967	genpd_power_off(genpd, true, 0);
 968	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
 969	genpd_unlock(genpd);
 970
 971	return 0;
 972}
 973
 974/**
 975 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 976 * @dev: Device to resume.
 977 *
 978 * Carry out a runtime resume of a device under the assumption that its
 979 * pm_domain field points to the domain member of an object of type
 980 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 981 */
 982static int genpd_runtime_resume(struct device *dev)
 983{
 984	struct generic_pm_domain *genpd;
 985	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
 986	struct gpd_timing_data *td = gpd_data->td;
 987	bool timed = td && pm_runtime_enabled(dev);
 988	ktime_t time_start = 0;
 989	s64 elapsed_ns;
 990	int ret;
 991
 992	dev_dbg(dev, "%s()\n", __func__);
 993
 994	genpd = dev_to_genpd(dev);
 995	if (IS_ERR(genpd))
 996		return -EINVAL;
 997
 998	/*
 999	 * As we don't power off a non-IRQ-safe domain that holds
1000	 * an IRQ-safe device, we don't need to restore power to it.
1001	 */
1002	if (irq_safe_dev_in_sleep_domain(dev, genpd))
1003		goto out;
1004
1005	genpd_lock(genpd);
1006	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
1007	ret = genpd_power_on(genpd, 0);
1008	genpd_unlock(genpd);
1009
1010	if (ret)
1011		return ret;
1012
1013 out:
1014	/* Measure resume latency. */
1015	if (timed)
1016		time_start = ktime_get();
1017
1018	ret = genpd_start_dev(genpd, dev);
1019	if (ret)
1020		goto err_poweroff;
1021
1022	ret = __genpd_runtime_resume(dev);
1023	if (ret)
1024		goto err_stop;
1025
1026	/* Update resume latency value if the measured time exceeds it. */
1027	if (timed) {
1028		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1029		if (elapsed_ns > td->resume_latency_ns) {
1030			td->resume_latency_ns = elapsed_ns;
1031			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
1032				elapsed_ns);
1033			genpd->gd->max_off_time_changed = true;
1034			td->constraint_changed = true;
1035		}
1036	}
1037
1038	return 0;
1039
1040err_stop:
1041	genpd_stop_dev(genpd, dev);
1042err_poweroff:
1043	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
1044		genpd_lock(genpd);
1045		genpd_power_off(genpd, true, 0);
1046		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1047		genpd_unlock(genpd);
1048	}
1049
1050	return ret;
1051}
1052
1053static bool pd_ignore_unused;
1054static int __init pd_ignore_unused_setup(char *__unused)
1055{
1056	pd_ignore_unused = true;
1057	return 1;
1058}
1059__setup("pd_ignore_unused", pd_ignore_unused_setup);
1060
1061/**
1062 * genpd_power_off_unused - Power off all PM domains with no devices in use.
1063 */
1064static int __init genpd_power_off_unused(void)
1065{
1066	struct generic_pm_domain *genpd;
1067
1068	if (pd_ignore_unused) {
1069		pr_warn("genpd: Not disabling unused power domains\n");
1070		return 0;
1071	}
1072
1073	mutex_lock(&gpd_list_lock);
1074
1075	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1076		genpd_queue_power_off_work(genpd);
1077
1078	mutex_unlock(&gpd_list_lock);
1079
1080	return 0;
1081}
1082late_initcall(genpd_power_off_unused);
1083
1084#ifdef CONFIG_PM_SLEEP
1085
1086/**
1087 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
1088 * @genpd: PM domain to power off, if possible.
1089 * @use_lock: whether to hold the genpd locks when walking the parent domains.
1090 * @depth: nesting count for lockdep.
1091 *
1092 * Check if the given PM domain can be powered off (during system suspend or
1093 * hibernation) and do that if so.  Also, in that case propagate to its parents.
1094 *
1095 * This function is only called in "noirq" and "syscore" stages of system power
1096 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1097 * these cases the lock must be held.
1098 */
1099static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
1100				 unsigned int depth)
1101{
1102	struct gpd_link *link;
1103
1104	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
1105		return;
1106
1107	if (genpd->suspended_count != genpd->device_count
1108	    || atomic_read(&genpd->sd_count) > 0)
1109		return;
1110
1111	/* Check that the children are in their deepest (powered-off) state. */
1112	list_for_each_entry(link, &genpd->parent_links, parent_node) {
1113		struct generic_pm_domain *child = link->child;
1114		if (child->state_idx < child->state_count - 1)
1115			return;
1116	}
1117
1118	/* Choose the deepest state when suspending */
1119	genpd->state_idx = genpd->state_count - 1;
1120	if (_genpd_power_off(genpd, false))
1121		return;
1122
1123	genpd->status = GENPD_STATE_OFF;
1124
1125	list_for_each_entry(link, &genpd->child_links, child_node) {
1126		genpd_sd_counter_dec(link->parent);
1127
1128		if (use_lock)
1129			genpd_lock_nested(link->parent, depth + 1);
1130
1131		genpd_sync_power_off(link->parent, use_lock, depth + 1);
1132
1133		if (use_lock)
1134			genpd_unlock(link->parent);
1135	}
1136}
1137
1138/**
1139 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
1140 * @genpd: PM domain to power on.
1141 * @use_lock: whether to hold the genpd locks when walking the parent domains.
1142 * @depth: nesting count for lockdep.
1143 *
1144 * This function is only called in "noirq" and "syscore" stages of system power
1145 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1146 * these cases the lock must be held.
1147 */
1148static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1149				unsigned int depth)
1150{
1151	struct gpd_link *link;
1152
1153	if (genpd_status_on(genpd))
1154		return;
1155
1156	list_for_each_entry(link, &genpd->child_links, child_node) {
1157		genpd_sd_counter_inc(link->parent);
1158
1159		if (use_lock)
1160			genpd_lock_nested(link->parent, depth + 1);
1161
1162		genpd_sync_power_on(link->parent, use_lock, depth + 1);
1163
1164		if (use_lock)
1165			genpd_unlock(link->parent);
1166	}
1167
1168	_genpd_power_on(genpd, false);
1169	genpd->status = GENPD_STATE_ON;
1170}
1171
1172/**
1173 * genpd_prepare - Start power transition of a device in a PM domain.
1174 * @dev: Device to start the transition of.
1175 *
1176 * Start a power transition of a device (during a system-wide power transition)
1177 * under the assumption that its pm_domain field points to the domain member of
1178 * an object of type struct generic_pm_domain representing a PM domain
1179 * consisting of I/O devices.
1180 */
1181static int genpd_prepare(struct device *dev)
1182{
1183	struct generic_pm_domain *genpd;
1184	int ret;
1185
1186	dev_dbg(dev, "%s()\n", __func__);
1187
1188	genpd = dev_to_genpd(dev);
1189	if (IS_ERR(genpd))
1190		return -EINVAL;
1191
1192	genpd_lock(genpd);
1193
1194	if (genpd->prepared_count++ == 0)
1195		genpd->suspended_count = 0;
1196
1197	genpd_unlock(genpd);
1198
1199	ret = pm_generic_prepare(dev);
1200	if (ret < 0) {
1201		genpd_lock(genpd);
1202
1203		genpd->prepared_count--;
1204
1205		genpd_unlock(genpd);
1206	}
1207
1208	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
1209	return ret >= 0 ? 0 : ret;
1210}
1211
1212/**
1213 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1214 *   I/O pm domain.
1215 * @dev: Device to suspend.
1216 * @suspend_noirq: Generic suspend_noirq callback.
1217 * @resume_noirq: Generic resume_noirq callback.
1218 *
1219 * Stop the device and remove power from the domain if all devices in it have
1220 * been stopped.
1221 */
1222static int genpd_finish_suspend(struct device *dev,
1223				int (*suspend_noirq)(struct device *dev),
1224				int (*resume_noirq)(struct device *dev))
1225{
1226	struct generic_pm_domain *genpd;
1227	int ret = 0;
1228
1229	genpd = dev_to_genpd(dev);
1230	if (IS_ERR(genpd))
1231		return -EINVAL;
1232
1233	ret = suspend_noirq(dev);
1234	if (ret)
1235		return ret;
1236
1237	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1238		return 0;
1239
1240	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1241	    !pm_runtime_status_suspended(dev)) {
1242		ret = genpd_stop_dev(genpd, dev);
1243		if (ret) {
1244			resume_noirq(dev);
1245			return ret;
1246		}
1247	}
1248
1249	genpd_lock(genpd);
1250	genpd->suspended_count++;
1251	genpd_sync_power_off(genpd, true, 0);
1252	genpd_unlock(genpd);
1253
1254	return 0;
1255}
1256
1257/**
1258 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1259 * @dev: Device to suspend.
1260 *
1261 * Stop the device and remove power from the domain if all devices in it have
1262 * been stopped.
1263 */
1264static int genpd_suspend_noirq(struct device *dev)
1265{
1266	dev_dbg(dev, "%s()\n", __func__);
1267
1268	return genpd_finish_suspend(dev,
1269				    pm_generic_suspend_noirq,
1270				    pm_generic_resume_noirq);
1271}
1272
1273/**
1274 * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
1275 * @dev: Device to resume.
1276 * @resume_noirq: Generic resume_noirq callback.
1277 *
1278 * Restore power to the device's PM domain, if necessary, and start the device.
1279 */
1280static int genpd_finish_resume(struct device *dev,
1281			       int (*resume_noirq)(struct device *dev))
1282{
1283	struct generic_pm_domain *genpd;
1284	int ret;
1285
1286	dev_dbg(dev, "%s()\n", __func__);
1287
1288	genpd = dev_to_genpd(dev);
1289	if (IS_ERR(genpd))
1290		return -EINVAL;
1291
1292	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1293		return resume_noirq(dev);
1294
1295	genpd_lock(genpd);
1296	genpd_sync_power_on(genpd, true, 0);
1297	genpd->suspended_count--;
1298	genpd_unlock(genpd);
1299
1300	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1301	    !pm_runtime_status_suspended(dev)) {
1302		ret = genpd_start_dev(genpd, dev);
1303		if (ret)
1304			return ret;
1305	}
1306
1307	return resume_noirq(dev);
1308}
1309
1310/**
1311 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1312 * @dev: Device to resume.
1313 *
1314 * Restore power to the device's PM domain, if necessary, and start the device.
1315 */
1316static int genpd_resume_noirq(struct device *dev)
1317{
1318	dev_dbg(dev, "%s()\n", __func__);
1319
1320	return genpd_finish_resume(dev, pm_generic_resume_noirq);
1321}
1322
1323/**
1324 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1325 * @dev: Device to freeze.
1326 *
1327 * Carry out a late freeze of a device under the assumption that its
1328 * pm_domain field points to the domain member of an object of type
1329 * struct generic_pm_domain representing a power domain consisting of I/O
1330 * devices.
1331 */
1332static int genpd_freeze_noirq(struct device *dev)
1333{
1334	dev_dbg(dev, "%s()\n", __func__);
1335
1336	return genpd_finish_suspend(dev,
1337				    pm_generic_freeze_noirq,
1338				    pm_generic_thaw_noirq);
1339}
1340
1341/**
1342 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1343 * @dev: Device to thaw.
1344 *
1345 * Start the device, unless power has been removed from the domain already
1346 * before the system transition.
1347 */
1348static int genpd_thaw_noirq(struct device *dev)
1349{
1350	dev_dbg(dev, "%s()\n", __func__);
1351
1352	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
1353}
1354
1355/**
1356 * genpd_poweroff_noirq - Completion of hibernation of device in an
1357 *   I/O PM domain.
1358 * @dev: Device to power off.
1359 *
1360 * Stop the device and remove power from the domain if all devices in it have
1361 * been stopped.
1362 */
1363static int genpd_poweroff_noirq(struct device *dev)
1364{
1365	dev_dbg(dev, "%s()\n", __func__);
1366
1367	return genpd_finish_suspend(dev,
1368				    pm_generic_poweroff_noirq,
1369				    pm_generic_restore_noirq);
1370}
1371
1372/**
1373 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1374 * @dev: Device to resume.
1375 *
1376 * Make sure the domain will be in the same power state as before the
1377 * hibernation the system is resuming from, and start the device if necessary.
1378 */
1379static int genpd_restore_noirq(struct device *dev)
1380{
1381	dev_dbg(dev, "%s()\n", __func__);
1382
1383	return genpd_finish_resume(dev, pm_generic_restore_noirq);
1384}
1385
1386/**
1387 * genpd_complete - Complete power transition of a device in a power domain.
1388 * @dev: Device to complete the transition of.
1389 *
1390 * Complete a power transition of a device (during a system-wide power
1391 * transition) under the assumption that its pm_domain field points to the
1392 * domain member of an object of type struct generic_pm_domain representing
1393 * a power domain consisting of I/O devices.
1394 */
1395static void genpd_complete(struct device *dev)
1396{
1397	struct generic_pm_domain *genpd;
1398
1399	dev_dbg(dev, "%s()\n", __func__);
1400
1401	genpd = dev_to_genpd(dev);
1402	if (IS_ERR(genpd))
1403		return;
1404
1405	pm_generic_complete(dev);
1406
1407	genpd_lock(genpd);
1408
1409	genpd->prepared_count--;
1410	if (!genpd->prepared_count)
1411		genpd_queue_power_off_work(genpd);
1412
1413	genpd_unlock(genpd);
1414}
1415
1416static void genpd_switch_state(struct device *dev, bool suspend)
1417{
1418	struct generic_pm_domain *genpd;
1419	bool use_lock;
1420
1421	genpd = dev_to_genpd_safe(dev);
1422	if (!genpd)
1423		return;
1424
1425	use_lock = genpd_is_irq_safe(genpd);
1426
1427	if (use_lock)
1428		genpd_lock(genpd);
1429
1430	if (suspend) {
1431		genpd->suspended_count++;
1432		genpd_sync_power_off(genpd, use_lock, 0);
1433	} else {
1434		genpd_sync_power_on(genpd, use_lock, 0);
1435		genpd->suspended_count--;
1436	}
1437
1438	if (use_lock)
1439		genpd_unlock(genpd);
1440}
1441
1442/**
1443 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1444 * @dev: The device that is attached to the genpd, that can be suspended.
1445 *
1446 * This routine should typically be called for a device that needs to be
1447 * suspended during the syscore suspend phase. It may also be called during
1448 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1449 * genpd.
1450 */
1451void dev_pm_genpd_suspend(struct device *dev)
1452{
1453	genpd_switch_state(dev, true);
1454}
1455EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
1456
1457/**
1458 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1459 * @dev: The device that is attached to the genpd, which needs to be resumed.
1460 *
1461 * This routine should typically be called for a device that needs to be resumed
1462 * during the syscore resume phase. It may also be called during suspend-to-idle
1463 * to resume a corresponding CPU device that is attached to a genpd.
1464 */
1465void dev_pm_genpd_resume(struct device *dev)
1466{
1467	genpd_switch_state(dev, false);
1468}
1469EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
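/*
 * Illustrative usage sketch (not part of the original file; "my_" names are
 * hypothetical): a syscore-style user may pair the two helpers above around a
 * low-power entry point, for a device that is already attached to a genpd but
 * cannot rely on the regular noirq callbacks.
 *
 *	static int my_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(my_dev);
 *		return 0;
 *	}
 *
 *	static void my_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(my_dev);
 *	}
 */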
1470
1471#else /* !CONFIG_PM_SLEEP */
1472
1473#define genpd_prepare		NULL
1474#define genpd_suspend_noirq	NULL
1475#define genpd_resume_noirq	NULL
1476#define genpd_freeze_noirq	NULL
1477#define genpd_thaw_noirq	NULL
1478#define genpd_poweroff_noirq	NULL
1479#define genpd_restore_noirq	NULL
1480#define genpd_complete		NULL
1481
1482#endif /* CONFIG_PM_SLEEP */
1483
1484static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1485							   bool has_governor)
1486{
1487	struct generic_pm_domain_data *gpd_data;
1488	struct gpd_timing_data *td;
1489	int ret;
1490
1491	ret = dev_pm_get_subsys_data(dev);
1492	if (ret)
1493		return ERR_PTR(ret);
1494
1495	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1496	if (!gpd_data) {
1497		ret = -ENOMEM;
1498		goto err_put;
1499	}
1500
1501	gpd_data->base.dev = dev;
1502	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1503
1504	/* Allocate data used by a governor. */
1505	if (has_governor) {
1506		td = kzalloc(sizeof(*td), GFP_KERNEL);
1507		if (!td) {
1508			ret = -ENOMEM;
1509			goto err_free;
1510		}
1511
1512		td->constraint_changed = true;
1513		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1514		td->next_wakeup = KTIME_MAX;
1515		gpd_data->td = td;
1516	}
1517
1518	spin_lock_irq(&dev->power.lock);
1519
1520	if (dev->power.subsys_data->domain_data)
1521		ret = -EINVAL;
1522	else
1523		dev->power.subsys_data->domain_data = &gpd_data->base;
1524
1525	spin_unlock_irq(&dev->power.lock);
1526
1527	if (ret)
1528		goto err_free;
1529
1530	return gpd_data;
1531
1532 err_free:
1533	kfree(gpd_data->td);
1534	kfree(gpd_data);
1535 err_put:
1536	dev_pm_put_subsys_data(dev);
1537	return ERR_PTR(ret);
1538}
1539
1540static void genpd_free_dev_data(struct device *dev,
1541				struct generic_pm_domain_data *gpd_data)
1542{
1543	spin_lock_irq(&dev->power.lock);
1544
1545	dev->power.subsys_data->domain_data = NULL;
1546
1547	spin_unlock_irq(&dev->power.lock);
1548
1549	kfree(gpd_data->td);
1550	kfree(gpd_data);
1551	dev_pm_put_subsys_data(dev);
1552}
1553
1554static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1555				 int cpu, bool set, unsigned int depth)
1556{
1557	struct gpd_link *link;
1558
1559	if (!genpd_is_cpu_domain(genpd))
1560		return;
1561
1562	list_for_each_entry(link, &genpd->child_links, child_node) {
1563		struct generic_pm_domain *parent = link->parent;
1564
1565		genpd_lock_nested(parent, depth + 1);
1566		genpd_update_cpumask(parent, cpu, set, depth + 1);
1567		genpd_unlock(parent);
1568	}
1569
1570	if (set)
1571		cpumask_set_cpu(cpu, genpd->cpus);
1572	else
1573		cpumask_clear_cpu(cpu, genpd->cpus);
1574}
1575
1576static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1577{
1578	if (cpu >= 0)
1579		genpd_update_cpumask(genpd, cpu, true, 0);
1580}
1581
1582static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1583{
1584	if (cpu >= 0)
1585		genpd_update_cpumask(genpd, cpu, false, 0);
1586}
1587
1588static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1589{
1590	int cpu;
1591
1592	if (!genpd_is_cpu_domain(genpd))
1593		return -1;
1594
1595	for_each_possible_cpu(cpu) {
1596		if (get_cpu_device(cpu) == dev)
1597			return cpu;
1598	}
1599
1600	return -1;
1601}
1602
1603static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1604			    struct device *base_dev)
1605{
1606	struct genpd_governor_data *gd = genpd->gd;
1607	struct generic_pm_domain_data *gpd_data;
1608	int ret;
1609
1610	dev_dbg(dev, "%s()\n", __func__);
1611
1612	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1613		return -EINVAL;
1614
1615	gpd_data = genpd_alloc_dev_data(dev, gd);
1616	if (IS_ERR(gpd_data))
1617		return PTR_ERR(gpd_data);
1618
1619	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1620
1621	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1622	if (ret)
1623		goto out;
1624
1625	genpd_lock(genpd);
1626
1627	genpd_set_cpumask(genpd, gpd_data->cpu);
1628	dev_pm_domain_set(dev, &genpd->domain);
1629
1630	genpd->device_count++;
1631	if (gd)
1632		gd->max_off_time_changed = true;
1633
1634	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1635
1636	genpd_unlock(genpd);
1637 out:
1638	if (ret)
1639		genpd_free_dev_data(dev, gpd_data);
1640	else
1641		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1642					DEV_PM_QOS_RESUME_LATENCY);
1643
1644	return ret;
1645}
1646
1647/**
1648 * pm_genpd_add_device - Add a device to an I/O PM domain.
1649 * @genpd: PM domain to add the device to.
1650 * @dev: Device to be added.
1651 */
1652int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1653{
1654	int ret;
1655
1656	mutex_lock(&gpd_list_lock);
1657	ret = genpd_add_device(genpd, dev, dev);
1658	mutex_unlock(&gpd_list_lock);
1659
1660	return ret;
1661}
1662EXPORT_SYMBOL_GPL(pm_genpd_add_device);
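/*
 * Illustrative usage sketch (hypothetical; "my_pd" is assumed to have been
 * registered with pm_genpd_init() further below): a platform driver that owns
 * both the domain and a device can attach the device directly, without going
 * through a DT provider.
 *
 *	ret = pm_genpd_add_device(&my_pd, &pdev->dev);
 *	if (ret)
 *		return ret;
 */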
1663
1664static int genpd_remove_device(struct generic_pm_domain *genpd,
1665			       struct device *dev)
1666{
1667	struct generic_pm_domain_data *gpd_data;
1668	struct pm_domain_data *pdd;
1669	int ret = 0;
1670
1671	dev_dbg(dev, "%s()\n", __func__);
1672
1673	pdd = dev->power.subsys_data->domain_data;
1674	gpd_data = to_gpd_data(pdd);
1675	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
1676				   DEV_PM_QOS_RESUME_LATENCY);
1677
1678	genpd_lock(genpd);
1679
1680	if (genpd->prepared_count > 0) {
1681		ret = -EAGAIN;
1682		goto out;
1683	}
1684
1685	genpd->device_count--;
1686	if (genpd->gd)
1687		genpd->gd->max_off_time_changed = true;
1688
1689	genpd_clear_cpumask(genpd, gpd_data->cpu);
1690	dev_pm_domain_set(dev, NULL);
1691
1692	list_del_init(&pdd->list_node);
1693
1694	genpd_unlock(genpd);
1695
1696	if (genpd->detach_dev)
1697		genpd->detach_dev(genpd, dev);
1698
1699	genpd_free_dev_data(dev, gpd_data);
1700
1701	return 0;
1702
1703 out:
1704	genpd_unlock(genpd);
1705	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
1706
1707	return ret;
1708}
1709
1710/**
1711 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1712 * @dev: Device to be removed.
1713 */
1714int pm_genpd_remove_device(struct device *dev)
1715{
1716	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
1717
1718	if (!genpd)
1719		return -EINVAL;
1720
1721	return genpd_remove_device(genpd, dev);
1722}
1723EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1724
1725/**
1726 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
1727 *
1728 * @dev: Device that should be associated with the notifier
1729 * @nb: The notifier block to register
1730 *
1731 * Users may call this function to add a genpd power on/off notifier for an
1732 * attached @dev. Only one notifier per device is allowed. The notifier is
1733 * sent when genpd is powering on/off the PM domain.
1734 *
1735 * It is assumed that the caller guarantees that the genpd won't be detached
1736 * while this routine is being called.
1737 *
1738 * Returns 0 on success and negative error values on failures.
1739 */
1740int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
1741{
1742	struct generic_pm_domain *genpd;
1743	struct generic_pm_domain_data *gpd_data;
1744	int ret;
1745
1746	genpd = dev_to_genpd_safe(dev);
1747	if (!genpd)
1748		return -ENODEV;
1749
1750	if (WARN_ON(!dev->power.subsys_data ||
1751		     !dev->power.subsys_data->domain_data))
1752		return -EINVAL;
1753
1754	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1755	if (gpd_data->power_nb)
1756		return -EEXIST;
1757
1758	genpd_lock(genpd);
1759	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
1760	genpd_unlock(genpd);
1761
1762	if (ret) {
1763		dev_warn(dev, "failed to add notifier for PM domain %s\n",
1764			 genpd->name);
1765		return ret;
1766	}
1767
1768	gpd_data->power_nb = nb;
1769	return 0;
1770}
1771EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
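/*
 * Illustrative usage sketch (hypothetical; "my_" names are made up): a
 * consumer driver can react to its PM domain being powered on/off by
 * registering a notifier. The GENPD_NOTIFY_* actions are the ones raised via
 * the power_notifiers chain in _genpd_power_on()/_genpd_power_off(); a common
 * pattern is to save context on GENPD_NOTIFY_PRE_OFF and restore it on
 * GENPD_NOTIFY_ON.
 *
 *	static int my_pd_notifier_cb(struct notifier_block *nb,
 *				     unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case GENPD_NOTIFY_PRE_OFF:
 *			my_save_context();
 *			break;
 *		case GENPD_NOTIFY_ON:
 *			my_restore_context();
 *			break;
 *		default:
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_pd_nb = {
 *		.notifier_call = my_pd_notifier_cb,
 *	};
 *
 *	ret = dev_pm_genpd_add_notifier(dev, &my_pd_nb);
 */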
1772
1773/**
1774 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
1775 *
1776 * @dev: Device that is associated with the notifier
1777 *
1778 * Users may call this function to remove a genpd power on/off notifier for an
1779 * attached @dev.
1780 *
1781 * It is assumed that the caller guarantees that the genpd won't be detached
1782 * while this routine is being called.
1783 *
1784 * Returns 0 on success and negative error values on failures.
1785 */
1786int dev_pm_genpd_remove_notifier(struct device *dev)
1787{
1788	struct generic_pm_domain *genpd;
1789	struct generic_pm_domain_data *gpd_data;
1790	int ret;
1791
1792	genpd = dev_to_genpd_safe(dev);
1793	if (!genpd)
1794		return -ENODEV;
1795
1796	if (WARN_ON(!dev->power.subsys_data ||
1797		     !dev->power.subsys_data->domain_data))
1798		return -EINVAL;
1799
1800	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1801	if (!gpd_data->power_nb)
1802		return -ENODEV;
1803
1804	genpd_lock(genpd);
1805	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
1806					    gpd_data->power_nb);
1807	genpd_unlock(genpd);
1808
1809	if (ret) {
1810		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
1811			 genpd->name);
1812		return ret;
1813	}
1814
1815	gpd_data->power_nb = NULL;
1816	return 0;
1817}
1818EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
1819
1820static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1821			       struct generic_pm_domain *subdomain)
1822{
1823	struct gpd_link *link, *itr;
1824	int ret = 0;
1825
1826	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1827	    || genpd == subdomain)
1828		return -EINVAL;
1829
1830	/*
1831	 * If the domain can be powered on/off in an IRQ safe
1832	 * context, ensure that the subdomain can also be
1833	 * powered on/off in that context.
1834	 */
1835	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1836		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1837				genpd->name, subdomain->name);
1838		return -EINVAL;
1839	}
1840
1841	link = kzalloc(sizeof(*link), GFP_KERNEL);
1842	if (!link)
1843		return -ENOMEM;
1844
1845	genpd_lock(subdomain);
1846	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1847
1848	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1849		ret = -EINVAL;
1850		goto out;
1851	}
1852
1853	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
1854		if (itr->child == subdomain && itr->parent == genpd) {
1855			ret = -EINVAL;
1856			goto out;
1857		}
1858	}
1859
1860	link->parent = genpd;
1861	list_add_tail(&link->parent_node, &genpd->parent_links);
1862	link->child = subdomain;
1863	list_add_tail(&link->child_node, &subdomain->child_links);
1864	if (genpd_status_on(subdomain))
1865		genpd_sd_counter_inc(genpd);
1866
1867 out:
1868	genpd_unlock(genpd);
1869	genpd_unlock(subdomain);
1870	if (ret)
1871		kfree(link);
1872	return ret;
1873}
1874
1875/**
1876 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1877 * @genpd: Leader PM domain to add the subdomain to.
1878 * @subdomain: Subdomain to be added.
1879 */
1880int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1881			   struct generic_pm_domain *subdomain)
1882{
1883	int ret;
1884
1885	mutex_lock(&gpd_list_lock);
1886	ret = genpd_add_subdomain(genpd, subdomain);
1887	mutex_unlock(&gpd_list_lock);
1888
1889	return ret;
1890}
1891EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
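/*
 * Illustrative usage sketch (hypothetical; both domains are assumed to have
 * been registered with pm_genpd_init()): linking a child domain below a parent
 * so that the sd_count handling above keeps the parent powered while the child
 * is on.
 *
 *	ret = pm_genpd_add_subdomain(&soc_top_pd, &gpu_pd);
 *	if (ret)
 *		dev_err(dev, "failed to link gpu_pd below soc_top_pd\n");
 */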
1892
1893/**
1894 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1895 * @genpd: Leader PM domain to remove the subdomain from.
1896 * @subdomain: Subdomain to be removed.
1897 */
1898int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1899			      struct generic_pm_domain *subdomain)
1900{
1901	struct gpd_link *l, *link;
1902	int ret = -EINVAL;
1903
1904	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1905		return -EINVAL;
1906
1907	genpd_lock(subdomain);
1908	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1909
1910	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
1911		pr_warn("%s: unable to remove subdomain %s\n",
1912			genpd->name, subdomain->name);
1913		ret = -EBUSY;
1914		goto out;
1915	}
1916
1917	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1918		if (link->child != subdomain)
1919			continue;
1920
1921		list_del(&link->parent_node);
1922		list_del(&link->child_node);
1923		kfree(link);
1924		if (genpd_status_on(subdomain))
1925			genpd_sd_counter_dec(genpd);
1926
1927		ret = 0;
1928		break;
1929	}
1930
1931out:
1932	genpd_unlock(genpd);
1933	genpd_unlock(subdomain);
1934
1935	return ret;
1936}
1937EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1938
1939static void genpd_free_default_power_state(struct genpd_power_state *states,
1940					   unsigned int state_count)
1941{
1942	kfree(states);
1943}
1944
1945static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1946{
1947	struct genpd_power_state *state;
1948
1949	state = kzalloc(sizeof(*state), GFP_KERNEL);
1950	if (!state)
1951		return -ENOMEM;
1952
1953	genpd->states = state;
1954	genpd->state_count = 1;
1955	genpd->free_states = genpd_free_default_power_state;
1956
1957	return 0;
1958}
1959
1960static int genpd_alloc_data(struct generic_pm_domain *genpd)
1961{
1962	struct genpd_governor_data *gd = NULL;
1963	int ret;
1964
1965	if (genpd_is_cpu_domain(genpd) &&
1966	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
1967		return -ENOMEM;
1968
1969	if (genpd->gov) {
1970		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
1971		if (!gd) {
1972			ret = -ENOMEM;
1973			goto free;
1974		}
1975
1976		gd->max_off_time_ns = -1;
1977		gd->max_off_time_changed = true;
1978		gd->next_wakeup = KTIME_MAX;
1979		gd->next_hrtimer = KTIME_MAX;
1980	}
1981
1982	/* Use only one "off" state if there were no states declared */
1983	if (genpd->state_count == 0) {
1984		ret = genpd_set_default_power_state(genpd);
1985		if (ret)
1986			goto free;
1987	}
1988
1989	genpd->gd = gd;
1990	return 0;
1991
1992free:
1993	if (genpd_is_cpu_domain(genpd))
1994		free_cpumask_var(genpd->cpus);
1995	kfree(gd);
1996	return ret;
1997}
1998
1999static void genpd_free_data(struct generic_pm_domain *genpd)
2000{
2001	if (genpd_is_cpu_domain(genpd))
2002		free_cpumask_var(genpd->cpus);
2003	if (genpd->free_states)
2004		genpd->free_states(genpd->states, genpd->state_count);
2005	kfree(genpd->gd);
2006}
2007
2008static void genpd_lock_init(struct generic_pm_domain *genpd)
2009{
2010	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
2011		spin_lock_init(&genpd->slock);
2012		genpd->lock_ops = &genpd_spin_ops;
2013	} else {
2014		mutex_init(&genpd->mlock);
2015		genpd->lock_ops = &genpd_mtx_ops;
2016	}
2017}
2018
2019/**
2020 * pm_genpd_init - Initialize a generic I/O PM domain object.
2021 * @genpd: PM domain object to initialize.
2022 * @gov: PM domain governor to associate with the domain (may be NULL).
2023 * @is_off: Initial value of the domain's power_is_off field.
2024 *
2025 * Returns 0 on successful initialization, else a negative error code.
2026 */
2027int pm_genpd_init(struct generic_pm_domain *genpd,
2028		  struct dev_power_governor *gov, bool is_off)
2029{
2030	int ret;
2031
2032	if (IS_ERR_OR_NULL(genpd))
2033		return -EINVAL;
2034
2035	INIT_LIST_HEAD(&genpd->parent_links);
2036	INIT_LIST_HEAD(&genpd->child_links);
2037	INIT_LIST_HEAD(&genpd->dev_list);
2038	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
2039	genpd_lock_init(genpd);
2040	genpd->gov = gov;
2041	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2042	atomic_set(&genpd->sd_count, 0);
2043	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
2044	genpd->device_count = 0;
2045	genpd->provider = NULL;
2046	genpd->has_provider = false;
2047	genpd->accounting_time = ktime_get_mono_fast_ns();
2048	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2049	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
2050	genpd->domain.ops.prepare = genpd_prepare;
2051	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2052	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2053	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2054	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2055	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2056	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2057	genpd->domain.ops.complete = genpd_complete;
2058	genpd->domain.start = genpd_dev_pm_start;
2059
2060	if (genpd->flags & GENPD_FLAG_PM_CLK) {
2061		genpd->dev_ops.stop = pm_clk_suspend;
2062		genpd->dev_ops.start = pm_clk_resume;
2063	}
2064
2065	/* The always-on governor works better with the corresponding flag. */
2066	if (gov == &pm_domain_always_on_gov)
2067		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2068
2069	/* Always-on domains must be powered on at initialization. */
2070	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2071			!genpd_status_on(genpd)) {
2072		pr_err("always-on PM domain %s is not on\n", genpd->name);
2073		return -EINVAL;
2074	}
2075
2076	/* Multiple states but no governor doesn't make sense. */
2077	if (!gov && genpd->state_count > 1)
2078		pr_warn("%s: no governor for states\n", genpd->name);
2079
2080	ret = genpd_alloc_data(genpd);
2081	if (ret)
2082		return ret;
2083
2084	device_initialize(&genpd->dev);
2085	dev_set_name(&genpd->dev, "%s", genpd->name);
2086
2087	mutex_lock(&gpd_list_lock);
2088	list_add(&genpd->gpd_list_node, &gpd_list);
2089	mutex_unlock(&gpd_list_lock);
2090	genpd_debug_add(genpd);
2091
2092	return 0;
2093}
2094EXPORT_SYMBOL_GPL(pm_genpd_init);
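/*
 * Illustrative provider sketch (hypothetical; "foo_" names are made up): a
 * typical provider fills in the power_on/power_off callbacks and registers the
 * domain, here as initially powered off and without a governor.
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		return foo_regmap_enable(domain);
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		return foo_regmap_disable(domain);
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo-pd",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, true);
 */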
2095
2096static int genpd_remove(struct generic_pm_domain *genpd)
2097{
2098	struct gpd_link *l, *link;
2099
2100	if (IS_ERR_OR_NULL(genpd))
2101		return -EINVAL;
2102
2103	genpd_lock(genpd);
2104
2105	if (genpd->has_provider) {
2106		genpd_unlock(genpd);
2107		pr_err("Provider present, unable to remove %s\n", genpd->name);
2108		return -EBUSY;
2109	}
2110
2111	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2112		genpd_unlock(genpd);
2113		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
2114		return -EBUSY;
2115	}
2116
2117	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2118		list_del(&link->parent_node);
2119		list_del(&link->child_node);
2120		kfree(link);
2121	}
2122
2123	list_del(&genpd->gpd_list_node);
2124	genpd_unlock(genpd);
2125	genpd_debug_remove(genpd);
2126	cancel_work_sync(&genpd->power_off_work);
2127	genpd_free_data(genpd);
2128
2129	pr_debug("%s: removed %s\n", __func__, genpd->name);
2130
2131	return 0;
2132}
2133
2134/**
2135 * pm_genpd_remove - Remove a generic I/O PM domain
2136 * @genpd: Pointer to PM domain that is to be removed.
2137 *
2138 * To remove the PM domain, this function:
2139 *  - Removes the PM domain as a subdomain to any parent domains,
2140 *    if it was added.
2141 *  - Removes the PM domain from the list of registered PM domains.
2142 *
2143 * The PM domain will only be removed if the associated provider has
2144 * been removed, it is not a parent to any other PM domain, and it has no
2145 * devices associated with it.
2146 */
2147int pm_genpd_remove(struct generic_pm_domain *genpd)
2148{
2149	int ret;
2150
2151	mutex_lock(&gpd_list_lock);
2152	ret = genpd_remove(genpd);
2153	mutex_unlock(&gpd_list_lock);
2154
2155	return ret;
2156}
2157EXPORT_SYMBOL_GPL(pm_genpd_remove);
2158
2159#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2160
2161/*
2162 * Device Tree based PM domain providers.
2163 *
2164 * The code below implements generic device tree based PM domain providers that
2165 * bind device tree nodes with generic PM domains registered in the system.
2166 *
2167 * Any driver that registers generic PM domains and needs to support binding of
2168 * devices to these domains is supposed to register a PM domain provider, which
2169 * maps a PM domain specifier retrieved from the device tree to a PM domain.
2170 *
2171 * Two simple mapping functions have been provided for convenience:
2172 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2173 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2174 *    index.
2175 */
2176
2177/**
2178 * struct of_genpd_provider - PM domain provider registration structure
2179 * @link: Entry in global list of PM domain providers
2180 * @node: Pointer to device tree node of PM domain provider
2181 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2182 *         into a PM domain.
2183 * @data: context pointer to be passed into @xlate callback
2184 */
2185struct of_genpd_provider {
2186	struct list_head link;
2187	struct device_node *node;
2188	genpd_xlate_t xlate;
2189	void *data;
2190};
2191
2192/* List of registered PM domain providers. */
2193static LIST_HEAD(of_genpd_providers);
2194/* Mutex to protect the list above. */
2195static DEFINE_MUTEX(of_genpd_mutex);
2196
2197/**
2198 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2199 * @genpdspec: OF phandle args to map into a PM domain
2200 * @data: xlate function private data - pointer to struct generic_pm_domain
2201 *
2202 * This is a generic xlate function that can be used to model PM domains that
2203 * have their own device tree nodes. The private data of xlate function needs
2204 * to be a valid pointer to struct generic_pm_domain.
2205 */
2206static struct generic_pm_domain *genpd_xlate_simple(
2207					struct of_phandle_args *genpdspec,
2208					void *data)
2209{
2210	return data;
2211}
2212
2213/**
2214 * genpd_xlate_onecell() - Xlate function using a single index.
2215 * @genpdspec: OF phandle args to map into a PM domain
2216 * @data: xlate function private data - pointer to struct genpd_onecell_data
2217 *
2218 * This is a generic xlate function that can be used to model simple PM domain
2219 * controllers that have one device tree node and provide multiple PM domains.
2220 * A single cell is used as an index into an array of PM domains specified in
2221 * the genpd_onecell_data struct when registering the provider.
2222 */
2223static struct generic_pm_domain *genpd_xlate_onecell(
2224					struct of_phandle_args *genpdspec,
2225					void *data)
2226{
2227	struct genpd_onecell_data *genpd_data = data;
2228	unsigned int idx = genpdspec->args[0];
2229
2230	if (genpdspec->args_count != 1)
2231		return ERR_PTR(-EINVAL);
2232
2233	if (idx >= genpd_data->num_domains) {
2234		pr_err("%s: invalid domain index %u\n", __func__, idx);
2235		return ERR_PTR(-EINVAL);
2236	}
2237
2238	if (!genpd_data->domains[idx])
2239		return ERR_PTR(-ENOENT);
2240
2241	return genpd_data->domains[idx];
2242}
2243
2244/**
2245 * genpd_add_provider() - Register a PM domain provider for a node
2246 * @np: Device node pointer associated with the PM domain provider.
2247 * @xlate: Callback for decoding PM domain from phandle arguments.
2248 * @data: Context pointer for @xlate callback.
2249 */
2250static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2251			      void *data)
2252{
2253	struct of_genpd_provider *cp;
2254
2255	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2256	if (!cp)
2257		return -ENOMEM;
2258
2259	cp->node = of_node_get(np);
2260	cp->data = data;
2261	cp->xlate = xlate;
2262	fwnode_dev_initialized(&np->fwnode, true);
2263
2264	mutex_lock(&of_genpd_mutex);
2265	list_add(&cp->link, &of_genpd_providers);
2266	mutex_unlock(&of_genpd_mutex);
2267	pr_debug("Added domain provider from %pOF\n", np);
2268
2269	return 0;
2270}
2271
2272static bool genpd_present(const struct generic_pm_domain *genpd)
2273{
2274	bool ret = false;
2275	const struct generic_pm_domain *gpd;
2276
2277	mutex_lock(&gpd_list_lock);
2278	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2279		if (gpd == genpd) {
2280			ret = true;
2281			break;
2282		}
2283	}
2284	mutex_unlock(&gpd_list_lock);
2285
2286	return ret;
2287}
2288
2289/**
2290 * of_genpd_add_provider_simple() - Register a simple PM domain provider
2291 * @np: Device node pointer associated with the PM domain provider.
2292 * @genpd: Pointer to PM domain associated with the PM domain provider.
2293 */
2294int of_genpd_add_provider_simple(struct device_node *np,
2295				 struct generic_pm_domain *genpd)
2296{
2297	int ret;
2298
2299	if (!np || !genpd)
2300		return -EINVAL;
2301
2302	if (!genpd_present(genpd))
2303		return -EINVAL;
2304
2305	genpd->dev.of_node = np;
2306
2307	/* Parse genpd OPP table */
2308	if (genpd->set_performance_state) {
2309		ret = dev_pm_opp_of_add_table(&genpd->dev);
2310		if (ret)
2311			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2312
2313		/*
2314		 * Save table for faster processing while setting performance
2315		 * state.
2316		 */
2317		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2318		WARN_ON(IS_ERR(genpd->opp_table));
2319	}
2320
2321	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2322	if (ret) {
2323		if (genpd->set_performance_state) {
2324			dev_pm_opp_put_opp_table(genpd->opp_table);
2325			dev_pm_opp_of_remove_table(&genpd->dev);
2326		}
2327
2328		return ret;
2329	}
2330
2331	genpd->provider = &np->fwnode;
2332	genpd->has_provider = true;
2333
2334	return 0;
2335}
2336EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
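/*
 * Illustrative usage sketch (hypothetical; "foo_pd" as in the pm_genpd_init()
 * example above): a driver whose DT node describes exactly one PM domain
 * (#power-domain-cells = <0>) registers the 1:1 provider, which maps phandle
 * args via genpd_xlate_simple().
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, false);
 *	if (ret)
 *		return ret;
 *
 *	return of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
 */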
2337
2338/**
2339 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2340 * @np: Device node pointer associated with the PM domain provider.
2341 * @data: Pointer to the data associated with the PM domain provider.
2342 */
2343int of_genpd_add_provider_onecell(struct device_node *np,
2344				  struct genpd_onecell_data *data)
2345{
2346	struct generic_pm_domain *genpd;
2347	unsigned int i;
2348	int ret = -EINVAL;
2349
2350	if (!np || !data)
2351		return -EINVAL;
2352
2353	if (!data->xlate)
2354		data->xlate = genpd_xlate_onecell;
2355
2356	for (i = 0; i < data->num_domains; i++) {
2357		genpd = data->domains[i];
2358
2359		if (!genpd)
2360			continue;
2361		if (!genpd_present(genpd))
2362			goto error;
2363
2364		genpd->dev.of_node = np;
2365
2366		/* Parse genpd OPP table */
2367		if (genpd->set_performance_state) {
2368			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2369			if (ret) {
2370				dev_err_probe(&genpd->dev, ret,
2371					      "Failed to add OPP table for index %d\n", i);
2372				goto error;
2373			}
2374
2375			/*
2376			 * Save table for faster processing while setting
2377			 * performance state.
2378			 */
2379			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2380			WARN_ON(IS_ERR(genpd->opp_table));
2381		}
2382
2383		genpd->provider = &np->fwnode;
2384		genpd->has_provider = true;
2385	}
2386
2387	ret = genpd_add_provider(np, data->xlate, data);
2388	if (ret < 0)
2389		goto error;
2390
2391	return 0;
2392
2393error:
2394	while (i--) {
2395		genpd = data->domains[i];
2396
2397		if (!genpd)
2398			continue;
2399
2400		genpd->provider = NULL;
2401		genpd->has_provider = false;
2402
2403		if (genpd->set_performance_state) {
2404			dev_pm_opp_put_opp_table(genpd->opp_table);
2405			dev_pm_opp_of_remove_table(&genpd->dev);
2406		}
2407	}
2408
2409	return ret;
2410}
2411EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
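/*
 * Illustrative usage sketch (hypothetical; "foo_" names and FOO_NR_DOMAINS are
 * made up): a controller providing several domains from one DT node
 * (#power-domain-cells = <1>) hands over an array indexed by the cell value,
 * matching genpd_xlate_onecell().
 *
 *	static struct generic_pm_domain *foo_domains[FOO_NR_DOMAINS];
 *
 *	static struct genpd_onecell_data foo_onecell_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &foo_onecell_data);
 */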
2412
2413/**
2414 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2415 * @np: Device node pointer associated with the PM domain provider
2416 */
2417void of_genpd_del_provider(struct device_node *np)
2418{
2419	struct of_genpd_provider *cp, *tmp;
2420	struct generic_pm_domain *gpd;
2421
2422	mutex_lock(&gpd_list_lock);
2423	mutex_lock(&of_genpd_mutex);
2424	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2425		if (cp->node == np) {
2426			/*
2427			 * For each PM domain associated with the
2428			 * provider, set the 'has_provider' to false
2429			 * so that the PM domain can be safely removed.
2430			 */
2431			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2432				if (gpd->provider == &np->fwnode) {
2433					gpd->has_provider = false;
2434
2435					if (!gpd->set_performance_state)
2436						continue;
2437
2438					dev_pm_opp_put_opp_table(gpd->opp_table);
2439					dev_pm_opp_of_remove_table(&gpd->dev);
2440				}
2441			}
2442
2443			fwnode_dev_initialized(&cp->node->fwnode, false);
2444			list_del(&cp->link);
2445			of_node_put(cp->node);
2446			kfree(cp);
2447			break;
2448		}
2449	}
2450	mutex_unlock(&of_genpd_mutex);
2451	mutex_unlock(&gpd_list_lock);
2452}
2453EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2454
2455/**
2456 * genpd_get_from_provider() - Look-up PM domain
2457 * @genpdspec: OF phandle args to use for look-up
2458 *
2459 * Looks for a PM domain provider under the node specified by @genpdspec and if
2460 * found, uses xlate function of the provider to map phandle args to a PM
2461 * domain.
2462 *
2463 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2464 * on failure.
2465 */
2466static struct generic_pm_domain *genpd_get_from_provider(
2467					struct of_phandle_args *genpdspec)
2468{
2469	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2470	struct of_genpd_provider *provider;
2471
2472	if (!genpdspec)
2473		return ERR_PTR(-EINVAL);
2474
2475	mutex_lock(&of_genpd_mutex);
2476
2477	/* Check if we have such a provider in our array */
2478	list_for_each_entry(provider, &of_genpd_providers, link) {
2479		if (provider->node == genpdspec->np)
2480			genpd = provider->xlate(genpdspec, provider->data);
2481		if (!IS_ERR(genpd))
2482			break;
2483	}
2484
2485	mutex_unlock(&of_genpd_mutex);
2486
2487	return genpd;
2488}
2489
2490/**
2491 * of_genpd_add_device() - Add a device to an I/O PM domain
2492 * @genpdspec: OF phandle args to use for look-up PM domain
2493 * @dev: Device to be added.
2494 *
2495 * Looks up an I/O PM domain based upon the phandle args provided and adds
2496 * the device to the PM domain. Returns a negative error code on failure.
2497 */
2498int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2499{
2500	struct generic_pm_domain *genpd;
2501	int ret;
2502
2503	mutex_lock(&gpd_list_lock);
2504
2505	genpd = genpd_get_from_provider(genpdspec);
2506	if (IS_ERR(genpd)) {
2507		ret = PTR_ERR(genpd);
2508		goto out;
2509	}
2510
2511	ret = genpd_add_device(genpd, dev, dev);
2512
2513out:
2514	mutex_unlock(&gpd_list_lock);
2515
2516	return ret;
2517}
2518EXPORT_SYMBOL_GPL(of_genpd_add_device);
2519
2520/**
2521 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2522 * @parent_spec: OF phandle args to use for parent PM domain look-up
2523 * @subdomain_spec: OF phandle args to use for subdomain look-up
2524 *
2525 * Looks up a parent PM domain and a subdomain based upon the phandle args
2526 * provided and adds the subdomain to the parent PM domain. Returns a
2527 * negative error code on failure.
2528 */
2529int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2530			   struct of_phandle_args *subdomain_spec)
2531{
2532	struct generic_pm_domain *parent, *subdomain;
2533	int ret;
2534
2535	mutex_lock(&gpd_list_lock);
2536
2537	parent = genpd_get_from_provider(parent_spec);
2538	if (IS_ERR(parent)) {
2539		ret = PTR_ERR(parent);
2540		goto out;
2541	}
2542
2543	subdomain = genpd_get_from_provider(subdomain_spec);
2544	if (IS_ERR(subdomain)) {
2545		ret = PTR_ERR(subdomain);
2546		goto out;
2547	}
2548
2549	ret = genpd_add_subdomain(parent, subdomain);
2550
2551out:
2552	mutex_unlock(&gpd_list_lock);
2553
2554	return ret == -ENOENT ? -EPROBE_DEFER : ret;
2555}
2556EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2557
2558/**
2559 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2560 * @parent_spec: OF phandle args to use for parent PM domain look-up
2561 * @subdomain_spec: OF phandle args to use for subdomain look-up
2562 *
2563 * Looks up a parent PM domain and a subdomain based upon the phandle args
2564 * provided and removes the subdomain from the parent PM domain. Returns a
2565 * negative error code on failure.
2566 */
2567int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
2568			      struct of_phandle_args *subdomain_spec)
2569{
2570	struct generic_pm_domain *parent, *subdomain;
2571	int ret;
2572
2573	mutex_lock(&gpd_list_lock);
2574
2575	parent = genpd_get_from_provider(parent_spec);
2576	if (IS_ERR(parent)) {
2577		ret = PTR_ERR(parent);
2578		goto out;
2579	}
2580
2581	subdomain = genpd_get_from_provider(subdomain_spec);
2582	if (IS_ERR(subdomain)) {
2583		ret = PTR_ERR(subdomain);
2584		goto out;
2585	}
2586
2587	ret = pm_genpd_remove_subdomain(parent, subdomain);
2588
2589out:
2590	mutex_unlock(&gpd_list_lock);
2591
2592	return ret;
2593}
2594EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2595
2596/**
2597 * of_genpd_remove_last - Remove the last PM domain registered for a provider
2598 * @np: Pointer to device node associated with provider
2599 *
2600 * Find the last PM domain that was added by a particular provider and
2601 * remove this PM domain from the list of PM domains. The provider is
2602 * identified by the 'provider' device structure that is passed. The PM
2603 * domain will only be removed, if the provider associated with domain
2604 * has been removed.
2605 *
2606 * Returns a valid pointer to struct generic_pm_domain on success or
2607 * ERR_PTR() on failure.
2608 */
2609struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2610{
2611	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2612	int ret;
2613
2614	if (IS_ERR_OR_NULL(np))
2615		return ERR_PTR(-EINVAL);
2616
2617	mutex_lock(&gpd_list_lock);
2618	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2619		if (gpd->provider == &np->fwnode) {
2620			ret = genpd_remove(gpd);
2621			genpd = ret ? ERR_PTR(ret) : gpd;
2622			break;
2623		}
2624	}
2625	mutex_unlock(&gpd_list_lock);
2626
2627	return genpd;
2628}
2629EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2630
2631static void genpd_release_dev(struct device *dev)
2632{
2633	of_node_put(dev->of_node);
2634	kfree(dev);
2635}
2636
2637static struct bus_type genpd_bus_type = {
2638	.name		= "genpd",
2639};
2640
2641/**
2642 * genpd_dev_pm_detach - Detach a device from its PM domain.
2643 * @dev: Device to detach.
2644 * @power_off: Currently not used
2645 *
2646 * Try to locate the generic PM domain that the device was previously
2647 * attached to. If one is found, the device is detached from it.
2648 */
2649static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2650{
2651	struct generic_pm_domain *pd;
2652	unsigned int i;
2653	int ret = 0;
2654
2655	pd = dev_to_genpd(dev);
2656	if (IS_ERR(pd))
2657		return;
2658
2659	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2660
2661	/* Drop the default performance state */
2662	if (dev_gpd_data(dev)->default_pstate) {
2663		dev_pm_genpd_set_performance_state(dev, 0);
2664		dev_gpd_data(dev)->default_pstate = 0;
2665	}
2666
2667	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2668		ret = genpd_remove_device(pd, dev);
2669		if (ret != -EAGAIN)
2670			break;
2671
2672		mdelay(i);
2673		cond_resched();
2674	}
2675
2676	if (ret < 0) {
2677		dev_err(dev, "failed to remove from PM domain %s: %d\n",
2678			pd->name, ret);
2679		return;
2680	}
2681
2682	/* Check if PM domain can be powered off after removing this device. */
2683	genpd_queue_power_off_work(pd);
2684
2685	/* Unregister the device if it was created by genpd. */
2686	if (dev->bus == &genpd_bus_type)
2687		device_unregister(dev);
2688}
2689
2690static void genpd_dev_pm_sync(struct device *dev)
2691{
2692	struct generic_pm_domain *pd;
2693
2694	pd = dev_to_genpd(dev);
2695	if (IS_ERR(pd))
2696		return;
2697
2698	genpd_queue_power_off_work(pd);
2699}
2700
2701static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2702				 unsigned int index, bool power_on)
2703{
2704	struct of_phandle_args pd_args;
2705	struct generic_pm_domain *pd;
2706	int pstate;
2707	int ret;
2708
2709	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2710				"#power-domain-cells", index, &pd_args);
2711	if (ret < 0)
2712		return ret;
2713
2714	mutex_lock(&gpd_list_lock);
2715	pd = genpd_get_from_provider(&pd_args);
2716	of_node_put(pd_args.np);
2717	if (IS_ERR(pd)) {
2718		mutex_unlock(&gpd_list_lock);
2719		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2720			__func__, PTR_ERR(pd));
2721		return driver_deferred_probe_check_state(base_dev);
2722	}
2723
2724	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2725
2726	ret = genpd_add_device(pd, dev, base_dev);
2727	mutex_unlock(&gpd_list_lock);
2728
2729	if (ret < 0)
2730		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
2731
2732	dev->pm_domain->detach = genpd_dev_pm_detach;
2733	dev->pm_domain->sync = genpd_dev_pm_sync;
2734
2735	/* Set the default performance state */
2736	pstate = of_get_required_opp_performance_state(dev->of_node, index);
2737	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
2738		ret = pstate;
2739		goto err;
2740	} else if (pstate > 0) {
2741		ret = dev_pm_genpd_set_performance_state(dev, pstate);
2742		if (ret)
2743			goto err;
2744		dev_gpd_data(dev)->default_pstate = pstate;
2745	}
2746
2747	if (power_on) {
2748		genpd_lock(pd);
2749		ret = genpd_power_on(pd, 0);
2750		genpd_unlock(pd);
2751	}
2752
2753	if (ret) {
2754		/* Drop the default performance state */
2755		if (dev_gpd_data(dev)->default_pstate) {
2756			dev_pm_genpd_set_performance_state(dev, 0);
2757			dev_gpd_data(dev)->default_pstate = 0;
2758		}
2759
2760		genpd_remove_device(pd, dev);
2761		return -EPROBE_DEFER;
2762	}
2763
2764	return 1;
2765
2766err:
2767	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
2768		pd->name, ret);
2769	genpd_remove_device(pd, dev);
2770	return ret;
2771}
2772
2773/**
2774 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2775 * @dev: Device to attach.
2776 *
2777 * Parse device's OF node to find a PM domain specifier. If such is found,
2778 * attaches the device to retrieved pm_domain ops.
2779 *
2780 * Returns 1 when a PM domain has been successfully attached, 0 when the device
2781 * doesn't need a PM domain or when multiple power-domains exist for it, else a
2782 * negative error code. Note that if a power-domain exists for the device, but
2783 * it cannot be found or turned on, then -EPROBE_DEFER is returned to ensure
2784 * that the device is not probed and the attach is retried later.
2785 */
2786int genpd_dev_pm_attach(struct device *dev)
2787{
2788	if (!dev->of_node)
2789		return 0;
2790
2791	/*
2792	 * Devices with multiple PM domains must be attached separately, as we
2793	 * can only attach one PM domain per device.
2794	 */
2795	if (of_count_phandle_with_args(dev->of_node, "power-domains",
2796				       "#power-domain-cells") != 1)
2797		return 0;
2798
2799	return __genpd_dev_pm_attach(dev, dev, 0, true);
2800}
2801EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2802
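/*
 * Illustrative note, not part of this file: consumer drivers normally do not
 * call genpd_dev_pm_attach() directly. For a single "power-domains" entry the
 * bus code attaches the domain before probing the driver, roughly as in the
 * hypothetical sketch below (a non-zero return includes -EPROBE_DEFER).
 *
 *	static int foo_bus_probe(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = dev_pm_domain_attach(dev, true);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_driver_probe(dev);
 *		if (ret)
 *			dev_pm_domain_detach(dev, true);
 *
 *		return ret;
 *	}
 */
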
2803/**
2804 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2805 * @dev: The device used to lookup the PM domain.
2806 * @index: The index of the PM domain.
2807 *
2808 * Parse device's OF node to find a PM domain specifier at the provided @index.
2809 * If such is found, creates a virtual device and attaches it to the retrieved
2810 * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
2811 * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
2812 *
2813 * Returns the created virtual device if the PM domain is successfully attached,
2814 * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
2815 * failures. If a power-domain exists for the device, but cannot be found or
2816 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
2817 * is not probed and the attach is retried later.
2818 */
2819struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2820					 unsigned int index)
2821{
2822	struct device *virt_dev;
2823	int num_domains;
2824	int ret;
2825
2826	if (!dev->of_node)
2827		return NULL;
2828
2829	/* Verify that the index is within a valid range. */
2830	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2831						 "#power-domain-cells");
2832	if (index >= num_domains)
2833		return NULL;
2834
2835	/* Allocate and register device on the genpd bus. */
2836	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2837	if (!virt_dev)
2838		return ERR_PTR(-ENOMEM);
2839
2840	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2841	virt_dev->bus = &genpd_bus_type;
2842	virt_dev->release = genpd_release_dev;
2843	virt_dev->of_node = of_node_get(dev->of_node);
2844
2845	ret = device_register(virt_dev);
2846	if (ret) {
2847		put_device(virt_dev);
2848		return ERR_PTR(ret);
2849	}
2850
2851	/* Try to attach the device to the PM domain at the specified index. */
2852	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2853	if (ret < 1) {
2854		device_unregister(virt_dev);
2855		return ret ? ERR_PTR(ret) : NULL;
2856	}
2857
2858	pm_runtime_enable(virt_dev);
2859	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2860
2861	return virt_dev;
2862}
2863EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
2864
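/*
 * Illustrative sketch (hypothetical consumer code, not part of this file):
 * a driver with several "power-domains" entries attaches each one through
 * dev_pm_domain_attach_by_id() and usually adds a device link so that
 * runtime PM of the consumer propagates to the returned virtual device.
 *
 *	struct device *pd_dev;
 *	struct device_link *link;
 *
 *	pd_dev = dev_pm_domain_attach_by_id(dev, 1);
 *	if (IS_ERR_OR_NULL(pd_dev))
 *		return pd_dev ? PTR_ERR(pd_dev) : -ENODEV;
 *
 *	link = device_link_add(dev, pd_dev,
 *			       DL_FLAG_STATELESS |
 *			       DL_FLAG_PM_RUNTIME |
 *			       DL_FLAG_RPM_ACTIVE);
 *	if (!link) {
 *		dev_pm_domain_detach(pd_dev, true);
 *		return -EINVAL;
 *	}
 */
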
2865/**
2866 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2867 * @dev: The device used to lookup the PM domain.
2868 * @name: The name of the PM domain.
2869 *
2870 * Parse device's OF node to find a PM domain specifier using the
2871 * power-domain-names DT property. For further description see
2872 * genpd_dev_pm_attach_by_id().
2873 */
2874struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2875{
2876	int index;
2877
2878	if (!dev->of_node)
2879		return NULL;
2880
2881	index = of_property_match_string(dev->of_node, "power-domain-names",
2882					 name);
2883	if (index < 0)
2884		return NULL;
2885
2886	return genpd_dev_pm_attach_by_id(dev, index);
2887}
2888
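/*
 * Illustrative sketch, not part of this file: with a consumer node such as
 * the hypothetical one below, dev_pm_domain_attach_by_name(dev, "perf")
 * resolves the name through "power-domain-names" and ends up in
 * genpd_dev_pm_attach_by_id() with the matching index.
 *
 *	codec@1 {
 *		...
 *		power-domains = <&pd_core>, <&pd_perf>;
 *		power-domain-names = "core", "perf";
 *	};
 */
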
2889static const struct of_device_id idle_state_match[] = {
2890	{ .compatible = "domain-idle-state", },
2891	{ }
2892};
2893
2894static int genpd_parse_state(struct genpd_power_state *genpd_state,
2895				    struct device_node *state_node)
2896{
2897	int err;
2898	u32 residency;
2899	u32 entry_latency, exit_latency;
2900
2901	err = of_property_read_u32(state_node, "entry-latency-us",
2902						&entry_latency);
2903	if (err) {
2904		pr_debug(" * %pOF missing entry-latency-us property\n",
2905			 state_node);
2906		return -EINVAL;
2907	}
2908
2909	err = of_property_read_u32(state_node, "exit-latency-us",
2910						&exit_latency);
2911	if (err) {
2912		pr_debug(" * %pOF missing exit-latency-us property\n",
2913			 state_node);
2914		return -EINVAL;
2915	}
2916
2917	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2918	if (!err)
2919		genpd_state->residency_ns = 1000 * residency;
2920
2921	genpd_state->power_on_latency_ns = 1000 * exit_latency;
2922	genpd_state->power_off_latency_ns = 1000 * entry_latency;
2923	genpd_state->fwnode = &state_node->fwnode;
2924
2925	return 0;
2926}
2927
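/*
 * Illustrative DT fragment (hypothetical node and values) matching what
 * genpd_parse_state() consumes: the latencies are specified in microseconds
 * and converted to nanoseconds above, while min-residency-us is optional.
 *
 *	domain_retention: domain-retention {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <20>;
 *		exit-latency-us = <40>;
 *		min-residency-us = <1000>;
 *	};
 */
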
2928static int genpd_iterate_idle_states(struct device_node *dn,
2929				     struct genpd_power_state *states)
2930{
2931	int ret;
2932	struct of_phandle_iterator it;
2933	struct device_node *np;
2934	int i = 0;
2935
2936	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2937	if (ret <= 0)
2938		return ret == -ENOENT ? 0 : ret;
2939
2940	/* Loop over the phandles until all the requested entries are found */
2941	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2942		np = it.node;
2943		if (!of_match_node(idle_state_match, np))
2944			continue;
2945
2946		if (!of_device_is_available(np))
2947			continue;
2948
2949		if (states) {
2950			ret = genpd_parse_state(&states[i], np);
2951			if (ret) {
2952				pr_err("Parsing idle state node %pOF failed with err %d\n",
2953				       np, ret);
2954				of_node_put(np);
2955				return ret;
2956			}
2957		}
2958		i++;
2959	}
2960
2961	return i;
2962}
2963
2964/**
2965 * of_genpd_parse_idle_states - Return array of idle states for the genpd.
2966 *
2967 * @dn: The genpd device node
2968 * @states: The pointer to which the state array will be saved.
2969 * @n: The count of elements in the array returned from this function.
2970 *
2971 * Returns the device states parsed from the OF node. The memory for the states
2972 * is allocated by this function and it is the caller's responsibility to free
2973 * it after use. Whether one, several or zero compatible domain idle states are
2974 * found, 0 is returned; in case of errors, a negative error code is returned.
2975 */
2976int of_genpd_parse_idle_states(struct device_node *dn,
2977			struct genpd_power_state **states, int *n)
2978{
2979	struct genpd_power_state *st;
2980	int ret;
2981
2982	ret = genpd_iterate_idle_states(dn, NULL);
2983	if (ret < 0)
2984		return ret;
2985
2986	if (!ret) {
2987		*states = NULL;
2988		*n = 0;
2989		return 0;
2990	}
2991
2992	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
2993	if (!st)
2994		return -ENOMEM;
2995
2996	ret = genpd_iterate_idle_states(dn, st);
2997	if (ret <= 0) {
2998		kfree(st);
2999		return ret < 0 ? ret : -EINVAL;
3000	}
3001
3002	*states = st;
3003	*n = ret;
3004
3005	return 0;
3006}
3007EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
3008
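/*
 * Illustrative sketch (hypothetical provider code, not part of this file):
 * the parsed array is handed to the genpd before pm_genpd_init(), and the
 * provider owns (and eventually frees) the allocation.
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *
 *	pd->genpd.states = states;
 *	pd->genpd.state_count = nr_states;
 *
 *	ret = pm_genpd_init(&pd->genpd, NULL, false);
 */
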
3009/**
3010 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
3011 *
3012 * @genpd_dev: Genpd's device for which the performance-state needs to be found.
3013 * @opp: struct dev_pm_opp of the OPP for which we need to find performance
3014 *	state.
3015 *
3016 * Returns the performance state encoded in the OPP of the genpd. This calls
3017 * the platform-specific genpd->opp_to_performance_state() callback to translate
3018 * the power domain OPP to a performance state.
3019 *
3020 * Returns the performance state on success and 0 on failure.
3021 */
3022unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
3023					       struct dev_pm_opp *opp)
3024{
3025	struct generic_pm_domain *genpd = NULL;
3026	int state;
3027
3028	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
3029
3030	if (unlikely(!genpd->opp_to_performance_state))
3031		return 0;
3032
3033	genpd_lock(genpd);
3034	state = genpd->opp_to_performance_state(genpd, opp);
3035	genpd_unlock(genpd);
3036
3037	return state;
3038}
3039EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
3040
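/*
 * Illustrative sketch, not part of this file: the OPP core ends up here when
 * resolving "required-opps" for a consumer, so a provider only has to supply
 * the translation callback. The hypothetical one below simply reads the
 * OPP's level value.
 *
 *	static unsigned int foo_opp_to_performance_state(
 *				struct generic_pm_domain *genpd,
 *				struct dev_pm_opp *opp)
 *	{
 *		return dev_pm_opp_get_level(opp);
 *	}
 *
 *	pd->genpd.opp_to_performance_state = foo_opp_to_performance_state;
 */
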
3041static int __init genpd_bus_init(void)
3042{
3043	return bus_register(&genpd_bus_type);
3044}
3045core_initcall(genpd_bus_init);
3046
3047#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
3048
3049
3050/***        debugfs support        ***/
3051
3052#ifdef CONFIG_DEBUG_FS
3053/*
3054 * TODO: This function is a slightly modified version of rtpm_status_show
3055 * from sysfs.c, so generalize it.
3056 */
3057static void rtpm_status_str(struct seq_file *s, struct device *dev)
3058{
3059	static const char * const status_lookup[] = {
3060		[RPM_ACTIVE] = "active",
3061		[RPM_RESUMING] = "resuming",
3062		[RPM_SUSPENDED] = "suspended",
3063		[RPM_SUSPENDING] = "suspending"
3064	};
3065	const char *p = "";
3066
3067	if (dev->power.runtime_error)
3068		p = "error";
3069	else if (dev->power.disable_depth)
3070		p = "unsupported";
3071	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3072		p = status_lookup[dev->power.runtime_status];
3073	else
3074		WARN_ON(1);
3075
3076	seq_printf(s, "%-25s  ", p);
3077}
3078
3079static void perf_status_str(struct seq_file *s, struct device *dev)
3080{
3081	struct generic_pm_domain_data *gpd_data;
3082
3083	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3084	seq_put_decimal_ull(s, "", gpd_data->performance_state);
3085}
3086
3087static int genpd_summary_one(struct seq_file *s,
3088			struct generic_pm_domain *genpd)
3089{
3090	static const char * const status_lookup[] = {
3091		[GENPD_STATE_ON] = "on",
3092		[GENPD_STATE_OFF] = "off"
3093	};
3094	struct pm_domain_data *pm_data;
3095	const char *kobj_path;
3096	struct gpd_link *link;
3097	char state[16];
3098	int ret;
3099
3100	ret = genpd_lock_interruptible(genpd);
3101	if (ret)
3102		return -ERESTARTSYS;
3103
3104	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3105		goto exit;
3106	if (!genpd_status_on(genpd))
3107		snprintf(state, sizeof(state), "%s-%u",
3108			 status_lookup[genpd->status], genpd->state_idx);
3109	else
3110		snprintf(state, sizeof(state), "%s",
3111			 status_lookup[genpd->status]);
3112	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);
3113
3114	/*
3115	 * Modifications on the list require holding locks on both
3116	 * parent and child, so we are safe.
3117	 * Also genpd->name is immutable.
3118	 */
3119	list_for_each_entry(link, &genpd->parent_links, parent_node) {
3120		if (list_is_first(&link->parent_node, &genpd->parent_links))
3121			seq_printf(s, "\n%48s", " ");
3122		seq_printf(s, "%s", link->child->name);
3123		if (!list_is_last(&link->parent_node, &genpd->parent_links))
3124			seq_puts(s, ", ");
3125	}
3126
3127	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3128		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3129				genpd_is_irq_safe(genpd) ?
3130				GFP_ATOMIC : GFP_KERNEL);
3131		if (kobj_path == NULL)
3132			continue;
3133
3134		seq_printf(s, "\n    %-50s  ", kobj_path);
3135		rtpm_status_str(s, pm_data->dev);
3136		perf_status_str(s, pm_data->dev);
3137		kfree(kobj_path);
3138	}
3139
3140	seq_puts(s, "\n");
3141exit:
3142	genpd_unlock(genpd);
3143
3144	return 0;
3145}
3146
3147static int summary_show(struct seq_file *s, void *data)
3148{
3149	struct generic_pm_domain *genpd;
3150	int ret = 0;
3151
3152	seq_puts(s, "domain                          status          children                           performance\n");
3153	seq_puts(s, "    /device                                             runtime status\n");
3154	seq_puts(s, "----------------------------------------------------------------------------------------------\n");
3155
3156	ret = mutex_lock_interruptible(&gpd_list_lock);
3157	if (ret)
3158		return -ERESTARTSYS;
3159
3160	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3161		ret = genpd_summary_one(s, genpd);
3162		if (ret)
3163			break;
3164	}
3165	mutex_unlock(&gpd_list_lock);
3166
3167	return ret;
3168}
3169
3170static int status_show(struct seq_file *s, void *data)
3171{
3172	static const char * const status_lookup[] = {
3173		[GENPD_STATE_ON] = "on",
3174		[GENPD_STATE_OFF] = "off"
3175	};
3176
3177	struct generic_pm_domain *genpd = s->private;
3178	int ret = 0;
3179
3180	ret = genpd_lock_interruptible(genpd);
3181	if (ret)
3182		return -ERESTARTSYS;
3183
3184	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3185		goto exit;
3186
3187	if (genpd->status == GENPD_STATE_OFF)
3188		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3189			genpd->state_idx);
3190	else
3191		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3192exit:
3193	genpd_unlock(genpd);
3194	return ret;
3195}
3196
3197static int sub_domains_show(struct seq_file *s, void *data)
3198{
3199	struct generic_pm_domain *genpd = s->private;
3200	struct gpd_link *link;
3201	int ret = 0;
3202
3203	ret = genpd_lock_interruptible(genpd);
3204	if (ret)
3205		return -ERESTARTSYS;
3206
3207	list_for_each_entry(link, &genpd->parent_links, parent_node)
3208		seq_printf(s, "%s\n", link->child->name);
3209
3210	genpd_unlock(genpd);
3211	return ret;
3212}
3213
3214static int idle_states_show(struct seq_file *s, void *data)
3215{
3216	struct generic_pm_domain *genpd = s->private;
3217	u64 now, delta, idle_time = 0;
3218	unsigned int i;
3219	int ret = 0;
3220
3221	ret = genpd_lock_interruptible(genpd);
3222	if (ret)
3223		return -ERESTARTSYS;
3224
3225	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");
3226
3227	for (i = 0; i < genpd->state_count; i++) {
3228		idle_time += genpd->states[i].idle_time;
3229
3230		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3231			now = ktime_get_mono_fast_ns();
3232			if (now > genpd->accounting_time) {
3233				delta = now - genpd->accounting_time;
3234				idle_time += delta;
3235			}
3236		}
3237
3238		do_div(idle_time, NSEC_PER_MSEC);
3239		seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
3240			   genpd->states[i].usage, genpd->states[i].rejected);
3241	}
3242
3243	genpd_unlock(genpd);
3244	return ret;
3245}
3246
3247static int active_time_show(struct seq_file *s, void *data)
3248{
3249	struct generic_pm_domain *genpd = s->private;
3250	u64 now, on_time, delta = 0;
3251	int ret = 0;
3252
3253	ret = genpd_lock_interruptible(genpd);
3254	if (ret)
3255		return -ERESTARTSYS;
3256
3257	if (genpd->status == GENPD_STATE_ON) {
3258		now = ktime_get_mono_fast_ns();
3259		if (now > genpd->accounting_time)
3260			delta = now - genpd->accounting_time;
3261	}
3262
3263	on_time = genpd->on_time + delta;
3264	do_div(on_time, NSEC_PER_MSEC);
3265	seq_printf(s, "%llu ms\n", on_time);
3266
3267	genpd_unlock(genpd);
3268	return ret;
3269}
3270
3271static int total_idle_time_show(struct seq_file *s, void *data)
3272{
3273	struct generic_pm_domain *genpd = s->private;
3274	u64 now, delta, total = 0;
3275	unsigned int i;
3276	int ret = 0;
3277
3278	ret = genpd_lock_interruptible(genpd);
3279	if (ret)
3280		return -ERESTARTSYS;
3281
3282	for (i = 0; i < genpd->state_count; i++) {
3283		total += genpd->states[i].idle_time;
3284
3285		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3286			now = ktime_get_mono_fast_ns();
3287			if (now > genpd->accounting_time) {
3288				delta = now - genpd->accounting_time;
3289				total += delta;
3290			}
3291		}
3292	}
3293
3294	do_div(total, NSEC_PER_MSEC);
3295	seq_printf(s, "%llu ms\n", total);
3296
3297	genpd_unlock(genpd);
3298	return ret;
3299}
3300
3301
3302static int devices_show(struct seq_file *s, void *data)
3303{
3304	struct generic_pm_domain *genpd = s->private;
3305	struct pm_domain_data *pm_data;
3306	const char *kobj_path;
3307	int ret = 0;
3308
3309	ret = genpd_lock_interruptible(genpd);
3310	if (ret)
3311		return -ERESTARTSYS;
3312
3313	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3314		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3315				genpd_is_irq_safe(genpd) ?
3316				GFP_ATOMIC : GFP_KERNEL);
3317		if (kobj_path == NULL)
3318			continue;
3319
3320		seq_printf(s, "%s\n", kobj_path);
3321		kfree(kobj_path);
3322	}
3323
3324	genpd_unlock(genpd);
3325	return ret;
3326}
3327
3328static int perf_state_show(struct seq_file *s, void *data)
3329{
3330	struct generic_pm_domain *genpd = s->private;
3331
3332	if (genpd_lock_interruptible(genpd))
3333		return -ERESTARTSYS;
3334
3335	seq_printf(s, "%u\n", genpd->performance_state);
3336
3337	genpd_unlock(genpd);
3338	return 0;
3339}
3340
3341DEFINE_SHOW_ATTRIBUTE(summary);
3342DEFINE_SHOW_ATTRIBUTE(status);
3343DEFINE_SHOW_ATTRIBUTE(sub_domains);
3344DEFINE_SHOW_ATTRIBUTE(idle_states);
3345DEFINE_SHOW_ATTRIBUTE(active_time);
3346DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3347DEFINE_SHOW_ATTRIBUTE(devices);
3348DEFINE_SHOW_ATTRIBUTE(perf_state);
3349
3350static void genpd_debug_add(struct generic_pm_domain *genpd)
3351{
3352	struct dentry *d;
3353
3354	if (!genpd_debugfs_dir)
3355		return;
3356
3357	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3358
3359	debugfs_create_file("current_state", 0444,
3360			    d, genpd, &status_fops);
3361	debugfs_create_file("sub_domains", 0444,
3362			    d, genpd, &sub_domains_fops);
3363	debugfs_create_file("idle_states", 0444,
3364			    d, genpd, &idle_states_fops);
3365	debugfs_create_file("active_time", 0444,
3366			    d, genpd, &active_time_fops);
3367	debugfs_create_file("total_idle_time", 0444,
3368			    d, genpd, &total_idle_time_fops);
3369	debugfs_create_file("devices", 0444,
3370			    d, genpd, &devices_fops);
3371	if (genpd->set_performance_state)
3372		debugfs_create_file("perf_state", 0444,
3373				    d, genpd, &perf_state_fops);
3374}
3375
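/*
 * For reference, with debugfs mounted at its usual location the files
 * created above appear under /sys/kernel/debug/pm_genpd/<domain-name>/,
 * next to the global summary file registered in genpd_debug_init() below.
 * For example (the "gpu" domain name is hypothetical):
 *
 *	/sys/kernel/debug/pm_genpd/pm_genpd_summary
 *	/sys/kernel/debug/pm_genpd/gpu/current_state
 *	/sys/kernel/debug/pm_genpd/gpu/idle_states
 *	/sys/kernel/debug/pm_genpd/gpu/active_time
 */
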
3376static int __init genpd_debug_init(void)
3377{
3378	struct generic_pm_domain *genpd;
3379
3380	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3381
3382	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3383			    NULL, &summary_fops);
3384
3385	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3386		genpd_debug_add(genpd);
3387
3388	return 0;
3389}
3390late_initcall(genpd_debug_init);
3391
3392static void __exit genpd_debug_exit(void)
3393{
3394	debugfs_remove_recursive(genpd_debugfs_dir);
3395}
3396__exitcall(genpd_debug_exit);
3397#endif /* CONFIG_DEBUG_FS */