   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/domain.c - Common code related to device power domains.
   4 *
   5 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
   6 */
   7#define pr_fmt(fmt) "PM: " fmt
   8
   9#include <linux/delay.h>
  10#include <linux/kernel.h>
  11#include <linux/io.h>
  12#include <linux/platform_device.h>
  13#include <linux/pm_opp.h>
  14#include <linux/pm_runtime.h>
  15#include <linux/pm_domain.h>
  16#include <linux/pm_qos.h>
  17#include <linux/pm_clock.h>
  18#include <linux/slab.h>
  19#include <linux/err.h>
  20#include <linux/sched.h>
  21#include <linux/suspend.h>
  22#include <linux/export.h>
  23#include <linux/cpu.h>
  24#include <linux/debugfs.h>
  25
  26#include "power.h"
  27
  28#define GENPD_RETRY_MAX_MS	250		/* Approximate */
  29
  30#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
  31({								\
  32	type (*__routine)(struct device *__d); 			\
  33	type __ret = (type)0;					\
  34								\
  35	__routine = genpd->dev_ops.callback; 			\
  36	if (__routine) {					\
  37		__ret = __routine(dev); 			\
  38	}							\
  39	__ret;							\
  40})
  41
  42static LIST_HEAD(gpd_list);
  43static DEFINE_MUTEX(gpd_list_lock);
  44
  45struct genpd_lock_ops {
  46	void (*lock)(struct generic_pm_domain *genpd);
  47	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
  48	int (*lock_interruptible)(struct generic_pm_domain *genpd);
  49	void (*unlock)(struct generic_pm_domain *genpd);
  50};
  51
  52static void genpd_lock_mtx(struct generic_pm_domain *genpd)
  53{
  54	mutex_lock(&genpd->mlock);
  55}
  56
  57static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
  58					int depth)
  59{
  60	mutex_lock_nested(&genpd->mlock, depth);
  61}
  62
  63static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
  64{
  65	return mutex_lock_interruptible(&genpd->mlock);
  66}
  67
  68static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
  69{
  70	return mutex_unlock(&genpd->mlock);
  71}
  72
  73static const struct genpd_lock_ops genpd_mtx_ops = {
  74	.lock = genpd_lock_mtx,
  75	.lock_nested = genpd_lock_nested_mtx,
  76	.lock_interruptible = genpd_lock_interruptible_mtx,
  77	.unlock = genpd_unlock_mtx,
  78};
  79
  80static void genpd_lock_spin(struct generic_pm_domain *genpd)
  81	__acquires(&genpd->slock)
  82{
  83	unsigned long flags;
  84
  85	spin_lock_irqsave(&genpd->slock, flags);
  86	genpd->lock_flags = flags;
  87}
  88
  89static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
  90					int depth)
  91	__acquires(&genpd->slock)
  92{
  93	unsigned long flags;
  94
  95	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
  96	genpd->lock_flags = flags;
  97}
  98
  99static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
 100	__acquires(&genpd->slock)
 101{
 102	unsigned long flags;
 103
 104	spin_lock_irqsave(&genpd->slock, flags);
 105	genpd->lock_flags = flags;
 106	return 0;
 107}
 108
 109static void genpd_unlock_spin(struct generic_pm_domain *genpd)
 110	__releases(&genpd->slock)
 111{
 112	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
 113}
 114
 115static const struct genpd_lock_ops genpd_spin_ops = {
 116	.lock = genpd_lock_spin,
 117	.lock_nested = genpd_lock_nested_spin,
 118	.lock_interruptible = genpd_lock_interruptible_spin,
 119	.unlock = genpd_unlock_spin,
 120};
 121
 122#define genpd_lock(p)			p->lock_ops->lock(p)
 123#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
 124#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
 125#define genpd_unlock(p)			p->lock_ops->unlock(p)
 126
 127#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
 128#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
 129#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
 130#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
 131#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
 132#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
 133
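/*
 * Illustrative sketch (not part of the original file): all genpd state
 * accesses below funnel through the lock_ops wrappers above, so the same
 * code serves both mutex-based domains and IRQ-safe (spinlock-based) ones.
 * A typical critical section, with do_something() as a hypothetical helper:
 *
 *	genpd_lock(genpd);
 *	if (genpd_status_on(genpd))
 *		do_something(genpd);
 *	genpd_unlock(genpd);
 */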
 134static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
 135		const struct generic_pm_domain *genpd)
 136{
 137	bool ret;
 138
 139	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
 140
 141	/*
 142	 * Warn once if an IRQ safe device is attached to a domain whose
 143	 * callbacks are allowed to sleep. This indicates a suboptimal
 144	 * configuration for PM, but it doesn't matter for an always on domain.
 145	 */
 146	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
 147		return ret;
 148
 149	if (ret)
 150		dev_warn_once(dev, "PM domain %s will not be powered off\n",
 151				genpd->name);
 152
 153	return ret;
 154}
 155
 156static int genpd_runtime_suspend(struct device *dev);
 157
 158/*
 159 * Get the generic PM domain for a particular struct device.
 160 * This validates the struct device pointer, the PM domain pointer,
 161 * and checks that the PM domain pointer is a real generic PM domain.
 162 * Any failure results in NULL being returned.
 163 */
 164static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
 165{
 166	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
 167		return NULL;
 168
 169	/* A genpd always has its ->runtime_suspend() callback assigned. */
 170	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
 171		return pd_to_genpd(dev->pm_domain);
 172
 173	return NULL;
 174}
 175
 176/*
 177 * This should only be used where we are certain that the pm_domain
 178 * attached to the device is a genpd domain.
 179 */
 180static struct generic_pm_domain *dev_to_genpd(struct device *dev)
 181{
 182	if (IS_ERR_OR_NULL(dev->pm_domain))
 183		return ERR_PTR(-EINVAL);
 184
 185	return pd_to_genpd(dev->pm_domain);
 186}
 187
 188static int genpd_stop_dev(const struct generic_pm_domain *genpd,
 189			  struct device *dev)
 190{
 191	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
 192}
 193
 194static int genpd_start_dev(const struct generic_pm_domain *genpd,
 195			   struct device *dev)
 196{
 197	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
 198}
 199
 200static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 201{
 202	bool ret = false;
 203
 204	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
 205		ret = !!atomic_dec_and_test(&genpd->sd_count);
 206
 207	return ret;
 208}
 209
 210static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
 211{
 212	atomic_inc(&genpd->sd_count);
 213	smp_mb__after_atomic();
 214}
 215
 216#ifdef CONFIG_DEBUG_FS
 217static struct dentry *genpd_debugfs_dir;
 218
 219static void genpd_debug_add(struct generic_pm_domain *genpd);
 220
 221static void genpd_debug_remove(struct generic_pm_domain *genpd)
 222{
 223	struct dentry *d;
 224
 225	if (!genpd_debugfs_dir)
 226		return;
 227
 228	d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
 229	debugfs_remove(d);
 230}
 231
 232static void genpd_update_accounting(struct generic_pm_domain *genpd)
 233{
 234	u64 delta, now;
 235
 236	now = ktime_get_mono_fast_ns();
 237	if (now <= genpd->accounting_time)
 238		return;
 239
 240	delta = now - genpd->accounting_time;
 241
 242	/*
 243	 * If genpd->status is now ON, the domain has just come out of the
 244	 * off state, so the elapsed time was spent idle; otherwise account
 245	 * it as on time.
 246	 */
 247	if (genpd->status == GENPD_STATE_ON)
 248		genpd->states[genpd->state_idx].idle_time += delta;
 249	else
 250		genpd->on_time += delta;
 251
 252	genpd->accounting_time = now;
 253}
 254#else
 255static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
 256static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
 257static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
 258#endif
 259
 260static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
 261					   unsigned int state)
 262{
 263	struct generic_pm_domain_data *pd_data;
 264	struct pm_domain_data *pdd;
 265	struct gpd_link *link;
 266
 267	/* New requested state is same as Max requested state */
 268	if (state == genpd->performance_state)
 269		return state;
 270
 271	/* New requested state is higher than Max requested state */
 272	if (state > genpd->performance_state)
 273		return state;
 274
 275	/* Traverse all devices within the domain */
 276	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
 277		pd_data = to_gpd_data(pdd);
 278
 279		if (pd_data->performance_state > state)
 280			state = pd_data->performance_state;
 281	}
 282
 283	/*
 284	 * Traverse all sub-domains within the domain. This can be
 285	 * done without any additional locking as the link->performance_state
 286	 * field is protected by the parent genpd->lock, which is already taken.
 287	 *
 288	 * Also note that link->performance_state (subdomain's performance state
 289	 * requirement to parent domain) is different from
 290	 * link->child->performance_state (current performance state requirement
 291	 * of the devices/sub-domains of the subdomain) and so can have a
 292	 * different value.
 293	 *
 294	 * Note that we also take vote from powered-off sub-domains into account
 295	 * as the same is done for devices right now.
 296	 */
 297	list_for_each_entry(link, &genpd->parent_links, parent_node) {
 298		if (link->performance_state > state)
 299			state = link->performance_state;
 300	}
 301
 302	return state;
 303}
 304
 305static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
 306					 struct generic_pm_domain *parent,
 307					 unsigned int pstate)
 308{
 309	if (!parent->set_performance_state)
 310		return pstate;
 311
 312	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
 313						  parent->opp_table,
 314						  pstate);
 315}
 316
 317static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
 318					unsigned int state, int depth)
 319{
 320	struct generic_pm_domain *parent;
 321	struct gpd_link *link;
 322	int parent_state, ret;
 323
 324	if (state == genpd->performance_state)
 325		return 0;
 326
 327	/* Propagate to parents of genpd */
 328	list_for_each_entry(link, &genpd->child_links, child_node) {
 329		parent = link->parent;
 330
 331		/* Find parent's performance state */
 332		ret = genpd_xlate_performance_state(genpd, parent, state);
 333		if (unlikely(ret < 0))
 334			goto err;
 335
 336		parent_state = ret;
 337
 338		genpd_lock_nested(parent, depth + 1);
 339
 340		link->prev_performance_state = link->performance_state;
 341		link->performance_state = parent_state;
 342		parent_state = _genpd_reeval_performance_state(parent,
 343						parent_state);
 344		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
 345		if (ret)
 346			link->performance_state = link->prev_performance_state;
 347
 348		genpd_unlock(parent);
 349
 350		if (ret)
 351			goto err;
 352	}
 353
 354	if (genpd->set_performance_state) {
 355		ret = genpd->set_performance_state(genpd, state);
 356		if (ret)
 357			goto err;
 358	}
 359
 360	genpd->performance_state = state;
 361	return 0;
 362
 363err:
 364	/* Encountered an error, let's roll back */
 365	list_for_each_entry_continue_reverse(link, &genpd->child_links,
 366					     child_node) {
 367		parent = link->parent;
 368
 369		genpd_lock_nested(parent, depth + 1);
 370
 371		parent_state = link->prev_performance_state;
 372		link->performance_state = parent_state;
 373
 374		parent_state = _genpd_reeval_performance_state(parent,
 375						parent_state);
 376		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
 377			pr_err("%s: Failed to roll back to %d performance state\n",
 378			       parent->name, parent_state);
 379		}
 380
 381		genpd_unlock(parent);
 382	}
 383
 384	return ret;
 385}
 386
 387static int genpd_set_performance_state(struct device *dev, unsigned int state)
 388{
 389	struct generic_pm_domain *genpd = dev_to_genpd(dev);
 390	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
 391	unsigned int prev_state;
 392	int ret;
 393
 394	prev_state = gpd_data->performance_state;
 395	if (prev_state == state)
 396		return 0;
 397
 398	gpd_data->performance_state = state;
 399	state = _genpd_reeval_performance_state(genpd, state);
 400
 401	ret = _genpd_set_performance_state(genpd, state, 0);
 402	if (ret)
 403		gpd_data->performance_state = prev_state;
 404
 405	return ret;
 406}
 407
 408static int genpd_drop_performance_state(struct device *dev)
 409{
 410	unsigned int prev_state = dev_gpd_data(dev)->performance_state;
 411
 412	if (!genpd_set_performance_state(dev, 0))
 413		return prev_state;
 414
 415	return 0;
 416}
 417
 418static void genpd_restore_performance_state(struct device *dev,
 419					    unsigned int state)
 420{
 421	if (state)
 422		genpd_set_performance_state(dev, state);
 423}
 424
 425/**
 426 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 427 * domain.
 428 *
 429 * @dev: Device for which the performance-state needs to be set.
 430 * @state: Target performance state of the device. This can be set to 0 when
 431 *	   the device no longer has any performance state constraints (and so
 432 *	   it will no longer participate in determining the target performance
 433 *	   state of the genpd).
 434 *
 435 * It is assumed that the caller guarantees that the genpd won't be detached
 436 * while this routine is being called.
 437 *
 438 * Returns 0 on success and negative error values on failures.
 439 */
 440int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
 441{
 442	struct generic_pm_domain *genpd;
 443	int ret = 0;
 444
 445	genpd = dev_to_genpd_safe(dev);
 446	if (!genpd)
 447		return -ENODEV;
 448
 449	if (WARN_ON(!dev->power.subsys_data ||
 450		     !dev->power.subsys_data->domain_data))
 451		return -EINVAL;
 452
 453	genpd_lock(genpd);
 454	if (pm_runtime_suspended(dev)) {
 455		dev_gpd_data(dev)->rpm_pstate = state;
 456	} else {
 457		ret = genpd_set_performance_state(dev, state);
 458		if (!ret)
 459			dev_gpd_data(dev)->rpm_pstate = 0;
 460	}
 461	genpd_unlock(genpd);
 462
 463	return ret;
 464}
 465EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
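/*
 * A minimal usage sketch (not part of the original file): a consumer driver,
 * already attached to a genpd, votes for a performance state and later drops
 * its vote. The function name and the state value 3 are hypothetical.
 */
static int example_vote_perf_state(struct device *dev)
{
	int ret;

	/* Request at least performance state 3 from the device's genpd. */
	ret = dev_pm_genpd_set_performance_state(dev, 3);
	if (ret)
		return ret;

	/* ... run the demanding use case ... */

	/* Drop the vote; 0 means "no constraint from this device". */
	return dev_pm_genpd_set_performance_state(dev, 0);
}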
 466
 467/**
 468 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 469 *
 470 * @dev: Device to handle
 471 * @next: impending interrupt/wakeup for the device
 472 *
 473 *
 474 * Allow devices to inform of the next wakeup. It's assumed that the callers
 475 * guarantee that the genpd won't be detached while this routine is being
 476 * called. Additionally, it's also assumed that @dev isn't runtime suspended
 477 * (RPM_SUSPENDED).
 478 * Although devices are expected to update the next_wakeup after the end of
 479 * their use case as well, it is possible the devices themselves may not know
 480 * about that, so a stale @next will be ignored when powering off the domain.
 481 */
 482void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
 483{
 484	struct generic_pm_domain *genpd;
 485	struct gpd_timing_data *td;
 486
 487	genpd = dev_to_genpd_safe(dev);
 488	if (!genpd)
 489		return;
 490
 491	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
 492	if (td)
 493		td->next_wakeup = next;
 494}
 495EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
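/*
 * A minimal usage sketch (not part of the original file): a driver that
 * knows its next interrupt is due in roughly 2 ms passes that hint to the
 * governor. The function name and the 2 ms value are hypothetical.
 */
static void example_hint_next_wakeup(struct device *dev)
{
	/* Next expected wakeup: 2 ms from now on the monotonic clock. */
	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 2));
}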
 496
 497/**
 498 * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
 499 * @dev: A device that is attached to the genpd.
 500 *
 501 * This routine should typically be called for a device at the point when a
 502 * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
 503 *
 504 * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
 505 * valid value has been set.
 506 */
 507ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
 508{
 509	struct generic_pm_domain *genpd;
 510
 511	genpd = dev_to_genpd_safe(dev);
 512	if (!genpd)
 513		return KTIME_MAX;
 514
 515	if (genpd->gd)
 516		return genpd->gd->next_hrtimer;
 517
 518	return KTIME_MAX;
 519}
 520EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
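/*
 * A minimal usage sketch (not part of the original file): from a
 * GENPD_NOTIFY_PRE_OFF handler, a platform driver can read the genpd's next
 * hrtimer to judge whether a deep state pays off. The function name and the
 * 1 ms break-even threshold are hypothetical.
 */
static int example_pre_off_check(struct device *dev)
{
	ktime_t next = dev_pm_genpd_get_next_hrtimer(dev);

	/* KTIME_MAX means no valid value has been set; don't veto then. */
	if (next == KTIME_MAX)
		return 0;

	if (ktime_to_ns(ktime_sub(next, ktime_get())) < NSEC_PER_MSEC)
		return -EBUSY;

	return 0;
}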
 521
 522static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 523{
 524	unsigned int state_idx = genpd->state_idx;
 525	ktime_t time_start;
 526	s64 elapsed_ns;
 527	int ret;
 528
 529	/* Notify consumers that we are about to power on. */
 530	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
 531					     GENPD_NOTIFY_PRE_ON,
 532					     GENPD_NOTIFY_OFF, NULL);
 533	ret = notifier_to_errno(ret);
 534	if (ret)
 535		return ret;
 536
 537	if (!genpd->power_on)
 538		goto out;
 539
 540	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
 541	if (!timed) {
 542		ret = genpd->power_on(genpd);
 543		if (ret)
 544			goto err;
 545
 546		goto out;
 547	}
 548
 549	time_start = ktime_get();
 550	ret = genpd->power_on(genpd);
 551	if (ret)
 552		goto err;
 553
 554	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 555	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
 556		goto out;
 557
 558	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
 559	genpd->gd->max_off_time_changed = true;
 560	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
 561		 genpd->name, "on", elapsed_ns);
 562
 563out:
 564	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
 565	return 0;
 566err:
 567	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
 568				NULL);
 569	return ret;
 570}
 571
 572static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 573{
 574	unsigned int state_idx = genpd->state_idx;
 575	ktime_t time_start;
 576	s64 elapsed_ns;
 577	int ret;
 578
 579	/* Notify consumers that we are about to power off. */
 580	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
 581					     GENPD_NOTIFY_PRE_OFF,
 582					     GENPD_NOTIFY_ON, NULL);
 583	ret = notifier_to_errno(ret);
 584	if (ret)
 585		return ret;
 586
 587	if (!genpd->power_off)
 588		goto out;
 589
 590	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
 591	if (!timed) {
 592		ret = genpd->power_off(genpd);
 593		if (ret)
 594			goto busy;
 595
 596		goto out;
 597	}
 598
 599	time_start = ktime_get();
 600	ret = genpd->power_off(genpd);
 601	if (ret)
 602		goto busy;
 603
 604	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 605	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
 606		goto out;
 607
 608	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
 609	genpd->gd->max_off_time_changed = true;
 610	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
 611		 genpd->name, "off", elapsed_ns);
 612
 613out:
 614	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
 615				NULL);
 616	return 0;
 617busy:
 618	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
 619	return ret;
 620}
 621
 622/**
 623 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 624 * @genpd: PM domain to power off.
 625 *
 626 * Queue up the execution of genpd_power_off() unless it's already been done
 627 * before.
 628 */
 629static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 630{
 631	queue_work(pm_wq, &genpd->power_off_work);
 632}
 633
 634/**
 635 * genpd_power_off - Remove power from a given PM domain.
 636 * @genpd: PM domain to power down.
 637 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 638 * RPM status of the related device is in an intermediate state, not yet turned
 639 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 640 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 641 * @depth: nesting count for lockdep.
 642 *
 643 * If all of the @genpd's devices have been suspended and all of its subdomains
 644 * have been powered down, remove power from @genpd.
 645 */
 646static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
 647			   unsigned int depth)
 648{
 649	struct pm_domain_data *pdd;
 650	struct gpd_link *link;
 651	unsigned int not_suspended = 0;
 652	int ret;
 653
 654	/*
 655	 * Do not try to power off the domain in the following situations:
 656	 * (1) The domain is already in the "power off" state.
 657	 * (2) System suspend is in progress.
 658	 */
 659	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
 660		return 0;
 661
 662	/*
 663	 * Abort power off for the PM domain in the following situations:
 664	 * (1) The domain is configured as always on.
 665	 * (2) When the domain has a subdomain being powered on.
 666	 */
 667	if (genpd_is_always_on(genpd) ||
 668			genpd_is_rpm_always_on(genpd) ||
 669			atomic_read(&genpd->sd_count) > 0)
 670		return -EBUSY;
 671
 672	/*
 673	 * The children must be in their deepest (powered-off) states to allow
 674	 * the parent to be powered off. Note that there's no need for
 675	 * additional locking, as powering on a child requires the parent's
 676	 * lock to be acquired first.
 677	 */
 678	list_for_each_entry(link, &genpd->parent_links, parent_node) {
 679		struct generic_pm_domain *child = link->child;
 680		if (child->state_idx < child->state_count - 1)
 681			return -EBUSY;
 682	}
 683
 684	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
 685		/*
 686		 * Do not allow PM domain to be powered off, when an IRQ safe
 687		 * device is part of a non-IRQ safe domain.
 688		 */
 689		if (!pm_runtime_suspended(pdd->dev) ||
 690			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
 691			not_suspended++;
 692	}
 693
 694	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
 695		return -EBUSY;
 696
 697	if (genpd->gov && genpd->gov->power_down_ok) {
 698		if (!genpd->gov->power_down_ok(&genpd->domain))
 699			return -EAGAIN;
 700	}
 701
 702	/* Default to shallowest state. */
 703	if (!genpd->gov)
 704		genpd->state_idx = 0;
 705
 706	/* Don't power off if a child domain is waiting to power on. */
 707	if (atomic_read(&genpd->sd_count) > 0)
 708		return -EBUSY;
 709
 710	ret = _genpd_power_off(genpd, true);
 711	if (ret) {
 712		genpd->states[genpd->state_idx].rejected++;
 713		return ret;
 714	}
 715
 716	genpd->status = GENPD_STATE_OFF;
 717	genpd_update_accounting(genpd);
 718	genpd->states[genpd->state_idx].usage++;
 719
 720	list_for_each_entry(link, &genpd->child_links, child_node) {
 721		genpd_sd_counter_dec(link->parent);
 722		genpd_lock_nested(link->parent, depth + 1);
 723		genpd_power_off(link->parent, false, depth + 1);
 724		genpd_unlock(link->parent);
 725	}
 726
 727	return 0;
 728}
 729
 730/**
 731 * genpd_power_on - Restore power to a given PM domain and its parents.
 732 * @genpd: PM domain to power up.
 733 * @depth: nesting count for lockdep.
 734 *
 735 * Restore power to @genpd and all of its parents so that it is possible to
 736 * resume a device belonging to it.
 737 */
 738static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
 739{
 740	struct gpd_link *link;
 741	int ret = 0;
 742
 743	if (genpd_status_on(genpd))
 744		return 0;
 745
 746	/*
 747	 * The list is guaranteed not to change while the loop below is being
 748	 * executed, unless one of the parents' .power_on() callbacks fiddles
 749	 * with it.
 750	 */
 751	list_for_each_entry(link, &genpd->child_links, child_node) {
 752		struct generic_pm_domain *parent = link->parent;
 753
 754		genpd_sd_counter_inc(parent);
 755
 756		genpd_lock_nested(parent, depth + 1);
 757		ret = genpd_power_on(parent, depth + 1);
 758		genpd_unlock(parent);
 759
 760		if (ret) {
 761			genpd_sd_counter_dec(parent);
 762			goto err;
 763		}
 764	}
 765
 766	ret = _genpd_power_on(genpd, true);
 767	if (ret)
 768		goto err;
 769
 770	genpd->status = GENPD_STATE_ON;
 771	genpd_update_accounting(genpd);
 772
 773	return 0;
 774
 775 err:
 776	list_for_each_entry_continue_reverse(link,
 777					&genpd->child_links,
 778					child_node) {
 779		genpd_sd_counter_dec(link->parent);
 780		genpd_lock_nested(link->parent, depth + 1);
 781		genpd_power_off(link->parent, false, depth + 1);
 782		genpd_unlock(link->parent);
 783	}
 784
 785	return ret;
 786}
 787
 788static int genpd_dev_pm_start(struct device *dev)
 789{
 790	struct generic_pm_domain *genpd = dev_to_genpd(dev);
 791
 792	return genpd_start_dev(genpd, dev);
 793}
 794
 795static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 796				     unsigned long val, void *ptr)
 797{
 798	struct generic_pm_domain_data *gpd_data;
 799	struct device *dev;
 800
 801	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
 802	dev = gpd_data->base.dev;
 803
 804	for (;;) {
 805		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
 806		struct pm_domain_data *pdd;
 807		struct gpd_timing_data *td;
 808
 809		spin_lock_irq(&dev->power.lock);
 810
 811		pdd = dev->power.subsys_data ?
 812				dev->power.subsys_data->domain_data : NULL;
 813		if (pdd) {
 814			td = to_gpd_data(pdd)->td;
 815			if (td) {
 816				td->constraint_changed = true;
 817				genpd = dev_to_genpd(dev);
 818			}
 819		}
 820
 821		spin_unlock_irq(&dev->power.lock);
 822
 823		if (!IS_ERR(genpd)) {
 824			genpd_lock(genpd);
 825			genpd->gd->max_off_time_changed = true;
 826			genpd_unlock(genpd);
 827		}
 828
 829		dev = dev->parent;
 830		if (!dev || dev->power.ignore_children)
 831			break;
 832	}
 833
 834	return NOTIFY_DONE;
 835}
 836
 837/**
 838 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 839 * @work: Work structure used for scheduling the execution of this function.
 840 */
 841static void genpd_power_off_work_fn(struct work_struct *work)
 842{
 843	struct generic_pm_domain *genpd;
 844
 845	genpd = container_of(work, struct generic_pm_domain, power_off_work);
 846
 847	genpd_lock(genpd);
 848	genpd_power_off(genpd, false, 0);
 849	genpd_unlock(genpd);
 850}
 851
 852/**
 853 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 854 * @dev: Device to handle.
 855 */
 856static int __genpd_runtime_suspend(struct device *dev)
 857{
 858	int (*cb)(struct device *__dev);
 859
 860	if (dev->type && dev->type->pm)
 861		cb = dev->type->pm->runtime_suspend;
 862	else if (dev->class && dev->class->pm)
 863		cb = dev->class->pm->runtime_suspend;
 864	else if (dev->bus && dev->bus->pm)
 865		cb = dev->bus->pm->runtime_suspend;
 866	else
 867		cb = NULL;
 868
 869	if (!cb && dev->driver && dev->driver->pm)
 870		cb = dev->driver->pm->runtime_suspend;
 871
 872	return cb ? cb(dev) : 0;
 873}
 874
 875/**
 876 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 877 * @dev: Device to handle.
 878 */
 879static int __genpd_runtime_resume(struct device *dev)
 880{
 881	int (*cb)(struct device *__dev);
 882
 883	if (dev->type && dev->type->pm)
 884		cb = dev->type->pm->runtime_resume;
 885	else if (dev->class && dev->class->pm)
 886		cb = dev->class->pm->runtime_resume;
 887	else if (dev->bus && dev->bus->pm)
 888		cb = dev->bus->pm->runtime_resume;
 889	else
 890		cb = NULL;
 891
 892	if (!cb && dev->driver && dev->driver->pm)
 893		cb = dev->driver->pm->runtime_resume;
 894
 895	return cb ? cb(dev) : 0;
 896}
 897
 898/**
 899 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 900 * @dev: Device to suspend.
 901 *
 902 * Carry out a runtime suspend of a device under the assumption that its
 903 * pm_domain field points to the domain member of an object of type
 904 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 905 */
 906static int genpd_runtime_suspend(struct device *dev)
 907{
 908	struct generic_pm_domain *genpd;
 909	bool (*suspend_ok)(struct device *__dev);
 910	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
 911	struct gpd_timing_data *td = gpd_data->td;
 912	bool runtime_pm = pm_runtime_enabled(dev);
 913	ktime_t time_start = 0;
 914	s64 elapsed_ns;
 915	int ret;
 916
 917	dev_dbg(dev, "%s()\n", __func__);
 918
 919	genpd = dev_to_genpd(dev);
 920	if (IS_ERR(genpd))
 921		return -EINVAL;
 922
 923	/*
 924	 * A runtime PM centric subsystem/driver may re-use the runtime PM
 925	 * callbacks for other purposes than runtime PM. In those scenarios
 926	 * runtime PM is disabled. Under these circumstances, we shall skip
 927	 * validating/measuring the PM QoS latency.
 928	 */
 929	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
 930	if (runtime_pm && suspend_ok && !suspend_ok(dev))
 931		return -EBUSY;
 932
 933	/* Measure suspend latency. */
 934	if (td && runtime_pm)
 935		time_start = ktime_get();
 936
 937	ret = __genpd_runtime_suspend(dev);
 938	if (ret)
 939		return ret;
 940
 941	ret = genpd_stop_dev(genpd, dev);
 942	if (ret) {
 943		__genpd_runtime_resume(dev);
 944		return ret;
 945	}
 946
 947	/* Update suspend latency value if the measured time exceeds it. */
 948	if (td && runtime_pm) {
 949		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 950		if (elapsed_ns > td->suspend_latency_ns) {
 951			td->suspend_latency_ns = elapsed_ns;
 952			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
 953				elapsed_ns);
 954			genpd->gd->max_off_time_changed = true;
 955			td->constraint_changed = true;
 956		}
 957	}
 958
 959	/*
 960	 * If power.irq_safe is set, this routine may be run with
 961	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
 962	 */
 963	if (irq_safe_dev_in_sleep_domain(dev, genpd))
 964		return 0;
 965
 966	genpd_lock(genpd);
 967	genpd_power_off(genpd, true, 0);
 968	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
 969	genpd_unlock(genpd);
 970
 971	return 0;
 972}
 973
 974/**
 975 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 976 * @dev: Device to resume.
 977 *
 978 * Carry out a runtime resume of a device under the assumption that its
 979 * pm_domain field points to the domain member of an object of type
 980 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 981 */
 982static int genpd_runtime_resume(struct device *dev)
 983{
 984	struct generic_pm_domain *genpd;
 985	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
 986	struct gpd_timing_data *td = gpd_data->td;
 987	bool timed = td && pm_runtime_enabled(dev);
 988	ktime_t time_start = 0;
 989	s64 elapsed_ns;
 990	int ret;
 991
 992	dev_dbg(dev, "%s()\n", __func__);
 993
 994	genpd = dev_to_genpd(dev);
 995	if (IS_ERR(genpd))
 996		return -EINVAL;
 997
 998	/*
 999	 * As we don't power off a non-IRQ-safe domain that holds
1000	 * an IRQ safe device, we don't need to restore power to it.
1001	 */
1002	if (irq_safe_dev_in_sleep_domain(dev, genpd))
1003		goto out;
1004
1005	genpd_lock(genpd);
1006	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
1007	ret = genpd_power_on(genpd, 0);
1008	genpd_unlock(genpd);
1009
1010	if (ret)
1011		return ret;
1012
1013 out:
1014	/* Measure resume latency. */
1015	if (timed)
1016		time_start = ktime_get();
1017
1018	ret = genpd_start_dev(genpd, dev);
1019	if (ret)
1020		goto err_poweroff;
1021
1022	ret = __genpd_runtime_resume(dev);
1023	if (ret)
1024		goto err_stop;
1025
1026	/* Update resume latency value if the measured time exceeds it. */
1027	if (timed) {
1028		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1029		if (elapsed_ns > td->resume_latency_ns) {
1030			td->resume_latency_ns = elapsed_ns;
1031			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
1032				elapsed_ns);
1033			genpd->gd->max_off_time_changed = true;
1034			td->constraint_changed = true;
1035		}
1036	}
1037
1038	return 0;
1039
1040err_stop:
1041	genpd_stop_dev(genpd, dev);
1042err_poweroff:
1043	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
1044		genpd_lock(genpd);
1045		genpd_power_off(genpd, true, 0);
1046		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1047		genpd_unlock(genpd);
1048	}
1049
1050	return ret;
1051}
1052
1053static bool pd_ignore_unused;
1054static int __init pd_ignore_unused_setup(char *__unused)
1055{
1056	pd_ignore_unused = true;
1057	return 1;
1058}
1059__setup("pd_ignore_unused", pd_ignore_unused_setup);
1060
1061/**
1062 * genpd_power_off_unused - Power off all PM domains with no devices in use.
1063 */
1064static int __init genpd_power_off_unused(void)
1065{
1066	struct generic_pm_domain *genpd;
1067
1068	if (pd_ignore_unused) {
1069		pr_warn("genpd: Not disabling unused power domains\n");
1070		return 0;
1071	}
1072
1073	mutex_lock(&gpd_list_lock);
1074
1075	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1076		genpd_queue_power_off_work(genpd);
1077
1078	mutex_unlock(&gpd_list_lock);
1079
1080	return 0;
1081}
1082late_initcall(genpd_power_off_unused);
1083
1084#ifdef CONFIG_PM_SLEEP
1085
1086/**
1087 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
1088 * @genpd: PM domain to power off, if possible.
1089 * @use_lock: use the lock.
1090 * @depth: nesting count for lockdep.
1091 *
1092 * Check if the given PM domain can be powered off (during system suspend or
1093 * hibernation) and do that if so.  Also, in that case propagate to its parents.
1094 *
1095 * This function is only called in "noirq" and "syscore" stages of system power
1096 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1097 * these cases the lock must be held.
1098 */
1099static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
1100				 unsigned int depth)
1101{
1102	struct gpd_link *link;
1103
1104	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
1105		return;
1106
1107	if (genpd->suspended_count != genpd->device_count
1108	    || atomic_read(&genpd->sd_count) > 0)
1109		return;
1110
1111	/* Check that the children are in their deepest (powered-off) state. */
1112	list_for_each_entry(link, &genpd->parent_links, parent_node) {
1113		struct generic_pm_domain *child = link->child;
1114		if (child->state_idx < child->state_count - 1)
1115			return;
1116	}
1117
1118	/* Choose the deepest state when suspending */
1119	genpd->state_idx = genpd->state_count - 1;
1120	if (_genpd_power_off(genpd, false))
1121		return;
1122
1123	genpd->status = GENPD_STATE_OFF;
1124
1125	list_for_each_entry(link, &genpd->child_links, child_node) {
1126		genpd_sd_counter_dec(link->parent);
1127
1128		if (use_lock)
1129			genpd_lock_nested(link->parent, depth + 1);
1130
1131		genpd_sync_power_off(link->parent, use_lock, depth + 1);
1132
1133		if (use_lock)
1134			genpd_unlock(link->parent);
1135	}
1136}
1137
1138/**
1139 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
1140 * @genpd: PM domain to power on.
1141 * @use_lock: use the lock.
1142 * @depth: nesting count for lockdep.
1143 *
1144 * This function is only called in "noirq" and "syscore" stages of system power
1145 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1146 * these cases the lock must be held.
1147 */
1148static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1149				unsigned int depth)
1150{
1151	struct gpd_link *link;
1152
1153	if (genpd_status_on(genpd))
1154		return;
1155
1156	list_for_each_entry(link, &genpd->child_links, child_node) {
1157		genpd_sd_counter_inc(link->parent);
1158
1159		if (use_lock)
1160			genpd_lock_nested(link->parent, depth + 1);
1161
1162		genpd_sync_power_on(link->parent, use_lock, depth + 1);
1163
1164		if (use_lock)
1165			genpd_unlock(link->parent);
1166	}
1167
1168	_genpd_power_on(genpd, false);
1169	genpd->status = GENPD_STATE_ON;
1170}
1171
1172/**
1173 * genpd_prepare - Start power transition of a device in a PM domain.
1174 * @dev: Device to start the transition of.
1175 *
1176 * Start a power transition of a device (during a system-wide power transition)
1177 * under the assumption that its pm_domain field points to the domain member of
1178 * an object of type struct generic_pm_domain representing a PM domain
1179 * consisting of I/O devices.
1180 */
1181static int genpd_prepare(struct device *dev)
1182{
1183	struct generic_pm_domain *genpd;
1184	int ret;
1185
1186	dev_dbg(dev, "%s()\n", __func__);
1187
1188	genpd = dev_to_genpd(dev);
1189	if (IS_ERR(genpd))
1190		return -EINVAL;
1191
1192	genpd_lock(genpd);
1193
1194	if (genpd->prepared_count++ == 0)
1195		genpd->suspended_count = 0;
1196
1197	genpd_unlock(genpd);
1198
1199	ret = pm_generic_prepare(dev);
1200	if (ret < 0) {
1201		genpd_lock(genpd);
1202
1203		genpd->prepared_count--;
1204
1205		genpd_unlock(genpd);
1206	}
1207
1208	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
1209	return ret >= 0 ? 0 : ret;
1210}
1211
1212/**
1213 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1214 *   I/O pm domain.
1215 * @dev: Device to suspend.
1216 * @suspend_noirq: Generic suspend_noirq callback.
1217 * @resume_noirq: Generic resume_noirq callback.
1218 *
1219 * Stop the device and remove power from the domain if all devices in it have
1220 * been stopped.
1221 */
1222static int genpd_finish_suspend(struct device *dev,
1223				int (*suspend_noirq)(struct device *dev),
1224				int (*resume_noirq)(struct device *dev))
1225{
1226	struct generic_pm_domain *genpd;
1227	int ret = 0;
1228
1229	genpd = dev_to_genpd(dev);
1230	if (IS_ERR(genpd))
1231		return -EINVAL;
1232
1233	ret = suspend_noirq(dev);
1234	if (ret)
1235		return ret;
1236
1237	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1238		return 0;
1239
1240	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1241	    !pm_runtime_status_suspended(dev)) {
1242		ret = genpd_stop_dev(genpd, dev);
1243		if (ret) {
1244			resume_noirq(dev);
1245			return ret;
1246		}
1247	}
1248
1249	genpd_lock(genpd);
1250	genpd->suspended_count++;
1251	genpd_sync_power_off(genpd, true, 0);
1252	genpd_unlock(genpd);
1253
1254	return 0;
1255}
1256
1257/**
1258 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1259 * @dev: Device to suspend.
1260 *
1261 * Stop the device and remove power from the domain if all devices in it have
1262 * been stopped.
1263 */
1264static int genpd_suspend_noirq(struct device *dev)
1265{
1266	dev_dbg(dev, "%s()\n", __func__);
1267
1268	return genpd_finish_suspend(dev,
1269				    pm_generic_suspend_noirq,
1270				    pm_generic_resume_noirq);
1271}
1272
1273/**
1274 * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
1275 * @dev: Device to resume.
1276 * @resume_noirq: Generic resume_noirq callback.
1277 *
1278 * Restore power to the device's PM domain, if necessary, and start the device.
1279 */
1280static int genpd_finish_resume(struct device *dev,
1281			       int (*resume_noirq)(struct device *dev))
1282{
1283	struct generic_pm_domain *genpd;
1284	int ret;
1285
1286	dev_dbg(dev, "%s()\n", __func__);
1287
1288	genpd = dev_to_genpd(dev);
1289	if (IS_ERR(genpd))
1290		return -EINVAL;
1291
1292	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1293		return resume_noirq(dev);
1294
1295	genpd_lock(genpd);
1296	genpd_sync_power_on(genpd, true, 0);
1297	genpd->suspended_count--;
1298	genpd_unlock(genpd);
1299
1300	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1301	    !pm_runtime_status_suspended(dev)) {
1302		ret = genpd_start_dev(genpd, dev);
1303		if (ret)
1304			return ret;
1305	}
1306
1307	return pm_generic_resume_noirq(dev);
1308}
1309
1310/**
1311 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1312 * @dev: Device to resume.
1313 *
1314 * Restore power to the device's PM domain, if necessary, and start the device.
1315 */
1316static int genpd_resume_noirq(struct device *dev)
1317{
1318	dev_dbg(dev, "%s()\n", __func__);
1319
1320	return genpd_finish_resume(dev, pm_generic_resume_noirq);
1321}
1322
1323/**
1324 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1325 * @dev: Device to freeze.
1326 *
1327 * Carry out a late freeze of a device under the assumption that its
1328 * pm_domain field points to the domain member of an object of type
1329 * struct generic_pm_domain representing a power domain consisting of I/O
1330 * devices.
1331 */
1332static int genpd_freeze_noirq(struct device *dev)
1333{
1334	dev_dbg(dev, "%s()\n", __func__);
1335
1336	return genpd_finish_suspend(dev,
1337				    pm_generic_freeze_noirq,
1338				    pm_generic_thaw_noirq);
1339}
1340
1341/**
1342 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1343 * @dev: Device to thaw.
1344 *
1345 * Start the device, unless power has been removed from the domain already
1346 * before the system transition.
1347 */
1348static int genpd_thaw_noirq(struct device *dev)
1349{
1350	dev_dbg(dev, "%s()\n", __func__);
1351
1352	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
1353}
1354
1355/**
1356 * genpd_poweroff_noirq - Completion of hibernation of device in an
1357 *   I/O PM domain.
1358 * @dev: Device to poweroff.
1359 *
1360 * Stop the device and remove power from the domain if all devices in it have
1361 * been stopped.
1362 */
1363static int genpd_poweroff_noirq(struct device *dev)
1364{
1365	dev_dbg(dev, "%s()\n", __func__);
1366
1367	return genpd_finish_suspend(dev,
1368				    pm_generic_poweroff_noirq,
1369				    pm_generic_restore_noirq);
1370}
1371
1372/**
1373 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1374 * @dev: Device to resume.
1375 *
1376 * Make sure the domain will be in the same power state as before the
1377 * hibernation the system is resuming from and start the device if necessary.
1378 */
1379static int genpd_restore_noirq(struct device *dev)
1380{
1381	dev_dbg(dev, "%s()\n", __func__);
1382
1383	return genpd_finish_resume(dev, pm_generic_restore_noirq);
1384}
1385
1386/**
1387 * genpd_complete - Complete power transition of a device in a power domain.
1388 * @dev: Device to complete the transition of.
1389 *
1390 * Complete a power transition of a device (during a system-wide power
1391 * transition) under the assumption that its pm_domain field points to the
1392 * domain member of an object of type struct generic_pm_domain representing
1393 * a power domain consisting of I/O devices.
1394 */
1395static void genpd_complete(struct device *dev)
1396{
1397	struct generic_pm_domain *genpd;
1398
1399	dev_dbg(dev, "%s()\n", __func__);
1400
1401	genpd = dev_to_genpd(dev);
1402	if (IS_ERR(genpd))
1403		return;
1404
1405	pm_generic_complete(dev);
1406
1407	genpd_lock(genpd);
1408
1409	genpd->prepared_count--;
1410	if (!genpd->prepared_count)
1411		genpd_queue_power_off_work(genpd);
1412
1413	genpd_unlock(genpd);
1414}
1415
1416static void genpd_switch_state(struct device *dev, bool suspend)
1417{
1418	struct generic_pm_domain *genpd;
1419	bool use_lock;
1420
1421	genpd = dev_to_genpd_safe(dev);
1422	if (!genpd)
1423		return;
1424
1425	use_lock = genpd_is_irq_safe(genpd);
1426
1427	if (use_lock)
1428		genpd_lock(genpd);
1429
1430	if (suspend) {
1431		genpd->suspended_count++;
1432		genpd_sync_power_off(genpd, use_lock, 0);
1433	} else {
1434		genpd_sync_power_on(genpd, use_lock, 0);
1435		genpd->suspended_count--;
1436	}
1437
1438	if (use_lock)
1439		genpd_unlock(genpd);
1440}
1441
1442/**
1443 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1444 * @dev: The device that is attached to the genpd, that can be suspended.
1445 *
1446 * This routine should typically be called for a device that needs to be
1447 * suspended during the syscore suspend phase. It may also be called during
1448 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1449 * genpd.
1450 */
1451void dev_pm_genpd_suspend(struct device *dev)
1452{
1453	genpd_switch_state(dev, true);
1454}
1455EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
1456
1457/**
1458 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1459 * @dev: The device that is attached to the genpd, which needs to be resumed.
1460 *
1461 * This routine should typically be called for a device that needs to be resumed
1462 * during the syscore resume phase. It may also be called during suspend-to-idle
1463 * to resume a corresponding CPU device that is attached to a genpd.
1464 */
1465void dev_pm_genpd_resume(struct device *dev)
1466{
1467	genpd_switch_state(dev, false);
1468}
1469EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
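/*
 * A minimal usage sketch (not part of the original file): a syscore-style
 * driver pairs the two calls around the phase where regular PM callbacks do
 * not run. The device is assumed to be attached to a genpd; the function
 * names are hypothetical.
 */
static void example_syscore_suspend(struct device *dev)
{
	dev_pm_genpd_suspend(dev);	/* powers off the domain if possible */
}

static void example_syscore_resume(struct device *dev)
{
	dev_pm_genpd_resume(dev);	/* powers the domain back on */
}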
1470
1471#else /* !CONFIG_PM_SLEEP */
1472
1473#define genpd_prepare		NULL
1474#define genpd_suspend_noirq	NULL
1475#define genpd_resume_noirq	NULL
1476#define genpd_freeze_noirq	NULL
1477#define genpd_thaw_noirq	NULL
1478#define genpd_poweroff_noirq	NULL
1479#define genpd_restore_noirq	NULL
1480#define genpd_complete		NULL
1481
1482#endif /* CONFIG_PM_SLEEP */
1483
1484static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1485							   bool has_governor)
1486{
1487	struct generic_pm_domain_data *gpd_data;
1488	struct gpd_timing_data *td;
1489	int ret;
1490
1491	ret = dev_pm_get_subsys_data(dev);
1492	if (ret)
1493		return ERR_PTR(ret);
1494
1495	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1496	if (!gpd_data) {
1497		ret = -ENOMEM;
1498		goto err_put;
1499	}
1500
1501	gpd_data->base.dev = dev;
1502	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1503
1504	/* Allocate data used by a governor. */
1505	if (has_governor) {
1506		td = kzalloc(sizeof(*td), GFP_KERNEL);
1507		if (!td) {
1508			ret = -ENOMEM;
1509			goto err_free;
1510		}
1511
1512		td->constraint_changed = true;
1513		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1514		td->next_wakeup = KTIME_MAX;
1515		gpd_data->td = td;
1516	}
1517
1518	spin_lock_irq(&dev->power.lock);
1519
1520	if (dev->power.subsys_data->domain_data)
1521		ret = -EINVAL;
1522	else
1523		dev->power.subsys_data->domain_data = &gpd_data->base;
1524
1525	spin_unlock_irq(&dev->power.lock);
1526
1527	if (ret)
1528		goto err_free;
1529
1530	return gpd_data;
1531
1532 err_free:
1533	kfree(gpd_data->td);
1534	kfree(gpd_data);
1535 err_put:
1536	dev_pm_put_subsys_data(dev);
1537	return ERR_PTR(ret);
1538}
1539
1540static void genpd_free_dev_data(struct device *dev,
1541				struct generic_pm_domain_data *gpd_data)
1542{
1543	spin_lock_irq(&dev->power.lock);
1544
1545	dev->power.subsys_data->domain_data = NULL;
1546
1547	spin_unlock_irq(&dev->power.lock);
1548
1549	kfree(gpd_data->td);
1550	kfree(gpd_data);
1551	dev_pm_put_subsys_data(dev);
1552}
1553
1554static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1555				 int cpu, bool set, unsigned int depth)
1556{
1557	struct gpd_link *link;
1558
1559	if (!genpd_is_cpu_domain(genpd))
1560		return;
1561
1562	list_for_each_entry(link, &genpd->child_links, child_node) {
1563		struct generic_pm_domain *parent = link->parent;
1564
1565		genpd_lock_nested(parent, depth + 1);
1566		genpd_update_cpumask(parent, cpu, set, depth + 1);
1567		genpd_unlock(parent);
1568	}
1569
1570	if (set)
1571		cpumask_set_cpu(cpu, genpd->cpus);
1572	else
1573		cpumask_clear_cpu(cpu, genpd->cpus);
1574}
1575
1576static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1577{
1578	if (cpu >= 0)
1579		genpd_update_cpumask(genpd, cpu, true, 0);
1580}
1581
1582static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1583{
1584	if (cpu >= 0)
1585		genpd_update_cpumask(genpd, cpu, false, 0);
1586}
1587
1588static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1589{
1590	int cpu;
1591
1592	if (!genpd_is_cpu_domain(genpd))
1593		return -1;
1594
1595	for_each_possible_cpu(cpu) {
1596		if (get_cpu_device(cpu) == dev)
1597			return cpu;
1598	}
1599
1600	return -1;
1601}
1602
1603static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1604			    struct device *base_dev)
1605{
1606	struct genpd_governor_data *gd = genpd->gd;
1607	struct generic_pm_domain_data *gpd_data;
1608	int ret;
1609
1610	dev_dbg(dev, "%s()\n", __func__);
1611
1612	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1613		return -EINVAL;
1614
1615	gpd_data = genpd_alloc_dev_data(dev, gd);
1616	if (IS_ERR(gpd_data))
1617		return PTR_ERR(gpd_data);
1618
1619	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1620
1621	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1622	if (ret)
1623		goto out;
1624
1625	genpd_lock(genpd);
1626
1627	genpd_set_cpumask(genpd, gpd_data->cpu);
1628	dev_pm_domain_set(dev, &genpd->domain);
1629
1630	genpd->device_count++;
1631	if (gd)
1632		gd->max_off_time_changed = true;
1633
1634	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1635
1636	genpd_unlock(genpd);
1637 out:
1638	if (ret)
1639		genpd_free_dev_data(dev, gpd_data);
1640	else
1641		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1642					DEV_PM_QOS_RESUME_LATENCY);
1643
1644	return ret;
1645}
1646
1647/**
1648 * pm_genpd_add_device - Add a device to an I/O PM domain.
1649 * @genpd: PM domain to add the device to.
1650 * @dev: Device to be added.
1651 */
1652int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1653{
1654	int ret;
1655
1656	mutex_lock(&gpd_list_lock);
1657	ret = genpd_add_device(genpd, dev, dev);
1658	mutex_unlock(&gpd_list_lock);
1659
1660	return ret;
1661}
1662EXPORT_SYMBOL_GPL(pm_genpd_add_device);
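/*
 * A minimal usage sketch (not part of the original file): a PM domain
 * provider attaching a platform device to a domain it registered earlier.
 * "my_pd" and the function name are hypothetical.
 */
static int example_attach(struct generic_pm_domain *my_pd,
			  struct platform_device *pdev)
{
	return pm_genpd_add_device(my_pd, &pdev->dev);
}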
1663
1664static int genpd_remove_device(struct generic_pm_domain *genpd,
1665			       struct device *dev)
1666{
1667	struct generic_pm_domain_data *gpd_data;
1668	struct pm_domain_data *pdd;
1669	int ret = 0;
1670
1671	dev_dbg(dev, "%s()\n", __func__);
1672
1673	pdd = dev->power.subsys_data->domain_data;
1674	gpd_data = to_gpd_data(pdd);
1675	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
1676				   DEV_PM_QOS_RESUME_LATENCY);
1677
1678	genpd_lock(genpd);
1679
1680	if (genpd->prepared_count > 0) {
1681		ret = -EAGAIN;
1682		goto out;
1683	}
1684
1685	genpd->device_count--;
1686	if (genpd->gd)
1687		genpd->gd->max_off_time_changed = true;
1688
1689	genpd_clear_cpumask(genpd, gpd_data->cpu);
1690	dev_pm_domain_set(dev, NULL);
1691
1692	list_del_init(&pdd->list_node);
1693
1694	genpd_unlock(genpd);
1695
1696	if (genpd->detach_dev)
1697		genpd->detach_dev(genpd, dev);
1698
1699	genpd_free_dev_data(dev, gpd_data);
1700
1701	return 0;
1702
1703 out:
1704	genpd_unlock(genpd);
1705	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
1706
1707	return ret;
1708}
1709
1710/**
1711 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1712 * @dev: Device to be removed.
1713 */
1714int pm_genpd_remove_device(struct device *dev)
1715{
1716	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
1717
1718	if (!genpd)
1719		return -EINVAL;
1720
1721	return genpd_remove_device(genpd, dev);
1722}
1723EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
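/*
 * A minimal usage sketch (not part of the original file): the matching
 * detach, typically from the provider's teardown path. The function name is
 * hypothetical.
 */
static void example_detach(struct platform_device *pdev)
{
	int ret = pm_genpd_remove_device(&pdev->dev);

	if (ret)
		dev_warn(&pdev->dev, "failed to detach from genpd: %d\n", ret);
}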
1724
1725/**
1726 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
1727 *
1728 * @dev: Device that should be associated with the notifier
1729 * @nb: The notifier block to register
1730 *
1731 * Users may call this function to add a genpd power on/off notifier for an
1732 * attached @dev. Only one notifier per device is allowed. The notifier is
1733 * sent when genpd is powering on/off the PM domain.
1734 *
1735 * It is assumed that the user guarantees that the genpd won't be detached
1736 * while this routine is being called.
1737 *
1738 * Returns 0 on success and negative error values on failures.
1739 */
1740int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
1741{
1742	struct generic_pm_domain *genpd;
1743	struct generic_pm_domain_data *gpd_data;
1744	int ret;
1745
1746	genpd = dev_to_genpd_safe(dev);
1747	if (!genpd)
1748		return -ENODEV;
1749
1750	if (WARN_ON(!dev->power.subsys_data ||
1751		     !dev->power.subsys_data->domain_data))
1752		return -EINVAL;
1753
1754	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1755	if (gpd_data->power_nb)
1756		return -EEXIST;
1757
1758	genpd_lock(genpd);
1759	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
1760	genpd_unlock(genpd);
1761
1762	if (ret) {
1763		dev_warn(dev, "failed to add notifier for PM domain %s\n",
1764			 genpd->name);
1765		return ret;
1766	}
1767
1768	gpd_data->power_nb = nb;
1769	return 0;
1770}
1771EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
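/*
 * A minimal usage sketch (not part of the original file): a notifier that
 * saves and restores some hypothetical device context around domain power
 * transitions, registered with dev_pm_genpd_add_notifier(dev, &example_nb).
 */
static int example_genpd_notifier(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	switch (action) {
	case GENPD_NOTIFY_PRE_OFF:
		/* Save context before the domain loses power. */
		break;
	case GENPD_NOTIFY_ON:
		/* Restore context once the domain is powered again. */
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_genpd_notifier,
};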
1772
1773/**
1774 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
1775 *
1776 * @dev: Device that is associated with the notifier
1777 *
1778 * Users may call this function to remove a genpd power on/off notifier for an
1779 * attached @dev.
1780 *
1781 * It is assumed that the user guarantees that the genpd won't be detached
1782 * while this routine is being called.
1783 *
1784 * Returns 0 on success and negative error values on failures.
1785 */
1786int dev_pm_genpd_remove_notifier(struct device *dev)
1787{
1788	struct generic_pm_domain *genpd;
1789	struct generic_pm_domain_data *gpd_data;
1790	int ret;
1791
1792	genpd = dev_to_genpd_safe(dev);
1793	if (!genpd)
1794		return -ENODEV;
1795
1796	if (WARN_ON(!dev->power.subsys_data ||
1797		     !dev->power.subsys_data->domain_data))
1798		return -EINVAL;
1799
1800	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1801	if (!gpd_data->power_nb)
1802		return -ENODEV;
1803
1804	genpd_lock(genpd);
1805	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
1806					    gpd_data->power_nb);
1807	genpd_unlock(genpd);
1808
1809	if (ret) {
1810		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
1811			 genpd->name);
1812		return ret;
1813	}
1814
1815	gpd_data->power_nb = NULL;
1816	return 0;
1817}
1818EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
1819
1820static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1821			       struct generic_pm_domain *subdomain)
1822{
1823	struct gpd_link *link, *itr;
1824	int ret = 0;
1825
1826	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1827	    || genpd == subdomain)
1828		return -EINVAL;
1829
1830	/*
1831	 * If the domain can be powered on/off in an IRQ safe
1832	 * context, ensure that the subdomain can also be
1833	 * powered on/off in that context.
1834	 */
1835	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1836		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1837				genpd->name, subdomain->name);
1838		return -EINVAL;
1839	}
1840
1841	link = kzalloc(sizeof(*link), GFP_KERNEL);
1842	if (!link)
1843		return -ENOMEM;
1844
1845	genpd_lock(subdomain);
1846	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1847
1848	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1849		ret = -EINVAL;
1850		goto out;
1851	}
1852
1853	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
1854		if (itr->child == subdomain && itr->parent == genpd) {
1855			ret = -EINVAL;
1856			goto out;
1857		}
1858	}
1859
1860	link->parent = genpd;
1861	list_add_tail(&link->parent_node, &genpd->parent_links);
1862	link->child = subdomain;
1863	list_add_tail(&link->child_node, &subdomain->child_links);
1864	if (genpd_status_on(subdomain))
1865		genpd_sd_counter_inc(genpd);
1866
1867 out:
1868	genpd_unlock(genpd);
1869	genpd_unlock(subdomain);
1870	if (ret)
1871		kfree(link);
1872	return ret;
1873}
1874
1875/**
1876 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1877 * @genpd: Leader PM domain to add the subdomain to.
1878 * @subdomain: Subdomain to be added.
1879 */
1880int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1881			   struct generic_pm_domain *subdomain)
1882{
1883	int ret;
1884
1885	mutex_lock(&gpd_list_lock);
1886	ret = genpd_add_subdomain(genpd, subdomain);
1887	mutex_unlock(&gpd_list_lock);
1888
1889	return ret;
1890}
1891EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
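/*
 * A minimal usage sketch (not part of the original file): wiring a child
 * domain under a parent, e.g. during provider probe. Both domains are
 * hypothetical and assumed initialized with pm_genpd_init(); the parent must
 * then stay on whenever the child is on.
 */
static int example_link_domains(struct generic_pm_domain *parent,
				struct generic_pm_domain *child)
{
	return pm_genpd_add_subdomain(parent, child);
}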
1892
1893/**
1894 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1895 * @genpd: Leader PM domain to remove the subdomain from.
1896 * @subdomain: Subdomain to be removed.
1897 */
1898int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1899			      struct generic_pm_domain *subdomain)
1900{
1901	struct gpd_link *l, *link;
1902	int ret = -EINVAL;
1903
1904	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1905		return -EINVAL;
1906
1907	genpd_lock(subdomain);
1908	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1909
1910	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
1911		pr_warn("%s: unable to remove subdomain %s\n",
1912			genpd->name, subdomain->name);
1913		ret = -EBUSY;
1914		goto out;
1915	}
1916
1917	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1918		if (link->child != subdomain)
1919			continue;
1920
1921		list_del(&link->parent_node);
1922		list_del(&link->child_node);
1923		kfree(link);
1924		if (genpd_status_on(subdomain))
1925			genpd_sd_counter_dec(genpd);
1926
1927		ret = 0;
1928		break;
1929	}
1930
1931out:
1932	genpd_unlock(genpd);
1933	genpd_unlock(subdomain);
1934
1935	return ret;
1936}
1937EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1938
1939static void genpd_free_default_power_state(struct genpd_power_state *states,
1940					   unsigned int state_count)
1941{
1942	kfree(states);
1943}
1944
1945static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1946{
1947	struct genpd_power_state *state;
1948
1949	state = kzalloc(sizeof(*state), GFP_KERNEL);
1950	if (!state)
1951		return -ENOMEM;
1952
1953	genpd->states = state;
1954	genpd->state_count = 1;
1955	genpd->free_states = genpd_free_default_power_state;
1956
1957	return 0;
1958}
1959
1960static int genpd_alloc_data(struct generic_pm_domain *genpd)
1961{
1962	struct genpd_governor_data *gd = NULL;
1963	int ret;
1964
1965	if (genpd_is_cpu_domain(genpd) &&
1966	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
1967		return -ENOMEM;
1968
1969	if (genpd->gov) {
1970		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
1971		if (!gd) {
1972			ret = -ENOMEM;
1973			goto free;
1974		}
1975
1976		gd->max_off_time_ns = -1;
1977		gd->max_off_time_changed = true;
1978		gd->next_wakeup = KTIME_MAX;
1979		gd->next_hrtimer = KTIME_MAX;
1980	}
1981
1982	/* Use only one "off" state if there were no states declared */
1983	if (genpd->state_count == 0) {
1984		ret = genpd_set_default_power_state(genpd);
1985		if (ret)
1986			goto free;
1987	}
1988
1989	genpd->gd = gd;
1990	return 0;
1991
1992free:
1993	if (genpd_is_cpu_domain(genpd))
1994		free_cpumask_var(genpd->cpus);
1995	kfree(gd);
1996	return ret;
1997}
1998
1999static void genpd_free_data(struct generic_pm_domain *genpd)
2000{
2001	if (genpd_is_cpu_domain(genpd))
2002		free_cpumask_var(genpd->cpus);
2003	if (genpd->free_states)
2004		genpd->free_states(genpd->states, genpd->state_count);
2005	kfree(genpd->gd);
2006}
2007
2008static void genpd_lock_init(struct generic_pm_domain *genpd)
2009{
2010	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
2011		spin_lock_init(&genpd->slock);
2012		genpd->lock_ops = &genpd_spin_ops;
2013	} else {
2014		mutex_init(&genpd->mlock);
2015		genpd->lock_ops = &genpd_mtx_ops;
2016	}
2017}
2018
2019/**
2020 * pm_genpd_init - Initialize a generic I/O PM domain object.
2021 * @genpd: PM domain object to initialize.
2022 * @gov: PM domain governor to associate with the domain (may be NULL).
2023 * @is_off: Whether the domain should start in the powered-off state.
2024 *
2025 * Returns 0 on successful initialization, else a negative error code.
2026 */
2027int pm_genpd_init(struct generic_pm_domain *genpd,
2028		  struct dev_power_governor *gov, bool is_off)
2029{
2030	int ret;
2031
2032	if (IS_ERR_OR_NULL(genpd))
2033		return -EINVAL;
2034
2035	INIT_LIST_HEAD(&genpd->parent_links);
2036	INIT_LIST_HEAD(&genpd->child_links);
2037	INIT_LIST_HEAD(&genpd->dev_list);
2038	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
2039	genpd_lock_init(genpd);
2040	genpd->gov = gov;
2041	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2042	atomic_set(&genpd->sd_count, 0);
2043	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
2044	genpd->device_count = 0;
2045	genpd->provider = NULL;
2046	genpd->has_provider = false;
2047	genpd->accounting_time = ktime_get_mono_fast_ns();
2048	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2049	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
2050	genpd->domain.ops.prepare = genpd_prepare;
2051	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2052	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2053	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2054	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2055	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2056	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2057	genpd->domain.ops.complete = genpd_complete;
2058	genpd->domain.start = genpd_dev_pm_start;
2059
2060	if (genpd->flags & GENPD_FLAG_PM_CLK) {
2061		genpd->dev_ops.stop = pm_clk_suspend;
2062		genpd->dev_ops.start = pm_clk_resume;
2063	}
2064
2065	/* The always-on governor works better with the corresponding flag. */
2066	if (gov == &pm_domain_always_on_gov)
2067		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2068
2069	/* Always-on domains must be powered on at initialization. */
2070	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2071			!genpd_status_on(genpd)) {
2072		pr_err("always-on PM domain %s is not on\n", genpd->name);
2073		return -EINVAL;
2074	}
2075
2076	/* Multiple states but no governor doesn't make sense. */
2077	if (!gov && genpd->state_count > 1)
2078		pr_warn("%s: no governor for states\n", genpd->name);
2079
2080	ret = genpd_alloc_data(genpd);
2081	if (ret)
2082		return ret;
2083
2084	device_initialize(&genpd->dev);
2085	dev_set_name(&genpd->dev, "%s", genpd->name);
2086
2087	mutex_lock(&gpd_list_lock);
2088	list_add(&genpd->gpd_list_node, &gpd_list);
2089	mutex_unlock(&gpd_list_lock);
2090	genpd_debug_add(genpd);
2091
2092	return 0;
2093}
2094EXPORT_SYMBOL_GPL(pm_genpd_init);
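
/*
 * Editorial sketch: minimal registration of a domain with pm_genpd_init().
 * The power controller callbacks and all foo_* names are hypothetical.
 */
#if 0
static int foo_pd_power_on(struct generic_pm_domain *domain)
{
	/* Program the (hypothetical) power controller to enable the island. */
	return 0;
}

static int foo_pd_power_off(struct generic_pm_domain *domain)
{
	/* Program the power controller to disable the island. */
	return 0;
}

static struct generic_pm_domain foo_pd = {
	.name = "foo-pd",
	.power_on = foo_pd_power_on,
	.power_off = foo_pd_power_off,
};

static int foo_pd_setup(void)
{
	/* NULL governor; the domain starts in the powered-on state. */
	return pm_genpd_init(&foo_pd, NULL, false);
}
#endif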
2095
2096static int genpd_remove(struct generic_pm_domain *genpd)
2097{
2098	struct gpd_link *l, *link;
2099
2100	if (IS_ERR_OR_NULL(genpd))
2101		return -EINVAL;
2102
2103	genpd_lock(genpd);
2104
2105	if (genpd->has_provider) {
2106		genpd_unlock(genpd);
2107		pr_err("Provider present, unable to remove %s\n", genpd->name);
2108		return -EBUSY;
2109	}
2110
2111	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2112		genpd_unlock(genpd);
2113		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
2114		return -EBUSY;
2115	}
2116
2117	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2118		list_del(&link->parent_node);
2119		list_del(&link->child_node);
2120		kfree(link);
2121	}
2122
2123	list_del(&genpd->gpd_list_node);
2124	genpd_unlock(genpd);
2125	genpd_debug_remove(genpd);
2126	cancel_work_sync(&genpd->power_off_work);
2127	genpd_free_data(genpd);
2128
2129	pr_debug("%s: removed %s\n", __func__, genpd->name);
2130
2131	return 0;
2132}
2133
2134/**
2135 * pm_genpd_remove - Remove a generic I/O PM domain
2136 * @genpd: Pointer to PM domain that is to be removed.
2137 *
2138 * To remove the PM domain, this function:
2139 *  - Removes the PM domain as a subdomain to any parent domains,
2140 *    if it was added.
2141 *  - Removes the PM domain from the list of registered PM domains.
2142 *
2143 * The PM domain will only be removed if the associated provider has
2144 * been removed, it is not a parent to any other PM domain, and it has
2145 * no devices associated with it.
2146 */
2147int pm_genpd_remove(struct generic_pm_domain *genpd)
2148{
2149	int ret;
2150
2151	mutex_lock(&gpd_list_lock);
2152	ret = genpd_remove(genpd);
2153	mutex_unlock(&gpd_list_lock);
2154
2155	return ret;
2156}
2157EXPORT_SYMBOL_GPL(pm_genpd_remove);
2158
2159#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2160
2161/*
2162 * Device Tree based PM domain providers.
2163 *
2164 * The code below implements generic device tree based PM domain providers that
2165 * bind device tree nodes with generic PM domains registered in the system.
2166 *
2167 * Any driver that registers generic PM domains and needs to support binding of
2168 * devices to these domains is supposed to register a PM domain provider, which
2169 * maps a PM domain specifier retrieved from the device tree to a PM domain.
2170 *
2171 * Two simple mapping functions have been provided for convenience:
2172 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2173 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2174 *    index.
2175 */
2176
2177/**
2178 * struct of_genpd_provider - PM domain provider registration structure
2179 * @link: Entry in global list of PM domain providers
2180 * @node: Pointer to device tree node of PM domain provider
2181 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2182 *         into a PM domain.
2183 * @data: context pointer to be passed into @xlate callback
2184 */
2185struct of_genpd_provider {
2186	struct list_head link;
2187	struct device_node *node;
2188	genpd_xlate_t xlate;
2189	void *data;
2190};
2191
2192/* List of registered PM domain providers. */
2193static LIST_HEAD(of_genpd_providers);
2194/* Mutex to protect the list above. */
2195static DEFINE_MUTEX(of_genpd_mutex);
2196
2197/**
2198 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2199 * @genpdspec: OF phandle args to map into a PM domain
2200 * @data: xlate function private data - pointer to struct generic_pm_domain
2201 *
2202 * This is a generic xlate function that can be used to model PM domains that
2203 * have their own device tree nodes. The private data of the xlate function
2204 * needs to be a valid pointer to struct generic_pm_domain.
2205 */
2206static struct generic_pm_domain *genpd_xlate_simple(
2207					struct of_phandle_args *genpdspec,
2208					void *data)
2209{
2210	return data;
2211}
2212
2213/**
2214 * genpd_xlate_onecell() - Xlate function using a single index.
2215 * @genpdspec: OF phandle args to map into a PM domain
2216 * @data: xlate function private data - pointer to struct genpd_onecell_data
2217 *
2218 * This is a generic xlate function that can be used to model simple PM domain
2219 * controllers that have one device tree node and provide multiple PM domains.
2220 * A single cell is used as an index into an array of PM domains specified in
2221 * the genpd_onecell_data struct when registering the provider.
2222 */
2223static struct generic_pm_domain *genpd_xlate_onecell(
2224					struct of_phandle_args *genpdspec,
2225					void *data)
2226{
2227	struct genpd_onecell_data *genpd_data = data;
2228	unsigned int idx = genpdspec->args[0];
2229
2230	if (genpdspec->args_count != 1)
2231		return ERR_PTR(-EINVAL);
2232
2233	if (idx >= genpd_data->num_domains) {
2234		pr_err("%s: invalid domain index %u\n", __func__, idx);
2235		return ERR_PTR(-EINVAL);
2236	}
2237
2238	if (!genpd_data->domains[idx])
2239		return ERR_PTR(-ENOENT);
2240
2241	return genpd_data->domains[idx];
2242}
2243
2244/**
2245 * genpd_add_provider() - Register a PM domain provider for a node
2246 * @np: Device node pointer associated with the PM domain provider.
2247 * @xlate: Callback for decoding PM domain from phandle arguments.
2248 * @data: Context pointer for @xlate callback.
2249 */
2250static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2251			      void *data)
2252{
2253	struct of_genpd_provider *cp;
2254
2255	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2256	if (!cp)
2257		return -ENOMEM;
2258
2259	cp->node = of_node_get(np);
2260	cp->data = data;
2261	cp->xlate = xlate;
2262	fwnode_dev_initialized(&np->fwnode, true);
2263
2264	mutex_lock(&of_genpd_mutex);
2265	list_add(&cp->link, &of_genpd_providers);
2266	mutex_unlock(&of_genpd_mutex);
2267	pr_debug("Added domain provider from %pOF\n", np);
2268
2269	return 0;
2270}
2271
2272static bool genpd_present(const struct generic_pm_domain *genpd)
2273{
2274	bool ret = false;
2275	const struct generic_pm_domain *gpd;
2276
2277	mutex_lock(&gpd_list_lock);
2278	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2279		if (gpd == genpd) {
2280			ret = true;
2281			break;
2282		}
2283	}
2284	mutex_unlock(&gpd_list_lock);
2285
2286	return ret;
2287}
2288
2289/**
2290 * of_genpd_add_provider_simple() - Register a simple PM domain provider
2291 * @np: Device node pointer associated with the PM domain provider.
2292 * @genpd: Pointer to PM domain associated with the PM domain provider.
2293 */
2294int of_genpd_add_provider_simple(struct device_node *np,
2295				 struct generic_pm_domain *genpd)
2296{
2297	int ret;
2298
2299	if (!np || !genpd)
2300		return -EINVAL;
2301
2302	if (!genpd_present(genpd))
2303		return -EINVAL;
2304
2305	genpd->dev.of_node = np;
2306
2307	/* Parse genpd OPP table */
2308	if (genpd->set_performance_state) {
2309		ret = dev_pm_opp_of_add_table(&genpd->dev);
2310		if (ret)
2311			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2312
2313		/*
2314		 * Save table for faster processing while setting performance
2315		 * state.
2316		 */
2317		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2318		WARN_ON(IS_ERR(genpd->opp_table));
2319	}
2320
2321	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2322	if (ret) {
2323		if (genpd->set_performance_state) {
2324			dev_pm_opp_put_opp_table(genpd->opp_table);
2325			dev_pm_opp_of_remove_table(&genpd->dev);
2326		}
2327
2328		return ret;
2329	}
2330
2331	genpd->provider = &np->fwnode;
2332	genpd->has_provider = true;
2333
2334	return 0;
2335}
2336EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
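
/*
 * Editorial sketch: registering a single domain as a DT provider so
 * consumers can reference it via "power-domains = <&foo_pd>". Hypothetical
 * names; foo_pd is assumed to have been set up as in the pm_genpd_init()
 * sketch above.
 */
#if 0
static int foo_pd_probe(struct platform_device *pdev)
{
	int ret = pm_genpd_init(&foo_pd, NULL, false);

	if (ret)
		return ret;

	return of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
}
#endif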
2337
2338/**
2339 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2340 * @np: Device node pointer associated with the PM domain provider.
2341 * @data: Pointer to the data associated with the PM domain provider.
2342 */
2343int of_genpd_add_provider_onecell(struct device_node *np,
2344				  struct genpd_onecell_data *data)
2345{
2346	struct generic_pm_domain *genpd;
2347	unsigned int i;
2348	int ret = -EINVAL;
2349
2350	if (!np || !data)
2351		return -EINVAL;
2352
2353	if (!data->xlate)
2354		data->xlate = genpd_xlate_onecell;
2355
2356	for (i = 0; i < data->num_domains; i++) {
2357		genpd = data->domains[i];
2358
2359		if (!genpd)
2360			continue;
2361		if (!genpd_present(genpd))
2362			goto error;
2363
2364		genpd->dev.of_node = np;
2365
2366		/* Parse genpd OPP table */
2367		if (genpd->set_performance_state) {
2368			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2369			if (ret) {
2370				dev_err_probe(&genpd->dev, ret,
2371					      "Failed to add OPP table for index %d\n", i);
2372				goto error;
2373			}
2374
2375			/*
2376			 * Save table for faster processing while setting
2377			 * performance state.
2378			 */
2379			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2380			WARN_ON(IS_ERR(genpd->opp_table));
2381		}
2382
2383		genpd->provider = &np->fwnode;
2384		genpd->has_provider = true;
2385	}
2386
2387	ret = genpd_add_provider(np, data->xlate, data);
2388	if (ret < 0)
2389		goto error;
2390
2391	return 0;
2392
2393error:
2394	while (i--) {
2395		genpd = data->domains[i];
2396
2397		if (!genpd)
2398			continue;
2399
2400		genpd->provider = NULL;
2401		genpd->has_provider = false;
2402
2403		if (genpd->set_performance_state) {
2404			dev_pm_opp_put_opp_table(genpd->opp_table);
2405			dev_pm_opp_of_remove_table(&genpd->dev);
2406		}
2407	}
2408
2409	return ret;
2410}
2411EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
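
/*
 * Editorial sketch: exposing several domains behind one provider node with
 * the onecell mapping, so consumers use "power-domains = <&foo_pdc INDEX>".
 * Hypothetical names; each domain is assumed already initialized.
 */
#if 0
static struct generic_pm_domain *foo_domains[] = {
	&foo_cpu_pd,	/* index 0 */
	&foo_gpu_pd,	/* index 1 */
	&foo_disp_pd,	/* index 2 */
};

static struct genpd_onecell_data foo_onecell = {
	.domains = foo_domains,
	.num_domains = ARRAY_SIZE(foo_domains),
	/* .xlate left NULL: genpd_xlate_onecell() above is used by default. */
};

static int foo_pdc_probe(struct platform_device *pdev)
{
	return of_genpd_add_provider_onecell(pdev->dev.of_node, &foo_onecell);
}
#endif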
2412
2413/**
2414 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2415 * @np: Device node pointer associated with the PM domain provider
2416 */
2417void of_genpd_del_provider(struct device_node *np)
2418{
2419	struct of_genpd_provider *cp, *tmp;
2420	struct generic_pm_domain *gpd;
2421
2422	mutex_lock(&gpd_list_lock);
2423	mutex_lock(&of_genpd_mutex);
2424	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2425		if (cp->node == np) {
2426			/*
2427			 * For each PM domain associated with the
2428			 * provider, set the 'has_provider' to false
2429			 * so that the PM domain can be safely removed.
2430			 */
2431			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2432				if (gpd->provider == &np->fwnode) {
2433					gpd->has_provider = false;
2434
2435					if (!gpd->set_performance_state)
2436						continue;
2437
2438					dev_pm_opp_put_opp_table(gpd->opp_table);
2439					dev_pm_opp_of_remove_table(&gpd->dev);
2440				}
2441			}
2442
2443			fwnode_dev_initialized(&cp->node->fwnode, false);
2444			list_del(&cp->link);
2445			of_node_put(cp->node);
2446			kfree(cp);
2447			break;
2448		}
2449	}
2450	mutex_unlock(&of_genpd_mutex);
2451	mutex_unlock(&gpd_list_lock);
2452}
2453EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2454
2455/**
2456 * genpd_get_from_provider() - Look-up PM domain
2457 * @genpdspec: OF phandle args to use for look-up
2458 *
2459 * Looks for a PM domain provider under the node specified by @genpdspec and if
2460 * found, uses the xlate function of the provider to map phandle args to a PM
2461 * domain.
2462 *
2463 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2464 * on failure.
2465 */
2466static struct generic_pm_domain *genpd_get_from_provider(
2467					struct of_phandle_args *genpdspec)
2468{
2469	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2470	struct of_genpd_provider *provider;
2471
2472	if (!genpdspec)
2473		return ERR_PTR(-EINVAL);
2474
2475	mutex_lock(&of_genpd_mutex);
2476
2477	/* Check if we have such a provider in our array */
2478	list_for_each_entry(provider, &of_genpd_providers, link) {
2479		if (provider->node == genpdspec->np)
2480			genpd = provider->xlate(genpdspec, provider->data);
2481		if (!IS_ERR(genpd))
2482			break;
2483	}
2484
2485	mutex_unlock(&of_genpd_mutex);
2486
2487	return genpd;
2488}
2489
2490/**
2491 * of_genpd_add_device() - Add a device to an I/O PM domain
2492 * @genpdspec: OF phandle args to use for look-up PM domain
2493 * @dev: Device to be added.
2494 *
2495 * Looks up an I/O PM domain based upon the phandle args provided and adds
2496 * the device to the PM domain. Returns a negative error code on failure.
2497 */
2498int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2499{
2500	struct generic_pm_domain *genpd;
2501	int ret;
2502
2503	mutex_lock(&gpd_list_lock);
2504
2505	genpd = genpd_get_from_provider(genpdspec);
2506	if (IS_ERR(genpd)) {
2507		ret = PTR_ERR(genpd);
2508		goto out;
2509	}
2510
2511	ret = genpd_add_device(genpd, dev, dev);
2512
2513out:
2514	mutex_unlock(&gpd_list_lock);
2515
2516	return ret;
2517}
2518EXPORT_SYMBOL_GPL(of_genpd_add_device);
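
/*
 * Editorial sketch: how bus code might resolve a "power-domains" specifier
 * itself and hand the device to of_genpd_add_device(). This mirrors the
 * parsing done by __genpd_dev_pm_attach() below.
 */
#if 0
	struct of_phandle_args pd_args;
	int ret;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					 "#power-domain-cells", 0, &pd_args);
	if (!ret) {
		ret = of_genpd_add_device(&pd_args, dev);
		of_node_put(pd_args.np);
	}
#endif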
2519
2520/**
2521 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2522 * @parent_spec: OF phandle args to use for parent PM domain look-up
2523 * @subdomain_spec: OF phandle args to use for subdomain look-up
2524 *
2525 * Looks up a parent PM domain and subdomain based upon the phandle args
2526 * provided and adds the subdomain to the parent PM domain. Returns a
2527 * negative error code on failure.
2528 */
2529int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2530			   struct of_phandle_args *subdomain_spec)
2531{
2532	struct generic_pm_domain *parent, *subdomain;
2533	int ret;
2534
2535	mutex_lock(&gpd_list_lock);
2536
2537	parent = genpd_get_from_provider(parent_spec);
2538	if (IS_ERR(parent)) {
2539		ret = PTR_ERR(parent);
2540		goto out;
2541	}
2542
2543	subdomain = genpd_get_from_provider(subdomain_spec);
2544	if (IS_ERR(subdomain)) {
2545		ret = PTR_ERR(subdomain);
2546		goto out;
2547	}
2548
2549	ret = genpd_add_subdomain(parent, subdomain);
2550
2551out:
2552	mutex_unlock(&gpd_list_lock);
2553
2554	return ret == -ENOENT ? -EPROBE_DEFER : ret;
2555}
2556EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2557
2558/**
2559 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2560 * @parent_spec: OF phandle args to use for parent PM domain look-up
2561 * @subdomain_spec: OF phandle args to use for subdomain look-up
2562 *
2563 * Looks up a parent PM domain and subdomain based upon the phandle args
2564 * provided and removes the subdomain from the parent PM domain. Returns a
2565 * negative error code on failure.
2566 */
2567int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
2568			      struct of_phandle_args *subdomain_spec)
2569{
2570	struct generic_pm_domain *parent, *subdomain;
2571	int ret;
2572
2573	mutex_lock(&gpd_list_lock);
2574
2575	parent = genpd_get_from_provider(parent_spec);
2576	if (IS_ERR(parent)) {
2577		ret = PTR_ERR(parent);
2578		goto out;
2579	}
2580
2581	subdomain = genpd_get_from_provider(subdomain_spec);
2582	if (IS_ERR(subdomain)) {
2583		ret = PTR_ERR(subdomain);
2584		goto out;
2585	}
2586
2587	ret = pm_genpd_remove_subdomain(parent, subdomain);
2588
2589out:
2590	mutex_unlock(&gpd_list_lock);
2591
2592	return ret;
2593}
2594EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2595
2596/**
2597 * of_genpd_remove_last - Remove the last PM domain registered for a provider
2598 * @np: Pointer to device node associated with provider
2599 *
2600 * Find the last PM domain that was added by a particular provider and
2601 * remove this PM domain from the list of PM domains. The provider is
2602 * identified by the device node that is passed. The PM
2603 * domain will only be removed if the provider associated with the domain
2604 * has been removed.
2605 *
2606 * Returns a valid pointer to struct generic_pm_domain on success or
2607 * ERR_PTR() on failure.
2608 */
2609struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2610{
2611	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2612	int ret;
2613
2614	if (IS_ERR_OR_NULL(np))
2615		return ERR_PTR(-EINVAL);
2616
2617	mutex_lock(&gpd_list_lock);
2618	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2619		if (gpd->provider == &np->fwnode) {
2620			ret = genpd_remove(gpd);
2621			genpd = ret ? ERR_PTR(ret) : gpd;
2622			break;
2623		}
2624	}
2625	mutex_unlock(&gpd_list_lock);
2626
2627	return genpd;
2628}
2629EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2630
2631static void genpd_release_dev(struct device *dev)
2632{
2633	of_node_put(dev->of_node);
2634	kfree(dev);
2635}
2636
2637static struct bus_type genpd_bus_type = {
2638	.name		= "genpd",
2639};
2640
2641/**
2642 * genpd_dev_pm_detach - Detach a device from its PM domain.
2643 * @dev: Device to detach.
2644 * @power_off: Currently not used
2645 *
2646 * Try to locate a corresponding generic PM domain, which the device was
2647 * attached to previously. If such is found, the device is detached from it.
2648 */
2649static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2650{
2651	struct generic_pm_domain *pd;
2652	unsigned int i;
2653	int ret = 0;
2654
2655	pd = dev_to_genpd(dev);
2656	if (IS_ERR(pd))
2657		return;
2658
2659	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2660
2661	/* Drop the default performance state */
2662	if (dev_gpd_data(dev)->default_pstate) {
2663		dev_pm_genpd_set_performance_state(dev, 0);
2664		dev_gpd_data(dev)->default_pstate = 0;
2665	}
2666
2667	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2668		ret = genpd_remove_device(pd, dev);
2669		if (ret != -EAGAIN)
2670			break;
2671
2672		mdelay(i);
2673		cond_resched();
2674	}
2675
2676	if (ret < 0) {
2677		dev_err(dev, "failed to remove from PM domain %s: %d\n",
2678			pd->name, ret);
2679		return;
2680	}
2681
2682	/* Check if PM domain can be powered off after removing this device. */
2683	genpd_queue_power_off_work(pd);
2684
2685	/* Unregister the device if it was created by genpd. */
2686	if (dev->bus == &genpd_bus_type)
2687		device_unregister(dev);
2688}
2689
2690static void genpd_dev_pm_sync(struct device *dev)
2691{
2692	struct generic_pm_domain *pd;
2693
2694	pd = dev_to_genpd(dev);
2695	if (IS_ERR(pd))
2696		return;
2697
2698	genpd_queue_power_off_work(pd);
2699}
2700
2701static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2702				 unsigned int index, bool power_on)
2703{
2704	struct of_phandle_args pd_args;
2705	struct generic_pm_domain *pd;
2706	int pstate;
2707	int ret;
2708
2709	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2710				"#power-domain-cells", index, &pd_args);
2711	if (ret < 0)
2712		return ret;
2713
2714	mutex_lock(&gpd_list_lock);
2715	pd = genpd_get_from_provider(&pd_args);
2716	of_node_put(pd_args.np);
2717	if (IS_ERR(pd)) {
2718		mutex_unlock(&gpd_list_lock);
2719		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2720			__func__, PTR_ERR(pd));
2721		return driver_deferred_probe_check_state(base_dev);
2722	}
2723
2724	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2725
2726	ret = genpd_add_device(pd, dev, base_dev);
2727	mutex_unlock(&gpd_list_lock);
2728
2729	if (ret < 0)
2730		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
2731
2732	dev->pm_domain->detach = genpd_dev_pm_detach;
2733	dev->pm_domain->sync = genpd_dev_pm_sync;
2734
2735	/* Set the default performance state */
2736	pstate = of_get_required_opp_performance_state(dev->of_node, index);
2737	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
2738		ret = pstate;
2739		goto err;
2740	} else if (pstate > 0) {
2741		ret = dev_pm_genpd_set_performance_state(dev, pstate);
2742		if (ret)
2743			goto err;
2744		dev_gpd_data(dev)->default_pstate = pstate;
2745	}
2746
2747	if (power_on) {
2748		genpd_lock(pd);
2749		ret = genpd_power_on(pd, 0);
2750		genpd_unlock(pd);
2751	}
2752
2753	if (ret) {
2754		/* Drop the default performance state */
2755		if (dev_gpd_data(dev)->default_pstate) {
2756			dev_pm_genpd_set_performance_state(dev, 0);
2757			dev_gpd_data(dev)->default_pstate = 0;
2758		}
2759
2760		genpd_remove_device(pd, dev);
2761		return -EPROBE_DEFER;
2762	}
2763
2764	return 1;
2765
2766err:
2767	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
2768		pd->name, ret);
2769	genpd_remove_device(pd, dev);
2770	return ret;
2771}
2772
2773/**
2774 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2775 * @dev: Device to attach.
2776 *
2777 * Parse device's OF node to find a PM domain specifier. If such is found,
2778 * attaches the device to retrieved pm_domain ops.
2779 *
2780 * Returns 1 on a successfully attached PM domain, 0 when the device doesn't
2781 * need a PM domain or when multiple power-domains exist for it, else a negative
2782 * error code. Note that if a power-domain exists for the device, but it cannot
2783 * be found or turned on, then -EPROBE_DEFER is returned to ensure that the
2784 * device is not probed and is retried later.
2785 */
2786int genpd_dev_pm_attach(struct device *dev)
2787{
2788	if (!dev->of_node)
2789		return 0;
2790
2791	/*
2792	 * Devices with multiple PM domains must be attached separately, as we
2793	 * can only attach one PM domain per device.
2794	 */
2795	if (of_count_phandle_with_args(dev->of_node, "power-domains",
2796				       "#power-domain-cells") != 1)
2797		return 0;
2798
2799	return __genpd_dev_pm_attach(dev, dev, 0, true);
2800}
2801EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
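
/*
 * Editorial note: genpd_dev_pm_attach() is normally invoked by the driver
 * core (via dev_pm_domain_attach()) during probe rather than by drivers
 * directly. A single-domain consumer would typically carry a DT fragment
 * like the hypothetical one below:
 *
 *	foo@1000 {
 *		compatible = "vendor,foo";
 *		power-domains = <&foo_pd>;
 *	};
 */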
2802
2803/**
2804 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2805 * @dev: The device used to lookup the PM domain.
2806 * @index: The index of the PM domain.
2807 *
2808 * Parse device's OF node to find a PM domain specifier at the provided @index.
2809 * If such is found, creates a virtual device and attaches it to the retrieved
2810 * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
2811 * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
2812 *
2813 * Returns the created virtual device on a successfully attached PM domain,
2814 * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
2815 * failure. If a power-domain exists for the device, but cannot be found or
2816 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
2817 * is not probed and is retried later.
2818 */
2819struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2820					 unsigned int index)
2821{
2822	struct device *virt_dev;
2823	int num_domains;
2824	int ret;
2825
2826	if (!dev->of_node)
2827		return NULL;
2828
2829	/* Verify that the index is within a valid range. */
2830	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2831						 "#power-domain-cells");
2832	if (index >= num_domains)
2833		return NULL;
2834
2835	/* Allocate and register device on the genpd bus. */
2836	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2837	if (!virt_dev)
2838		return ERR_PTR(-ENOMEM);
2839
2840	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2841	virt_dev->bus = &genpd_bus_type;
2842	virt_dev->release = genpd_release_dev;
2843	virt_dev->of_node = of_node_get(dev->of_node);
2844
2845	ret = device_register(virt_dev);
2846	if (ret) {
2847		put_device(virt_dev);
2848		return ERR_PTR(ret);
2849	}
2850
2851	/* Try to attach the device to the PM domain at the specified index. */
2852	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2853	if (ret < 1) {
2854		device_unregister(virt_dev);
2855		return ret ? ERR_PTR(ret) : NULL;
2856	}
2857
2858	pm_runtime_enable(virt_dev);
2859	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2860
2861	return virt_dev;
2862}
2863EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
2864
2865/**
2866 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2867 * @dev: The device used to lookup the PM domain.
2868 * @name: The name of the PM domain.
2869 *
2870 * Parse device's OF node to find a PM domain specifier using the
2871 * power-domain-names DT property. For further description see
2872 * genpd_dev_pm_attach_by_id().
2873 */
2874struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2875{
2876	int index;
2877
2878	if (!dev->of_node)
2879		return NULL;
2880
2881	index = of_property_match_string(dev->of_node, "power-domain-names",
2882					 name);
2883	if (index < 0)
2884		return NULL;
2885
2886	return genpd_dev_pm_attach_by_id(dev, index);
2887}
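
/*
 * Editorial sketch: a consumer with multiple domains attaching one of them
 * by name and tying its runtime PM to the returned virtual device with a
 * device link, a common driver pattern. The "perf" domain name is
 * hypothetical.
 */
#if 0
	struct device *pd_dev;
	struct device_link *link;

	pd_dev = genpd_dev_pm_attach_by_name(dev, "perf");
	if (IS_ERR_OR_NULL(pd_dev))
		return pd_dev ? PTR_ERR(pd_dev) : -ENODEV;

	link = device_link_add(dev, pd_dev,
			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
	if (!link)
		return -ENODEV;
#endif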
2888
2889static const struct of_device_id idle_state_match[] = {
2890	{ .compatible = "domain-idle-state", },
2891	{ }
2892};
2893
2894static int genpd_parse_state(struct genpd_power_state *genpd_state,
2895				    struct device_node *state_node)
2896{
2897	int err;
2898	u32 residency;
2899	u32 entry_latency, exit_latency;
2900
2901	err = of_property_read_u32(state_node, "entry-latency-us",
2902						&entry_latency);
2903	if (err) {
2904		pr_debug(" * %pOF missing entry-latency-us property\n",
2905			 state_node);
2906		return -EINVAL;
2907	}
2908
2909	err = of_property_read_u32(state_node, "exit-latency-us",
2910						&exit_latency);
2911	if (err) {
2912		pr_debug(" * %pOF missing exit-latency-us property\n",
2913			 state_node);
2914		return -EINVAL;
2915	}
2916
2917	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2918	if (!err)
2919		genpd_state->residency_ns = 1000 * residency;
2920
2921	genpd_state->power_on_latency_ns = 1000 * exit_latency;
2922	genpd_state->power_off_latency_ns = 1000 * entry_latency;
2923	genpd_state->fwnode = &state_node->fwnode;
2924
2925	return 0;
2926}
2927
2928static int genpd_iterate_idle_states(struct device_node *dn,
2929				     struct genpd_power_state *states)
2930{
2931	int ret;
2932	struct of_phandle_iterator it;
2933	struct device_node *np;
2934	int i = 0;
2935
2936	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2937	if (ret <= 0)
2938		return ret == -ENOENT ? 0 : ret;
2939
2940	/* Loop over the phandles until all the requested entries are found */
2941	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2942		np = it.node;
2943		if (!of_match_node(idle_state_match, np))
2944			continue;
2945
2946		if (!of_device_is_available(np))
2947			continue;
2948
2949		if (states) {
2950			ret = genpd_parse_state(&states[i], np);
2951			if (ret) {
2952				pr_err("Parsing idle state node %pOF failed with err %d\n",
2953				       np, ret);
2954				of_node_put(np);
2955				return ret;
2956			}
2957		}
2958		i++;
2959	}
2960
2961	return i;
2962}
2963
2964/**
2965 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2966 *
2967 * @dn: The genpd device node
2968 * @states: The pointer to which the state array will be saved.
2969 * @n: The count of elements in the array returned from this function.
2970 *
2971 * Returns the device states parsed from the OF node. The memory for the states
2972 * is allocated by this function and it is the responsibility of the caller to
2973 * free the memory after use. Whether any or zero compatible domain idle states
2974 * are found, it returns 0; in case of errors, a negative error code is returned.
2975 */
2976int of_genpd_parse_idle_states(struct device_node *dn,
2977			struct genpd_power_state **states, int *n)
2978{
2979	struct genpd_power_state *st;
2980	int ret;
2981
2982	ret = genpd_iterate_idle_states(dn, NULL);
2983	if (ret < 0)
2984		return ret;
2985
2986	if (!ret) {
2987		*states = NULL;
2988		*n = 0;
2989		return 0;
2990	}
2991
2992	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
2993	if (!st)
2994		return -ENOMEM;
2995
2996	ret = genpd_iterate_idle_states(dn, st);
2997	if (ret <= 0) {
2998		kfree(st);
2999		return ret < 0 ? ret : -EINVAL;
3000	}
3001
3002	*states = st;
3003	*n = ret;
3004
3005	return 0;
3006}
3007EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
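
/*
 * Editorial sketch: a provider driver parsing "domain-idle-states" from its
 * node and handing the result to the genpd before pm_genpd_init().
 * Hypothetical names; ->free_states lets genpd_free_data() above release
 * the array on domain removal.
 */
#if 0
static void foo_pd_free_states(struct genpd_power_state *states,
			       unsigned int state_count)
{
	kfree(states);
}

static int foo_pd_parse_states(struct device_node *np)
{
	struct genpd_power_state *states;
	int ret, nr_states;

	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
	if (ret)
		return ret;

	if (nr_states) {
		foo_pd.states = states;
		foo_pd.state_count = nr_states;
		foo_pd.free_states = foo_pd_free_states;
	}

	return 0;
}
#endif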
3008
3009/**
3010 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
3011 *
3012 * @genpd_dev: Genpd's device for which the performance-state needs to be found.
3013 * @opp: struct dev_pm_opp of the OPP for which we need to find performance
3014 *	state.
3015 *
3016 * Returns the performance state encoded in the OPP of the genpd. This calls
3017 * the platform-specific genpd->opp_to_performance_state() callback to translate
3018 * the power domain OPP to a performance state.
3019 *
3020 * Returns the performance state on success and 0 on failure.
3021 */
3022unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
3023					       struct dev_pm_opp *opp)
3024{
3025	struct generic_pm_domain *genpd = NULL;
3026	int state;
3027
3028	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
3029
3030	if (unlikely(!genpd->opp_to_performance_state))
3031		return 0;
3032
3033	genpd_lock(genpd);
3034	state = genpd->opp_to_performance_state(genpd, opp);
3035	genpd_unlock(genpd);
3036
3037	return state;
3038}
3039EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
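
/*
 * Editorial sketch: a platform-specific ->opp_to_performance_state()
 * callback as consumed by pm_genpd_opp_to_performance_state() above. For
 * OPP tables using "opp-level", dev_pm_opp_get_level() is the natural
 * translation; other platforms may compute the state differently.
 */
#if 0
static unsigned int
foo_pd_opp_to_performance_state(struct generic_pm_domain *genpd,
				struct dev_pm_opp *opp)
{
	return dev_pm_opp_get_level(opp);
}
#endif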
3040
3041static int __init genpd_bus_init(void)
3042{
3043	return bus_register(&genpd_bus_type);
3044}
3045core_initcall(genpd_bus_init);
3046
3047#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
3048
3049
3050/***        debugfs support        ***/
3051
3052#ifdef CONFIG_DEBUG_FS
3053/*
3054 * TODO: This function is a slightly modified version of rtpm_status_show
3055 * from sysfs.c, so generalize it.
3056 */
3057static void rtpm_status_str(struct seq_file *s, struct device *dev)
3058{
3059	static const char * const status_lookup[] = {
3060		[RPM_ACTIVE] = "active",
3061		[RPM_RESUMING] = "resuming",
3062		[RPM_SUSPENDED] = "suspended",
3063		[RPM_SUSPENDING] = "suspending"
3064	};
3065	const char *p = "";
3066
3067	if (dev->power.runtime_error)
3068		p = "error";
3069	else if (dev->power.disable_depth)
3070		p = "unsupported";
3071	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3072		p = status_lookup[dev->power.runtime_status];
3073	else
3074		WARN_ON(1);
3075
3076	seq_printf(s, "%-25s  ", p);
3077}
3078
3079static void perf_status_str(struct seq_file *s, struct device *dev)
3080{
3081	struct generic_pm_domain_data *gpd_data;
3082
3083	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3084	seq_put_decimal_ull(s, "", gpd_data->performance_state);
3085}
3086
3087static int genpd_summary_one(struct seq_file *s,
3088			struct generic_pm_domain *genpd)
3089{
3090	static const char * const status_lookup[] = {
3091		[GENPD_STATE_ON] = "on",
3092		[GENPD_STATE_OFF] = "off"
3093	};
3094	struct pm_domain_data *pm_data;
3095	const char *kobj_path;
3096	struct gpd_link *link;
3097	char state[16];
3098	int ret;
3099
3100	ret = genpd_lock_interruptible(genpd);
3101	if (ret)
3102		return -ERESTARTSYS;
3103
3104	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3105		goto exit;
3106	if (!genpd_status_on(genpd))
3107		snprintf(state, sizeof(state), "%s-%u",
3108			 status_lookup[genpd->status], genpd->state_idx);
3109	else
3110		snprintf(state, sizeof(state), "%s",
3111			 status_lookup[genpd->status]);
3112	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);
3113
3114	/*
3115	 * Modifications on the list require holding locks on both
3116	 * parent and child, so we are safe.
3117	 * Also genpd->name is immutable.
3118	 */
3119	list_for_each_entry(link, &genpd->parent_links, parent_node) {
3120		if (list_is_first(&link->parent_node, &genpd->parent_links))
3121			seq_printf(s, "\n%48s", " ");
3122		seq_printf(s, "%s", link->child->name);
3123		if (!list_is_last(&link->parent_node, &genpd->parent_links))
3124			seq_puts(s, ", ");
3125	}
3126
3127	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3128		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3129				genpd_is_irq_safe(genpd) ?
3130				GFP_ATOMIC : GFP_KERNEL);
3131		if (kobj_path == NULL)
3132			continue;
3133
3134		seq_printf(s, "\n    %-50s  ", kobj_path);
3135		rtpm_status_str(s, pm_data->dev);
3136		perf_status_str(s, pm_data->dev);
3137		kfree(kobj_path);
3138	}
3139
3140	seq_puts(s, "\n");
3141exit:
3142	genpd_unlock(genpd);
3143
3144	return 0;
3145}
3146
3147static int summary_show(struct seq_file *s, void *data)
3148{
3149	struct generic_pm_domain *genpd;
3150	int ret = 0;
3151
3152	seq_puts(s, "domain                          status          children                           performance\n");
3153	seq_puts(s, "    /device                                             runtime status\n");
3154	seq_puts(s, "----------------------------------------------------------------------------------------------\n");
3155
3156	ret = mutex_lock_interruptible(&gpd_list_lock);
3157	if (ret)
3158		return -ERESTARTSYS;
3159
3160	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3161		ret = genpd_summary_one(s, genpd);
3162		if (ret)
3163			break;
3164	}
3165	mutex_unlock(&gpd_list_lock);
3166
3167	return ret;
3168}
3169
3170static int status_show(struct seq_file *s, void *data)
3171{
3172	static const char * const status_lookup[] = {
3173		[GENPD_STATE_ON] = "on",
3174		[GENPD_STATE_OFF] = "off"
3175	};
3176
3177	struct generic_pm_domain *genpd = s->private;
3178	int ret = 0;
3179
3180	ret = genpd_lock_interruptible(genpd);
3181	if (ret)
3182		return -ERESTARTSYS;
3183
3184	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3185		goto exit;
3186
3187	if (genpd->status == GENPD_STATE_OFF)
3188		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3189			genpd->state_idx);
3190	else
3191		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3192exit:
3193	genpd_unlock(genpd);
3194	return ret;
3195}
3196
3197static int sub_domains_show(struct seq_file *s, void *data)
3198{
3199	struct generic_pm_domain *genpd = s->private;
3200	struct gpd_link *link;
3201	int ret = 0;
3202
3203	ret = genpd_lock_interruptible(genpd);
3204	if (ret)
3205		return -ERESTARTSYS;
3206
3207	list_for_each_entry(link, &genpd->parent_links, parent_node)
3208		seq_printf(s, "%s\n", link->child->name);
3209
3210	genpd_unlock(genpd);
3211	return ret;
3212}
3213
3214static int idle_states_show(struct seq_file *s, void *data)
3215{
3216	struct generic_pm_domain *genpd = s->private;
3217	u64 now, delta, idle_time = 0;
3218	unsigned int i;
3219	int ret = 0;
3220
3221	ret = genpd_lock_interruptible(genpd);
3222	if (ret)
3223		return -ERESTARTSYS;
3224
3225	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");
3226
3227	for (i = 0; i < genpd->state_count; i++) {
3228		idle_time += genpd->states[i].idle_time;
3229
3230		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3231			now = ktime_get_mono_fast_ns();
3232			if (now > genpd->accounting_time) {
3233				delta = now - genpd->accounting_time;
3234				idle_time += delta;
3235			}
3236		}
3237
3238		do_div(idle_time, NSEC_PER_MSEC);
3239		seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
3240			   genpd->states[i].usage, genpd->states[i].rejected);
3241	}
3242
3243	genpd_unlock(genpd);
3244	return ret;
3245}
3246
3247static int active_time_show(struct seq_file *s, void *data)
3248{
3249	struct generic_pm_domain *genpd = s->private;
3250	u64 now, on_time, delta = 0;
3251	int ret = 0;
3252
3253	ret = genpd_lock_interruptible(genpd);
3254	if (ret)
3255		return -ERESTARTSYS;
3256
3257	if (genpd->status == GENPD_STATE_ON) {
3258		now = ktime_get_mono_fast_ns();
3259		if (now > genpd->accounting_time)
3260			delta = now - genpd->accounting_time;
3261	}
3262
3263	on_time = genpd->on_time + delta;
3264	do_div(on_time, NSEC_PER_MSEC);
3265	seq_printf(s, "%llu ms\n", on_time);
3266
3267	genpd_unlock(genpd);
3268	return ret;
3269}
3270
3271static int total_idle_time_show(struct seq_file *s, void *data)
3272{
3273	struct generic_pm_domain *genpd = s->private;
3274	u64 now, delta, total = 0;
3275	unsigned int i;
3276	int ret = 0;
3277
3278	ret = genpd_lock_interruptible(genpd);
3279	if (ret)
3280		return -ERESTARTSYS;
3281
3282	for (i = 0; i < genpd->state_count; i++) {
3283		total += genpd->states[i].idle_time;
3284
3285		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3286			now = ktime_get_mono_fast_ns();
3287			if (now > genpd->accounting_time) {
3288				delta = now - genpd->accounting_time;
3289				total += delta;
3290			}
3291		}
3292	}
3293
3294	do_div(total, NSEC_PER_MSEC);
3295	seq_printf(s, "%llu ms\n", total);
3296
3297	genpd_unlock(genpd);
3298	return ret;
3299}
3300
3301
3302static int devices_show(struct seq_file *s, void *data)
3303{
3304	struct generic_pm_domain *genpd = s->private;
3305	struct pm_domain_data *pm_data;
3306	const char *kobj_path;
3307	int ret = 0;
3308
3309	ret = genpd_lock_interruptible(genpd);
3310	if (ret)
3311		return -ERESTARTSYS;
3312
3313	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3314		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3315				genpd_is_irq_safe(genpd) ?
3316				GFP_ATOMIC : GFP_KERNEL);
3317		if (kobj_path == NULL)
3318			continue;
3319
3320		seq_printf(s, "%s\n", kobj_path);
3321		kfree(kobj_path);
3322	}
3323
3324	genpd_unlock(genpd);
3325	return ret;
3326}
3327
3328static int perf_state_show(struct seq_file *s, void *data)
3329{
3330	struct generic_pm_domain *genpd = s->private;
3331
3332	if (genpd_lock_interruptible(genpd))
3333		return -ERESTARTSYS;
3334
3335	seq_printf(s, "%u\n", genpd->performance_state);
3336
3337	genpd_unlock(genpd);
3338	return 0;
3339}
3340
3341DEFINE_SHOW_ATTRIBUTE(summary);
3342DEFINE_SHOW_ATTRIBUTE(status);
3343DEFINE_SHOW_ATTRIBUTE(sub_domains);
3344DEFINE_SHOW_ATTRIBUTE(idle_states);
3345DEFINE_SHOW_ATTRIBUTE(active_time);
3346DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3347DEFINE_SHOW_ATTRIBUTE(devices);
3348DEFINE_SHOW_ATTRIBUTE(perf_state);
3349
3350static void genpd_debug_add(struct generic_pm_domain *genpd)
3351{
3352	struct dentry *d;
3353
3354	if (!genpd_debugfs_dir)
3355		return;
3356
3357	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3358
3359	debugfs_create_file("current_state", 0444,
3360			    d, genpd, &status_fops);
3361	debugfs_create_file("sub_domains", 0444,
3362			    d, genpd, &sub_domains_fops);
3363	debugfs_create_file("idle_states", 0444,
3364			    d, genpd, &idle_states_fops);
3365	debugfs_create_file("active_time", 0444,
3366			    d, genpd, &active_time_fops);
3367	debugfs_create_file("total_idle_time", 0444,
3368			    d, genpd, &total_idle_time_fops);
3369	debugfs_create_file("devices", 0444,
3370			    d, genpd, &devices_fops);
3371	if (genpd->set_performance_state)
3372		debugfs_create_file("perf_state", 0444,
3373				    d, genpd, &perf_state_fops);
3374}
3375
3376static int __init genpd_debug_init(void)
3377{
3378	struct generic_pm_domain *genpd;
3379
3380	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3381
3382	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3383			    NULL, &summary_fops);
3384
3385	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3386		genpd_debug_add(genpd);
3387
3388	return 0;
3389}
3390late_initcall(genpd_debug_init);
3391
3392static void __exit genpd_debug_exit(void)
3393{
3394	debugfs_remove_recursive(genpd_debugfs_dir);
3395}
3396__exitcall(genpd_debug_exit);
3397#endif /* CONFIG_DEBUG_FS */
v3.15
 
   1/*
   2 * drivers/base/power/domain.c - Common code related to device power domains.
   3 *
   4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
   5 *
   6 * This file is released under the GPLv2.
   7 */
 
   8
 
   9#include <linux/kernel.h>
  10#include <linux/io.h>
 
 
  11#include <linux/pm_runtime.h>
  12#include <linux/pm_domain.h>
  13#include <linux/pm_qos.h>
 
  14#include <linux/slab.h>
  15#include <linux/err.h>
  16#include <linux/sched.h>
  17#include <linux/suspend.h>
  18#include <linux/export.h>
 
 
 
 
 
 
  19
  20#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
  21({								\
  22	type (*__routine)(struct device *__d); 			\
  23	type __ret = (type)0;					\
  24								\
  25	__routine = genpd->dev_ops.callback; 			\
  26	if (__routine) {					\
  27		__ret = __routine(dev); 			\
  28	} else {						\
  29		__routine = dev_gpd_data(dev)->ops.callback;	\
  30		if (__routine) 					\
  31			__ret = __routine(dev);			\
  32	}							\
  33	__ret;							\
  34})
  35
  36#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
  37({										\
  38	ktime_t __start = ktime_get();						\
  39	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
  40	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
  41	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
  42	if (!__retval && __elapsed > __td->field) {				\
  43		__td->field = __elapsed;					\
  44		dev_dbg(dev, name " latency exceeded, new value %lld ns\n",	\
  45			__elapsed);						\
  46		genpd->max_off_time_changed = true;				\
  47		__td->constraint_changed = true;				\
  48	}									\
  49	__retval;								\
  50})
  51
  52static LIST_HEAD(gpd_list);
  53static DEFINE_MUTEX(gpd_list_lock);
  54
  55static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  56{
  57	struct generic_pm_domain *genpd = NULL, *gpd;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  58
  59	if (IS_ERR_OR_NULL(domain_name))
 
 
 
 
 
 
 
 
  60		return NULL;
  61
  62	mutex_lock(&gpd_list_lock);
  63	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
  64		if (!strcmp(gpd->name, domain_name)) {
  65			genpd = gpd;
  66			break;
  67		}
  68	}
  69	mutex_unlock(&gpd_list_lock);
  70	return genpd;
  71}
  72
  73#ifdef CONFIG_PM
  74
  75struct generic_pm_domain *dev_to_genpd(struct device *dev)
 
 
  76{
  77	if (IS_ERR_OR_NULL(dev->pm_domain))
  78		return ERR_PTR(-EINVAL);
  79
  80	return pd_to_genpd(dev->pm_domain);
  81}
  82
  83static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
 
  84{
  85	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
  86					stop_latency_ns, "stop");
  87}
  88
  89static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
 
  90{
  91	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
  92					start_latency_ns, "start");
  93}
  94
  95static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
  96{
  97	bool ret = false;
  98
  99	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
 100		ret = !!atomic_dec_and_test(&genpd->sd_count);
 101
 102	return ret;
 103}
 104
 105static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
 106{
 107	atomic_inc(&genpd->sd_count);
 108	smp_mb__after_atomic_inc();
 109}
 110
 111static void genpd_acquire_lock(struct generic_pm_domain *genpd)
 
 
 
 
 
 112{
 113	DEFINE_WAIT(wait);
 114
 115	mutex_lock(&genpd->lock);
 116	/*
 117	 * Wait for the domain to transition into either the active,
 118	 * or the power off state.
 119	 */
 120	for (;;) {
 121		prepare_to_wait(&genpd->status_wait_queue, &wait,
 122				TASK_UNINTERRUPTIBLE);
 123		if (genpd->status == GPD_STATE_ACTIVE
 124		    || genpd->status == GPD_STATE_POWER_OFF)
 125			break;
 126		mutex_unlock(&genpd->lock);
 127
 128		schedule();
 129
 130		mutex_lock(&genpd->lock);
 131	}
 132	finish_wait(&genpd->status_wait_queue, &wait);
 133}
 134
 135static void genpd_release_lock(struct generic_pm_domain *genpd)
 136{
 137	mutex_unlock(&genpd->lock);
 138}
 139
 140static void genpd_set_active(struct generic_pm_domain *genpd)
 141{
 142	if (genpd->resume_count == 0)
 143		genpd->status = GPD_STATE_ACTIVE;
 144}
 145
 146static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
 147{
 148	s64 usecs64;
 149
 150	if (!genpd->cpu_data)
 151		return;
 
 
 
 
 
 
 
 152
 153	usecs64 = genpd->power_on_latency_ns;
 154	do_div(usecs64, NSEC_PER_USEC);
 155	usecs64 += genpd->cpu_data->saved_exit_latency;
 156	genpd->cpu_data->idle_state->exit_latency = usecs64;
 157}
 
 
 
 
 
 158
 159/**
 160 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 161 * @genpd: PM domain to power up.
 162 *
 163 * Restore power to @genpd and all of its masters so that it is possible to
 164 * resume a device belonging to it.
 165 */
 166static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 167	__releases(&genpd->lock) __acquires(&genpd->lock)
 168{
 
 
 169	struct gpd_link *link;
 170	DEFINE_WAIT(wait);
 171	int ret = 0;
 172
 173	/* If the domain's master is being waited for, we have to wait too. */
 174	for (;;) {
 175		prepare_to_wait(&genpd->status_wait_queue, &wait,
 176				TASK_UNINTERRUPTIBLE);
 177		if (genpd->status != GPD_STATE_WAIT_MASTER)
 178			break;
 179		mutex_unlock(&genpd->lock);
 
 
 
 
 180
 181		schedule();
 
 
 182
 183		mutex_lock(&genpd->lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 184	}
 185	finish_wait(&genpd->status_wait_queue, &wait);
 186
 187	if (genpd->status == GPD_STATE_ACTIVE
 188	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
 189		return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 190
 191	if (genpd->status != GPD_STATE_POWER_OFF) {
 192		genpd_set_active(genpd);
 193		return 0;
 194	}
 195
 196	if (genpd->cpu_data) {
 197		cpuidle_pause_and_lock();
 198		genpd->cpu_data->idle_state->disabled = true;
 199		cpuidle_resume_and_unlock();
 200		goto out;
 201	}
 
 
 202
 203	/*
 204	 * The list is guaranteed not to change while the loop below is being
 205	 * executed, unless one of the masters' .power_on() callbacks fiddles
 206	 * with it.
 207	 */
 208	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 209		genpd_sd_counter_inc(link->master);
 210		genpd->status = GPD_STATE_WAIT_MASTER;
 211
 212		mutex_unlock(&genpd->lock);
 213
 214		ret = pm_genpd_poweron(link->master);
 
 
 
 
 
 
 215
 216		mutex_lock(&genpd->lock);
 217
 218		/*
 219		 * The "wait for parent" status is guaranteed not to change
 220		 * while the master is powering on.
 221		 */
 222		genpd->status = GPD_STATE_POWER_OFF;
 223		wake_up_all(&genpd->status_wait_queue);
 224		if (ret) {
 225			genpd_sd_counter_dec(link->master);
 226			goto err;
 227		}
 228	}
 229
 230	if (genpd->power_on) {
 231		ktime_t time_start = ktime_get();
 232		s64 elapsed_ns;
 233
 234		ret = genpd->power_on(genpd);
 235		if (ret)
 236			goto err;
 237
 238		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 239		if (elapsed_ns > genpd->power_on_latency_ns) {
 240			genpd->power_on_latency_ns = elapsed_ns;
 241			genpd->max_off_time_changed = true;
 242			genpd_recalc_cpu_exit_latency(genpd);
 243			if (genpd->name)
 244				pr_warning("%s: Power-on latency exceeded, "
 245					"new value %lld ns\n", genpd->name,
 246					elapsed_ns);
 247		}
 248	}
 249
 250 out:
 251	genpd_set_active(genpd);
 252
 253	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 254
 255 err:
 256	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
 257		genpd_sd_counter_dec(link->master);
 258
 259	return ret;
 260}
 261
 262/**
 263 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 264 * @genpd: PM domain to power up.
 265 */
 266int pm_genpd_poweron(struct generic_pm_domain *genpd)
 267{
 
 
 
 268	int ret;
 269
 270	mutex_lock(&genpd->lock);
 271	ret = __pm_genpd_poweron(genpd);
 272	mutex_unlock(&genpd->lock);
 
 
 
 
 
 
 
 
 273	return ret;
 274}
 275
 276/**
 277 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 278 * @domain_name: Name of the PM domain to power up.
 279 */
 280int pm_genpd_name_poweron(const char *domain_name)
 281{
 282	struct generic_pm_domain *genpd;
 283
 284	genpd = pm_genpd_lookup_name(domain_name);
 285	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
 286}
 287
 288#endif /* CONFIG_PM */
 289
 290#ifdef CONFIG_PM_RUNTIME
 291
 292static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
 293				     struct device *dev)
 294{
 295	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
 296}
 297
 298static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
 
 299{
 300	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
 301					save_state_latency_ns, "state save");
 302}
 303
 304static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 305{
 306	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
 307					restore_state_latency_ns,
 308					"state restore");
 309}
 310
 311static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 312				     unsigned long val, void *ptr)
 313{
 314	struct generic_pm_domain_data *gpd_data;
 315	struct device *dev;
 316
 317	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
 
 
 318
 319	mutex_lock(&gpd_data->lock);
 320	dev = gpd_data->base.dev;
 321	if (!dev) {
 322		mutex_unlock(&gpd_data->lock);
 323		return NOTIFY_DONE;
 
 
 324	}
 325	mutex_unlock(&gpd_data->lock);
 
 
 
 
 326
 327	for (;;) {
 328		struct generic_pm_domain *genpd;
 329		struct pm_domain_data *pdd;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 330
 331		spin_lock_irq(&dev->power.lock);
 
 
 332
 333		pdd = dev->power.subsys_data ?
 334				dev->power.subsys_data->domain_data : NULL;
 335		if (pdd && pdd->dev) {
 336			to_gpd_data(pdd)->td.constraint_changed = true;
 337			genpd = dev_to_genpd(dev);
 338		} else {
 339			genpd = ERR_PTR(-ENODATA);
 340		}
 341
 342		spin_unlock_irq(&dev->power.lock);
 
 
 
 
 
 
 
 
 
 
 
 
 343
 344		if (!IS_ERR(genpd)) {
 345			mutex_lock(&genpd->lock);
 346			genpd->max_off_time_changed = true;
 347			mutex_unlock(&genpd->lock);
 348		}
 349
 350		dev = dev->parent;
 351		if (!dev || dev->power.ignore_children)
 352			break;
 353	}
 354
 355	return NOTIFY_DONE;
 356}
 
 357
 358/**
 359 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 360 * @pdd: Domain data of the device to save the state of.
 361 * @genpd: PM domain the device belongs to.
 362 */
 363static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 364				  struct generic_pm_domain *genpd)
 365	__releases(&genpd->lock) __acquires(&genpd->lock)
 366{
 367	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
 368	struct device *dev = pdd->dev;
 369	int ret = 0;
 
 
 
 
 
 
 
 
 
 370
 371	if (gpd_data->need_restore)
 372		return 0;
 373
 374	mutex_unlock(&genpd->lock);
 
 
 
 
 375
 376	genpd_start_dev(genpd, dev);
 377	ret = genpd_save_dev(genpd, dev);
 378	genpd_stop_dev(genpd, dev);
 379
 380	mutex_lock(&genpd->lock);
 
 
 
 381
 382	if (!ret)
 383		gpd_data->need_restore = true;
 
 384
 
 
 
 
 
 
 
 
 
 
 
 385	return ret;
 386}
 387
 388/**
 389 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 390 * @pdd: Domain data of the device to restore the state of.
 391 * @genpd: PM domain the device belongs to.
 392 */
 393static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 394				      struct generic_pm_domain *genpd)
 395	__releases(&genpd->lock) __acquires(&genpd->lock)
 396{
 397	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
 398	struct device *dev = pdd->dev;
 399	bool need_restore = gpd_data->need_restore;
 400
 401	gpd_data->need_restore = false;
 402	mutex_unlock(&genpd->lock);
 403
 404	genpd_start_dev(genpd, dev);
 405	if (need_restore)
 406		genpd_restore_dev(genpd, dev);
 407
 408	mutex_lock(&genpd->lock);
 409}
 410
 411/**
 412 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 413 * @genpd: PM domain to check.
 414 *
 415 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
416 * a "power off" operation, which means that a "power on" has occurred in the
 417 * meantime, or if its resume_count field is different from zero, which means
 418 * that one of its devices has been resumed in the meantime.
 419 */
 420static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
 421{
 422	return genpd->status == GPD_STATE_WAIT_MASTER
 423		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
 424}
 425
 426/**
 427 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
428 * @genpd: PM domain to power off.
 429 *
 430 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 431 * before.
 432 */
 433void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 434{
 435	queue_work(pm_wq, &genpd->power_off_work);
 436}
 437
 438/**
 439 * pm_genpd_poweroff - Remove power from a given PM domain.
 440 * @genpd: PM domain to power down.
 441 *
 442 * If all of the @genpd's devices have been suspended and all of its subdomains
 443 * have been powered down, run the runtime suspend callbacks provided by all of
 444 * the @genpd's devices' drivers and remove power from @genpd.
 445 */
 446static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 447	__releases(&genpd->lock) __acquires(&genpd->lock)
 448{
 449	struct pm_domain_data *pdd;
 450	struct gpd_link *link;
 451	unsigned int not_suspended;
 452	int ret = 0;
 453
 454 start:
 455	/*
 456	 * Do not try to power off the domain in the following situations:
 457	 * (1) The domain is already in the "power off" state.
 458	 * (2) The domain is waiting for its master to power up.
 459	 * (3) One of the domain's devices is being resumed right now.
 460	 * (4) System suspend is in progress.
 461	 */
 462	if (genpd->status == GPD_STATE_POWER_OFF
 463	    || genpd->status == GPD_STATE_WAIT_MASTER
 464	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
 465		return 0;
 466
 467	if (atomic_read(&genpd->sd_count) > 0)
 468		return -EBUSY;
 469
 470	not_suspended = 0;
 471	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
 472		enum pm_qos_flags_status stat;
 473
 474		stat = dev_pm_qos_flags(pdd->dev,
 475					PM_QOS_FLAG_NO_POWER_OFF
 476						| PM_QOS_FLAG_REMOTE_WAKEUP);
 477		if (stat > PM_QOS_FLAGS_NONE)
 478			return -EBUSY;
 479
 480		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
 481		    || pdd->dev->power.irq_safe))
 482			not_suspended++;
 483	}
 484
 485	if (not_suspended > genpd->in_progress)
 486		return -EBUSY;
 487
 488	if (genpd->poweroff_task) {
 489		/*
 490		 * Another instance of pm_genpd_poweroff() is executing
 491		 * callbacks, so tell it to start over and return.
 492		 */
 493		genpd->status = GPD_STATE_REPEAT;
 494		return 0;
 495	}
 496
 497	if (genpd->gov && genpd->gov->power_down_ok) {
 498		if (!genpd->gov->power_down_ok(&genpd->domain))
 499			return -EAGAIN;
 500	}
 501
 502	genpd->status = GPD_STATE_BUSY;
 503	genpd->poweroff_task = current;
 504
 505	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
 506		ret = atomic_read(&genpd->sd_count) == 0 ?
 507			__pm_genpd_save_device(pdd, genpd) : -EBUSY;
 508
 509		if (genpd_abort_poweroff(genpd))
 510			goto out;
 511
 512		if (ret) {
 513			genpd_set_active(genpd);
 514			goto out;
 515		}
 516
 517		if (genpd->status == GPD_STATE_REPEAT) {
 518			genpd->poweroff_task = NULL;
 519			goto start;
 520		}
 521	}
 522
 523	if (genpd->cpu_data) {
 524		/*
 525		 * If cpu_data is set, cpuidle should turn the domain off when
 526		 * the CPU in it is idle.  In that case we don't decrement the
 527		 * subdomain counts of the master domains, so that power is not
 528		 * removed from the current domain prematurely as a result of
 529		 * cutting off the masters' power.
 530		 */
 531		genpd->status = GPD_STATE_POWER_OFF;
 532		cpuidle_pause_and_lock();
 533		genpd->cpu_data->idle_state->disabled = false;
 534		cpuidle_resume_and_unlock();
 535		goto out;
 536	}
 537
 538	if (genpd->power_off) {
 539		ktime_t time_start;
 540		s64 elapsed_ns;
 541
 542		if (atomic_read(&genpd->sd_count) > 0) {
 543			ret = -EBUSY;
 544			goto out;
 545		}
 546
 547		time_start = ktime_get();
 548
 549		/*
 550		 * If sd_count > 0 at this point, one of the subdomains hasn't
 551		 * managed to call pm_genpd_poweron() for the master yet after
 552		 * incrementing it.  In that case pm_genpd_poweron() will wait
 553		 * for us to drop the lock, so we can call .power_off() and let
 554		 * the pm_genpd_poweron() restore power for us (this shouldn't
 555		 * happen very often).
 556		 */
 557		ret = genpd->power_off(genpd);
 558		if (ret == -EBUSY) {
 559			genpd_set_active(genpd);
 560			goto out;
 561		}
 562
 563		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 564		if (elapsed_ns > genpd->power_off_latency_ns) {
 565			genpd->power_off_latency_ns = elapsed_ns;
 566			genpd->max_off_time_changed = true;
 567			if (genpd->name)
 568				pr_warning("%s: Power-off latency exceeded, "
 569					"new value %lld ns\n", genpd->name,
 570					elapsed_ns);
 571		}
 572	}
 573
 574	genpd->status = GPD_STATE_POWER_OFF;
 575
 576	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 577		genpd_sd_counter_dec(link->master);
 578		genpd_queue_power_off_work(link->master);
 579	}
 580
 581 out:
 582	genpd->poweroff_task = NULL;
 583	wake_up_all(&genpd->status_wait_queue);
 584	return ret;
 585}
 586
 587/**
 588 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 589 * @work: Work structure used for scheduling the execution of this function.
 590 */
 591static void genpd_power_off_work_fn(struct work_struct *work)
 592{
 593	struct generic_pm_domain *genpd;
 594
 595	genpd = container_of(work, struct generic_pm_domain, power_off_work);
 596
 597	genpd_acquire_lock(genpd);
 598	pm_genpd_poweroff(genpd);
 599	genpd_release_lock(genpd);
 600}
 601
 602/**
603 * pm_genpd_runtime_suspend - Suspend a device belonging to an I/O PM domain.
 604 * @dev: Device to suspend.
 605 *
 606 * Carry out a runtime suspend of a device under the assumption that its
 607 * pm_domain field points to the domain member of an object of type
 608 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 609 */
 610static int pm_genpd_runtime_suspend(struct device *dev)
 611{
 612	struct generic_pm_domain *genpd;
 613	bool (*stop_ok)(struct device *__dev);
 614	int ret;
 615
 616	dev_dbg(dev, "%s()\n", __func__);
 617
 618	genpd = dev_to_genpd(dev);
 619	if (IS_ERR(genpd))
 620		return -EINVAL;
 621
 622	might_sleep_if(!genpd->dev_irq_safe);
 623
 624	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
 625	if (stop_ok && !stop_ok(dev))
 626		return -EBUSY;
 627
 628	ret = genpd_stop_dev(genpd, dev);
 629	if (ret)
 630		return ret;
 631
 632	/*
 633	 * If power.irq_safe is set, this routine will be run with interrupts
 634	 * off, so it can't use mutexes.
 635	 */
 636	if (dev->power.irq_safe)
 637		return 0;
 638
 639	mutex_lock(&genpd->lock);
 640	genpd->in_progress++;
 641	pm_genpd_poweroff(genpd);
 642	genpd->in_progress--;
 643	mutex_unlock(&genpd->lock);
 644
 645	return 0;
 646}
 647
 648/**
649 * pm_genpd_runtime_resume - Resume a device belonging to an I/O PM domain.
 650 * @dev: Device to resume.
 651 *
 652 * Carry out a runtime resume of a device under the assumption that its
 653 * pm_domain field points to the domain member of an object of type
 654 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 655 */
 656static int pm_genpd_runtime_resume(struct device *dev)
 657{
 658	struct generic_pm_domain *genpd;
 659	DEFINE_WAIT(wait);
 660	int ret;
 661
 662	dev_dbg(dev, "%s()\n", __func__);
 663
 664	genpd = dev_to_genpd(dev);
 665	if (IS_ERR(genpd))
 666		return -EINVAL;
 667
 668	might_sleep_if(!genpd->dev_irq_safe);
 669
 670	/* If power.irq_safe, the PM domain is never powered off. */
 671	if (dev->power.irq_safe)
 672		return genpd_start_dev_no_timing(genpd, dev);
 673
 674	mutex_lock(&genpd->lock);
 675	ret = __pm_genpd_poweron(genpd);
 676	if (ret) {
 677		mutex_unlock(&genpd->lock);
 678		return ret;
 679	}
 680	genpd->status = GPD_STATE_BUSY;
 681	genpd->resume_count++;
 682	for (;;) {
 683		prepare_to_wait(&genpd->status_wait_queue, &wait,
 684				TASK_UNINTERRUPTIBLE);
 685		/*
 686		 * If current is the powering off task, we have been called
 687		 * reentrantly from one of the device callbacks, so we should
 688		 * not wait.
 689		 */
 690		if (!genpd->poweroff_task || genpd->poweroff_task == current)
 691			break;
 692		mutex_unlock(&genpd->lock);
 693
 694		schedule();
 695
 696		mutex_lock(&genpd->lock);
 697	}
 698	finish_wait(&genpd->status_wait_queue, &wait);
 699	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
 700	genpd->resume_count--;
 701	genpd_set_active(genpd);
 702	wake_up_all(&genpd->status_wait_queue);
 703	mutex_unlock(&genpd->lock);
 704
 705	return 0;
 706}
 707
 708static bool pd_ignore_unused;
 709static int __init pd_ignore_unused_setup(char *__unused)
 710{
 711	pd_ignore_unused = true;
 712	return 1;
 713}
 714__setup("pd_ignore_unused", pd_ignore_unused_setup);
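/*
 * Usage note: booting with "pd_ignore_unused" on the kernel command line
 * makes pm_genpd_poweroff_unused() below a no-op, leaving all domains as
 * they are; this can help when a platform's power-off sequences are not
 * yet trusted.
 */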
 715
 716/**
 717 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 718 */
 719void pm_genpd_poweroff_unused(void)
 720{
 721	struct generic_pm_domain *genpd;
 722
 723	if (pd_ignore_unused) {
 724		pr_warn("genpd: Not disabling unused power domains\n");
 725		return;
 726	}
 727
 728	mutex_lock(&gpd_list_lock);
 729
 730	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
 731		genpd_queue_power_off_work(genpd);
 732
 733	mutex_unlock(&gpd_list_lock);
 734}
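/*
 * A minimal sketch, assuming platform code wants unused domains cut once
 * all drivers have had a chance to probe (the initcall level is a common
 * choice, not something this API requires):
 *
 *	static int __init foo_genpd_poweroff_unused(void)
 *	{
 *		pm_genpd_poweroff_unused();
 *		return 0;
 *	}
 *	late_initcall(foo_genpd_poweroff_unused);
 */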
 735
 736#else
 737
 738static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 739					    unsigned long val, void *ptr)
 740{
 741	return NOTIFY_DONE;
 742}
 743
 744static inline void genpd_power_off_work_fn(struct work_struct *work) {}
 745
 746#define pm_genpd_runtime_suspend	NULL
 747#define pm_genpd_runtime_resume		NULL
 748
 749#endif /* CONFIG_PM_RUNTIME */
 750
 751#ifdef CONFIG_PM_SLEEP
 752
 753/**
 754 * pm_genpd_present - Check if the given PM domain has been initialized.
 755 * @genpd: PM domain to check.
 756 */
 757static bool pm_genpd_present(struct generic_pm_domain *genpd)
 758{
 759	struct generic_pm_domain *gpd;
 760
 761	if (IS_ERR_OR_NULL(genpd))
 762		return false;
 763
 764	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
 765		if (gpd == genpd)
 766			return true;
 767
 768	return false;
 769}
 770
 771static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
 772				    struct device *dev)
 773{
 774	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
 775}
 776
 777static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
 778{
 779	return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
 780}
 781
 782static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
 783{
 784	return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
 785}
 786
 787static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
 788{
 789	return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
 790}
 791
 792static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
 793{
 794	return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
 795}
 796
 797static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
 798{
 799	return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
 800}
 801
 802static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
 803{
 804	return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
 805}
 806
 807static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
 808{
 809	return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
 810}
 811
 812static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
 813{
 814	return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
 815}
 816
 817/**
 818 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 819 * @genpd: PM domain to power off, if possible.
 820 *
 821 * Check if the given PM domain can be powered off (during system suspend or
 822 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 823 *
 824 * This function is only called in "noirq" and "syscore" stages of system power
 825 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 826 * executed sequentially, so it is guaranteed that it will never run twice in
 827 * parallel).
 828 */
 829static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 830{
 831	struct gpd_link *link;
 832
 833	if (genpd->status == GPD_STATE_POWER_OFF)
 834		return;
 835
 836	if (genpd->suspended_count != genpd->device_count
 837	    || atomic_read(&genpd->sd_count) > 0)
 838		return;
 839
 840	if (genpd->power_off)
 841		genpd->power_off(genpd);
 842
 843	genpd->status = GPD_STATE_POWER_OFF;
 844
 845	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 846		genpd_sd_counter_dec(link->master);
 847		pm_genpd_sync_poweroff(link->master);
 848	}
 849}
 850
 851/**
 852 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 853 * @genpd: PM domain to power on.
 854 *
 855 * This function is only called in "noirq" and "syscore" stages of system power
 856 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 857 * executed sequentially, so it is guaranteed that it will never run twice in
 858 * parallel).
 859 */
 860static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
 861{
 862	struct gpd_link *link;
 863
 864	if (genpd->status != GPD_STATE_POWER_OFF)
 865		return;
 866
 867	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 868		pm_genpd_sync_poweron(link->master);
 869		genpd_sd_counter_inc(link->master);
 870	}
 871
 872	if (genpd->power_on)
 873		genpd->power_on(genpd);
 874
 875	genpd->status = GPD_STATE_ACTIVE;
 876}
 877
 878/**
 879 * resume_needed - Check whether to resume a device before system suspend.
 880 * @dev: Device to check.
 881 * @genpd: PM domain the device belongs to.
 882 *
 883 * There are two cases in which a device that can wake up the system from sleep
 884 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 885 * to wake up the system and it has to remain active for this purpose while the
 886 * system is in the sleep state and (2) if the device is not enabled to wake up
 887 * the system from sleep states and it generally doesn't generate wakeup signals
 888 * by itself (those signals are generated on its behalf by other parts of the
 889 * system).  In the latter case it may be necessary to reconfigure the device's
 890 * wakeup settings during system suspend, because it may have been set up to
 891 * signal remote wakeup from the system's working state as needed by runtime PM.
 892 * Return 'true' in either of the above cases.
 893 */
 894static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
 895{
 896	bool active_wakeup;
 897
 898	if (!device_can_wakeup(dev))
 899		return false;
 900
 901	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
 902	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
 903}
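/*
 * Worked example of the check above: a device that can generate wakeups,
 * has them enabled, and whose active_wakeup callback returns true must stay
 * functional to wake the system, so resume_needed() returns true (case 1).
 * A wakeup-capable device with wakeup disabled whose active_wakeup callback
 * returns false may need its remote-wakeup setup undone, so the function
 * also returns true (case 2); in the remaining combinations the device is
 * left suspended.
 */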
 904
 905/**
 906 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 907 * @dev: Device to start the transition of.
 908 *
 909 * Start a power transition of a device (during a system-wide power transition)
 910 * under the assumption that its pm_domain field points to the domain member of
 911 * an object of type struct generic_pm_domain representing a PM domain
 912 * consisting of I/O devices.
 913 */
 914static int pm_genpd_prepare(struct device *dev)
 915{
 916	struct generic_pm_domain *genpd;
 917	int ret;
 918
 919	dev_dbg(dev, "%s()\n", __func__);
 920
 921	genpd = dev_to_genpd(dev);
 922	if (IS_ERR(genpd))
 923		return -EINVAL;
 924
 925	/*
 926	 * If a wakeup request is pending for the device, it should be woken up
 927	 * at this point and a system wakeup event should be reported if it's
 928	 * set up to wake up the system from sleep states.
 929	 */
 930	pm_runtime_get_noresume(dev);
 931	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
 932		pm_wakeup_event(dev, 0);
 933
 934	if (pm_wakeup_pending()) {
 935		pm_runtime_put(dev);
 936		return -EBUSY;
 937	}
 938
 939	if (resume_needed(dev, genpd))
 940		pm_runtime_resume(dev);
 941
 942	genpd_acquire_lock(genpd);
 943
 944	if (genpd->prepared_count++ == 0) {
 945		genpd->suspended_count = 0;
 946		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
 947	}
 948
 949	genpd_release_lock(genpd);
 950
 951	if (genpd->suspend_power_off) {
 952		pm_runtime_put_noidle(dev);
 953		return 0;
 954	}
 955
 956	/*
 957	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
 958	 * so pm_genpd_poweron() will return immediately, but if the device
 959	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
 960	 * to make it operational.
 961	 */
 962	pm_runtime_resume(dev);
 963	__pm_runtime_disable(dev, false);
 964
 965	ret = pm_generic_prepare(dev);
 966	if (ret) {
 967		mutex_lock(&genpd->lock);
 968
 969		if (--genpd->prepared_count == 0)
 970			genpd->suspend_power_off = false;
 971
 972		mutex_unlock(&genpd->lock);
 973		pm_runtime_enable(dev);
 974	}
 975
 976	pm_runtime_put(dev);
 977	return ret;
 978}
 979
 980/**
 981 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 982 * @dev: Device to suspend.
 983 *
 984 * Suspend a device under the assumption that its pm_domain field points to the
 985 * domain member of an object of type struct generic_pm_domain representing
 986 * a PM domain consisting of I/O devices.
 987 */
 988static int pm_genpd_suspend(struct device *dev)
 989{
 990	struct generic_pm_domain *genpd;
 991
 992	dev_dbg(dev, "%s()\n", __func__);
 993
 994	genpd = dev_to_genpd(dev);
 995	if (IS_ERR(genpd))
 996		return -EINVAL;
 997
 998	return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
 999}
1000
1001/**
1002 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
1003 * @dev: Device to suspend.
1004 *
1005 * Carry out a late suspend of a device under the assumption that its
1006 * pm_domain field points to the domain member of an object of type
1007 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1008 */
1009static int pm_genpd_suspend_late(struct device *dev)
1010{
1011	struct generic_pm_domain *genpd;
1012
1013	dev_dbg(dev, "%s()\n", __func__);
1014
1015	genpd = dev_to_genpd(dev);
1016	if (IS_ERR(genpd))
1017		return -EINVAL;
1018
1019	return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
1020}
1021
1022/**
1023 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1024 * @dev: Device to suspend.
1025 *
1026 * Stop the device and remove power from the domain if all devices in it have
1027 * been stopped.
1028 */
1029static int pm_genpd_suspend_noirq(struct device *dev)
1030{
1031	struct generic_pm_domain *genpd;
1032
1033	dev_dbg(dev, "%s()\n", __func__);
1034
1035	genpd = dev_to_genpd(dev);
1036	if (IS_ERR(genpd))
1037		return -EINVAL;
1038
1039	if (genpd->suspend_power_off
1040	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
1041		return 0;
1042
1043	genpd_stop_dev(genpd, dev);
1044
1045	/*
1046	 * Since all of the "noirq" callbacks are executed sequentially, it is
1047	 * guaranteed that this function will never run twice in parallel for
1048	 * the same PM domain, so it is not necessary to use locking here.
1049	 */
1050	genpd->suspended_count++;
1051	pm_genpd_sync_poweroff(genpd);
1052
1053	return 0;
1054}
1055
1056/**
1057 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1058 * @dev: Device to resume.
1059 *
1060 * Restore power to the device's PM domain, if necessary, and start the device.
1061 */
1062static int pm_genpd_resume_noirq(struct device *dev)
1063{
1064	struct generic_pm_domain *genpd;
1065
1066	dev_dbg(dev, "%s()\n", __func__);
1067
1068	genpd = dev_to_genpd(dev);
1069	if (IS_ERR(genpd))
1070		return -EINVAL;
1071
1072	if (genpd->suspend_power_off
1073	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
1074		return 0;
1075
1076	/*
1077	 * Since all of the "noirq" callbacks are executed sequentially, it is
1078	 * guaranteed that this function will never run twice in parallel for
1079	 * the same PM domain, so it is not necessary to use locking here.
1080	 */
1081	pm_genpd_sync_poweron(genpd);
1082	genpd->suspended_count--;
1083
1084	return genpd_start_dev(genpd, dev);
1085}
1086
1087/**
1088 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
1089 * @dev: Device to resume.
1090 *
1091 * Carry out an early resume of a device under the assumption that its
1092 * pm_domain field points to the domain member of an object of type
1093 * struct generic_pm_domain representing a power domain consisting of I/O
1094 * devices.
1095 */
1096static int pm_genpd_resume_early(struct device *dev)
1097{
1098	struct generic_pm_domain *genpd;
1099
1100	dev_dbg(dev, "%s()\n", __func__);
1101
1102	genpd = dev_to_genpd(dev);
1103	if (IS_ERR(genpd))
1104		return -EINVAL;
1105
1106	return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
1107}
1108
1109/**
1110 * pm_genpd_resume - Resume of device in an I/O PM domain.
1111 * @dev: Device to resume.
1112 *
1113 * Resume a device under the assumption that its pm_domain field points to the
1114 * domain member of an object of type struct generic_pm_domain representing
1115 * a power domain consisting of I/O devices.
1116 */
1117static int pm_genpd_resume(struct device *dev)
1118{
1119	struct generic_pm_domain *genpd;
1120
1121	dev_dbg(dev, "%s()\n", __func__);
1122
1123	genpd = dev_to_genpd(dev);
1124	if (IS_ERR(genpd))
1125		return -EINVAL;
1126
1127	return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
1128}
1129
1130/**
1131 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
1132 * @dev: Device to freeze.
1133 *
1134 * Freeze a device under the assumption that its pm_domain field points to the
1135 * domain member of an object of type struct generic_pm_domain representing
1136 * a power domain consisting of I/O devices.
1137 */
1138static int pm_genpd_freeze(struct device *dev)
1139{
1140	struct generic_pm_domain *genpd;
1141
1142	dev_dbg(dev, "%s()\n", __func__);
1143
1144	genpd = dev_to_genpd(dev);
1145	if (IS_ERR(genpd))
1146		return -EINVAL;
1147
1148	return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
1149}
1150
1151/**
1152 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
1153 * @dev: Device to freeze.
1154 *
1155 * Carry out a late freeze of a device under the assumption that its
1156 * pm_domain field points to the domain member of an object of type
1157 * struct generic_pm_domain representing a power domain consisting of I/O
1158 * devices.
1159 */
1160static int pm_genpd_freeze_late(struct device *dev)
1161{
1162	struct generic_pm_domain *genpd;
1163
1164	dev_dbg(dev, "%s()\n", __func__);
1165
1166	genpd = dev_to_genpd(dev);
1167	if (IS_ERR(genpd))
1168		return -EINVAL;
1169
1170	return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
1171}
1172
1173/**
1174 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1175 * @dev: Device to freeze.
1176 *
1177 * Carry out a late freeze of a device under the assumption that its
1178 * pm_domain field points to the domain member of an object of type
1179 * struct generic_pm_domain representing a power domain consisting of I/O
1180 * devices.
1181 */
1182static int pm_genpd_freeze_noirq(struct device *dev)
1183{
1184	struct generic_pm_domain *genpd;
1185
1186	dev_dbg(dev, "%s()\n", __func__);
1187
1188	genpd = dev_to_genpd(dev);
1189	if (IS_ERR(genpd))
1190		return -EINVAL;
1191
1192	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
1193}
1194
1195/**
1196 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1197 * @dev: Device to thaw.
1198 *
1199 * Start the device, unless power has been removed from the domain already
1200 * before the system transition.
1201 */
1202static int pm_genpd_thaw_noirq(struct device *dev)
1203{
1204	struct generic_pm_domain *genpd;
1205
1206	dev_dbg(dev, "%s()\n", __func__);
1207
1208	genpd = dev_to_genpd(dev);
1209	if (IS_ERR(genpd))
1210		return -EINVAL;
1211
1212	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
1213}
1214
1215/**
1216 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
1217 * @dev: Device to thaw.
1218 *
1219 * Carry out an early thaw of a device under the assumption that its
1220 * pm_domain field points to the domain member of an object of type
1221 * struct generic_pm_domain representing a power domain consisting of I/O
1222 * devices.
1223 */
1224static int pm_genpd_thaw_early(struct device *dev)
1225{
1226	struct generic_pm_domain *genpd;
1227
1228	dev_dbg(dev, "%s()\n", __func__);
1229
1230	genpd = dev_to_genpd(dev);
1231	if (IS_ERR(genpd))
1232		return -EINVAL;
1233
1234	return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
1235}
1236
1237/**
1238 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
1239 * @dev: Device to thaw.
1240 *
1241 * Thaw a device under the assumption that its pm_domain field points to the
1242 * domain member of an object of type struct generic_pm_domain representing
1243 * a power domain consisting of I/O devices.
1244 */
1245static int pm_genpd_thaw(struct device *dev)
1246{
1247	struct generic_pm_domain *genpd;
1248
1249	dev_dbg(dev, "%s()\n", __func__);
1250
1251	genpd = dev_to_genpd(dev);
1252	if (IS_ERR(genpd))
1253		return -EINVAL;
1254
1255	return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
1256}
1257
1258/**
1259 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1260 * @dev: Device to resume.
1261 *
1262 * Make sure the domain will be in the same power state as before the
1263 * hibernation the system is resuming from and start the device if necessary.
1264 */
1265static int pm_genpd_restore_noirq(struct device *dev)
1266{
1267	struct generic_pm_domain *genpd;
1268
1269	dev_dbg(dev, "%s()\n", __func__);
1270
1271	genpd = dev_to_genpd(dev);
1272	if (IS_ERR(genpd))
1273		return -EINVAL;
1274
1275	/*
1276	 * Since all of the "noirq" callbacks are executed sequentially, it is
1277	 * guaranteed that this function will never run twice in parallel for
1278	 * the same PM domain, so it is not necessary to use locking here.
1279	 *
1280	 * At this point suspended_count == 0 means we are being run for the
1281	 * first time for the given domain in the present cycle.
1282	 */
1283	if (genpd->suspended_count++ == 0) {
1284		/*
1285		 * The boot kernel might put the domain into arbitrary state,
1286		 * so make it appear as powered off to pm_genpd_sync_poweron(),
1287		 * so that it tries to power it on in case it was really off.
1288		 */
1289		genpd->status = GPD_STATE_POWER_OFF;
1290		if (genpd->suspend_power_off) {
1291			/*
1292			 * If the domain was off before the hibernation, make
1293			 * sure it will be off going forward.
1294			 */
1295			if (genpd->power_off)
1296				genpd->power_off(genpd);
1297
1298			return 0;
1299		}
1300	}
1301
1302	if (genpd->suspend_power_off)
1303		return 0;
1304
1305	pm_genpd_sync_poweron(genpd);
1306
1307	return genpd_start_dev(genpd, dev);
1308}
1309
1310/**
1311 * pm_genpd_complete - Complete power transition of a device in a power domain.
1312 * @dev: Device to complete the transition of.
1313 *
1314 * Complete a power transition of a device (during a system-wide power
1315 * transition) under the assumption that its pm_domain field points to the
1316 * domain member of an object of type struct generic_pm_domain representing
1317 * a power domain consisting of I/O devices.
1318 */
1319static void pm_genpd_complete(struct device *dev)
1320{
1321	struct generic_pm_domain *genpd;
1322	bool run_complete;
1323
1324	dev_dbg(dev, "%s()\n", __func__);
1325
1326	genpd = dev_to_genpd(dev);
1327	if (IS_ERR(genpd))
1328		return;
1329
1330	mutex_lock(&genpd->lock);
1331
1332	run_complete = !genpd->suspend_power_off;
1333	if (--genpd->prepared_count == 0)
1334		genpd->suspend_power_off = false;
1335
1336	mutex_unlock(&genpd->lock);
1337
1338	if (run_complete) {
1339		pm_generic_complete(dev);
1340		pm_runtime_set_active(dev);
1341		pm_runtime_enable(dev);
1342		pm_request_idle(dev);
1343	}
1344}
1345
1346/**
1347 * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
1348 * @dev: Device that normally is marked as "always on" to switch power for.
1349 *
1350 * This routine may only be called during the system core (syscore) suspend or
1351 * resume phase for devices whose "always on" flags are set.
1352 */
1353void pm_genpd_syscore_switch(struct device *dev, bool suspend)
1354{
1355	struct generic_pm_domain *genpd;
1356
1357	genpd = dev_to_genpd(dev);
1358	if (!pm_genpd_present(genpd))
1359		return;
1360
1361	if (suspend) {
1362		genpd->suspended_count++;
1363		pm_genpd_sync_poweroff(genpd);
1364	} else {
1365		pm_genpd_sync_poweron(genpd);
1366		genpd->suspended_count--;
1367	}
1368}
1369EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
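/*
 * A minimal sketch; the foo_* names are hypothetical.  A timekeeping
 * device that must keep running into the syscore phase can drop its
 * domain last on suspend and restore it first on resume:
 *
 *	static int foo_timer_syscore_suspend(void)
 *	{
 *		pm_genpd_syscore_switch(&foo_timer_pdev->dev, true);
 *		return 0;
 *	}
 *
 *	static void foo_timer_syscore_resume(void)
 *	{
 *		pm_genpd_syscore_switch(&foo_timer_pdev->dev, false);
 *	}
 */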
1370
1371#else
1372
1373#define pm_genpd_prepare		NULL
1374#define pm_genpd_suspend		NULL
1375#define pm_genpd_suspend_late		NULL
1376#define pm_genpd_suspend_noirq		NULL
1377#define pm_genpd_resume_early		NULL
1378#define pm_genpd_resume_noirq		NULL
1379#define pm_genpd_resume			NULL
1380#define pm_genpd_freeze			NULL
1381#define pm_genpd_freeze_late		NULL
1382#define pm_genpd_freeze_noirq		NULL
1383#define pm_genpd_thaw_early		NULL
1384#define pm_genpd_thaw_noirq		NULL
1385#define pm_genpd_thaw			NULL
1386#define pm_genpd_restore_noirq		NULL
1387#define pm_genpd_complete		NULL
1388
1389#endif /* CONFIG_PM_SLEEP */
1390
1391static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
1392{
1393	struct generic_pm_domain_data *gpd_data;
1394
1395	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1396	if (!gpd_data)
1397		return NULL;
1398
1399	mutex_init(&gpd_data->lock);
1400	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1401	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1402	return gpd_data;
1403}
1404
1405static void __pm_genpd_free_dev_data(struct device *dev,
1406				     struct generic_pm_domain_data *gpd_data)
1407{
1408	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1409	kfree(gpd_data);
1410}
1411
1412/**
1413 * __pm_genpd_add_device - Add a device to an I/O PM domain.
1414 * @genpd: PM domain to add the device to.
1415 * @dev: Device to be added.
1416 * @td: Set of PM QoS timing parameters to attach to the device.
1417 */
1418int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1419			  struct gpd_timing_data *td)
1420{
1421	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
1422	struct pm_domain_data *pdd;
1423	int ret = 0;
1424
1425	dev_dbg(dev, "%s()\n", __func__);
1426
1427	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1428		return -EINVAL;
1429
1430	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
1431	if (!gpd_data_new)
1432		return -ENOMEM;
1433
1434	genpd_acquire_lock(genpd);
1435
1436	if (genpd->prepared_count > 0) {
1437		ret = -EAGAIN;
1438		goto out;
1439	}
1440
1441	list_for_each_entry(pdd, &genpd->dev_list, list_node)
1442		if (pdd->dev == dev) {
1443			ret = -EINVAL;
1444			goto out;
1445		}
1446
1447	ret = dev_pm_get_subsys_data(dev);
1448	if (ret)
1449		goto out;
1450
1451	genpd->device_count++;
1452	genpd->max_off_time_changed = true;
1453
1454	spin_lock_irq(&dev->power.lock);
1455
1456	dev->pm_domain = &genpd->domain;
1457	if (dev->power.subsys_data->domain_data) {
1458		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1459	} else {
1460		gpd_data = gpd_data_new;
1461		dev->power.subsys_data->domain_data = &gpd_data->base;
1462	}
1463	gpd_data->refcount++;
1464	if (td)
1465		gpd_data->td = *td;
1466
1467	spin_unlock_irq(&dev->power.lock);
1468
1469	mutex_lock(&gpd_data->lock);
1470	gpd_data->base.dev = dev;
1471	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1472	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
1473	gpd_data->td.constraint_changed = true;
1474	gpd_data->td.effective_constraint_ns = -1;
1475	mutex_unlock(&gpd_data->lock);
1476
1477 out:
1478	genpd_release_lock(genpd);
1479
1480	if (gpd_data != gpd_data_new)
1481		__pm_genpd_free_dev_data(dev, gpd_data_new);
1482
1483	return ret;
1484}
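/*
 * A minimal sketch, assuming "foo_pd" was registered with pm_genpd_init()
 * (the names and latencies are hypothetical):
 *
 *	static struct gpd_timing_data foo_td = {
 *		.stop_latency_ns = 20000,
 *		.start_latency_ns = 50000,
 *	};
 *
 *	ret = __pm_genpd_add_device(&foo_pd, &pdev->dev, &foo_td);
 *
 * Passing a NULL @td leaves the device's existing timing data in place.
 */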
1485
1486/**
1487 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
1488 * @genpd_node: Device tree node pointer representing a PM domain to which
1489 *   the device is added.
1490 * @dev: Device to be added.
1491 * @td: Set of PM QoS timing parameters to attach to the device.
1492 */
1493int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
1494			     struct gpd_timing_data *td)
1495{
1496	struct generic_pm_domain *genpd = NULL, *gpd;
1497
1498	dev_dbg(dev, "%s()\n", __func__);
1499
1500	if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
1501		return -EINVAL;
1502
1503	mutex_lock(&gpd_list_lock);
1504	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1505		if (gpd->of_node == genpd_node) {
1506			genpd = gpd;
1507			break;
1508		}
1509	}
1510	mutex_unlock(&gpd_list_lock);
1511
1512	if (!genpd)
1513		return -EINVAL;
1514
1515	return __pm_genpd_add_device(genpd, dev, td);
1516}
1517
1518
1519/**
1520 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
1521 * @domain_name: Name of the PM domain to add the device to.
1522 * @dev: Device to be added.
1523 * @td: Set of PM QoS timing parameters to attach to the device.
1524 */
1525int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
1526			       struct gpd_timing_data *td)
1527{
1528	return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
1529}
1530
1531/**
1532 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1533 * @genpd: PM domain to remove the device from.
1534 * @dev: Device to be removed.
1535 */
1536int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1537			   struct device *dev)
1538{
1539	struct generic_pm_domain_data *gpd_data;
1540	struct pm_domain_data *pdd;
1541	bool remove = false;
1542	int ret = 0;
1543
1544	dev_dbg(dev, "%s()\n", __func__);
1545
1546	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
1547	    ||  IS_ERR_OR_NULL(dev->pm_domain)
1548	    ||  pd_to_genpd(dev->pm_domain) != genpd)
1549		return -EINVAL;
1550
1551	genpd_acquire_lock(genpd);
1552
1553	if (genpd->prepared_count > 0) {
1554		ret = -EAGAIN;
1555		goto out;
1556	}
1557
1558	genpd->device_count--;
1559	genpd->max_off_time_changed = true;
1560
1561	spin_lock_irq(&dev->power.lock);
1562
1563	dev->pm_domain = NULL;
1564	pdd = dev->power.subsys_data->domain_data;
1565	list_del_init(&pdd->list_node);
1566	gpd_data = to_gpd_data(pdd);
1567	if (--gpd_data->refcount == 0) {
1568		dev->power.subsys_data->domain_data = NULL;
1569		remove = true;
1570	}
1571
1572	spin_unlock_irq(&dev->power.lock);
1573
1574	mutex_lock(&gpd_data->lock);
1575	pdd->dev = NULL;
1576	mutex_unlock(&gpd_data->lock);
1577
1578	genpd_release_lock(genpd);
1579
1580	dev_pm_put_subsys_data(dev);
1581	if (remove)
1582		__pm_genpd_free_dev_data(dev, gpd_data);
1583
1584	return 0;
1585
1586 out:
1587	genpd_release_lock(genpd);
1588
1589	return ret;
1590}
1591
1592/**
1593 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
1594 * @dev: Device to set/unset the flag for.
1595 * @val: The new value of the device's "need restore" flag.
1596 */
1597void pm_genpd_dev_need_restore(struct device *dev, bool val)
1598{
1599	struct pm_subsys_data *psd;
1600	unsigned long flags;
1601
1602	spin_lock_irqsave(&dev->power.lock, flags);
1603
1604	psd = dev_to_psd(dev);
1605	if (psd && psd->domain_data)
1606		to_gpd_data(psd->domain_data)->need_restore = val;
1607
1608	spin_unlock_irqrestore(&dev->power.lock, flags);
1609}
1610EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
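/*
 * A minimal sketch: a driver that knows the hardware state was lost behind
 * genpd's back (say, after a firmware-initiated reset of a hypothetical
 * "foo" device) can force a restore on the next resume:
 *
 *	pm_genpd_dev_need_restore(&foo_pdev->dev, true);
 */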
1611
1612/**
1613 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1614 * @genpd: Master PM domain to add the subdomain to.
1615 * @subdomain: Subdomain to be added.
1616 */
1617int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1618			   struct generic_pm_domain *subdomain)
1619{
1620	struct gpd_link *link;
1621	int ret = 0;
1622
1623	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1624	    || genpd == subdomain)
1625		return -EINVAL;
1626
1627 start:
1628	genpd_acquire_lock(genpd);
1629	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1630
1631	if (subdomain->status != GPD_STATE_POWER_OFF
1632	    && subdomain->status != GPD_STATE_ACTIVE) {
1633		mutex_unlock(&subdomain->lock);
1634		genpd_release_lock(genpd);
1635		goto start;
1636	}
1637
1638	if (genpd->status == GPD_STATE_POWER_OFF
1639	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
1640		ret = -EINVAL;
1641		goto out;
1642	}
1643
1644	list_for_each_entry(link, &genpd->master_links, master_node) {
1645		if (link->slave == subdomain && link->master == genpd) {
1646			ret = -EINVAL;
1647			goto out;
1648		}
1649	}
1650
1651	link = kzalloc(sizeof(*link), GFP_KERNEL);
1652	if (!link) {
1653		ret = -ENOMEM;
1654		goto out;
1655	}
1656	link->master = genpd;
1657	list_add_tail(&link->master_node, &genpd->master_links);
1658	link->slave = subdomain;
1659	list_add_tail(&link->slave_node, &subdomain->slave_links);
1660	if (subdomain->status != GPD_STATE_POWER_OFF)
1661		genpd_sd_counter_inc(genpd);
1662
1663 out:
1664	mutex_unlock(&subdomain->lock);
1665	genpd_release_lock(genpd);
1666
1667	return ret;
1668}
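/*
 * A minimal sketch linking two hypothetical domains already set up with
 * pm_genpd_init(); the same relationship can be expressed by name via
 * pm_genpd_add_subdomain_names() below:
 *
 *	ret = pm_genpd_add_subdomain(&foo_top_pd, &foo_sub_pd);
 *	if (ret)
 *		pr_err("Failed to link foo_sub_pd: %d\n", ret);
 *
 * While foo_sub_pd stays powered on, foo_top_pd's sd_count keeps it from
 * being powered off.
 */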
1669
1670/**
1671 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
1672 * @master_name: Name of the master PM domain to add the subdomain to.
1673 * @subdomain_name: Name of the subdomain to be added.
1674 */
1675int pm_genpd_add_subdomain_names(const char *master_name,
1676				 const char *subdomain_name)
1677{
1678	struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
1679
1680	if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
1681		return -EINVAL;
1682
1683	mutex_lock(&gpd_list_lock);
1684	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1685		if (!master && !strcmp(gpd->name, master_name))
1686			master = gpd;
1687
1688		if (!subdomain && !strcmp(gpd->name, subdomain_name))
1689			subdomain = gpd;
1690
1691		if (master && subdomain)
1692			break;
1693	}
1694	mutex_unlock(&gpd_list_lock);
1695
1696	return pm_genpd_add_subdomain(master, subdomain);
1697}
1698
1699/**
1700 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1701 * @genpd: Master PM domain to remove the subdomain from.
1702 * @subdomain: Subdomain to be removed.
1703 */
1704int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1705			      struct generic_pm_domain *subdomain)
1706{
1707	struct gpd_link *link;
1708	int ret = -EINVAL;
1709
1710	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1711		return -EINVAL;
1712
1713 start:
1714	genpd_acquire_lock(genpd);
1715
1716	list_for_each_entry(link, &genpd->master_links, master_node) {
1717		if (link->slave != subdomain)
1718			continue;
1719
1720		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1721
1722		if (subdomain->status != GPD_STATE_POWER_OFF
1723		    && subdomain->status != GPD_STATE_ACTIVE) {
1724			mutex_unlock(&subdomain->lock);
1725			genpd_release_lock(genpd);
1726			goto start;
1727		}
1728
1729		list_del(&link->master_node);
1730		list_del(&link->slave_node);
1731		kfree(link);
1732		if (subdomain->status != GPD_STATE_POWER_OFF)
1733			genpd_sd_counter_dec(genpd);
1734
1735		mutex_unlock(&subdomain->lock);
1736
1737		ret = 0;
1738		break;
1739	}
1740
1741	genpd_release_lock(genpd);
1742
1743	return ret;
1744}
1745
1746/**
1747 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
1748 * @dev: Device to add the callbacks to.
1749 * @ops: Set of callbacks to add.
1750 * @td: Timing data to add to the device along with the callbacks (optional).
1751 *
1752 * Every call to this routine should be balanced with a call to
1753 * __pm_genpd_remove_callbacks() and they must not be nested.
1754 */
1755int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
1756			   struct gpd_timing_data *td)
1757{
1758	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
1759	int ret = 0;
1760
1761	if (!(dev && ops))
1762		return -EINVAL;
1763
1764	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
1765	if (!gpd_data_new)
1766		return -ENOMEM;
1767
1768	pm_runtime_disable(dev);
1769	device_pm_lock();
1770
1771	ret = dev_pm_get_subsys_data(dev);
1772	if (ret)
1773		goto out;
1774
1775	spin_lock_irq(&dev->power.lock);
1776
1777	if (dev->power.subsys_data->domain_data) {
1778		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1779	} else {
1780		gpd_data = gpd_data_new;
1781		dev->power.subsys_data->domain_data = &gpd_data->base;
1782	}
1783	gpd_data->refcount++;
1784	gpd_data->ops = *ops;
1785	if (td)
1786		gpd_data->td = *td;
1787
1788	spin_unlock_irq(&dev->power.lock);
1789
1790 out:
1791	device_pm_unlock();
1792	pm_runtime_enable(dev);
1793
1794	if (gpd_data != gpd_data_new)
1795		__pm_genpd_free_dev_data(dev, gpd_data_new);
1796
1797	return ret;
1798}
1799EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
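/*
 * A minimal sketch; foo_save() and foo_restore() are hypothetical driver
 * helpers.  Members left NULL fall through to the pm_genpd_default_*
 * handlers and from there to the usual runtime PM callbacks:
 *
 *	static struct gpd_dev_ops foo_gpd_ops = {
 *		.save_state = foo_save,
 *		.restore_state = foo_restore,
 *	};
 *
 *	ret = pm_genpd_add_callbacks(&pdev->dev, &foo_gpd_ops, NULL);
 *
 * Each such call must later be balanced by __pm_genpd_remove_callbacks().
 */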
1800
1801/**
1802 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
1803 * @dev: Device to remove the callbacks from.
1804 * @clear_td: If set, clear the device's timing data too.
1805 *
1806 * This routine can only be called after pm_genpd_add_callbacks().
1807 */
1808int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
1809{
1810	struct generic_pm_domain_data *gpd_data = NULL;
1811	bool remove = false;
1812	int ret = 0;
1813
1814	if (!(dev && dev->power.subsys_data))
1815		return -EINVAL;
1816
1817	pm_runtime_disable(dev);
1818	device_pm_lock();
1819
1820	spin_lock_irq(&dev->power.lock);
1821
1822	if (dev->power.subsys_data->domain_data) {
1823		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1824		gpd_data->ops = (struct gpd_dev_ops){ NULL };
1825		if (clear_td)
1826			gpd_data->td = (struct gpd_timing_data){ 0 };
1827
1828		if (--gpd_data->refcount == 0) {
1829			dev->power.subsys_data->domain_data = NULL;
1830			remove = true;
1831		}
1832	} else {
1833		ret = -EINVAL;
1834	}
1835
1836	spin_unlock_irq(&dev->power.lock);
1837
1838	device_pm_unlock();
1839	pm_runtime_enable(dev);
1840
1841	if (ret)
1842		return ret;
1843
1844	dev_pm_put_subsys_data(dev);
1845	if (remove)
1846		__pm_genpd_free_dev_data(dev, gpd_data);
1847
1848	return 0;
1849}
1850EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1851
1852/**
1853 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
1854 * @genpd: PM domain to be connected with cpuidle.
1855 * @state: cpuidle state this domain can disable/enable.
1856 *
1857 * Make a PM domain behave as though it contained a CPU core, that is, instead
1858 * of calling its power down routine it will enable the given cpuidle state so
1859 * that the cpuidle subsystem can power it down (if possible and desirable).
1860 */
1861int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1862{
1863	struct cpuidle_driver *cpuidle_drv;
1864	struct gpd_cpu_data *cpu_data;
1865	struct cpuidle_state *idle_state;
1866	int ret = 0;
1867
1868	if (IS_ERR_OR_NULL(genpd) || state < 0)
1869		return -EINVAL;
1870
1871	genpd_acquire_lock(genpd);
1872
1873	if (genpd->cpu_data) {
1874		ret = -EEXIST;
1875		goto out;
1876	}
1877	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
1878	if (!cpu_data) {
1879		ret = -ENOMEM;
1880		goto out;
1881	}
1882	cpuidle_drv = cpuidle_driver_ref();
1883	if (!cpuidle_drv) {
1884		ret = -ENODEV;
1885		goto err_drv;
1886	}
1887	if (cpuidle_drv->state_count <= state) {
1888		ret = -EINVAL;
1889		goto err;
1890	}
1891	idle_state = &cpuidle_drv->states[state];
1892	if (!idle_state->disabled) {
1893		ret = -EAGAIN;
1894		goto err;
1895	}
1896	cpu_data->idle_state = idle_state;
1897	cpu_data->saved_exit_latency = idle_state->exit_latency;
1898	genpd->cpu_data = cpu_data;
1899	genpd_recalc_cpu_exit_latency(genpd);
1900
1901 out:
1902	genpd_release_lock(genpd);
1903	return ret;
1904
1905 err:
1906	cpuidle_driver_unref();
1907
1908 err_drv:
1909	kfree(cpu_data);
1910	goto out;
1911}
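/*
 * A minimal sketch: tie a hypothetical CPU domain to cpuidle state index 1.
 * The index must exist in the registered cpuidle driver and the state must
 * currently be disabled, or the call fails as coded above:
 *
 *	ret = pm_genpd_attach_cpuidle(&foo_cpu_pd, 1);
 *
 * The name-based wrapper below does the same after a domain lookup.
 */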
1912
1913/**
1914 * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
1915 * @name: Name of the domain to connect to cpuidle.
1916 * @state: cpuidle state this domain can manipulate.
1917 */
1918int pm_genpd_name_attach_cpuidle(const char *name, int state)
1919{
1920	return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
1921}
1922
1923/**
1924 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
1925 * @genpd: PM domain to remove the cpuidle connection from.
1926 *
1927 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
1928 * given PM domain.
1929 */
1930int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1931{
1932	struct gpd_cpu_data *cpu_data;
1933	struct cpuidle_state *idle_state;
1934	int ret = 0;
1935
1936	if (IS_ERR_OR_NULL(genpd))
1937		return -EINVAL;
1938
1939	genpd_acquire_lock(genpd);
1940
1941	cpu_data = genpd->cpu_data;
1942	if (!cpu_data) {
1943		ret = -ENODEV;
1944		goto out;
1945	}
1946	idle_state = cpu_data->idle_state;
1947	if (!idle_state->disabled) {
1948		ret = -EAGAIN;
1949		goto out;
1950	}
1951	idle_state->exit_latency = cpu_data->saved_exit_latency;
1952	cpuidle_driver_unref();
1953	genpd->cpu_data = NULL;
1954	kfree(cpu_data);
1955
1956 out:
1957	genpd_release_lock(genpd);
1958	return ret;
1959}
1960
1961/**
1962 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
1963 * @name: Name of the domain to disconnect cpuidle from.
1964 */
1965int pm_genpd_name_detach_cpuidle(const char *name)
1966{
1967	return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
1968}
1969
1970/* Default device callbacks for generic PM domains. */
1971
1972/**
1973 * pm_genpd_default_save_state - Default "save device state" for PM domains.
1974 * @dev: Device to handle.
1975 */
1976static int pm_genpd_default_save_state(struct device *dev)
1977{
1978	int (*cb)(struct device *__dev);
1979
1980	cb = dev_gpd_data(dev)->ops.save_state;
1981	if (cb)
1982		return cb(dev);
1983
1984	if (dev->type && dev->type->pm)
1985		cb = dev->type->pm->runtime_suspend;
1986	else if (dev->class && dev->class->pm)
1987		cb = dev->class->pm->runtime_suspend;
1988	else if (dev->bus && dev->bus->pm)
1989		cb = dev->bus->pm->runtime_suspend;
1990	else
1991		cb = NULL;
1992
1993	if (!cb && dev->driver && dev->driver->pm)
1994		cb = dev->driver->pm->runtime_suspend;
1995
1996	return cb ? cb(dev) : 0;
1997}
1998
1999/**
2000 * pm_genpd_default_restore_state - Default PM domains "restore device state".
2001 * @dev: Device to handle.
2002 */
2003static int pm_genpd_default_restore_state(struct device *dev)
2004{
2005	int (*cb)(struct device *__dev);
2006
2007	cb = dev_gpd_data(dev)->ops.restore_state;
2008	if (cb)
2009		return cb(dev);
2010
2011	if (dev->type && dev->type->pm)
2012		cb = dev->type->pm->runtime_resume;
2013	else if (dev->class && dev->class->pm)
2014		cb = dev->class->pm->runtime_resume;
2015	else if (dev->bus && dev->bus->pm)
2016		cb = dev->bus->pm->runtime_resume;
2017	else
2018		cb = NULL;
2019
2020	if (!cb && dev->driver && dev->driver->pm)
2021		cb = dev->driver->pm->runtime_resume;
2022
2023	return cb ? cb(dev) : 0;
2024}
2025
2026#ifdef CONFIG_PM_SLEEP
2027
2028/**
2029 * pm_genpd_default_suspend - Default "device suspend" for PM domains.
2030 * @dev: Device to handle.
2031 */
2032static int pm_genpd_default_suspend(struct device *dev)
2033{
2034	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
2035
2036	return cb ? cb(dev) : pm_generic_suspend(dev);
2037}
2038
2039/**
2040 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
2041 * @dev: Device to handle.
2042 */
2043static int pm_genpd_default_suspend_late(struct device *dev)
2044{
2045	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
2046
2047	return cb ? cb(dev) : pm_generic_suspend_late(dev);
2048}
2049
2050/**
2051 * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
2052 * @dev: Device to handle.
2053 */
2054static int pm_genpd_default_resume_early(struct device *dev)
2055{
2056	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
2057
2058	return cb ? cb(dev) : pm_generic_resume_early(dev);
2059}
2060
2061/**
2062 * pm_genpd_default_resume - Default "device resume" for PM domains.
2063 * @dev: Device to handle.
2064 */
2065static int pm_genpd_default_resume(struct device *dev)
2066{
2067	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
2068
2069	return cb ? cb(dev) : pm_generic_resume(dev);
2070}
2071
2072/**
2073 * pm_genpd_default_freeze - Default "device freeze" for PM domains.
2074 * @dev: Device to handle.
2075 */
2076static int pm_genpd_default_freeze(struct device *dev)
2077{
2078	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
2079
2080	return cb ? cb(dev) : pm_generic_freeze(dev);
2081}
2082
2083/**
2084 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
2085 * @dev: Device to handle.
2086 */
2087static int pm_genpd_default_freeze_late(struct device *dev)
2088{
2089	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
2090
2091	return cb ? cb(dev) : pm_generic_freeze_late(dev);
2092}
2093
2094/**
2095 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
2096 * @dev: Device to handle.
2097 */
2098static int pm_genpd_default_thaw_early(struct device *dev)
2099{
2100	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
2101
2102	return cb ? cb(dev) : pm_generic_thaw_early(dev);
2103}
2104
2105/**
2106 * pm_genpd_default_thaw - Default "device thaw" for PM domains.
2107 * @dev: Device to handle.
2108 */
2109static int pm_genpd_default_thaw(struct device *dev)
2110{
2111	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
2112
2113	return cb ? cb(dev) : pm_generic_thaw(dev);
2114}
2115
2116#else /* !CONFIG_PM_SLEEP */
2117
2118#define pm_genpd_default_suspend	NULL
2119#define pm_genpd_default_suspend_late	NULL
2120#define pm_genpd_default_resume_early	NULL
2121#define pm_genpd_default_resume		NULL
2122#define pm_genpd_default_freeze		NULL
2123#define pm_genpd_default_freeze_late	NULL
2124#define pm_genpd_default_thaw_early	NULL
2125#define pm_genpd_default_thaw		NULL
2126
2127#endif /* !CONFIG_PM_SLEEP */
2128
2129/**
2130 * pm_genpd_init - Initialize a generic I/O PM domain object.
2131 * @genpd: PM domain object to initialize.
2132 * @gov: PM domain governor to associate with the domain (may be NULL).
2133 * @is_off: Initial value of the domain's power_is_off field.
2134 */
2135void pm_genpd_init(struct generic_pm_domain *genpd,
2136		   struct dev_power_governor *gov, bool is_off)
2137{
2138	if (IS_ERR_OR_NULL(genpd))
2139		return;
2140
2141	INIT_LIST_HEAD(&genpd->master_links);
2142	INIT_LIST_HEAD(&genpd->slave_links);
2143	INIT_LIST_HEAD(&genpd->dev_list);
2144	mutex_init(&genpd->lock);
2145	genpd->gov = gov;
2146	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2147	genpd->in_progress = 0;
2148	atomic_set(&genpd->sd_count, 0);
2149	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
2150	init_waitqueue_head(&genpd->status_wait_queue);
2151	genpd->poweroff_task = NULL;
2152	genpd->resume_count = 0;
2153	genpd->device_count = 0;
2154	genpd->max_off_time_ns = -1;
2155	genpd->max_off_time_changed = true;
2156	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
2157	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
2158	genpd->domain.ops.prepare = pm_genpd_prepare;
2159	genpd->domain.ops.suspend = pm_genpd_suspend;
2160	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
2161	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
2162	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
2163	genpd->domain.ops.resume_early = pm_genpd_resume_early;
2164	genpd->domain.ops.resume = pm_genpd_resume;
2165	genpd->domain.ops.freeze = pm_genpd_freeze;
2166	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
2167	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
2168	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
2169	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
2170	genpd->domain.ops.thaw = pm_genpd_thaw;
2171	genpd->domain.ops.poweroff = pm_genpd_suspend;
2172	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
2173	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
2174	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
2175	genpd->domain.ops.restore_early = pm_genpd_resume_early;
2176	genpd->domain.ops.restore = pm_genpd_resume;
2177	genpd->domain.ops.complete = pm_genpd_complete;
2178	genpd->dev_ops.save_state = pm_genpd_default_save_state;
2179	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
2180	genpd->dev_ops.suspend = pm_genpd_default_suspend;
2181	genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
2182	genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
2183	genpd->dev_ops.resume = pm_genpd_default_resume;
2184	genpd->dev_ops.freeze = pm_genpd_default_freeze;
2185	genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
2186	genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
2187	genpd->dev_ops.thaw = pm_genpd_default_thaw;
2188	mutex_lock(&gpd_list_lock);
2189	list_add(&genpd->gpd_list_node, &gpd_list);
2190	mutex_unlock(&gpd_list_lock);
2191}
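/*
 * A minimal sketch of registering a domain; foo_pd_power_on() and
 * foo_pd_power_off() stand in for the platform's actual register pokes:
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo_pd",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	pm_genpd_init(&foo_pd, NULL, true);
 *
 * is_off == true tells genpd the hardware starts powered down; a NULL
 * governor means no domain-specific power-down policy is applied.
 */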