// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
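
/*
 * For illustration (editorial note, not part of the original source): with
 * dev_ops.stop populated, GENPD_DEV_CALLBACK(genpd, int, stop, dev) behaves
 * roughly like "genpd->dev_ops.stop ? genpd->dev_ops.stop(dev) : 0", i.e. it
 * invokes the per-domain device callback when one is set and otherwise
 * evaluates to a zero of the requested type.
 */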

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)

static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain whose
	 * callbacks are allowed to sleep. This indicates a suboptimal
	 * configuration for PM, but it doesn't matter for an always on domain.
	 */
	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
		return ret;

	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	struct dentry *d;

	if (!genpd_debugfs_dir)
		return;

	d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
	debugfs_remove(d);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	u64 delta, now;

	now = ktime_get_mono_fast_ns();
	if (now <= genpd->accounting_time)
		return;

	delta = now - genpd->accounting_time;

	/*
	 * If genpd->status is active, it means we are just out of off,
	 * so update the idle time and vice versa.
	 */
	if (genpd->status == GENPD_STATE_ON)
		genpd->states[genpd->state_idx].idle_time += delta;
	else
		genpd->on_time += delta;

	genpd->accounting_time = now;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take vote from powered-off sub-domains into account
	 * as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}

static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *parent;
	struct gpd_link *link;
	int parent_state, ret;

	if (state == genpd->performance_state)
		return 0;

	/* Propagate to parents of genpd */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		parent = link->parent;

		/* Find parent's performance state */
		ret = genpd_xlate_performance_state(genpd, parent, state);
		if (unlikely(ret < 0))
			goto err;

		parent_state = ret;

		genpd_lock_nested(parent, depth + 1);

		link->prev_performance_state = link->performance_state;
		link->performance_state = parent_state;
		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(parent);

		if (ret)
			goto err;
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret)
			goto err;
	}

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, let's roll back */
	list_for_each_entry_continue_reverse(link, &genpd->child_links,
					     child_node) {
		parent = link->parent;

		genpd_lock_nested(parent, depth + 1);

		parent_state = link->prev_performance_state;
		link->performance_state = parent_state;

		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       parent->name, parent_state);
		}

		genpd_unlock(parent);
	}

	return ret;
}

static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}

static int genpd_drop_performance_state(struct device *dev)
{
	unsigned int prev_state = dev_gpd_data(dev)->performance_state;

	if (!genpd_set_performance_state(dev, 0))
		return prev_state;

	return 0;
}

static void genpd_restore_performance_state(struct device *dev,
					    unsigned int state)
{
	if (state)
		genpd_set_performance_state(dev, state);
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when the
 *	   device doesn't have any performance state constraints left (and so
 *	   the device no longer participates in determining the target
 *	   performance state of the genpd).
 *
 * It is assumed that the caller guarantees that the genpd won't be detached
 * while this routine is being called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
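
/*
 * Example (illustrative, not part of the original file): a consumer driver
 * attached to a genpd that supports performance states might request a state
 * for the duration of a use case and drop the request afterwards. "pstate"
 * is a hypothetical value, typically derived from an OPP via the pm_opp APIs:
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, pstate);
 *	if (ret)
 *		return ret;
 *	...
 *	dev_pm_genpd_set_performance_state(dev, 0);
 */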

/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform of the next wakeup. It's assumed that the caller
 * guarantees that the genpd won't be detached while this routine is being
 * called. Additionally, it's also assumed that @dev isn't runtime suspended
 * (RPM_SUSPENDED).
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
	if (td)
		td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
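
/*
 * Example (illustrative, not part of the original file): a driver that knows
 * its next interrupt is due in, say, 10 ms can hint the governor accordingly:
 *
 *	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 10));
 *
 * The governor may then avoid entering a deep domain state whose enter/exit
 * latency would not pay off before the wakeup.
 */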

/**
 * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
 * @dev: A device that is attached to the genpd.
 *
 * This routine should typically be called for a device, at the point when a
 * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
 *
 * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
 * valid value has been set.
 */
ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return KTIME_MAX;

	if (genpd->gd)
		return genpd->gd->next_hrtimer;

	return KTIME_MAX;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;
	int ret;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
			genpd_is_rpm_always_on(genpd) ||
			atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	/*
	 * The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that there's no need for
	 * additional locking, as powering on a child requires the parent's
	 * lock to be acquired first.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return -EBUSY;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	ret = _genpd_power_off(genpd, true);
	if (ret) {
		genpd->states[genpd->state_idx].rejected++;
		return ret;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->child_links,
					child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
		struct pm_domain_data *pdd;
		struct gpd_timing_data *td;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			td = to_gpd_data(pdd)->td;
			if (td) {
				td->constraint_changed = true;
				genpd = dev_to_genpd(dev);
			}
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->gd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (td && runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (td && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool timed = td && pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non IRQ safe domain that holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		goto out;

	genpd_lock(genpd);
	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	if (timed)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
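
/*
 * Note (editorial, not part of the original file): passing "pd_ignore_unused"
 * on the kernel command line keeps otherwise-unused power domains powered on
 * at boot, which can be handy when debugging a driver that fails because its
 * domain was switched off by genpd_power_off_unused() below.
 */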

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Check that the children are in their deepest (powered-off) state. */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return;
	}

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GENPD_STATE_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to suspend.
 * @suspend_noirq: Generic suspend_noirq callback.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev,
				int (*suspend_noirq)(struct device *dev),
				int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = suspend_noirq(dev);
	if (ret)
		return ret;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_suspend_noirq,
				    pm_generic_resume_noirq);
}

/**
 * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_finish_resume(struct device *dev,
			       int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	/*
	 * Invoke the resume_noirq callback that was passed in, rather than
	 * hard-coding pm_generic_resume_noirq(), so that the thaw and restore
	 * paths use their own callbacks.
	 */
	return resume_noirq(dev);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_resume_noirq);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_freeze_noirq,
				    pm_generic_thaw_noirq);
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_poweroff_noirq,
				    pm_generic_restore_noirq);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_restore_noirq);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}

/**
 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
 * @dev: The device that is attached to the genpd, that can be suspended.
 *
 * This routine should typically be called for a device that needs to be
 * suspended during the syscore suspend phase. It may also be called during
 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_suspend(struct device *dev)
{
	genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);

/**
 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
 * @dev: The device that is attached to the genpd, which needs to be resumed.
 *
 * This routine should typically be called for a device that needs to be resumed
 * during the syscore resume phase. It may also be called during suspend-to-idle
 * to resume a corresponding CPU device that is attached to a genpd.
 */
void dev_pm_genpd_resume(struct device *dev)
{
	genpd_switch_state(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
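
/*
 * Example (illustrative, not part of the original file): a syscore-style
 * driver for a device attached to a genpd might wrap its suspend/resume hooks
 * around these helpers; foo_dev, foo_save() and foo_restore() are
 * hypothetical:
 *
 *	static int foo_suspend(void)
 *	{
 *		foo_save();
 *		dev_pm_genpd_suspend(foo_dev);
 *		return 0;
 *	}
 *
 *	static void foo_resume(void)
 *	{
 *		dev_pm_genpd_resume(foo_dev);
 *		foo_restore();
 *	}
 */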

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
							   bool has_governor)
{
	struct generic_pm_domain_data *gpd_data;
	struct gpd_timing_data *td;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	/* Allocate data used by a governor. */
	if (has_governor) {
		td = kzalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			ret = -ENOMEM;
			goto err_free;
		}

		td->constraint_changed = true;
		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
		td->next_wakeup = KTIME_MAX;
		gpd_data->td = td;
	}

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data)
		ret = -EINVAL;
	else
		dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	if (ret)
		goto err_free;

	return gpd_data;

 err_free:
	kfree(gpd_data->td);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data->td);
	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct genpd_governor_data *gd = genpd->gd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, gd);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	if (gd)
		gd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
 out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);
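
/*
 * Example (illustrative, not part of the original file): a power-domain
 * provider typically initializes its domain and then attaches devices to it;
 * "my_genpd" and "my_dev" are hypothetical:
 *
 *	my_genpd.name = "my_pd";
 *	ret = pm_genpd_init(&my_genpd, &simple_qos_governor, true);
 *	if (ret)
 *		return ret;
 *
 *	ret = pm_genpd_add_device(&my_genpd, my_dev);
 *
 * On failure the device is left untouched; on success its runtime PM is
 * managed through the domain.
 */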

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	if (genpd->gd)
		genpd->gd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

/**
 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
 *
 * @dev: Device that should be associated with the notifier
 * @nb: The notifier block to register
 *
 * Users may call this function to add a genpd power on/off notifier for an
 * attached @dev. Only one notifier per device is allowed. The notifier is
 * sent when genpd is powering on/off the PM domain.
 *
 * It is assumed that the user guarantees that the genpd won't be detached
 * while this routine is being called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->power_nb)
		return -EEXIST;

	genpd_lock(genpd);
	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to add notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = nb;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
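
/*
 * Example (illustrative, not part of the original file): a driver can react
 * to its domain being powered on/off by registering a notifier;
 * "foo_genpd_cb" and "foo_nb" are hypothetical:
 *
 *	static int foo_genpd_cb(struct notifier_block *nb,
 *				unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case GENPD_NOTIFY_PRE_OFF:
 *			// save context before the domain loses power
 *			break;
 *		case GENPD_NOTIFY_ON:
 *			// restore context after power is back
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = { .notifier_call = foo_genpd_cb };
 *	...
 *	ret = dev_pm_genpd_add_notifier(dev, &foo_nb);
 */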

/**
 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
 *
 * @dev: Device that is associated with the notifier
 *
 * Users may call this function to remove a genpd power on/off notifier for an
 * attached @dev.
 *
 * It is assumed that the user guarantees that the genpd won't be detached
 * while this routine is being called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_remove_notifier(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (!gpd_data->power_nb)
		return -ENODEV;

	genpd_lock(genpd);
	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
					    gpd_data->power_nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
				genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
		if (itr->child == subdomain && itr->parent == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->parent = genpd;
	list_add_tail(&link->parent_node, &genpd->parent_links);
	link->child = subdomain;
	list_add_tail(&link->child_node, &subdomain->child_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Leader PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
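
/*
 * Example (illustrative, not part of the original file): modelling a parent
 * domain that must be on whenever its child is on; "parent_pd" and "child_pd"
 * are hypothetical, already initialized domains:
 *
 *	ret = pm_genpd_add_subdomain(&parent_pd, &child_pd);
 *
 * After this, powering on child_pd forces parent_pd on first, and parent_pd
 * can only be powered off once child_pd is off.
 */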

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Leader PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n",
			genpd->name, subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
		if (link->child != subdomain)
			continue;

		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1938
1939static void genpd_free_default_power_state(struct genpd_power_state *states,
1940					   unsigned int state_count)
1941{
1942	kfree(states);
1943}
1944
1945static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1946{
1947	struct genpd_power_state *state;
1948
1949	state = kzalloc(sizeof(*state), GFP_KERNEL);
1950	if (!state)
1951		return -ENOMEM;
1952
1953	genpd->states = state;
1954	genpd->state_count = 1;
1955	genpd->free_states = genpd_free_default_power_state;
1956
1957	return 0;
1958}
1959
1960static int genpd_alloc_data(struct generic_pm_domain *genpd)
1961{
1962	struct genpd_governor_data *gd = NULL;
1963	int ret;
1964
1965	if (genpd_is_cpu_domain(genpd) &&
1966	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
1967		return -ENOMEM;
1968
1969	if (genpd->gov) {
1970		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
1971		if (!gd) {
1972			ret = -ENOMEM;
1973			goto free;
1974		}
1975
1976		gd->max_off_time_ns = -1;
1977		gd->max_off_time_changed = true;
1978		gd->next_wakeup = KTIME_MAX;
1979		gd->next_hrtimer = KTIME_MAX;
1980	}
1981
1982	/* Use only one "off" state if there were no states declared */
1983	if (genpd->state_count == 0) {
1984		ret = genpd_set_default_power_state(genpd);
1985		if (ret)
1986			goto free;
1987	}
1988
1989	genpd->gd = gd;
1990	return 0;
1991
1992free:
1993	if (genpd_is_cpu_domain(genpd))
1994		free_cpumask_var(genpd->cpus);
1995	kfree(gd);
1996	return ret;
1997}
1998
1999static void genpd_free_data(struct generic_pm_domain *genpd)
2000{
2001	if (genpd_is_cpu_domain(genpd))
2002		free_cpumask_var(genpd->cpus);
2003	if (genpd->free_states)
2004		genpd->free_states(genpd->states, genpd->state_count);
2005	kfree(genpd->gd);
2006}
2007
2008static void genpd_lock_init(struct generic_pm_domain *genpd)
2009{
2010	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
2011		spin_lock_init(&genpd->slock);
2012		genpd->lock_ops = &genpd_spin_ops;
2013	} else {
2014		mutex_init(&genpd->mlock);
2015		genpd->lock_ops = &genpd_mtx_ops;
2016	}
2017}
2018
2019/**
2020 * pm_genpd_init - Initialize a generic I/O PM domain object.
2021 * @genpd: PM domain object to initialize.
2022 * @gov: PM domain governor to associate with the domain (may be NULL).
2023 * @is_off: Initial value of the domain's power_is_off field.
2024 *
2025 * Returns 0 on successful initialization, else a negative error code.
2026 */
2027int pm_genpd_init(struct generic_pm_domain *genpd,
2028		  struct dev_power_governor *gov, bool is_off)
2029{
2030	int ret;
2031
2032	if (IS_ERR_OR_NULL(genpd))
2033		return -EINVAL;
2034
2035	INIT_LIST_HEAD(&genpd->parent_links);
2036	INIT_LIST_HEAD(&genpd->child_links);
2037	INIT_LIST_HEAD(&genpd->dev_list);
2038	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
2039	genpd_lock_init(genpd);
2040	genpd->gov = gov;
2041	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2042	atomic_set(&genpd->sd_count, 0);
2043	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
2044	genpd->device_count = 0;
2045	genpd->provider = NULL;
2046	genpd->has_provider = false;
2047	genpd->accounting_time = ktime_get_mono_fast_ns();
2048	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2049	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
2050	genpd->domain.ops.prepare = genpd_prepare;
2051	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2052	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2053	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2054	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2055	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2056	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2057	genpd->domain.ops.complete = genpd_complete;
2058	genpd->domain.start = genpd_dev_pm_start;
2059
2060	if (genpd->flags & GENPD_FLAG_PM_CLK) {
2061		genpd->dev_ops.stop = pm_clk_suspend;
2062		genpd->dev_ops.start = pm_clk_resume;
2063	}
2064
2065	/* The always-on governor works better with the corresponding flag. */
2066	if (gov == &pm_domain_always_on_gov)
2067		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2068
2069	/* Always-on domains must be powered on at initialization. */
2070	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2071			!genpd_status_on(genpd)) {
2072		pr_err("always-on PM domain %s is not on\n", genpd->name);
2073		return -EINVAL;
2074	}
2075
2076	/* Multiple states but no governor doesn't make sense. */
2077	if (!gov && genpd->state_count > 1)
2078		pr_warn("%s: no governor for states\n", genpd->name);
2079
2080	ret = genpd_alloc_data(genpd);
2081	if (ret)
2082		return ret;
2083
2084	device_initialize(&genpd->dev);
2085	dev_set_name(&genpd->dev, "%s", genpd->name);
2086
2087	mutex_lock(&gpd_list_lock);
2088	list_add(&genpd->gpd_list_node, &gpd_list);
2089	mutex_unlock(&gpd_list_lock);
2090	genpd_debug_add(genpd);
2091
2092	return 0;
2093}
2094EXPORT_SYMBOL_GPL(pm_genpd_init);
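/*
 * Illustrative sketch, not part of this file: a minimal platform power
 * domain with on/off callbacks. foo_pmic_switch() is a hypothetical helper
 * that flips the power switch backing the domain; the governor is left NULL
 * and the domain starts in the "off" state.
 */
#if 0	/* example only */
static int foo_pd_power_on(struct generic_pm_domain *domain)
{
	return foo_pmic_switch(domain->name, true);
}

static int foo_pd_power_off(struct generic_pm_domain *domain)
{
	return foo_pmic_switch(domain->name, false);
}

static struct generic_pm_domain foo_pd = {
	.name = "foo-pd",
	.power_on = foo_pd_power_on,
	.power_off = foo_pd_power_off,
};

static int foo_pd_probe(struct platform_device *pdev)
{
	return pm_genpd_init(&foo_pd, NULL, true);
}
#endif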
2095
2096static int genpd_remove(struct generic_pm_domain *genpd)
2097{
2098	struct gpd_link *l, *link;
2099
2100	if (IS_ERR_OR_NULL(genpd))
2101		return -EINVAL;
2102
2103	genpd_lock(genpd);
2104
2105	if (genpd->has_provider) {
2106		genpd_unlock(genpd);
2107		pr_err("Provider present, unable to remove %s\n", genpd->name);
2108		return -EBUSY;
2109	}
2110
2111	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2112		genpd_unlock(genpd);
2113		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
2114		return -EBUSY;
2115	}
2116
2117	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2118		list_del(&link->parent_node);
2119		list_del(&link->child_node);
2120		kfree(link);
2121	}
2122
2123	list_del(&genpd->gpd_list_node);
2124	genpd_unlock(genpd);
2125	genpd_debug_remove(genpd);
2126	cancel_work_sync(&genpd->power_off_work);
2127	genpd_free_data(genpd);
2128
2129	pr_debug("%s: removed %s\n", __func__, genpd->name);
2130
2131	return 0;
2132}
2133
2134/**
2135 * pm_genpd_remove - Remove a generic I/O PM domain
2136 * @genpd: Pointer to PM domain that is to be removed.
2137 *
2138 * To remove the PM domain, this function:
2139 *  - Removes the PM domain as a subdomain to any parent domains,
2140 *    if it was added.
2141 *  - Removes the PM domain from the list of registered PM domains.
2142 *
2143 * The PM domain will only be removed if the associated provider has
2144 * been removed, it is not a parent to any other PM domain, and it has
2145 * no devices associated with it.
2146 */
2147int pm_genpd_remove(struct generic_pm_domain *genpd)
2148{
2149	int ret;
2150
2151	mutex_lock(&gpd_list_lock);
2152	ret = genpd_remove(genpd);
2153	mutex_unlock(&gpd_list_lock);
2154
2155	return ret;
2156}
2157EXPORT_SYMBOL_GPL(pm_genpd_remove);
2158
2159#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2160
2161/*
2162 * Device Tree based PM domain providers.
2163 *
2164 * The code below implements generic device tree based PM domain providers that
2165 * bind device tree nodes with generic PM domains registered in the system.
2166 *
2167 * Any driver that registers generic PM domains and needs to support binding of
2168 * devices to these domains is supposed to register a PM domain provider, which
2169 * maps a PM domain specifier retrieved from the device tree to a PM domain.
2170 *
2171 * Two simple mapping functions have been provided for convenience:
2172 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2173 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2174 *    index.
2175 */
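/*
 * For reference, a hedged device tree fragment (all names are made up): a
 * provider node with #power-domain-cells = <1> and a consumer referencing
 * one of its domains by index, which genpd_xlate_onecell() below resolves
 * into a struct generic_pm_domain:
 *
 *	power: power-controller@12340000 {
 *		compatible = "vendor,foo-power-controller";
 *		reg = <0x12340000 0x1000>;
 *		#power-domain-cells = <1>;
 *	};
 *
 *	gpu@50000000 {
 *		...
 *		power-domains = <&power 2>;
 *	};
 */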
2176
2177/**
2178 * struct of_genpd_provider - PM domain provider registration structure
2179 * @link: Entry in global list of PM domain providers
2180 * @node: Pointer to device tree node of PM domain provider
2181 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2182 *         into a PM domain.
2183 * @data: context pointer to be passed into @xlate callback
2184 */
2185struct of_genpd_provider {
2186	struct list_head link;
2187	struct device_node *node;
2188	genpd_xlate_t xlate;
2189	void *data;
2190};
2191
2192/* List of registered PM domain providers. */
2193static LIST_HEAD(of_genpd_providers);
2194/* Mutex to protect the list above. */
2195static DEFINE_MUTEX(of_genpd_mutex);
2196
2197/**
2198 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2199 * @genpdspec: OF phandle args to map into a PM domain
2200 * @data: xlate function private data - pointer to struct generic_pm_domain
2201 *
2202 * This is a generic xlate function that can be used to model PM domains that
2203 * have their own device tree nodes. The private data of xlate function needs
2204 * to be a valid pointer to struct generic_pm_domain.
2205 */
2206static struct generic_pm_domain *genpd_xlate_simple(
2207					struct of_phandle_args *genpdspec,
2208					void *data)
2209{
2210	return data;
2211}
2212
2213/**
2214 * genpd_xlate_onecell() - Xlate function using a single index.
2215 * @genpdspec: OF phandle args to map into a PM domain
2216 * @data: xlate function private data - pointer to struct genpd_onecell_data
2217 *
2218 * This is a generic xlate function that can be used to model simple PM domain
2219 * controllers that have one device tree node and provide multiple PM domains.
2220 * A single cell is used as an index into an array of PM domains specified in
2221 * the genpd_onecell_data struct when registering the provider.
2222 */
2223static struct generic_pm_domain *genpd_xlate_onecell(
2224					struct of_phandle_args *genpdspec,
2225					void *data)
2226{
2227	struct genpd_onecell_data *genpd_data = data;
2228	unsigned int idx = genpdspec->args[0];
2229
2230	if (genpdspec->args_count != 1)
2231		return ERR_PTR(-EINVAL);
2232
2233	if (idx >= genpd_data->num_domains) {
2234		pr_err("%s: invalid domain index %u\n", __func__, idx);
2235		return ERR_PTR(-EINVAL);
2236	}
2237
2238	if (!genpd_data->domains[idx])
2239		return ERR_PTR(-ENOENT);
2240
2241	return genpd_data->domains[idx];
2242}
2243
2244/**
2245 * genpd_add_provider() - Register a PM domain provider for a node
2246 * @np: Device node pointer associated with the PM domain provider.
2247 * @xlate: Callback for decoding PM domain from phandle arguments.
2248 * @data: Context pointer for @xlate callback.
2249 */
2250static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2251			      void *data)
2252{
2253	struct of_genpd_provider *cp;
2254
2255	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2256	if (!cp)
2257		return -ENOMEM;
2258
2259	cp->node = of_node_get(np);
2260	cp->data = data;
2261	cp->xlate = xlate;
2262	fwnode_dev_initialized(&np->fwnode, true);
2263
2264	mutex_lock(&of_genpd_mutex);
2265	list_add(&cp->link, &of_genpd_providers);
2266	mutex_unlock(&of_genpd_mutex);
2267	pr_debug("Added domain provider from %pOF\n", np);
2268
2269	return 0;
2270}
2271
2272static bool genpd_present(const struct generic_pm_domain *genpd)
2273{
2274	bool ret = false;
2275	const struct generic_pm_domain *gpd;
2276
2277	mutex_lock(&gpd_list_lock);
2278	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2279		if (gpd == genpd) {
2280			ret = true;
2281			break;
2282		}
2283	}
2284	mutex_unlock(&gpd_list_lock);
2285
2286	return ret;
2287}
2288
2289/**
2290 * of_genpd_add_provider_simple() - Register a simple PM domain provider
2291 * @np: Device node pointer associated with the PM domain provider.
2292 * @genpd: Pointer to PM domain associated with the PM domain provider.
2293 */
2294int of_genpd_add_provider_simple(struct device_node *np,
2295				 struct generic_pm_domain *genpd)
2296{
2297	int ret;
2298
2299	if (!np || !genpd)
2300		return -EINVAL;
2301
2302	if (!genpd_present(genpd))
2303		return -EINVAL;
2304
2305	genpd->dev.of_node = np;
2306
2307	/* Parse genpd OPP table */
2308	if (genpd->set_performance_state) {
2309		ret = dev_pm_opp_of_add_table(&genpd->dev);
2310		if (ret)
2311			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2312
2313		/*
2314		 * Save table for faster processing while setting performance
2315		 * state.
2316		 */
2317		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2318		WARN_ON(IS_ERR(genpd->opp_table));
2319	}
2320
2321	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2322	if (ret) {
2323		if (genpd->set_performance_state) {
2324			dev_pm_opp_put_opp_table(genpd->opp_table);
2325			dev_pm_opp_of_remove_table(&genpd->dev);
2326		}
2327
2328		return ret;
2329	}
2330
2331	genpd->provider = &np->fwnode;
2332	genpd->has_provider = true;
2333
2334	return 0;
2335}
2336EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
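/*
 * Illustrative sketch, not part of this file: registering a single PM
 * domain as a simple provider from a platform driver probe. The driver and
 * domain names are hypothetical; the provider node is the driver's own
 * of_node with #power-domain-cells = <0>.
 */
#if 0	/* example only */
static struct generic_pm_domain foo_simple_pd = {
	.name = "foo-domain",
};

static int foo_provider_probe(struct platform_device *pdev)
{
	int ret;

	ret = pm_genpd_init(&foo_simple_pd, NULL, false);
	if (ret)
		return ret;

	/* One provider node maps 1:1 onto one PM domain. */
	return of_genpd_add_provider_simple(pdev->dev.of_node, &foo_simple_pd);
}
#endif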
2337
2338/**
2339 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2340 * @np: Device node pointer associated with the PM domain provider.
2341 * @data: Pointer to the data associated with the PM domain provider.
2342 */
2343int of_genpd_add_provider_onecell(struct device_node *np,
2344				  struct genpd_onecell_data *data)
2345{
2346	struct generic_pm_domain *genpd;
2347	unsigned int i;
2348	int ret = -EINVAL;
2349
2350	if (!np || !data)
2351		return -EINVAL;
2352
2353	if (!data->xlate)
2354		data->xlate = genpd_xlate_onecell;
2355
2356	for (i = 0; i < data->num_domains; i++) {
2357		genpd = data->domains[i];
2358
2359		if (!genpd)
2360			continue;
2361		if (!genpd_present(genpd))
2362			goto error;
2363
2364		genpd->dev.of_node = np;
2365
2366		/* Parse genpd OPP table */
2367		if (genpd->set_performance_state) {
2368			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2369			if (ret) {
2370				dev_err_probe(&genpd->dev, ret,
2371					      "Failed to add OPP table for index %d\n", i);
2372				goto error;
2373			}
2374
2375			/*
2376			 * Save table for faster processing while setting
2377			 * performance state.
2378			 */
2379			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2380			WARN_ON(IS_ERR(genpd->opp_table));
2381		}
2382
2383		genpd->provider = &np->fwnode;
2384		genpd->has_provider = true;
2385	}
2386
2387	ret = genpd_add_provider(np, data->xlate, data);
2388	if (ret < 0)
2389		goto error;
2390
2391	return 0;
2392
2393error:
2394	while (i--) {
2395		genpd = data->domains[i];
2396
2397		if (!genpd)
2398			continue;
2399
2400		genpd->provider = NULL;
2401		genpd->has_provider = false;
2402
2403		if (genpd->set_performance_state) {
2404			dev_pm_opp_put_opp_table(genpd->opp_table);
2405			dev_pm_opp_of_remove_table(&genpd->dev);
2406		}
2407	}
2408
2409	return ret;
2410}
2411EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
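/*
 * Illustrative sketch, not part of this file: a provider exposing several
 * PM domains through a single device tree node, indexed by one specifier
 * cell. The array contents and names are hypothetical.
 */
#if 0	/* example only */
static struct generic_pm_domain foo_pd_a = { .name = "foo-a" };
static struct generic_pm_domain foo_pd_b = { .name = "foo-b" };

static struct generic_pm_domain *foo_domains[] = {
	[0] = &foo_pd_a,
	[1] = &foo_pd_b,
};

static struct genpd_onecell_data foo_onecell_data = {
	.domains = foo_domains,
	.num_domains = ARRAY_SIZE(foo_domains),
};

static int foo_onecell_probe(struct platform_device *pdev)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(foo_domains); i++) {
		ret = pm_genpd_init(foo_domains[i], NULL, false);
		if (ret)
			return ret;
	}

	/* Consumers then use "power-domains = <&provider index>;". */
	return of_genpd_add_provider_onecell(pdev->dev.of_node,
					     &foo_onecell_data);
}
#endif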
2412
2413/**
2414 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2415 * @np: Device node pointer associated with the PM domain provider
2416 */
2417void of_genpd_del_provider(struct device_node *np)
2418{
2419	struct of_genpd_provider *cp, *tmp;
2420	struct generic_pm_domain *gpd;
2421
2422	mutex_lock(&gpd_list_lock);
2423	mutex_lock(&of_genpd_mutex);
2424	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2425		if (cp->node == np) {
2426			/*
2427			 * For each PM domain associated with the
2428			 * provider, set the 'has_provider' to false
2429			 * so that the PM domain can be safely removed.
2430			 */
2431			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2432				if (gpd->provider == &np->fwnode) {
2433					gpd->has_provider = false;
2434
2435					if (!gpd->set_performance_state)
2436						continue;
2437
2438					dev_pm_opp_put_opp_table(gpd->opp_table);
2439					dev_pm_opp_of_remove_table(&gpd->dev);
2440				}
2441			}
2442
2443			fwnode_dev_initialized(&cp->node->fwnode, false);
2444			list_del(&cp->link);
2445			of_node_put(cp->node);
2446			kfree(cp);
2447			break;
2448		}
2449	}
2450	mutex_unlock(&of_genpd_mutex);
2451	mutex_unlock(&gpd_list_lock);
2452}
2453EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2454
2455/**
2456 * genpd_get_from_provider() - Look-up PM domain
2457 * @genpdspec: OF phandle args to use for look-up
2458 *
2459 * Looks for a PM domain provider under the node specified by @genpdspec and if
2460 * found, uses xlate function of the provider to map phandle args to a PM
2461 * domain.
2462 *
2463 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2464 * on failure.
2465 */
2466static struct generic_pm_domain *genpd_get_from_provider(
2467					struct of_phandle_args *genpdspec)
2468{
2469	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2470	struct of_genpd_provider *provider;
2471
2472	if (!genpdspec)
2473		return ERR_PTR(-EINVAL);
2474
2475	mutex_lock(&of_genpd_mutex);
2476
2477	/* Check if we have such a provider in our array */
2478	list_for_each_entry(provider, &of_genpd_providers, link) {
2479		if (provider->node == genpdspec->np)
2480			genpd = provider->xlate(genpdspec, provider->data);
2481		if (!IS_ERR(genpd))
2482			break;
2483	}
2484
2485	mutex_unlock(&of_genpd_mutex);
2486
2487	return genpd;
2488}
2489
2490/**
2491 * of_genpd_add_device() - Add a device to an I/O PM domain
2492 * @genpdspec: OF phandle args to use for look-up PM domain
2493 * @dev: Device to be added.
2494 *
2495 * Looks up an I/O PM domain based upon the phandle args provided and adds
2496 * the device to the PM domain. Returns a negative error code on failure.
2497 */
2498int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2499{
2500	struct generic_pm_domain *genpd;
2501	int ret;
2502
2503	mutex_lock(&gpd_list_lock);
2504
2505	genpd = genpd_get_from_provider(genpdspec);
2506	if (IS_ERR(genpd)) {
2507		ret = PTR_ERR(genpd);
2508		goto out;
2509	}
2510
2511	ret = genpd_add_device(genpd, dev, dev);
2512
2513out:
2514	mutex_unlock(&gpd_list_lock);
2515
2516	return ret;
2517}
2518EXPORT_SYMBOL_GPL(of_genpd_add_device);
2519
2520/**
2521 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2522 * @parent_spec: OF phandle args to use for parent PM domain look-up
2523 * @subdomain_spec: OF phandle args to use for subdomain look-up
2524 *
2525 * Looks up a parent PM domain and subdomain based upon the phandle args
2526 * provided and adds the subdomain to the parent PM domain. Returns a
2527 * negative error code on failure.
2528 */
2529int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2530			   struct of_phandle_args *subdomain_spec)
2531{
2532	struct generic_pm_domain *parent, *subdomain;
2533	int ret;
2534
2535	mutex_lock(&gpd_list_lock);
2536
2537	parent = genpd_get_from_provider(parent_spec);
2538	if (IS_ERR(parent)) {
2539		ret = PTR_ERR(parent);
2540		goto out;
2541	}
2542
2543	subdomain = genpd_get_from_provider(subdomain_spec);
2544	if (IS_ERR(subdomain)) {
2545		ret = PTR_ERR(subdomain);
2546		goto out;
2547	}
2548
2549	ret = genpd_add_subdomain(parent, subdomain);
2550
2551out:
2552	mutex_unlock(&gpd_list_lock);
2553
2554	return ret == -ENOENT ? -EPROBE_DEFER : ret;
2555}
2556EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2557
2558/**
2559 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2560 * @parent_spec: OF phandle args to use for parent PM domain look-up
2561 * @subdomain_spec: OF phandle args to use for subdomain look-up
2562 *
2563 * Looks up a parent PM domain and subdomain based upon the phandle args
2564 * provided and removes the subdomain from the parent PM domain. Returns a
2565 * negative error code on failure.
2566 */
2567int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
2568			      struct of_phandle_args *subdomain_spec)
2569{
2570	struct generic_pm_domain *parent, *subdomain;
2571	int ret;
2572
2573	mutex_lock(&gpd_list_lock);
2574
2575	parent = genpd_get_from_provider(parent_spec);
2576	if (IS_ERR(parent)) {
2577		ret = PTR_ERR(parent);
2578		goto out;
2579	}
2580
2581	subdomain = genpd_get_from_provider(subdomain_spec);
2582	if (IS_ERR(subdomain)) {
2583		ret = PTR_ERR(subdomain);
2584		goto out;
2585	}
2586
2587	ret = pm_genpd_remove_subdomain(parent, subdomain);
2588
2589out:
2590	mutex_unlock(&gpd_list_lock);
2591
2592	return ret;
2593}
2594EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2595
2596/**
2597 * of_genpd_remove_last - Remove the last PM domain registered for a provider
2598 * @np: Pointer to device node associated with provider
2599 *
2600 * Find the last PM domain that was added by a particular provider and
2601 * remove this PM domain from the list of PM domains. The provider is
2602 * identified by the device node that is passed. The PM domain will
2603 * only be removed if the provider associated with the domain has
2604 * been removed.
2605 *
2606 * Returns a valid pointer to struct generic_pm_domain on success or
2607 * ERR_PTR() on failure.
2608 */
2609struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2610{
2611	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2612	int ret;
2613
2614	if (IS_ERR_OR_NULL(np))
2615		return ERR_PTR(-EINVAL);
2616
2617	mutex_lock(&gpd_list_lock);
2618	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2619		if (gpd->provider == &np->fwnode) {
2620			ret = genpd_remove(gpd);
2621			genpd = ret ? ERR_PTR(ret) : gpd;
2622			break;
2623		}
2624	}
2625	mutex_unlock(&gpd_list_lock);
2626
2627	return genpd;
2628}
2629EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2630
2631static void genpd_release_dev(struct device *dev)
2632{
2633	of_node_put(dev->of_node);
2634	kfree(dev);
2635}
2636
2637static struct bus_type genpd_bus_type = {
2638	.name		= "genpd",
2639};
2640
2641/**
2642 * genpd_dev_pm_detach - Detach a device from its PM domain.
2643 * @dev: Device to detach.
2644 * @power_off: Currently not used
2645 *
2646 * Try to locate a corresponding generic PM domain, which the device was
2647 * attached to previously. If such is found, the device is detached from it.
2648 */
2649static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2650{
2651	struct generic_pm_domain *pd;
2652	unsigned int i;
2653	int ret = 0;
2654
2655	pd = dev_to_genpd(dev);
2656	if (IS_ERR(pd))
2657		return;
2658
2659	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2660
2661	/* Drop the default performance state */
2662	if (dev_gpd_data(dev)->default_pstate) {
2663		dev_pm_genpd_set_performance_state(dev, 0);
2664		dev_gpd_data(dev)->default_pstate = 0;
2665	}
2666
2667	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2668		ret = genpd_remove_device(pd, dev);
2669		if (ret != -EAGAIN)
2670			break;
2671
2672		mdelay(i);
2673		cond_resched();
2674	}
2675
2676	if (ret < 0) {
2677		dev_err(dev, "failed to remove from PM domain %s: %d",
2678			pd->name, ret);
2679		return;
2680	}
2681
2682	/* Check if PM domain can be powered off after removing this device. */
2683	genpd_queue_power_off_work(pd);
2684
2685	/* Unregister the device if it was created by genpd. */
2686	if (dev->bus == &genpd_bus_type)
2687		device_unregister(dev);
2688}
2689
2690static void genpd_dev_pm_sync(struct device *dev)
2691{
2692	struct generic_pm_domain *pd;
2693
2694	pd = dev_to_genpd(dev);
2695	if (IS_ERR(pd))
2696		return;
2697
2698	genpd_queue_power_off_work(pd);
2699}
2700
2701static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2702				 unsigned int index, bool power_on)
2703{
2704	struct of_phandle_args pd_args;
2705	struct generic_pm_domain *pd;
2706	int pstate;
2707	int ret;
2708
2709	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2710				"#power-domain-cells", index, &pd_args);
2711	if (ret < 0)
2712		return ret;
2713
2714	mutex_lock(&gpd_list_lock);
2715	pd = genpd_get_from_provider(&pd_args);
2716	of_node_put(pd_args.np);
2717	if (IS_ERR(pd)) {
2718		mutex_unlock(&gpd_list_lock);
2719		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2720			__func__, PTR_ERR(pd));
2721		return driver_deferred_probe_check_state(base_dev);
2722	}
2723
2724	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2725
2726	ret = genpd_add_device(pd, dev, base_dev);
2727	mutex_unlock(&gpd_list_lock);
2728
2729	if (ret < 0)
2730		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
2731
2732	dev->pm_domain->detach = genpd_dev_pm_detach;
2733	dev->pm_domain->sync = genpd_dev_pm_sync;
2734
2735	/* Set the default performance state */
2736	pstate = of_get_required_opp_performance_state(dev->of_node, index);
2737	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
2738		ret = pstate;
2739		goto err;
2740	} else if (pstate > 0) {
2741		ret = dev_pm_genpd_set_performance_state(dev, pstate);
2742		if (ret)
2743			goto err;
2744		dev_gpd_data(dev)->default_pstate = pstate;
2745	}
2746
2747	if (power_on) {
2748		genpd_lock(pd);
2749		ret = genpd_power_on(pd, 0);
2750		genpd_unlock(pd);
2751	}
2752
2753	if (ret) {
2754		/* Drop the default performance state */
2755		if (dev_gpd_data(dev)->default_pstate) {
2756			dev_pm_genpd_set_performance_state(dev, 0);
2757			dev_gpd_data(dev)->default_pstate = 0;
2758		}
2759
2760		genpd_remove_device(pd, dev);
2761		return -EPROBE_DEFER;
2762	}
2763
2764	return 1;
2765
2766err:
2767	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
2768		pd->name, ret);
2769	genpd_remove_device(pd, dev);
2770	return ret;
2771}
2772
2773/**
2774 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2775 * @dev: Device to attach.
2776 *
2777 * Parse device's OF node to find a PM domain specifier. If such is found,
2778 * attaches the device to retrieved pm_domain ops.
2779 *
2780 * Returns 1 on a successfully attached PM domain, 0 when the device doesn't
2781 * need a PM domain or when multiple power-domains exist for it, else a
2782 * negative error code. Note that if a power-domain exists for the device, but
2783 * it cannot be found or turned on, -EPROBE_DEFER is returned to ensure that
2784 * the device is not probed and to retry later.
2785 */
2786int genpd_dev_pm_attach(struct device *dev)
2787{
2788	if (!dev->of_node)
2789		return 0;
2790
2791	/*
2792	 * Devices with multiple PM domains must be attached separately, as we
2793	 * can only attach one PM domain per device.
2794	 */
2795	if (of_count_phandle_with_args(dev->of_node, "power-domains",
2796				       "#power-domain-cells") != 1)
2797		return 0;
2798
2799	return __genpd_dev_pm_attach(dev, dev, 0, true);
2800}
2801EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
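/*
 * For reference, a hedged device tree fragment (made-up names): a consumer
 * with exactly one "power-domains" entry is attached here automatically,
 * via dev_pm_domain_attach() from the bus code, when its driver probes; no
 * driver code is required.
 *
 *	serial@1000 {
 *		compatible = "vendor,foo-uart";
 *		power-domains = <&power 0>;
 *	};
 */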
2802
2803/**
2804 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2805 * @dev: The device used to lookup the PM domain.
2806 * @index: The index of the PM domain.
2807 *
2808 * Parse device's OF node to find a PM domain specifier at the provided @index.
2809 * If such is found, creates a virtual device and attaches it to the retrieved
2810 * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
2811 * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
2812 *
2813 * Returns the created virtual device on a successfully attached PM domain,
2814 * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
2815 * failures. If a power-domain exists for the device, but cannot be found or
2816 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
2817 * is not probed and to retry later.
2818 */
2819struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2820					 unsigned int index)
2821{
2822	struct device *virt_dev;
2823	int num_domains;
2824	int ret;
2825
2826	if (!dev->of_node)
2827		return NULL;
2828
2829	/* Verify that the index is within a valid range. */
2830	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2831						 "#power-domain-cells");
2832	if (index >= num_domains)
2833		return NULL;
2834
2835	/* Allocate and register device on the genpd bus. */
2836	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2837	if (!virt_dev)
2838		return ERR_PTR(-ENOMEM);
2839
2840	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2841	virt_dev->bus = &genpd_bus_type;
2842	virt_dev->release = genpd_release_dev;
2843	virt_dev->of_node = of_node_get(dev->of_node);
2844
2845	ret = device_register(virt_dev);
2846	if (ret) {
2847		put_device(virt_dev);
2848		return ERR_PTR(ret);
2849	}
2850
2851	/* Try to attach the device to the PM domain at the specified index. */
2852	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2853	if (ret < 1) {
2854		device_unregister(virt_dev);
2855		return ret ? ERR_PTR(ret) : NULL;
2856	}
2857
2858	pm_runtime_enable(virt_dev);
2859	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2860
2861	return virt_dev;
2862}
2863EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
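/*
 * Illustrative sketch, not part of this file: a consumer with multiple PM
 * domains attaches each one explicitly, typically through the
 * dev_pm_domain_attach_by_name() wrapper, and adds a device link so that
 * runtime PM propagates to the returned virtual device. The "mx" domain
 * name and the probe function are hypothetical.
 */
#if 0	/* example only */
static int foo_multi_pd_probe(struct platform_device *pdev)
{
	struct device *pd_dev;
	struct device_link *link;

	pd_dev = dev_pm_domain_attach_by_name(&pdev->dev, "mx");
	if (IS_ERR_OR_NULL(pd_dev))
		return pd_dev ? PTR_ERR(pd_dev) : -ENODEV;

	link = device_link_add(&pdev->dev, pd_dev,
			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
	if (!link)
		return -EINVAL;

	return 0;
}
#endif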
2864
2865/**
2866 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2867 * @dev: The device used to lookup the PM domain.
2868 * @name: The name of the PM domain.
2869 *
2870 * Parse device's OF node to find a PM domain specifier using the
2871 * power-domain-names DT property. For further description see
2872 * genpd_dev_pm_attach_by_id().
2873 */
2874struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2875{
2876	int index;
2877
2878	if (!dev->of_node)
2879		return NULL;
2880
2881	index = of_property_match_string(dev->of_node, "power-domain-names",
2882					 name);
2883	if (index < 0)
2884		return NULL;
2885
2886	return genpd_dev_pm_attach_by_id(dev, index);
2887}
2888
2889static const struct of_device_id idle_state_match[] = {
2890	{ .compatible = "domain-idle-state", },
2891	{ }
2892};
2893
2894static int genpd_parse_state(struct genpd_power_state *genpd_state,
2895				    struct device_node *state_node)
2896{
2897	int err;
2898	u32 residency;
2899	u32 entry_latency, exit_latency;
2900
2901	err = of_property_read_u32(state_node, "entry-latency-us",
2902						&entry_latency);
2903	if (err) {
2904		pr_debug(" * %pOF missing entry-latency-us property\n",
2905			 state_node);
2906		return -EINVAL;
2907	}
2908
2909	err = of_property_read_u32(state_node, "exit-latency-us",
2910						&exit_latency);
2911	if (err) {
2912		pr_debug(" * %pOF missing exit-latency-us property\n",
2913			 state_node);
2914		return -EINVAL;
2915	}
2916
2917	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2918	if (!err)
2919		genpd_state->residency_ns = 1000 * residency;
2920
2921	genpd_state->power_on_latency_ns = 1000 * exit_latency;
2922	genpd_state->power_off_latency_ns = 1000 * entry_latency;
2923	genpd_state->fwnode = &state_node->fwnode;
2924
2925	return 0;
2926}
2927
2928static int genpd_iterate_idle_states(struct device_node *dn,
2929				     struct genpd_power_state *states)
2930{
2931	int ret;
2932	struct of_phandle_iterator it;
2933	struct device_node *np;
2934	int i = 0;
2935
2936	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2937	if (ret <= 0)
2938		return ret == -ENOENT ? 0 : ret;
2939
2940	/* Loop over the phandles until all the requested entries are found */
2941	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2942		np = it.node;
2943		if (!of_match_node(idle_state_match, np))
2944			continue;
2945
2946		if (!of_device_is_available(np))
2947			continue;
2948
2949		if (states) {
2950			ret = genpd_parse_state(&states[i], np);
2951			if (ret) {
2952				pr_err("Parsing idle state node %pOF failed with err %d\n",
2953				       np, ret);
2954				of_node_put(np);
2955				return ret;
2956			}
2957		}
2958		i++;
2959	}
2960
2961	return i;
2962}
2963
2964/**
2965 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2966 *
2967 * @dn: The genpd device node
2968 * @states: The pointer to which the state array will be saved.
2969 * @n: The count of elements in the array returned from this function.
2970 *
2971 * Returns the device states parsed from the OF node. The memory for the states
2972 * is allocated by this function and it is the responsibility of the caller to
2973 * free it after use. If any or zero compatible domain idle states are found,
2974 * it returns 0; in case of errors, a negative error code is returned.
2975 */
2976int of_genpd_parse_idle_states(struct device_node *dn,
2977			struct genpd_power_state **states, int *n)
2978{
2979	struct genpd_power_state *st;
2980	int ret;
2981
2982	ret = genpd_iterate_idle_states(dn, NULL);
2983	if (ret < 0)
2984		return ret;
2985
2986	if (!ret) {
2987		*states = NULL;
2988		*n = 0;
2989		return 0;
2990	}
2991
2992	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
2993	if (!st)
2994		return -ENOMEM;
2995
2996	ret = genpd_iterate_idle_states(dn, st);
2997	if (ret <= 0) {
2998		kfree(st);
2999		return ret < 0 ? ret : -EINVAL;
3000	}
3001
3002	*states = st;
3003	*n = ret;
3004
3005	return 0;
3006}
3007EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
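/*
 * Illustrative sketch, not part of this file: a provider can parse its
 * "domain-idle-states" phandles and hand the resulting array to the genpd
 * before initialization. Error handling is trimmed and the names are
 * hypothetical.
 */
#if 0	/* example only */
static int foo_pd_init_states(struct generic_pm_domain *pd,
			      struct device_node *np)
{
	struct genpd_power_state *states;
	int ret, nr_states;

	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
	if (ret)
		return ret;

	if (nr_states) {
		/* The caller owns @states; the genpd only references them. */
		pd->states = states;
		pd->state_count = nr_states;
	}

	return pm_genpd_init(pd, NULL, false);
}
#endif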
3008
3009/**
3010 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
3011 *
3012 * @genpd_dev: Genpd's device for which the performance-state needs to be found.
3013 * @opp: struct dev_pm_opp of the OPP for which we need to find performance
3014 *	state.
3015 *
3016 * Returns performance state encoded in the OPP of the genpd. This calls
3017 * platform specific genpd->opp_to_performance_state() callback to translate
3018 * power domain OPP to performance state.
3019 *
3020 * Returns performance state on success and 0 on failure.
3021 */
3022unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
3023					       struct dev_pm_opp *opp)
3024{
3025	struct generic_pm_domain *genpd = NULL;
3026	int state;
3027
3028	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
3029
3030	if (unlikely(!genpd->opp_to_performance_state))
3031		return 0;
3032
3033	genpd_lock(genpd);
3034	state = genpd->opp_to_performance_state(genpd, opp);
3035	genpd_unlock(genpd);
3036
3037	return state;
3038}
3039EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
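/*
 * Illustrative sketch, not part of this file: providers commonly translate
 * a power domain OPP into a performance state by reading its "opp-level",
 * assuming the levels map directly onto hardware performance states. The
 * callback name is hypothetical.
 */
#if 0	/* example only */
static unsigned int foo_pd_opp_to_performance_state(
					struct generic_pm_domain *genpd,
					struct dev_pm_opp *opp)
{
	return dev_pm_opp_get_level(opp);
}

/* assigned before pm_genpd_init(): */
/*	foo_pd.opp_to_performance_state = foo_pd_opp_to_performance_state; */
#endif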
3040
3041static int __init genpd_bus_init(void)
3042{
3043	return bus_register(&genpd_bus_type);
3044}
3045core_initcall(genpd_bus_init);
3046
3047#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
3048
3049
3050/***        debugfs support        ***/
3051
3052#ifdef CONFIG_DEBUG_FS
3053/*
3054 * TODO: This function is a slightly modified version of rtpm_status_show
3055 * from sysfs.c, so generalize it.
3056 */
3057static void rtpm_status_str(struct seq_file *s, struct device *dev)
3058{
3059	static const char * const status_lookup[] = {
3060		[RPM_ACTIVE] = "active",
3061		[RPM_RESUMING] = "resuming",
3062		[RPM_SUSPENDED] = "suspended",
3063		[RPM_SUSPENDING] = "suspending"
3064	};
3065	const char *p = "";
3066
3067	if (dev->power.runtime_error)
3068		p = "error";
3069	else if (dev->power.disable_depth)
3070		p = "unsupported";
3071	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3072		p = status_lookup[dev->power.runtime_status];
3073	else
3074		WARN_ON(1);
3075
3076	seq_printf(s, "%-25s  ", p);
3077}
3078
3079static void perf_status_str(struct seq_file *s, struct device *dev)
3080{
3081	struct generic_pm_domain_data *gpd_data;
3082
3083	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3084	seq_put_decimal_ull(s, "", gpd_data->performance_state);
3085}
3086
3087static int genpd_summary_one(struct seq_file *s,
3088			struct generic_pm_domain *genpd)
3089{
3090	static const char * const status_lookup[] = {
3091		[GENPD_STATE_ON] = "on",
3092		[GENPD_STATE_OFF] = "off"
3093	};
3094	struct pm_domain_data *pm_data;
3095	const char *kobj_path;
3096	struct gpd_link *link;
3097	char state[16];
3098	int ret;
3099
3100	ret = genpd_lock_interruptible(genpd);
3101	if (ret)
3102		return -ERESTARTSYS;
3103
3104	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3105		goto exit;
3106	if (!genpd_status_on(genpd))
3107		snprintf(state, sizeof(state), "%s-%u",
3108			 status_lookup[genpd->status], genpd->state_idx);
3109	else
3110		snprintf(state, sizeof(state), "%s",
3111			 status_lookup[genpd->status]);
3112	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);
3113
3114	/*
3115	 * Modifications on the list require holding locks on both
3116	 * parent and child, so we are safe.
3117	 * Also genpd->name is immutable.
3118	 */
3119	list_for_each_entry(link, &genpd->parent_links, parent_node) {
3120		if (list_is_first(&link->parent_node, &genpd->parent_links))
3121			seq_printf(s, "\n%48s", " ");
3122		seq_printf(s, "%s", link->child->name);
3123		if (!list_is_last(&link->parent_node, &genpd->parent_links))
3124			seq_puts(s, ", ");
3125	}
3126
3127	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3128		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3129				genpd_is_irq_safe(genpd) ?
3130				GFP_ATOMIC : GFP_KERNEL);
3131		if (kobj_path == NULL)
3132			continue;
3133
3134		seq_printf(s, "\n    %-50s  ", kobj_path);
3135		rtpm_status_str(s, pm_data->dev);
3136		perf_status_str(s, pm_data->dev);
3137		kfree(kobj_path);
3138	}
3139
3140	seq_puts(s, "\n");
3141exit:
3142	genpd_unlock(genpd);
3143
3144	return 0;
3145}
3146
3147static int summary_show(struct seq_file *s, void *data)
3148{
3149	struct generic_pm_domain *genpd;
3150	int ret = 0;
3151
3152	seq_puts(s, "domain                          status          children                           performance\n");
3153	seq_puts(s, "    /device                                             runtime status\n");
3154	seq_puts(s, "----------------------------------------------------------------------------------------------\n");
3155
3156	ret = mutex_lock_interruptible(&gpd_list_lock);
3157	if (ret)
3158		return -ERESTARTSYS;
3159
3160	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3161		ret = genpd_summary_one(s, genpd);
3162		if (ret)
3163			break;
3164	}
3165	mutex_unlock(&gpd_list_lock);
3166
3167	return ret;
3168}
3169
3170static int status_show(struct seq_file *s, void *data)
3171{
3172	static const char * const status_lookup[] = {
3173		[GENPD_STATE_ON] = "on",
3174		[GENPD_STATE_OFF] = "off"
3175	};
3176
3177	struct generic_pm_domain *genpd = s->private;
3178	int ret = 0;
3179
3180	ret = genpd_lock_interruptible(genpd);
3181	if (ret)
3182		return -ERESTARTSYS;
3183
3184	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3185		goto exit;
3186
3187	if (genpd->status == GENPD_STATE_OFF)
3188		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3189			genpd->state_idx);
3190	else
3191		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3192exit:
3193	genpd_unlock(genpd);
3194	return ret;
3195}
3196
3197static int sub_domains_show(struct seq_file *s, void *data)
3198{
3199	struct generic_pm_domain *genpd = s->private;
3200	struct gpd_link *link;
3201	int ret = 0;
3202
3203	ret = genpd_lock_interruptible(genpd);
3204	if (ret)
3205		return -ERESTARTSYS;
3206
3207	list_for_each_entry(link, &genpd->parent_links, parent_node)
3208		seq_printf(s, "%s\n", link->child->name);
3209
3210	genpd_unlock(genpd);
3211	return ret;
3212}
3213
3214static int idle_states_show(struct seq_file *s, void *data)
3215{
3216	struct generic_pm_domain *genpd = s->private;
3217	u64 now, delta, idle_time = 0;
3218	unsigned int i;
3219	int ret = 0;
3220
3221	ret = genpd_lock_interruptible(genpd);
3222	if (ret)
3223		return -ERESTARTSYS;
3224
3225	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");
3226
3227	for (i = 0; i < genpd->state_count; i++) {
3228		idle_time += genpd->states[i].idle_time;
3229
3230		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3231			now = ktime_get_mono_fast_ns();
3232			if (now > genpd->accounting_time) {
3233				delta = now - genpd->accounting_time;
3234				idle_time += delta;
3235			}
3236		}
3237
3238		do_div(idle_time, NSEC_PER_MSEC);
3239		seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
3240			   genpd->states[i].usage, genpd->states[i].rejected);
3241	}
3242
3243	genpd_unlock(genpd);
3244	return ret;
3245}
3246
3247static int active_time_show(struct seq_file *s, void *data)
3248{
3249	struct generic_pm_domain *genpd = s->private;
3250	u64 now, on_time, delta = 0;
3251	int ret = 0;
3252
3253	ret = genpd_lock_interruptible(genpd);
3254	if (ret)
3255		return -ERESTARTSYS;
3256
3257	if (genpd->status == GENPD_STATE_ON) {
3258		now = ktime_get_mono_fast_ns();
3259		if (now > genpd->accounting_time)
3260			delta = now - genpd->accounting_time;
3261	}
3262
3263	on_time = genpd->on_time + delta;
3264	do_div(on_time, NSEC_PER_MSEC);
3265	seq_printf(s, "%llu ms\n", on_time);
3266
3267	genpd_unlock(genpd);
3268	return ret;
3269}
3270
3271static int total_idle_time_show(struct seq_file *s, void *data)
3272{
3273	struct generic_pm_domain *genpd = s->private;
3274	u64 now, delta, total = 0;
3275	unsigned int i;
3276	int ret = 0;
3277
3278	ret = genpd_lock_interruptible(genpd);
3279	if (ret)
3280		return -ERESTARTSYS;
3281
3282	for (i = 0; i < genpd->state_count; i++) {
3283		total += genpd->states[i].idle_time;
3284
3285		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3286			now = ktime_get_mono_fast_ns();
3287			if (now > genpd->accounting_time) {
3288				delta = now - genpd->accounting_time;
3289				total += delta;
3290			}
3291		}
3292	}
3293
3294	do_div(total, NSEC_PER_MSEC);
3295	seq_printf(s, "%llu ms\n", total);
3296
3297	genpd_unlock(genpd);
3298	return ret;
3299}
3300
3301
3302static int devices_show(struct seq_file *s, void *data)
3303{
3304	struct generic_pm_domain *genpd = s->private;
3305	struct pm_domain_data *pm_data;
3306	const char *kobj_path;
3307	int ret = 0;
3308
3309	ret = genpd_lock_interruptible(genpd);
3310	if (ret)
3311		return -ERESTARTSYS;
3312
3313	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3314		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3315				genpd_is_irq_safe(genpd) ?
3316				GFP_ATOMIC : GFP_KERNEL);
3317		if (kobj_path == NULL)
3318			continue;
3319
3320		seq_printf(s, "%s\n", kobj_path);
3321		kfree(kobj_path);
3322	}
3323
3324	genpd_unlock(genpd);
3325	return ret;
3326}
3327
3328static int perf_state_show(struct seq_file *s, void *data)
3329{
3330	struct generic_pm_domain *genpd = s->private;
3331
3332	if (genpd_lock_interruptible(genpd))
3333		return -ERESTARTSYS;
3334
3335	seq_printf(s, "%u\n", genpd->performance_state);
3336
3337	genpd_unlock(genpd);
3338	return 0;
3339}
3340
3341DEFINE_SHOW_ATTRIBUTE(summary);
3342DEFINE_SHOW_ATTRIBUTE(status);
3343DEFINE_SHOW_ATTRIBUTE(sub_domains);
3344DEFINE_SHOW_ATTRIBUTE(idle_states);
3345DEFINE_SHOW_ATTRIBUTE(active_time);
3346DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3347DEFINE_SHOW_ATTRIBUTE(devices);
3348DEFINE_SHOW_ATTRIBUTE(perf_state);
3349
3350static void genpd_debug_add(struct generic_pm_domain *genpd)
3351{
3352	struct dentry *d;
3353
3354	if (!genpd_debugfs_dir)
3355		return;
3356
3357	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3358
3359	debugfs_create_file("current_state", 0444,
3360			    d, genpd, &status_fops);
3361	debugfs_create_file("sub_domains", 0444,
3362			    d, genpd, &sub_domains_fops);
3363	debugfs_create_file("idle_states", 0444,
3364			    d, genpd, &idle_states_fops);
3365	debugfs_create_file("active_time", 0444,
3366			    d, genpd, &active_time_fops);
3367	debugfs_create_file("total_idle_time", 0444,
3368			    d, genpd, &total_idle_time_fops);
3369	debugfs_create_file("devices", 0444,
3370			    d, genpd, &devices_fops);
3371	if (genpd->set_performance_state)
3372		debugfs_create_file("perf_state", 0444,
3373				    d, genpd, &perf_state_fops);
3374}
3375
3376static int __init genpd_debug_init(void)
3377{
3378	struct generic_pm_domain *genpd;
3379
3380	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3381
3382	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3383			    NULL, &summary_fops);
3384
3385	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3386		genpd_debug_add(genpd);
3387
3388	return 0;
3389}
3390late_initcall(genpd_debug_init);
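/*
 * With debugfs mounted at the default location, the files created above can
 * be inspected from user space, e.g.:
 *
 *	cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
 *	cat /sys/kernel/debug/pm_genpd/<domain-name>/current_state
 */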
3391
3392static void __exit genpd_debug_exit(void)
3393{
3394	debugfs_remove_recursive(genpd_debugfs_dir);
3395}
3396__exitcall(genpd_debug_exit);
3397#endif /* CONFIG_DEBUG_FS */
v4.10.11
 
   1/*
   2 * drivers/base/power/domain.c - Common code related to device power domains.
   3 *
   4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
   5 *
   6 * This file is released under the GPLv2.
   7 */
 
   8
   9#include <linux/delay.h>
  10#include <linux/kernel.h>
  11#include <linux/io.h>
  12#include <linux/platform_device.h>
 
  13#include <linux/pm_runtime.h>
  14#include <linux/pm_domain.h>
  15#include <linux/pm_qos.h>
  16#include <linux/pm_clock.h>
  17#include <linux/slab.h>
  18#include <linux/err.h>
  19#include <linux/sched.h>
  20#include <linux/suspend.h>
  21#include <linux/export.h>
 
 
  22
  23#include "power.h"
  24
  25#define GENPD_RETRY_MAX_MS	250		/* Approximate */
  26
  27#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
  28({								\
  29	type (*__routine)(struct device *__d); 			\
  30	type __ret = (type)0;					\
  31								\
  32	__routine = genpd->dev_ops.callback; 			\
  33	if (__routine) {					\
  34		__ret = __routine(dev); 			\
  35	}							\
  36	__ret;							\
  37})
  38
  39static LIST_HEAD(gpd_list);
  40static DEFINE_MUTEX(gpd_list_lock);
  41
  42struct genpd_lock_ops {
  43	void (*lock)(struct generic_pm_domain *genpd);
  44	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
  45	int (*lock_interruptible)(struct generic_pm_domain *genpd);
  46	void (*unlock)(struct generic_pm_domain *genpd);
  47};
  48
  49static void genpd_lock_mtx(struct generic_pm_domain *genpd)
  50{
  51	mutex_lock(&genpd->mlock);
  52}
  53
  54static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
  55					int depth)
  56{
  57	mutex_lock_nested(&genpd->mlock, depth);
  58}
  59
  60static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
  61{
  62	return mutex_lock_interruptible(&genpd->mlock);
  63}
  64
  65static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
  66{
  67	return mutex_unlock(&genpd->mlock);
  68}
  69
  70static const struct genpd_lock_ops genpd_mtx_ops = {
  71	.lock = genpd_lock_mtx,
  72	.lock_nested = genpd_lock_nested_mtx,
  73	.lock_interruptible = genpd_lock_interruptible_mtx,
  74	.unlock = genpd_unlock_mtx,
  75};
  76
  77static void genpd_lock_spin(struct generic_pm_domain *genpd)
  78	__acquires(&genpd->slock)
  79{
  80	unsigned long flags;
  81
  82	spin_lock_irqsave(&genpd->slock, flags);
  83	genpd->lock_flags = flags;
  84}
  85
  86static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
  87					int depth)
  88	__acquires(&genpd->slock)
  89{
  90	unsigned long flags;
  91
  92	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
  93	genpd->lock_flags = flags;
  94}
  95
  96static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
  97	__acquires(&genpd->slock)
  98{
  99	unsigned long flags;
 100
 101	spin_lock_irqsave(&genpd->slock, flags);
 102	genpd->lock_flags = flags;
 103	return 0;
 104}
 105
 106static void genpd_unlock_spin(struct generic_pm_domain *genpd)
 107	__releases(&genpd->slock)
 108{
 109	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
 110}
 111
 112static const struct genpd_lock_ops genpd_spin_ops = {
 113	.lock = genpd_lock_spin,
 114	.lock_nested = genpd_lock_nested_spin,
 115	.lock_interruptible = genpd_lock_interruptible_spin,
 116	.unlock = genpd_unlock_spin,
 117};
 118
 119#define genpd_lock(p)			p->lock_ops->lock(p)
 120#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
 121#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
 122#define genpd_unlock(p)			p->lock_ops->unlock(p)
 123
 
 124#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
 
 
 
 
 125
 126static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
 127		struct generic_pm_domain *genpd)
 128{
 129	bool ret;
 130
 131	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
 132
 133	/* Warn once for each IRQ safe dev in no sleep domain */
 
 
 
 
 
 
 
 134	if (ret)
 135		dev_warn_once(dev, "PM domain %s will not be powered off\n",
 136				genpd->name);
 137
 138	return ret;
 139}
 140
 
 
 141/*
 142 * Get the generic PM domain for a particular struct device.
 143 * This validates the struct device pointer, the PM domain pointer,
 144 * and checks that the PM domain pointer is a real generic PM domain.
 145 * Any failure results in NULL being returned.
 146 */
 147static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
 148{
 149	struct generic_pm_domain *genpd = NULL, *gpd;
 150
 151	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
 152		return NULL;
 153
 154	mutex_lock(&gpd_list_lock);
 155	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
 156		if (&gpd->domain == dev->pm_domain) {
 157			genpd = gpd;
 158			break;
 159		}
 160	}
 161	mutex_unlock(&gpd_list_lock);
 162
 163	return genpd;
 164}
 165
 166/*
 167 * This should only be used where we are certain that the pm_domain
 168 * attached to the device is a genpd domain.
 169 */
 170static struct generic_pm_domain *dev_to_genpd(struct device *dev)
 171{
 172	if (IS_ERR_OR_NULL(dev->pm_domain))
 173		return ERR_PTR(-EINVAL);
 174
 175	return pd_to_genpd(dev->pm_domain);
 176}
 177
 178static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
 
 179{
 180	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
 181}
 182
 183static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
 
 184{
 185	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
 186}
 187
 188static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 189{
 190	bool ret = false;
 191
 192	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
 193		ret = !!atomic_dec_and_test(&genpd->sd_count);
 194
 195	return ret;
 196}
 197
 198static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
 199{
 200	atomic_inc(&genpd->sd_count);
 201	smp_mb__after_atomic();
 202}
 203
 204static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 205{
 206	unsigned int state_idx = genpd->state_idx;
 207	ktime_t time_start;
 208	s64 elapsed_ns;
 209	int ret;
 210
 
 
 
 
 
 
 
 
 211	if (!genpd->power_on)
 212		return 0;
 
 
 
 
 
 
 213
 214	if (!timed)
 215		return genpd->power_on(genpd);
 216
 217	time_start = ktime_get();
 218	ret = genpd->power_on(genpd);
 219	if (ret)
 220		return ret;
 221
 222	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 223	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
 224		return ret;
 225
 226	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
 227	genpd->max_off_time_changed = true;
 228	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
 229		 genpd->name, "on", elapsed_ns);
 230
 
 
 
 
 
 
 231	return ret;
 232}
 233
 234static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 235{
 236	unsigned int state_idx = genpd->state_idx;
 237	ktime_t time_start;
 238	s64 elapsed_ns;
 239	int ret;
 240
 
 
 
 
 
 
 
 
 241	if (!genpd->power_off)
 242		return 0;
 
 
 
 
 
 
 243
 244	if (!timed)
 245		return genpd->power_off(genpd);
 246
 247	time_start = ktime_get();
 248	ret = genpd->power_off(genpd);
 249	if (ret == -EBUSY)
 250		return ret;
 251
 252	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 253	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
 254		return ret;
 255
 256	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
 257	genpd->max_off_time_changed = true;
 258	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
 259		 genpd->name, "off", elapsed_ns);
 260
 
 
 
 
 
 
 261	return ret;
 262}
 263
 264/**
 265 * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
 266 * @genpd: PM domain to power off.
 267 *
 268 * Queue up the execution of genpd_poweroff() unless it's already been done
 269 * before.
 270 */
 271static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 272{
 273	queue_work(pm_wq, &genpd->power_off_work);
 274}
 275
 276/**
 277 * genpd_poweron - Restore power to a given PM domain and its masters.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 278 * @genpd: PM domain to power up.
 279 * @depth: nesting count for lockdep.
 280 *
 281 * Restore power to @genpd and all of its masters so that it is possible to
 282 * resume a device belonging to it.
 283 */
 284static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
 285{
 286	struct gpd_link *link;
 287	int ret = 0;
 288
 289	if (genpd->status == GPD_STATE_ACTIVE)
 290		return 0;
 291
 292	/*
 293	 * The list is guaranteed not to change while the loop below is being
 294	 * executed, unless one of the masters' .power_on() callbacks fiddles
 295	 * with it.
 296	 */
 297	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 298		struct generic_pm_domain *master = link->master;
 299
 300		genpd_sd_counter_inc(master);
 301
 302		genpd_lock_nested(master, depth + 1);
 303		ret = genpd_poweron(master, depth + 1);
 304		genpd_unlock(master);
 305
 306		if (ret) {
 307			genpd_sd_counter_dec(master);
 308			goto err;
 309		}
 310	}
 311
 312	ret = genpd_power_on(genpd, true);
 313	if (ret)
 314		goto err;
 315
 316	genpd->status = GPD_STATE_ACTIVE;
 
 
 317	return 0;
 318
 319 err:
 320	list_for_each_entry_continue_reverse(link,
 321					&genpd->slave_links,
 322					slave_node) {
 323		genpd_sd_counter_dec(link->master);
 324		genpd_queue_power_off_work(link->master);
 
 
 325	}
 326
 327	return ret;
 328}
 329
 
 
 
 
 
 
 
 330static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 331				     unsigned long val, void *ptr)
 332{
 333	struct generic_pm_domain_data *gpd_data;
 334	struct device *dev;
 335
 336	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
 337	dev = gpd_data->base.dev;
 338
 339	for (;;) {
 340		struct generic_pm_domain *genpd;
 341		struct pm_domain_data *pdd;
 
 342
 343		spin_lock_irq(&dev->power.lock);
 344
 345		pdd = dev->power.subsys_data ?
 346				dev->power.subsys_data->domain_data : NULL;
 347		if (pdd && pdd->dev) {
 348			to_gpd_data(pdd)->td.constraint_changed = true;
 349			genpd = dev_to_genpd(dev);
 350		} else {
 351			genpd = ERR_PTR(-ENODATA);
 
 352		}
 353
 354		spin_unlock_irq(&dev->power.lock);
 355
 356		if (!IS_ERR(genpd)) {
 357			genpd_lock(genpd);
 358			genpd->max_off_time_changed = true;
 359			genpd_unlock(genpd);
 360		}
 361
 362		dev = dev->parent;
 363		if (!dev || dev->power.ignore_children)
 364			break;
 365	}
 366
 367	return NOTIFY_DONE;
 368}
 369
 370/**
 371 * genpd_poweroff - Remove power from a given PM domain.
 372 * @genpd: PM domain to power down.
 373 * @is_async: PM domain is powered down from a scheduled work
 374 *
 375 * If all of the @genpd's devices have been suspended and all of its subdomains
 376 * have been powered down, remove power from @genpd.
 377 */
 378static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
 379{
 380	struct pm_domain_data *pdd;
 381	struct gpd_link *link;
 382	unsigned int not_suspended = 0;
 383
 384	/*
 385	 * Do not try to power off the domain in the following situations:
 386	 * (1) The domain is already in the "power off" state.
 387	 * (2) System suspend is in progress.
 388	 */
 389	if (genpd->status == GPD_STATE_POWER_OFF
 390	    || genpd->prepared_count > 0)
 391		return 0;
 392
 393	if (atomic_read(&genpd->sd_count) > 0)
 394		return -EBUSY;
 395
 396	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
 397		enum pm_qos_flags_status stat;
 398
 399		stat = dev_pm_qos_flags(pdd->dev,
 400					PM_QOS_FLAG_NO_POWER_OFF
 401						| PM_QOS_FLAG_REMOTE_WAKEUP);
 402		if (stat > PM_QOS_FLAGS_NONE)
 403			return -EBUSY;
 404
 405		/*
 406		 * Do not allow the PM domain to be powered off when an IRQ-safe
 407		 * device is part of a non-IRQ-safe domain.
 408		 */
 409		if (!pm_runtime_suspended(pdd->dev) ||
 410			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
 411			not_suspended++;
 412	}
 413
 414	if (not_suspended > 1 || (not_suspended == 1 && is_async))
 415		return -EBUSY;
 416
 417	if (genpd->gov && genpd->gov->power_down_ok) {
 418		if (!genpd->gov->power_down_ok(&genpd->domain))
 419			return -EAGAIN;
 420	}
 421
 422	if (genpd->power_off) {
 423		int ret;
 424
 425		if (atomic_read(&genpd->sd_count) > 0)
 426			return -EBUSY;
 427
 428		/*
 429		 * If sd_count > 0 at this point, one of the subdomains hasn't
 430		 * managed to call genpd_poweron() for the master yet after
 431		 * incrementing it.  In that case genpd_poweron() will wait
 432		 * for us to drop the lock, so we can call .power_off() and let
 433	 * genpd_poweron() restore power for us (this shouldn't
 434		 * happen very often).
 435		 */
 436		ret = genpd_power_off(genpd, true);
 437		if (ret)
 438			return ret;
 439	}
 440
 441	genpd->status = GPD_STATE_POWER_OFF;
 442
 443	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 444		genpd_sd_counter_dec(link->master);
 445		genpd_queue_power_off_work(link->master);
 446	}
 447
 448	return 0;
 449}
 450
 451/**
 452 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 453 * @work: Work structure used for scheduling the execution of this function.
 454 */
 455static void genpd_power_off_work_fn(struct work_struct *work)
 456{
 457	struct generic_pm_domain *genpd;
 458
 459	genpd = container_of(work, struct generic_pm_domain, power_off_work);
 460
 461	genpd_lock(genpd);
 462	genpd_poweroff(genpd, true);
 463	genpd_unlock(genpd);
 464}
 465
 466/**
 467 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 468 * @dev: Device to handle.
 469 */
 470static int __genpd_runtime_suspend(struct device *dev)
 471{
 472	int (*cb)(struct device *__dev);
 473
 474	if (dev->type && dev->type->pm)
 475		cb = dev->type->pm->runtime_suspend;
 476	else if (dev->class && dev->class->pm)
 477		cb = dev->class->pm->runtime_suspend;
 478	else if (dev->bus && dev->bus->pm)
 479		cb = dev->bus->pm->runtime_suspend;
 480	else
 481		cb = NULL;
 482
 483	if (!cb && dev->driver && dev->driver->pm)
 484		cb = dev->driver->pm->runtime_suspend;
 485
 486	return cb ? cb(dev) : 0;
 487}
 488
 489/**
 490 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 491 * @dev: Device to handle.
 492 */
 493static int __genpd_runtime_resume(struct device *dev)
 494{
 495	int (*cb)(struct device *__dev);
 496
 497	if (dev->type && dev->type->pm)
 498		cb = dev->type->pm->runtime_resume;
 499	else if (dev->class && dev->class->pm)
 500		cb = dev->class->pm->runtime_resume;
 501	else if (dev->bus && dev->bus->pm)
 502		cb = dev->bus->pm->runtime_resume;
 503	else
 504		cb = NULL;
 505
 506	if (!cb && dev->driver && dev->driver->pm)
 507		cb = dev->driver->pm->runtime_resume;
 508
 509	return cb ? cb(dev) : 0;
 510}
 511
 512/**
 513 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 514 * @dev: Device to suspend.
 515 *
 516 * Carry out a runtime suspend of a device under the assumption that its
 517 * pm_domain field points to the domain member of an object of type
 518 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 519 */
 520static int genpd_runtime_suspend(struct device *dev)
 521{
 522	struct generic_pm_domain *genpd;
 523	bool (*suspend_ok)(struct device *__dev);
 524	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
 525	bool runtime_pm = pm_runtime_enabled(dev);
 526	ktime_t time_start;
 527	s64 elapsed_ns;
 528	int ret;
 529
 530	dev_dbg(dev, "%s()\n", __func__);
 531
 532	genpd = dev_to_genpd(dev);
 533	if (IS_ERR(genpd))
 534		return -EINVAL;
 535
 536	/*
 537	 * A runtime PM centric subsystem/driver may re-use the runtime PM
 538	 * callbacks for purposes other than runtime PM. In those scenarios
 539	 * runtime PM is disabled. Under these circumstances, we shall skip
 540	 * validating/measuring the PM QoS latency.
 541	 */
 542	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
 543	if (runtime_pm && suspend_ok && !suspend_ok(dev))
 544		return -EBUSY;
 545
 546	/* Measure suspend latency. */
 547	time_start = 0;
 548	if (runtime_pm)
 549		time_start = ktime_get();
 550
 551	ret = __genpd_runtime_suspend(dev);
 552	if (ret)
 553		return ret;
 554
 555	ret = genpd_stop_dev(genpd, dev);
 556	if (ret) {
 557		__genpd_runtime_resume(dev);
 558		return ret;
 559	}
 560
 561	/* Update suspend latency value if the measured time exceeds it. */
 562	if (runtime_pm) {
 563		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 564		if (elapsed_ns > td->suspend_latency_ns) {
 565			td->suspend_latency_ns = elapsed_ns;
 566			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
 567				elapsed_ns);
 568			genpd->max_off_time_changed = true;
 569			td->constraint_changed = true;
 570		}
 571	}
 572
 573	/*
 574	 * If power.irq_safe is set, this routine may be run with
 575	 * IRQs disabled, so suspend only if the PM domain is also irq_safe.
 576	 */
 577	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
 578		return 0;
 579
 580	genpd_lock(genpd);
 581	genpd_poweroff(genpd, false);
 582	genpd_unlock(genpd);
 583
 584	return 0;
 585}
 586
 587/**
 588 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 589 * @dev: Device to resume.
 590 *
 591 * Carry out a runtime resume of a device under the assumption that its
 592 * pm_domain field points to the domain member of an object of type
 593 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 594 */
 595static int genpd_runtime_resume(struct device *dev)
 596{
 597	struct generic_pm_domain *genpd;
 598	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
 599	bool runtime_pm = pm_runtime_enabled(dev);
 600	ktime_t time_start;
 601	s64 elapsed_ns;
 602	int ret;
 603	bool timed = true;
 604
 605	dev_dbg(dev, "%s()\n", __func__);
 606
 607	genpd = dev_to_genpd(dev);
 608	if (IS_ERR(genpd))
 609		return -EINVAL;
 610
 611	/*
 612	 * As we don't power off a non-IRQ-safe domain that holds an
 613	 * IRQ-safe device, we don't need to restore power to it.
 614	 */
 615	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
 616		timed = false;
 617		goto out;
 618	}
 619
 620	genpd_lock(genpd);
 621	ret = genpd_poweron(genpd, 0);
 622	genpd_unlock(genpd);
 623
 624	if (ret)
 625		return ret;
 626
 627 out:
 628	/* Measure resume latency. */
 629	time_start = 0;
 630	if (timed && runtime_pm)
 631		time_start = ktime_get();
 632
 633	ret = genpd_start_dev(genpd, dev);
 634	if (ret)
 635		goto err_poweroff;
 636
 637	ret = __genpd_runtime_resume(dev);
 638	if (ret)
 639		goto err_stop;
 640
 641	/* Update resume latency value if the measured time exceeds it. */
 642	if (timed && runtime_pm) {
 643		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 644		if (elapsed_ns > td->resume_latency_ns) {
 645			td->resume_latency_ns = elapsed_ns;
 646			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
 647				elapsed_ns);
 648			genpd->max_off_time_changed = true;
 649			td->constraint_changed = true;
 650		}
 651	}
 652
 653	return 0;
 654
 655err_stop:
 656	genpd_stop_dev(genpd, dev);
 657err_poweroff:
 658	if (!pm_runtime_is_irq_safe(dev) ||
 659		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
 660		genpd_lock(genpd);
 661		genpd_poweroff(genpd, false);
 662		genpd_unlock(genpd);
 663	}
 664
 665	return ret;
 666}
 667
 668static bool pd_ignore_unused;
 669static int __init pd_ignore_unused_setup(char *__unused)
 670{
 671	pd_ignore_unused = true;
 672	return 1;
 673}
 674__setup("pd_ignore_unused", pd_ignore_unused_setup);
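/*
 * Usage note (not part of the original file): booting with the parameter
 * above on the kernel command line, for example
 *
 *	pd_ignore_unused
 *
 * sets the flag checked by genpd_poweroff_unused() below, so no power-off
 * work is queued for otherwise unused domains. This can help when debugging
 * a driver that misbehaves once its domain is gated.
 */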
 675
 676/**
 677 * genpd_poweroff_unused - Power off all PM domains with no devices in use.
 678 */
 679static int __init genpd_poweroff_unused(void)
 680{
 681	struct generic_pm_domain *genpd;
 682
 683	if (pd_ignore_unused) {
 684		pr_warn("genpd: Not disabling unused power domains\n");
 685		return 0;
 686	}
 687
 688	mutex_lock(&gpd_list_lock);
 689
 690	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
 691		genpd_queue_power_off_work(genpd);
 692
 693	mutex_unlock(&gpd_list_lock);
 694
 695	return 0;
 696}
 697late_initcall(genpd_poweroff_unused);
 698
 699#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
 700
 701/**
 702 * pm_genpd_present - Check if the given PM domain has been initialized.
 703 * @genpd: PM domain to check.
 704 */
 705static bool pm_genpd_present(const struct generic_pm_domain *genpd)
 706{
 707	const struct generic_pm_domain *gpd;
 708
 709	if (IS_ERR_OR_NULL(genpd))
 710		return false;
 711
 712	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
 713		if (gpd == genpd)
 714			return true;
 715
 716	return false;
 717}
 718
 719#endif
 720
 721#ifdef CONFIG_PM_SLEEP
 722
 723static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
 724				    struct device *dev)
 725{
 726	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
 727}
 728
 729/**
 730 * genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 731 * @genpd: PM domain to power off, if possible.
 732 *
 733 * Check if the given PM domain can be powered off (during system suspend or
 734 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 735 *
 736 * This function is only called in "noirq" and "syscore" stages of system power
 737 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 738 * executed sequentially, so it is guaranteed that it will never run twice in
 739 * parallel).
 740 */
 741static void genpd_sync_poweroff(struct generic_pm_domain *genpd)
 742{
 743	struct gpd_link *link;
 744
 745	if (genpd->status == GPD_STATE_POWER_OFF)
 746		return;
 747
 748	if (genpd->suspended_count != genpd->device_count
 749	    || atomic_read(&genpd->sd_count) > 0)
 750		return;
 751
 752	/* Choose the deepest state when suspending */
 753	genpd->state_idx = genpd->state_count - 1;
 754	genpd_power_off(genpd, false);
 755
 756	genpd->status = GPD_STATE_POWER_OFF;
 757
 758	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 759		genpd_sd_counter_dec(link->master);
 760		genpd_sync_poweroff(link->master);
 761	}
 762}
 763
 764/**
 765 * genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 766 * @genpd: PM domain to power on.
 767 *
 768 * This function is only called in "noirq" and "syscore" stages of system power
 769 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 770 * executed sequentially, so it is guaranteed that it will never run twice in
 771 * parallel).
 772 */
 773static void genpd_sync_poweron(struct generic_pm_domain *genpd)
 774{
 775	struct gpd_link *link;
 776
 777	if (genpd->status == GPD_STATE_ACTIVE)
 778		return;
 779
 780	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 781		genpd_sync_poweron(link->master);
 782		genpd_sd_counter_inc(link->master);
 783	}
 784
 785	genpd_power_on(genpd, false);
 786
 787	genpd->status = GPD_STATE_ACTIVE;
 788}
 789
 790/**
 791 * resume_needed - Check whether to resume a device before system suspend.
 792 * @dev: Device to check.
 793 * @genpd: PM domain the device belongs to.
 794 *
 795 * There are two cases in which a device that can wake up the system from sleep
 796 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 797 * to wake up the system and it has to remain active for this purpose while the
 798 * system is in the sleep state and (2) if the device is not enabled to wake up
 799 * the system from sleep states and it generally doesn't generate wakeup signals
 800 * by itself (those signals are generated on its behalf by other parts of the
 801 * system).  In the latter case it may be necessary to reconfigure the device's
 802 * wakeup settings during system suspend, because it may have been set up to
 803 * signal remote wakeup from the system's working state as needed by runtime PM.
 804 * Return 'true' in either of the above cases.
 805 */
 806static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
 807{
 808	bool active_wakeup;
 809
 810	if (!device_can_wakeup(dev))
 811		return false;
 812
 813	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
 814	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
 815}
 816
 817/**
 818 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 819 * @dev: Device to start the transition of.
 820 *
 821 * Start a power transition of a device (during a system-wide power transition)
 822 * under the assumption that its pm_domain field points to the domain member of
 823 * an object of type struct generic_pm_domain representing a PM domain
 824 * consisting of I/O devices.
 825 */
 826static int pm_genpd_prepare(struct device *dev)
 827{
 828	struct generic_pm_domain *genpd;
 829	int ret;
 830
 831	dev_dbg(dev, "%s()\n", __func__);
 832
 833	genpd = dev_to_genpd(dev);
 834	if (IS_ERR(genpd))
 835		return -EINVAL;
 836
 837	/*
 838	 * If a wakeup request is pending for the device, it should be woken up
 839	 * at this point and a system wakeup event should be reported if it's
 840	 * set up to wake up the system from sleep states.
 841	 */
 842	if (resume_needed(dev, genpd))
 843		pm_runtime_resume(dev);
 844
 845	genpd_lock(genpd);
 846
 847	if (genpd->prepared_count++ == 0)
 848		genpd->suspended_count = 0;
 849
 850	genpd_unlock(genpd);
 851
 852	ret = pm_generic_prepare(dev);
 853	if (ret) {
 854		genpd_lock(genpd);
 855
 856		genpd->prepared_count--;
 857
 858		genpd_unlock(genpd);
 859	}
 860
 861	return ret;
 862}
 863
 864/**
 865 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 866 * @dev: Device to suspend.
 867 *
 868 * Stop the device and remove power from the domain if all devices in it have
 869 * been stopped.
 870 */
 871static int pm_genpd_suspend_noirq(struct device *dev)
 872{
 873	struct generic_pm_domain *genpd;
 874	int ret;
 875
 876	dev_dbg(dev, "%s()\n", __func__);
 877
 878	genpd = dev_to_genpd(dev);
 879	if (IS_ERR(genpd))
 880		return -EINVAL;
 881
 882	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
 883		return 0;
 884
 885	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
 886		ret = pm_runtime_force_suspend(dev);
 887		if (ret)
 888			return ret;
 889	}
 890
 891	/*
 892	 * Since all of the "noirq" callbacks are executed sequentially, it is
 893	 * guaranteed that this function will never run twice in parallel for
 894	 * the same PM domain, so it is not necessary to use locking here.
 895	 */
 896	genpd->suspended_count++;
 897	genpd_sync_poweroff(genpd);
 898
 899	return 0;
 900}
 901
 902/**
 903 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 904 * @dev: Device to resume.
 905 *
 906 * Restore power to the device's PM domain, if necessary, and start the device.
 907 */
 908static int pm_genpd_resume_noirq(struct device *dev)
 909{
 910	struct generic_pm_domain *genpd;
 911	int ret = 0;
 912
 913	dev_dbg(dev, "%s()\n", __func__);
 914
 915	genpd = dev_to_genpd(dev);
 916	if (IS_ERR(genpd))
 917		return -EINVAL;
 918
 919	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
 920		return 0;
 921
 922	/*
 923	 * Since all of the "noirq" callbacks are executed sequentially, it is
 924	 * guaranteed that this function will never run twice in parallel for
 925	 * the same PM domain, so it is not necessary to use locking here.
 926	 */
 927	genpd_sync_poweron(genpd);
 928	genpd->suspended_count--;
 929
 930	if (genpd->dev_ops.stop && genpd->dev_ops.start)
 931		ret = pm_runtime_force_resume(dev);
 932
 933	return ret;
 934}
 935
 936/**
 937 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 938 * @dev: Device to freeze.
 939 *
 940 * Carry out a late freeze of a device under the assumption that its
 941 * pm_domain field points to the domain member of an object of type
 942 * struct generic_pm_domain representing a power domain consisting of I/O
 943 * devices.
 944 */
 945static int pm_genpd_freeze_noirq(struct device *dev)
 946{
 947	struct generic_pm_domain *genpd;
 948	int ret = 0;
 949
 950	dev_dbg(dev, "%s()\n", __func__);
 951
 952	genpd = dev_to_genpd(dev);
 953	if (IS_ERR(genpd))
 954		return -EINVAL;
 955
 956	if (genpd->dev_ops.stop && genpd->dev_ops.start)
 957		ret = pm_runtime_force_suspend(dev);
 958
 959	return ret;
 960}
 961
 962/**
 963 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 964 * @dev: Device to thaw.
 965 *
 966 * Start the device, unless power has been removed from the domain already
 967 * before the system transition.
 968 */
 969static int pm_genpd_thaw_noirq(struct device *dev)
 970{
 971	struct generic_pm_domain *genpd;
 972	int ret = 0;
 973
 974	dev_dbg(dev, "%s()\n", __func__);
 975
 976	genpd = dev_to_genpd(dev);
 977	if (IS_ERR(genpd))
 978		return -EINVAL;
 979
 980	if (genpd->dev_ops.stop && genpd->dev_ops.start)
 981		ret = pm_runtime_force_resume(dev);
 982
 983	return ret;
 984}
 985
 986/**
 987 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 988 * @dev: Device to resume.
 989 *
 990 * Make sure the domain will be in the same power state as before the
 991 * hibernation the system is resuming from and start the device if necessary.
 992 */
 993static int pm_genpd_restore_noirq(struct device *dev)
 994{
 995	struct generic_pm_domain *genpd;
 996	int ret = 0;
 997
 998	dev_dbg(dev, "%s()\n", __func__);
 999
1000	genpd = dev_to_genpd(dev);
1001	if (IS_ERR(genpd))
1002		return -EINVAL;
1003
1004	/*
1005	 * Since all of the "noirq" callbacks are executed sequentially, it is
1006	 * guaranteed that this function will never run twice in parallel for
1007	 * the same PM domain, so it is not necessary to use locking here.
1008	 *
1009	 * At this point suspended_count == 0 means we are being run for the
1010	 * first time for the given domain in the present cycle.
1011	 */
1012	if (genpd->suspended_count++ == 0)
1013		/*
1014		 * The boot kernel might put the domain into arbitrary state,
1015		 * so make it appear as powered off to genpd_sync_poweron(),
1016		 * so that it tries to power it on in case it was really off.
1017		 */
1018		genpd->status = GPD_STATE_POWER_OFF;
1019
1020	genpd_sync_poweron(genpd);
1021
1022	if (genpd->dev_ops.stop && genpd->dev_ops.start)
1023		ret = pm_runtime_force_resume(dev);
1024
1025	return ret;
1026}
1027
1028/**
1029 * pm_genpd_complete - Complete power transition of a device in a power domain.
1030 * @dev: Device to complete the transition of.
1031 *
1032 * Complete a power transition of a device (during a system-wide power
1033 * transition) under the assumption that its pm_domain field points to the
1034 * domain member of an object of type struct generic_pm_domain representing
1035 * a power domain consisting of I/O devices.
1036 */
1037static void pm_genpd_complete(struct device *dev)
1038{
1039	struct generic_pm_domain *genpd;
1040
1041	dev_dbg(dev, "%s()\n", __func__);
1042
1043	genpd = dev_to_genpd(dev);
1044	if (IS_ERR(genpd))
1045		return;
1046
1047	pm_generic_complete(dev);
1048
1049	genpd_lock(genpd);
1050
1051	genpd->prepared_count--;
1052	if (!genpd->prepared_count)
1053		genpd_queue_power_off_work(genpd);
1054
1055	genpd_unlock(genpd);
1056}
1057
1058/**
1059 * genpd_syscore_switch - Switch power during system core suspend or resume.
1060 * @dev: Device that normally is marked as "always on" to switch power for.
1061 * @suspend: Power the device's PM domain off if true, on if false.
1062 * This routine may only be called during the system core (syscore) suspend or
1063 * resume phase for devices whose "always on" flags are set.
1064 */
1065static void genpd_syscore_switch(struct device *dev, bool suspend)
1066{
1067	struct generic_pm_domain *genpd;
1068
1069	genpd = dev_to_genpd(dev);
1070	if (!pm_genpd_present(genpd))
1071		return;
1072
1073	if (suspend) {
1074		genpd->suspended_count++;
1075		genpd_sync_poweroff(genpd);
1076	} else {
1077		genpd_sync_poweron(genpd);
1078		genpd->suspended_count--;
1079	}
1080}
1081
1082void pm_genpd_syscore_poweroff(struct device *dev)
1083{
1084	genpd_syscore_switch(dev, true);
1085}
1086EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1087
1088void pm_genpd_syscore_poweron(struct device *dev)
1089{
1090	genpd_syscore_switch(dev, false);
1091}
1092EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
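/*
 * Illustrative sketch (hypothetical driver, not part of the original file):
 * a clock-event or timer driver whose device sits in an "always on" domain
 * could bracket its syscore callbacks like this, where "my_timer_dev" is
 * the struct device previously added to the domain:
 *
 *	static int my_timer_syscore_suspend(void)
 *	{
 *		pm_genpd_syscore_poweroff(my_timer_dev);
 *		return 0;
 *	}
 *
 *	static void my_timer_syscore_resume(void)
 *	{
 *		pm_genpd_syscore_poweron(my_timer_dev);
 *	}
 */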
1093
1094#else /* !CONFIG_PM_SLEEP */
1095
1096#define pm_genpd_prepare		NULL
1097#define pm_genpd_suspend_noirq		NULL
1098#define pm_genpd_resume_noirq		NULL
1099#define pm_genpd_freeze_noirq		NULL
1100#define pm_genpd_thaw_noirq		NULL
1101#define pm_genpd_restore_noirq		NULL
1102#define pm_genpd_complete		NULL
1103
1104#endif /* CONFIG_PM_SLEEP */
1105
1106static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1107					struct generic_pm_domain *genpd,
1108					struct gpd_timing_data *td)
1109{
1110	struct generic_pm_domain_data *gpd_data;
1111	int ret;
1112
1113	ret = dev_pm_get_subsys_data(dev);
1114	if (ret)
1115		return ERR_PTR(ret);
1116
1117	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1118	if (!gpd_data) {
1119		ret = -ENOMEM;
1120		goto err_put;
1121	}
1122
1123	if (td)
1124		gpd_data->td = *td;
1125
1126	gpd_data->base.dev = dev;
1127	gpd_data->td.constraint_changed = true;
1128	gpd_data->td.effective_constraint_ns = -1;
1129	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1130
1131	spin_lock_irq(&dev->power.lock);
1132
1133	if (dev->power.subsys_data->domain_data) {
1134		ret = -EINVAL;
1135		goto err_free;
1136	}
1137
1138	dev->power.subsys_data->domain_data = &gpd_data->base;
1139
1140	spin_unlock_irq(&dev->power.lock);
1141
1142	dev_pm_domain_set(dev, &genpd->domain);
1143
1144	return gpd_data;
1145
1146 err_free:
1147	spin_unlock_irq(&dev->power.lock);
1148	kfree(gpd_data);
1149 err_put:
1150	dev_pm_put_subsys_data(dev);
1151	return ERR_PTR(ret);
1152}
1153
1154static void genpd_free_dev_data(struct device *dev,
1155				struct generic_pm_domain_data *gpd_data)
1156{
1157	dev_pm_domain_set(dev, NULL);
1158
1159	spin_lock_irq(&dev->power.lock);
1160
1161	dev->power.subsys_data->domain_data = NULL;
1162
1163	spin_unlock_irq(&dev->power.lock);
1164
1165	kfree(gpd_data);
1166	dev_pm_put_subsys_data(dev);
1167}
1168
1169static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1170			    struct gpd_timing_data *td)
1171{
1172	struct generic_pm_domain_data *gpd_data;
1173	int ret = 0;
1174
1175	dev_dbg(dev, "%s()\n", __func__);
1176
1177	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1178		return -EINVAL;
1179
1180	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
1181	if (IS_ERR(gpd_data))
1182		return PTR_ERR(gpd_data);
1183
1184	genpd_lock(genpd);
1185
1186	if (genpd->prepared_count > 0) {
1187		ret = -EAGAIN;
1188		goto out;
1189	}
1190
1191	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1192	if (ret)
1193		goto out;
1194
1195	genpd->device_count++;
1196	genpd->max_off_time_changed = true;
1197
1198	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1199
1200 out:
1201	genpd_unlock(genpd);
1202
1203	if (ret)
1204		genpd_free_dev_data(dev, gpd_data);
1205	else
1206		dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1207
1208	return ret;
1209}
1210
1211/**
1212 * __pm_genpd_add_device - Add a device to an I/O PM domain.
1213 * @genpd: PM domain to add the device to.
1214 * @dev: Device to be added.
1215 * @td: Set of PM QoS timing parameters to attach to the device.
1216 */
1217int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1218			  struct gpd_timing_data *td)
1219{
1220	int ret;
1221
1222	mutex_lock(&gpd_list_lock);
1223	ret = genpd_add_device(genpd, dev, td);
1224	mutex_unlock(&gpd_list_lock);
1225
1226	return ret;
1227}
1228EXPORT_SYMBOL_GPL(__pm_genpd_add_device);
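/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * a platform driver that owns both a domain and a device could bind them
 * like this, assuming "my_pd" was already set up with pm_genpd_init():
 *
 *	ret = __pm_genpd_add_device(&my_pd, &pdev->dev, NULL);
 *	if (ret)
 *		dev_err(&pdev->dev, "failed to join my_pd: %d\n", ret);
 *
 * Passing NULL for @td leaves the device with the default timing data set
 * up by genpd_alloc_dev_data() above.
 */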
1229
1230static int genpd_remove_device(struct generic_pm_domain *genpd,
1231			       struct device *dev)
1232{
1233	struct generic_pm_domain_data *gpd_data;
1234	struct pm_domain_data *pdd;
1235	int ret = 0;
1236
1237	dev_dbg(dev, "%s()\n", __func__);
1238
1239	pdd = dev->power.subsys_data->domain_data;
1240	gpd_data = to_gpd_data(pdd);
1241	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1242
1243	genpd_lock(genpd);
1244
1245	if (genpd->prepared_count > 0) {
1246		ret = -EAGAIN;
1247		goto out;
1248	}
1249
1250	genpd->device_count--;
1251	genpd->max_off_time_changed = true;
1252
1253	if (genpd->detach_dev)
1254		genpd->detach_dev(genpd, dev);
1255
1256	list_del_init(&pdd->list_node);
1257
1258	genpd_unlock(genpd);
1259
1260	genpd_free_dev_data(dev, gpd_data);
1261
1262	return 0;
1263
1264 out:
1265	genpd_unlock(genpd);
1266	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1267
1268	return ret;
1269}
1270
1271/**
1272 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1273 * @genpd: PM domain to remove the device from.
1274 * @dev: Device to be removed.
1275 */
1276int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1277			   struct device *dev)
1278{
1279	if (!genpd || genpd != genpd_lookup_dev(dev))
1280		return -EINVAL;
1281
1282	return genpd_remove_device(genpd, dev);
1283}
1284EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1285
1286static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1287			       struct generic_pm_domain *subdomain)
1288{
1289	struct gpd_link *link, *itr;
1290	int ret = 0;
1291
1292	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1293	    || genpd == subdomain)
1294		return -EINVAL;
1295
1296	/*
1297	 * If the domain can be powered on/off in an IRQ safe
1298	 * context, ensure that the subdomain can also be
1299	 * powered on/off in that context.
1300	 */
1301	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1302		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1303				genpd->name, subdomain->name);
1304		return -EINVAL;
1305	}
1306
1307	link = kzalloc(sizeof(*link), GFP_KERNEL);
1308	if (!link)
1309		return -ENOMEM;
1310
1311	genpd_lock(subdomain);
1312	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1313
1314	if (genpd->status == GPD_STATE_POWER_OFF
1315	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
1316		ret = -EINVAL;
1317		goto out;
1318	}
1319
1320	list_for_each_entry(itr, &genpd->master_links, master_node) {
1321		if (itr->slave == subdomain && itr->master == genpd) {
1322			ret = -EINVAL;
1323			goto out;
1324		}
1325	}
1326
1327	link->master = genpd;
1328	list_add_tail(&link->master_node, &genpd->master_links);
1329	link->slave = subdomain;
1330	list_add_tail(&link->slave_node, &subdomain->slave_links);
1331	if (subdomain->status != GPD_STATE_POWER_OFF)
1332		genpd_sd_counter_inc(genpd);
1333
1334 out:
1335	genpd_unlock(genpd);
1336	genpd_unlock(subdomain);
1337	if (ret)
1338		kfree(link);
1339	return ret;
1340}
1341
1342/**
1343 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1344 * @genpd: Master PM domain to add the subdomain to.
1345 * @subdomain: Subdomain to be added.
1346 */
1347int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1348			   struct generic_pm_domain *subdomain)
1349{
1350	int ret;
1351
1352	mutex_lock(&gpd_list_lock);
1353	ret = genpd_add_subdomain(genpd, subdomain);
1354	mutex_unlock(&gpd_list_lock);
1355
1356	return ret;
1357}
1358EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
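/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * nesting one initialized domain below another:
 *
 *	ret = pm_genpd_add_subdomain(&soc_pd, &gpu_pd);
 *
 * Afterwards, powering on "gpu_pd" pins "soc_pd" through the subdomain
 * counter, and genpd_poweroff() refuses to power "soc_pd" off while
 * "gpu_pd" is still on.
 */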
1359
1360/**
1361 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1362 * @genpd: Master PM domain to remove the subdomain from.
1363 * @subdomain: Subdomain to be removed.
1364 */
1365int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1366			      struct generic_pm_domain *subdomain)
1367{
1368	struct gpd_link *link;
1369	int ret = -EINVAL;
1370
1371	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1372		return -EINVAL;
1373
1374	genpd_lock(subdomain);
1375	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1376
1377	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
1378		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
1379			subdomain->name);
1380		ret = -EBUSY;
1381		goto out;
1382	}
1383
1384	list_for_each_entry(link, &genpd->master_links, master_node) {
1385		if (link->slave != subdomain)
1386			continue;
1387
1388		list_del(&link->master_node);
1389		list_del(&link->slave_node);
1390		kfree(link);
1391		if (subdomain->status != GPD_STATE_POWER_OFF)
1392			genpd_sd_counter_dec(genpd);
1393
1394		ret = 0;
1395		break;
1396	}
1397
1398out:
1399	genpd_unlock(genpd);
1400	genpd_unlock(subdomain);
1401
1402	return ret;
1403}
1404EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1405
1406static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1407{
1408	struct genpd_power_state *state;
1409
1410	state = kzalloc(sizeof(*state), GFP_KERNEL);
1411	if (!state)
1412		return -ENOMEM;
1413
1414	genpd->states = state;
1415	genpd->state_count = 1;
1416	genpd->free = state;
1417
1418	return 0;
1419}
1420
1421static void genpd_lock_init(struct generic_pm_domain *genpd)
1422{
1423	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1424		spin_lock_init(&genpd->slock);
1425		genpd->lock_ops = &genpd_spin_ops;
1426	} else {
1427		mutex_init(&genpd->mlock);
1428		genpd->lock_ops = &genpd_mtx_ops;
1429	}
1430}
1431
1432/**
1433 * pm_genpd_init - Initialize a generic I/O PM domain object.
1434 * @genpd: PM domain object to initialize.
1435 * @gov: PM domain governor to associate with the domain (may be NULL).
1436 * @is_off: Initial power state of the domain (true means powered off).
1437 *
1438 * Returns 0 on successful initialization, else a negative error code.
1439 */
1440int pm_genpd_init(struct generic_pm_domain *genpd,
1441		  struct dev_power_governor *gov, bool is_off)
1442{
1443	int ret;
1444
1445	if (IS_ERR_OR_NULL(genpd))
1446		return -EINVAL;
1447
1448	INIT_LIST_HEAD(&genpd->master_links);
1449	INIT_LIST_HEAD(&genpd->slave_links);
1450	INIT_LIST_HEAD(&genpd->dev_list);
1451	genpd_lock_init(genpd);
1452	genpd->gov = gov;
1453	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1454	atomic_set(&genpd->sd_count, 0);
1455	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1456	genpd->device_count = 0;
1457	genpd->max_off_time_ns = -1;
1458	genpd->max_off_time_changed = true;
1459	genpd->provider = NULL;
1460	genpd->has_provider = false;
1461	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1462	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1463	genpd->domain.ops.prepare = pm_genpd_prepare;
1464	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1465	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1466	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1467	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1468	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1469	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1470	genpd->domain.ops.complete = pm_genpd_complete;
1471
1472	if (genpd->flags & GENPD_FLAG_PM_CLK) {
1473		genpd->dev_ops.stop = pm_clk_suspend;
1474		genpd->dev_ops.start = pm_clk_resume;
1475	}
1476
1477	/* Use only one "off" state if there were no states declared */
1478	if (genpd->state_count == 0) {
1479		ret = genpd_set_default_power_state(genpd);
1480		if (ret)
1481			return ret;
1482	}
1483
1484	mutex_lock(&gpd_list_lock);
1485	list_add(&genpd->gpd_list_node, &gpd_list);
1486	mutex_unlock(&gpd_list_lock);
1487
1488	return 0;
1489}
1490EXPORT_SYMBOL_GPL(pm_genpd_init);
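/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * a minimal domain with its own power_on/power_off callbacks, registered
 * as initially powered off:
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 *
 * Passing NULL for @gov selects no governor; setting GENPD_FLAG_IRQ_SAFE
 * in my_pd.flags before the call would make genpd_lock_init() above pick
 * the spinlock-based lock_ops instead of the mutex-based ones.
 */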
1491
1492static int genpd_remove(struct generic_pm_domain *genpd)
1493{
1494	struct gpd_link *l, *link;
1495
1496	if (IS_ERR_OR_NULL(genpd))
1497		return -EINVAL;
1498
1499	genpd_lock(genpd);
1500
1501	if (genpd->has_provider) {
1502		genpd_unlock(genpd);
1503		pr_err("Provider present, unable to remove %s\n", genpd->name);
1504		return -EBUSY;
1505	}
1506
1507	if (!list_empty(&genpd->master_links) || genpd->device_count) {
1508		genpd_unlock(genpd);
1509		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
1510		return -EBUSY;
1511	}
1512
1513	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
1514		list_del(&link->master_node);
1515		list_del(&link->slave_node);
1516		kfree(link);
1517	}
1518
1519	list_del(&genpd->gpd_list_node);
1520	genpd_unlock(genpd);
1521	cancel_work_sync(&genpd->power_off_work);
1522	kfree(genpd->free);
1523	pr_debug("%s: removed %s\n", __func__, genpd->name);
1524
1525	return 0;
1526}
1527
1528/**
1529 * pm_genpd_remove - Remove a generic I/O PM domain
1530 * @genpd: Pointer to PM domain that is to be removed.
1531 *
1532 * To remove the PM domain, this function:
1533 *  - Removes the PM domain as a subdomain to any parent domains,
1534 *    if it was added.
1535 *  - Removes the PM domain from the list of registered PM domains.
1536 *
1537 * The PM domain will only be removed, if the associated provider has
1538 * been removed, it is not a parent to any other PM domain and has no
1539 * devices associated with it.
1540 */
1541int pm_genpd_remove(struct generic_pm_domain *genpd)
1542{
1543	int ret;
1544
1545	mutex_lock(&gpd_list_lock);
1546	ret = genpd_remove(genpd);
1547	mutex_unlock(&gpd_list_lock);
1548
1549	return ret;
1550}
1551EXPORT_SYMBOL_GPL(pm_genpd_remove);
1552
1553#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1554
1555typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
1556						   void *data);
1557
1558/*
1559 * Device Tree based PM domain providers.
1560 *
1561 * The code below implements generic device tree based PM domain providers that
1562 * bind device tree nodes with generic PM domains registered in the system.
1563 *
1564 * Any driver that registers generic PM domains and needs to support binding of
1565 * devices to these domains is supposed to register a PM domain provider, which
1566 * maps a PM domain specifier retrieved from the device tree to a PM domain.
1567 *
1568 * Two simple mapping functions have been provided for convenience:
1569 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1570 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
1571 *    index.
1572 */
1573
1574/**
1575 * struct of_genpd_provider - PM domain provider registration structure
1576 * @link: Entry in global list of PM domain providers
1577 * @node: Pointer to device tree node of PM domain provider
1578 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1579 *         into a PM domain.
1580 * @data: context pointer to be passed into @xlate callback
1581 */
1582struct of_genpd_provider {
1583	struct list_head link;
1584	struct device_node *node;
1585	genpd_xlate_t xlate;
1586	void *data;
1587};
1588
1589/* List of registered PM domain providers. */
1590static LIST_HEAD(of_genpd_providers);
1591/* Mutex to protect the list above. */
1592static DEFINE_MUTEX(of_genpd_mutex);
1593
1594/**
1595 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
1596 * @genpdspec: OF phandle args to map into a PM domain
1597 * @data: xlate function private data - pointer to struct generic_pm_domain
1598 *
1599 * This is a generic xlate function that can be used to model PM domains that
1600 * have their own device tree nodes. The private data of xlate function needs
1601 * to be a valid pointer to struct generic_pm_domain.
1602 */
1603static struct generic_pm_domain *genpd_xlate_simple(
1604					struct of_phandle_args *genpdspec,
1605					void *data)
1606{
1607	if (genpdspec->args_count != 0)
1608		return ERR_PTR(-EINVAL);
1609	return data;
1610}
1611
1612/**
1613 * genpd_xlate_onecell() - Xlate function using a single index.
1614 * @genpdspec: OF phandle args to map into a PM domain
1615 * @data: xlate function private data - pointer to struct genpd_onecell_data
1616 *
1617 * This is a generic xlate function that can be used to model simple PM domain
1618 * controllers that have one device tree node and provide multiple PM domains.
1619 * A single cell is used as an index into an array of PM domains specified in
1620 * the genpd_onecell_data struct when registering the provider.
1621 */
1622static struct generic_pm_domain *genpd_xlate_onecell(
1623					struct of_phandle_args *genpdspec,
1624					void *data)
1625{
1626	struct genpd_onecell_data *genpd_data = data;
1627	unsigned int idx = genpdspec->args[0];
1628
1629	if (genpdspec->args_count != 1)
1630		return ERR_PTR(-EINVAL);
1631
1632	if (idx >= genpd_data->num_domains) {
1633		pr_err("%s: invalid domain index %u\n", __func__, idx);
1634		return ERR_PTR(-EINVAL);
1635	}
1636
1637	if (!genpd_data->domains[idx])
1638		return ERR_PTR(-ENOENT);
1639
1640	return genpd_data->domains[idx];
1641}
1642
1643/**
1644 * genpd_add_provider() - Register a PM domain provider for a node
1645 * @np: Device node pointer associated with the PM domain provider.
1646 * @xlate: Callback for decoding PM domain from phandle arguments.
1647 * @data: Context pointer for @xlate callback.
1648 */
1649static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
1650			      void *data)
1651{
1652	struct of_genpd_provider *cp;
1653
1654	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
1655	if (!cp)
1656		return -ENOMEM;
1657
1658	cp->node = of_node_get(np);
1659	cp->data = data;
1660	cp->xlate = xlate;
1661
1662	mutex_lock(&of_genpd_mutex);
1663	list_add(&cp->link, &of_genpd_providers);
1664	mutex_unlock(&of_genpd_mutex);
1665	pr_debug("Added domain provider from %s\n", np->full_name);
1666
1667	return 0;
1668}
1669
1670/**
1671 * of_genpd_add_provider_simple() - Register a simple PM domain provider
1672 * @np: Device node pointer associated with the PM domain provider.
1673 * @genpd: Pointer to PM domain associated with the PM domain provider.
1674 */
1675int of_genpd_add_provider_simple(struct device_node *np,
1676				 struct generic_pm_domain *genpd)
1677{
1678	int ret = -EINVAL;
1679
1680	if (!np || !genpd)
1681		return -EINVAL;
1682
1683	mutex_lock(&gpd_list_lock);
1684
1685	if (pm_genpd_present(genpd))
1686		ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
1687
1688	if (!ret) {
1689		genpd->provider = &np->fwnode;
1690		genpd->has_provider = true;
1691	}
1692
1693	mutex_unlock(&gpd_list_lock);
1694
1695	return ret;
1696}
1697EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
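/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * a simple provider pairs a device tree node such as
 *
 *	my_pd_node: power-controller@12340000 {
 *		compatible = "vendor,my-power-controller";
 *		#power-domain-cells = <0>;
 *	};
 *
 * with a registration call from the driver that initialized the domain:
 *
 *	ret = of_genpd_add_provider_simple(np, &my_pd);
 *
 * The empty specifier matches genpd_xlate_simple() above, which rejects
 * any extra cells.
 */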
1698
1699/**
1700 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
1701 * @np: Device node pointer associated with the PM domain provider.
1702 * @data: Pointer to the data associated with the PM domain provider.
1703 */
1704int of_genpd_add_provider_onecell(struct device_node *np,
1705				  struct genpd_onecell_data *data)
1706{
1707	unsigned int i;
1708	int ret = -EINVAL;
1709
1710	if (!np || !data)
1711		return -EINVAL;
1712
1713	mutex_lock(&gpd_list_lock);
1714
1715	for (i = 0; i < data->num_domains; i++) {
1716		if (!data->domains[i])
1717			continue;
1718		if (!pm_genpd_present(data->domains[i]))
1719			goto error;
1720
1721		data->domains[i]->provider = &np->fwnode;
1722		data->domains[i]->has_provider = true;
1723	}
1724
1725	ret = genpd_add_provider(np, genpd_xlate_onecell, data);
1726	if (ret < 0)
1727		goto error;
1728
1729	mutex_unlock(&gpd_list_lock);
1730
1731	return 0;
1732
1733error:
1734	while (i--) {
1735		if (!data->domains[i])
1736			continue;
1737		data->domains[i]->provider = NULL;
1738		data->domains[i]->has_provider = false;
1739	}
1740
1741	mutex_unlock(&gpd_list_lock);
1742
1743	return ret;
1744}
1745EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
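/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * a onecell provider exposes several domains behind one node that sets
 * "#power-domain-cells = <1>", the single cell indexing into @domains:
 *
 *	static struct generic_pm_domain *my_domains[] = {
 *		&my_cpu_pd, &my_gpu_pd,
 *	};
 *	static struct genpd_onecell_data my_onecell_data = {
 *		.domains = my_domains,
 *		.num_domains = ARRAY_SIZE(my_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &my_onecell_data);
 *
 * A consumer then writes "power-domains = <&provider 1>;", which
 * genpd_xlate_onecell() above resolves to my_domains[1].
 */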
1746
1747/**
1748 * of_genpd_del_provider() - Remove a previously registered PM domain provider
1749 * @np: Device node pointer associated with the PM domain provider
1750 */
1751void of_genpd_del_provider(struct device_node *np)
1752{
1753	struct of_genpd_provider *cp;
1754	struct generic_pm_domain *gpd;
1755
1756	mutex_lock(&gpd_list_lock);
1757	mutex_lock(&of_genpd_mutex);
1758	list_for_each_entry(cp, &of_genpd_providers, link) {
1759		if (cp->node == np) {
1760			/*
1761			 * For each PM domain associated with the
1762			 * provider, set the 'has_provider' to false
1763			 * so that the PM domain can be safely removed.
1764			 */
1765			list_for_each_entry(gpd, &gpd_list, gpd_list_node)
1766				if (gpd->provider == &np->fwnode)
1767					gpd->has_provider = false;
1768
1769			list_del(&cp->link);
1770			of_node_put(cp->node);
1771			kfree(cp);
1772			break;
1773		}
1774	}
1775	mutex_unlock(&of_genpd_mutex);
1776	mutex_unlock(&gpd_list_lock);
1777}
1778EXPORT_SYMBOL_GPL(of_genpd_del_provider);
1779
1780/**
1781 * genpd_get_from_provider() - Look-up PM domain
1782 * @genpdspec: OF phandle args to use for look-up
1783 *
1784 * Looks for a PM domain provider under the node specified by @genpdspec and if
1785 * found, uses xlate function of the provider to map phandle args to a PM
1786 * domain.
1787 *
1788 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
1789 * on failure.
1790 */
1791static struct generic_pm_domain *genpd_get_from_provider(
1792					struct of_phandle_args *genpdspec)
1793{
1794	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
1795	struct of_genpd_provider *provider;
1796
1797	if (!genpdspec)
1798		return ERR_PTR(-EINVAL);
1799
1800	mutex_lock(&of_genpd_mutex);
1801
1802	/* Check if we have such a provider in our array */
1803	list_for_each_entry(provider, &of_genpd_providers, link) {
1804		if (provider->node == genpdspec->np)
1805			genpd = provider->xlate(genpdspec, provider->data);
1806		if (!IS_ERR(genpd))
1807			break;
1808	}
1809
1810	mutex_unlock(&of_genpd_mutex);
1811
1812	return genpd;
1813}
1814
1815/**
1816 * of_genpd_add_device() - Add a device to an I/O PM domain
1817 * @genpdspec: OF phandle args to use for look-up PM domain
1818 * @dev: Device to be added.
1819 *
1820 * Looks up an I/O PM domain based upon the phandle args provided and adds
1821 * the device to the PM domain. Returns a negative error code on failure.
1822 */
1823int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
1824{
1825	struct generic_pm_domain *genpd;
1826	int ret;
1827
1828	mutex_lock(&gpd_list_lock);
1829
1830	genpd = genpd_get_from_provider(genpdspec);
1831	if (IS_ERR(genpd)) {
1832		ret = PTR_ERR(genpd);
1833		goto out;
1834	}
1835
1836	ret = genpd_add_device(genpd, dev, NULL);
1837
1838out:
1839	mutex_unlock(&gpd_list_lock);
1840
1841	return ret;
1842}
1843EXPORT_SYMBOL_GPL(of_genpd_add_device);
1844
1845/**
1846 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1847 * @parent_spec: OF phandle args to use for parent PM domain look-up
1848 * @subdomain_spec: OF phandle args to use for subdomain look-up
1849 *
1850 * Looks up a parent PM domain and a subdomain based upon the phandle args
1851 * provided and adds the subdomain to the parent PM domain. Returns a
1852 * negative error code on failure.
1853 */
1854int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
1855			   struct of_phandle_args *subdomain_spec)
1856{
1857	struct generic_pm_domain *parent, *subdomain;
1858	int ret;
1859
1860	mutex_lock(&gpd_list_lock);
1861
1862	parent = genpd_get_from_provider(parent_spec);
1863	if (IS_ERR(parent)) {
1864		ret = PTR_ERR(parent);
1865		goto out;
1866	}
1867
1868	subdomain = genpd_get_from_provider(subdomain_spec);
1869	if (IS_ERR(subdomain)) {
1870		ret = PTR_ERR(subdomain);
1871		goto out;
1872	}
1873
1874	ret = genpd_add_subdomain(parent, subdomain);
1875
1876out:
1877	mutex_unlock(&gpd_list_lock);
1878
1879	return ret;
1880}
1881EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
1882
1883/**
1884 * of_genpd_remove_last - Remove the last PM domain registered for a provider
1885 * @np: Pointer to the device node associated with the provider
1886 *
1887 * Find the last PM domain that was added by a particular provider and
1888 * remove this PM domain from the list of PM domains. The provider is
1889 * identified by the device node that is passed. The PM domain will only
1890 * be removed if the provider associated with the domain has been
1891 * removed.
1892 *
1893 * Returns a valid pointer to struct generic_pm_domain on success or
1894 * ERR_PTR() on failure.
1895 */
1896struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
1897{
1898	struct generic_pm_domain *gpd, *genpd = ERR_PTR(-ENOENT);
1899	int ret;
1900
1901	if (IS_ERR_OR_NULL(np))
1902		return ERR_PTR(-EINVAL);
1903
1904	mutex_lock(&gpd_list_lock);
1905	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1906		if (gpd->provider == &np->fwnode) {
1907			ret = genpd_remove(gpd);
1908			genpd = ret ? ERR_PTR(ret) : gpd;
1909			break;
1910		}
1911	}
1912	mutex_unlock(&gpd_list_lock);
1913
1914	return genpd;
1915}
1916EXPORT_SYMBOL_GPL(of_genpd_remove_last);
1917
1918/**
1919 * genpd_dev_pm_detach - Detach a device from its PM domain.
1920 * @dev: Device to detach.
1921 * @power_off: Currently not used
1922 *
1923 * Try to locate a corresponding generic PM domain, which the device was
1924 * attached to previously. If such is found, the device is detached from it.
1925 */
1926static void genpd_dev_pm_detach(struct device *dev, bool power_off)
1927{
1928	struct generic_pm_domain *pd;
1929	unsigned int i;
1930	int ret = 0;
1931
1932	pd = dev_to_genpd(dev);
1933	if (IS_ERR(pd))
1934		return;
1935
1936	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
1937
1938	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
1939		ret = genpd_remove_device(pd, dev);
1940		if (ret != -EAGAIN)
1941			break;
1942
1943		mdelay(i);
1944		cond_resched();
1945	}
1946
1947	if (ret < 0) {
1948		dev_err(dev, "failed to remove from PM domain %s: %d\n",
1949			pd->name, ret);
1950		return;
1951	}
1952
1953	/* Check if PM domain can be powered off after removing this device. */
1954	genpd_queue_power_off_work(pd);
1955}
1956
1957static void genpd_dev_pm_sync(struct device *dev)
1958{
1959	struct generic_pm_domain *pd;
1960
1961	pd = dev_to_genpd(dev);
1962	if (IS_ERR(pd))
1963		return;
1964
1965	genpd_queue_power_off_work(pd);
1966}
1967
1968/**
1969 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
1970 * @dev: Device to attach.
1971 *
1972 * Parse the device's OF node to find a PM domain specifier. If one is
1973 * found, attach the device to the retrieved pm_domain ops.
1974 *
1975 * Both generic and legacy Samsung-specific DT bindings are supported to keep
1976 * backwards compatibility with existing DTBs.
1977 *
1978 * Returns 0 when the PM domain is successfully attached, or a negative
1979 * error code otherwise. Note that if a power-domain exists for the device,
1980 * but it cannot be found or turned on, -EPROBE_DEFER is returned to ensure
1981 * that the device is not probed and the attempt is retried later.
1982 */
1983int genpd_dev_pm_attach(struct device *dev)
1984{
1985	struct of_phandle_args pd_args;
1986	struct generic_pm_domain *pd;
1987	unsigned int i;
1988	int ret;
1989
1990	if (!dev->of_node)
1991		return -ENODEV;
1992
1993	if (dev->pm_domain)
1994		return -EEXIST;
1995
1996	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
1997					"#power-domain-cells", 0, &pd_args);
1998	if (ret < 0) {
1999		if (ret != -ENOENT)
2000			return ret;
2001
2002		/*
2003		 * Try legacy Samsung-specific bindings
2004		 * (for backwards compatibility of DT ABI)
2005		 */
2006		pd_args.args_count = 0;
2007		pd_args.np = of_parse_phandle(dev->of_node,
2008						"samsung,power-domain", 0);
2009		if (!pd_args.np)
2010			return -ENOENT;
2011	}
2012
2013	mutex_lock(&gpd_list_lock);
2014	pd = genpd_get_from_provider(&pd_args);
2015	of_node_put(pd_args.np);
2016	if (IS_ERR(pd)) {
2017		mutex_unlock(&gpd_list_lock);
2018		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2019			__func__, PTR_ERR(pd));
2020		return -EPROBE_DEFER;
2021	}
2022
2023	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2024
2025	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2026		ret = genpd_add_device(pd, dev, NULL);
2027		if (ret != -EAGAIN)
2028			break;
2029
2030		mdelay(i);
2031		cond_resched();
2032	}
2033	mutex_unlock(&gpd_list_lock);
2034
2035	if (ret < 0) {
2036		if (ret != -EPROBE_DEFER)
2037			dev_err(dev, "failed to add to PM domain %s: %d\n",
2038				pd->name, ret);
2039		goto out;
2040	}
2041
2042	dev->pm_domain->detach = genpd_dev_pm_detach;
2043	dev->pm_domain->sync = genpd_dev_pm_sync;
2044
2045	genpd_lock(pd);
2046	ret = genpd_poweron(pd, 0);
2047	genpd_unlock(pd);
2048out:
2049	return ret ? -EPROBE_DEFER : 0;
2050}
2051EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
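/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * with the generic binding, a consumer device node references its domain
 * provider like this:
 *
 *	gpu@10000000 {
 *		compatible = "vendor,my-gpu";
 *		power-domains = <&my_pd_node>;
 *	};
 *
 * Bus code typically reaches genpd_dev_pm_attach() via
 * dev_pm_domain_attach() at probe time; the function then parses
 * "power-domains", looks up the provider and adds the device to the
 * returned domain.
 */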
2052
2053static const struct of_device_id idle_state_match[] = {
2054	{ .compatible = "domain-idle-state", },
2055	{ }
2056};
2057
2058static int genpd_parse_state(struct genpd_power_state *genpd_state,
2059				    struct device_node *state_node)
2060{
2061	int err;
2062	u32 residency;
2063	u32 entry_latency, exit_latency;
2064	const struct of_device_id *match_id;
2065
2066	match_id = of_match_node(idle_state_match, state_node);
2067	if (!match_id)
2068		return -EINVAL;
2069
2070	err = of_property_read_u32(state_node, "entry-latency-us",
2071						&entry_latency);
2072	if (err) {
2073		pr_debug(" * %s missing entry-latency-us property\n",
2074						state_node->full_name);
2075		return -EINVAL;
2076	}
2077
2078	err = of_property_read_u32(state_node, "exit-latency-us",
2079						&exit_latency);
2080	if (err) {
2081		pr_debug(" * %s missing exit-latency-us property\n",
2082						state_node->full_name);
2083		return -EINVAL;
2084	}
2085
2086	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2087	if (!err)
2088		genpd_state->residency_ns = 1000 * residency;
2089
2090	genpd_state->power_on_latency_ns = 1000 * exit_latency;
2091	genpd_state->power_off_latency_ns = 1000 * entry_latency;
2092	genpd_state->fwnode = &state_node->fwnode;
2093
2094	return 0;
2095}
2096
2097/**
2098 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2099 *
2100 * @dn: The genpd device node
2101 * @states: The pointer to which the state array will be saved.
2102 * @n: The count of elements in the array returned from this function.
2103 *
2104 * Returns the idle states parsed from the OF node. The memory for the
2105 * states is allocated by this function, and it is the caller's
2106 * responsibility to free it after use.
2107 */
2108int of_genpd_parse_idle_states(struct device_node *dn,
2109			struct genpd_power_state **states, int *n)
2110{
2111	struct genpd_power_state *st;
2112	struct device_node *np;
2113	int i = 0;
2114	int err, ret;
2115	int count;
2116	struct of_phandle_iterator it;
2117
2118	count = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2119	if (count <= 0)
2120		return -EINVAL;
2121
2122	st = kcalloc(count, sizeof(*st), GFP_KERNEL);
2123	if (!st)
2124		return -ENOMEM;
2125
2126	/* Loop over the phandles until all the requested entries are found */
2127	of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) {
2128		np = it.node;
2129		ret = genpd_parse_state(&st[i++], np);
2130		if (ret) {
2131			pr_err("Parsing idle state node %s failed with err %d\n",
2132			       np->full_name, ret);
2134			of_node_put(np);
2135			kfree(st);
2136			return ret;
2137		}
2138	}
2139
2140	*n = count;
2141	*states = st;
2142
2143	return 0;
2144}
2145EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
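/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * a provider node listing one idle state that this function would parse:
 *
 *	my_pd_node: power-controller@12340000 {
 *		compatible = "vendor,my-power-controller";
 *		#power-domain-cells = <0>;
 *		domain-idle-states = <&PD_RET>;
 *	};
 *
 *	PD_RET: pd-retention {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <100>;
 *		exit-latency-us = <200>;
 *		min-residency-us = <1000>;
 *	};
 *
 * genpd_parse_state() above converts the latencies into
 * power_off_latency_ns/power_on_latency_ns and the residency into
 * residency_ns, each scaled to nanoseconds.
 */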
2146
2147#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2148
2149
2150/***        debugfs support        ***/
2151
2152#ifdef CONFIG_DEBUG_FS
2153#include <linux/pm.h>
2154#include <linux/device.h>
2155#include <linux/debugfs.h>
2156#include <linux/seq_file.h>
2157#include <linux/init.h>
2158#include <linux/kobject.h>
2159static struct dentry *pm_genpd_debugfs_dir;
2160
2161/*
2162 * TODO: This function is a slightly modified version of rtpm_status_show
2163 * from sysfs.c, so generalize it.
2164 */
2165static void rtpm_status_str(struct seq_file *s, struct device *dev)
2166{
2167	static const char * const status_lookup[] = {
2168		[RPM_ACTIVE] = "active",
2169		[RPM_RESUMING] = "resuming",
2170		[RPM_SUSPENDED] = "suspended",
2171		[RPM_SUSPENDING] = "suspending"
2172	};
2173	const char *p = "";
2174
2175	if (dev->power.runtime_error)
2176		p = "error";
2177	else if (dev->power.disable_depth)
2178		p = "unsupported";
2179	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2180		p = status_lookup[dev->power.runtime_status];
2181	else
2182		WARN_ON(1);
2183
2184	seq_puts(s, p);
2185}
2186
2187static int pm_genpd_summary_one(struct seq_file *s,
2188				struct generic_pm_domain *genpd)
2189{
2190	static const char * const status_lookup[] = {
2191		[GPD_STATE_ACTIVE] = "on",
2192		[GPD_STATE_POWER_OFF] = "off"
2193	};
2194	struct pm_domain_data *pm_data;
2195	const char *kobj_path;
2196	struct gpd_link *link;
2197	char state[16];
2198	int ret;
2199
2200	ret = genpd_lock_interruptible(genpd);
2201	if (ret)
2202		return -ERESTARTSYS;
2203
2204	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2205		goto exit;
2206	if (genpd->status == GPD_STATE_POWER_OFF)
2207		snprintf(state, sizeof(state), "%s-%u",
2208			 status_lookup[genpd->status], genpd->state_idx);
2209	else
2210		snprintf(state, sizeof(state), "%s",
2211			 status_lookup[genpd->status]);
2212	seq_printf(s, "%-30s  %-15s ", genpd->name, state);
2213
2214	/*
2215	 * Modifications on the list require holding locks on both
2216	 * master and slave, so we are safe.
2217	 * Also genpd->name is immutable.
2218	 */
2219	list_for_each_entry(link, &genpd->master_links, master_node) {
2220		seq_printf(s, "%s", link->slave->name);
2221		if (!list_is_last(&link->master_node, &genpd->master_links))
2222			seq_puts(s, ", ");
2223	}
2224
2225	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2226		kobj_path = kobject_get_path(&pm_data->dev->kobj,
2227				genpd_is_irq_safe(genpd) ?
2228				GFP_ATOMIC : GFP_KERNEL);
2229		if (kobj_path == NULL)
2230			continue;
2231
2232		seq_printf(s, "\n    %-50s  ", kobj_path);
2233		rtpm_status_str(s, pm_data->dev);
2234		kfree(kobj_path);
2235	}
2236
2237	seq_puts(s, "\n");
2238exit:
2239	genpd_unlock(genpd);
2240
2241	return 0;
2242}
2243
2244static int pm_genpd_summary_show(struct seq_file *s, void *data)
2245{
2246	struct generic_pm_domain *genpd;
2247	int ret = 0;
2248
2249	seq_puts(s, "domain                          status          slaves\n");
2250	seq_puts(s, "    /device                                             runtime status\n");
2251	seq_puts(s, "----------------------------------------------------------------------\n");
2252
2253	ret = mutex_lock_interruptible(&gpd_list_lock);
2254	if (ret)
2255		return -ERESTARTSYS;
2256
2257	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2258		ret = pm_genpd_summary_one(s, genpd);
2259		if (ret)
2260			break;
2261	}
2262	mutex_unlock(&gpd_list_lock);
2263
2264	return ret;
2265}
2266
2267static int pm_genpd_summary_open(struct inode *inode, struct file *file)
2268{
2269	return single_open(file, pm_genpd_summary_show, NULL);
2270}
2271
2272static const struct file_operations pm_genpd_summary_fops = {
2273	.open = pm_genpd_summary_open,
2274	.read = seq_read,
2275	.llseek = seq_lseek,
2276	.release = single_release,
2277};
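/*
 * Illustrative example (hypothetical domains and devices, not part of the
 * original file) of what pm_genpd_summary_show() above produces, given the
 * "%-30s  %-15s" format and the "off-%u" state string:
 *
 *	domain                          status          slaves
 *	    /device                                             runtime status
 *	----------------------------------------------------------------------
 *	soc-pd                          on              gpu-pd
 *	gpu-pd                          off-0
 *	    /devices/platform/10000000.gpu                      suspended
 */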
2278
2279static int __init pm_genpd_debug_init(void)
2280{
2281	struct dentry *d;
2282
2283	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2284
2285	if (!pm_genpd_debugfs_dir)
2286		return -ENOMEM;
2287
2288	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2289			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
2290	if (!d)
2291		return -ENOMEM;
2292
2293	return 0;
2294}
2295late_initcall(pm_genpd_debug_init);
2296
2297static void __exit pm_genpd_debug_exit(void)
2298{
2299	debugfs_remove_recursive(pm_genpd_debugfs_dir);
2300}
2301__exitcall(pm_genpd_debug_exit);
2302#endif /* CONFIG_DEBUG_FS */