// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	return mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_status_on(genpd)		(genpd->status == GPD_STATE_ACTIVE)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)

static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a no sleep domain,
	 * to indicate a suboptimal configuration for PM. For an always on
	 * domain this isn't the case, so don't warn.
	 */
	if (ret && !genpd_is_always_on(genpd))
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	ktime_t delta, now;

	now = ktime_get();
	delta = ktime_sub(now, genpd->accounting_time);

	/*
	 * If genpd->status is active, the domain has just come out of the
	 * "off" state, so account the elapsed time as idle time; otherwise
	 * account it as on time.
	 */
	if (genpd->status == GPD_STATE_ACTIVE) {
		int state_idx = genpd->state_idx;

		genpd->states[state_idx].idle_time =
			ktime_add(genpd->states[state_idx].idle_time, delta);
	} else {
		genpd->on_time = ktime_add(genpd->on_time, delta);
	}

	genpd->accounting_time = now;
}
#else
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is the same as the max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than the max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the master genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (the subdomain's performance
	 * state requirement towards the master domain) is different from
	 * link->slave->performance_state (the current performance state
	 * requirement of the devices/sub-domains of the subdomain) and so can
	 * have a different value.
	 *
	 * Note that we also take votes from powered-off sub-domains into
	 * account, as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *master;
	struct gpd_link *link;
	int master_state, ret;

	if (state == genpd->performance_state)
		return 0;

	/* Propagate to masters of genpd */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		master = link->master;

		if (!master->set_performance_state)
			continue;

		/* Find master's performance state */
		ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
							 master->opp_table,
							 state);
		if (unlikely(ret < 0))
			goto err;

		master_state = ret;

		genpd_lock_nested(master, depth + 1);

		link->prev_performance_state = link->performance_state;
		link->performance_state = master_state;
		master_state = _genpd_reeval_performance_state(master,
							       master_state);
		ret = _genpd_set_performance_state(master, master_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(master);

		if (ret)
			goto err;
	}

	ret = genpd->set_performance_state(genpd, state);
	if (ret)
		goto err;

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, let's roll back */
	list_for_each_entry_continue_reverse(link, &genpd->slave_links,
					     slave_node) {
		master = link->master;

		if (!master->set_performance_state)
			continue;

		genpd_lock_nested(master, depth + 1);

		master_state = link->prev_performance_state;
		link->performance_state = master_state;

		master_state = _genpd_reeval_performance_state(master,
							       master_state);
		if (_genpd_set_performance_state(master, master_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       master->name, master_state);
		}

		genpd_unlock(master);
	}

	return ret;
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when the
 *	   device doesn't have any performance state constraints left (and the
 *	   device then no longer participates in determining the target
 *	   performance state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	unsigned int prev;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (unlikely(!genpd->set_performance_state))
		return -EINVAL;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	genpd_lock(genpd);

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	prev = gpd_data->performance_state;
	gpd_data->performance_state = state;

	state = _genpd_reeval_performance_state(genpd, state);
	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev;

	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);

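/*
 * Illustrative sketch (not from this file): a consumer driver that has worked
 * out the performance level it needs, for instance from a required OPP, could
 * use the API above roughly as follows. "pstate" and do_work() are
 * hypothetical names used only for the example.
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, pstate);
 *	if (ret)
 *		return ret;
 *
 *	do_work(dev);
 *
 *	dev_pm_genpd_set_performance_state(dev, 0);	// no constraint left
 */
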
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		return ret;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		return 0;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
	    genpd_is_rpm_always_on(genpd) ||
	    atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_power_on() for the master yet after
		 * incrementing it. In that case genpd_power_on() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_power_on() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = _genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;
	genpd_update_accounting(genpd);

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_sd_counter_inc(master);

		genpd_lock_nested(master, depth + 1);
		ret = genpd_power_on(master, depth + 1);
		genpd_unlock(master);

		if (ret) {
			genpd_sd_counter_dec(master);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return ret;
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	time_start = 0;
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non-IRQ safe domain, which holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
		timed = false;
		goto out;
	}

	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	time_start = 0;
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) ||
		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

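/*
 * "pd_ignore_unused" is a kernel command-line parameter: when it is passed at
 * boot, the late_initcall below skips powering off otherwise-unused power
 * domains, which is handy while debugging platform bring-up.
 */
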
/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);

#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)

static bool genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

#endif

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_off(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_on(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}

	_genpd_power_on(genpd, false);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system). In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev,
			  const struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_is_active_wakeup(genpd);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to suspend.
 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (poweroff)
		ret = pm_generic_poweroff_noirq(dev);
	else
		ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			if (poweroff)
				pm_generic_restore_noirq(dev);
			else
				pm_generic_resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, false);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return pm_generic_resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_resume_noirq(dev);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev))
		ret = genpd_stop_dev(genpd, dev);

	return ret;
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_thaw_noirq(dev);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, true);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0)
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to genpd_sync_power_on(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;

	genpd_sync_power_on(genpd, true, 0);
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_restore_noirq(dev);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to power the domain off (true) or back on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, false, 0);
	} else {
		genpd_sync_power_on(genpd, false, 0);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);

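/*
 * Illustrative sketch (not from this file): a timekeeping driver whose device
 * must stay functional into the syscore phase could drop and restore power
 * around that window using the helpers above, roughly as follows. The
 * my_timer_suspend/my_timer_resume names and my_timer_dev pointer are
 * hypothetical.
 *
 *	static void my_timer_suspend(void)
 *	{
 *		pm_genpd_syscore_poweroff(my_timer_dev);
 *	}
 *
 *	static void my_timer_resume(void)
 *	{
 *		pm_genpd_syscore_poweron(my_timer_dev);
 *	}
 */
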
#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_lock_nested(master, depth + 1);
		genpd_update_cpumask(master, cpu, set, depth + 1);
		genpd_unlock(master);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
 out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
		     genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);

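/*
 * Illustrative sketch (not from this file): a platform driver that models a
 * parent domain with one child could link them as below, assuming both
 * domains have already been registered with pm_genpd_init(). The
 * pd_parent/pd_child variables are hypothetical.
 *
 *	ret = pm_genpd_add_subdomain(&pd_parent, &pd_child);
 *	if (ret)
 *		dev_err(dev, "failed to link power domains: %d\n", ret);
 */
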
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n",
			genpd->name, subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

static void genpd_free_default_power_state(struct genpd_power_state *states,
					   unsigned int state_count)
{
	kfree(states);
}

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free_states = genpd_free_default_power_state;

	return 0;
}

static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial power state of the domain (true if initially powered off).
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* Always-on domains must be powered on at initialization. */
	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
			!genpd_status_on(genpd))
		return -EINVAL;

	if (genpd_is_cpu_domain(genpd) &&
	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
		return -ENOMEM;

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret) {
			if (genpd_is_cpu_domain(genpd))
				free_cpumask_var(genpd->cpus);
			return ret;
		}
	} else if (!gov && genpd->state_count > 1) {
		pr_warn("%s: no governor for states\n", genpd->name);
	}

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);

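/*
 * Illustrative sketch (not from this file): a minimal SoC power domain driver
 * might register a domain and attach a device roughly like this. The my_pd
 * structure and my_pd_power_on/my_pd_power_off callbacks are hypothetical.
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);	// starts powered off
 *	if (ret)
 *		return ret;
 *
 *	ret = pm_genpd_add_device(&my_pd, dev);
 */
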
static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->master_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	cancel_work_sync(&genpd->power_off_work);
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	if (genpd->free_states)
		genpd->free_states(genpd->states, genpd->state_count);

	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed, if the associated provider has
 * been removed, it is not a parent to any other PM domain and has no
 * devices associated with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *	   into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}

1996/**
1997 * genpd_add_provider() - Register a PM domain provider for a node
1998 * @np: Device node pointer associated with the PM domain provider.
1999 * @xlate: Callback for decoding PM domain from phandle arguments.
2000 * @data: Context pointer for @xlate callback.
2001 */
2002static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2003 void *data)
2004{
2005 struct of_genpd_provider *cp;
2006
2007 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2008 if (!cp)
2009 return -ENOMEM;
2010
2011 cp->node = of_node_get(np);
2012 cp->data = data;
2013 cp->xlate = xlate;
2014
2015 mutex_lock(&of_genpd_mutex);
2016 list_add(&cp->link, &of_genpd_providers);
2017 mutex_unlock(&of_genpd_mutex);
2018 pr_debug("Added domain provider from %pOF\n", np);
2019
2020 return 0;
2021}
2022
2023/**
2024 * of_genpd_add_provider_simple() - Register a simple PM domain provider
2025 * @np: Device node pointer associated with the PM domain provider.
2026 * @genpd: Pointer to PM domain associated with the PM domain provider.
2027 */
2028int of_genpd_add_provider_simple(struct device_node *np,
2029 struct generic_pm_domain *genpd)
2030{
2031 int ret = -EINVAL;
2032
2033 if (!np || !genpd)
2034 return -EINVAL;
2035
2036 mutex_lock(&gpd_list_lock);
2037
2038 if (!genpd_present(genpd))
2039 goto unlock;
2040
2041 genpd->dev.of_node = np;
2042
2043 /* Parse genpd OPP table */
2044 if (genpd->set_performance_state) {
2045 ret = dev_pm_opp_of_add_table(&genpd->dev);
2046 if (ret) {
2047 dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
2048 ret);
2049 goto unlock;
2050 }
2051
2052 /*
2053 * Save table for faster processing while setting performance
2054 * state.
2055 */
2056 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2057 WARN_ON(!genpd->opp_table);
2058 }
2059
2060 ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2061 if (ret) {
2062 if (genpd->set_performance_state) {
2063 dev_pm_opp_put_opp_table(genpd->opp_table);
2064 dev_pm_opp_of_remove_table(&genpd->dev);
2065 }
2066
2067 goto unlock;
2068 }
2069
2070 genpd->provider = &np->fwnode;
2071 genpd->has_provider = true;
2072
2073unlock:
2074 mutex_unlock(&gpd_list_lock);
2075
2076 return ret;
2077}
2078EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
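
/*
 * Illustrative sketch (not part of the original code): a SoC driver with a
 * single PM domain per device tree node might register a simple provider
 * roughly as below. The "foo_pd" domain and its callbacks are made-up
 * names; error handling is omitted:
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	pm_genpd_init(&foo_pd, NULL, false);
 *	of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
 */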
2079
2080/**
2081 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2082 * @np: Device node pointer associated with the PM domain provider.
2083 * @data: Pointer to the data associated with the PM domain provider.
2084 */
2085int of_genpd_add_provider_onecell(struct device_node *np,
2086 struct genpd_onecell_data *data)
2087{
2088 struct generic_pm_domain *genpd;
2089 unsigned int i;
2090 int ret = -EINVAL;
2091
2092 if (!np || !data)
2093 return -EINVAL;
2094
2095 mutex_lock(&gpd_list_lock);
2096
2097 if (!data->xlate)
2098 data->xlate = genpd_xlate_onecell;
2099
2100 for (i = 0; i < data->num_domains; i++) {
2101 genpd = data->domains[i];
2102
2103 if (!genpd)
2104 continue;
2105 if (!genpd_present(genpd))
2106 goto error;
2107
2108 genpd->dev.of_node = np;
2109
2110 /* Parse genpd OPP table */
2111 if (genpd->set_performance_state) {
2112 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2113 if (ret) {
2114 dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
2115 i, ret);
2116 goto error;
2117 }
2118
2119 /*
2120 * Save table for faster processing while setting
2121 * performance state.
2122 */
2123 genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
2124 WARN_ON(!genpd->opp_table);
2125 }
2126
2127 genpd->provider = &np->fwnode;
2128 genpd->has_provider = true;
2129 }
2130
2131 ret = genpd_add_provider(np, data->xlate, data);
2132 if (ret < 0)
2133 goto error;
2134
2135 mutex_unlock(&gpd_list_lock);
2136
2137 return 0;
2138
2139error:
2140 while (i--) {
2141 genpd = data->domains[i];
2142
2143 if (!genpd)
2144 continue;
2145
2146 genpd->provider = NULL;
2147 genpd->has_provider = false;
2148
2149 if (genpd->set_performance_state) {
2150 dev_pm_opp_put_opp_table(genpd->opp_table);
2151 dev_pm_opp_of_remove_table(&genpd->dev);
2152 }
2153 }
2154
2155 mutex_unlock(&gpd_list_lock);
2156
2157 return ret;
2158}
2159EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
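
/*
 * Illustrative sketch (not part of the original code): a controller that
 * exposes several domains behind one node points each slot of a domain
 * array at an initialized domain, fills in a genpd_onecell_data and lets
 * genpd_xlate_onecell() do the index lookup. FOO_NR_DOMAINS and
 * foo_domains[] are made-up names; error handling is omitted:
 *
 *	static struct generic_pm_domain *foo_domains[FOO_NR_DOMAINS];
 *	static struct genpd_onecell_data foo_onecell_data = {
 *		.domains = foo_domains,
 *		.num_domains = FOO_NR_DOMAINS,
 *	};
 *
 *	for (i = 0; i < FOO_NR_DOMAINS; i++)
 *		pm_genpd_init(foo_domains[i], NULL, true);
 *	of_genpd_add_provider_onecell(pdev->dev.of_node, &foo_onecell_data);
 */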
2160
2161/**
2162 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2163 * @np: Device node pointer associated with the PM domain provider
2164 */
2165void of_genpd_del_provider(struct device_node *np)
2166{
2167 struct of_genpd_provider *cp, *tmp;
2168 struct generic_pm_domain *gpd;
2169
2170 mutex_lock(&gpd_list_lock);
2171 mutex_lock(&of_genpd_mutex);
2172 list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2173 if (cp->node == np) {
2174 /*
2175 * For each PM domain associated with the
2176 * provider, set the 'has_provider' to false
2177 * so that the PM domain can be safely removed.
2178 */
2179 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2180 if (gpd->provider == &np->fwnode) {
2181 gpd->has_provider = false;
2182
2183 if (!gpd->set_performance_state)
2184 continue;
2185
2186 dev_pm_opp_put_opp_table(gpd->opp_table);
2187 dev_pm_opp_of_remove_table(&gpd->dev);
2188 }
2189 }
2190
2191 list_del(&cp->link);
2192 of_node_put(cp->node);
2193 kfree(cp);
2194 break;
2195 }
2196 }
2197 mutex_unlock(&of_genpd_mutex);
2198 mutex_unlock(&gpd_list_lock);
2199}
2200EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2201
2202/**
2203 * genpd_get_from_provider() - Look-up PM domain
2204 * @genpdspec: OF phandle args to use for look-up
2205 *
2206 * Looks for a PM domain provider under the node specified by @genpdspec and if
2207 * found, uses xlate function of the provider to map phandle args to a PM
2208 * domain.
2209 *
2210 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2211 * on failure.
2212 */
2213static struct generic_pm_domain *genpd_get_from_provider(
2214 struct of_phandle_args *genpdspec)
2215{
2216 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2217 struct of_genpd_provider *provider;
2218
2219 if (!genpdspec)
2220 return ERR_PTR(-EINVAL);
2221
2222 mutex_lock(&of_genpd_mutex);
2223
2224 /* Check if we have such a provider in our array */
2225 list_for_each_entry(provider, &of_genpd_providers, link) {
2226 if (provider->node == genpdspec->np)
2227 genpd = provider->xlate(genpdspec, provider->data);
2228 if (!IS_ERR(genpd))
2229 break;
2230 }
2231
2232 mutex_unlock(&of_genpd_mutex);
2233
2234 return genpd;
2235}
2236
2237/**
2238 * of_genpd_add_device() - Add a device to an I/O PM domain
2239 * @genpdspec: OF phandle args to use for look-up PM domain
2240 * @dev: Device to be added.
2241 *
2242 * Looks-up an I/O PM domain based upon phandle args provided and adds
2243 * the device to the PM domain. Returns a negative error code on failure.
2244 */
2245int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2246{
2247 struct generic_pm_domain *genpd;
2248 int ret;
2249
2250 mutex_lock(&gpd_list_lock);
2251
2252 genpd = genpd_get_from_provider(genpdspec);
2253 if (IS_ERR(genpd)) {
2254 ret = PTR_ERR(genpd);
2255 goto out;
2256 }
2257
2258 ret = genpd_add_device(genpd, dev, dev);
2259
2260out:
2261 mutex_unlock(&gpd_list_lock);
2262
2263 return ret;
2264}
2265EXPORT_SYMBOL_GPL(of_genpd_add_device);
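
/*
 * Illustrative sketch (not part of the original code): a caller that wants
 * to attach a device to the first "power-domains" entry of its node can
 * obtain the specifier itself and pass it in (error handling omitted):
 *
 *	struct of_phandle_args pd_args;
 *
 *	of_parse_phandle_with_args(dev->of_node, "power-domains",
 *				   "#power-domain-cells", 0, &pd_args);
 *	of_genpd_add_device(&pd_args, dev);
 *	of_node_put(pd_args.np);
 */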
2266
2267/**
2268 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2269 * @parent_spec: OF phandle args to use for parent PM domain look-up
2270 * @subdomain_spec: OF phandle args to use for subdomain look-up
2271 *
2272 * Looks-up a parent PM domain and subdomain based upon phandle args
2273 * provided and adds the subdomain to the parent PM domain. Returns a
2274 * negative error code on failure.
2275 */
2276int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2277 struct of_phandle_args *subdomain_spec)
2278{
2279 struct generic_pm_domain *parent, *subdomain;
2280 int ret;
2281
2282 mutex_lock(&gpd_list_lock);
2283
2284 parent = genpd_get_from_provider(parent_spec);
2285 if (IS_ERR(parent)) {
2286 ret = PTR_ERR(parent);
2287 goto out;
2288 }
2289
2290 subdomain = genpd_get_from_provider(subdomain_spec);
2291 if (IS_ERR(subdomain)) {
2292 ret = PTR_ERR(subdomain);
2293 goto out;
2294 }
2295
2296 ret = genpd_add_subdomain(parent, subdomain);
2297
2298out:
2299 mutex_unlock(&gpd_list_lock);
2300
2301 return ret;
2302}
2303EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
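
/*
 * Illustrative sketch (not part of the original code): with specifiers for
 * both domains already parsed (as in the of_genpd_add_device() example
 * above), wiring the hierarchy is a single call:
 *
 *	of_genpd_add_subdomain(&parent_args, &subdomain_args);
 */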
2304
2305/**
2306 * of_genpd_remove_last - Remove the last PM domain registered for a provider
2307 * @np: Pointer to the device node associated with the PM domain provider
2308 *
2309 * Find the last PM domain that was added by a particular provider and
2310 * remove this PM domain from the list of PM domains. The provider is
2311 * identified by the device node that is passed. The PM domain will
2312 * only be removed if the provider associated with the domain has been
2313 * removed.
2314 *
2315 * Returns a valid pointer to struct generic_pm_domain on success or
2316 * ERR_PTR() on failure.
2317 */
2318struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2319{
2320 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2321 int ret;
2322
2323 if (IS_ERR_OR_NULL(np))
2324 return ERR_PTR(-EINVAL);
2325
2326 mutex_lock(&gpd_list_lock);
2327 list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2328 if (gpd->provider == &np->fwnode) {
2329 ret = genpd_remove(gpd);
2330 genpd = ret ? ERR_PTR(ret) : gpd;
2331 break;
2332 }
2333 }
2334 mutex_unlock(&gpd_list_lock);
2335
2336 return genpd;
2337}
2338EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2339
2340static void genpd_release_dev(struct device *dev)
2341{
2342 of_node_put(dev->of_node);
2343 kfree(dev);
2344}
2345
2346static struct bus_type genpd_bus_type = {
2347 .name = "genpd",
2348};
2349
2350/**
2351 * genpd_dev_pm_detach - Detach a device from its PM domain.
2352 * @dev: Device to detach.
2353 * @power_off: Currently not used
2354 *
2355 * Try to locate the generic PM domain that the device was previously
2356 * attached to. If one is found, the device is detached from it.
2357 */
2358static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2359{
2360 struct generic_pm_domain *pd;
2361 unsigned int i;
2362 int ret = 0;
2363
2364 pd = dev_to_genpd(dev);
2365 if (IS_ERR(pd))
2366 return;
2367
2368 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2369
2370 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2371 ret = genpd_remove_device(pd, dev);
2372 if (ret != -EAGAIN)
2373 break;
2374
2375 mdelay(i);
2376 cond_resched();
2377 }
2378
2379 if (ret < 0) {
2380 dev_err(dev, "failed to remove from PM domain %s: %d\n",
2381 pd->name, ret);
2382 return;
2383 }
2384
2385 /* Check if PM domain can be powered off after removing this device. */
2386 genpd_queue_power_off_work(pd);
2387
2388 /* Unregister the device if it was created by genpd. */
2389 if (dev->bus == &genpd_bus_type)
2390 device_unregister(dev);
2391}
2392
2393static void genpd_dev_pm_sync(struct device *dev)
2394{
2395 struct generic_pm_domain *pd;
2396
2397 pd = dev_to_genpd(dev);
2398 if (IS_ERR(pd))
2399 return;
2400
2401 genpd_queue_power_off_work(pd);
2402}
2403
2404static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2405 unsigned int index, bool power_on)
2406{
2407 struct of_phandle_args pd_args;
2408 struct generic_pm_domain *pd;
2409 int ret;
2410
2411 ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2412 "#power-domain-cells", index, &pd_args);
2413 if (ret < 0)
2414 return ret;
2415
2416 mutex_lock(&gpd_list_lock);
2417 pd = genpd_get_from_provider(&pd_args);
2418 of_node_put(pd_args.np);
2419 if (IS_ERR(pd)) {
2420 mutex_unlock(&gpd_list_lock);
2421 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2422 __func__, PTR_ERR(pd));
2423 return driver_deferred_probe_check_state(base_dev);
2424 }
2425
2426 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2427
2428 ret = genpd_add_device(pd, dev, base_dev);
2429 mutex_unlock(&gpd_list_lock);
2430
2431 if (ret < 0) {
2432 if (ret != -EPROBE_DEFER)
2433 dev_err(dev, "failed to add to PM domain %s: %d\n",
2434 pd->name, ret);
2435 return ret;
2436 }
2437
2438 dev->pm_domain->detach = genpd_dev_pm_detach;
2439 dev->pm_domain->sync = genpd_dev_pm_sync;
2440
2441 if (power_on) {
2442 genpd_lock(pd);
2443 ret = genpd_power_on(pd, 0);
2444 genpd_unlock(pd);
2445 }
2446
2447 if (ret)
2448 genpd_remove_device(pd, dev);
2449
2450 return ret ? -EPROBE_DEFER : 1;
2451}
2452
2453/**
2454 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2455 * @dev: Device to attach.
2456 *
2457 * Parse the device's OF node to find a PM domain specifier. If one is found,
2458 * attach the device to the retrieved pm_domain ops.
2459 *
2460 * Returns 1 when a PM domain has been attached successfully, 0 when the device
2461 * doesn't need a PM domain or when multiple power-domains exist for it, else a
2462 * negative error code. Note that if a power-domain exists for the device, but
2463 * it cannot be found or turned on, then -EPROBE_DEFER is returned to ensure
2464 * that the device is not probed and to retry later.
2465 */
2466int genpd_dev_pm_attach(struct device *dev)
2467{
2468 if (!dev->of_node)
2469 return 0;
2470
2471 /*
2472 * Devices with multiple PM domains must be attached separately, as we
2473 * can only attach one PM domain per device.
2474 */
2475 if (of_count_phandle_with_args(dev->of_node, "power-domains",
2476 "#power-domain-cells") != 1)
2477 return 0;
2478
2479 return __genpd_dev_pm_attach(dev, dev, 0, true);
2480}
2481EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
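
/*
 * Illustrative sketch (not part of the original code): bus code normally
 * does not call genpd_dev_pm_attach() directly but goes through
 * dev_pm_domain_attach(), which also handles ACPI power resources, e.g.:
 *
 *	ret = dev_pm_domain_attach(&pdev->dev, true);
 *	if (ret)
 *		return ret;
 */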
2482
2483/**
2484 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2485 * @dev: The device used to lookup the PM domain.
2486 * @index: The index of the PM domain.
2487 *
2488 * Parse the device's OF node to find a PM domain specifier at the provided
2489 * @index. If one is found, create a virtual device and attach it to the
2490 * retrieved pm_domain ops. To deal with detaching of the virtual device, the
2491 * ->detach() callback in the struct dev_pm_domain is assigned to
2492 * genpd_dev_pm_detach().
2493 *
2494 * Returns the created virtual device if the PM domain is attached successfully,
2495 * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
2496 * failures. If a power-domain exists for the device, but cannot be found or
2497 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
2498 * is not probed and to retry later.
2498 */
2499struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2500 unsigned int index)
2501{
2502 struct device *virt_dev;
2503 int num_domains;
2504 int ret;
2505
2506 if (!dev->of_node)
2507 return NULL;
2508
2509 /* Verify that the index is within a valid range. */
2510 num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2511 "#power-domain-cells");
2512 if (num_domains < 0 || index >= num_domains)
2513 return NULL;
2514
2515 /* Allocate and register device on the genpd bus. */
2516 virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2517 if (!virt_dev)
2518 return ERR_PTR(-ENOMEM);
2519
2520 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2521 virt_dev->bus = &genpd_bus_type;
2522 virt_dev->release = genpd_release_dev;
2523 virt_dev->of_node = of_node_get(dev->of_node);
2524
2525 ret = device_register(virt_dev);
2526 if (ret) {
2527 put_device(virt_dev);
2528 return ERR_PTR(ret);
2529 }
2530
2531 /* Try to attach the device to the PM domain at the specified index. */
2532 ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2533 if (ret < 1) {
2534 device_unregister(virt_dev);
2535 return ret ? ERR_PTR(ret) : NULL;
2536 }
2537
2538 pm_runtime_enable(virt_dev);
2539 genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2540
2541 return virt_dev;
2542}
2543EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
2544
2545/**
2546 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2547 * @dev: The device used to lookup the PM domain.
2548 * @name: The name of the PM domain.
2549 *
2550 * Parse device's OF node to find a PM domain specifier using the
2551 * power-domain-names DT property. For further description see
2552 * genpd_dev_pm_attach_by_id().
2553 */
2554struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2555{
2556 int index;
2557
2558 if (!dev->of_node)
2559 return NULL;
2560
2561 index = of_property_match_string(dev->of_node, "power-domain-names",
2562 name);
2563 if (index < 0)
2564 return NULL;
2565
2566 return genpd_dev_pm_attach_by_id(dev, index);
2567}
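
/*
 * Illustrative sketch (not part of the original code): consumer drivers
 * usually reach these helpers through dev_pm_domain_attach_by_name() and
 * then tie the returned virtual device to themselves with a device link,
 * so that the consumer's runtime PM controls the domain. The "mydomain"
 * name is made up:
 *
 *	struct device *pd_dev;
 *
 *	pd_dev = dev_pm_domain_attach_by_name(dev, "mydomain");
 *	if (IS_ERR_OR_NULL(pd_dev))
 *		return pd_dev ? PTR_ERR(pd_dev) : -ENODATA;
 *
 *	device_link_add(dev, pd_dev,
 *			DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 */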
2568
2569static const struct of_device_id idle_state_match[] = {
2570 { .compatible = "domain-idle-state", },
2571 { }
2572};
2573
2574static int genpd_parse_state(struct genpd_power_state *genpd_state,
2575 struct device_node *state_node)
2576{
2577 int err;
2578 u32 residency;
2579 u32 entry_latency, exit_latency;
2580
2581 err = of_property_read_u32(state_node, "entry-latency-us",
2582 &entry_latency);
2583 if (err) {
2584 pr_debug(" * %pOF missing entry-latency-us property\n",
2585 state_node);
2586 return -EINVAL;
2587 }
2588
2589 err = of_property_read_u32(state_node, "exit-latency-us",
2590 &exit_latency);
2591 if (err) {
2592 pr_debug(" * %pOF missing exit-latency-us property\n",
2593 state_node);
2594 return -EINVAL;
2595 }
2596
2597 err = of_property_read_u32(state_node, "min-residency-us", &residency);
2598 if (!err)
2599 genpd_state->residency_ns = 1000 * residency;
2600
2601 genpd_state->power_on_latency_ns = 1000 * exit_latency;
2602 genpd_state->power_off_latency_ns = 1000 * entry_latency;
2603 genpd_state->fwnode = &state_node->fwnode;
2604
2605 return 0;
2606}
2607
2608static int genpd_iterate_idle_states(struct device_node *dn,
2609 struct genpd_power_state *states)
2610{
2611 int ret;
2612 struct of_phandle_iterator it;
2613 struct device_node *np;
2614 int i = 0;
2615
2616 ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2617 if (ret <= 0)
2618 return ret;
2619
2620 /* Loop over the phandles until all the requested entries are found */
2621 of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2622 np = it.node;
2623 if (!of_match_node(idle_state_match, np))
2624 continue;
2625 if (states) {
2626 ret = genpd_parse_state(&states[i], np);
2627 if (ret) {
2628 pr_err("Parsing idle state node %pOF failed with err %d\n",
2629 np, ret);
2630 of_node_put(np);
2631 return ret;
2632 }
2633 }
2634 i++;
2635 }
2636
2637 return i;
2638}
2639
2640/**
2641 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2642 *
2643 * @dn: The genpd device node
2644 * @states: The pointer to which the state array will be saved.
2645 * @n: The count of elements in the array returned from this function.
2646 *
2647 * Returns the device states parsed from the OF node. The memory for the states
2648 * is allocated by this function and it is the responsibility of the caller to
2649 * free it after use. If any or zero compatible domain idle states are found,
2650 * 0 is returned; in case of errors, a negative error code is returned.
2651 */
2652int of_genpd_parse_idle_states(struct device_node *dn,
2653 struct genpd_power_state **states, int *n)
2654{
2655 struct genpd_power_state *st;
2656 int ret;
2657
2658 ret = genpd_iterate_idle_states(dn, NULL);
2659 if (ret < 0)
2660 return ret;
2661
2662 if (!ret) {
2663 *states = NULL;
2664 *n = 0;
2665 return 0;
2666 }
2667
2668 st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
2669 if (!st)
2670 return -ENOMEM;
2671
2672 ret = genpd_iterate_idle_states(dn, st);
2673 if (ret <= 0) {
2674 kfree(st);
2675 return ret < 0 ? ret : -EINVAL;
2676 }
2677
2678 *states = st;
2679 *n = ret;
2680
2681 return 0;
2682}
2683EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
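
/*
 * Illustrative sketch (not part of the original code): a provider can hand
 * the parsed states straight to its domain before initializing it. The
 * "foo_pd" domain is a made-up name; error handling is omitted:
 *
 *	struct genpd_power_state *states;
 *	int nr_states;
 *
 *	of_genpd_parse_idle_states(np, &states, &nr_states);
 *	foo_pd.states = states;
 *	foo_pd.state_count = nr_states;
 *	pm_genpd_init(&foo_pd, NULL, false);
 */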
2684
2685/**
2686 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
2687 *
2688 * @genpd_dev: Genpd's device for which the performance-state needs to be found.
2689 * @opp: struct dev_pm_opp of the OPP for which we need to find performance
2690 * state.
2691 *
2692 * Returns performance state encoded in the OPP of the genpd. This calls
2693 * platform specific genpd->opp_to_performance_state() callback to translate
2694 * power domain OPP to performance state.
2695 *
2696 * Returns performance state on success and 0 on failure.
2697 */
2698unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
2699 struct dev_pm_opp *opp)
2700{
2701 struct generic_pm_domain *genpd = NULL;
2702 int state;
2703
2704 genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
2705
2706 if (unlikely(!genpd->opp_to_performance_state))
2707 return 0;
2708
2709 genpd_lock(genpd);
2710 state = genpd->opp_to_performance_state(genpd, opp);
2711 genpd_unlock(genpd);
2712
2713 return state;
2714}
2715EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
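
/*
 * Illustrative sketch (not part of the original code): a provider that
 * encodes the performance state in the OPP's "opp-level" property can
 * implement the ->opp_to_performance_state() callback as a thin wrapper,
 * assuming dev_pm_opp_get_level() is available. The function name below
 * is made up:
 *
 *	static unsigned int foo_opp_to_performance_state(
 *			struct generic_pm_domain *genpd,
 *			struct dev_pm_opp *opp)
 *	{
 *		return dev_pm_opp_get_level(opp);
 *	}
 */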
2716
2717static int __init genpd_bus_init(void)
2718{
2719 return bus_register(&genpd_bus_type);
2720}
2721core_initcall(genpd_bus_init);
2722
2723#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2724
2725
2726/*** debugfs support ***/
2727
2728#ifdef CONFIG_DEBUG_FS
2729#include <linux/pm.h>
2730#include <linux/device.h>
2731#include <linux/debugfs.h>
2732#include <linux/seq_file.h>
2733#include <linux/init.h>
2734#include <linux/kobject.h>
2735static struct dentry *genpd_debugfs_dir;
2736
2737/*
2738 * TODO: This function is a slightly modified version of rtpm_status_show
2739 * from sysfs.c, so generalize it.
2740 */
2741static void rtpm_status_str(struct seq_file *s, struct device *dev)
2742{
2743 static const char * const status_lookup[] = {
2744 [RPM_ACTIVE] = "active",
2745 [RPM_RESUMING] = "resuming",
2746 [RPM_SUSPENDED] = "suspended",
2747 [RPM_SUSPENDING] = "suspending"
2748 };
2749 const char *p = "";
2750
2751 if (dev->power.runtime_error)
2752 p = "error";
2753 else if (dev->power.disable_depth)
2754 p = "unsupported";
2755 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2756 p = status_lookup[dev->power.runtime_status];
2757 else
2758 WARN_ON(1);
2759
2760 seq_puts(s, p);
2761}
2762
2763static int genpd_summary_one(struct seq_file *s,
2764 struct generic_pm_domain *genpd)
2765{
2766 static const char * const status_lookup[] = {
2767 [GPD_STATE_ACTIVE] = "on",
2768 [GPD_STATE_POWER_OFF] = "off"
2769 };
2770 struct pm_domain_data *pm_data;
2771 const char *kobj_path;
2772 struct gpd_link *link;
2773 char state[16];
2774 int ret;
2775
2776 ret = genpd_lock_interruptible(genpd);
2777 if (ret)
2778 return -ERESTARTSYS;
2779
2780 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2781 goto exit;
2782 if (!genpd_status_on(genpd))
2783 snprintf(state, sizeof(state), "%s-%u",
2784 status_lookup[genpd->status], genpd->state_idx);
2785 else
2786 snprintf(state, sizeof(state), "%s",
2787 status_lookup[genpd->status]);
2788 seq_printf(s, "%-30s %-15s ", genpd->name, state);
2789
2790 /*
2791 * Modifications on the list require holding locks on both
2792 * master and slave, so we are safe.
2793 * Also genpd->name is immutable.
2794 */
2795 list_for_each_entry(link, &genpd->master_links, master_node) {
2796 seq_printf(s, "%s", link->slave->name);
2797 if (!list_is_last(&link->master_node, &genpd->master_links))
2798 seq_puts(s, ", ");
2799 }
2800
2801 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2802 kobj_path = kobject_get_path(&pm_data->dev->kobj,
2803 genpd_is_irq_safe(genpd) ?
2804 GFP_ATOMIC : GFP_KERNEL);
2805 if (kobj_path == NULL)
2806 continue;
2807
2808 seq_printf(s, "\n %-50s ", kobj_path);
2809 rtpm_status_str(s, pm_data->dev);
2810 kfree(kobj_path);
2811 }
2812
2813 seq_puts(s, "\n");
2814exit:
2815 genpd_unlock(genpd);
2816
2817 return 0;
2818}
2819
2820static int summary_show(struct seq_file *s, void *data)
2821{
2822 struct generic_pm_domain *genpd;
2823 int ret = 0;
2824
2825 seq_puts(s, "domain                          status          slaves\n");
2826 seq_puts(s, "    /device                                             runtime status\n");
2827 seq_puts(s, "----------------------------------------------------------------------\n");
2828
2829 ret = mutex_lock_interruptible(&gpd_list_lock);
2830 if (ret)
2831 return -ERESTARTSYS;
2832
2833 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2834 ret = genpd_summary_one(s, genpd);
2835 if (ret)
2836 break;
2837 }
2838 mutex_unlock(&gpd_list_lock);
2839
2840 return ret;
2841}
2842
2843static int status_show(struct seq_file *s, void *data)
2844{
2845 static const char * const status_lookup[] = {
2846 [GPD_STATE_ACTIVE] = "on",
2847 [GPD_STATE_POWER_OFF] = "off"
2848 };
2849
2850 struct generic_pm_domain *genpd = s->private;
2851 int ret = 0;
2852
2853 ret = genpd_lock_interruptible(genpd);
2854 if (ret)
2855 return -ERESTARTSYS;
2856
2857 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
2858 goto exit;
2859
2860 if (genpd->status == GPD_STATE_POWER_OFF)
2861 seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
2862 genpd->state_idx);
2863 else
2864 seq_printf(s, "%s\n", status_lookup[genpd->status]);
2865exit:
2866 genpd_unlock(genpd);
2867 return ret;
2868}
2869
2870static int sub_domains_show(struct seq_file *s, void *data)
2871{
2872 struct generic_pm_domain *genpd = s->private;
2873 struct gpd_link *link;
2874 int ret = 0;
2875
2876 ret = genpd_lock_interruptible(genpd);
2877 if (ret)
2878 return -ERESTARTSYS;
2879
2880 list_for_each_entry(link, &genpd->master_links, master_node)
2881 seq_printf(s, "%s\n", link->slave->name);
2882
2883 genpd_unlock(genpd);
2884 return ret;
2885}
2886
2887static int idle_states_show(struct seq_file *s, void *data)
2888{
2889 struct generic_pm_domain *genpd = s->private;
2890 unsigned int i;
2891 int ret = 0;
2892
2893 ret = genpd_lock_interruptible(genpd);
2894 if (ret)
2895 return -ERESTARTSYS;
2896
2897 seq_puts(s, "State          Time Spent(ms)\n");
2898
2899 for (i = 0; i < genpd->state_count; i++) {
2900 ktime_t delta = 0;
2901 s64 msecs;
2902
2903 if ((genpd->status == GPD_STATE_POWER_OFF) &&
2904 (genpd->state_idx == i))
2905 delta = ktime_sub(ktime_get(), genpd->accounting_time);
2906
2907 msecs = ktime_to_ms(
2908 ktime_add(genpd->states[i].idle_time, delta));
2909 seq_printf(s, "S%-13i %lld\n", i, msecs);
2910 }
2911
2912 genpd_unlock(genpd);
2913 return ret;
2914}
2915
2916static int active_time_show(struct seq_file *s, void *data)
2917{
2918 struct generic_pm_domain *genpd = s->private;
2919 ktime_t delta = 0;
2920 int ret = 0;
2921
2922 ret = genpd_lock_interruptible(genpd);
2923 if (ret)
2924 return -ERESTARTSYS;
2925
2926 if (genpd->status == GPD_STATE_ACTIVE)
2927 delta = ktime_sub(ktime_get(), genpd->accounting_time);
2928
2929 seq_printf(s, "%lld ms\n", ktime_to_ms(
2930 ktime_add(genpd->on_time, delta)));
2931
2932 genpd_unlock(genpd);
2933 return ret;
2934}
2935
2936static int total_idle_time_show(struct seq_file *s, void *data)
2937{
2938 struct generic_pm_domain *genpd = s->private;
2939 ktime_t delta = 0, total = 0;
2940 unsigned int i;
2941 int ret = 0;
2942
2943 ret = genpd_lock_interruptible(genpd);
2944 if (ret)
2945 return -ERESTARTSYS;
2946
2947 for (i = 0; i < genpd->state_count; i++) {
2948
2949 if ((genpd->status == GPD_STATE_POWER_OFF) &&
2950 (genpd->state_idx == i))
2951 delta = ktime_sub(ktime_get(), genpd->accounting_time);
2952
2953 total = ktime_add(total, genpd->states[i].idle_time);
2954 }
2955 total = ktime_add(total, delta);
2956
2957 seq_printf(s, "%lld ms\n", ktime_to_ms(total));
2958
2959 genpd_unlock(genpd);
2960 return ret;
2961}
2962
2963
2964static int devices_show(struct seq_file *s, void *data)
2965{
2966 struct generic_pm_domain *genpd = s->private;
2967 struct pm_domain_data *pm_data;
2968 const char *kobj_path;
2969 int ret = 0;
2970
2971 ret = genpd_lock_interruptible(genpd);
2972 if (ret)
2973 return -ERESTARTSYS;
2974
2975 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2976 kobj_path = kobject_get_path(&pm_data->dev->kobj,
2977 genpd_is_irq_safe(genpd) ?
2978 GFP_ATOMIC : GFP_KERNEL);
2979 if (kobj_path == NULL)
2980 continue;
2981
2982 seq_printf(s, "%s\n", kobj_path);
2983 kfree(kobj_path);
2984 }
2985
2986 genpd_unlock(genpd);
2987 return ret;
2988}
2989
2990static int perf_state_show(struct seq_file *s, void *data)
2991{
2992 struct generic_pm_domain *genpd = s->private;
2993
2994 if (genpd_lock_interruptible(genpd))
2995 return -ERESTARTSYS;
2996
2997 seq_printf(s, "%u\n", genpd->performance_state);
2998
2999 genpd_unlock(genpd);
3000 return 0;
3001}
3002
3003DEFINE_SHOW_ATTRIBUTE(summary);
3004DEFINE_SHOW_ATTRIBUTE(status);
3005DEFINE_SHOW_ATTRIBUTE(sub_domains);
3006DEFINE_SHOW_ATTRIBUTE(idle_states);
3007DEFINE_SHOW_ATTRIBUTE(active_time);
3008DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3009DEFINE_SHOW_ATTRIBUTE(devices);
3010DEFINE_SHOW_ATTRIBUTE(perf_state);
3011
3012static int __init genpd_debug_init(void)
3013{
3014 struct dentry *d;
3015 struct generic_pm_domain *genpd;
3016
3017 genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3018
3019 debugfs_create_file("pm_genpd_summary", 0444, genpd_debugfs_dir,
3020 NULL, &summary_fops);
3021
3022 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3023 d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3024
3025 debugfs_create_file("current_state", 0444,
3026 d, genpd, &status_fops);
3027 debugfs_create_file("sub_domains", 0444,
3028 d, genpd, &sub_domains_fops);
3029 debugfs_create_file("idle_states", 0444,
3030 d, genpd, &idle_states_fops);
3031 debugfs_create_file("active_time", 0444,
3032 d, genpd, &active_time_fops);
3033 debugfs_create_file("total_idle_time", 0444,
3034 d, genpd, &total_idle_time_fops);
3035 debugfs_create_file("devices", 0444,
3036 d, genpd, &devices_fops);
3037 if (genpd->set_performance_state)
3038 debugfs_create_file("perf_state", 0444,
3039 d, genpd, &perf_state_fops);
3040 }
3041
3042 return 0;
3043}
3044late_initcall(genpd_debug_init);
3045
3046static void __exit genpd_debug_exit(void)
3047{
3048 debugfs_remove_recursive(genpd_debugfs_dir);
3049}
3050__exitcall(genpd_debug_exit);
3051#endif /* CONFIG_DEBUG_FS */
1/*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/delay.h>
10#include <linux/kernel.h>
11#include <linux/io.h>
12#include <linux/platform_device.h>
13#include <linux/pm_runtime.h>
14#include <linux/pm_domain.h>
15#include <linux/pm_qos.h>
16#include <linux/pm_clock.h>
17#include <linux/slab.h>
18#include <linux/err.h>
19#include <linux/sched.h>
20#include <linux/suspend.h>
21#include <linux/export.h>
22
23#include "power.h"
24
25#define GENPD_RETRY_MAX_MS 250 /* Approximate */
26
27#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
28({ \
29 type (*__routine)(struct device *__d); \
30 type __ret = (type)0; \
31 \
32 __routine = genpd->dev_ops.callback; \
33 if (__routine) { \
34 __ret = __routine(dev); \
35 } \
36 __ret; \
37})
38
39static LIST_HEAD(gpd_list);
40static DEFINE_MUTEX(gpd_list_lock);
41
42struct genpd_lock_ops {
43 void (*lock)(struct generic_pm_domain *genpd);
44 void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
45 int (*lock_interruptible)(struct generic_pm_domain *genpd);
46 void (*unlock)(struct generic_pm_domain *genpd);
47};
48
49static void genpd_lock_mtx(struct generic_pm_domain *genpd)
50{
51 mutex_lock(&genpd->mlock);
52}
53
54static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
55 int depth)
56{
57 mutex_lock_nested(&genpd->mlock, depth);
58}
59
60static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
61{
62 return mutex_lock_interruptible(&genpd->mlock);
63}
64
65static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
66{
67 return mutex_unlock(&genpd->mlock);
68}
69
70static const struct genpd_lock_ops genpd_mtx_ops = {
71 .lock = genpd_lock_mtx,
72 .lock_nested = genpd_lock_nested_mtx,
73 .lock_interruptible = genpd_lock_interruptible_mtx,
74 .unlock = genpd_unlock_mtx,
75};
76
77static void genpd_lock_spin(struct generic_pm_domain *genpd)
78 __acquires(&genpd->slock)
79{
80 unsigned long flags;
81
82 spin_lock_irqsave(&genpd->slock, flags);
83 genpd->lock_flags = flags;
84}
85
86static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
87 int depth)
88 __acquires(&genpd->slock)
89{
90 unsigned long flags;
91
92 spin_lock_irqsave_nested(&genpd->slock, flags, depth);
93 genpd->lock_flags = flags;
94}
95
96static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
97 __acquires(&genpd->slock)
98{
99 unsigned long flags;
100
101 spin_lock_irqsave(&genpd->slock, flags);
102 genpd->lock_flags = flags;
103 return 0;
104}
105
106static void genpd_unlock_spin(struct generic_pm_domain *genpd)
107 __releases(&genpd->slock)
108{
109 spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
110}
111
112static const struct genpd_lock_ops genpd_spin_ops = {
113 .lock = genpd_lock_spin,
114 .lock_nested = genpd_lock_nested_spin,
115 .lock_interruptible = genpd_lock_interruptible_spin,
116 .unlock = genpd_unlock_spin,
117};
118
119#define genpd_lock(p) p->lock_ops->lock(p)
120#define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
121#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
122#define genpd_unlock(p) p->lock_ops->unlock(p)
123
124#define genpd_status_on(genpd) (genpd->status == GPD_STATE_ACTIVE)
125#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
126#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
127#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
128
129static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
130 const struct generic_pm_domain *genpd)
131{
132 bool ret;
133
134 ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
135
136 /*
137 * Warn once if an IRQ safe device is attached to a no sleep domain, as
138 * to indicate a suboptimal configuration for PM. For an always on
139 * domain this isn't case, thus don't warn.
140 */
141 if (ret && !genpd_is_always_on(genpd))
142 dev_warn_once(dev, "PM domain %s will not be powered off\n",
143 genpd->name);
144
145 return ret;
146}
147
148/*
149 * Get the generic PM domain for a particular struct device.
150 * This validates the struct device pointer, the PM domain pointer,
151 * and checks that the PM domain pointer is a real generic PM domain.
152 * Any failure results in NULL being returned.
153 */
154static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
155{
156 struct generic_pm_domain *genpd = NULL, *gpd;
157
158 if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
159 return NULL;
160
161 mutex_lock(&gpd_list_lock);
162 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
163 if (&gpd->domain == dev->pm_domain) {
164 genpd = gpd;
165 break;
166 }
167 }
168 mutex_unlock(&gpd_list_lock);
169
170 return genpd;
171}
172
173/*
174 * This should only be used where we are certain that the pm_domain
175 * attached to the device is a genpd domain.
176 */
177static struct generic_pm_domain *dev_to_genpd(struct device *dev)
178{
179 if (IS_ERR_OR_NULL(dev->pm_domain))
180 return ERR_PTR(-EINVAL);
181
182 return pd_to_genpd(dev->pm_domain);
183}
184
185static int genpd_stop_dev(const struct generic_pm_domain *genpd,
186 struct device *dev)
187{
188 return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
189}
190
191static int genpd_start_dev(const struct generic_pm_domain *genpd,
192 struct device *dev)
193{
194 return GENPD_DEV_CALLBACK(genpd, int, start, dev);
195}
196
197static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
198{
199 bool ret = false;
200
201 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
202 ret = !!atomic_dec_and_test(&genpd->sd_count);
203
204 return ret;
205}
206
207static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
208{
209 atomic_inc(&genpd->sd_count);
210 smp_mb__after_atomic();
211}
212
213#ifdef CONFIG_DEBUG_FS
214static void genpd_update_accounting(struct generic_pm_domain *genpd)
215{
216 ktime_t delta, now;
217
218 now = ktime_get();
219 delta = ktime_sub(now, genpd->accounting_time);
220
221 /*
222 * If genpd->status is active, it means we are just
223 * out of off and so update the idle time and vice
224 * versa.
225 */
226 if (genpd->status == GPD_STATE_ACTIVE) {
227 int state_idx = genpd->state_idx;
228
229 genpd->states[state_idx].idle_time =
230 ktime_add(genpd->states[state_idx].idle_time, delta);
231 } else {
232 genpd->on_time = ktime_add(genpd->on_time, delta);
233 }
234
235 genpd->accounting_time = now;
236}
237#else
238static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
239#endif
240
241/**
242 * dev_pm_genpd_set_performance_state- Set performance state of device's power
243 * domain.
244 *
245 * @dev: Device for which the performance-state needs to be set.
246 * @state: Target performance state of the device. This can be set as 0 when the
247 * device doesn't have any performance state constraints left (And so
248 * the device wouldn't participate anymore to find the target
249 * performance state of the genpd).
250 *
251 * It is assumed that the users guarantee that the genpd wouldn't be detached
252 * while this routine is getting called.
253 *
254 * Returns 0 on success and negative error values on failures.
255 */
256int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
257{
258 struct generic_pm_domain *genpd;
259 struct generic_pm_domain_data *gpd_data, *pd_data;
260 struct pm_domain_data *pdd;
261 unsigned int prev;
262 int ret = 0;
263
264 genpd = dev_to_genpd(dev);
265 if (IS_ERR(genpd))
266 return -ENODEV;
267
268 if (unlikely(!genpd->set_performance_state))
269 return -EINVAL;
270
271 if (unlikely(!dev->power.subsys_data ||
272 !dev->power.subsys_data->domain_data)) {
273 WARN_ON(1);
274 return -EINVAL;
275 }
276
277 genpd_lock(genpd);
278
279 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
280 prev = gpd_data->performance_state;
281 gpd_data->performance_state = state;
282
283 /* New requested state is same as Max requested state */
284 if (state == genpd->performance_state)
285 goto unlock;
286
287 /* New requested state is higher than Max requested state */
288 if (state > genpd->performance_state)
289 goto update_state;
290
291 /* Traverse all devices within the domain */
292 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
293 pd_data = to_gpd_data(pdd);
294
295 if (pd_data->performance_state > state)
296 state = pd_data->performance_state;
297 }
298
299 if (state == genpd->performance_state)
300 goto unlock;
301
302 /*
303 * We aren't propagating performance state changes of a subdomain to its
304 * masters as we don't have hardware that needs it. Over that, the
305 * performance states of subdomain and its masters may not have
306 * one-to-one mapping and would require additional information. We can
307 * get back to this once we have hardware that needs it. For that
308 * reason, we don't have to consider performance state of the subdomains
309 * of genpd here.
310 */
311
312update_state:
313 if (genpd_status_on(genpd)) {
314 ret = genpd->set_performance_state(genpd, state);
315 if (ret) {
316 gpd_data->performance_state = prev;
317 goto unlock;
318 }
319 }
320
321 genpd->performance_state = state;
322
323unlock:
324 genpd_unlock(genpd);
325
326 return ret;
327}
328EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
329
330static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
331{
332 unsigned int state_idx = genpd->state_idx;
333 ktime_t time_start;
334 s64 elapsed_ns;
335 int ret;
336
337 if (!genpd->power_on)
338 return 0;
339
340 if (!timed)
341 return genpd->power_on(genpd);
342
343 time_start = ktime_get();
344 ret = genpd->power_on(genpd);
345 if (ret)
346 return ret;
347
348 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
349
350 if (unlikely(genpd->set_performance_state)) {
351 ret = genpd->set_performance_state(genpd, genpd->performance_state);
352 if (ret) {
353 pr_warn("%s: Failed to set performance state %d (%d)\n",
354 genpd->name, genpd->performance_state, ret);
355 }
356 }
357
358 if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
359 return ret;
360
361 genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
362 genpd->max_off_time_changed = true;
363 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
364 genpd->name, "on", elapsed_ns);
365
366 return ret;
367}
368
369static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
370{
371 unsigned int state_idx = genpd->state_idx;
372 ktime_t time_start;
373 s64 elapsed_ns;
374 int ret;
375
376 if (!genpd->power_off)
377 return 0;
378
379 if (!timed)
380 return genpd->power_off(genpd);
381
382 time_start = ktime_get();
383 ret = genpd->power_off(genpd);
384 if (ret == -EBUSY)
385 return ret;
386
387 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
388 if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
389 return ret;
390
391 genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
392 genpd->max_off_time_changed = true;
393 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
394 genpd->name, "off", elapsed_ns);
395
396 return ret;
397}
398
399/**
400 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
401 * @genpd: PM domain to power off.
402 *
403 * Queue up the execution of genpd_power_off() unless it's already been done
404 * before.
405 */
406static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
407{
408 queue_work(pm_wq, &genpd->power_off_work);
409}
410
411/**
412 * genpd_power_off - Remove power from a given PM domain.
413 * @genpd: PM domain to power down.
414 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
415 * RPM status of the releated device is in an intermediate state, not yet turned
416 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
417 * be RPM_SUSPENDED, while it tries to power off the PM domain.
418 *
419 * If all of the @genpd's devices have been suspended and all of its subdomains
420 * have been powered down, remove power from @genpd.
421 */
422static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
423 unsigned int depth)
424{
425 struct pm_domain_data *pdd;
426 struct gpd_link *link;
427 unsigned int not_suspended = 0;
428
429 /*
430 * Do not try to power off the domain in the following situations:
431 * (1) The domain is already in the "power off" state.
432 * (2) System suspend is in progress.
433 */
434 if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
435 return 0;
436
437 /*
438 * Abort power off for the PM domain in the following situations:
439 * (1) The domain is configured as always on.
440 * (2) When the domain has a subdomain being powered on.
441 */
442 if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
443 return -EBUSY;
444
445 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
446 enum pm_qos_flags_status stat;
447
448 stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
449 if (stat > PM_QOS_FLAGS_NONE)
450 return -EBUSY;
451
452 /*
453 * Do not allow PM domain to be powered off, when an IRQ safe
454 * device is part of a non-IRQ safe domain.
455 */
456 if (!pm_runtime_suspended(pdd->dev) ||
457 irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
458 not_suspended++;
459 }
460
461 if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
462 return -EBUSY;
463
464 if (genpd->gov && genpd->gov->power_down_ok) {
465 if (!genpd->gov->power_down_ok(&genpd->domain))
466 return -EAGAIN;
467 }
468
469 if (genpd->power_off) {
470 int ret;
471
472 if (atomic_read(&genpd->sd_count) > 0)
473 return -EBUSY;
474
475 /*
476 * If sd_count > 0 at this point, one of the subdomains hasn't
477 * managed to call genpd_power_on() for the master yet after
478 * incrementing it. In that case genpd_power_on() will wait
479 * for us to drop the lock, so we can call .power_off() and let
480 * the genpd_power_on() restore power for us (this shouldn't
481 * happen very often).
482 */
483 ret = _genpd_power_off(genpd, true);
484 if (ret)
485 return ret;
486 }
487
488 genpd->status = GPD_STATE_POWER_OFF;
489 genpd_update_accounting(genpd);
490
491 list_for_each_entry(link, &genpd->slave_links, slave_node) {
492 genpd_sd_counter_dec(link->master);
493 genpd_lock_nested(link->master, depth + 1);
494 genpd_power_off(link->master, false, depth + 1);
495 genpd_unlock(link->master);
496 }
497
498 return 0;
499}
500
501/**
502 * genpd_power_on - Restore power to a given PM domain and its masters.
503 * @genpd: PM domain to power up.
504 * @depth: nesting count for lockdep.
505 *
506 * Restore power to @genpd and all of its masters so that it is possible to
507 * resume a device belonging to it.
508 */
509static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
510{
511 struct gpd_link *link;
512 int ret = 0;
513
514 if (genpd_status_on(genpd))
515 return 0;
516
517 /*
518 * The list is guaranteed not to change while the loop below is being
519 * executed, unless one of the masters' .power_on() callbacks fiddles
520 * with it.
521 */
522 list_for_each_entry(link, &genpd->slave_links, slave_node) {
523 struct generic_pm_domain *master = link->master;
524
525 genpd_sd_counter_inc(master);
526
527 genpd_lock_nested(master, depth + 1);
528 ret = genpd_power_on(master, depth + 1);
529 genpd_unlock(master);
530
531 if (ret) {
532 genpd_sd_counter_dec(master);
533 goto err;
534 }
535 }
536
537 ret = _genpd_power_on(genpd, true);
538 if (ret)
539 goto err;
540
541 genpd->status = GPD_STATE_ACTIVE;
542 genpd_update_accounting(genpd);
543
544 return 0;
545
546 err:
547 list_for_each_entry_continue_reverse(link,
548 &genpd->slave_links,
549 slave_node) {
550 genpd_sd_counter_dec(link->master);
551 genpd_lock_nested(link->master, depth + 1);
552 genpd_power_off(link->master, false, depth + 1);
553 genpd_unlock(link->master);
554 }
555
556 return ret;
557}
558
559static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
560 unsigned long val, void *ptr)
561{
562 struct generic_pm_domain_data *gpd_data;
563 struct device *dev;
564
565 gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
566 dev = gpd_data->base.dev;
567
568 for (;;) {
569 struct generic_pm_domain *genpd;
570 struct pm_domain_data *pdd;
571
572 spin_lock_irq(&dev->power.lock);
573
574 pdd = dev->power.subsys_data ?
575 dev->power.subsys_data->domain_data : NULL;
576 if (pdd) {
577 to_gpd_data(pdd)->td.constraint_changed = true;
578 genpd = dev_to_genpd(dev);
579 } else {
580 genpd = ERR_PTR(-ENODATA);
581 }
582
583 spin_unlock_irq(&dev->power.lock);
584
585 if (!IS_ERR(genpd)) {
586 genpd_lock(genpd);
587 genpd->max_off_time_changed = true;
588 genpd_unlock(genpd);
589 }
590
591 dev = dev->parent;
592 if (!dev || dev->power.ignore_children)
593 break;
594 }
595
596 return NOTIFY_DONE;
597}
598
599/**
600 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
601 * @work: Work structure used for scheduling the execution of this function.
602 */
603static void genpd_power_off_work_fn(struct work_struct *work)
604{
605 struct generic_pm_domain *genpd;
606
607 genpd = container_of(work, struct generic_pm_domain, power_off_work);
608
609 genpd_lock(genpd);
610 genpd_power_off(genpd, false, 0);
611 genpd_unlock(genpd);
612}
613
614/**
615 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
616 * @dev: Device to handle.
617 */
618static int __genpd_runtime_suspend(struct device *dev)
619{
620 int (*cb)(struct device *__dev);
621
622 if (dev->type && dev->type->pm)
623 cb = dev->type->pm->runtime_suspend;
624 else if (dev->class && dev->class->pm)
625 cb = dev->class->pm->runtime_suspend;
626 else if (dev->bus && dev->bus->pm)
627 cb = dev->bus->pm->runtime_suspend;
628 else
629 cb = NULL;
630
631 if (!cb && dev->driver && dev->driver->pm)
632 cb = dev->driver->pm->runtime_suspend;
633
634 return cb ? cb(dev) : 0;
635}
636
637/**
638 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
639 * @dev: Device to handle.
640 */
641static int __genpd_runtime_resume(struct device *dev)
642{
643 int (*cb)(struct device *__dev);
644
645 if (dev->type && dev->type->pm)
646 cb = dev->type->pm->runtime_resume;
647 else if (dev->class && dev->class->pm)
648 cb = dev->class->pm->runtime_resume;
649 else if (dev->bus && dev->bus->pm)
650 cb = dev->bus->pm->runtime_resume;
651 else
652 cb = NULL;
653
654 if (!cb && dev->driver && dev->driver->pm)
655 cb = dev->driver->pm->runtime_resume;
656
657 return cb ? cb(dev) : 0;
658}
659
660/**
661 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
662 * @dev: Device to suspend.
663 *
664 * Carry out a runtime suspend of a device under the assumption that its
665 * pm_domain field points to the domain member of an object of type
666 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
667 */
668static int genpd_runtime_suspend(struct device *dev)
669{
670 struct generic_pm_domain *genpd;
671 bool (*suspend_ok)(struct device *__dev);
672 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
673 bool runtime_pm = pm_runtime_enabled(dev);
674 ktime_t time_start;
675 s64 elapsed_ns;
676 int ret;
677
678 dev_dbg(dev, "%s()\n", __func__);
679
680 genpd = dev_to_genpd(dev);
681 if (IS_ERR(genpd))
682 return -EINVAL;
683
684 /*
685 * A runtime PM centric subsystem/driver may re-use the runtime PM
686 * callbacks for other purposes than runtime PM. In those scenarios
687 * runtime PM is disabled. Under these circumstances, we shall skip
688 * validating/measuring the PM QoS latency.
689 */
690 suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
691 if (runtime_pm && suspend_ok && !suspend_ok(dev))
692 return -EBUSY;
693
694 /* Measure suspend latency. */
695 time_start = 0;
696 if (runtime_pm)
697 time_start = ktime_get();
698
699 ret = __genpd_runtime_suspend(dev);
700 if (ret)
701 return ret;
702
703 ret = genpd_stop_dev(genpd, dev);
704 if (ret) {
705 __genpd_runtime_resume(dev);
706 return ret;
707 }
708
709 /* Update suspend latency value if the measured time exceeds it. */
710 if (runtime_pm) {
711 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
712 if (elapsed_ns > td->suspend_latency_ns) {
713 td->suspend_latency_ns = elapsed_ns;
714 dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
715 elapsed_ns);
716 genpd->max_off_time_changed = true;
717 td->constraint_changed = true;
718 }
719 }
720
721 /*
722 * If power.irq_safe is set, this routine may be run with
723 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
724 */
725 if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
726 return 0;
727
728 genpd_lock(genpd);
729 genpd_power_off(genpd, true, 0);
730 genpd_unlock(genpd);
731
732 return 0;
733}
734
735/**
736 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
737 * @dev: Device to resume.
738 *
739 * Carry out a runtime resume of a device under the assumption that its
740 * pm_domain field points to the domain member of an object of type
741 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
742 */
743static int genpd_runtime_resume(struct device *dev)
744{
745 struct generic_pm_domain *genpd;
746 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
747 bool runtime_pm = pm_runtime_enabled(dev);
748 ktime_t time_start;
749 s64 elapsed_ns;
750 int ret;
751 bool timed = true;
752
753 dev_dbg(dev, "%s()\n", __func__);
754
755 genpd = dev_to_genpd(dev);
756 if (IS_ERR(genpd))
757 return -EINVAL;
758
759 /*
760 * As we don't power off a non IRQ safe domain, which holds
761 * an IRQ safe device, we don't need to restore power to it.
762 */
763 if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
764 timed = false;
765 goto out;
766 }
767
768 genpd_lock(genpd);
769 ret = genpd_power_on(genpd, 0);
770 genpd_unlock(genpd);
771
772 if (ret)
773 return ret;
774
775 out:
776 /* Measure resume latency. */
777 time_start = 0;
778 if (timed && runtime_pm)
779 time_start = ktime_get();
780
781 ret = genpd_start_dev(genpd, dev);
782 if (ret)
783 goto err_poweroff;
784
785 ret = __genpd_runtime_resume(dev);
786 if (ret)
787 goto err_stop;
788
789 /* Update resume latency value if the measured time exceeds it. */
790 if (timed && runtime_pm) {
791 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
792 if (elapsed_ns > td->resume_latency_ns) {
793 td->resume_latency_ns = elapsed_ns;
794 dev_dbg(dev, "resume latency exceeded, %lld ns\n",
795 elapsed_ns);
796 genpd->max_off_time_changed = true;
797 td->constraint_changed = true;
798 }
799 }
800
801 return 0;
802
803err_stop:
804 genpd_stop_dev(genpd, dev);
805err_poweroff:
806 if (!pm_runtime_is_irq_safe(dev) ||
807 (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
808 genpd_lock(genpd);
809 genpd_power_off(genpd, true, 0);
810 genpd_unlock(genpd);
811 }
812
813 return ret;
814}
815
816static bool pd_ignore_unused;
817static int __init pd_ignore_unused_setup(char *__unused)
818{
819 pd_ignore_unused = true;
820 return 1;
821}
822__setup("pd_ignore_unused", pd_ignore_unused_setup);
823
824/**
825 * genpd_power_off_unused - Power off all PM domains with no devices in use.
826 */
827static int __init genpd_power_off_unused(void)
828{
829 struct generic_pm_domain *genpd;
830
831 if (pd_ignore_unused) {
832 pr_warn("genpd: Not disabling unused power domains\n");
833 return 0;
834 }
835
836 mutex_lock(&gpd_list_lock);
837
838 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
839 genpd_queue_power_off_work(genpd);
840
841 mutex_unlock(&gpd_list_lock);
842
843 return 0;
844}
845late_initcall(genpd_power_off_unused);
846
847#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
848
849static bool genpd_present(const struct generic_pm_domain *genpd)
850{
851 const struct generic_pm_domain *gpd;
852
853 if (IS_ERR_OR_NULL(genpd))
854 return false;
855
856 list_for_each_entry(gpd, &gpd_list, gpd_list_node)
857 if (gpd == genpd)
858 return true;
859
860 return false;
861}
862
863#endif
864
865#ifdef CONFIG_PM_SLEEP
866
867/**
868 * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
869 * @genpd: PM domain to power off, if possible.
870 * @use_lock: use the lock.
871 * @depth: nesting count for lockdep.
872 *
873 * Check if the given PM domain can be powered off (during system suspend or
874 * hibernation) and do that if so. Also, in that case propagate to its masters.
875 *
876 * This function is only called in "noirq" and "syscore" stages of system power
877 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
878 * these cases the lock must be held.
879 */
880static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
881 unsigned int depth)
882{
883 struct gpd_link *link;
884
885 if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
886 return;
887
888 if (genpd->suspended_count != genpd->device_count
889 || atomic_read(&genpd->sd_count) > 0)
890 return;
891
892 /* Choose the deepest state when suspending */
893 genpd->state_idx = genpd->state_count - 1;
894 if (_genpd_power_off(genpd, false))
895 return;
896
897 genpd->status = GPD_STATE_POWER_OFF;
898
899 list_for_each_entry(link, &genpd->slave_links, slave_node) {
900 genpd_sd_counter_dec(link->master);
901
902 if (use_lock)
903 genpd_lock_nested(link->master, depth + 1);
904
905 genpd_sync_power_off(link->master, use_lock, depth + 1);
906
907 if (use_lock)
908 genpd_unlock(link->master);
909 }
910}
911
912/**
913 * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
914 * @genpd: PM domain to power on.
915 * @use_lock: use the lock.
916 * @depth: nesting count for lockdep.
917 *
918 * This function is only called in "noirq" and "syscore" stages of system power
919 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
920 * these cases the lock must be held.
921 */
922static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
923 unsigned int depth)
924{
925 struct gpd_link *link;
926
927 if (genpd_status_on(genpd))
928 return;
929
930 list_for_each_entry(link, &genpd->slave_links, slave_node) {
931 genpd_sd_counter_inc(link->master);
932
933 if (use_lock)
934 genpd_lock_nested(link->master, depth + 1);
935
936 genpd_sync_power_on(link->master, use_lock, depth + 1);
937
938 if (use_lock)
939 genpd_unlock(link->master);
940 }
941
942 _genpd_power_on(genpd, false);
943
944 genpd->status = GPD_STATE_ACTIVE;
945}
946
947/**
948 * resume_needed - Check whether to resume a device before system suspend.
949 * @dev: Device to check.
950 * @genpd: PM domain the device belongs to.
951 *
952 * There are two cases in which a device that can wake up the system from sleep
953 * states should be resumed by genpd_prepare(): (1) if the device is enabled
954 * to wake up the system and it has to remain active for this purpose while the
955 * system is in the sleep state and (2) if the device is not enabled to wake up
956 * the system from sleep states and it generally doesn't generate wakeup signals
957 * by itself (those signals are generated on its behalf by other parts of the
958 * system). In the latter case it may be necessary to reconfigure the device's
959 * wakeup settings during system suspend, because it may have been set up to
960 * signal remote wakeup from the system's working state as needed by runtime PM.
961 * Return 'true' in either of the above cases.
962 */
963static bool resume_needed(struct device *dev,
964 const struct generic_pm_domain *genpd)
965{
966 bool active_wakeup;
967
968 if (!device_can_wakeup(dev))
969 return false;
970
971 active_wakeup = genpd_is_active_wakeup(genpd);
972 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
973}
974
975/**
976 * genpd_prepare - Start power transition of a device in a PM domain.
977 * @dev: Device to start the transition of.
978 *
979 * Start a power transition of a device (during a system-wide power transition)
980 * under the assumption that its pm_domain field points to the domain member of
981 * an object of type struct generic_pm_domain representing a PM domain
982 * consisting of I/O devices.
983 */
984static int genpd_prepare(struct device *dev)
985{
986 struct generic_pm_domain *genpd;
987 int ret;
988
989 dev_dbg(dev, "%s()\n", __func__);
990
991 genpd = dev_to_genpd(dev);
992 if (IS_ERR(genpd))
993 return -EINVAL;
994
995 /*
996 * If a wakeup request is pending for the device, it should be woken up
997 * at this point and a system wakeup event should be reported if it's
998 * set up to wake up the system from sleep states.
999 */
1000 if (resume_needed(dev, genpd))
1001 pm_runtime_resume(dev);
1002
1003 genpd_lock(genpd);
1004
1005 if (genpd->prepared_count++ == 0)
1006 genpd->suspended_count = 0;
1007
1008 genpd_unlock(genpd);
1009
1010 ret = pm_generic_prepare(dev);
1011 if (ret < 0) {
1012 genpd_lock(genpd);
1013
1014 genpd->prepared_count--;
1015
1016 genpd_unlock(genpd);
1017 }
1018
1019 /* Never return 1, as genpd doesn't cope with the direct_complete path. */
1020 return ret >= 0 ? 0 : ret;
1021}
1022
1023/**
1024 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1025 * I/O PM domain.
1026 * @dev: Device to suspend.
1027 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
1028 *
1029 * Stop the device and remove power from the domain if all devices in it have
1030 * been stopped.
1031 */
1032static int genpd_finish_suspend(struct device *dev, bool poweroff)
1033{
1034 struct generic_pm_domain *genpd;
1035 int ret = 0;
1036
1037 genpd = dev_to_genpd(dev);
1038 if (IS_ERR(genpd))
1039 return -EINVAL;
1040
1041 if (poweroff)
1042 ret = pm_generic_poweroff_noirq(dev);
1043 else
1044 ret = pm_generic_suspend_noirq(dev);
1045 if (ret)
1046 return ret;
1047
1048 if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
1049 return 0;
1050
1051 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1052 !pm_runtime_status_suspended(dev)) {
1053 ret = genpd_stop_dev(genpd, dev);
1054 if (ret) {
1055 if (poweroff)
1056 pm_generic_restore_noirq(dev);
1057 else
1058 pm_generic_resume_noirq(dev);
1059 return ret;
1060 }
1061 }
1062
1063 genpd_lock(genpd);
1064 genpd->suspended_count++;
1065 genpd_sync_power_off(genpd, true, 0);
1066 genpd_unlock(genpd);
1067
1068 return 0;
1069}
1070
1071/**
1072 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1073 * @dev: Device to suspend.
1074 *
1075 * Stop the device and remove power from the domain if all devices in it have
1076 * been stopped.
1077 */
1078static int genpd_suspend_noirq(struct device *dev)
1079{
1080 dev_dbg(dev, "%s()\n", __func__);
1081
1082 return genpd_finish_suspend(dev, false);
1083}
1084
1085/**
1086 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1087 * @dev: Device to resume.
1088 *
1089 * Restore power to the device's PM domain, if necessary, and start the device.
1090 */
1091static int genpd_resume_noirq(struct device *dev)
1092{
1093 struct generic_pm_domain *genpd;
1094 int ret;
1095
1096 dev_dbg(dev, "%s()\n", __func__);
1097
1098 genpd = dev_to_genpd(dev);
1099 if (IS_ERR(genpd))
1100 return -EINVAL;
1101
1102 if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
1103 return pm_generic_resume_noirq(dev);
1104
1105 genpd_lock(genpd);
1106 genpd_sync_power_on(genpd, true, 0);
1107 genpd->suspended_count--;
1108 genpd_unlock(genpd);
1109
1110 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1111 !pm_runtime_status_suspended(dev)) {
1112 ret = genpd_start_dev(genpd, dev);
1113 if (ret)
1114 return ret;
1115 }
1116
1117 return pm_generic_resume_noirq(dev);
1118}
1119
1120/**
1121 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1122 * @dev: Device to freeze.
1123 *
1124 * Carry out a late freeze of a device under the assumption that its
1125 * pm_domain field points to the domain member of an object of type
1126 * struct generic_pm_domain representing a power domain consisting of I/O
1127 * devices.
1128 */
1129static int genpd_freeze_noirq(struct device *dev)
1130{
1131 const struct generic_pm_domain *genpd;
1132 int ret = 0;
1133
1134 dev_dbg(dev, "%s()\n", __func__);
1135
1136 genpd = dev_to_genpd(dev);
1137 if (IS_ERR(genpd))
1138 return -EINVAL;
1139
1140 ret = pm_generic_freeze_noirq(dev);
1141 if (ret)
1142 return ret;
1143
1144 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1145 !pm_runtime_status_suspended(dev))
1146 ret = genpd_stop_dev(genpd, dev);
1147
1148 return ret;
1149}
1150
1151/**
1152 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1153 * @dev: Device to thaw.
1154 *
1155 * Start the device, unless power has been removed from the domain already
1156 * before the system transition.
1157 */
1158static int genpd_thaw_noirq(struct device *dev)
1159{
1160 const struct generic_pm_domain *genpd;
1161 int ret = 0;
1162
1163 dev_dbg(dev, "%s()\n", __func__);
1164
1165 genpd = dev_to_genpd(dev);
1166 if (IS_ERR(genpd))
1167 return -EINVAL;
1168
1169 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1170 !pm_runtime_status_suspended(dev)) {
1171 ret = genpd_start_dev(genpd, dev);
1172 if (ret)
1173 return ret;
1174 }
1175
1176 return pm_generic_thaw_noirq(dev);
1177}
1178
1179/**
1180 * genpd_poweroff_noirq - Completion of hibernation of device in an
1181 * I/O PM domain.
1182 * @dev: Device to power off.
1183 *
1184 * Stop the device and remove power from the domain if all devices in it have
1185 * been stopped.
1186 */
1187static int genpd_poweroff_noirq(struct device *dev)
1188{
1189 dev_dbg(dev, "%s()\n", __func__);
1190
1191 return genpd_finish_suspend(dev, true);
1192}
1193
1194/**
1195 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1196 * @dev: Device to resume.
1197 *
1198 * Make sure the domain will be in the same power state as before the
1199 * hibernation the system is resuming from and start the device if necessary.
1200 */
1201static int genpd_restore_noirq(struct device *dev)
1202{
1203 struct generic_pm_domain *genpd;
1204 int ret = 0;
1205
1206 dev_dbg(dev, "%s()\n", __func__);
1207
1208 genpd = dev_to_genpd(dev);
1209 if (IS_ERR(genpd))
1210 return -EINVAL;
1211
1212 /*
1213 * At this point suspended_count == 0 means we are being run for the
1214 * first time for the given domain in the present cycle.
1215 */
1216 genpd_lock(genpd);
1217 if (genpd->suspended_count++ == 0)
1218 /*
1219 * The boot kernel might put the domain into arbitrary state,
1220 * so make it appear as powered off to genpd_sync_power_on(),
1221 * so that it tries to power it on in case it was really off.
1222 */
1223 genpd->status = GPD_STATE_POWER_OFF;
1224
1225 genpd_sync_power_on(genpd, true, 0);
1226 genpd_unlock(genpd);
1227
1228 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1229 !pm_runtime_status_suspended(dev)) {
1230 ret = genpd_start_dev(genpd, dev);
1231 if (ret)
1232 return ret;
1233 }
1234
1235 return pm_generic_restore_noirq(dev);
1236}
1237
1238/**
1239 * genpd_complete - Complete power transition of a device in a power domain.
1240 * @dev: Device to complete the transition of.
1241 *
1242 * Complete a power transition of a device (during a system-wide power
1243 * transition) under the assumption that its pm_domain field points to the
1244 * domain member of an object of type struct generic_pm_domain representing
1245 * a power domain consisting of I/O devices.
1246 */
1247static void genpd_complete(struct device *dev)
1248{
1249 struct generic_pm_domain *genpd;
1250
1251 dev_dbg(dev, "%s()\n", __func__);
1252
1253 genpd = dev_to_genpd(dev);
1254 if (IS_ERR(genpd))
1255 return;
1256
1257 pm_generic_complete(dev);
1258
1259 genpd_lock(genpd);
1260
1261 genpd->prepared_count--;
1262 if (!genpd->prepared_count)
1263 genpd_queue_power_off_work(genpd);
1264
1265 genpd_unlock(genpd);
1266}
1267
1268/**
1269 * genpd_syscore_switch - Switch power during system core suspend or resume.
1270 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether this is a suspend (true) or a resume (false) transition.
1271 *
1272 * This routine may only be called during the system core (syscore) suspend or
1273 * resume phase for devices whose "always on" flags are set.
1274 */
1275static void genpd_syscore_switch(struct device *dev, bool suspend)
1276{
1277 struct generic_pm_domain *genpd;
1278
1279 genpd = dev_to_genpd(dev);
1280 if (!genpd_present(genpd))
1281 return;
1282
1283 if (suspend) {
1284 genpd->suspended_count++;
1285 genpd_sync_power_off(genpd, false, 0);
1286 } else {
1287 genpd_sync_power_on(genpd, false, 0);
1288 genpd->suspended_count--;
1289 }
1290}
1291
1292void pm_genpd_syscore_poweroff(struct device *dev)
1293{
1294 genpd_syscore_switch(dev, true);
1295}
1296EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1297
1298void pm_genpd_syscore_poweron(struct device *dev)
1299{
1300 genpd_syscore_switch(dev, false);
1301}
1302EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
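
/*
 * Illustrative sketch (not part of this file): a driver for a device in an
 * "always on" domain may switch the domain's power from its syscore
 * callbacks, where runtime PM is unavailable. The device pointer, the ops
 * and their registration below are hypothetical.
 *
 *	#include <linux/syscore_ops.h>
 *
 *	static struct device *timer_dev;
 *
 *	static int timer_syscore_suspend(void)
 *	{
 *		pm_genpd_syscore_poweroff(timer_dev);
 *		return 0;
 *	}
 *
 *	static void timer_syscore_resume(void)
 *	{
 *		pm_genpd_syscore_poweron(timer_dev);
 *	}
 *
 *	static struct syscore_ops timer_syscore_ops = {
 *		.suspend = timer_syscore_suspend,
 *		.resume = timer_syscore_resume,
 *	};
 *
 *	register_syscore_ops(&timer_syscore_ops);
 */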
1303
1304#else /* !CONFIG_PM_SLEEP */
1305
1306#define genpd_prepare NULL
1307#define genpd_suspend_noirq NULL
1308#define genpd_resume_noirq NULL
1309#define genpd_freeze_noirq NULL
1310#define genpd_thaw_noirq NULL
1311#define genpd_poweroff_noirq NULL
1312#define genpd_restore_noirq NULL
1313#define genpd_complete NULL
1314
1315#endif /* CONFIG_PM_SLEEP */
1316
1317static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1318 struct generic_pm_domain *genpd,
1319 struct gpd_timing_data *td)
1320{
1321 struct generic_pm_domain_data *gpd_data;
1322 int ret;
1323
1324 ret = dev_pm_get_subsys_data(dev);
1325 if (ret)
1326 return ERR_PTR(ret);
1327
1328 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1329 if (!gpd_data) {
1330 ret = -ENOMEM;
1331 goto err_put;
1332 }
1333
1334 if (td)
1335 gpd_data->td = *td;
1336
1337 gpd_data->base.dev = dev;
1338 gpd_data->td.constraint_changed = true;
1339 gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1340 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1341
1342 spin_lock_irq(&dev->power.lock);
1343
1344 if (dev->power.subsys_data->domain_data) {
1345 ret = -EINVAL;
1346 goto err_free;
1347 }
1348
1349 dev->power.subsys_data->domain_data = &gpd_data->base;
1350
1351 spin_unlock_irq(&dev->power.lock);
1352
1353 return gpd_data;
1354
1355 err_free:
1356 spin_unlock_irq(&dev->power.lock);
1357 kfree(gpd_data);
1358 err_put:
1359 dev_pm_put_subsys_data(dev);
1360 return ERR_PTR(ret);
1361}
1362
1363static void genpd_free_dev_data(struct device *dev,
1364 struct generic_pm_domain_data *gpd_data)
1365{
1366 spin_lock_irq(&dev->power.lock);
1367
1368 dev->power.subsys_data->domain_data = NULL;
1369
1370 spin_unlock_irq(&dev->power.lock);
1371
1372 kfree(gpd_data);
1373 dev_pm_put_subsys_data(dev);
1374}
1375
1376static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1377 struct gpd_timing_data *td)
1378{
1379 struct generic_pm_domain_data *gpd_data;
1380 int ret = 0;
1381
1382 dev_dbg(dev, "%s()\n", __func__);
1383
1384 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1385 return -EINVAL;
1386
1387 gpd_data = genpd_alloc_dev_data(dev, genpd, td);
1388 if (IS_ERR(gpd_data))
1389 return PTR_ERR(gpd_data);
1390
1391 genpd_lock(genpd);
1392
1393 if (genpd->prepared_count > 0) {
1394 ret = -EAGAIN;
1395 goto out;
1396 }
1397
1398 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1399 if (ret)
1400 goto out;
1401
1402 dev_pm_domain_set(dev, &genpd->domain);
1403
1404 genpd->device_count++;
1405 genpd->max_off_time_changed = true;
1406
1407 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1408
1409 out:
1410 genpd_unlock(genpd);
1411
1412 if (ret)
1413 genpd_free_dev_data(dev, gpd_data);
1414 else
1415 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1416
1417 return ret;
1418}
1419
1420/**
1421 * __pm_genpd_add_device - Add a device to an I/O PM domain.
1422 * @genpd: PM domain to add the device to.
1423 * @dev: Device to be added.
1424 * @td: Set of PM QoS timing parameters to attach to the device.
1425 */
1426int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1427 struct gpd_timing_data *td)
1428{
1429 int ret;
1430
1431 mutex_lock(&gpd_list_lock);
1432 ret = genpd_add_device(genpd, dev, td);
1433 mutex_unlock(&gpd_list_lock);
1434
1435 return ret;
1436}
1437EXPORT_SYMBOL_GPL(__pm_genpd_add_device);
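
/*
 * Illustrative sketch: a SoC driver that manages its own domain object can
 * add a platform device to it directly. The domain and device below are
 * hypothetical; passing a NULL @td selects default timing data:
 *
 *	ret = __pm_genpd_add_device(&soc_pd, &pdev->dev, NULL);
 *	if (ret)
 *		return ret;
 *
 * The device can later be detached again with pm_genpd_remove_device().
 */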
1438
1439static int genpd_remove_device(struct generic_pm_domain *genpd,
1440 struct device *dev)
1441{
1442 struct generic_pm_domain_data *gpd_data;
1443 struct pm_domain_data *pdd;
1444 int ret = 0;
1445
1446 dev_dbg(dev, "%s()\n", __func__);
1447
1448 pdd = dev->power.subsys_data->domain_data;
1449 gpd_data = to_gpd_data(pdd);
1450 dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1451
1452 genpd_lock(genpd);
1453
1454 if (genpd->prepared_count > 0) {
1455 ret = -EAGAIN;
1456 goto out;
1457 }
1458
1459 genpd->device_count--;
1460 genpd->max_off_time_changed = true;
1461
1462 if (genpd->detach_dev)
1463 genpd->detach_dev(genpd, dev);
1464
1465 dev_pm_domain_set(dev, NULL);
1466
1467 list_del_init(&pdd->list_node);
1468
1469 genpd_unlock(genpd);
1470
1471 genpd_free_dev_data(dev, gpd_data);
1472
1473 return 0;
1474
1475 out:
1476 genpd_unlock(genpd);
1477 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1478
1479 return ret;
1480}
1481
1482/**
1483 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1484 * @genpd: PM domain to remove the device from.
1485 * @dev: Device to be removed.
1486 */
1487int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1488 struct device *dev)
1489{
1490 if (!genpd || genpd != genpd_lookup_dev(dev))
1491 return -EINVAL;
1492
1493 return genpd_remove_device(genpd, dev);
1494}
1495EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1496
1497static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1498 struct generic_pm_domain *subdomain)
1499{
1500 struct gpd_link *link, *itr;
1501 int ret = 0;
1502
1503 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1504 || genpd == subdomain)
1505 return -EINVAL;
1506
1507 /*
1508 * If the domain can be powered on/off in an IRQ safe
1509 * context, ensure that the subdomain can also be
1510 * powered on/off in that context.
1511 */
1512 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1513 WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1514 genpd->name, subdomain->name);
1515 return -EINVAL;
1516 }
1517
1518 link = kzalloc(sizeof(*link), GFP_KERNEL);
1519 if (!link)
1520 return -ENOMEM;
1521
1522 genpd_lock(subdomain);
1523 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1524
1525 if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1526 ret = -EINVAL;
1527 goto out;
1528 }
1529
1530 list_for_each_entry(itr, &genpd->master_links, master_node) {
1531 if (itr->slave == subdomain && itr->master == genpd) {
1532 ret = -EINVAL;
1533 goto out;
1534 }
1535 }
1536
1537 link->master = genpd;
1538 list_add_tail(&link->master_node, &genpd->master_links);
1539 link->slave = subdomain;
1540 list_add_tail(&link->slave_node, &subdomain->slave_links);
1541 if (genpd_status_on(subdomain))
1542 genpd_sd_counter_inc(genpd);
1543
1544 out:
1545 genpd_unlock(genpd);
1546 genpd_unlock(subdomain);
1547 if (ret)
1548 kfree(link);
1549 return ret;
1550}
1551
1552/**
1553 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1554 * @genpd: Master PM domain to add the subdomain to.
1555 * @subdomain: Subdomain to be added.
1556 */
1557int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1558 struct generic_pm_domain *subdomain)
1559{
1560 int ret;
1561
1562 mutex_lock(&gpd_list_lock);
1563 ret = genpd_add_subdomain(genpd, subdomain);
1564 mutex_unlock(&gpd_list_lock);
1565
1566 return ret;
1567}
1568EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
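
/*
 * Illustrative sketch: building a hierarchy from two domains that have
 * already been registered with pm_genpd_init() (names are hypothetical):
 *
 *	ret = pm_genpd_add_subdomain(&soc_top_pd, &gpu_pd);
 *	if (ret)
 *		return ret;
 *
 * Afterwards, powering on gpu_pd first powers on soc_top_pd, and the
 * subdomain count keeps soc_top_pd on until gpu_pd has been powered off.
 */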
1569
1570/**
1571 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1572 * @genpd: Master PM domain to remove the subdomain from.
1573 * @subdomain: Subdomain to be removed.
1574 */
1575int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1576 struct generic_pm_domain *subdomain)
1577{
1578 struct gpd_link *l, *link;
1579 int ret = -EINVAL;
1580
1581 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1582 return -EINVAL;
1583
1584 genpd_lock(subdomain);
1585 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1586
1587 if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
1588 pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
1589 subdomain->name);
1590 ret = -EBUSY;
1591 goto out;
1592 }
1593
1594 list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
1595 if (link->slave != subdomain)
1596 continue;
1597
1598 list_del(&link->master_node);
1599 list_del(&link->slave_node);
1600 kfree(link);
1601 if (genpd_status_on(subdomain))
1602 genpd_sd_counter_dec(genpd);
1603
1604 ret = 0;
1605 break;
1606 }
1607
1608out:
1609 genpd_unlock(genpd);
1610 genpd_unlock(subdomain);
1611
1612 return ret;
1613}
1614EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1615
1616static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1617{
1618 struct genpd_power_state *state;
1619
1620 state = kzalloc(sizeof(*state), GFP_KERNEL);
1621 if (!state)
1622 return -ENOMEM;
1623
1624 genpd->states = state;
1625 genpd->state_count = 1;
1626 genpd->free = state;
1627
1628 return 0;
1629}
1630
1631static void genpd_lock_init(struct generic_pm_domain *genpd)
1632{
1633 if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1634 spin_lock_init(&genpd->slock);
1635 genpd->lock_ops = &genpd_spin_ops;
1636 } else {
1637 mutex_init(&genpd->mlock);
1638 genpd->lock_ops = &genpd_mtx_ops;
1639 }
1640}
1641
1642/**
1643 * pm_genpd_init - Initialize a generic I/O PM domain object.
1644 * @genpd: PM domain object to initialize.
1645 * @gov: PM domain governor to associate with the domain (may be NULL).
1646 * @is_off: Initial power state of the domain (true means powered off).
1647 *
1648 * Returns 0 on successful initialization, else a negative error code.
1649 */
1650int pm_genpd_init(struct generic_pm_domain *genpd,
1651 struct dev_power_governor *gov, bool is_off)
1652{
1653 int ret;
1654
1655 if (IS_ERR_OR_NULL(genpd))
1656 return -EINVAL;
1657
1658 INIT_LIST_HEAD(&genpd->master_links);
1659 INIT_LIST_HEAD(&genpd->slave_links);
1660 INIT_LIST_HEAD(&genpd->dev_list);
1661 genpd_lock_init(genpd);
1662 genpd->gov = gov;
1663 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1664 atomic_set(&genpd->sd_count, 0);
1665 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1666 genpd->device_count = 0;
1667 genpd->max_off_time_ns = -1;
1668 genpd->max_off_time_changed = true;
1669 genpd->provider = NULL;
1670 genpd->has_provider = false;
1671 genpd->accounting_time = ktime_get();
1672 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1673 genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1674 genpd->domain.ops.prepare = genpd_prepare;
1675 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
1676 genpd->domain.ops.resume_noirq = genpd_resume_noirq;
1677 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
1678 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
1679 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
1680 genpd->domain.ops.restore_noirq = genpd_restore_noirq;
1681 genpd->domain.ops.complete = genpd_complete;
1682
1683 if (genpd->flags & GENPD_FLAG_PM_CLK) {
1684 genpd->dev_ops.stop = pm_clk_suspend;
1685 genpd->dev_ops.start = pm_clk_resume;
1686 }
1687
1688 /* Always-on domains must be powered on at initialization. */
1689 if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
1690 return -EINVAL;
1691
1692 /* Use only one "off" state if there were no states declared */
1693 if (genpd->state_count == 0) {
1694 ret = genpd_set_default_power_state(genpd);
1695 if (ret)
1696 return ret;
1697 }
1698
1699 mutex_lock(&gpd_list_lock);
1700 list_add(&genpd->gpd_list_node, &gpd_list);
1701 mutex_unlock(&gpd_list_lock);
1702
1703 return 0;
1704}
1705EXPORT_SYMBOL_GPL(pm_genpd_init);
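
/*
 * Illustrative sketch of registering a domain with its power callbacks; the
 * structure and callbacks are hypothetical and real implementations would
 * program the SoC's power controller:
 *
 *	static int soc_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		return 0;
 *	}
 *
 *	static int soc_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain soc_pd = {
 *		.name = "soc-pd",
 *		.power_on = soc_pd_power_on,
 *		.power_off = soc_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&soc_pd, NULL, true);
 *
 * A NULL governor means no power_down_ok() check is made before the domain
 * is powered off, and passing true for @is_off registers the domain as
 * initially powered off.
 */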
1706
1707static int genpd_remove(struct generic_pm_domain *genpd)
1708{
1709 struct gpd_link *l, *link;
1710
1711 if (IS_ERR_OR_NULL(genpd))
1712 return -EINVAL;
1713
1714 genpd_lock(genpd);
1715
1716 if (genpd->has_provider) {
1717 genpd_unlock(genpd);
1718 pr_err("Provider present, unable to remove %s\n", genpd->name);
1719 return -EBUSY;
1720 }
1721
1722 if (!list_empty(&genpd->master_links) || genpd->device_count) {
1723 genpd_unlock(genpd);
1724 pr_err("%s: unable to remove %s\n", __func__, genpd->name);
1725 return -EBUSY;
1726 }
1727
1728 list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
1729 list_del(&link->master_node);
1730 list_del(&link->slave_node);
1731 kfree(link);
1732 }
1733
1734 list_del(&genpd->gpd_list_node);
1735 genpd_unlock(genpd);
1736 cancel_work_sync(&genpd->power_off_work);
1737 kfree(genpd->free);
1738 pr_debug("%s: removed %s\n", __func__, genpd->name);
1739
1740 return 0;
1741}
1742
1743/**
1744 * pm_genpd_remove - Remove a generic I/O PM domain
1745 * @genpd: Pointer to PM domain that is to be removed.
1746 *
1747 * To remove the PM domain, this function:
1748 * - Removes the PM domain as a subdomain to any parent domains,
1749 * if it was added.
1750 * - Removes the PM domain from the list of registered PM domains.
1751 *
1752 * The PM domain will only be removed if the associated provider has
1753 * been removed, it is not a parent to any other PM domain, and it has
1754 * no devices associated with it.
1755 */
1756int pm_genpd_remove(struct generic_pm_domain *genpd)
1757{
1758 int ret;
1759
1760 mutex_lock(&gpd_list_lock);
1761 ret = genpd_remove(genpd);
1762 mutex_unlock(&gpd_list_lock);
1763
1764 return ret;
1765}
1766EXPORT_SYMBOL_GPL(pm_genpd_remove);
1767
1768#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1769
1770/*
1771 * Device Tree based PM domain providers.
1772 *
1773 * The code below implements generic device tree based PM domain providers that
1774 * bind device tree nodes with generic PM domains registered in the system.
1775 *
1776 * Any driver that registers generic PM domains and needs to support binding of
1777 * devices to these domains is supposed to register a PM domain provider, which
1778 * maps a PM domain specifier retrieved from the device tree to a PM domain.
1779 *
1780 * Two simple mapping functions have been provided for convenience:
1781 * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1782 * - genpd_xlate_onecell() for mapping of multiple PM domains per node by
1783 * index.
1784 */
1785
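/*
 * Illustrative sketch of the simplest binding described above: a provider
 * node with "#power-domain-cells = <0>" maps 1:1 onto a single domain and
 * can be registered with of_genpd_add_provider_simple(), which uses
 * genpd_xlate_simple(). Node, compatible and domain names are hypothetical:
 *
 *	power: power-controller@12340000 {
 *		compatible = "vendor,power-controller";
 *		reg = <0x12340000 0x1000>;
 *		#power-domain-cells = <0>;
 *	};
 *
 *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &soc_pd);
 */
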
1786/**
1787 * struct of_genpd_provider - PM domain provider registration structure
1788 * @link: Entry in global list of PM domain providers
1789 * @node: Pointer to device tree node of PM domain provider
1790 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1791 * into a PM domain.
1792 * @data: context pointer to be passed into @xlate callback
1793 */
1794struct of_genpd_provider {
1795 struct list_head link;
1796 struct device_node *node;
1797 genpd_xlate_t xlate;
1798 void *data;
1799};
1800
1801/* List of registered PM domain providers. */
1802static LIST_HEAD(of_genpd_providers);
1803/* Mutex to protect the list above. */
1804static DEFINE_MUTEX(of_genpd_mutex);
1805
1806/**
1807 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
1808 * @genpdspec: OF phandle args to map into a PM domain
1809 * @data: xlate function private data - pointer to struct generic_pm_domain
1810 *
1811 * This is a generic xlate function that can be used to model PM domains that
1812 * have their own device tree nodes. The private data of xlate function needs
1813 * to be a valid pointer to struct generic_pm_domain.
1814 */
1815static struct generic_pm_domain *genpd_xlate_simple(
1816 struct of_phandle_args *genpdspec,
1817 void *data)
1818{
1819 return data;
1820}
1821
1822/**
1823 * genpd_xlate_onecell() - Xlate function using a single index.
1824 * @genpdspec: OF phandle args to map into a PM domain
1825 * @data: xlate function private data - pointer to struct genpd_onecell_data
1826 *
1827 * This is a generic xlate function that can be used to model simple PM domain
1828 * controllers that have one device tree node and provide multiple PM domains.
1829 * A single cell is used as an index into an array of PM domains specified in
1830 * the genpd_onecell_data struct when registering the provider.
1831 */
1832static struct generic_pm_domain *genpd_xlate_onecell(
1833 struct of_phandle_args *genpdspec,
1834 void *data)
1835{
1836 struct genpd_onecell_data *genpd_data = data;
1837 unsigned int idx = genpdspec->args[0];
1838
1839 if (genpdspec->args_count != 1)
1840 return ERR_PTR(-EINVAL);
1841
1842 if (idx >= genpd_data->num_domains) {
1843 pr_err("%s: invalid domain index %u\n", __func__, idx);
1844 return ERR_PTR(-EINVAL);
1845 }
1846
1847 if (!genpd_data->domains[idx])
1848 return ERR_PTR(-ENOENT);
1849
1850 return genpd_data->domains[idx];
1851}
1852
1853/**
1854 * genpd_add_provider() - Register a PM domain provider for a node
1855 * @np: Device node pointer associated with the PM domain provider.
1856 * @xlate: Callback for decoding PM domain from phandle arguments.
1857 * @data: Context pointer for @xlate callback.
1858 */
1859static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
1860 void *data)
1861{
1862 struct of_genpd_provider *cp;
1863
1864 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
1865 if (!cp)
1866 return -ENOMEM;
1867
1868 cp->node = of_node_get(np);
1869 cp->data = data;
1870 cp->xlate = xlate;
1871
1872 mutex_lock(&of_genpd_mutex);
1873 list_add(&cp->link, &of_genpd_providers);
1874 mutex_unlock(&of_genpd_mutex);
1875 pr_debug("Added domain provider from %pOF\n", np);
1876
1877 return 0;
1878}
1879
1880/**
1881 * of_genpd_add_provider_simple() - Register a simple PM domain provider
1882 * @np: Device node pointer associated with the PM domain provider.
1883 * @genpd: Pointer to PM domain associated with the PM domain provider.
1884 */
1885int of_genpd_add_provider_simple(struct device_node *np,
1886 struct generic_pm_domain *genpd)
1887{
1888 int ret = -EINVAL;
1889
1890 if (!np || !genpd)
1891 return -EINVAL;
1892
1893 mutex_lock(&gpd_list_lock);
1894
1895 if (genpd_present(genpd)) {
1896 ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
1897 if (!ret) {
1898 genpd->provider = &np->fwnode;
1899 genpd->has_provider = true;
1900 }
1901 }
1902
1903 mutex_unlock(&gpd_list_lock);
1904
1905 return ret;
1906}
1907EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
1908
1909/**
1910 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
1911 * @np: Device node pointer associated with the PM domain provider.
1912 * @data: Pointer to the data associated with the PM domain provider.
1913 */
1914int of_genpd_add_provider_onecell(struct device_node *np,
1915 struct genpd_onecell_data *data)
1916{
1917 unsigned int i;
1918 int ret = -EINVAL;
1919
1920 if (!np || !data)
1921 return -EINVAL;
1922
1923 mutex_lock(&gpd_list_lock);
1924
1925 if (!data->xlate)
1926 data->xlate = genpd_xlate_onecell;
1927
1928 for (i = 0; i < data->num_domains; i++) {
1929 if (!data->domains[i])
1930 continue;
1931 if (!genpd_present(data->domains[i]))
1932 goto error;
1933
1934 data->domains[i]->provider = &np->fwnode;
1935 data->domains[i]->has_provider = true;
1936 }
1937
1938 ret = genpd_add_provider(np, data->xlate, data);
1939 if (ret < 0)
1940 goto error;
1941
1942 mutex_unlock(&gpd_list_lock);
1943
1944 return 0;
1945
1946error:
1947 while (i--) {
1948 if (!data->domains[i])
1949 continue;
1950 data->domains[i]->provider = NULL;
1951 data->domains[i]->has_provider = false;
1952 }
1953
1954 mutex_unlock(&gpd_list_lock);
1955
1956 return ret;
1957}
1958EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
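
/*
 * Illustrative sketch of a onecell provider covering several domains; the
 * array, its size and the domain objects are hypothetical:
 *
 *	static struct generic_pm_domain *soc_domains[] = {
 *		&soc_pd_a,
 *		&soc_pd_b,
 *	};
 *
 *	static struct genpd_onecell_data soc_pd_data = {
 *		.domains = soc_domains,
 *		.num_domains = ARRAY_SIZE(soc_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(pdev->dev.of_node, &soc_pd_data);
 *
 * Consumers then select a domain by index, e.g.
 * "power-domains = <&power 1>;" resolves to soc_pd_b via
 * genpd_xlate_onecell().
 */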
1959
1960/**
1961 * of_genpd_del_provider() - Remove a previously registered PM domain provider
1962 * @np: Device node pointer associated with the PM domain provider
1963 */
1964void of_genpd_del_provider(struct device_node *np)
1965{
1966 struct of_genpd_provider *cp, *tmp;
1967 struct generic_pm_domain *gpd;
1968
1969 mutex_lock(&gpd_list_lock);
1970 mutex_lock(&of_genpd_mutex);
1971 list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
1972 if (cp->node == np) {
1973 /*
1974 * For each PM domain associated with the
1975 * provider, set the 'has_provider' to false
1976 * so that the PM domain can be safely removed.
1977 */
1978 list_for_each_entry(gpd, &gpd_list, gpd_list_node)
1979 if (gpd->provider == &np->fwnode)
1980 gpd->has_provider = false;
1981
1982 list_del(&cp->link);
1983 of_node_put(cp->node);
1984 kfree(cp);
1985 break;
1986 }
1987 }
1988 mutex_unlock(&of_genpd_mutex);
1989 mutex_unlock(&gpd_list_lock);
1990}
1991EXPORT_SYMBOL_GPL(of_genpd_del_provider);
1992
1993/**
1994 * genpd_get_from_provider() - Look-up PM domain
1995 * @genpdspec: OF phandle args to use for look-up
1996 *
1997 * Looks for a PM domain provider under the node specified by @genpdspec and if
1998 * found, uses xlate function of the provider to map phandle args to a PM
1999 * domain.
2000 *
2001 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2002 * on failure.
2003 */
2004static struct generic_pm_domain *genpd_get_from_provider(
2005 struct of_phandle_args *genpdspec)
2006{
2007 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2008 struct of_genpd_provider *provider;
2009
2010 if (!genpdspec)
2011 return ERR_PTR(-EINVAL);
2012
2013 mutex_lock(&of_genpd_mutex);
2014
2015 /* Check if we have such a provider in our list */
2016 list_for_each_entry(provider, &of_genpd_providers, link) {
2017 if (provider->node == genpdspec->np)
2018 genpd = provider->xlate(genpdspec, provider->data);
2019 if (!IS_ERR(genpd))
2020 break;
2021 }
2022
2023 mutex_unlock(&of_genpd_mutex);
2024
2025 return genpd;
2026}
2027
2028/**
2029 * of_genpd_add_device() - Add a device to an I/O PM domain
2030 * @genpdspec: OF phandle args to use for look-up PM domain
2031 * @dev: Device to be added.
2032 *
2033 * Looks-up an I/O PM domain based upon phandle args provided and adds
2034 * the device to the PM domain. Returns a negative error code on failure.
2035 */
2036int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2037{
2038 struct generic_pm_domain *genpd;
2039 int ret;
2040
2041 mutex_lock(&gpd_list_lock);
2042
2043 genpd = genpd_get_from_provider(genpdspec);
2044 if (IS_ERR(genpd)) {
2045 ret = PTR_ERR(genpd);
2046 goto out;
2047 }
2048
2049 ret = genpd_add_device(genpd, dev, NULL);
2050
2051out:
2052 mutex_unlock(&gpd_list_lock);
2053
2054 return ret;
2055}
2056EXPORT_SYMBOL_GPL(of_genpd_add_device);
2057
2058/**
2059 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2060 * @parent_spec: OF phandle args to use for parent PM domain look-up
2061 * @subdomain_spec: OF phandle args to use for subdomain look-up
2062 *
2063 * Looks-up a parent PM domain and subdomain based upon phandle args
2064 * provided and adds the subdomain to the parent PM domain. Returns a
2065 * negative error code on failure.
2066 */
2067int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2068 struct of_phandle_args *subdomain_spec)
2069{
2070 struct generic_pm_domain *parent, *subdomain;
2071 int ret;
2072
2073 mutex_lock(&gpd_list_lock);
2074
2075 parent = genpd_get_from_provider(parent_spec);
2076 if (IS_ERR(parent)) {
2077 ret = PTR_ERR(parent);
2078 goto out;
2079 }
2080
2081 subdomain = genpd_get_from_provider(subdomain_spec);
2082 if (IS_ERR(subdomain)) {
2083 ret = PTR_ERR(subdomain);
2084 goto out;
2085 }
2086
2087 ret = genpd_add_subdomain(parent, subdomain);
2088
2089out:
2090 mutex_unlock(&gpd_list_lock);
2091
2092 return ret;
2093}
2094EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2095
2096/**
2097 * of_genpd_remove_last - Remove the last PM domain registered for a provider
2098 * @np: Pointer to the device node associated with the provider
2099 *
2100 * Find the last PM domain that was added by a particular provider and
2101 * remove this PM domain from the list of PM domains. The provider is
2102 * identified by the device node that is passed. The PM domain will
2103 * only be removed if the provider associated with the domain has been
2104 * removed.
2105 *
2106 * Returns a valid pointer to struct generic_pm_domain on success or
2107 * ERR_PTR() on failure.
2108 */
2109struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2110{
2111 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2112 int ret;
2113
2114 if (IS_ERR_OR_NULL(np))
2115 return ERR_PTR(-EINVAL);
2116
2117 mutex_lock(&gpd_list_lock);
2118 list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2119 if (gpd->provider == &np->fwnode) {
2120 ret = genpd_remove(gpd);
2121 genpd = ret ? ERR_PTR(ret) : gpd;
2122 break;
2123 }
2124 }
2125 mutex_unlock(&gpd_list_lock);
2126
2127 return genpd;
2128}
2129EXPORT_SYMBOL_GPL(of_genpd_remove_last);
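
/*
 * Illustrative teardown sketch: on provider removal the registration is
 * dropped first and the domains added for the node are then removed one by
 * one (the node pointer np is hypothetical):
 *
 *	struct generic_pm_domain *pd;
 *
 *	of_genpd_del_provider(np);
 *	do {
 *		pd = of_genpd_remove_last(np);
 *	} while (!IS_ERR(pd));
 *
 * of_genpd_remove_last() returns ERR_PTR(-ENOENT) once no domain registered
 * for the node is left.
 */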
2130
2131/**
2132 * genpd_dev_pm_detach - Detach a device from its PM domain.
2133 * @dev: Device to detach.
2134 * @power_off: Currently not used
2135 *
2136 * Try to locate a corresponding generic PM domain, which the device was
2137 * attached to previously. If such is found, the device is detached from it.
2138 */
2139static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2140{
2141 struct generic_pm_domain *pd;
2142 unsigned int i;
2143 int ret = 0;
2144
2145 pd = dev_to_genpd(dev);
2146 if (IS_ERR(pd))
2147 return;
2148
2149 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2150
2151 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2152 ret = genpd_remove_device(pd, dev);
2153 if (ret != -EAGAIN)
2154 break;
2155
2156 mdelay(i);
2157 cond_resched();
2158 }
2159
2160 if (ret < 0) {
2161 dev_err(dev, "failed to remove from PM domain %s: %d\n",
2162 pd->name, ret);
2163 return;
2164 }
2165
2166 /* Check if PM domain can be powered off after removing this device. */
2167 genpd_queue_power_off_work(pd);
2168}
2169
2170static void genpd_dev_pm_sync(struct device *dev)
2171{
2172 struct generic_pm_domain *pd;
2173
2174 pd = dev_to_genpd(dev);
2175 if (IS_ERR(pd))
2176 return;
2177
2178 genpd_queue_power_off_work(pd);
2179}
2180
2181/**
2182 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2183 * @dev: Device to attach.
2184 *
2185 * Parse device's OF node to find a PM domain specifier. If such is found,
2186 * attaches the device to retrieved pm_domain ops.
2187 *
2188 * Both generic and legacy Samsung-specific DT bindings are supported to keep
2189 * backwards compatibility with existing DTBs.
2190 *
2191 * Returns 0 on successfully attached PM domain, or a negative error code.
2192 * Note that if a power-domain exists for the device but cannot be found or
2193 * turned on, -EPROBE_DEFER is returned to ensure that the device is not
2194 * probed and will be retried later.
2195 */
2196int genpd_dev_pm_attach(struct device *dev)
2197{
2198 struct of_phandle_args pd_args;
2199 struct generic_pm_domain *pd;
2200 unsigned int i;
2201 int ret;
2202
2203 if (!dev->of_node)
2204 return -ENODEV;
2205
2206 if (dev->pm_domain)
2207 return -EEXIST;
2208
2209 ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2210 "#power-domain-cells", 0, &pd_args);
2211 if (ret < 0)
2212 return ret;
2213
2214 mutex_lock(&gpd_list_lock);
2215 pd = genpd_get_from_provider(&pd_args);
2216 of_node_put(pd_args.np);
2217 if (IS_ERR(pd)) {
2218 mutex_unlock(&gpd_list_lock);
2219 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2220 __func__, PTR_ERR(pd));
2221 return -EPROBE_DEFER;
2222 }
2223
2224 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2225
2226 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2227 ret = genpd_add_device(pd, dev, NULL);
2228 if (ret != -EAGAIN)
2229 break;
2230
2231 mdelay(i);
2232 cond_resched();
2233 }
2234 mutex_unlock(&gpd_list_lock);
2235
2236 if (ret < 0) {
2237 if (ret != -EPROBE_DEFER)
2238 dev_err(dev, "failed to add to PM domain %s: %d\n",
2239 pd->name, ret);
2240 goto out;
2241 }
2242
2243 dev->pm_domain->detach = genpd_dev_pm_detach;
2244 dev->pm_domain->sync = genpd_dev_pm_sync;
2245
2246 genpd_lock(pd);
2247 ret = genpd_power_on(pd, 0);
2248 genpd_unlock(pd);
2249out:
2250 return ret ? -EPROBE_DEFER : 0;
2251}
2252EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
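
/*
 * Illustrative sketch of the consumer side handled by this function; the
 * consumer node and provider label are hypothetical:
 *
 *	codec@f0000000 {
 *		compatible = "vendor,codec";
 *		reg = <0xf0000000 0x1000>;
 *		power-domains = <&power 1>;
 *	};
 *
 * For buses that opt in (e.g. the platform bus), the driver core calls
 * genpd_dev_pm_attach() via dev_pm_domain_attach() before probing the
 * device, which resolves the "power-domains" specifier through the
 * registered provider and adds the device to the selected domain.
 */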
2253
2254static const struct of_device_id idle_state_match[] = {
2255 { .compatible = "domain-idle-state", },
2256 { }
2257};
2258
2259static int genpd_parse_state(struct genpd_power_state *genpd_state,
2260 struct device_node *state_node)
2261{
2262 int err;
2263 u32 residency;
2264 u32 entry_latency, exit_latency;
2265
2266 err = of_property_read_u32(state_node, "entry-latency-us",
2267 &entry_latency);
2268 if (err) {
2269 pr_debug(" * %pOF missing entry-latency-us property\n",
2270 state_node);
2271 return -EINVAL;
2272 }
2273
2274 err = of_property_read_u32(state_node, "exit-latency-us",
2275 &exit_latency);
2276 if (err) {
2277 pr_debug(" * %pOF missing exit-latency-us property\n",
2278 state_node);
2279 return -EINVAL;
2280 }
2281
2282 err = of_property_read_u32(state_node, "min-residency-us", &residency);
2283 if (!err)
2284 genpd_state->residency_ns = 1000 * residency;
2285
2286 genpd_state->power_on_latency_ns = 1000 * exit_latency;
2287 genpd_state->power_off_latency_ns = 1000 * entry_latency;
2288 genpd_state->fwnode = &state_node->fwnode;
2289
2290 return 0;
2291}
2292
2293static int genpd_iterate_idle_states(struct device_node *dn,
2294 struct genpd_power_state *states)
2295{
2296 int ret;
2297 struct of_phandle_iterator it;
2298 struct device_node *np;
2299 int i = 0;
2300
2301 ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2302 if (ret <= 0)
2303 return ret;
2304
2305 /* Loop over the phandles until all the requested entries are found */
2306 of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2307 np = it.node;
2308 if (!of_match_node(idle_state_match, np))
2309 continue;
2310 if (states) {
2311 ret = genpd_parse_state(&states[i], np);
2312 if (ret) {
2313 pr_err("Parsing idle state node %pOF failed with err %d\n",
2314 np, ret);
2315 of_node_put(np);
2316 return ret;
2317 }
2318 }
2319 i++;
2320 }
2321
2322 return i;
2323}
2324
2325/**
2326 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2327 *
2328 * @dn: The genpd device node
2329 * @states: The pointer to which the state array will be saved.
2330 * @n: The count of elements in the array returned from this function.
2331 *
2332 * Returns the device states parsed from the OF node. The memory for the
2333 * states is allocated by this function and it is the caller's responsibility
2334 * to free it after use. If no domain idle states are found, -EINVAL is
2335 * returned; in case of other errors, a negative error code is returned.
2336 */
2337int of_genpd_parse_idle_states(struct device_node *dn,
2338 struct genpd_power_state **states, int *n)
2339{
2340 struct genpd_power_state *st;
2341 int ret;
2342
2343 ret = genpd_iterate_idle_states(dn, NULL);
2344 if (ret <= 0)
2345 return ret < 0 ? ret : -EINVAL;
2346
2347 st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
2348 if (!st)
2349 return -ENOMEM;
2350
2351 ret = genpd_iterate_idle_states(dn, st);
2352 if (ret <= 0) {
2353 kfree(st);
2354 return ret < 0 ? ret : -EINVAL;
2355 }
2356
2357 *states = st;
2358 *n = ret;
2359
2360 return 0;
2361}
2362EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
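
/*
 * Illustrative sketch: a provider with DT-described idle states typically
 * parses them before registering the domain (names are hypothetical):
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *
 *	soc_pd.states = states;
 *	soc_pd.state_count = nr_states;
 *	ret = pm_genpd_init(&soc_pd, NULL, false);
 *
 * Each referenced idle state node must be compatible with
 * "domain-idle-state" and provide entry-latency-us and exit-latency-us, as
 * checked by genpd_parse_state() above.
 */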
2363
2364#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2365
2366
2367/*** debugfs support ***/
2368
2369#ifdef CONFIG_DEBUG_FS
2370#include <linux/pm.h>
2371#include <linux/device.h>
2372#include <linux/debugfs.h>
2373#include <linux/seq_file.h>
2374#include <linux/init.h>
2375#include <linux/kobject.h>
2376static struct dentry *genpd_debugfs_dir;
2377
2378/*
2379 * TODO: This function is a slightly modified version of rtpm_status_show
2380 * from sysfs.c, so generalize it.
2381 */
2382static void rtpm_status_str(struct seq_file *s, struct device *dev)
2383{
2384 static const char * const status_lookup[] = {
2385 [RPM_ACTIVE] = "active",
2386 [RPM_RESUMING] = "resuming",
2387 [RPM_SUSPENDED] = "suspended",
2388 [RPM_SUSPENDING] = "suspending"
2389 };
2390 const char *p = "";
2391
2392 if (dev->power.runtime_error)
2393 p = "error";
2394 else if (dev->power.disable_depth)
2395 p = "unsupported";
2396 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2397 p = status_lookup[dev->power.runtime_status];
2398 else
2399 WARN_ON(1);
2400
2401 seq_puts(s, p);
2402}
2403
2404static int genpd_summary_one(struct seq_file *s,
2405 struct generic_pm_domain *genpd)
2406{
2407 static const char * const status_lookup[] = {
2408 [GPD_STATE_ACTIVE] = "on",
2409 [GPD_STATE_POWER_OFF] = "off"
2410 };
2411 struct pm_domain_data *pm_data;
2412 const char *kobj_path;
2413 struct gpd_link *link;
2414 char state[16];
2415 int ret;
2416
2417 ret = genpd_lock_interruptible(genpd);
2418 if (ret)
2419 return -ERESTARTSYS;
2420
2421 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2422 goto exit;
2423 if (!genpd_status_on(genpd))
2424 snprintf(state, sizeof(state), "%s-%u",
2425 status_lookup[genpd->status], genpd->state_idx);
2426 else
2427 snprintf(state, sizeof(state), "%s",
2428 status_lookup[genpd->status]);
2429 seq_printf(s, "%-30s %-15s ", genpd->name, state);
2430
2431 /*
2432 * Modifications on the list require holding locks on both
2433 * master and slave, so we are safe.
2434 * Also genpd->name is immutable.
2435 */
2436 list_for_each_entry(link, &genpd->master_links, master_node) {
2437 seq_printf(s, "%s", link->slave->name);
2438 if (!list_is_last(&link->master_node, &genpd->master_links))
2439 seq_puts(s, ", ");
2440 }
2441
2442 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2443 kobj_path = kobject_get_path(&pm_data->dev->kobj,
2444 genpd_is_irq_safe(genpd) ?
2445 GFP_ATOMIC : GFP_KERNEL);
2446 if (kobj_path == NULL)
2447 continue;
2448
2449 seq_printf(s, "\n %-50s ", kobj_path);
2450 rtpm_status_str(s, pm_data->dev);
2451 kfree(kobj_path);
2452 }
2453
2454 seq_puts(s, "\n");
2455exit:
2456 genpd_unlock(genpd);
2457
2458 return 0;
2459}
2460
2461static int genpd_summary_show(struct seq_file *s, void *data)
2462{
2463 struct generic_pm_domain *genpd;
2464 int ret = 0;
2465
2466 seq_puts(s, "domain                          status          slaves\n");
2467 seq_puts(s, "    /device                                             runtime status\n");
2468 seq_puts(s, "----------------------------------------------------------------------\n");
2469
2470 ret = mutex_lock_interruptible(&gpd_list_lock);
2471 if (ret)
2472 return -ERESTARTSYS;
2473
2474 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2475 ret = genpd_summary_one(s, genpd);
2476 if (ret)
2477 break;
2478 }
2479 mutex_unlock(&gpd_list_lock);
2480
2481 return ret;
2482}
2483
2484static int genpd_status_show(struct seq_file *s, void *data)
2485{
2486 static const char * const status_lookup[] = {
2487 [GPD_STATE_ACTIVE] = "on",
2488 [GPD_STATE_POWER_OFF] = "off"
2489 };
2490
2491 struct generic_pm_domain *genpd = s->private;
2492 int ret = 0;
2493
2494 ret = genpd_lock_interruptible(genpd);
2495 if (ret)
2496 return -ERESTARTSYS;
2497
2498 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
2499 goto exit;
2500
2501 if (genpd->status == GPD_STATE_POWER_OFF)
2502 seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
2503 genpd->state_idx);
2504 else
2505 seq_printf(s, "%s\n", status_lookup[genpd->status]);
2506exit:
2507 genpd_unlock(genpd);
2508 return ret;
2509}
2510
2511static int genpd_sub_domains_show(struct seq_file *s, void *data)
2512{
2513 struct generic_pm_domain *genpd = s->private;
2514 struct gpd_link *link;
2515 int ret = 0;
2516
2517 ret = genpd_lock_interruptible(genpd);
2518 if (ret)
2519 return -ERESTARTSYS;
2520
2521 list_for_each_entry(link, &genpd->master_links, master_node)
2522 seq_printf(s, "%s\n", link->slave->name);
2523
2524 genpd_unlock(genpd);
2525 return ret;
2526}
2527
2528static int genpd_idle_states_show(struct seq_file *s, void *data)
2529{
2530 struct generic_pm_domain *genpd = s->private;
2531 unsigned int i;
2532 int ret = 0;
2533
2534 ret = genpd_lock_interruptible(genpd);
2535 if (ret)
2536 return -ERESTARTSYS;
2537
2538 seq_puts(s, "State          Time Spent(ms)\n");
2539
2540 for (i = 0; i < genpd->state_count; i++) {
2541 ktime_t delta = 0;
2542 s64 msecs;
2543
2544 if ((genpd->status == GPD_STATE_POWER_OFF) &&
2545 (genpd->state_idx == i))
2546 delta = ktime_sub(ktime_get(), genpd->accounting_time);
2547
2548 msecs = ktime_to_ms(
2549 ktime_add(genpd->states[i].idle_time, delta));
2550 seq_printf(s, "S%-13i %lld\n", i, msecs);
2551 }
2552
2553 genpd_unlock(genpd);
2554 return ret;
2555}
2556
2557static int genpd_active_time_show(struct seq_file *s, void *data)
2558{
2559 struct generic_pm_domain *genpd = s->private;
2560 ktime_t delta = 0;
2561 int ret = 0;
2562
2563 ret = genpd_lock_interruptible(genpd);
2564 if (ret)
2565 return -ERESTARTSYS;
2566
2567 if (genpd->status == GPD_STATE_ACTIVE)
2568 delta = ktime_sub(ktime_get(), genpd->accounting_time);
2569
2570 seq_printf(s, "%lld ms\n", ktime_to_ms(
2571 ktime_add(genpd->on_time, delta)));
2572
2573 genpd_unlock(genpd);
2574 return ret;
2575}
2576
2577static int genpd_total_idle_time_show(struct seq_file *s, void *data)
2578{
2579 struct generic_pm_domain *genpd = s->private;
2580 ktime_t delta = 0, total = 0;
2581 unsigned int i;
2582 int ret = 0;
2583
2584 ret = genpd_lock_interruptible(genpd);
2585 if (ret)
2586 return -ERESTARTSYS;
2587
2588 for (i = 0; i < genpd->state_count; i++) {
2589
2590 if ((genpd->status == GPD_STATE_POWER_OFF) &&
2591 (genpd->state_idx == i))
2592 delta = ktime_sub(ktime_get(), genpd->accounting_time);
2593
2594 total = ktime_add(total, genpd->states[i].idle_time);
2595 }
2596 total = ktime_add(total, delta);
2597
2598 seq_printf(s, "%lld ms\n", ktime_to_ms(total));
2599
2600 genpd_unlock(genpd);
2601 return ret;
2602}
2603
2604
2605static int genpd_devices_show(struct seq_file *s, void *data)
2606{
2607 struct generic_pm_domain *genpd = s->private;
2608 struct pm_domain_data *pm_data;
2609 const char *kobj_path;
2610 int ret = 0;
2611
2612 ret = genpd_lock_interruptible(genpd);
2613 if (ret)
2614 return -ERESTARTSYS;
2615
2616 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2617 kobj_path = kobject_get_path(&pm_data->dev->kobj,
2618 genpd_is_irq_safe(genpd) ?
2619 GFP_ATOMIC : GFP_KERNEL);
2620 if (kobj_path == NULL)
2621 continue;
2622
2623 seq_printf(s, "%s\n", kobj_path);
2624 kfree(kobj_path);
2625 }
2626
2627 genpd_unlock(genpd);
2628 return ret;
2629}
2630
2631#define define_genpd_open_function(name) \
2632static int genpd_##name##_open(struct inode *inode, struct file *file) \
2633{ \
2634 return single_open(file, genpd_##name##_show, inode->i_private); \
2635}
2636
2637define_genpd_open_function(summary);
2638define_genpd_open_function(status);
2639define_genpd_open_function(sub_domains);
2640define_genpd_open_function(idle_states);
2641define_genpd_open_function(active_time);
2642define_genpd_open_function(total_idle_time);
2643define_genpd_open_function(devices);
2644
2645#define define_genpd_debugfs_fops(name) \
2646static const struct file_operations genpd_##name##_fops = { \
2647 .open = genpd_##name##_open, \
2648 .read = seq_read, \
2649 .llseek = seq_lseek, \
2650 .release = single_release, \
2651}
2652
2653define_genpd_debugfs_fops(summary);
2654define_genpd_debugfs_fops(status);
2655define_genpd_debugfs_fops(sub_domains);
2656define_genpd_debugfs_fops(idle_states);
2657define_genpd_debugfs_fops(active_time);
2658define_genpd_debugfs_fops(total_idle_time);
2659define_genpd_debugfs_fops(devices);
2660
2661static int __init genpd_debug_init(void)
2662{
2663 struct dentry *d;
2664 struct generic_pm_domain *genpd;
2665
2666 genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2667
2668 if (!genpd_debugfs_dir)
2669 return -ENOMEM;
2670
2671 d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2672 genpd_debugfs_dir, NULL, &genpd_summary_fops);
2673 if (!d)
2674 return -ENOMEM;
2675
2676 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2677 d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
2678 if (!d)
2679 return -ENOMEM;
2680
2681 debugfs_create_file("current_state", 0444,
2682 d, genpd, &genpd_status_fops);
2683 debugfs_create_file("sub_domains", 0444,
2684 d, genpd, &genpd_sub_domains_fops);
2685 debugfs_create_file("idle_states", 0444,
2686 d, genpd, &genpd_idle_states_fops);
2687 debugfs_create_file("active_time", 0444,
2688 d, genpd, &genpd_active_time_fops);
2689 debugfs_create_file("total_idle_time", 0444,
2690 d, genpd, &genpd_total_idle_time_fops);
2691 debugfs_create_file("devices", 0444,
2692 d, genpd, &genpd_devices_fops);
2693 }
2694
2695 return 0;
2696}
2697late_initcall(genpd_debug_init);
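
/*
 * With CONFIG_DEBUG_FS enabled, the files created above appear under
 * /sys/kernel/debug/pm_genpd/: pm_genpd_summary gives the global overview,
 * and every registered domain gets a directory containing current_state,
 * sub_domains, idle_states, active_time, total_idle_time and devices.
 */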
2698
2699static void __exit genpd_debug_exit(void)
2700{
2701 debugfs_remove_recursive(genpd_debugfs_dir);
2702}
2703__exitcall(genpd_debug_exit);
2704#endif /* CONFIG_DEBUG_FS */