Loading...
/*
 * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */
8
9#include <linux/pm.h>
10#include <linux/pm_runtime.h>
11
12#ifdef CONFIG_PM_RUNTIME
13/**
14 * pm_generic_runtime_idle - Generic runtime idle callback for subsystems.
15 * @dev: Device to handle.
16 *
17 * If PM operations are defined for the @dev's driver and they include
18 * ->runtime_idle(), execute it and return its error code, if nonzero.
19 * Otherwise, execute pm_runtime_suspend() for the device and return 0.
20 */
21int pm_generic_runtime_idle(struct device *dev)
22{
23 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
24
25 if (pm && pm->runtime_idle) {
26 int ret = pm->runtime_idle(dev);
27 if (ret)
28 return ret;
29 }
30
31 pm_runtime_suspend(dev);
32 return 0;
33}
34EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
35
36/**
37 * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
38 * @dev: Device to suspend.
39 *
40 * If PM operations are defined for the @dev's driver and they include
41 * ->runtime_suspend(), execute it and return its error code. Otherwise,
42 * return 0.
43 */
44int pm_generic_runtime_suspend(struct device *dev)
45{
46 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
47 int ret;
48
49 ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
50
51 return ret;
52}
53EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
54
55/**
56 * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
57 * @dev: Device to resume.
58 *
59 * If PM operations are defined for the @dev's driver and they include
60 * ->runtime_resume(), execute it and return its error code. Otherwise,
61 * return 0.
62 */
63int pm_generic_runtime_resume(struct device *dev)
64{
65 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
66 int ret;
67
68 ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
69
70 return ret;
71}
72EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
73#endif /* CONFIG_PM_RUNTIME */
74
75#ifdef CONFIG_PM_SLEEP
76/**
77 * pm_generic_prepare - Generic routine preparing a device for power transition.
78 * @dev: Device to prepare.
79 *
80 * Prepare a device for a system-wide power transition.
81 */
82int pm_generic_prepare(struct device *dev)
83{
84 struct device_driver *drv = dev->driver;
85 int ret = 0;
86
87 if (drv && drv->pm && drv->pm->prepare)
88 ret = drv->pm->prepare(dev);
89
90 return ret;
91}
92
/**
 * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
 * @dev: Device to handle.
 * @event: PM transition of the system under way.
 * @noirq: Whether or not this is the "noirq" stage.
 *
 * If the device has not been suspended at run time, execute the
 * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
 * return its error code.  Otherwise, return zero.
 */
static int __pm_generic_call(struct device *dev, int event, bool noirq)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int (*callback)(struct device *);

	/* Without driver PM ops there is nothing to call; a runtime-suspended
	 * device is already in a low-power state, so leave it alone. */
	if (!pm || pm_runtime_suspended(dev))
		return 0;

	/* Select the driver callback matching the transition and stage. */
	switch (event) {
	case PM_EVENT_SUSPEND:
		callback = noirq ? pm->suspend_noirq : pm->suspend;
		break;
	case PM_EVENT_FREEZE:
		callback = noirq ? pm->freeze_noirq : pm->freeze;
		break;
	case PM_EVENT_HIBERNATE:
		callback = noirq ? pm->poweroff_noirq : pm->poweroff;
		break;
	case PM_EVENT_THAW:
		callback = noirq ? pm->thaw_noirq : pm->thaw;
		break;
	default:
		callback = NULL;
		break;
	}

	return callback ? callback(dev) : 0;
}
131
/**
 * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
 * @dev: Device to suspend.
 *
 * Forwards to the driver's ->suspend_noirq() via __pm_generic_call(); returns
 * 0 when the device is runtime-suspended or no callback is defined.
 */
int pm_generic_suspend_noirq(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
141
/**
 * pm_generic_suspend - Generic suspend callback for subsystems.
 * @dev: Device to suspend.
 *
 * Forwards to the driver's ->suspend() via __pm_generic_call(); returns 0
 * when the device is runtime-suspended or no callback is defined.
 */
int pm_generic_suspend(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);
151
/**
 * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
 * @dev: Device to freeze.
 *
 * Forwards to the driver's ->freeze_noirq() via __pm_generic_call(); returns
 * 0 when the device is runtime-suspended or no callback is defined.
 */
int pm_generic_freeze_noirq(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
161
/**
 * pm_generic_freeze - Generic freeze callback for subsystems.
 * @dev: Device to freeze.
 *
 * Forwards to the driver's ->freeze() via __pm_generic_call(); returns 0
 * when the device is runtime-suspended or no callback is defined.
 */
int pm_generic_freeze(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);
171
/**
 * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
 * @dev: Device to handle.
 *
 * Forwards to the driver's ->poweroff_noirq() via __pm_generic_call();
 * returns 0 when the device is runtime-suspended or no callback is defined.
 */
int pm_generic_poweroff_noirq(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
181
/**
 * pm_generic_poweroff - Generic poweroff callback for subsystems.
 * @dev: Device to handle.
 *
 * Forwards to the driver's ->poweroff() via __pm_generic_call(); returns 0
 * when the device is runtime-suspended or no callback is defined.
 */
int pm_generic_poweroff(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);
191
/**
 * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
 * @dev: Device to thaw.
 *
 * Forwards to the driver's ->thaw_noirq() via __pm_generic_call(); returns
 * 0 when the device is runtime-suspended or no callback is defined.
 */
int pm_generic_thaw_noirq(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_THAW, true);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
201
/**
 * pm_generic_thaw - Generic thaw callback for subsystems.
 * @dev: Device to thaw.
 *
 * Forwards to the driver's ->thaw() via __pm_generic_call(); returns 0
 * when the device is runtime-suspended or no callback is defined.
 */
int pm_generic_thaw(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_THAW, false);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);
211
/**
 * __pm_generic_resume - Generic resume/restore callback for subsystems.
 * @dev: Device to handle.
 * @event: PM transition of the system under way.
 * @noirq: Whether or not this is the "noirq" stage.
 *
 * Execute the resume/restore callback provided by the @dev's driver, if
 * defined.  If it returns 0, change the device's runtime PM status to 'active'.
 * Return the callback's error code.
 */
static int __pm_generic_resume(struct device *dev, int event, bool noirq)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int (*callback)(struct device *);
	int ret;

	if (!pm)
		return 0;

	/* Select the driver callback matching the transition and stage. */
	switch (event) {
	case PM_EVENT_RESUME:
		callback = noirq ? pm->resume_noirq : pm->resume;
		break;
	case PM_EVENT_RESTORE:
		callback = noirq ? pm->restore_noirq : pm->restore;
		break;
	default:
		callback = NULL;
		break;
	}

	if (!callback)
		return 0;

	ret = callback(dev);
	if (!ret && !noirq && pm_runtime_enabled(dev)) {
		/* The device is functional again, so bring its runtime PM
		 * status in line with reality; runtime PM is disabled around
		 * the status change so nothing can race with it. */
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return ret;
}
255
/**
 * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
 * @dev: Device to resume.
 *
 * Forwards to the driver's ->resume_noirq() via __pm_generic_resume();
 * returns 0 when no callback is defined.
 */
int pm_generic_resume_noirq(struct device *dev)
{
	return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
265
/**
 * pm_generic_resume - Generic resume callback for subsystems.
 * @dev: Device to resume.
 *
 * Forwards to the driver's ->resume() via __pm_generic_resume(), which also
 * marks the device runtime-active on success; returns 0 when no callback is
 * defined.
 */
int pm_generic_resume(struct device *dev)
{
	return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
275
/**
 * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
 * @dev: Device to restore.
 *
 * Forwards to the driver's ->restore_noirq() via __pm_generic_resume();
 * returns 0 when no callback is defined.
 */
int pm_generic_restore_noirq(struct device *dev)
{
	return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
285
/**
 * pm_generic_restore - Generic restore callback for subsystems.
 * @dev: Device to restore.
 *
 * Forwards to the driver's ->restore() via __pm_generic_resume(), which also
 * marks the device runtime-active on success; returns 0 when no callback is
 * defined.
 */
int pm_generic_restore(struct device *dev)
{
	return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
295
296/**
297 * pm_generic_complete - Generic routine competing a device power transition.
298 * @dev: Device to handle.
299 *
300 * Complete a device power transition during a system-wide power transition.
301 */
302void pm_generic_complete(struct device *dev)
303{
304 struct device_driver *drv = dev->driver;
305
306 if (drv && drv->pm && drv->pm->complete)
307 drv->pm->complete(dev);
308
309 /*
310 * Let runtime PM try to suspend devices that haven't been in use before
311 * going into the system-wide sleep state we're resuming from.
312 */
313 pm_runtime_idle(dev);
314}
315#endif /* CONFIG_PM_SLEEP */
316
/*
 * Ready-made dev_pm_ops for subsystems that simply want every PM callback
 * forwarded to the device driver's corresponding dev_pm_ops method.
 */
struct dev_pm_ops generic_subsys_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.prepare = pm_generic_prepare,
	.suspend = pm_generic_suspend,
	.suspend_noirq = pm_generic_suspend_noirq,
	.resume = pm_generic_resume,
	.resume_noirq = pm_generic_resume_noirq,
	.freeze = pm_generic_freeze,
	.freeze_noirq = pm_generic_freeze_noirq,
	.thaw = pm_generic_thaw,
	.thaw_noirq = pm_generic_thaw_noirq,
	.poweroff = pm_generic_poweroff,
	.poweroff_noirq = pm_generic_poweroff_noirq,
	.restore = pm_generic_restore,
	.restore_noirq = pm_generic_restore_noirq,
	.complete = pm_generic_complete,
#endif
#ifdef CONFIG_PM_RUNTIME
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
	.runtime_idle = pm_generic_runtime_idle,
#endif
};
EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
/*
 * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */
8
9#include <linux/pm.h>
10#include <linux/pm_runtime.h>
11#include <linux/export.h>
12
13#ifdef CONFIG_PM_RUNTIME
14/**
15 * pm_generic_runtime_idle - Generic runtime idle callback for subsystems.
16 * @dev: Device to handle.
17 *
18 * If PM operations are defined for the @dev's driver and they include
19 * ->runtime_idle(), execute it and return its error code, if nonzero.
20 * Otherwise, execute pm_runtime_suspend() for the device and return 0.
21 */
22int pm_generic_runtime_idle(struct device *dev)
23{
24 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
25
26 if (pm && pm->runtime_idle) {
27 int ret = pm->runtime_idle(dev);
28 if (ret)
29 return ret;
30 }
31
32 pm_runtime_suspend(dev);
33 return 0;
34}
35EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
36
37/**
38 * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
39 * @dev: Device to suspend.
40 *
41 * If PM operations are defined for the @dev's driver and they include
42 * ->runtime_suspend(), execute it and return its error code. Otherwise,
43 * return 0.
44 */
45int pm_generic_runtime_suspend(struct device *dev)
46{
47 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
48 int ret;
49
50 ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
51
52 return ret;
53}
54EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
55
56/**
57 * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
58 * @dev: Device to resume.
59 *
60 * If PM operations are defined for the @dev's driver and they include
61 * ->runtime_resume(), execute it and return its error code. Otherwise,
62 * return 0.
63 */
64int pm_generic_runtime_resume(struct device *dev)
65{
66 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
67 int ret;
68
69 ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
70
71 return ret;
72}
73EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
74#endif /* CONFIG_PM_RUNTIME */
75
76#ifdef CONFIG_PM_SLEEP
77/**
78 * pm_generic_prepare - Generic routine preparing a device for power transition.
79 * @dev: Device to prepare.
80 *
81 * Prepare a device for a system-wide power transition.
82 */
83int pm_generic_prepare(struct device *dev)
84{
85 struct device_driver *drv = dev->driver;
86 int ret = 0;
87
88 if (drv && drv->pm && drv->pm->prepare)
89 ret = drv->pm->prepare(dev);
90
91 return ret;
92}
93
94/**
95 * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
96 * @dev: Device to suspend.
97 */
98int pm_generic_suspend_noirq(struct device *dev)
99{
100 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
101
102 return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
103}
104EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
105
106/**
107 * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
108 * @dev: Device to suspend.
109 */
110int pm_generic_suspend_late(struct device *dev)
111{
112 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
113
114 return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
115}
116EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
117
118/**
119 * pm_generic_suspend - Generic suspend callback for subsystems.
120 * @dev: Device to suspend.
121 */
122int pm_generic_suspend(struct device *dev)
123{
124 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
125
126 return pm && pm->suspend ? pm->suspend(dev) : 0;
127}
128EXPORT_SYMBOL_GPL(pm_generic_suspend);
129
130/**
131 * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
132 * @dev: Device to freeze.
133 */
134int pm_generic_freeze_noirq(struct device *dev)
135{
136 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
137
138 return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
139}
140EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
141
142/**
143 * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
144 * @dev: Device to freeze.
145 */
146int pm_generic_freeze_late(struct device *dev)
147{
148 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
149
150 return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
151}
152EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
153
154/**
155 * pm_generic_freeze - Generic freeze callback for subsystems.
156 * @dev: Device to freeze.
157 */
158int pm_generic_freeze(struct device *dev)
159{
160 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
161
162 return pm && pm->freeze ? pm->freeze(dev) : 0;
163}
164EXPORT_SYMBOL_GPL(pm_generic_freeze);
165
166/**
167 * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
168 * @dev: Device to handle.
169 */
170int pm_generic_poweroff_noirq(struct device *dev)
171{
172 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
173
174 return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
175}
176EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
177
178/**
179 * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
180 * @dev: Device to handle.
181 */
182int pm_generic_poweroff_late(struct device *dev)
183{
184 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
185
186 return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
187}
188EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
189
190/**
191 * pm_generic_poweroff - Generic poweroff callback for subsystems.
192 * @dev: Device to handle.
193 */
194int pm_generic_poweroff(struct device *dev)
195{
196 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
197
198 return pm && pm->poweroff ? pm->poweroff(dev) : 0;
199}
200EXPORT_SYMBOL_GPL(pm_generic_poweroff);
201
202/**
203 * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
204 * @dev: Device to thaw.
205 */
206int pm_generic_thaw_noirq(struct device *dev)
207{
208 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
209
210 return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
211}
212EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
213
214/**
215 * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
216 * @dev: Device to thaw.
217 */
218int pm_generic_thaw_early(struct device *dev)
219{
220 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
221
222 return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
223}
224EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
225
226/**
227 * pm_generic_thaw - Generic thaw callback for subsystems.
228 * @dev: Device to thaw.
229 */
230int pm_generic_thaw(struct device *dev)
231{
232 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
233
234 return pm && pm->thaw ? pm->thaw(dev) : 0;
235}
236EXPORT_SYMBOL_GPL(pm_generic_thaw);
237
238/**
239 * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
240 * @dev: Device to resume.
241 */
242int pm_generic_resume_noirq(struct device *dev)
243{
244 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
245
246 return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
247}
248EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
249
250/**
251 * pm_generic_resume_early - Generic resume_early callback for subsystems.
252 * @dev: Device to resume.
253 */
254int pm_generic_resume_early(struct device *dev)
255{
256 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
257
258 return pm && pm->resume_early ? pm->resume_early(dev) : 0;
259}
260EXPORT_SYMBOL_GPL(pm_generic_resume_early);
261
262/**
263 * pm_generic_resume - Generic resume callback for subsystems.
264 * @dev: Device to resume.
265 */
266int pm_generic_resume(struct device *dev)
267{
268 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
269
270 return pm && pm->resume ? pm->resume(dev) : 0;
271}
272EXPORT_SYMBOL_GPL(pm_generic_resume);
273
274/**
275 * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
276 * @dev: Device to restore.
277 */
278int pm_generic_restore_noirq(struct device *dev)
279{
280 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
281
282 return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
283}
284EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
285
286/**
287 * pm_generic_restore_early - Generic restore_early callback for subsystems.
288 * @dev: Device to resume.
289 */
290int pm_generic_restore_early(struct device *dev)
291{
292 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
293
294 return pm && pm->restore_early ? pm->restore_early(dev) : 0;
295}
296EXPORT_SYMBOL_GPL(pm_generic_restore_early);
297
298/**
299 * pm_generic_restore - Generic restore callback for subsystems.
300 * @dev: Device to restore.
301 */
302int pm_generic_restore(struct device *dev)
303{
304 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
305
306 return pm && pm->restore ? pm->restore(dev) : 0;
307}
308EXPORT_SYMBOL_GPL(pm_generic_restore);
309
310/**
311 * pm_generic_complete - Generic routine competing a device power transition.
312 * @dev: Device to handle.
313 *
314 * Complete a device power transition during a system-wide power transition.
315 */
316void pm_generic_complete(struct device *dev)
317{
318 struct device_driver *drv = dev->driver;
319
320 if (drv && drv->pm && drv->pm->complete)
321 drv->pm->complete(dev);
322
323 /*
324 * Let runtime PM try to suspend devices that haven't been in use before
325 * going into the system-wide sleep state we're resuming from.
326 */
327 pm_runtime_idle(dev);
328}
329#endif /* CONFIG_PM_SLEEP */