/*
 * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/suspend.h>

#ifdef CONFIG_PM
/**
 * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
 * @dev: Device to suspend.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_suspend(), execute it and return its error code. Otherwise,
 * return 0.
 */
int pm_generic_runtime_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);

/**
 * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
 * @dev: Device to resume.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_resume(), execute it and return its error code. Otherwise,
 * return 0.
 */
int pm_generic_runtime_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
#endif /* CONFIG_PM */
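
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * subsystem or driver with no special runtime PM needs of its own can
 * point its dev_pm_ops at the generic helpers above.  The "foo" name is
 * made up for the example; only the pm_generic_* symbols come from this
 * file.
 */
#if 0	/* illustrative only, not compiled */
static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
			   pm_generic_runtime_resume,
			   NULL)
};
#endif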

#ifdef CONFIG_PM_SLEEP
/**
 * pm_generic_prepare - Generic routine preparing a device for power transition.
 * @dev: Device to prepare.
 *
 * Prepare a device for a system-wide power transition.
 */
int pm_generic_prepare(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (drv && drv->pm && drv->pm->prepare)
		ret = drv->pm->prepare(dev);

	return ret;
}
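
/*
 * Driver-side sketch (editor's illustration, not part of the original
 * file): a driver ->prepare() callback reached through pm_generic_prepare()
 * may return a positive value to tell the PM core that it is fine to leave
 * the device runtime-suspended across the transition (the "direct complete"
 * path referenced further down).  "foo_prepare" is a hypothetical name.
 */
#if 0	/* illustrative only, not compiled */
static int foo_prepare(struct device *dev)
{
	/* Positive return: the device may stay runtime-suspended. */
	return 1;
}
#endif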

/**
 * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);

/**
 * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend_late(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_late);

/**
 * pm_generic_suspend - Generic suspend callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->suspend ? pm->suspend(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);

/**
 * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);

/**
 * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze_late(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_late);

/**
 * pm_generic_freeze - Generic freeze callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->freeze ? pm->freeze(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);

/**
 * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);

/**
 * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff_late(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);

/**
 * pm_generic_poweroff - Generic poweroff callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);

/**
 * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);

/**
 * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw_early(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_early);

/**
 * pm_generic_thaw - Generic thaw callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->thaw ? pm->thaw(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);

/**
 * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);

/**
 * pm_generic_resume_early - Generic resume_early callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume_early(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->resume_early ? pm->resume_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_early);

/**
 * pm_generic_resume - Generic resume callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->resume ? pm->resume(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume);

/**
 * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);

/**
 * pm_generic_restore_early - Generic restore_early callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore_early(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->restore_early ? pm->restore_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_early);

/**
 * pm_generic_restore - Generic restore callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->restore ? pm->restore(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
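
/*
 * Wiring sketch (editor's illustration, not part of the original file): a
 * subsystem that has nothing device-specific to do for system sleep can
 * populate its dev_pm_ops directly with the generic callbacks defined in
 * this file.  "foo_bus_pm_ops" is a hypothetical name.
 */
#if 0	/* illustrative only, not compiled */
static const struct dev_pm_ops foo_bus_pm_ops = {
	.prepare = pm_generic_prepare,
	.suspend = pm_generic_suspend,
	.resume = pm_generic_resume,
	.freeze = pm_generic_freeze,
	.thaw = pm_generic_thaw,
	.poweroff = pm_generic_poweroff,
	.restore = pm_generic_restore,
	.complete = pm_generic_complete,
};
#endif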

/**
 * pm_generic_complete - Generic routine completing a device power transition.
 * @dev: Device to handle.
 *
 * Complete a device power transition during a system-wide power transition.
 */
void pm_generic_complete(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv && drv->pm && drv->pm->complete)
		drv->pm->complete(dev);
}

/**
 * pm_complete_with_resume_check - Complete a device power transition.
 * @dev: Device to handle.
 *
 * Complete a device power transition during a system-wide power transition and
 * optionally schedule a runtime resume of the device if the system resume in
 * progress has been initiated by the platform firmware and the device had its
 * power.direct_complete flag set.
 */
void pm_complete_with_resume_check(struct device *dev)
{
	pm_generic_complete(dev);
	/*
	 * If the device had been runtime-suspended before the system went into
	 * the sleep state it is going out of and it has never been resumed till
	 * now, resume it in case the firmware powered it up.
	 */
	if (dev->power.direct_complete && pm_resume_via_firmware())
		pm_request_resume(dev);
}
EXPORT_SYMBOL_GPL(pm_complete_with_resume_check);
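
/*
 * Usage sketch (editor's illustration, not part of the original file): on
 * platforms where firmware may power devices up during resume, a subsystem
 * can install this helper in place of pm_generic_complete() in the
 * hypothetical foo_bus_pm_ops shown earlier.
 */
#if 0	/* illustrative only, not compiled */
static const struct dev_pm_ops foo_firmware_bus_pm_ops = {
	.prepare = pm_generic_prepare,
	.suspend = pm_generic_suspend,
	.resume = pm_generic_resume,
	.complete = pm_complete_with_resume_check,
};
#endif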
#endif /* CONFIG_PM_SLEEP */
/*
 * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/pm.h>
#include <linux/pm_runtime.h>

#ifdef CONFIG_PM_RUNTIME
/**
 * pm_generic_runtime_idle - Generic runtime idle callback for subsystems.
 * @dev: Device to handle.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_idle(), execute it and return its error code, if nonzero.
 * Otherwise, execute pm_runtime_suspend() for the device and return 0.
 */
int pm_generic_runtime_idle(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm && pm->runtime_idle) {
		int ret = pm->runtime_idle(dev);
		if (ret)
			return ret;
	}

	pm_runtime_suspend(dev);
	return 0;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
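
/*
 * Driver-side sketch (editor's illustration, not part of the original
 * file): a driver ->runtime_idle() reached through
 * pm_generic_runtime_idle() can veto the automatic pm_runtime_suspend()
 * by returning a nonzero value.  "foo_still_in_use" is a hypothetical
 * per-driver check.
 */
#if 0	/* illustrative only, not compiled */
static int foo_runtime_idle(struct device *dev)
{
	if (foo_still_in_use(dev))
		return -EBUSY;	/* nonzero: skip pm_runtime_suspend() */

	return 0;		/* allow the generic helper to suspend */
}
#endif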

/**
 * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
 * @dev: Device to suspend.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_suspend(), execute it and return its error code. Otherwise,
 * return 0.
 */
int pm_generic_runtime_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);

/**
 * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
 * @dev: Device to resume.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_resume(), execute it and return its error code. Otherwise,
 * return 0.
 */
int pm_generic_runtime_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP
/**
 * pm_generic_prepare - Generic routine preparing a device for power transition.
 * @dev: Device to prepare.
 *
 * Prepare a device for a system-wide power transition.
 */
int pm_generic_prepare(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (drv && drv->pm && drv->pm->prepare)
		ret = drv->pm->prepare(dev);

	return ret;
}

/**
 * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
 * @dev: Device to handle.
 * @event: PM transition of the system under way.
 * @noirq: Whether or not this is the "noirq" stage.
 *
 * If the device has not been suspended at run time, execute the
 * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
 * return its error code. Otherwise, return zero.
 */
static int __pm_generic_call(struct device *dev, int event, bool noirq)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int (*callback)(struct device *);

	if (!pm || pm_runtime_suspended(dev))
		return 0;

	switch (event) {
	case PM_EVENT_SUSPEND:
		callback = noirq ? pm->suspend_noirq : pm->suspend;
		break;
	case PM_EVENT_FREEZE:
		callback = noirq ? pm->freeze_noirq : pm->freeze;
		break;
	case PM_EVENT_HIBERNATE:
		callback = noirq ? pm->poweroff_noirq : pm->poweroff;
		break;
	case PM_EVENT_THAW:
		callback = noirq ? pm->thaw_noirq : pm->thaw;
		break;
	default:
		callback = NULL;
		break;
	}

	return callback ? callback(dev) : 0;
}
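
/*
 * Editor's note: the exported wrappers below are thin veneers over
 * __pm_generic_call().  For example, pm_generic_suspend_noirq(dev) behaves
 * like the driver's ->suspend_noirq(dev) when the device is not
 * runtime-suspended, and is a no-op returning 0 when it is.
 */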

/**
 * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend_noirq(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);

/**
 * pm_generic_suspend - Generic suspend callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);

/**
 * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze_noirq(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);

/**
 * pm_generic_freeze - Generic freeze callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);

/**
 * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff_noirq(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);

/**
 * pm_generic_poweroff - Generic poweroff callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);

/**
 * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw_noirq(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_THAW, true);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);

/**
 * pm_generic_thaw - Generic thaw callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_THAW, false);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);

/**
 * __pm_generic_resume - Generic resume/restore callback for subsystems.
 * @dev: Device to handle.
 * @event: PM transition of the system under way.
 * @noirq: Whether or not this is the "noirq" stage.
 *
 * Execute the resume/restore callback provided by the @dev's driver, if
 * defined. If it returns 0 and this is not the "noirq" stage, change the
 * device's runtime PM status to 'active'. Return the callback's error code.
 */
static int __pm_generic_resume(struct device *dev, int event, bool noirq)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int (*callback)(struct device *);
	int ret;

	if (!pm)
		return 0;

	switch (event) {
	case PM_EVENT_RESUME:
		callback = noirq ? pm->resume_noirq : pm->resume;
		break;
	case PM_EVENT_RESTORE:
		callback = noirq ? pm->restore_noirq : pm->restore;
		break;
	default:
		callback = NULL;
		break;
	}

	if (!callback)
		return 0;

	ret = callback(dev);
	if (!ret && !noirq && pm_runtime_enabled(dev)) {
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return ret;
}
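
/*
 * Driver-side sketch (editor's illustration, not part of the original
 * file): the disable/set_active/enable sequence above is the usual idiom
 * for resynchronizing the runtime PM status with hardware that the system
 * resume has just powered up; a driver ->resume() can use the same
 * pattern.  "foo_hw_power_up" is a hypothetical helper.
 */
#if 0	/* illustrative only, not compiled */
static int foo_resume(struct device *dev)
{
	foo_hw_power_up(dev);		/* hypothetical hardware re-init */

	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;
}
#endif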

/**
 * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume_noirq(struct device *dev)
{
	return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);

/**
 * pm_generic_resume - Generic resume callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume(struct device *dev)
{
	return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);

/**
 * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore_noirq(struct device *dev)
{
	return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);

/**
 * pm_generic_restore - Generic restore callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore(struct device *dev)
{
	return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);

/**
 * pm_generic_complete - Generic routine completing a device power transition.
 * @dev: Device to handle.
 *
 * Complete a device power transition during a system-wide power transition.
 */
void pm_generic_complete(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv && drv->pm && drv->pm->complete)
		drv->pm->complete(dev);

	/*
	 * Let runtime PM try to suspend devices that haven't been in use before
	 * going into the system-wide sleep state we're resuming from.
	 */
	pm_runtime_idle(dev);
}
#endif /* CONFIG_PM_SLEEP */

struct dev_pm_ops generic_subsys_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.prepare = pm_generic_prepare,
	.suspend = pm_generic_suspend,
	.suspend_noirq = pm_generic_suspend_noirq,
	.resume = pm_generic_resume,
	.resume_noirq = pm_generic_resume_noirq,
	.freeze = pm_generic_freeze,
	.freeze_noirq = pm_generic_freeze_noirq,
	.thaw = pm_generic_thaw,
	.thaw_noirq = pm_generic_thaw_noirq,
	.poweroff = pm_generic_poweroff,
	.poweroff_noirq = pm_generic_poweroff_noirq,
	.restore = pm_generic_restore,
	.restore_noirq = pm_generic_restore_noirq,
	.complete = pm_generic_complete,
#endif
#ifdef CONFIG_PM_RUNTIME
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
	.runtime_idle = pm_generic_runtime_idle,
#endif
};
EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
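
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * bus type with no PM quirks of its own can reuse the generic set
 * wholesale.  "foo_bus_type" is a hypothetical name.
 */
#if 0	/* illustrative only, not compiled */
static struct bus_type foo_bus_type = {
	.name = "foo",
	.pm = &generic_subsys_pm_ops,
};
#endif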