// SPDX-License-Identifier: GPL-2.0
/* Device wakeirq helper functions */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>

#include "power.h"

/**
 * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
 * @dev: Device entry
 * @wirq: Wake irq specific data
 *
 * Internal function to attach a dedicated wake-up interrupt as a wake IRQ.
 */
static int dev_pm_attach_wake_irq(struct device *dev, struct wake_irq *wirq)
{
	unsigned long flags;

	if (!dev || !wirq)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (dev_WARN_ONCE(dev, dev->power.wakeirq,
			  "wake irq already initialized\n")) {
		spin_unlock_irqrestore(&dev->power.lock, flags);
		return -EEXIST;
	}

	dev->power.wakeirq = wirq;
	device_wakeup_attach_irq(dev, wirq);

	spin_unlock_irqrestore(&dev->power.lock, flags);
	return 0;
}

/**
 * dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ
 * @dev: Device entry
 * @irq: Device IO interrupt
 *
 * Attach a device IO interrupt as a wake IRQ. The wake IRQ gets
 * automatically configured for wake-up from suspend based
 * on the device specific sysfs wakeup entry. Typically called
 * during driver probe after calling device_init_wakeup().
 */
int dev_pm_set_wake_irq(struct device *dev, int irq)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	wirq->dev = dev;
	wirq->irq = irq;

	err = dev_pm_attach_wake_irq(dev, wirq);
	if (err)
		kfree(wirq);

	return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
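
/*
 * Example (illustrative sketch only, not part of this file): a hypothetical
 * platform driver wiring its IO interrupt up as a wake IRQ during probe.
 * The names foo_probe and foo_irq_handler are assumptions made for the
 * example only.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int irq = platform_get_irq(pdev, 0);
 *		int err;
 *
 *		if (irq < 0)
 *			return irq;
 *
 *		err = devm_request_irq(&pdev->dev, irq, foo_irq_handler,
 *				       0, dev_name(&pdev->dev), pdev);
 *		if (err)
 *			return err;
 *
 *		device_init_wakeup(&pdev->dev, true);
 *		return dev_pm_set_wake_irq(&pdev->dev, irq);
 *	}
 */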

/**
 * dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ
 * @dev: Device entry
 *
 * Detach a device wake IRQ and free resources.
 *
 * Note that it's OK for drivers to call this without calling
 * dev_pm_set_wake_irq() as all the driver instances may not have
 * a wake IRQ configured. This avoids adding wake IRQ specific
 * checks into the drivers.
 */
void dev_pm_clear_wake_irq(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;
	unsigned long flags;

	if (!wirq)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	device_wakeup_detach_irq(dev);
	dev->power.wakeirq = NULL;
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
		free_irq(wirq->irq, wirq);
		wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
	}
	kfree(wirq->name);
	kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
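
/*
 * Example (illustrative sketch only, not part of this file): the matching
 * teardown in a hypothetical driver's remove path. Calling this is safe
 * even for instances that never set a wake IRQ.
 *
 *	static void foo_remove(struct platform_device *pdev)
 *	{
 *		dev_pm_clear_wake_irq(&pdev->dev);
 *		device_init_wakeup(&pdev->dev, false);
 *	}
 */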

/**
 * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
 * @irq: Device specific dedicated wake-up interrupt
 * @_wirq: Wake IRQ data
 *
 * Some devices have a separate wake-up interrupt in addition to the
 * device IO interrupt. The wake-up interrupt signals that a device
 * should be woken up from its idle state. This handler uses device
 * specific pm_runtime functions to wake the device, and then it's
 * up to the device to do whatever it needs to. Note that as the
 * device may need to restore context and start up regulators, we
 * use a threaded IRQ.
 *
 * Also note that we are not resending the lost device interrupts.
 * We assume that the wake-up interrupt just needs to wake up the
 * device, and then the device's pm_runtime_resume() can deal with
 * the situation.
 */
static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
{
	struct wake_irq *wirq = _wirq;
	int res;

	/* Maybe abort suspend? */
	if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
		pm_wakeup_event(wirq->dev, 0);

		return IRQ_HANDLED;
	}

	/* We don't want RPM_ASYNC or RPM_NOWAIT here */
	res = pm_runtime_resume(wirq->dev);
	if (res < 0)
		dev_warn(wirq->dev,
			 "wake IRQ with no resume: %i\n", res);

	return IRQ_HANDLED;
}

static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
	if (!wirq->name) {
		err = -ENOMEM;
		goto err_free;
	}

	wirq->dev = dev;
	wirq->irq = irq;

	/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	/*
	 * Consumer device may need to power up and restore state
	 * so we use a threaded irq.
	 */
	err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
				   IRQF_ONESHOT | IRQF_NO_AUTOEN,
				   wirq->name, wirq);
	if (err)
		goto err_free_name;

	err = dev_pm_attach_wake_irq(dev, wirq);
	if (err)
		goto err_free_irq;

	wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;

	return err;

err_free_irq:
	free_irq(irq, wirq);
err_free_name:
	kfree(wirq->name);
err_free:
	kfree(wirq);

	return err;
}

/**
 * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Unless your hardware has separate wake-up interrupts in addition
 * to the device IO interrupts, you don't need this.
 *
 * Sets up a threaded interrupt handler for a device that has
 * a dedicated wake-up interrupt in addition to the device IO
 * interrupt.
 */
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
	return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
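
/*
 * Example (illustrative sketch only, not part of this file): a hypothetical
 * driver with both an IO interrupt and a separate dedicated wake-up line.
 * The "wakeup" interrupt name is an assumed device tree binding used for
 * the example only.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int wakeirq = platform_get_irq_byname_optional(pdev, "wakeup");
 *		int err;
 *
 *		device_init_wakeup(&pdev->dev, true);
 *		if (wakeirq > 0) {
 *			err = dev_pm_set_dedicated_wake_irq(&pdev->dev, wakeirq);
 *			if (err)
 *				return err;
 *		}
 *
 *		pm_runtime_enable(&pdev->dev);
 *		return 0;
 *	}
 */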

/**
 * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
 *					    with reverse enable ordering
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Unless your hardware has separate wake-up interrupts in addition
 * to the device IO interrupts, you don't need this.
 *
 * Sets up a threaded interrupt handler for a device that has a dedicated
 * wake-up interrupt in addition to the device IO interrupt. It sets
 * the WAKE_IRQ_DEDICATED_REVERSE status flag to tell rpm_suspend() to
 * enable the dedicated wake-up interrupt after running the runtime suspend
 * callback for @dev.
 */
int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
{
	return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
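
/*
 * Enable-ordering sketch (illustrative summary based on the kernel-doc
 * above, not code from this file): relative to the driver's
 * ->runtime_suspend() callback, the dedicated wake IRQ is enabled:
 *
 *	dev_pm_set_dedicated_wake_irq():	 before the callback
 *	dev_pm_set_dedicated_wake_irq_reverse(): after the callback
 */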

/**
 * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
 * @dev: Device
 * @can_change_status: Can change wake-up interrupt status
 *
 * Enables wakeirq conditionally. We need to enable wake-up interrupt
 * lazily on the first rpm_suspend(). This is needed as the consumer device
 * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
 * otherwise try to disable an already disabled wakeirq. The wake-up
 * interrupt starts disabled with IRQ_NOAUTOEN set.
 *
 * Should be only called from rpm_suspend() and rpm_resume() path.
 * Caller must hold &dev->power.lock to change wirq->status.
 */
void dev_pm_enable_wake_irq_check(struct device *dev,
				  bool can_change_status)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
		goto enable;
	} else if (can_change_status) {
		wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
		goto enable;
	}

	return;

enable:
	if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
		enable_irq(wirq->irq);
		wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
	}
}

/**
 * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
 * @dev: Device
 * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
 *
 * Disables wake-up interrupt conditionally based on status.
 * Should be only called from rpm_suspend() and rpm_resume() path.
 */
void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
		return;

	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
		wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
		disable_irq_nosync(wirq->irq);
	}
}

/**
 * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
 * @dev: Device using the wake IRQ
 *
 * Enable wake IRQ conditionally based on status, mainly used if we want to
 * enable the wake IRQ after running ->runtime_suspend(), which depends on
 * WAKE_IRQ_DEDICATED_REVERSE.
 *
 * Should be only called from rpm_suspend() path.
 */
void dev_pm_enable_wake_irq_complete(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
	    wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
		enable_irq(wirq->irq);
}
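
/*
 * Call-flow sketch (illustrative only; the authoritative rpm_suspend() and
 * rpm_resume() code lives in drivers/base/power/runtime.c and may differ
 * in detail):
 *
 *	rpm_suspend(dev):
 *		dev_pm_enable_wake_irq_check(dev, true);
 *		ret = ->runtime_suspend(dev);
 *		if (ret)
 *			dev_pm_disable_wake_irq_check(dev, true);
 *		else
 *			dev_pm_enable_wake_irq_complete(dev);
 *
 *	rpm_resume(dev):
 *		dev_pm_disable_wake_irq_check(dev, false);
 *		->runtime_resume(dev);
 */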

/**
 * dev_pm_arm_wake_irq - Arm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Sets up the wake-up event conditionally based on
 * device_may_wakeup().
 */
void dev_pm_arm_wake_irq(struct wake_irq *wirq)
{
	if (!wirq)
		return;

	if (device_may_wakeup(wirq->dev)) {
		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
		    !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
			enable_irq(wirq->irq);

		enable_irq_wake(wirq->irq);
	}
}

/**
 * dev_pm_disarm_wake_irq - Disarm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Clears up the wake-up event conditionally based on
 * device_may_wakeup().
 */
void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
	if (!wirq)
		return;

	if (device_may_wakeup(wirq->dev)) {
		disable_irq_wake(wirq->irq);

		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
		    !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
			disable_irq_nosync(wirq->irq);
	}
}