// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe Native PME support
 *
 * Copyright (C) 2007 - 2009 Intel Corp
 * Copyright (C) 2007 - 2009 Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 */

#define dev_fmt(fmt) "PME: " fmt

#include <linux/bitfield.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

#include "../pci.h"
#include "portdrv.h"

/*
 * If this switch is set, MSI will not be used for PCIe PME signaling. This
 * causes the PCIe port driver to use INTx interrupts only, but it turns out
 * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based
 * wake-up from system sleep states.
 */
bool pcie_pme_msi_disabled;

static int __init pcie_pme_setup(char *str)
{
	if (!strncmp(str, "nomsi", 5))
		pcie_pme_msi_disabled = true;

	return 1;
}
__setup("pcie_pme=", pcie_pme_setup);
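
/*
 * Example: booting with "pcie_pme=nomsi" sets pcie_pme_msi_disabled and makes
 * the port driver fall back to INTx interrupts for PME signaling.
 */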
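/**
 * struct pcie_pme_service_data - PCIe PME service driver private data.
 * @lock: Serializes register access and @noirq updates between the interrupt
 *	handler, the work function, and suspend/resume.
 * @srv: PME service device this data belongs to.
 * @work: Work item used to handle PME requests outside interrupt context.
 * @noirq: If set, keep the PME interrupt disabled.
 */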
struct pcie_pme_service_data {
	spinlock_t lock;
	struct pcie_device *srv;
	struct work_struct work;
	bool noirq; /* If set, keep the PME interrupt disabled. */
};

/**
 * pcie_pme_interrupt_enable - Enable/disable PCIe PME interrupt generation.
 * @dev: PCIe root port or event collector.
 * @enable: Enable or disable the interrupt.
 */
void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
{
	if (enable)
		pcie_capability_set_word(dev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_PMEIE);
	else
		pcie_capability_clear_word(dev, PCI_EXP_RTCTL,
					   PCI_EXP_RTCTL_PMEIE);
}

/**
 * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#.
 * @bus: PCI bus to scan.
 *
 * Scan the given PCI bus and all buses under it for devices asserting PME#.
 */
static bool pcie_pme_walk_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	bool ret = false;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip PCIe devices in case we started from a root port. */
		if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
			if (dev->pme_poll)
				dev->pme_poll = false;

			pci_wakeup_event(dev);
			pm_request_resume(&dev->dev);
			ret = true;
		}

		if (dev->subordinate && pcie_pme_walk_bus(dev->subordinate))
			ret = true;
	}

	return ret;
}

/**
 * pcie_pme_from_pci_bridge - Check if PCIe-PCI bridge generated a PME.
 * @bus: Secondary bus of the bridge.
 * @devfn: Device/function number to check.
 *
 * PME from PCI devices under a PCIe-PCI bridge may be converted to an in-band
 * PCIe PME message. In that case the bridge should use the Requester ID of
 * device/function number 0 on its secondary bus.
 */
static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn)
{
	struct pci_dev *dev;
	bool found = false;

	if (devfn)
		return false;

	dev = pci_dev_get(bus->self);
	if (!dev)
		return false;

	if (pci_is_pcie(dev) && pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) {
		down_read(&pci_bus_sem);
		if (pcie_pme_walk_bus(bus))
			found = true;
		up_read(&pci_bus_sem);
	}

	pci_dev_put(dev);
	return found;
}

/**
 * pcie_pme_handle_request - Find the device that generated a PME and handle it.
 * @port: Root port or event collector that generated the PME interrupt.
 * @req_id: PCIe Requester ID of the device that generated the PME.
 */
static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
{
	u8 busnr = req_id >> 8, devfn = req_id & 0xff;
	struct pci_bus *bus;
	struct pci_dev *dev;
	bool found = false;

	/* First, check if the PME is from the root port itself. */
	if (port->devfn == devfn && port->bus->number == busnr) {
		if (port->pme_poll)
			port->pme_poll = false;

		if (pci_check_pme_status(port)) {
			pm_request_resume(&port->dev);
			found = true;
		} else {
			/*
			 * Apparently, the root port generated the PME on behalf
			 * of a non-PCIe device downstream.  If this is done by
			 * a root port, the Requester ID field in its status
			 * register may contain either the root port's or the
			 * source device's information (PCI Express Base
			 * Specification, Rev. 2.0, Section 6.1.9).
			 */
			down_read(&pci_bus_sem);
			found = pcie_pme_walk_bus(port->subordinate);
			up_read(&pci_bus_sem);
		}
		goto out;
	}

	/* Second, find the bus the source device is on. */
	bus = pci_find_bus(pci_domain_nr(port->bus), busnr);
	if (!bus)
		goto out;

	/* Next, check if the PME is from a PCIe-PCI bridge. */
	found = pcie_pme_from_pci_bridge(bus, devfn);
	if (found)
		goto out;

	/* Finally, try to find the PME source on the bus. */
	down_read(&pci_bus_sem);
	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_dev_get(dev);
		if (dev->devfn == devfn) {
			found = true;
			break;
		}
		pci_dev_put(dev);
	}
	up_read(&pci_bus_sem);

	if (found) {
		/* The device is there, but we have to check its PME status. */
		found = pci_check_pme_status(dev);
		if (found) {
			if (dev->pme_poll)
				dev->pme_poll = false;

			pci_wakeup_event(dev);
			pm_request_resume(&dev->dev);
		}
		pci_dev_put(dev);
	} else if (devfn) {
		/*
		 * The device is not there, but we can still try to recover by
		 * assuming that the PME was reported by a PCIe-PCI bridge that
		 * used devfn different from zero.
		 */
		pci_info(port, "interrupt generated for non-existent device %02x:%02x.%d\n",
			 busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
		found = pcie_pme_from_pci_bridge(bus, 0);
	}

 out:
	if (!found)
		pci_info(port, "Spurious native interrupt!\n");
}

/**
 * pcie_pme_work_fn - Work handler for PCIe PME interrupt.
 * @work: Work structure giving access to service data.
 */
static void pcie_pme_work_fn(struct work_struct *work)
{
	struct pcie_pme_service_data *data =
			container_of(work, struct pcie_pme_service_data, work);
	struct pci_dev *port = data->srv->port;
	u32 rtsta;

	spin_lock_irq(&data->lock);

	for (;;) {
		if (data->noirq)
			break;

		pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
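		/* An all-ones read suggests the port is no longer accessible. */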
		if (PCI_POSSIBLE_ERROR(rtsta))
			break;

		if (rtsta & PCI_EXP_RTSTA_PME) {
			/*
			 * Clear PME status of the port.  If there are other
			 * pending PMEs, the status will be set again.
			 */
			pcie_clear_root_pme_status(port);

			spin_unlock_irq(&data->lock);
			pcie_pme_handle_request(port,
				    FIELD_GET(PCI_EXP_RTSTA_PME_RQ_ID, rtsta));
			spin_lock_irq(&data->lock);

			continue;
		}

		/* No need to loop if there are no more PMEs pending. */
		if (!(rtsta & PCI_EXP_RTSTA_PENDING))
			break;

		spin_unlock_irq(&data->lock);
		cpu_relax();
		spin_lock_irq(&data->lock);
	}

	if (!data->noirq)
		pcie_pme_interrupt_enable(port, true);

	spin_unlock_irq(&data->lock);
}

/**
 * pcie_pme_irq - Interrupt handler for PCIe root port PME interrupt.
 * @irq: Interrupt vector.
 * @context: Interrupt context pointer.
 */
static irqreturn_t pcie_pme_irq(int irq, void *context)
{
	struct pci_dev *port;
	struct pcie_pme_service_data *data;
	u32 rtsta;
	unsigned long flags;

	port = ((struct pcie_device *)context)->port;
	data = get_service_data((struct pcie_device *)context);

	spin_lock_irqsave(&data->lock, flags);
	pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);

	if (PCI_POSSIBLE_ERROR(rtsta) || !(rtsta & PCI_EXP_RTSTA_PME)) {
		spin_unlock_irqrestore(&data->lock, flags);
		return IRQ_NONE;
	}

	pcie_pme_interrupt_enable(port, false);
	spin_unlock_irqrestore(&data->lock, flags);

	/* We don't use pm_wq, because it's freezable. */
	schedule_work(&data->work);

	return IRQ_HANDLED;
}

/**
 * pcie_pme_can_wakeup - Set the wakeup capability flag.
 * @dev: PCI device to handle.
 * @ign: Ignored.
 */
static int pcie_pme_can_wakeup(struct pci_dev *dev, void *ign)
{
	device_set_wakeup_capable(&dev->dev, true);
	return 0;
}

/**
 * pcie_pme_mark_devices - Set the wakeup flag for devices below a port.
 * @port: PCIe root port or event collector to handle.
 *
 * For each device below the given root port, including the port itself (or for
 * each root complex integrated endpoint if @port is a root complex event
 * collector), set the flag indicating that it can signal run-time wake-up
 * events.
 */
static void pcie_pme_mark_devices(struct pci_dev *port)
{
	pcie_pme_can_wakeup(port, NULL);

	if (pci_pcie_type(port) == PCI_EXP_TYPE_RC_EC)
		pcie_walk_rcec(port, pcie_pme_can_wakeup, NULL);
	else if (port->subordinate)
		pci_walk_bus(port->subordinate, pcie_pme_can_wakeup, NULL);
}

/**
 * pcie_pme_probe - Initialize PCIe PME service for the given root port.
 * @srv: PCIe service to initialize.
 */
static int pcie_pme_probe(struct pcie_device *srv)
{
	struct pci_dev *port = srv->port;
	struct pcie_pme_service_data *data;
	int type = pci_pcie_type(port);
	int ret;

	/* Limit to Root Ports or Root Complex Event Collectors */
	if (type != PCI_EXP_TYPE_RC_EC &&
	    type != PCI_EXP_TYPE_ROOT_PORT)
		return -ENODEV;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	spin_lock_init(&data->lock);
	INIT_WORK(&data->work, pcie_pme_work_fn);
	data->srv = srv;
	set_service_data(srv, data);
	pcie_pme_interrupt_enable(port, false);
	pcie_clear_root_pme_status(port);

	ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
	if (ret) {
		kfree(data);
		return ret;
	}

	pci_info(port, "Signaling with IRQ %d\n", srv->irq);

	pcie_pme_mark_devices(port);
	pcie_pme_interrupt_enable(port, true);
	return 0;
}
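/**
 * pcie_pme_check_wakeup - Check if wakeup may be signaled from below a bus.
 * @bus: PCI bus to check (may be NULL).
 *
 * Return true if any device on @bus, or on a bus below it, may generate
 * wakeup events.
 */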
static bool pcie_pme_check_wakeup(struct pci_bus *bus)
{
	struct pci_dev *dev;

	if (!bus)
		return false;

	list_for_each_entry(dev, &bus->devices, bus_list)
		if (device_may_wakeup(&dev->dev)
		    || pcie_pme_check_wakeup(dev->subordinate))
			return true;

	return false;
}
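/**
 * pcie_pme_disable_interrupt - Stop PME interrupt generation for a port.
 * @port: Root port or event collector to handle.
 * @data: Service data associated with @port's PME service device.
 *
 * Disable the PME interrupt, clear the port's PME status and set @data->noirq
 * so that the work function keeps the interrupt disabled.
 */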
static void pcie_pme_disable_interrupt(struct pci_dev *port,
				       struct pcie_pme_service_data *data)
{
	spin_lock_irq(&data->lock);
	pcie_pme_interrupt_enable(port, false);
	pcie_clear_root_pme_status(port);
	data->noirq = true;
	spin_unlock_irq(&data->lock);
}

/**
 * pcie_pme_suspend - Suspend PCIe PME service device.
 * @srv: PCIe service device to suspend.
 */
static int pcie_pme_suspend(struct pcie_device *srv)
{
	struct pcie_pme_service_data *data = get_service_data(srv);
	struct pci_dev *port = srv->port;
	bool wakeup;
	int ret;

	if (device_may_wakeup(&port->dev)) {
		wakeup = true;
	} else {
		down_read(&pci_bus_sem);
		wakeup = pcie_pme_check_wakeup(port->subordinate);
		up_read(&pci_bus_sem);
	}
	if (wakeup) {
		ret = enable_irq_wake(srv->irq);
		if (!ret)
			return 0;
	}

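	/*
	 * Either nothing below the port may wake up the system or the IRQ
	 * could not be armed for wakeup, so keep PME signaling disabled while
	 * suspended.
	 */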
	pcie_pme_disable_interrupt(port, data);

	synchronize_irq(srv->irq);

	return 0;
}

/**
 * pcie_pme_resume - Resume PCIe PME service device.
 * @srv: PCIe service device to resume.
 */
static int pcie_pme_resume(struct pcie_device *srv)
{
	struct pcie_pme_service_data *data = get_service_data(srv);

	spin_lock_irq(&data->lock);
	if (data->noirq) {
		struct pci_dev *port = srv->port;

		pcie_clear_root_pme_status(port);
		pcie_pme_interrupt_enable(port, true);
		data->noirq = false;
	} else {
		disable_irq_wake(srv->irq);
	}
	spin_unlock_irq(&data->lock);

	return 0;
}

/**
 * pcie_pme_remove - Prepare PCIe PME service device for removal.
 * @srv: PCIe service device to remove.
 */
static void pcie_pme_remove(struct pcie_device *srv)
{
	struct pcie_pme_service_data *data = get_service_data(srv);

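	/*
	 * Quiesce PME signaling and release the IRQ before flushing the work
	 * item, so that nothing can requeue it once its data is freed.
	 */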
	pcie_pme_disable_interrupt(srv->port, data);
	free_irq(srv->irq, srv);
	cancel_work_sync(&data->work);
	kfree(data);
}

static struct pcie_port_service_driver pcie_pme_driver = {
	.name		= "pcie_pme",
	.port_type	= PCIE_ANY_PORT,
	.service	= PCIE_PORT_SERVICE_PME,

	.probe		= pcie_pme_probe,
	.suspend	= pcie_pme_suspend,
	.resume		= pcie_pme_resume,
	.remove		= pcie_pme_remove,
};

/**
 * pcie_pme_init - Register the PCIe PME service driver.
 */
int __init pcie_pme_init(void)
{
	return pcie_port_service_register(&pcie_pme_driver);
}