Loading...
1/*
2 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
3 * Copyright IBM Corp. 2004 2005
4 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
5 *
6 * All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at
11 * your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
16 * NON INFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 *
23 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
24 */
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/irq.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <asm/eeh.h>
31#include <asm/eeh_event.h>
32#include <asm/ppc-pci.h>
33#include <asm/pci-bridge.h>
34#include <asm/prom.h>
35#include <asm/rtas.h>
36
/* Accumulator passed through eeh_rmv_device() during a PE reset. */
struct eeh_rmv_data {
	struct list_head edev_list;	/* removed VF eeh_devs, re-added after recovery */
	int removed;			/* count of devices removed from the PCI core */
};
41
42/**
43 * eeh_pcid_name - Retrieve name of PCI device driver
44 * @pdev: PCI device
45 *
46 * This routine is used to retrieve the name of PCI device driver
47 * if that's valid.
48 */
49static inline const char *eeh_pcid_name(struct pci_dev *pdev)
50{
51 if (pdev && pdev->dev.driver)
52 return pdev->dev.driver->name;
53 return "";
54}
55
56/**
57 * eeh_pcid_get - Get the PCI device driver
58 * @pdev: PCI device
59 *
60 * The function is used to retrieve the PCI device driver for
61 * the indicated PCI device. Besides, we will increase the reference
62 * of the PCI device driver to prevent that being unloaded on
63 * the fly. Otherwise, kernel crash would be seen.
64 */
65static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
66{
67 if (!pdev || !pdev->driver)
68 return NULL;
69
70 if (!try_module_get(pdev->driver->driver.owner))
71 return NULL;
72
73 return pdev->driver;
74}
75
76/**
77 * eeh_pcid_put - Dereference on the PCI device driver
78 * @pdev: PCI device
79 *
80 * The function is called to do dereference on the PCI device
81 * driver of the indicated PCI device.
82 */
83static inline void eeh_pcid_put(struct pci_dev *pdev)
84{
85 if (!pdev || !pdev->driver)
86 return;
87
88 module_put(pdev->driver->driver.owner);
89}
90
91/**
92 * eeh_disable_irq - Disable interrupt for the recovering device
93 * @dev: PCI device
94 *
95 * This routine must be called when reporting temporary or permanent
96 * error to the particular PCI device to disable interrupt of that
97 * device. If the device has enabled MSI or MSI-X interrupt, we needn't
98 * do real work because EEH should freeze DMA transfers for those PCI
99 * devices encountering EEH errors, which includes MSI or MSI-X.
100 */
101static void eeh_disable_irq(struct pci_dev *dev)
102{
103 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
104
105 /* Don't disable MSI and MSI-X interrupts. They are
106 * effectively disabled by the DMA Stopped state
107 * when an EEH error occurs.
108 */
109 if (dev->msi_enabled || dev->msix_enabled)
110 return;
111
112 if (!irq_has_action(dev->irq))
113 return;
114
115 edev->mode |= EEH_DEV_IRQ_DISABLED;
116 disable_irq_nosync(dev->irq);
117}
118
/**
 * eeh_enable_irq - Enable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called to enable interrupt while failed
 * device could be resumed. It only undoes an earlier
 * eeh_disable_irq(): the EEH_DEV_IRQ_DISABLED flag guards against
 * enabling an interrupt this code never disabled.
 */
static void eeh_enable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
		/*
		 * FIXME !!!!!
		 *
		 * This is just ass backwards. This maze has
		 * unbalanced irq_enable/disable calls. So instead of
		 * finding the root cause it works around the warning
		 * in the irq_enable code by conditionally calling
		 * into it.
		 *
		 * That's just wrong.The warning in the core code is
		 * there to tell people to fix their asymmetries in
		 * their own code, not by abusing the core information
		 * to avoid it.
		 *
		 * I so wish that the assymetry would be the other way
		 * round and a few more irq_disable calls render that
		 * shit unusable forever.
		 *
		 * tglx
		 */
		if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
			enable_irq(dev->irq);
	}
}
156
157static bool eeh_dev_removed(struct eeh_dev *edev)
158{
159 /* EEH device removed ? */
160 if (!edev || (edev->mode & EEH_DEV_REMOVED))
161 return true;
162
163 return false;
164}
165
/*
 * eeh_dev_save_state - Save a device's PCI config space before reset
 * @data: EEH device
 * @userdata: unused
 *
 * PE-traversal callback: snapshots the device's config space via
 * pci_save_state() so eeh_dev_restore_state() can restore it after
 * the reset. Always returns NULL so the traversal continues.
 */
static void *eeh_dev_save_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	/*
	 * We cannot access the config space on some adapters.
	 * Otherwise, it will cause fenced PHB. We don't save
	 * the content in their config space and will restore
	 * from the initial config space saved when the EEH
	 * device is created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
		return NULL;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_save_state(pdev);
	return NULL;
}
191
192/**
193 * eeh_report_error - Report pci error to each device driver
194 * @data: eeh device
195 * @userdata: return value
196 *
197 * Report an EEH error to each device driver, collect up and
198 * merge the device driver responses. Cumulative response
199 * passed back in "userdata".
200 */
201static void *eeh_report_error(void *data, void *userdata)
202{
203 struct eeh_dev *edev = (struct eeh_dev *)data;
204 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
205 enum pci_ers_result rc, *res = userdata;
206 struct pci_driver *driver;
207
208 if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
209 return NULL;
210
211 device_lock(&dev->dev);
212 dev->error_state = pci_channel_io_frozen;
213
214 driver = eeh_pcid_get(dev);
215 if (!driver) goto out_no_dev;
216
217 eeh_disable_irq(dev);
218
219 if (!driver->err_handler ||
220 !driver->err_handler->error_detected)
221 goto out;
222
223 rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
224
225 /* A driver that needs a reset trumps all others */
226 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
227 if (*res == PCI_ERS_RESULT_NONE) *res = rc;
228
229 edev->in_error = true;
230 pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
231
232out:
233 eeh_pcid_put(dev);
234out_no_dev:
235 device_unlock(&dev->dev);
236 return NULL;
237}
238
239/**
240 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
241 * @data: eeh device
242 * @userdata: return value
243 *
244 * Tells each device driver that IO ports, MMIO and config space I/O
245 * are now enabled. Collects up and merges the device driver responses.
246 * Cumulative response passed back in "userdata".
247 */
248static void *eeh_report_mmio_enabled(void *data, void *userdata)
249{
250 struct eeh_dev *edev = (struct eeh_dev *)data;
251 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
252 enum pci_ers_result rc, *res = userdata;
253 struct pci_driver *driver;
254
255 if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
256 return NULL;
257
258 device_lock(&dev->dev);
259 driver = eeh_pcid_get(dev);
260 if (!driver) goto out_no_dev;
261
262 if (!driver->err_handler ||
263 !driver->err_handler->mmio_enabled ||
264 (edev->mode & EEH_DEV_NO_HANDLER))
265 goto out;
266
267 rc = driver->err_handler->mmio_enabled(dev);
268
269 /* A driver that needs a reset trumps all others */
270 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
271 if (*res == PCI_ERS_RESULT_NONE) *res = rc;
272
273out:
274 eeh_pcid_put(dev);
275out_no_dev:
276 device_unlock(&dev->dev);
277 return NULL;
278}
279
280/**
281 * eeh_report_reset - Tell device that slot has been reset
282 * @data: eeh device
283 * @userdata: return value
284 *
285 * This routine must be called while EEH tries to reset particular
286 * PCI device so that the associated PCI device driver could take
287 * some actions, usually to save data the driver needs so that the
288 * driver can work again while the device is recovered.
289 */
290static void *eeh_report_reset(void *data, void *userdata)
291{
292 struct eeh_dev *edev = (struct eeh_dev *)data;
293 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
294 enum pci_ers_result rc, *res = userdata;
295 struct pci_driver *driver;
296
297 if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
298 return NULL;
299
300 device_lock(&dev->dev);
301 dev->error_state = pci_channel_io_normal;
302
303 driver = eeh_pcid_get(dev);
304 if (!driver) goto out_no_dev;
305
306 eeh_enable_irq(dev);
307
308 if (!driver->err_handler ||
309 !driver->err_handler->slot_reset ||
310 (edev->mode & EEH_DEV_NO_HANDLER) ||
311 (!edev->in_error))
312 goto out;
313
314 rc = driver->err_handler->slot_reset(dev);
315 if ((*res == PCI_ERS_RESULT_NONE) ||
316 (*res == PCI_ERS_RESULT_RECOVERED)) *res = rc;
317 if (*res == PCI_ERS_RESULT_DISCONNECT &&
318 rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
319
320out:
321 eeh_pcid_put(dev);
322out_no_dev:
323 device_unlock(&dev->dev);
324 return NULL;
325}
326
/*
 * eeh_dev_restore_state - Restore a device's saved PCI config space
 * @data: EEH device
 * @userdata: unused
 *
 * Counterpart of eeh_dev_save_state(). Devices whose config space is
 * blocked (EEH_PE_CFG_RESTRICTED) are instead restored from the BARs
 * captured when the EEH device was created; that restore is done once
 * per PE, by the last device on the PE's list.
 */
static void *eeh_dev_restore_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	/*
	 * The content in the config space isn't saved because
	 * the blocked config space on some adapters. We have
	 * to restore the initial saved config space when the
	 * EEH device is created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
		if (list_is_last(&edev->list, &edev->pe->edevs))
			eeh_pe_restore_bars(edev->pe);

		return NULL;
	}

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_restore_state(pdev);
	return NULL;
}
355
/**
 * eeh_report_resume - Tell device to resume normal operations
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called to notify the device driver that it
 * could resume so that the device driver can do some initialization
 * to make the recovered device work again. The driver's resume
 * handler is only invoked when the device actually went through an
 * error (was_in_error) and the EEH_DEV_NO_HANDLER flag is clear.
 */
static void *eeh_report_resume(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	bool was_in_error;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;

	device_lock(&dev->dev);
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver) goto out_no_dev;

	/* Latch and clear the error flag before notifying the driver */
	was_in_error = edev->in_error;
	edev->in_error = false;
	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->resume ||
	    (edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) {
		edev->mode &= ~EEH_DEV_NO_HANDLER;
		goto out;
	}

	driver->err_handler->resume(dev);

	pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
out:
	eeh_pcid_put(dev);
/* NOTE(review): notify_resume is skipped entirely when no driver was
 * bound (the out_no_dev path jumps past it) - confirm that is intended.
 */
#ifdef CONFIG_PCI_IOV
	if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
		eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
#endif
out_no_dev:
	device_unlock(&dev->dev);
	return NULL;
}
405
406/**
407 * eeh_report_failure - Tell device driver that device is dead.
408 * @data: eeh device
409 * @userdata: return value
410 *
411 * This informs the device driver that the device is permanently
412 * dead, and that no further recovery attempts will be made on it.
413 */
414static void *eeh_report_failure(void *data, void *userdata)
415{
416 struct eeh_dev *edev = (struct eeh_dev *)data;
417 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
418 struct pci_driver *driver;
419
420 if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
421 return NULL;
422
423 device_lock(&dev->dev);
424 dev->error_state = pci_channel_io_perm_failure;
425
426 driver = eeh_pcid_get(dev);
427 if (!driver) goto out_no_dev;
428
429 eeh_disable_irq(dev);
430
431 if (!driver->err_handler ||
432 !driver->err_handler->error_detected)
433 goto out;
434
435 driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
436
437 pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
438out:
439 eeh_pcid_put(dev);
440out_no_dev:
441 device_unlock(&dev->dev);
442 return NULL;
443}
444
/*
 * eeh_add_virt_device - Re-add a hot-removed VF to the PCI core
 * @data: EEH device; must represent a VF (edev->physfn set)
 * @userdata: unused
 *
 * Devices whose driver provides error handlers were never removed
 * during recovery and are skipped; everything else is re-created via
 * pci_iov_add_virtfn() under the physical function.
 */
static void *eeh_add_virt_device(void *data, void *userdata)
{
	struct pci_driver *driver;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	if (!(edev->physfn)) {
		pr_warn("%s: EEH dev %04x:%02x:%02x.%01x not for VF\n",
			__func__, pdn->phb->global_number, pdn->busno,
			PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
		return NULL;
	}

	driver = eeh_pcid_get(dev);
	if (driver) {
		eeh_pcid_put(dev);
		/* An EEH-aware driver kept the VF through recovery */
		if (driver->err_handler)
			return NULL;
	}

#ifdef CONFIG_PCI_IOV
	pci_iov_add_virtfn(edev->physfn, pdn->vf_index);
#endif
	return NULL;
}
471
/*
 * eeh_rmv_device - Remove a device that cannot recover in place
 * @data: EEH device
 * @userdata: optional struct eeh_rmv_data accumulator (NULL on the
 *            permanent-failure path)
 *
 * PE-traversal callback. Devices whose drivers implement the EEH
 * error handlers are kept; everything else is detached from the PCI
 * core. Removed VFs are queued on rmv_data->edev_list so they can be
 * re-added by eeh_add_virt_device() after recovery; other devices are
 * removed immediately under the rescan/remove lock.
 */
static void *eeh_rmv_device(void *data, void *userdata)
{
	struct pci_driver *driver;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;
	int *removed = rmv_data ? &rmv_data->removed : NULL;

	/*
	 * Actually, we should remove the PCI bridges as well.
	 * However, that's lots of complexity to do that,
	 * particularly some of devices under the bridge might
	 * support EEH. So we just care about PCI devices for
	 * simplicity here.
	 */
	if (!dev || (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
		return NULL;

	/*
	 * We rely on count-based pcibios_release_device() to
	 * detach permanently offlined PEs. Unfortunately, that's
	 * not reliable enough. We might have the permanently
	 * offlined PEs attached, but we needn't take care of
	 * them and their child devices.
	 */
	if (eeh_dev_removed(edev))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (driver) {
		eeh_pcid_put(dev);
		/* Passed-through PEs are the guest's responsibility */
		if (removed &&
		    eeh_pe_passed(edev->pe))
			return NULL;
		/* EEH-aware driver: keep the device in place */
		if (removed &&
		    driver->err_handler &&
		    driver->err_handler->error_detected &&
		    driver->err_handler->slot_reset)
			return NULL;
	}

	/* Remove it from PCI subsystem */
	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
		 pci_name(dev));
	edev->bus = dev->bus;
	edev->mode |= EEH_DEV_DISCONNECTED;
	if (removed)
		(*removed)++;

	if (edev->physfn) {
#ifdef CONFIG_PCI_IOV
		struct pci_dn *pdn = eeh_dev_to_pdn(edev);

		pci_iov_remove_virtfn(edev->physfn, pdn->vf_index);
		edev->pdev = NULL;

		/*
		 * We have to set the VF PE number to invalid one, which is
		 * required to plug the VF successfully.
		 */
		pdn->pe_number = IODA_INVALID_PE;
#endif
		if (rmv_data)
			list_add(&edev->rmv_list, &rmv_data->edev_list);
	} else {
		pci_lock_rescan_remove();
		pci_stop_and_remove_bus_device(dev);
		pci_unlock_rescan_remove();
	}

	return NULL;
}
544
545static void *eeh_pe_detach_dev(void *data, void *userdata)
546{
547 struct eeh_pe *pe = (struct eeh_pe *)data;
548 struct eeh_dev *edev, *tmp;
549
550 eeh_pe_for_each_dev(pe, edev, tmp) {
551 if (!(edev->mode & EEH_DEV_DISCONNECTED))
552 continue;
553
554 edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
555 eeh_rmv_from_parent_pe(edev);
556 }
557
558 return NULL;
559}
560
561/*
562 * Explicitly clear PE's frozen state for PowerNV where
563 * we have frozen PE until BAR restore is completed. It's
564 * harmless to clear it for pSeries. To be consistent with
565 * PE reset (for 3 times), we try to clear the frozen state
566 * for 3 times as well.
567 */
568static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
569{
570 struct eeh_pe *pe = (struct eeh_pe *)data;
571 bool clear_sw_state = *(bool *)flag;
572 int i, rc = 1;
573
574 for (i = 0; rc && i < 3; i++)
575 rc = eeh_unfreeze_pe(pe, clear_sw_state);
576
577 /* Stop immediately on any errors */
578 if (rc) {
579 pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
580 __func__, rc, pe->phb->global_number, pe->addr);
581 return (void *)pe;
582 }
583
584 return NULL;
585}
586
587static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,
588 bool clear_sw_state)
589{
590 void *rc;
591
592 rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
593 if (!rc)
594 eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
595
596 return rc ? -EIO : 0;
597}
598
/*
 * eeh_pe_reset_and_recover - Reset a PE and restore device state
 * @pe: EEH PE to reset
 *
 * Saves the config space of every device under @pe, performs a full
 * PE reset, clears the frozen state and restores the saved config
 * space. Returns 0 on success or a negative errno; EEH_PE_RECOVERING
 * is cleared on every exit path. Bails out (returns 0) if recovery
 * is already in progress.
 */
int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
	int ret;

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)
		return 0;

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Save states */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	/* Issue reset */
	ret = eeh_pe_reset_full(pe);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);

	return 0;
}
635
/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 * @rmv_data: Optional, list to record removed devices
 * @driver_eeh_aware: Does the device's driver provide EEH support?
 *
 * This routine must be called to do reset on the indicated PE.
 * During the reset, udev might be invoked because those affected
 * PCI devices will be removed and then added. Returns 0 on success
 * or a negative errno from the reset/unfreeze steps.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
			    struct eeh_rmv_data *rmv_data,
			    bool driver_eeh_aware)
{
	time64_t tstamp;
	int cnt, rc;
	struct eeh_dev *edev;

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;
	tstamp = pe->tstamp;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwards. The attached EEH
	 * devices are expected to be attached soon when calling
	 * into pci_hp_add_devices().
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (driver_eeh_aware || (pe->type & EEH_PE_VF)) {
		/* Only strip devices with no EEH-capable driver */
		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
	} else {
		pci_lock_rescan_remove();
		pci_hp_remove_devices(bus);
		pci_unlock_rescan_remove();
	}

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses. So we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	rc = eeh_pe_reset_full(pe);
	if (rc)
		return rc;

	pci_lock_rescan_remove();

	/* Restore PE */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc) {
		pci_unlock_rescan_remove();
		return rc;
	}

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (!driver_eeh_aware || rmv_data->removed) {
		pr_info("EEH: Sleep 5s ahead of %s hotplug\n",
			(driver_eeh_aware ? "partial" : "complete"));
		ssleep(5);

		/*
		 * The EEH device is still connected with its parent
		 * PE. We should disconnect it so the binding can be
		 * rebuilt when adding PCI devices.
		 */
		edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		if (pe->type & EEH_PE_VF) {
			eeh_add_virt_device(edev, NULL);
		} else {
			if (!driver_eeh_aware)
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
			pci_hp_add_devices(bus);
		}
	}
	eeh_pe_state_clear(pe, EEH_PE_KEEP);

	pe->tstamp = tstamp;
	pe->freeze_count = cnt;

	pci_unlock_rescan_remove();
	return 0;
}
734
/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds. Converted to milliseconds
 * when passed to eeh_ops->wait_state().
 */
#define MAX_WAIT_FOR_RECOVERY 300
739
/**
 * eeh_handle_normal_event - Handle EEH events on a specific PE
 * @pe: EEH PE - which should not be used after we return, as it may
 * have been invalidated.
 *
 * Attempts to recover the given PE. If recovery fails or the PE has failed
 * too many times, remove the PE.
 *
 * While PHB detects address or data parity errors on particular PCI
 * slot, the associated PE will be frozen. Besides, DMA's occurring
 * to wild addresses (which usually happen due to bugs in device
 * drivers or in PCI adapter firmware) can cause EEH error. #SERR,
 * #PERR or other misc PCI-related errors also can trigger EEH errors.
 *
 * Recovery process consists of unplugging the device driver (which
 * generated hotplug events to userspace), then issuing a PCI #RST to
 * the device, then reconfiguring the PCI config space for all bridges
 * & devices under this slot, and then finally restarting the device
 * drivers (which cause a second set of hotplug events to go out to
 * userspace).
 */
void eeh_handle_normal_event(struct eeh_pe *pe)
{
	struct pci_bus *bus;
	struct eeh_dev *edev, *tmp;
	int rc = 0;
	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
	struct eeh_rmv_data rmv_data = {LIST_HEAD_INIT(rmv_data.edev_list), 0};

	bus = eeh_pe_bus_get(pe);
	if (!bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
		       __func__, pe->phb->global_number, pe->addr);
		return;
	}

	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Permanently disable PEs that keep failing */
	eeh_pe_update_time_stamp(pe);
	pe->freeze_count++;
	if (pe->freeze_count > eeh_max_freezes) {
		pr_err("EEH: PHB#%x-PE#%x has failed %d times in the\n"
		       "last hour and has been permanently disabled.\n",
		       pe->phb->global_number, pe->addr,
		       pe->freeze_count);
		goto hard_fail;
	}
	pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
		pe->freeze_count);

	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset. Each child gets a report of the
	 * status ... if any child can't handle the reset, then the entire
	 * slot is dlpar removed and added.
	 *
	 * When the PHB is fenced, we have to issue a reset to recover from
	 * the error. Override the result if necessary to have partially
	 * hotplug for this case.
	 */
	pr_info("EEH: Notify device drivers to shutdown\n");
	eeh_pe_dev_traverse(pe, eeh_report_error, &result);
	if ((pe->type & EEH_PE_PHB) &&
	    result != PCI_ERS_RESULT_NONE &&
	    result != PCI_ERS_RESULT_NEED_RESET)
		result = PCI_ERS_RESULT_NEED_RESET;

	/* Get the current PCI slot state. This can take a long time,
	 * sometimes over 300 seconds for certain systems.
	 */
	rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
		pr_warn("EEH: Permanent failure\n");
		goto hard_fail;
	}

	/* Since rtas may enable MMIO when posting the error log,
	 * don't post the error log until after all dev drivers
	 * have been informed.
	 */
	pr_info("EEH: Collect temporary log\n");
	eeh_slot_error_detail(pe, EEH_LOG_TEMP);

	/* If all device drivers were EEH-unaware, then shut
	 * down all of the device drivers, and hope they
	 * go down willingly, without panicing the system.
	 */
	if (result == PCI_ERS_RESULT_NONE) {
		pr_info("EEH: Reset with hotplug activity\n");
		rc = eeh_reset_device(pe, bus, NULL, false);
		if (rc) {
			pr_warn("%s: Unable to reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}
	}

	/* If all devices reported they can proceed, then re-enable MMIO */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable I/O for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			pr_info("EEH: Notify device drivers to resume I/O\n");
			eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
		}
	}

	/* If all devices reported they can proceed, then re-enable DMA */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enabled DMA for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			/*
			 * We didn't do PE reset for the case. The PE
			 * is still in frozen state. Clear it before
			 * resuming the PE.
			 */
			eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
			result = PCI_ERS_RESULT_RECOVERED;
		}
	}

	/* If any device has a hard failure, then shut off everything. */
	if (result == PCI_ERS_RESULT_DISCONNECT) {
		pr_warn("EEH: Device driver gave up\n");
		goto hard_fail;
	}

	/* If any device called out for a reset, then reset the slot */
	if (result == PCI_ERS_RESULT_NEED_RESET) {
		pr_info("EEH: Reset without hotplug activity\n");
		rc = eeh_reset_device(pe, bus, &rmv_data, true);
		if (rc) {
			pr_warn("%s: Cannot reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}

		pr_info("EEH: Notify device drivers "
			"the completion of reset\n");
		result = PCI_ERS_RESULT_NONE;
		eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
	}

	/* All devices should claim they have recovered by now. */
	if ((result != PCI_ERS_RESULT_RECOVERED) &&
	    (result != PCI_ERS_RESULT_NONE)) {
		pr_warn("EEH: Not recovered\n");
		goto hard_fail;
	}

	/*
	 * For those hot removed VFs, we should add back them after PF get
	 * recovered properly.
	 */
	list_for_each_entry_safe(edev, tmp, &rmv_data.edev_list, rmv_list) {
		eeh_add_virt_device(edev, NULL);
		list_del(&edev->rmv_list);
	}

	/* Tell all device drivers that they can resume operations */
	pr_info("EEH: Notify device driver to resume\n");
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	goto final;

hard_fail:
	/*
	 * About 90% of all real-life EEH failures in the field
	 * are due to poorly seated PCI cards. Only 10% or so are
	 * due to actual, failed cards.
	 */
	pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
	       "Please try reseating or replacing it\n",
		pe->phb->global_number, pe->addr);

	eeh_slot_error_detail(pe, EEH_LOG_PERM);

	/* Notify all devices that they're about to go down. */
	eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);

	/* Mark the PE to be removed permanently */
	eeh_pe_state_mark(pe, EEH_PE_REMOVED);

	/*
	 * Shut down the device drivers for good. We mark
	 * all removed devices correctly to avoid access
	 * the their PCI config any more.
	 */
	if (pe->type & EEH_PE_VF) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
	} else {
		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

		pci_lock_rescan_remove();
		pci_hp_remove_devices(bus);
		pci_unlock_rescan_remove();
		/* The passed PE should no longer be used */
		return;
	}
final:
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
}
955
/**
 * eeh_handle_special_event - Handle EEH events without a specific failing PE
 *
 * Called when an EEH event is detected but can't be narrowed down to a
 * specific PE. Iterates through possible failures and handles them as
 * necessary.
 */
void eeh_handle_special_event(void)
{
	struct eeh_pe *pe, *phb_pe;
	struct pci_bus *bus;
	struct pci_controller *hose;
	unsigned long flags;
	int rc;


	do {
		rc = eeh_ops->next_error(&pe);

		switch (rc) {
		case EEH_NEXT_ERR_DEAD_IOC:
			/* Mark all PHBs in dead state */
			eeh_serialize_lock(&flags);

			/* Purge all events */
			eeh_remove_event(NULL, true);

			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe) continue;

				eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
			}

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_FROZEN_PE:
		case EEH_NEXT_ERR_FENCED_PHB:
		case EEH_NEXT_ERR_DEAD_PHB:
			/* Mark the PE in fenced state */
			eeh_serialize_lock(&flags);

			/* Purge all events of the PHB */
			eeh_remove_event(pe, true);

			if (rc == EEH_NEXT_ERR_DEAD_PHB)
				eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
			else
				eeh_pe_state_mark(pe,
					EEH_PE_ISOLATED | EEH_PE_RECOVERING);

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_NONE:
			return;
		default:
			pr_warn("%s: Invalid value %d from next_error()\n",
				__func__, rc);
			return;
		}

		/*
		 * For fenced PHB and frozen PE, it's handled as normal
		 * event. We have to remove the affected PHBs for dead
		 * PHB and IOC
		 */
		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
		    rc == EEH_NEXT_ERR_FENCED_PHB) {
			eeh_handle_normal_event(pe);
		} else {
			pci_lock_rescan_remove();
			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe ||
				    !(phb_pe->state & EEH_PE_ISOLATED) ||
				    (phb_pe->state & EEH_PE_RECOVERING))
					continue;

				/* Notify all devices to be down */
				/* NOTE(review): the next two calls operate on
				 * 'pe' while the loop filters on 'phb_pe' -
				 * confirm this is intentional.
				 */
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
				eeh_pe_dev_traverse(pe,
					eeh_report_failure, NULL);
				bus = eeh_pe_bus_get(phb_pe);
				if (!bus) {
					pr_err("%s: Cannot find PCI bus for "
					       "PHB#%x-PE#%x\n",
					       __func__,
					       pe->phb->global_number,
					       pe->addr);
					break;
				}
				pci_hp_remove_devices(bus);
			}
			pci_unlock_rescan_remove();
		}

		/*
		 * If we have detected dead IOC, we needn't proceed
		 * any more since all PHBs would have been removed
		 */
		if (rc == EEH_NEXT_ERR_DEAD_IOC)
			break;
	} while (rc != EEH_NEXT_ERR_NONE);
}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
4 * Copyright IBM Corp. 2004 2005
5 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
6 *
7 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
8 */
9#include <linux/delay.h>
10#include <linux/interrupt.h>
11#include <linux/irq.h>
12#include <linux/module.h>
13#include <linux/pci.h>
14#include <linux/pci_hotplug.h>
15#include <asm/eeh.h>
16#include <asm/eeh_event.h>
17#include <asm/ppc-pci.h>
18#include <asm/pci-bridge.h>
19#include <asm/rtas.h>
20
/* Accumulator passed through the device-removal traversal during recovery. */
struct eeh_rmv_data {
	struct list_head removed_vf_list;	/* VFs removed; re-added after recovery */
	int removed_dev_count;			/* devices removed from the PCI core */
};
25
26static int eeh_result_priority(enum pci_ers_result result)
27{
28 switch (result) {
29 case PCI_ERS_RESULT_NONE:
30 return 1;
31 case PCI_ERS_RESULT_NO_AER_DRIVER:
32 return 2;
33 case PCI_ERS_RESULT_RECOVERED:
34 return 3;
35 case PCI_ERS_RESULT_CAN_RECOVER:
36 return 4;
37 case PCI_ERS_RESULT_DISCONNECT:
38 return 5;
39 case PCI_ERS_RESULT_NEED_RESET:
40 return 6;
41 default:
42 WARN_ONCE(1, "Unknown pci_ers_result value: %d\n", result);
43 return 0;
44 }
45};
46
47static const char *pci_ers_result_name(enum pci_ers_result result)
48{
49 switch (result) {
50 case PCI_ERS_RESULT_NONE:
51 return "none";
52 case PCI_ERS_RESULT_CAN_RECOVER:
53 return "can recover";
54 case PCI_ERS_RESULT_NEED_RESET:
55 return "need reset";
56 case PCI_ERS_RESULT_DISCONNECT:
57 return "disconnect";
58 case PCI_ERS_RESULT_RECOVERED:
59 return "recovered";
60 case PCI_ERS_RESULT_NO_AER_DRIVER:
61 return "no AER driver";
62 default:
63 WARN_ONCE(1, "Unknown result type: %d\n", result);
64 return "unknown";
65 }
66};
67
68static enum pci_ers_result pci_ers_merge_result(enum pci_ers_result old,
69 enum pci_ers_result new)
70{
71 if (eeh_result_priority(new) > eeh_result_priority(old))
72 return new;
73 return old;
74}
75
76static bool eeh_dev_removed(struct eeh_dev *edev)
77{
78 return !edev || (edev->mode & EEH_DEV_REMOVED);
79}
80
81static bool eeh_edev_actionable(struct eeh_dev *edev)
82{
83 if (!edev->pdev)
84 return false;
85 if (edev->pdev->error_state == pci_channel_io_perm_failure)
86 return false;
87 if (eeh_dev_removed(edev))
88 return false;
89 if (eeh_pe_passed(edev->pe))
90 return false;
91
92 return true;
93}
94
95/**
96 * eeh_pcid_get - Get the PCI device driver
97 * @pdev: PCI device
98 *
99 * The function is used to retrieve the PCI device driver for
100 * the indicated PCI device. Besides, we will increase the reference
101 * of the PCI device driver to prevent that being unloaded on
102 * the fly. Otherwise, kernel crash would be seen.
103 */
104static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
105{
106 if (!pdev || !pdev->dev.driver)
107 return NULL;
108
109 if (!try_module_get(pdev->dev.driver->owner))
110 return NULL;
111
112 return to_pci_driver(pdev->dev.driver);
113}
114
115/**
116 * eeh_pcid_put - Dereference on the PCI device driver
117 * @pdev: PCI device
118 *
119 * The function is called to do dereference on the PCI device
120 * driver of the indicated PCI device.
121 */
122static inline void eeh_pcid_put(struct pci_dev *pdev)
123{
124 if (!pdev || !pdev->dev.driver)
125 return;
126
127 module_put(pdev->dev.driver->owner);
128}
129
/**
 * eeh_disable_irq - Disable interrupt for the recovering device
 * @edev: EEH device
 *
 * This routine must be called when reporting temporary or permanent
 * error to the particular PCI device to disable interrupt of that
 * device. If the device has enabled MSI or MSI-X interrupt, we needn't
 * do real work because EEH should freeze DMA transfers for those PCI
 * devices encountering EEH errors, which includes MSI or MSI-X.
 */
static void eeh_disable_irq(struct eeh_dev *edev)
{
	/* Don't disable MSI and MSI-X interrupts. They are
	 * effectively disabled by the DMA Stopped state
	 * when an EEH error occurs.
	 */
	if (edev->pdev->msi_enabled || edev->pdev->msix_enabled)
		return;

	if (!irq_has_action(edev->pdev->irq))
		return;

	/* Record that we disabled it so eeh_enable_irq() can undo this */
	edev->mode |= EEH_DEV_IRQ_DISABLED;
	disable_irq_nosync(edev->pdev->irq);
}
155
/**
 * eeh_enable_irq - Enable interrupt for the recovering device
 * @edev: EEH device
 *
 * This routine must be called to enable interrupt while failed
 * device could be resumed.
 */
static void eeh_enable_irq(struct eeh_dev *edev)
{
	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
		/*
		 * FIXME !!!!!
		 *
		 * This is just ass backwards. This maze has
		 * unbalanced irq_enable/disable calls. So instead of
		 * finding the root cause it works around the warning
		 * in the irq_enable code by conditionally calling
		 * into it.
		 *
		 * That's just wrong. The warning in the core code is
		 * there to tell people to fix their asymmetries in
		 * their own code, not by abusing the core information
		 * to avoid it.
		 *
		 * I so wish that the asymmetry would be the other way
		 * round and a few more irq_disable calls render that
		 * shit unusable forever.
		 *
		 * tglx
		 */
		if (irqd_irq_disabled(irq_get_irq_data(edev->pdev->irq)))
			enable_irq(edev->pdev->irq);
	}
}
191
192static void eeh_dev_save_state(struct eeh_dev *edev, void *userdata)
193{
194 struct pci_dev *pdev;
195
196 if (!edev)
197 return;
198
199 /*
200 * We cannot access the config space on some adapters.
201 * Otherwise, it will cause fenced PHB. We don't save
202 * the content in their config space and will restore
203 * from the initial config space saved when the EEH
204 * device is created.
205 */
206 if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
207 return;
208
209 pdev = eeh_dev_to_pci_dev(edev);
210 if (!pdev)
211 return;
212
213 pci_save_state(pdev);
214}
215
216static void eeh_set_channel_state(struct eeh_pe *root, pci_channel_state_t s)
217{
218 struct eeh_pe *pe;
219 struct eeh_dev *edev, *tmp;
220
221 eeh_for_each_pe(root, pe)
222 eeh_pe_for_each_dev(pe, edev, tmp)
223 if (eeh_edev_actionable(edev))
224 edev->pdev->error_state = s;
225}
226
227static void eeh_set_irq_state(struct eeh_pe *root, bool enable)
228{
229 struct eeh_pe *pe;
230 struct eeh_dev *edev, *tmp;
231
232 eeh_for_each_pe(root, pe) {
233 eeh_pe_for_each_dev(pe, edev, tmp) {
234 if (!eeh_edev_actionable(edev))
235 continue;
236
237 if (!eeh_pcid_get(edev->pdev))
238 continue;
239
240 if (enable)
241 eeh_enable_irq(edev);
242 else
243 eeh_disable_irq(edev);
244
245 eeh_pcid_put(edev->pdev);
246 }
247 }
248}
249
/* Signature shared by all per-device error-report callbacks below */
typedef enum pci_ers_result (*eeh_report_fn)(struct eeh_dev *,
					     struct pci_dev *,
					     struct pci_driver *);
/*
 * Invoke report callback @fn for one device and merge its reply into
 * *@result (if non-NULL). Takes a reference on the pci_dev under the
 * rescan/remove lock so the device cannot vanish, and holds the
 * device lock across the callback.
 */
static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn,
			       enum pci_ers_result *result)
{
	struct pci_dev *pdev;
	struct pci_driver *driver;
	enum pci_ers_result new_result;

	/* Pin the pci_dev while hotplug is locked out */
	pci_lock_rescan_remove();
	pdev = edev->pdev;
	if (pdev)
		get_device(&pdev->dev);
	pci_unlock_rescan_remove();
	if (!pdev) {
		eeh_edev_info(edev, "no device");
		return;
	}
	device_lock(&pdev->dev);
	if (eeh_edev_actionable(edev)) {
		/* Also pins the driver module while we call into it */
		driver = eeh_pcid_get(pdev);

		if (!driver)
			eeh_edev_info(edev, "no driver");
		else if (!driver->err_handler)
			eeh_edev_info(edev, "driver not EEH aware");
		else if (edev->mode & EEH_DEV_NO_HANDLER)
			eeh_edev_info(edev, "driver bound too late");
		else {
			new_result = fn(edev, pdev, driver);
			eeh_edev_info(edev, "%s driver reports: '%s'",
				      driver->name,
				      pci_ers_result_name(new_result));
			if (result)
				*result = pci_ers_merge_result(*result,
							       new_result);
		}
		if (driver)
			eeh_pcid_put(pdev);
	} else {
		eeh_edev_info(edev, "not actionable (%d,%d,%d)", !!pdev,
			      !eeh_dev_removed(edev), !eeh_pe_passed(edev->pe));
	}
	device_unlock(&pdev->dev);
	if (edev->pdev != pdev)
		eeh_edev_warn(edev, "Device changed during processing!\n");
	put_device(&pdev->dev);
}
299
300static void eeh_pe_report(const char *name, struct eeh_pe *root,
301 eeh_report_fn fn, enum pci_ers_result *result)
302{
303 struct eeh_pe *pe;
304 struct eeh_dev *edev, *tmp;
305
306 pr_info("EEH: Beginning: '%s'\n", name);
307 eeh_for_each_pe(root, pe) eeh_pe_for_each_dev(pe, edev, tmp)
308 eeh_pe_report_edev(edev, fn, result);
309 if (result)
310 pr_info("EEH: Finished:'%s' with aggregate recovery state:'%s'\n",
311 name, pci_ers_result_name(*result));
312 else
313 pr_info("EEH: Finished:'%s'", name);
314}
315
/**
 * eeh_report_error - Report pci error to each device driver
 * @edev: eeh device
 * @pdev: PCI device (reference held by the caller)
 * @driver: device's PCI driver
 *
 * Report an EEH error to each device driver via its
 * error_detected() handler with channel state "IO frozen".
 */
static enum pci_ers_result eeh_report_error(struct eeh_dev *edev,
					    struct pci_dev *pdev,
					    struct pci_driver *driver)
{
	enum pci_ers_result rc;

	if (!driver->err_handler->error_detected)
		return PCI_ERS_RESULT_NONE;

	eeh_edev_info(edev, "Invoking %s->error_detected(IO frozen)",
		      driver->name);
	rc = driver->err_handler->error_detected(pdev, pci_channel_io_frozen);

	/* Remember the error so slot_reset/resume handlers run later */
	edev->in_error = true;
	pci_uevent_ers(pdev, PCI_ERS_RESULT_NONE);
	return rc;
}
340
/**
 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 * @edev: eeh device
 * @pdev: PCI device (reference held by the caller)
 * @driver: device's PCI driver
 *
 * Tells each device driver that IO ports, MMIO and config space I/O
 * are now enabled.
 */
static enum pci_ers_result eeh_report_mmio_enabled(struct eeh_dev *edev,
						   struct pci_dev *pdev,
						   struct pci_driver *driver)
{
	if (!driver->err_handler->mmio_enabled)
		return PCI_ERS_RESULT_NONE;
	eeh_edev_info(edev, "Invoking %s->mmio_enabled()", driver->name);
	return driver->err_handler->mmio_enabled(pdev);
}
358
/**
 * eeh_report_reset - Tell device that slot has been reset
 * @edev: eeh device
 * @pdev: PCI device (reference held by the caller)
 * @driver: device's PCI driver
 *
 * This routine must be called while EEH tries to reset particular
 * PCI device so that the associated PCI device driver could take
 * some actions, usually to save data the driver needs so that the
 * driver can work again while the device is recovered.
 */
static enum pci_ers_result eeh_report_reset(struct eeh_dev *edev,
					    struct pci_dev *pdev,
					    struct pci_driver *driver)
{
	/* Only devices that saw the original error get slot_reset */
	if (!driver->err_handler->slot_reset || !edev->in_error)
		return PCI_ERS_RESULT_NONE;
	eeh_edev_info(edev, "Invoking %s->slot_reset()", driver->name);
	return driver->err_handler->slot_reset(pdev);
}
378
379static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
380{
381 struct pci_dev *pdev;
382
383 if (!edev)
384 return;
385
386 /*
387 * The content in the config space isn't saved because
388 * the blocked config space on some adapters. We have
389 * to restore the initial saved config space when the
390 * EEH device is created.
391 */
392 if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
393 if (list_is_last(&edev->entry, &edev->pe->edevs))
394 eeh_pe_restore_bars(edev->pe);
395
396 return;
397 }
398
399 pdev = eeh_dev_to_pci_dev(edev);
400 if (!pdev)
401 return;
402
403 pci_restore_state(pdev);
404}
405
406/**
407 * eeh_report_resume - Tell device to resume normal operations
408 * @edev: eeh device
409 * @driver: device's PCI driver
410 *
411 * This routine must be called to notify the device driver that it
412 * could resume so that the device driver can do some initialization
413 * to make the recovered device work again.
414 */
415static enum pci_ers_result eeh_report_resume(struct eeh_dev *edev,
416 struct pci_dev *pdev,
417 struct pci_driver *driver)
418{
419 if (!driver->err_handler->resume || !edev->in_error)
420 return PCI_ERS_RESULT_NONE;
421
422 eeh_edev_info(edev, "Invoking %s->resume()", driver->name);
423 driver->err_handler->resume(pdev);
424
425 pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_RECOVERED);
426#ifdef CONFIG_PCI_IOV
427 if (eeh_ops->notify_resume)
428 eeh_ops->notify_resume(edev);
429#endif
430 return PCI_ERS_RESULT_NONE;
431}
432
/**
 * eeh_report_failure - Tell device driver that device is dead.
 * @edev: eeh device
 * @pdev: PCI device (reference held by the caller)
 * @driver: device's PCI driver
 *
 * This informs the device driver that the device is permanently
 * dead, and that no further recovery attempts will be made on it.
 */
static enum pci_ers_result eeh_report_failure(struct eeh_dev *edev,
					      struct pci_dev *pdev,
					      struct pci_driver *driver)
{
	enum pci_ers_result rc;

	if (!driver->err_handler->error_detected)
		return PCI_ERS_RESULT_NONE;

	eeh_edev_info(edev, "Invoking %s->error_detected(permanent failure)",
		      driver->name);
	rc = driver->err_handler->error_detected(pdev,
						 pci_channel_io_perm_failure);

	pci_uevent_ers(pdev, PCI_ERS_RESULT_DISCONNECT);
	return rc;
}
458
/*
 * Re-add a hot-removed virtual function after its PF has recovered.
 * Only applies when no EEH-aware driver (one with err_handler) is
 * bound; such drivers recover the VF themselves. Always returns NULL
 * (callers ignore the value).
 */
static void *eeh_add_virt_device(struct eeh_dev *edev)
{
	struct pci_driver *driver;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);

	if (!(edev->physfn)) {
		eeh_edev_warn(edev, "Not for VF\n");
		return NULL;
	}

	driver = eeh_pcid_get(dev);
	if (driver) {
		/* An EEH-aware driver handles VF recovery itself */
		if (driver->err_handler) {
			eeh_pcid_put(dev);
			return NULL;
		}
		eeh_pcid_put(dev);
	}

#ifdef CONFIG_PCI_IOV
	pci_iov_add_virtfn(edev->physfn, edev->vf_index);
#endif
	return NULL;
}
483
/*
 * Remove a device that lacks an EEH-sensitive driver so it can be
 * re-probed after reset (eeh_pe_dev_traverse() callback). @userdata
 * is an optional struct eeh_rmv_data; when present, devices with
 * full EEH handlers are kept, and removals are recorded in it.
 */
static void eeh_rmv_device(struct eeh_dev *edev, void *userdata)
{
	struct pci_driver *driver;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;

	/*
	 * Actually, we should remove the PCI bridges as well.
	 * However, that's lots of complexity to do that,
	 * particularly some of devices under the bridge might
	 * support EEH. So we just care about PCI devices for
	 * simplicity here.
	 */
	if (!eeh_edev_actionable(edev) ||
	    (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
		return;

	if (rmv_data) {
		/* Keep devices whose driver can ride out the reset */
		driver = eeh_pcid_get(dev);
		if (driver) {
			if (driver->err_handler &&
			    driver->err_handler->error_detected &&
			    driver->err_handler->slot_reset) {
				eeh_pcid_put(dev);
				return;
			}
			eeh_pcid_put(dev);
		}
	}

	/* Remove it from PCI subsystem */
	pr_info("EEH: Removing %s without EEH sensitive driver\n",
		pci_name(dev));
	edev->mode |= EEH_DEV_DISCONNECTED;
	if (rmv_data)
		rmv_data->removed_dev_count++;

	if (edev->physfn) {
#ifdef CONFIG_PCI_IOV
		pci_iov_remove_virtfn(edev->physfn, edev->vf_index);
		edev->pdev = NULL;
#endif
		/* Track the VF so it can be re-added after PF recovery */
		if (rmv_data)
			list_add(&edev->rmv_entry, &rmv_data->removed_vf_list);
	} else {
		pci_lock_rescan_remove();
		pci_stop_and_remove_bus_device(dev);
		pci_unlock_rescan_remove();
	}
}
534
535static void *eeh_pe_detach_dev(struct eeh_pe *pe, void *userdata)
536{
537 struct eeh_dev *edev, *tmp;
538
539 eeh_pe_for_each_dev(pe, edev, tmp) {
540 if (!(edev->mode & EEH_DEV_DISCONNECTED))
541 continue;
542
543 edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
544 eeh_pe_tree_remove(edev);
545 }
546
547 return NULL;
548}
549
550/*
551 * Explicitly clear PE's frozen state for PowerNV where
552 * we have frozen PE until BAR restore is completed. It's
553 * harmless to clear it for pSeries. To be consistent with
554 * PE reset (for 3 times), we try to clear the frozen state
555 * for 3 times as well.
556 */
557static int eeh_clear_pe_frozen_state(struct eeh_pe *root, bool include_passed)
558{
559 struct eeh_pe *pe;
560 int i;
561
562 eeh_for_each_pe(root, pe) {
563 if (include_passed || !eeh_pe_passed(pe)) {
564 for (i = 0; i < 3; i++)
565 if (!eeh_unfreeze_pe(pe))
566 break;
567 if (i >= 3)
568 return -EIO;
569 }
570 }
571 eeh_pe_state_clear(root, EEH_PE_ISOLATED, include_passed);
572 return 0;
573}
574
/*
 * eeh_pe_reset_and_recover - Reset a PE and restore its device state
 * @pe: EEH PE to reset
 *
 * Full reset cycle without driver notification: save config space,
 * reset, unfreeze, restore config space. Returns 0 on success (or if
 * the PE is already being recovered), negative errno on failure.
 */
int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
	int ret;

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)
		return 0;

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Save states */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	/* Issue reset */
	ret = eeh_pe_reset_full(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
		return ret;
	}

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
		return ret;
	}

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);

	return 0;
}
611
/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 * @rmv_data: Optional, list to record removed devices
 * @driver_eeh_aware: Does the device's driver provide EEH support?
 *
 * This routine must be called to do reset on the indicated PE.
 * During the reset, udev might be invoked because those affected
 * PCI devices will be removed and then added.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
			    struct eeh_rmv_data *rmv_data,
			    bool driver_eeh_aware)
{
	time64_t tstamp;
	int cnt, rc;
	struct eeh_dev *edev;
	struct eeh_pe *tmp_pe;
	bool any_passed = false;

	eeh_for_each_pe(pe, tmp_pe)
		any_passed |= eeh_pe_passed(tmp_pe);

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;
	tstamp = pe->tstamp;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwards. The attached EEH
	 * devices are expected to be attached soon when calling
	 * into pci_hp_add_devices().
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (any_passed || driver_eeh_aware || (pe->type & EEH_PE_VF)) {
		/* Selectively remove only devices without EEH handlers */
		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
	} else {
		pci_lock_rescan_remove();
		pci_hp_remove_devices(bus);
		pci_unlock_rescan_remove();
	}

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses. So we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	rc = eeh_pe_reset_full(pe, false);
	if (rc)
		return rc;

	pci_lock_rescan_remove();

	/* Restore PE */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc) {
		pci_unlock_rescan_remove();
		return rc;
	}

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (!driver_eeh_aware || rmv_data->removed_dev_count) {
		pr_info("EEH: Sleep 5s ahead of %s hotplug\n",
			(driver_eeh_aware ? "partial" : "complete"));
		ssleep(5);

		/*
		 * The EEH device is still connected with its parent
		 * PE. We should disconnect it so the binding can be
		 * rebuilt when adding PCI devices.
		 */
		edev = list_first_entry(&pe->edevs, struct eeh_dev, entry);
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		if (pe->type & EEH_PE_VF) {
			eeh_add_virt_device(edev);
		} else {
			if (!driver_eeh_aware)
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
			pci_hp_add_devices(bus);
		}
	}
	eeh_pe_state_clear(pe, EEH_PE_KEEP, true);

	pe->tstamp = tstamp;
	pe->freeze_count = cnt;

	pci_unlock_rescan_remove();
	return 0;
}
715
716/* The longest amount of time to wait for a pci device
717 * to come back on line, in seconds.
718 */
719#define MAX_WAIT_FOR_RECOVERY 300
720
721
/* Walks the PE tree after processing an event to remove any stale PEs.
 *
 * NB: This needs to be recursive to ensure the leaf PEs get removed
 * before their parents do. Although this could be done iteratively,
 * recursion is easier to read and guarantees the leaf nodes are
 * handled first.
 */
static void eeh_pe_cleanup(struct eeh_pe *pe)
{
	struct eeh_pe *child_pe, *tmp;

	list_for_each_entry_safe(child_pe, tmp, &pe->child_list, child)
		eeh_pe_cleanup(child_pe);

	/* PEs marked KEEP survive recovery */
	if (pe->state & EEH_PE_KEEP)
		return;

	if (!(pe->state & EEH_PE_INVALID))
		return;

	/* Only free invalid PEs with no devices and no children left */
	if (list_empty(&pe->edevs) && list_empty(&pe->child_list)) {
		list_del(&pe->child);
		kfree(pe);
	}
}
747
748/**
749 * eeh_check_slot_presence - Check if a device is still present in a slot
750 * @pdev: pci_dev to check
751 *
752 * This function may return a false positive if we can't determine the slot's
753 * presence state. This might happen for PCIe slots if the PE containing
754 * the upstream bridge is also frozen, or the bridge is part of the same PE
755 * as the device.
756 *
757 * This shouldn't happen often, but you might see it if you hotplug a PCIe
758 * switch.
759 */
760static bool eeh_slot_presence_check(struct pci_dev *pdev)
761{
762 const struct hotplug_slot_ops *ops;
763 struct pci_slot *slot;
764 u8 state;
765 int rc;
766
767 if (!pdev)
768 return false;
769
770 if (pdev->error_state == pci_channel_io_perm_failure)
771 return false;
772
773 slot = pdev->slot;
774 if (!slot || !slot->hotplug)
775 return true;
776
777 ops = slot->hotplug->ops;
778 if (!ops || !ops->get_adapter_status)
779 return true;
780
781 /* set the attention indicator while we've got the slot ops */
782 if (ops->set_attention_status)
783 ops->set_attention_status(slot->hotplug, 1);
784
785 rc = ops->get_adapter_status(slot->hotplug, &state);
786 if (rc)
787 return true;
788
789 return !!state;
790}
791
792static void eeh_clear_slot_attention(struct pci_dev *pdev)
793{
794 const struct hotplug_slot_ops *ops;
795 struct pci_slot *slot;
796
797 if (!pdev)
798 return;
799
800 if (pdev->error_state == pci_channel_io_perm_failure)
801 return;
802
803 slot = pdev->slot;
804 if (!slot || !slot->hotplug)
805 return;
806
807 ops = slot->hotplug->ops;
808 if (!ops || !ops->set_attention_status)
809 return;
810
811 ops->set_attention_status(slot->hotplug, 0);
812}
813
/**
 * eeh_handle_normal_event - Handle EEH events on a specific PE
 * @pe: EEH PE - which should not be used after we return, as it may
 * have been invalidated.
 *
 * Attempts to recover the given PE. If recovery fails or the PE has failed
 * too many times, remove the PE.
 *
 * While PHB detects address or data parity errors on particular PCI
 * slot, the associated PE will be frozen. Besides, DMA's occurring
 * to wild addresses (which usually happen due to bugs in device
 * drivers or in PCI adapter firmware) can cause EEH error. #SERR,
 * #PERR or other misc PCI-related errors also can trigger EEH errors.
 *
 * Recovery process consists of unplugging the device driver (which
 * generated hotplug events to userspace), then issuing a PCI #RST to
 * the device, then reconfiguring the PCI config space for all bridges
 * & devices under this slot, and then finally restarting the device
 * drivers (which cause a second set of hotplug events to go out to
 * userspace).
 */
void eeh_handle_normal_event(struct eeh_pe *pe)
{
	struct pci_bus *bus;
	struct eeh_dev *edev, *tmp;
	struct eeh_pe *tmp_pe;
	int rc = 0;
	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
	struct eeh_rmv_data rmv_data =
		{LIST_HEAD_INIT(rmv_data.removed_vf_list), 0};
	int devices = 0;

	bus = eeh_pe_bus_get(pe);
	if (!bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
		       __func__, pe->phb->global_number, pe->addr);
		return;
	}

	/*
	 * When devices are hot-removed we might get an EEH due to
	 * a driver attempting to touch the MMIO space of a removed
	 * device. In this case we don't have a device to recover
	 * so suppress the event if we can't find any present devices.
	 *
	 * The hotplug driver should take care of tearing down the
	 * device itself.
	 */
	eeh_for_each_pe(pe, tmp_pe)
		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
			if (eeh_slot_presence_check(edev->pdev))
				devices++;

	if (!devices) {
		pr_debug("EEH: Frozen PHB#%x-PE#%x is empty!\n",
			 pe->phb->global_number, pe->addr);
		goto out; /* nothing to recover */
	}

	/* Log the event */
	if (pe->type & EEH_PE_PHB) {
		pr_err("EEH: Recovering PHB#%x, location: %s\n",
		       pe->phb->global_number, eeh_pe_loc_get(pe));
	} else {
		struct eeh_pe *phb_pe = eeh_phb_pe_get(pe->phb);

		pr_err("EEH: Recovering PHB#%x-PE#%x\n",
		       pe->phb->global_number, pe->addr);
		pr_err("EEH: PE location: %s, PHB location: %s\n",
		       eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
	}

#ifdef CONFIG_STACKTRACE
	/*
	 * Print the saved stack trace now that we've verified there's
	 * something to recover.
	 */
	if (pe->trace_entries) {
		void **ptrs = (void **) pe->stack_trace;
		int i;

		pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
		       pe->phb->global_number, pe->addr);

		/* FIXME: Use the same format as dump_stack() */
		pr_err("EEH: Call Trace:\n");
		for (i = 0; i < pe->trace_entries; i++)
			pr_err("EEH: [%pK] %pS\n", ptrs[i], ptrs[i]);

		pe->trace_entries = 0;
	}
#endif /* CONFIG_STACKTRACE */

	/* Clear the stale "bound too late" marker on every device */
	eeh_for_each_pe(pe, tmp_pe)
		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
			edev->mode &= ~EEH_DEV_NO_HANDLER;

	eeh_pe_update_time_stamp(pe);
	pe->freeze_count++;
	if (pe->freeze_count > eeh_max_freezes) {
		pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n",
		       pe->phb->global_number, pe->addr,
		       pe->freeze_count);

		goto recover_failed;
	}

	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset. Each child gets a report of the
	 * status ... if any child can't handle the reset, then the entire
	 * slot is dlpar removed and added.
	 *
	 * When the PHB is fenced, we have to issue a reset to recover from
	 * the error. Override the result if necessary to have partially
	 * hotplug for this case.
	 */
	pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
		pe->freeze_count, eeh_max_freezes);
	pr_info("EEH: Notify device drivers to shutdown\n");
	eeh_set_channel_state(pe, pci_channel_io_frozen);
	eeh_set_irq_state(pe, false);
	eeh_pe_report("error_detected(IO frozen)", pe,
		      eeh_report_error, &result);
	if (result == PCI_ERS_RESULT_DISCONNECT)
		goto recover_failed;

	/*
	 * Error logged on a PHB are always fences which need a full
	 * PHB reset to clear so force that to happen.
	 */
	if ((pe->type & EEH_PE_PHB) && result != PCI_ERS_RESULT_NONE)
		result = PCI_ERS_RESULT_NEED_RESET;

	/* Get the current PCI slot state. This can take a long time,
	 * sometimes over 300 seconds for certain systems.
	 */
	rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY * 1000);
	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
		pr_warn("EEH: Permanent failure\n");
		goto recover_failed;
	}

	/* Since rtas may enable MMIO when posting the error log,
	 * don't post the error log until after all dev drivers
	 * have been informed.
	 */
	pr_info("EEH: Collect temporary log\n");
	eeh_slot_error_detail(pe, EEH_LOG_TEMP);

	/* If all device drivers were EEH-unaware, then shut
	 * down all of the device drivers, and hope they
	 * go down willingly, without panicking the system.
	 */
	if (result == PCI_ERS_RESULT_NONE) {
		pr_info("EEH: Reset with hotplug activity\n");
		rc = eeh_reset_device(pe, bus, NULL, false);
		if (rc) {
			pr_warn("%s: Unable to reset, err=%d\n", __func__, rc);
			goto recover_failed;
		}
	}

	/* If all devices reported they can proceed, then re-enable MMIO */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable I/O for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
		if (rc < 0)
			goto recover_failed;

		if (rc) {
			/* Couldn't thaw MMIO; escalate to a reset */
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			pr_info("EEH: Notify device drivers to resume I/O\n");
			eeh_pe_report("mmio_enabled", pe,
				      eeh_report_mmio_enabled, &result);
		}
	}
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enabled DMA for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
		if (rc < 0)
			goto recover_failed;

		if (rc) {
			/* Couldn't thaw DMA; escalate to a reset */
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			/*
			 * We didn't do PE reset for the case. The PE
			 * is still in frozen state. Clear it before
			 * resuming the PE.
			 */
			eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
			result = PCI_ERS_RESULT_RECOVERED;
		}
	}

	/* If any device called out for a reset, then reset the slot */
	if (result == PCI_ERS_RESULT_NEED_RESET) {
		pr_info("EEH: Reset without hotplug activity\n");
		rc = eeh_reset_device(pe, bus, &rmv_data, true);
		if (rc) {
			pr_warn("%s: Cannot reset, err=%d\n", __func__, rc);
			goto recover_failed;
		}

		result = PCI_ERS_RESULT_NONE;
		eeh_set_channel_state(pe, pci_channel_io_normal);
		eeh_set_irq_state(pe, true);
		eeh_pe_report("slot_reset", pe, eeh_report_reset,
			      &result);
	}

	if ((result == PCI_ERS_RESULT_RECOVERED) ||
	    (result == PCI_ERS_RESULT_NONE)) {
		/*
		 * For those hot removed VFs, we should add back them after PF
		 * get recovered properly.
		 */
		list_for_each_entry_safe(edev, tmp, &rmv_data.removed_vf_list,
					 rmv_entry) {
			eeh_add_virt_device(edev);
			list_del(&edev->rmv_entry);
		}

		/* Tell all device drivers that they can resume operations */
		pr_info("EEH: Notify device driver to resume\n");
		eeh_set_channel_state(pe, pci_channel_io_normal);
		eeh_set_irq_state(pe, true);
		eeh_pe_report("resume", pe, eeh_report_resume, NULL);
		eeh_for_each_pe(pe, tmp_pe) {
			eeh_pe_for_each_dev(tmp_pe, edev, tmp) {
				edev->mode &= ~EEH_DEV_NO_HANDLER;
				edev->in_error = false;
			}
		}

		pr_info("EEH: Recovery successful.\n");
		goto out;
	}

recover_failed:
	/*
	 * About 90% of all real-life EEH failures in the field
	 * are due to poorly seated PCI cards. Only 10% or so are
	 * due to actual, failed cards.
	 */
	pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
	       "Please try reseating or replacing it\n",
	       pe->phb->global_number, pe->addr);

	eeh_slot_error_detail(pe, EEH_LOG_PERM);

	/* Notify all devices that they're about to go down. */
	eeh_set_irq_state(pe, false);
	eeh_pe_report("error_detected(permanent failure)", pe,
		      eeh_report_failure, NULL);
	eeh_set_channel_state(pe, pci_channel_io_perm_failure);

	/* Mark the PE to be removed permanently */
	eeh_pe_state_mark(pe, EEH_PE_REMOVED);

	/*
	 * Shut down the device drivers for good. We mark
	 * all removed devices correctly to avoid access
	 * to their PCI config any more.
	 */
	if (pe->type & EEH_PE_VF) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
	} else {
		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

		pci_lock_rescan_remove();
		pci_hp_remove_devices(bus);
		pci_unlock_rescan_remove();
		/* The passed PE should no longer be used */
		return;
	}

out:
	/*
	 * Clean up any PEs without devices. While marked as EEH_PE_RECOVERING
	 * we don't want to modify the PE tree structure so we do it here.
	 */
	eeh_pe_cleanup(pe);

	/* clear the slot attention LED for all recovered devices */
	eeh_for_each_pe(pe, tmp_pe)
		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
			eeh_clear_slot_attention(edev->pdev);

	eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
}
1109
1110/**
1111 * eeh_handle_special_event - Handle EEH events without a specific failing PE
1112 *
1113 * Called when an EEH event is detected but can't be narrowed down to a
1114 * specific PE. Iterates through possible failures and handles them as
1115 * necessary.
1116 */
1117void eeh_handle_special_event(void)
1118{
1119 struct eeh_pe *pe, *phb_pe, *tmp_pe;
1120 struct eeh_dev *edev, *tmp_edev;
1121 struct pci_bus *bus;
1122 struct pci_controller *hose;
1123 unsigned long flags;
1124 int rc;
1125
1126
1127 do {
1128 rc = eeh_ops->next_error(&pe);
1129
1130 switch (rc) {
1131 case EEH_NEXT_ERR_DEAD_IOC:
1132 /* Mark all PHBs in dead state */
1133 eeh_serialize_lock(&flags);
1134
1135 /* Purge all events */
1136 eeh_remove_event(NULL, true);
1137
1138 list_for_each_entry(hose, &hose_list, list_node) {
1139 phb_pe = eeh_phb_pe_get(hose);
1140 if (!phb_pe) continue;
1141
1142 eeh_pe_mark_isolated(phb_pe);
1143 }
1144
1145 eeh_serialize_unlock(flags);
1146
1147 break;
1148 case EEH_NEXT_ERR_FROZEN_PE:
1149 case EEH_NEXT_ERR_FENCED_PHB:
1150 case EEH_NEXT_ERR_DEAD_PHB:
1151 /* Mark the PE in fenced state */
1152 eeh_serialize_lock(&flags);
1153
1154 /* Purge all events of the PHB */
1155 eeh_remove_event(pe, true);
1156
1157 if (rc != EEH_NEXT_ERR_DEAD_PHB)
1158 eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
1159 eeh_pe_mark_isolated(pe);
1160
1161 eeh_serialize_unlock(flags);
1162
1163 break;
1164 case EEH_NEXT_ERR_NONE:
1165 return;
1166 default:
1167 pr_warn("%s: Invalid value %d from next_error()\n",
1168 __func__, rc);
1169 return;
1170 }
1171
1172 /*
1173 * For fenced PHB and frozen PE, it's handled as normal
1174 * event. We have to remove the affected PHBs for dead
1175 * PHB and IOC
1176 */
1177 if (rc == EEH_NEXT_ERR_FROZEN_PE ||
1178 rc == EEH_NEXT_ERR_FENCED_PHB) {
1179 eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
1180 eeh_handle_normal_event(pe);
1181 } else {
1182 eeh_for_each_pe(pe, tmp_pe)
1183 eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
1184 edev->mode &= ~EEH_DEV_NO_HANDLER;
1185
1186 /* Notify all devices to be down */
1187 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
1188 eeh_pe_report(
1189 "error_detected(permanent failure)", pe,
1190 eeh_report_failure, NULL);
1191 eeh_set_channel_state(pe, pci_channel_io_perm_failure);
1192
1193 pci_lock_rescan_remove();
1194 list_for_each_entry(hose, &hose_list, list_node) {
1195 phb_pe = eeh_phb_pe_get(hose);
1196 if (!phb_pe ||
1197 !(phb_pe->state & EEH_PE_ISOLATED) ||
1198 (phb_pe->state & EEH_PE_RECOVERING))
1199 continue;
1200
1201 bus = eeh_pe_bus_get(phb_pe);
1202 if (!bus) {
1203 pr_err("%s: Cannot find PCI bus for "
1204 "PHB#%x-PE#%x\n",
1205 __func__,
1206 pe->phb->global_number,
1207 pe->addr);
1208 break;
1209 }
1210 pci_hp_remove_devices(bus);
1211 }
1212 pci_unlock_rescan_remove();
1213 }
1214
1215 /*
1216 * If we have detected dead IOC, we needn't proceed
1217 * any more since all PHBs would have been removed
1218 */
1219 if (rc == EEH_NEXT_ERR_DEAD_IOC)
1220 break;
1221 } while (rc != EEH_NEXT_ERR_NONE);
1222}