v4.17
 
   1/*
   2 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
   3 * Copyright IBM Corp. 2004 2005
   4 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
   5 *
   6 * All rights reserved.
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License as published by
  10 * the Free Software Foundation; either version 2 of the License, or (at
  11 * your option) any later version.
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  16 * NON INFRINGEMENT.  See the GNU General Public License for more
  17 * details.
  18 *
  19 * You should have received a copy of the GNU General Public License
  20 * along with this program; if not, write to the Free Software
  21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  22 *
  23 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
  24 */
  25#include <linux/delay.h>
  26#include <linux/interrupt.h>
  27#include <linux/irq.h>
  28#include <linux/module.h>
  29#include <linux/pci.h>
  30#include <asm/eeh.h>
  31#include <asm/eeh_event.h>
  32#include <asm/ppc-pci.h>
  33#include <asm/pci-bridge.h>
  34#include <asm/prom.h>
  35#include <asm/rtas.h>
  36
  37struct eeh_rmv_data {
  38	struct list_head edev_list;
  39	int removed;
  40};
  41
  42/**
  43 * eeh_pcid_name - Retrieve name of PCI device driver
  44 * @pdev: PCI device
  45 *
  46 * This routine is used to retrieve the name of PCI device driver
  47 * if that's valid.
  48 */
  49static inline const char *eeh_pcid_name(struct pci_dev *pdev)
  50{
  51	if (pdev && pdev->dev.driver)
  52		return pdev->dev.driver->name;
  53	return "";
  54}
  55
  56/**
  57 * eeh_pcid_get - Get the PCI device driver
  58 * @pdev: PCI device
  59 *
  60 * The function is used to retrieve the PCI device driver for
  61 * the indicated PCI device. Besides, we will increase the reference
  62 * of the PCI device driver to prevent that being unloaded on
  63 * the fly. Otherwise, kernel crash would be seen.
  64 */
  65static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
  66{
  67	if (!pdev || !pdev->driver)
  68		return NULL;
  69
  70	if (!try_module_get(pdev->driver->driver.owner))
  71		return NULL;
  72
  73	return pdev->driver;
  74}
  75
  76/**
  77 * eeh_pcid_put - Dereference on the PCI device driver
  78 * @pdev: PCI device
  79 *
  80 * The function is called to do dereference on the PCI device
  81 * driver of the indicated PCI device.
  82 */
  83static inline void eeh_pcid_put(struct pci_dev *pdev)
  84{
  85	if (!pdev || !pdev->driver)
  86		return;
  87
  88	module_put(pdev->driver->driver.owner);
  89}
  90
  91/**
  92 * eeh_disable_irq - Disable interrupt for the recovering device
  93 * @dev: PCI device
  94 *
  95 * This routine must be called when reporting temporary or permanent
  96 * error to the particular PCI device to disable interrupt of that
  97 * device. If the device has enabled MSI or MSI-X interrupt, we needn't
  98 * do real work because EEH should freeze DMA transfers for those PCI
  99 * devices encountering EEH errors, which includes MSI or MSI-X.
 100 */
 101static void eeh_disable_irq(struct pci_dev *dev)
 102{
 103	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
 104
 105	/* Don't disable MSI and MSI-X interrupts. They are
 106	 * effectively disabled by the DMA Stopped state
 107	 * when an EEH error occurs.
 108	 */
 109	if (dev->msi_enabled || dev->msix_enabled)
 110		return;
 111
 112	if (!irq_has_action(dev->irq))
 113		return;
 114
 115	edev->mode |= EEH_DEV_IRQ_DISABLED;
 116	disable_irq_nosync(dev->irq);
 117}
 118
 119/**
 120 * eeh_enable_irq - Enable interrupt for the recovering device
 121 * @dev: PCI device
 122 *
 123 * This routine must be called to enable interrupt while failed
 124 * device could be resumed.
 125 */
 126static void eeh_enable_irq(struct pci_dev *dev)
 127{
 128	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
 129
 130	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
 131		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
 132		/*
 133		 * FIXME !!!!!
 134		 *
 135		 * This is just ass backwards. This maze has
 136		 * unbalanced irq_enable/disable calls. So instead of
 137		 * finding the root cause it works around the warning
 138		 * in the irq_enable code by conditionally calling
 139		 * into it.
 140		 *
  141		 * That's just wrong. The warning in the core code is
 142		 * there to tell people to fix their asymmetries in
 143		 * their own code, not by abusing the core information
 144		 * to avoid it.
 145		 *
  146		 * I so wish that the asymmetry would be the other way
 147		 * round and a few more irq_disable calls render that
 148		 * shit unusable forever.
 149		 *
 150		 *	tglx
 151		 */
 152		if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
 153			enable_irq(dev->irq);
 154	}
 155}
 156
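/* True if the EEH device is missing or has been marked EEH_DEV_REMOVED */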
 157static bool eeh_dev_removed(struct eeh_dev *edev)
 158{
 159	/* EEH device removed ? */
 160	if (!edev || (edev->mode & EEH_DEV_REMOVED))
 161		return true;
 162
 163	return false;
 164}
 165
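/* Traversal callback: save a device's PCI config space before the PE is reset */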
 166static void *eeh_dev_save_state(void *data, void *userdata)
 167{
 168	struct eeh_dev *edev = data;
 169	struct pci_dev *pdev;
 170
 171	if (!edev)
 172		return NULL;
 173
 174	/*
 175	 * We cannot access the config space on some adapters.
 176	 * Otherwise, it will cause fenced PHB. We don't save
 177	 * the content in their config space and will restore
 178	 * from the initial config space saved when the EEH
 179	 * device is created.
 180	 */
 181	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
 182		return NULL;
 183
 184	pdev = eeh_dev_to_pci_dev(edev);
 185	if (!pdev)
 186		return NULL;
 187
 188	pci_save_state(pdev);
 189	return NULL;
 190}
 191
 192/**
 193 * eeh_report_error - Report pci error to each device driver
 194 * @data: eeh device
 195 * @userdata: return value
 196 *
 197 * Report an EEH error to each device driver, collect up and
 198 * merge the device driver responses. Cumulative response
 199 * passed back in "userdata".
 200 */
 201static void *eeh_report_error(void *data, void *userdata)
 202{
 203	struct eeh_dev *edev = (struct eeh_dev *)data;
 204	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
 205	enum pci_ers_result rc, *res = userdata;
 206	struct pci_driver *driver;
 207
 208	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
 209		return NULL;
 210
 211	device_lock(&dev->dev);
 212	dev->error_state = pci_channel_io_frozen;
 213
 214	driver = eeh_pcid_get(dev);
 215	if (!driver) goto out_no_dev;
 216
 217	eeh_disable_irq(dev);
 218
 219	if (!driver->err_handler ||
 220	    !driver->err_handler->error_detected)
 221		goto out;
 222
 223	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
 224
 225	/* A driver that needs a reset trumps all others */
 226	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
 227	if (*res == PCI_ERS_RESULT_NONE) *res = rc;
 228
 229	edev->in_error = true;
 230	pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
 231
 232out:
 233	eeh_pcid_put(dev);
 234out_no_dev:
 235	device_unlock(&dev->dev);
 236	return NULL;
 237}
 238
 239/**
 240 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 241 * @data: eeh device
 242 * @userdata: return value
 243 *
 244 * Tells each device driver that IO ports, MMIO and config space I/O
 245 * are now enabled. Collects up and merges the device driver responses.
 246 * Cumulative response passed back in "userdata".
 247 */
 248static void *eeh_report_mmio_enabled(void *data, void *userdata)
 249{
 250	struct eeh_dev *edev = (struct eeh_dev *)data;
 251	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
 252	enum pci_ers_result rc, *res = userdata;
 253	struct pci_driver *driver;
 254
 255	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
 256		return NULL;
 257
 258	device_lock(&dev->dev);
 259	driver = eeh_pcid_get(dev);
 260	if (!driver) goto out_no_dev;
 261
 262	if (!driver->err_handler ||
 263	    !driver->err_handler->mmio_enabled ||
 264	    (edev->mode & EEH_DEV_NO_HANDLER))
 265		goto out;
 266
 267	rc = driver->err_handler->mmio_enabled(dev);
 268
 269	/* A driver that needs a reset trumps all others */
 270	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
 271	if (*res == PCI_ERS_RESULT_NONE) *res = rc;
 272
 273out:
 274	eeh_pcid_put(dev);
 275out_no_dev:
 276	device_unlock(&dev->dev);
 277	return NULL;
 278}
 279
 280/**
 281 * eeh_report_reset - Tell device that slot has been reset
 282 * @data: eeh device
 283 * @userdata: return value
 284 *
 285 * This routine must be called while EEH tries to reset particular
 286 * PCI device so that the associated PCI device driver could take
 287 * some actions, usually to save data the driver needs so that the
 288 * driver can work again while the device is recovered.
 289 */
 290static void *eeh_report_reset(void *data, void *userdata)
 291{
 292	struct eeh_dev *edev = (struct eeh_dev *)data;
 293	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
 294	enum pci_ers_result rc, *res = userdata;
 295	struct pci_driver *driver;
 296
 297	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
 298		return NULL;
 299
 300	device_lock(&dev->dev);
 301	dev->error_state = pci_channel_io_normal;
 302
 303	driver = eeh_pcid_get(dev);
 304	if (!driver) goto out_no_dev;
 305
 306	eeh_enable_irq(dev);
 307
 308	if (!driver->err_handler ||
 309	    !driver->err_handler->slot_reset ||
 310	    (edev->mode & EEH_DEV_NO_HANDLER) ||
 311	    (!edev->in_error))
 312		goto out;
 313
 314	rc = driver->err_handler->slot_reset(dev);
 315	if ((*res == PCI_ERS_RESULT_NONE) ||
 316	    (*res == PCI_ERS_RESULT_RECOVERED)) *res = rc;
 317	if (*res == PCI_ERS_RESULT_DISCONNECT &&
 318	     rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
 319
 320out:
 321	eeh_pcid_put(dev);
 322out_no_dev:
 323	device_unlock(&dev->dev);
 324	return NULL;
 325}
 326
 327static void *eeh_dev_restore_state(void *data, void *userdata)
 328{
 329	struct eeh_dev *edev = data;
 330	struct pci_dev *pdev;
 331
 332	if (!edev)
 333		return NULL;
 334
 335	/*
 336	 * The content in the config space isn't saved because
 337	 * the blocked config space on some adapters. We have
 338	 * to restore the initial saved config space when the
 339	 * EEH device is created.
 340	 */
 341	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
 342		if (list_is_last(&edev->list, &edev->pe->edevs))
 343			eeh_pe_restore_bars(edev->pe);
 344
 345		return NULL;
 346	}
 347
 348	pdev = eeh_dev_to_pci_dev(edev);
 349	if (!pdev)
 350		return NULL;
 351
 352	pci_restore_state(pdev);
 353	return NULL;
 354}
 355
 356/**
 357 * eeh_report_resume - Tell device to resume normal operations
 358 * @data: eeh device
 359 * @userdata: return value
 360 *
 361 * This routine must be called to notify the device driver that it
 362 * could resume so that the device driver can do some initialization
 363 * to make the recovered device work again.
 364 */
 365static void *eeh_report_resume(void *data, void *userdata)
 366{
 367	struct eeh_dev *edev = (struct eeh_dev *)data;
 368	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
 369	bool was_in_error;
 370	struct pci_driver *driver;
 371
 372	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
 373		return NULL;
 374
 375	device_lock(&dev->dev);
 376	dev->error_state = pci_channel_io_normal;
 377
 378	driver = eeh_pcid_get(dev);
 379	if (!driver) goto out_no_dev;
 380
 381	was_in_error = edev->in_error;
 382	edev->in_error = false;
 383	eeh_enable_irq(dev);
 384
 385	if (!driver->err_handler ||
 386	    !driver->err_handler->resume ||
 387	    (edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) {
 388		edev->mode &= ~EEH_DEV_NO_HANDLER;
 389		goto out;
 390	}
 391
 392	driver->err_handler->resume(dev);
 393
 394	pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
 395out:
 396	eeh_pcid_put(dev);
 397#ifdef CONFIG_PCI_IOV
 398	if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
 399		eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
 400#endif
 401out_no_dev:
 402	device_unlock(&dev->dev);
 403	return NULL;
 404}
 405
 406/**
 407 * eeh_report_failure - Tell device driver that device is dead.
 408 * @data: eeh device
 409 * @userdata: return value
 410 *
 411 * This informs the device driver that the device is permanently
 412 * dead, and that no further recovery attempts will be made on it.
 413 */
 414static void *eeh_report_failure(void *data, void *userdata)
 415{
 416	struct eeh_dev *edev = (struct eeh_dev *)data;
 417	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
 418	struct pci_driver *driver;
 419
 420	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
 421		return NULL;
 422
 423	device_lock(&dev->dev);
 424	dev->error_state = pci_channel_io_perm_failure;
 425
 426	driver = eeh_pcid_get(dev);
 427	if (!driver) goto out_no_dev;
 428
 429	eeh_disable_irq(dev);
 430
 431	if (!driver->err_handler ||
 432	    !driver->err_handler->error_detected)
 433		goto out;
 434
 435	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
 436
 437	pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
 438out:
 439	eeh_pcid_put(dev);
 440out_no_dev:
 441	device_unlock(&dev->dev);
 442	return NULL;
 443}
 444
 445static void *eeh_add_virt_device(void *data, void *userdata)
 446{
 447	struct pci_driver *driver;
 448	struct eeh_dev *edev = (struct eeh_dev *)data;
 449	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
 450	struct pci_dn *pdn = eeh_dev_to_pdn(edev);
 451
 452	if (!(edev->physfn)) {
 453		pr_warn("%s: EEH dev %04x:%02x:%02x.%01x not for VF\n",
 454			__func__, pdn->phb->global_number, pdn->busno,
 455			PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
 456		return NULL;
 457	}
 458
 459	driver = eeh_pcid_get(dev);
 460	if (driver) {
 461		eeh_pcid_put(dev);
 462		if (driver->err_handler)
 463			return NULL;
 464	}
 465
 466#ifdef CONFIG_PCI_IOV
 467	pci_iov_add_virtfn(edev->physfn, pdn->vf_index);
 468#endif
 469	return NULL;
 470}
 471
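/* Traversal callback: detach devices without EEH-aware drivers from the
 * PCI subsystem; removed VFs are queued on rmv_data so they can be re-added.
 */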
 472static void *eeh_rmv_device(void *data, void *userdata)
 473{
 474	struct pci_driver *driver;
 475	struct eeh_dev *edev = (struct eeh_dev *)data;
 476	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
 477	struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;
 478	int *removed = rmv_data ? &rmv_data->removed : NULL;
 479
 480	/*
 481	 * Actually, we should remove the PCI bridges as well.
 482	 * However, that's lots of complexity to do that,
 483	 * particularly some of devices under the bridge might
 484	 * support EEH. So we just care about PCI devices for
 485	 * simplicity here.
 486	 */
 487	if (!dev || (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
 488		return NULL;
 489
 490	/*
 491	 * We rely on count-based pcibios_release_device() to
 492	 * detach permanently offlined PEs. Unfortunately, that's
 493	 * not reliable enough. We might have the permanently
 494	 * offlined PEs attached, but we needn't take care of
 495	 * them and their child devices.
 496	 */
 497	if (eeh_dev_removed(edev))
 498		return NULL;
 499
 500	driver = eeh_pcid_get(dev);
 501	if (driver) {
 502		eeh_pcid_put(dev);
 503		if (removed &&
 504		    eeh_pe_passed(edev->pe))
 505			return NULL;
 506		if (removed &&
 507		    driver->err_handler &&
 508		    driver->err_handler->error_detected &&
 509		    driver->err_handler->slot_reset)
 510			return NULL;
 511	}
 512
 513	/* Remove it from PCI subsystem */
 514	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
 515		 pci_name(dev));
 516	edev->bus = dev->bus;
 517	edev->mode |= EEH_DEV_DISCONNECTED;
 518	if (removed)
 519		(*removed)++;
 520
 521	if (edev->physfn) {
 522#ifdef CONFIG_PCI_IOV
 523		struct pci_dn *pdn = eeh_dev_to_pdn(edev);
 524
 525		pci_iov_remove_virtfn(edev->physfn, pdn->vf_index);
 526		edev->pdev = NULL;
 527
 528		/*
 529		 * We have to set the VF PE number to invalid one, which is
 530		 * required to plug the VF successfully.
 531		 */
 532		pdn->pe_number = IODA_INVALID_PE;
 533#endif
 534		if (rmv_data)
 535			list_add(&edev->rmv_list, &rmv_data->edev_list);
 536	} else {
 537		pci_lock_rescan_remove();
 538		pci_stop_and_remove_bus_device(dev);
 539		pci_unlock_rescan_remove();
 540	}
 541
 542	return NULL;
 543}
 544
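/* Traversal callback: drop devices marked EEH_DEV_DISCONNECTED from their parent PE */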
 545static void *eeh_pe_detach_dev(void *data, void *userdata)
 546{
 547	struct eeh_pe *pe = (struct eeh_pe *)data;
 548	struct eeh_dev *edev, *tmp;
 549
 550	eeh_pe_for_each_dev(pe, edev, tmp) {
 551		if (!(edev->mode & EEH_DEV_DISCONNECTED))
 552			continue;
 553
 554		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
 555		eeh_rmv_from_parent_pe(edev);
 556	}
 557
 558	return NULL;
 559}
 560
 561/*
 562 * Explicitly clear PE's frozen state for PowerNV where
 563 * we have frozen PE until BAR restore is completed. It's
 564 * harmless to clear it for pSeries. To be consistent with
 565 * PE reset (for 3 times), we try to clear the frozen state
 566 * for 3 times as well.
 567 */
 568static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 569{
 570	struct eeh_pe *pe = (struct eeh_pe *)data;
 571	bool clear_sw_state = *(bool *)flag;
 572	int i, rc = 1;
 573
 574	for (i = 0; rc && i < 3; i++)
 575		rc = eeh_unfreeze_pe(pe, clear_sw_state);
 576
 577	/* Stop immediately on any errors */
 578	if (rc) {
 579		pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
 580			__func__, rc, pe->phb->global_number, pe->addr);
 581		return (void *)pe;
 582	}
 583
 584	return NULL;
 585}
 586
 587static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,
 588				     bool clear_sw_state)
 589{
 590	void *rc;
 591
 592	rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
 593	if (!rc)
 594		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
 595
 596	return rc ? -EIO : 0;
 597}
 598
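/* Save device state, fully reset the PE, clear its frozen state and
 * restore the saved state; skipped if the PE is already being recovered.
 */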
 599int eeh_pe_reset_and_recover(struct eeh_pe *pe)
 600{
 601	int ret;
 602
 603	/* Bail if the PE is being recovered */
 604	if (pe->state & EEH_PE_RECOVERING)
 605		return 0;
 606
 607	/* Put the PE into recovery mode */
 608	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
 609
 610	/* Save states */
 611	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);
 612
 613	/* Issue reset */
 614	ret = eeh_pe_reset_full(pe);
 615	if (ret) {
 616		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
 617		return ret;
 618	}
 619
 620	/* Unfreeze the PE */
 621	ret = eeh_clear_pe_frozen_state(pe, true);
 622	if (ret) {
 623		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
 624		return ret;
 625	}
 626
 627	/* Restore device state */
 628	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);
 629
 630	/* Clear recovery mode */
 631	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
 632
 633	return 0;
 634}
 635
 636/**
 637 * eeh_reset_device - Perform actual reset of a pci slot
 638 * @driver_eeh_aware: Does the device's driver provide EEH support?
 639 * @pe: EEH PE
  640 * @bus: PCI bus corresponding to the isolated slot
 641 * @rmv_data: Optional, list to record removed devices
 642 *
 643 * This routine must be called to do reset on the indicated PE.
 644 * During the reset, udev might be invoked because those affected
 645 * PCI devices will be removed and then added.
 646 */
 647static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
 648			    struct eeh_rmv_data *rmv_data,
 649			    bool driver_eeh_aware)
 650{
 651	time64_t tstamp;
 652	int cnt, rc;
 653	struct eeh_dev *edev;
 654
 655	/* pcibios will clear the counter; save the value */
 656	cnt = pe->freeze_count;
 657	tstamp = pe->tstamp;
 658
 659	/*
 660	 * We don't remove the corresponding PE instances because
  661 * we need the information afterwards. The attached EEH
 662	 * devices are expected to be attached soon when calling
 663	 * into pci_hp_add_devices().
 664	 */
 665	eeh_pe_state_mark(pe, EEH_PE_KEEP);
 666	if (driver_eeh_aware || (pe->type & EEH_PE_VF)) {
 667		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
 668	} else {
 669		pci_lock_rescan_remove();
 670		pci_hp_remove_devices(bus);
 671		pci_unlock_rescan_remove();
 672	}
 673
 674	/*
 675	 * Reset the pci controller. (Asserts RST#; resets config space).
 676	 * Reconfigure bridges and devices. Don't try to bring the system
 677	 * up if the reset failed for some reason.
 678	 *
 679	 * During the reset, it's very dangerous to have uncontrolled PCI
 680	 * config accesses. So we prefer to block them. However, controlled
 681	 * PCI config accesses initiated from EEH itself are allowed.
 682	 */
 683	rc = eeh_pe_reset_full(pe);
 684	if (rc)
 685		return rc;
 686
 687	pci_lock_rescan_remove();
 688
 689	/* Restore PE */
 690	eeh_ops->configure_bridge(pe);
 691	eeh_pe_restore_bars(pe);
 692
 693	/* Clear frozen state */
 694	rc = eeh_clear_pe_frozen_state(pe, false);
 695	if (rc) {
 696		pci_unlock_rescan_remove();
 697		return rc;
 698	}
 699
 700	/* Give the system 5 seconds to finish running the user-space
 701	 * hotplug shutdown scripts, e.g. ifdown for ethernet.  Yes,
 702	 * this is a hack, but if we don't do this, and try to bring
 703	 * the device up before the scripts have taken it down,
 704	 * potentially weird things happen.
 705	 */
 706	if (!driver_eeh_aware || rmv_data->removed) {
 707		pr_info("EEH: Sleep 5s ahead of %s hotplug\n",
 708			(driver_eeh_aware ? "partial" : "complete"));
 709		ssleep(5);
 710
 711		/*
 712		 * The EEH device is still connected with its parent
 713		 * PE. We should disconnect it so the binding can be
 714		 * rebuilt when adding PCI devices.
 715		 */
 716		edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
 717		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
 718		if (pe->type & EEH_PE_VF) {
 719			eeh_add_virt_device(edev, NULL);
 720		} else {
 721			if (!driver_eeh_aware)
 722				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
 723			pci_hp_add_devices(bus);
 724		}
 725	}
 726	eeh_pe_state_clear(pe, EEH_PE_KEEP);
 727
 728	pe->tstamp = tstamp;
 729	pe->freeze_count = cnt;
 730
 731	pci_unlock_rescan_remove();
 732	return 0;
 733}
 734
 735/* The longest amount of time to wait for a pci device
 736 * to come back on line, in seconds.
 737 */
 738#define MAX_WAIT_FOR_RECOVERY 300
 739
 740/**
 741 * eeh_handle_normal_event - Handle EEH events on a specific PE
 742 * @pe: EEH PE - which should not be used after we return, as it may
 743 * have been invalidated.
 744 *
 745 * Attempts to recover the given PE.  If recovery fails or the PE has failed
 746 * too many times, remove the PE.
 747 *
  748 * When the PHB detects address or data parity errors on a particular PCI
  749 * slot, the associated PE will be frozen. Besides, DMAs occurring
 750 * to wild addresses (which usually happen due to bugs in device
 751 * drivers or in PCI adapter firmware) can cause EEH error. #SERR,
 752 * #PERR or other misc PCI-related errors also can trigger EEH errors.
 753 *
 754 * Recovery process consists of unplugging the device driver (which
 755 * generated hotplug events to userspace), then issuing a PCI #RST to
 756 * the device, then reconfiguring the PCI config space for all bridges
 757 * & devices under this slot, and then finally restarting the device
 758 * drivers (which cause a second set of hotplug events to go out to
 759 * userspace).
 760 */
 761void eeh_handle_normal_event(struct eeh_pe *pe)
 762{
 763	struct pci_bus *bus;
 764	struct eeh_dev *edev, *tmp;
 765	int rc = 0;
 766	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
 767	struct eeh_rmv_data rmv_data = {LIST_HEAD_INIT(rmv_data.edev_list), 0};
 768
 769	bus = eeh_pe_bus_get(pe);
 770	if (!bus) {
 771		pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
 772			__func__, pe->phb->global_number, pe->addr);
 773		return;
 774	}
 775
 776	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
 777
 778	eeh_pe_update_time_stamp(pe);
 779	pe->freeze_count++;
 780	if (pe->freeze_count > eeh_max_freezes) {
 781		pr_err("EEH: PHB#%x-PE#%x has failed %d times in the\n"
 782		       "last hour and has been permanently disabled.\n",
 783		       pe->phb->global_number, pe->addr,
 784		       pe->freeze_count);
 785		goto hard_fail;
 786	}
 787	pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
 788		pe->freeze_count);
 789
 790	/* Walk the various device drivers attached to this slot through
 791	 * a reset sequence, giving each an opportunity to do what it needs
 792	 * to accomplish the reset.  Each child gets a report of the
 793	 * status ... if any child can't handle the reset, then the entire
 794	 * slot is dlpar removed and added.
 795	 *
 796	 * When the PHB is fenced, we have to issue a reset to recover from
 797	 * the error. Override the result if necessary to have partially
 798	 * hotplug for this case.
 799	 */
 800	pr_info("EEH: Notify device drivers to shutdown\n");
 801	eeh_pe_dev_traverse(pe, eeh_report_error, &result);
 802	if ((pe->type & EEH_PE_PHB) &&
 803	    result != PCI_ERS_RESULT_NONE &&
 804	    result != PCI_ERS_RESULT_NEED_RESET)
 805		result = PCI_ERS_RESULT_NEED_RESET;
 806
 807	/* Get the current PCI slot state. This can take a long time,
 808	 * sometimes over 300 seconds for certain systems.
 809	 */
 810	rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
 811	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
 812		pr_warn("EEH: Permanent failure\n");
 813		goto hard_fail;
 814	}
 815
 816	/* Since rtas may enable MMIO when posting the error log,
 817	 * don't post the error log until after all dev drivers
 818	 * have been informed.
 819	 */
 820	pr_info("EEH: Collect temporary log\n");
 821	eeh_slot_error_detail(pe, EEH_LOG_TEMP);
 822
 823	/* If all device drivers were EEH-unaware, then shut
 824	 * down all of the device drivers, and hope they
  825	 * go down willingly, without panicking the system.
 826	 */
 827	if (result == PCI_ERS_RESULT_NONE) {
 828		pr_info("EEH: Reset with hotplug activity\n");
 829		rc = eeh_reset_device(pe, bus, NULL, false);
 830		if (rc) {
 831			pr_warn("%s: Unable to reset, err=%d\n",
 832				__func__, rc);
 833			goto hard_fail;
 834		}
 835	}
 836
 837	/* If all devices reported they can proceed, then re-enable MMIO */
 838	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
 839		pr_info("EEH: Enable I/O for affected devices\n");
 840		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
 841
 842		if (rc < 0)
 843			goto hard_fail;
 844		if (rc) {
 845			result = PCI_ERS_RESULT_NEED_RESET;
 846		} else {
 847			pr_info("EEH: Notify device drivers to resume I/O\n");
 848			eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
 849		}
 850	}
 851
 852	/* If all devices reported they can proceed, then re-enable DMA */
 853	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
 854		pr_info("EEH: Enabled DMA for affected devices\n");
 855		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
 856
 857		if (rc < 0)
 858			goto hard_fail;
 859		if (rc) {
 860			result = PCI_ERS_RESULT_NEED_RESET;
 861		} else {
 862			/*
 863			 * We didn't do PE reset for the case. The PE
 864			 * is still in frozen state. Clear it before
 865			 * resuming the PE.
 866			 */
 867			eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
 868			result = PCI_ERS_RESULT_RECOVERED;
 869		}
 870	}
 871
 872	/* If any device has a hard failure, then shut off everything. */
 873	if (result == PCI_ERS_RESULT_DISCONNECT) {
 874		pr_warn("EEH: Device driver gave up\n");
 875		goto hard_fail;
 876	}
 877
 878	/* If any device called out for a reset, then reset the slot */
 879	if (result == PCI_ERS_RESULT_NEED_RESET) {
 880		pr_info("EEH: Reset without hotplug activity\n");
 881		rc = eeh_reset_device(pe, bus, &rmv_data, true);
 882		if (rc) {
 883			pr_warn("%s: Cannot reset, err=%d\n",
 884				__func__, rc);
 885			goto hard_fail;
 886		}
 887
 888		pr_info("EEH: Notify device drivers "
 889			"the completion of reset\n");
 890		result = PCI_ERS_RESULT_NONE;
 891		eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
 892	}
 893
 894	/* All devices should claim they have recovered by now. */
 895	if ((result != PCI_ERS_RESULT_RECOVERED) &&
 896	    (result != PCI_ERS_RESULT_NONE)) {
 897		pr_warn("EEH: Not recovered\n");
 898		goto hard_fail;
 899	}
 900
 901	/*
 902	 * For those hot removed VFs, we should add back them after PF get
 903	 * recovered properly.
 904	 */
 905	list_for_each_entry_safe(edev, tmp, &rmv_data.edev_list, rmv_list) {
 906		eeh_add_virt_device(edev, NULL);
 907		list_del(&edev->rmv_list);
 908	}
 909
 910	/* Tell all device drivers that they can resume operations */
 911	pr_info("EEH: Notify device driver to resume\n");
 912	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
 913
 914	goto final;
 915
 916hard_fail:
 917	/*
 918	 * About 90% of all real-life EEH failures in the field
 919	 * are due to poorly seated PCI cards. Only 10% or so are
 920	 * due to actual, failed cards.
 921	 */
 922	pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
 923	       "Please try reseating or replacing it\n",
 924		pe->phb->global_number, pe->addr);
 925
 926	eeh_slot_error_detail(pe, EEH_LOG_PERM);
 927
 928	/* Notify all devices that they're about to go down. */
 929	eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
 930
 931	/* Mark the PE to be removed permanently */
 932	eeh_pe_state_mark(pe, EEH_PE_REMOVED);
 933
 934	/*
 935	 * Shut down the device drivers for good. We mark
  936	 * all removed devices correctly to avoid accessing
  937	 * their PCI config any more.
 938	 */
 939	if (pe->type & EEH_PE_VF) {
 940		eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
 941		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
 942	} else {
 943		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
 944		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
 945
 946		pci_lock_rescan_remove();
 947		pci_hp_remove_devices(bus);
 948		pci_unlock_rescan_remove();
 949		/* The passed PE should no longer be used */
 950		return;
 951	}
 952final:
 953	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
 954}
 955
 956/**
 957 * eeh_handle_special_event - Handle EEH events without a specific failing PE
 958 *
 959 * Called when an EEH event is detected but can't be narrowed down to a
 960 * specific PE.  Iterates through possible failures and handles them as
 961 * necessary.
 962 */
 963void eeh_handle_special_event(void)
 964{
 965	struct eeh_pe *pe, *phb_pe;
 966	struct pci_bus *bus;
 967	struct pci_controller *hose;
 968	unsigned long flags;
 969	int rc;
 970
 971
 972	do {
 973		rc = eeh_ops->next_error(&pe);
 974
 975		switch (rc) {
 976		case EEH_NEXT_ERR_DEAD_IOC:
 977			/* Mark all PHBs in dead state */
 978			eeh_serialize_lock(&flags);
 979
 980			/* Purge all events */
 981			eeh_remove_event(NULL, true);
 982
 983			list_for_each_entry(hose, &hose_list, list_node) {
 984				phb_pe = eeh_phb_pe_get(hose);
 985				if (!phb_pe) continue;
 986
 987				eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
 988			}
 989
 990			eeh_serialize_unlock(flags);
 991
 992			break;
 993		case EEH_NEXT_ERR_FROZEN_PE:
 994		case EEH_NEXT_ERR_FENCED_PHB:
 995		case EEH_NEXT_ERR_DEAD_PHB:
 996			/* Mark the PE in fenced state */
 997			eeh_serialize_lock(&flags);
 998
 999			/* Purge all events of the PHB */
1000			eeh_remove_event(pe, true);
1001
1002			if (rc == EEH_NEXT_ERR_DEAD_PHB)
1003				eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
1004			else
1005				eeh_pe_state_mark(pe,
1006					EEH_PE_ISOLATED | EEH_PE_RECOVERING);
1007
1008			eeh_serialize_unlock(flags);
1009
1010			break;
1011		case EEH_NEXT_ERR_NONE:
1012			return;
1013		default:
1014			pr_warn("%s: Invalid value %d from next_error()\n",
1015				__func__, rc);
1016			return;
1017		}
1018
1019		/*
1020		 * For fenced PHB and frozen PE, it's handled as normal
1021		 * event. We have to remove the affected PHBs for dead
1022		 * PHB and IOC
1023		 */
1024		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
1025		    rc == EEH_NEXT_ERR_FENCED_PHB) {
1026			eeh_handle_normal_event(pe);
1027		} else {
1028			pci_lock_rescan_remove();
1029			list_for_each_entry(hose, &hose_list, list_node) {
1030				phb_pe = eeh_phb_pe_get(hose);
1031				if (!phb_pe ||
1032				    !(phb_pe->state & EEH_PE_ISOLATED) ||
1033				    (phb_pe->state & EEH_PE_RECOVERING))
1034					continue;
1035
1036				/* Notify all devices to be down */
1037				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
1038				eeh_pe_dev_traverse(pe,
1039					eeh_report_failure, NULL);
1040				bus = eeh_pe_bus_get(phb_pe);
1041				if (!bus) {
1042					pr_err("%s: Cannot find PCI bus for "
1043					       "PHB#%x-PE#%x\n",
1044					       __func__,
1045					       pe->phb->global_number,
1046					       pe->addr);
1047					break;
1048				}
1049				pci_hp_remove_devices(bus);
1050			}
1051			pci_unlock_rescan_remove();
1052		}
1053
1054		/*
1055		 * If we have detected dead IOC, we needn't proceed
1056		 * any more since all PHBs would have been removed
1057		 */
1058		if (rc == EEH_NEXT_ERR_DEAD_IOC)
1059			break;
1060	} while (rc != EEH_NEXT_ERR_NONE);
1061}
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
   4 * Copyright IBM Corp. 2004 2005
   5 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
   6 *
   7 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
   8 */
   9#include <linux/delay.h>
  10#include <linux/interrupt.h>
  11#include <linux/irq.h>
  12#include <linux/module.h>
  13#include <linux/pci.h>
  14#include <linux/pci_hotplug.h>
  15#include <asm/eeh.h>
  16#include <asm/eeh_event.h>
  17#include <asm/ppc-pci.h>
  18#include <asm/pci-bridge.h>
  19#include <asm/prom.h>
  20#include <asm/rtas.h>
  21
  22struct eeh_rmv_data {
  23	struct list_head removed_vf_list;
  24	int removed_dev_count;
  25};
  26
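/* Map a pci_ers_result to a priority so driver responses can be merged; higher wins */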
  27static int eeh_result_priority(enum pci_ers_result result)
  28{
  29	switch (result) {
  30	case PCI_ERS_RESULT_NONE:
  31		return 1;
  32	case PCI_ERS_RESULT_NO_AER_DRIVER:
  33		return 2;
  34	case PCI_ERS_RESULT_RECOVERED:
  35		return 3;
  36	case PCI_ERS_RESULT_CAN_RECOVER:
  37		return 4;
  38	case PCI_ERS_RESULT_DISCONNECT:
  39		return 5;
  40	case PCI_ERS_RESULT_NEED_RESET:
  41		return 6;
  42	default:
  43		WARN_ONCE(1, "Unknown pci_ers_result value: %d\n", (int)result);
  44		return 0;
  45	}
  46};
  47
  48static const char *pci_ers_result_name(enum pci_ers_result result)
  49{
  50	switch (result) {
  51	case PCI_ERS_RESULT_NONE:
  52		return "none";
  53	case PCI_ERS_RESULT_CAN_RECOVER:
  54		return "can recover";
  55	case PCI_ERS_RESULT_NEED_RESET:
  56		return "need reset";
  57	case PCI_ERS_RESULT_DISCONNECT:
  58		return "disconnect";
  59	case PCI_ERS_RESULT_RECOVERED:
  60		return "recovered";
  61	case PCI_ERS_RESULT_NO_AER_DRIVER:
  62		return "no AER driver";
  63	default:
  64		WARN_ONCE(1, "Unknown result type: %d\n", (int)result);
  65		return "unknown";
  66	}
  67};
  68
  69static enum pci_ers_result pci_ers_merge_result(enum pci_ers_result old,
  70						enum pci_ers_result new)
  71{
  72	if (eeh_result_priority(new) > eeh_result_priority(old))
  73		return new;
  74	return old;
  75}
  76
  77static bool eeh_dev_removed(struct eeh_dev *edev)
  78{
  79	return !edev || (edev->mode & EEH_DEV_REMOVED);
  80}
  81
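/* Can recovery callbacks still be delivered to this device? It must be
 * present, not permanently failed, not removed and not passed to a guest.
 */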
  82static bool eeh_edev_actionable(struct eeh_dev *edev)
  83{
  84	if (!edev->pdev)
  85		return false;
  86	if (edev->pdev->error_state == pci_channel_io_perm_failure)
  87		return false;
  88	if (eeh_dev_removed(edev))
  89		return false;
  90	if (eeh_pe_passed(edev->pe))
  91		return false;
  92
  93	return true;
  94}
  95
  96/**
  97 * eeh_pcid_get - Get the PCI device driver
  98 * @pdev: PCI device
  99 *
 100 * The function is used to retrieve the PCI device driver for
 101 * the indicated PCI device. Besides, we will increase the reference
 102 * of the PCI device driver to prevent that being unloaded on
 103 * the fly. Otherwise, kernel crash would be seen.
 104 */
 105static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
 106{
 107	if (!pdev || !pdev->driver)
 108		return NULL;
 109
 110	if (!try_module_get(pdev->driver->driver.owner))
 111		return NULL;
 112
 113	return pdev->driver;
 114}
 115
 116/**
 117 * eeh_pcid_put - Dereference on the PCI device driver
 118 * @pdev: PCI device
 119 *
 120 * The function is called to do dereference on the PCI device
 121 * driver of the indicated PCI device.
 122 */
 123static inline void eeh_pcid_put(struct pci_dev *pdev)
 124{
 125	if (!pdev || !pdev->driver)
 126		return;
 127
 128	module_put(pdev->driver->driver.owner);
 129}
 130
 131/**
 132 * eeh_disable_irq - Disable interrupt for the recovering device
 133 * @dev: PCI device
 134 *
 135 * This routine must be called when reporting temporary or permanent
 136 * error to the particular PCI device to disable interrupt of that
 137 * device. If the device has enabled MSI or MSI-X interrupt, we needn't
 138 * do real work because EEH should freeze DMA transfers for those PCI
 139 * devices encountering EEH errors, which includes MSI or MSI-X.
 140 */
 141static void eeh_disable_irq(struct eeh_dev *edev)
 142{
 143	/* Don't disable MSI and MSI-X interrupts. They are
 144	 * effectively disabled by the DMA Stopped state
 145	 * when an EEH error occurs.
 146	 */
 147	if (edev->pdev->msi_enabled || edev->pdev->msix_enabled)
 148		return;
 149
 150	if (!irq_has_action(edev->pdev->irq))
 151		return;
 152
 153	edev->mode |= EEH_DEV_IRQ_DISABLED;
 154	disable_irq_nosync(edev->pdev->irq);
 155}
 156
 157/**
 158 * eeh_enable_irq - Enable interrupt for the recovering device
 159 * @dev: PCI device
 160 *
 161 * This routine must be called to enable interrupt while failed
 162 * device could be resumed.
 163 */
 164static void eeh_enable_irq(struct eeh_dev *edev)
 165{
 166	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
 167		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
 168		/*
 169		 * FIXME !!!!!
 170		 *
 171		 * This is just ass backwards. This maze has
 172		 * unbalanced irq_enable/disable calls. So instead of
 173		 * finding the root cause it works around the warning
 174		 * in the irq_enable code by conditionally calling
 175		 * into it.
 176		 *
  177		 * That's just wrong. The warning in the core code is
 178		 * there to tell people to fix their asymmetries in
 179		 * their own code, not by abusing the core information
 180		 * to avoid it.
 181		 *
  182		 * I so wish that the asymmetry would be the other way
 183		 * round and a few more irq_disable calls render that
 184		 * shit unusable forever.
 185		 *
 186		 *	tglx
 187		 */
 188		if (irqd_irq_disabled(irq_get_irq_data(edev->pdev->irq)))
 189			enable_irq(edev->pdev->irq);
 190	}
 191}
 192
 193static void eeh_dev_save_state(struct eeh_dev *edev, void *userdata)
 194{
 195	struct pci_dev *pdev;
 196
 197	if (!edev)
 198		return;
 199
 200	/*
 201	 * We cannot access the config space on some adapters.
 202	 * Otherwise, it will cause fenced PHB. We don't save
 203	 * the content in their config space and will restore
 204	 * from the initial config space saved when the EEH
 205	 * device is created.
 206	 */
 207	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
 208		return;
 209
 210	pdev = eeh_dev_to_pci_dev(edev);
 211	if (!pdev)
 212		return;
 213
 214	pci_save_state(pdev);
 215}
 216
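/* Propagate a PCI channel state to every actionable device under the PE subtree */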
 217static void eeh_set_channel_state(struct eeh_pe *root, pci_channel_state_t s)
 218{
 219	struct eeh_pe *pe;
 220	struct eeh_dev *edev, *tmp;
 221
 222	eeh_for_each_pe(root, pe)
 223		eeh_pe_for_each_dev(pe, edev, tmp)
 224			if (eeh_edev_actionable(edev))
 225				edev->pdev->error_state = s;
 226}
 227
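/* Enable or disable the legacy IRQ of every actionable device under the PE subtree */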
 228static void eeh_set_irq_state(struct eeh_pe *root, bool enable)
 229{
 230	struct eeh_pe *pe;
 231	struct eeh_dev *edev, *tmp;
 232
 233	eeh_for_each_pe(root, pe) {
 234		eeh_pe_for_each_dev(pe, edev, tmp) {
 235			if (!eeh_edev_actionable(edev))
 236				continue;
 237
 238			if (!eeh_pcid_get(edev->pdev))
 239				continue;
 240
 241			if (enable)
 242				eeh_enable_irq(edev);
 243			else
 244				eeh_disable_irq(edev);
 245
 246			eeh_pcid_put(edev->pdev);
 247		}
 248	}
 249}
 250
 251typedef enum pci_ers_result (*eeh_report_fn)(struct eeh_dev *,
 252					     struct pci_dev *,
 253					     struct pci_driver *);
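/* Run one recovery callback on a single device and fold its response
 * into the aggregate result for the PE.
 */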
 254static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn,
 255			       enum pci_ers_result *result)
 256{
 257	struct pci_dev *pdev;
 258	struct pci_driver *driver;
 259	enum pci_ers_result new_result;
 260
 261	pci_lock_rescan_remove();
 262	pdev = edev->pdev;
 263	if (pdev)
 264		get_device(&pdev->dev);
 265	pci_unlock_rescan_remove();
 266	if (!pdev) {
 267		eeh_edev_info(edev, "no device");
 268		return;
 269	}
 270	device_lock(&pdev->dev);
 271	if (eeh_edev_actionable(edev)) {
 272		driver = eeh_pcid_get(pdev);
 273
 274		if (!driver)
 275			eeh_edev_info(edev, "no driver");
 276		else if (!driver->err_handler)
 277			eeh_edev_info(edev, "driver not EEH aware");
 278		else if (edev->mode & EEH_DEV_NO_HANDLER)
 279			eeh_edev_info(edev, "driver bound too late");
 280		else {
 281			new_result = fn(edev, pdev, driver);
 282			eeh_edev_info(edev, "%s driver reports: '%s'",
 283				      driver->name,
 284				      pci_ers_result_name(new_result));
 285			if (result)
 286				*result = pci_ers_merge_result(*result,
 287							       new_result);
 288		}
 289		if (driver)
 290			eeh_pcid_put(pdev);
 291	} else {
 292		eeh_edev_info(edev, "not actionable (%d,%d,%d)", !!pdev,
 293			      !eeh_dev_removed(edev), !eeh_pe_passed(edev->pe));
 294	}
 295	device_unlock(&pdev->dev);
 296	if (edev->pdev != pdev)
 297		eeh_edev_warn(edev, "Device changed during processing!\n");
 298	put_device(&pdev->dev);
 299}
 300
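/* Run a recovery callback on every device in the PE subtree and log the aggregate result */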
 301static void eeh_pe_report(const char *name, struct eeh_pe *root,
 302			  eeh_report_fn fn, enum pci_ers_result *result)
 303{
 304	struct eeh_pe *pe;
 305	struct eeh_dev *edev, *tmp;
 306
 307	pr_info("EEH: Beginning: '%s'\n", name);
 308	eeh_for_each_pe(root, pe) eeh_pe_for_each_dev(pe, edev, tmp)
 309		eeh_pe_report_edev(edev, fn, result);
 310	if (result)
 311		pr_info("EEH: Finished:'%s' with aggregate recovery state:'%s'\n",
 312			name, pci_ers_result_name(*result));
 313	else
 314		pr_info("EEH: Finished:'%s'", name);
 315}
 316
 317/**
 318 * eeh_report_error - Report pci error to each device driver
 319 * @edev: eeh device
 320 * @driver: device's PCI driver
 321 *
 322 * Report an EEH error to each device driver.
 323 */
 324static enum pci_ers_result eeh_report_error(struct eeh_dev *edev,
 325					    struct pci_dev *pdev,
 326					    struct pci_driver *driver)
 327{
 328	enum pci_ers_result rc;
 329
 330	if (!driver->err_handler->error_detected)
 331		return PCI_ERS_RESULT_NONE;
 332
 333	eeh_edev_info(edev, "Invoking %s->error_detected(IO frozen)",
 334		      driver->name);
 335	rc = driver->err_handler->error_detected(pdev, pci_channel_io_frozen);
 336
 337	edev->in_error = true;
 338	pci_uevent_ers(pdev, PCI_ERS_RESULT_NONE);
 339	return rc;
 340}
 341
 342/**
 343 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 344 * @edev: eeh device
 345 * @driver: device's PCI driver
 346 *
 347 * Tells each device driver that IO ports, MMIO and config space I/O
 348 * are now enabled.
 349 */
 350static enum pci_ers_result eeh_report_mmio_enabled(struct eeh_dev *edev,
 351						   struct pci_dev *pdev,
 352						   struct pci_driver *driver)
 353{
 354	if (!driver->err_handler->mmio_enabled)
 355		return PCI_ERS_RESULT_NONE;
 356	eeh_edev_info(edev, "Invoking %s->mmio_enabled()", driver->name);
 357	return driver->err_handler->mmio_enabled(pdev);
 358}
 359
 360/**
 361 * eeh_report_reset - Tell device that slot has been reset
 362 * @edev: eeh device
 363 * @driver: device's PCI driver
 364 *
 365 * This routine must be called while EEH tries to reset particular
 366 * PCI device so that the associated PCI device driver could take
 367 * some actions, usually to save data the driver needs so that the
 368 * driver can work again while the device is recovered.
 369 */
 370static enum pci_ers_result eeh_report_reset(struct eeh_dev *edev,
 371					    struct pci_dev *pdev,
 372					    struct pci_driver *driver)
 373{
 374	if (!driver->err_handler->slot_reset || !edev->in_error)
 375		return PCI_ERS_RESULT_NONE;
 376	eeh_edev_info(edev, "Invoking %s->slot_reset()", driver->name);
 377	return driver->err_handler->slot_reset(pdev);
 378}
 379
 380static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
 381{
 382	struct pci_dev *pdev;
 383
 384	if (!edev)
 385		return;
 386
 387	/*
 388	 * The content in the config space isn't saved because
 389	 * the blocked config space on some adapters. We have
 390	 * to restore the initial saved config space when the
 391	 * EEH device is created.
 392	 */
 393	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
 394		if (list_is_last(&edev->entry, &edev->pe->edevs))
 395			eeh_pe_restore_bars(edev->pe);
 396
 397		return;
 398	}
 399
 400	pdev = eeh_dev_to_pci_dev(edev);
 401	if (!pdev)
 402		return;
 403
 404	pci_restore_state(pdev);
 405}
 406
 407/**
 408 * eeh_report_resume - Tell device to resume normal operations
 409 * @edev: eeh device
 410 * @driver: device's PCI driver
 411 *
 412 * This routine must be called to notify the device driver that it
 413 * could resume so that the device driver can do some initialization
 414 * to make the recovered device work again.
 415 */
 416static enum pci_ers_result eeh_report_resume(struct eeh_dev *edev,
 417					     struct pci_dev *pdev,
 418					     struct pci_driver *driver)
 419{
 420	if (!driver->err_handler->resume || !edev->in_error)
 421		return PCI_ERS_RESULT_NONE;
 422
 423	eeh_edev_info(edev, "Invoking %s->resume()", driver->name);
 424	driver->err_handler->resume(pdev);
 425
 426	pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_RECOVERED);
 427#ifdef CONFIG_PCI_IOV
 428	if (eeh_ops->notify_resume)
 429		eeh_ops->notify_resume(edev);
 430#endif
 431	return PCI_ERS_RESULT_NONE;
 432}
 433
 434/**
 435 * eeh_report_failure - Tell device driver that device is dead.
 436 * @edev: eeh device
 437 * @driver: device's PCI driver
 438 *
 439 * This informs the device driver that the device is permanently
 440 * dead, and that no further recovery attempts will be made on it.
 441 */
 442static enum pci_ers_result eeh_report_failure(struct eeh_dev *edev,
 443					      struct pci_dev *pdev,
 444					      struct pci_driver *driver)
 445{
 446	enum pci_ers_result rc;
 447
 448	if (!driver->err_handler->error_detected)
 449		return PCI_ERS_RESULT_NONE;
 450
 451	eeh_edev_info(edev, "Invoking %s->error_detected(permanent failure)",
 452		      driver->name);
 453	rc = driver->err_handler->error_detected(pdev,
 454						 pci_channel_io_perm_failure);
 455
 456	pci_uevent_ers(pdev, PCI_ERS_RESULT_DISCONNECT);
 457	return rc;
 458}
 459
 460static void *eeh_add_virt_device(struct eeh_dev *edev)
 461{
 462	struct pci_driver *driver;
 463	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
 464
 465	if (!(edev->physfn)) {
 466		eeh_edev_warn(edev, "Not for VF\n");
 467		return NULL;
 468	}
 469
 470	driver = eeh_pcid_get(dev);
 471	if (driver) {
 472		if (driver->err_handler) {
 473			eeh_pcid_put(dev);
 474			return NULL;
 475		}
 476		eeh_pcid_put(dev);
 477	}
 478
 479#ifdef CONFIG_PCI_IOV
 480	pci_iov_add_virtfn(edev->physfn, edev->vf_index);
 481#endif
 482	return NULL;
 483}
 484
 485static void eeh_rmv_device(struct eeh_dev *edev, void *userdata)
 486{
 487	struct pci_driver *driver;
 488	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
 489	struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;
 490
 491	/*
 492	 * Actually, we should remove the PCI bridges as well.
 493	 * However, that's lots of complexity to do that,
 494	 * particularly some of devices under the bridge might
 495	 * support EEH. So we just care about PCI devices for
 496	 * simplicity here.
 497	 */
 498	if (!eeh_edev_actionable(edev) ||
 499	    (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
 500		return;
 501
 502	if (rmv_data) {
 503		driver = eeh_pcid_get(dev);
 504		if (driver) {
 505			if (driver->err_handler &&
 506			    driver->err_handler->error_detected &&
 507			    driver->err_handler->slot_reset) {
 508				eeh_pcid_put(dev);
 509				return;
 510			}
 511			eeh_pcid_put(dev);
 512		}
 513	}
 514
 515	/* Remove it from PCI subsystem */
 516	pr_info("EEH: Removing %s without EEH sensitive driver\n",
 517		pci_name(dev));
 518	edev->mode |= EEH_DEV_DISCONNECTED;
 519	if (rmv_data)
 520		rmv_data->removed_dev_count++;
 521
 522	if (edev->physfn) {
 523#ifdef CONFIG_PCI_IOV
 524		pci_iov_remove_virtfn(edev->physfn, edev->vf_index);
 525		edev->pdev = NULL;
 526#endif
 527		if (rmv_data)
 528			list_add(&edev->rmv_entry, &rmv_data->removed_vf_list);
 529	} else {
 530		pci_lock_rescan_remove();
 531		pci_stop_and_remove_bus_device(dev);
 532		pci_unlock_rescan_remove();
 533	}
 534}
 535
 536static void *eeh_pe_detach_dev(struct eeh_pe *pe, void *userdata)
 537{
 538	struct eeh_dev *edev, *tmp;
 539
 540	eeh_pe_for_each_dev(pe, edev, tmp) {
 541		if (!(edev->mode & EEH_DEV_DISCONNECTED))
 542			continue;
 543
 544		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
 545		eeh_pe_tree_remove(edev);
 546	}
 547
 548	return NULL;
 549}
 550
 551/*
 552 * Explicitly clear PE's frozen state for PowerNV where
 553 * we have frozen PE until BAR restore is completed. It's
 554 * harmless to clear it for pSeries. To be consistent with
 555 * PE reset (for 3 times), we try to clear the frozen state
 556 * for 3 times as well.
 557 */
 558static int eeh_clear_pe_frozen_state(struct eeh_pe *root, bool include_passed)
 559{
 560	struct eeh_pe *pe;
 561	int i;
 562
 563	eeh_for_each_pe(root, pe) {
 564		if (include_passed || !eeh_pe_passed(pe)) {
 565			for (i = 0; i < 3; i++)
 566				if (!eeh_unfreeze_pe(pe))
 567					break;
 568			if (i >= 3)
 569				return -EIO;
 570		}
 571	}
 572	eeh_pe_state_clear(root, EEH_PE_ISOLATED, include_passed);
 573	return 0;
 574}
 575
 576int eeh_pe_reset_and_recover(struct eeh_pe *pe)
 577{
 578	int ret;
 579
 580	/* Bail if the PE is being recovered */
 581	if (pe->state & EEH_PE_RECOVERING)
 582		return 0;
 583
 584	/* Put the PE into recovery mode */
 585	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
 586
 587	/* Save states */
 588	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);
 589
 590	/* Issue reset */
 591	ret = eeh_pe_reset_full(pe, true);
 592	if (ret) {
 593		eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
 594		return ret;
 595	}
 596
 597	/* Unfreeze the PE */
 598	ret = eeh_clear_pe_frozen_state(pe, true);
 599	if (ret) {
 600		eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
 601		return ret;
 602	}
 603
 604	/* Restore device state */
 605	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);
 606
 607	/* Clear recovery mode */
 608	eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
 609
 610	return 0;
 611}
 612
 613/**
 614 * eeh_reset_device - Perform actual reset of a pci slot
 615 * @driver_eeh_aware: Does the device's driver provide EEH support?
 616 * @pe: EEH PE
  617 * @bus: PCI bus corresponding to the isolated slot
 618 * @rmv_data: Optional, list to record removed devices
 619 *
 620 * This routine must be called to do reset on the indicated PE.
 621 * During the reset, udev might be invoked because those affected
 622 * PCI devices will be removed and then added.
 623 */
 624static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
 625			    struct eeh_rmv_data *rmv_data,
 626			    bool driver_eeh_aware)
 627{
 628	time64_t tstamp;
 629	int cnt, rc;
 630	struct eeh_dev *edev;
 631	struct eeh_pe *tmp_pe;
 632	bool any_passed = false;
 633
 634	eeh_for_each_pe(pe, tmp_pe)
 635		any_passed |= eeh_pe_passed(tmp_pe);
 636
 637	/* pcibios will clear the counter; save the value */
 638	cnt = pe->freeze_count;
 639	tstamp = pe->tstamp;
 640
 641	/*
 642	 * We don't remove the corresponding PE instances because
  643 * we need the information afterwards. The attached EEH
 644	 * devices are expected to be attached soon when calling
 645	 * into pci_hp_add_devices().
 646	 */
 647	eeh_pe_state_mark(pe, EEH_PE_KEEP);
 648	if (any_passed || driver_eeh_aware || (pe->type & EEH_PE_VF)) {
 649		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
 650	} else {
 651		pci_lock_rescan_remove();
 652		pci_hp_remove_devices(bus);
 653		pci_unlock_rescan_remove();
 654	}
 655
 656	/*
 657	 * Reset the pci controller. (Asserts RST#; resets config space).
 658	 * Reconfigure bridges and devices. Don't try to bring the system
 659	 * up if the reset failed for some reason.
 660	 *
 661	 * During the reset, it's very dangerous to have uncontrolled PCI
 662	 * config accesses. So we prefer to block them. However, controlled
 663	 * PCI config accesses initiated from EEH itself are allowed.
 664	 */
 665	rc = eeh_pe_reset_full(pe, false);
 666	if (rc)
 667		return rc;
 668
 669	pci_lock_rescan_remove();
 670
 671	/* Restore PE */
 672	eeh_ops->configure_bridge(pe);
 673	eeh_pe_restore_bars(pe);
 674
 675	/* Clear frozen state */
 676	rc = eeh_clear_pe_frozen_state(pe, false);
 677	if (rc) {
 678		pci_unlock_rescan_remove();
 679		return rc;
 680	}
 681
 682	/* Give the system 5 seconds to finish running the user-space
 683	 * hotplug shutdown scripts, e.g. ifdown for ethernet.  Yes,
 684	 * this is a hack, but if we don't do this, and try to bring
 685	 * the device up before the scripts have taken it down,
 686	 * potentially weird things happen.
 687	 */
 688	if (!driver_eeh_aware || rmv_data->removed_dev_count) {
 689		pr_info("EEH: Sleep 5s ahead of %s hotplug\n",
 690			(driver_eeh_aware ? "partial" : "complete"));
 691		ssleep(5);
 692
 693		/*
 694		 * The EEH device is still connected with its parent
 695		 * PE. We should disconnect it so the binding can be
 696		 * rebuilt when adding PCI devices.
 697		 */
 698		edev = list_first_entry(&pe->edevs, struct eeh_dev, entry);
 699		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
 700		if (pe->type & EEH_PE_VF) {
 701			eeh_add_virt_device(edev);
 702		} else {
 703			if (!driver_eeh_aware)
 704				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
 705			pci_hp_add_devices(bus);
 706		}
 707	}
 708	eeh_pe_state_clear(pe, EEH_PE_KEEP, true);
 709
 710	pe->tstamp = tstamp;
 711	pe->freeze_count = cnt;
 712
 713	pci_unlock_rescan_remove();
 714	return 0;
 715}
 716
 717/* The longest amount of time to wait for a pci device
 718 * to come back on line, in seconds.
 719 */
 720#define MAX_WAIT_FOR_RECOVERY 300
 721
 722
 723/* Walks the PE tree after processing an event to remove any stale PEs.
 724 *
 725 * NB: This needs to be recursive to ensure the leaf PEs get removed
  726 * before their parents do. Although this is possible to do iteratively,
  727 * we don't, since the recursive form is easier to read and we need to guarantee
 728 * the leaf nodes will be handled first.
 729 */
 730static void eeh_pe_cleanup(struct eeh_pe *pe)
 731{
 732	struct eeh_pe *child_pe, *tmp;
 733
 734	list_for_each_entry_safe(child_pe, tmp, &pe->child_list, child)
 735		eeh_pe_cleanup(child_pe);
 736
 737	if (pe->state & EEH_PE_KEEP)
 738		return;
 739
 740	if (!(pe->state & EEH_PE_INVALID))
 741		return;
 742
 743	if (list_empty(&pe->edevs) && list_empty(&pe->child_list)) {
 744		list_del(&pe->child);
 745		kfree(pe);
 746	}
 747}
 748
 749/**
 750 * eeh_check_slot_presence - Check if a device is still present in a slot
 751 * @pdev: pci_dev to check
 752 *
 753 * This function may return a false positive if we can't determine the slot's
  754 * presence state. This might happen for PCIe slots if the PE containing
 755 * the upstream bridge is also frozen, or the bridge is part of the same PE
 756 * as the device.
 757 *
 758 * This shouldn't happen often, but you might see it if you hotplug a PCIe
 759 * switch.
 760 */
 761static bool eeh_slot_presence_check(struct pci_dev *pdev)
 762{
 763	const struct hotplug_slot_ops *ops;
 764	struct pci_slot *slot;
 765	u8 state;
 766	int rc;
 767
 768	if (!pdev)
 769		return false;
 770
 771	if (pdev->error_state == pci_channel_io_perm_failure)
 772		return false;
 773
 774	slot = pdev->slot;
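     	/* Without a hotplug-capable slot we can't query presence, so assume
     	 * the device is still there. */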
 775	if (!slot || !slot->hotplug)
 776		return true;
 777
 778	ops = slot->hotplug->ops;
 779	if (!ops || !ops->get_adapter_status)
 780		return true;
 781
 782	/* set the attention indicator while we've got the slot ops */
 783	if (ops->set_attention_status)
 784		ops->set_attention_status(slot->hotplug, 1);
 785
 786	rc = ops->get_adapter_status(slot->hotplug, &state);
 787	if (rc)
 788		return true;
 789
 790	return !!state;
 791}
 792
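     /* Turn off the slot attention indicator that eeh_slot_presence_check()
      * may have switched on at the start of recovery. */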
 793static void eeh_clear_slot_attention(struct pci_dev *pdev)
 794{
 795	const struct hotplug_slot_ops *ops;
 796	struct pci_slot *slot;
 797
 798	if (!pdev)
 799		return;
 800
 801	if (pdev->error_state == pci_channel_io_perm_failure)
 802		return;
 803
 804	slot = pdev->slot;
 805	if (!slot || !slot->hotplug)
 806		return;
 807
 808	ops = slot->hotplug->ops;
 809	if (!ops || !ops->set_attention_status)
 810		return;
 811
 812	ops->set_attention_status(slot->hotplug, 0);
 813}
 814
 815/**
 816 * eeh_handle_normal_event - Handle EEH events on a specific PE
 817 * @pe: EEH PE - which should not be used after we return, as it may
 818 * have been invalidated.
 819 *
 820 * Attempts to recover the given PE.  If recovery fails or the PE has failed
 821 * too many times, remove the PE.
 822 *
  823 * When the PHB detects address or data parity errors on a particular
  824 * PCI slot, the associated PE will be frozen. In addition, DMAs to
  825 * wild addresses (which usually happen due to bugs in device drivers
  826 * or in PCI adapter firmware) can cause EEH errors. #SERR, #PERR or
  827 * other miscellaneous PCI-related errors can also trigger EEH errors.
  828 *
  829 * The recovery process consists of unplugging the device driver
  830 * (which generates hotplug events to userspace), then issuing a PCI
  831 * #RST to the device, then reconfiguring the PCI config space for all
  832 * bridges & devices under this slot, and finally restarting the device
  833 * drivers (which causes a second set of hotplug events to go out to
  834 * userspace).
 835 */
 836void eeh_handle_normal_event(struct eeh_pe *pe)
 837{
 838	struct pci_bus *bus;
 839	struct eeh_dev *edev, *tmp;
 840	struct eeh_pe *tmp_pe;
 841	int rc = 0;
 842	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
 843	struct eeh_rmv_data rmv_data =
 844		{LIST_HEAD_INIT(rmv_data.removed_vf_list), 0};
 845	int devices = 0;
 846
 847	bus = eeh_pe_bus_get(pe);
 848	if (!bus) {
 849		pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
 850			__func__, pe->phb->global_number, pe->addr);
 851		return;
 852	}
 853
 854	/*
 855	 * When devices are hot-removed we might get an EEH due to
 856	 * a driver attempting to touch the MMIO space of a removed
 857	 * device. In this case we don't have a device to recover
 858	 * so suppress the event if we can't find any present devices.
 859	 *
 860	 * The hotplug driver should take care of tearing down the
 861	 * device itself.
 862	 */
 863	eeh_for_each_pe(pe, tmp_pe)
 864		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
 865			if (eeh_slot_presence_check(edev->pdev))
 866				devices++;
 867
 868	if (!devices) {
 869		pr_debug("EEH: Frozen PHB#%x-PE#%x is empty!\n",
 870			pe->phb->global_number, pe->addr);
 871		goto out; /* nothing to recover */
 872	}
 873
 874	/* Log the event */
 875	if (pe->type & EEH_PE_PHB) {
 876		pr_err("EEH: Recovering PHB#%x, location: %s\n",
 877			pe->phb->global_number, eeh_pe_loc_get(pe));
 878	} else {
 879		struct eeh_pe *phb_pe = eeh_phb_pe_get(pe->phb);
 880
 881		pr_err("EEH: Recovering PHB#%x-PE#%x\n",
 882		       pe->phb->global_number, pe->addr);
 883		pr_err("EEH: PE location: %s, PHB location: %s\n",
 884		       eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
 885	}
 886
 887#ifdef CONFIG_STACKTRACE
 888	/*
 889	 * Print the saved stack trace now that we've verified there's
 890	 * something to recover.
 891	 */
 892	if (pe->trace_entries) {
 893		void **ptrs = (void **) pe->stack_trace;
 894		int i;
 895
 896		pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
 897		       pe->phb->global_number, pe->addr);
 898
 899		/* FIXME: Use the same format as dump_stack() */
 900		pr_err("EEH: Call Trace:\n");
 901		for (i = 0; i < pe->trace_entries; i++)
 902			pr_err("EEH: [%pK] %pS\n", ptrs[i], ptrs[i]);
 903
 904		pe->trace_entries = 0;
 905	}
 906#endif /* CONFIG_STACKTRACE */
 907
 908	eeh_pe_update_time_stamp(pe);
 909	pe->freeze_count++;
 910	if (pe->freeze_count > eeh_max_freezes) {
  911		pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n",
  912		       pe->phb->global_number, pe->addr,
 913		       pe->freeze_count);
 914		result = PCI_ERS_RESULT_DISCONNECT;
 915	}
 916
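     	/* Clear any stale "no error handler" markers before the device
     	 * drivers are notified below. */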
 917	eeh_for_each_pe(pe, tmp_pe)
 918		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
 919			edev->mode &= ~EEH_DEV_NO_HANDLER;
 920
 921	/* Walk the various device drivers attached to this slot through
 922	 * a reset sequence, giving each an opportunity to do what it needs
 923	 * to accomplish the reset.  Each child gets a report of the
 924	 * status ... if any child can't handle the reset, then the entire
 925	 * slot is dlpar removed and added.
 926	 *
 927	 * When the PHB is fenced, we have to issue a reset to recover from
  928	 * the error. Override the result if necessary so that we do a
  929	 * partial hotplug in this case.
 930	 */
 931	if (result != PCI_ERS_RESULT_DISCONNECT) {
 932		pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
 933			pe->freeze_count, eeh_max_freezes);
 934		pr_info("EEH: Notify device drivers to shutdown\n");
 935		eeh_set_channel_state(pe, pci_channel_io_frozen);
 936		eeh_set_irq_state(pe, false);
 937		eeh_pe_report("error_detected(IO frozen)", pe,
 938			      eeh_report_error, &result);
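     		/* A fenced PHB can only be recovered by a reset, so upgrade
     		 * any result other than NONE to PCI_ERS_RESULT_NEED_RESET. */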
 939		if ((pe->type & EEH_PE_PHB) &&
 940		    result != PCI_ERS_RESULT_NONE &&
 941		    result != PCI_ERS_RESULT_NEED_RESET)
 942			result = PCI_ERS_RESULT_NEED_RESET;
 943	}
 944
 945	/* Get the current PCI slot state. This can take a long time,
 946	 * sometimes over 300 seconds for certain systems.
 947	 */
 948	if (result != PCI_ERS_RESULT_DISCONNECT) {
 949		rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
 950		if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
 951			pr_warn("EEH: Permanent failure\n");
 952			result = PCI_ERS_RESULT_DISCONNECT;
 953		}
 954	}
 955
 956	/* Since rtas may enable MMIO when posting the error log,
 957	 * don't post the error log until after all dev drivers
 958	 * have been informed.
 959	 */
 960	if (result != PCI_ERS_RESULT_DISCONNECT) {
 961		pr_info("EEH: Collect temporary log\n");
 962		eeh_slot_error_detail(pe, EEH_LOG_TEMP);
 963	}
 964
 965	/* If all device drivers were EEH-unaware, then shut
 966	 * down all of the device drivers, and hope they
  967	 * go down willingly, without panicking the system.
 968	 */
 969	if (result == PCI_ERS_RESULT_NONE) {
 970		pr_info("EEH: Reset with hotplug activity\n");
 971		rc = eeh_reset_device(pe, bus, NULL, false);
 972		if (rc) {
 973			pr_warn("%s: Unable to reset, err=%d\n",
 974				__func__, rc);
 975			result = PCI_ERS_RESULT_DISCONNECT;
 976		}
 977	}
 978
 979	/* If all devices reported they can proceed, then re-enable MMIO */
 980	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
 981		pr_info("EEH: Enable I/O for affected devices\n");
 982		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
 983
 984		if (rc < 0) {
 985			result = PCI_ERS_RESULT_DISCONNECT;
 986		} else if (rc) {
 987			result = PCI_ERS_RESULT_NEED_RESET;
 988		} else {
 989			pr_info("EEH: Notify device drivers to resume I/O\n");
 990			eeh_pe_report("mmio_enabled", pe,
 991				      eeh_report_mmio_enabled, &result);
 992		}
 993	}
 994
 995	/* If all devices reported they can proceed, then re-enable DMA */
 996	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
 997		pr_info("EEH: Enabled DMA for affected devices\n");
 998		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
 999
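     		/* As above: negative rc is fatal, positive rc falls back to a
     		 * full reset, and rc == 0 means DMA has been re-enabled. */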
1000		if (rc < 0) {
1001			result = PCI_ERS_RESULT_DISCONNECT;
1002		} else if (rc) {
1003			result = PCI_ERS_RESULT_NEED_RESET;
1004		} else {
1005			/*
1006			 * We didn't do PE reset for the case. The PE
1007			 * is still in frozen state. Clear it before
1008			 * resuming the PE.
1009			 */
1010			eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
1011			result = PCI_ERS_RESULT_RECOVERED;
1012		}
1013	}
 1014
 1015	/* If any device called out for a reset, then reset the slot */
1016	if (result == PCI_ERS_RESULT_NEED_RESET) {
1017		pr_info("EEH: Reset without hotplug activity\n");
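     		/* Drivers stayed bound, so this is a driver-aware reset with
     		 * no full hotplug; any devices that still had to be removed
     		 * are collected in rmv_data. */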
1018		rc = eeh_reset_device(pe, bus, &rmv_data, true);
1019		if (rc) {
1020			pr_warn("%s: Cannot reset, err=%d\n",
1021				__func__, rc);
1022			result = PCI_ERS_RESULT_DISCONNECT;
1023		} else {
1024			result = PCI_ERS_RESULT_NONE;
1025			eeh_set_channel_state(pe, pci_channel_io_normal);
1026			eeh_set_irq_state(pe, true);
1027			eeh_pe_report("slot_reset", pe, eeh_report_reset,
1028				      &result);
 1029		}
 1030	}
1031
1032	if ((result == PCI_ERS_RESULT_RECOVERED) ||
1033	    (result == PCI_ERS_RESULT_NONE)) {
1034		/*
 1035		 * For VFs that were hot-removed, add them back once the PF
 1036		 * has been recovered properly.
1037		 */
1038		list_for_each_entry_safe(edev, tmp, &rmv_data.removed_vf_list,
1039					 rmv_entry) {
1040			eeh_add_virt_device(edev);
1041			list_del(&edev->rmv_entry);
 1042		}
 1043
1044		/* Tell all device drivers that they can resume operations */
1045		pr_info("EEH: Notify device driver to resume\n");
1046		eeh_set_channel_state(pe, pci_channel_io_normal);
1047		eeh_set_irq_state(pe, true);
1048		eeh_pe_report("resume", pe, eeh_report_resume, NULL);
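     		/* The PE is healthy again, so drop the per-device error
     		 * state everywhere in this PE tree. */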
1049		eeh_for_each_pe(pe, tmp_pe) {
1050			eeh_pe_for_each_dev(tmp_pe, edev, tmp) {
1051				edev->mode &= ~EEH_DEV_NO_HANDLER;
1052				edev->in_error = false;
1053			}
1054		}
1055
1056		pr_info("EEH: Recovery successful.\n");
1057	} else  {
1058		/*
1059		 * About 90% of all real-life EEH failures in the field
1060		 * are due to poorly seated PCI cards. Only 10% or so are
1061		 * due to actual, failed cards.
1062		 */
 1063		pr_err("EEH: Unable to recover from failure of PHB#%x-PE#%x.\n"
 1064		       "Please try reseating or replacing it.\n",
1065			pe->phb->global_number, pe->addr);
1066
1067		eeh_slot_error_detail(pe, EEH_LOG_PERM);
1068
1069		/* Notify all devices that they're about to go down. */
1070		eeh_set_channel_state(pe, pci_channel_io_perm_failure);
1071		eeh_set_irq_state(pe, false);
1072		eeh_pe_report("error_detected(permanent failure)", pe,
1073			      eeh_report_failure, NULL);
1074
1075		/* Mark the PE to be removed permanently */
1076		eeh_pe_state_mark(pe, EEH_PE_REMOVED);
1077
1078		/*
 1079		 * Shut down the device drivers for good. Mark all removed
 1080		 * devices correctly so that their PCI config space is never
 1081		 * accessed again.
1082		 */
1083		if (pe->type & EEH_PE_VF) {
1084			eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
1085			eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
1086		} else {
1087			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
1088			eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
1089
1090			pci_lock_rescan_remove();
1091			pci_hp_remove_devices(bus);
1092			pci_unlock_rescan_remove();
1093			/* The passed PE should no longer be used */
1094			return;
1095		}
1096	}
1097
1098out:
 1099	/*
 1100	 * Clean up any PEs without devices. While marked as EEH_PE_RECOVERING
 1101	 * we don't want to modify the PE tree structure, so we do it here.
 1102	 */
 1103	eeh_pe_cleanup(pe);
 1104
1105	/* clear the slot attention LED for all recovered devices */
1106	eeh_for_each_pe(pe, tmp_pe)
1107		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
1108			eeh_clear_slot_attention(edev->pdev);
1109
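     	/* Recovery of this PE is finished, one way or another, so drop the
     	 * RECOVERING state. */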
 1110	eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
 1111}
1112
1113/**
1114 * eeh_handle_special_event - Handle EEH events without a specific failing PE
1115 *
1116 * Called when an EEH event is detected but can't be narrowed down to a
1117 * specific PE.  Iterates through possible failures and handles them as
1118 * necessary.
1119 */
1120void eeh_handle_special_event(void)
1121{
1122	struct eeh_pe *pe, *phb_pe, *tmp_pe;
1123	struct eeh_dev *edev, *tmp_edev;
1124	struct pci_bus *bus;
1125	struct pci_controller *hose;
1126	unsigned long flags;
1127	int rc;
1128
1129
1130	do {
1131		rc = eeh_ops->next_error(&pe);
1132
1133		switch (rc) {
1134		case EEH_NEXT_ERR_DEAD_IOC:
1135			/* Mark all PHBs in dead state */
1136			eeh_serialize_lock(&flags);
1137
1138			/* Purge all events */
1139			eeh_remove_event(NULL, true);
1140
1141			list_for_each_entry(hose, &hose_list, list_node) {
1142				phb_pe = eeh_phb_pe_get(hose);
1143				if (!phb_pe) continue;
1144
1145				eeh_pe_mark_isolated(phb_pe);
1146			}
1147
1148			eeh_serialize_unlock(flags);
1149
1150			break;
1151		case EEH_NEXT_ERR_FROZEN_PE:
1152		case EEH_NEXT_ERR_FENCED_PHB:
1153		case EEH_NEXT_ERR_DEAD_PHB:
1154			/* Mark the PE in fenced state */
1155			eeh_serialize_lock(&flags);
1156
1157			/* Purge all events of the PHB */
1158			eeh_remove_event(pe, true);
1159
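     			/* A dead PHB will be torn down rather than recovered,
     			 * so only frozen PEs and fenced PHBs are marked as
     			 * recovering here. */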
1160			if (rc != EEH_NEXT_ERR_DEAD_PHB)
1161				eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
 1162			eeh_pe_mark_isolated(pe);
 1163
1164			eeh_serialize_unlock(flags);
1165
1166			break;
1167		case EEH_NEXT_ERR_NONE:
1168			return;
1169		default:
1170			pr_warn("%s: Invalid value %d from next_error()\n",
1171				__func__, rc);
1172			return;
1173		}
1174
1175		/*
1176		 * For fenced PHB and frozen PE, it's handled as normal
1177		 * event. We have to remove the affected PHBs for dead
1178		 * PHB and IOC
1179		 */
1180		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
1181		    rc == EEH_NEXT_ERR_FENCED_PHB) {
1182			eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
1183			eeh_handle_normal_event(pe);
1184		} else {
1185			eeh_for_each_pe(pe, tmp_pe)
1186				eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
1187					edev->mode &= ~EEH_DEV_NO_HANDLER;
1188
1189			/* Notify all devices to be down */
1190			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
1191			eeh_set_channel_state(pe, pci_channel_io_perm_failure);
1192			eeh_pe_report(
1193				"error_detected(permanent failure)", pe,
1194				eeh_report_failure, NULL);
1195
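     			/* Remove the devices under every PHB that was marked
     			 * isolated above and isn't already being recovered. */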
1196			pci_lock_rescan_remove();
1197			list_for_each_entry(hose, &hose_list, list_node) {
1198				phb_pe = eeh_phb_pe_get(hose);
1199				if (!phb_pe ||
1200				    !(phb_pe->state & EEH_PE_ISOLATED) ||
1201				    (phb_pe->state & EEH_PE_RECOVERING))
1202					continue;
 1203
 1204				bus = eeh_pe_bus_get(phb_pe);
1205				if (!bus) {
1206					pr_err("%s: Cannot find PCI bus for "
1207					       "PHB#%x-PE#%x\n",
1208					       __func__,
1209					       pe->phb->global_number,
1210					       pe->addr);
1211					break;
1212				}
1213				pci_hp_remove_devices(bus);
1214			}
1215			pci_unlock_rescan_remove();
1216		}
1217
1218		/*
1219		 * If we have detected dead IOC, we needn't proceed
1220		 * any more since all PHBs would have been removed
1221		 */
1222		if (rc == EEH_NEXT_ERR_DEAD_IOC)
1223			break;
1224	} while (rc != EEH_NEXT_ERR_NONE);
1225}