// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file implements PEs (Partitionable Endpoints) based on information
 * from the platform. Basically, there are 3 types of PEs: PHB/Bus/Device.
 * All PEs are organized as a hierarchy tree, and the first level of the
 * tree is associated with the existing PHBs, since a particular PE is
 * only meaningful within one PHB domain.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>

#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>

static int eeh_pe_aux_size;
static LIST_HEAD(eeh_phb_pe);

/**
 * eeh_set_pe_aux_size - Set PE auxiliary data size
 * @size: PE auxiliary data size
 *
 * Set the size of the auxiliary data attached to each PE.
 */
void eeh_set_pe_aux_size(int size)
{
	if (size < 0)
		return;

	eeh_pe_aux_size = size;
}

/**
 * eeh_pe_alloc - Allocate PE
 * @phb: PCI controller
 * @type: PE type
 *
 * Allocate PE instance dynamically.
 */
static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type)
{
	struct eeh_pe *pe;
	size_t alloc_size;

	alloc_size = sizeof(struct eeh_pe);
	if (eeh_pe_aux_size) {
		alloc_size = ALIGN(alloc_size, cache_line_size());
		alloc_size += eeh_pe_aux_size;
	}

	/* Allocate PHB PE */
	pe = kzalloc(alloc_size, GFP_KERNEL);
	if (!pe)
		return NULL;

	/* Initialize PHB PE */
	pe->type = type;
	pe->phb = phb;
	INIT_LIST_HEAD(&pe->child_list);
	INIT_LIST_HEAD(&pe->edevs);

	pe->data = (void *)pe + ALIGN(sizeof(struct eeh_pe),
				      cache_line_size());
	return pe;
}
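
/*
 * Note on pe->data: platform code may call eeh_set_pe_aux_size() before any
 * PE is allocated so that each eeh_pe carries a cache-line-aligned private
 * area reachable through pe->data.  A hedged, illustrative sketch (the
 * structure below is hypothetical, not part of this file):
 *
 *	struct my_platform_pe_data { u64 saved_regs[16]; };
 *
 *	eeh_set_pe_aux_size(sizeof(struct my_platform_pe_data));
 *	...
 *	struct my_platform_pe_data *priv = pe->data;
 *	priv->saved_regs[0] = 0;
 */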

/**
 * eeh_phb_pe_create - Create PHB PE
 * @phb: PCI controller
 *
 * The function should be called when a PHB is detected during
 * system boot or PCI hotplug in order to create its PHB PE.
 */
int eeh_phb_pe_create(struct pci_controller *phb)
{
	struct eeh_pe *pe;

	/* Allocate PHB PE */
	pe = eeh_pe_alloc(phb, EEH_PE_PHB);
	if (!pe) {
		pr_err("%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	/* Put it into the list */
	list_add_tail(&pe->child, &eeh_phb_pe);

	pr_debug("EEH: Add PE for PHB#%x\n", phb->global_number);

	return 0;
}

/**
 * eeh_wait_state - Wait for PE state
 * @pe: EEH PE
 * @max_wait: maximal period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
 */
int eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
	int ret;
	int mwait;

	/*
	 * According to PAPR, the state of a PE might be temporarily
	 * unavailable. In that case, we have to wait for the time
	 * indicated by the firmware. The maximal wait time is 5 minutes,
	 * taken from the original EEH implementation, which also defined
	 * the minimal wait time as 1 second.
	 */
#define EEH_STATE_MIN_WAIT_TIME	(1000)
#define EEH_STATE_MAX_WAIT_TIME	(300 * 1000)

	while (1) {
		ret = eeh_ops->get_state(pe, &mwait);

		if (ret != EEH_STATE_UNAVAILABLE)
			return ret;

		if (max_wait <= 0) {
			pr_warn("%s: Timeout when getting PE's state (%d)\n",
				__func__, max_wait);
			return EEH_STATE_NOT_SUPPORT;
		}

		if (mwait < EEH_STATE_MIN_WAIT_TIME) {
			pr_warn("%s: Firmware returned bad wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MIN_WAIT_TIME;
		} else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
			pr_warn("%s: Firmware returned too long wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MAX_WAIT_TIME;
		}

		msleep(min(mwait, max_wait));
		max_wait -= mwait;
	}
}
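
/*
 * Hedged caller sketch: recovery code typically waits for the PE to leave
 * the unavailable state and then checks the returned state bits, roughly:
 *
 *	int state = eeh_wait_state(pe, 5 * 60 * 1000);
 *
 *	if (state < 0 || !(state & EEH_STATE_MMIO_ACTIVE))
 *		pr_err("PE#%x did not become available\n", pe->addr);
 *
 * (EEH_STATE_MMIO_ACTIVE comes from asm/eeh.h; the error message is made up
 * for illustration.)
 */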

/**
 * eeh_phb_pe_get - Retrieve PHB PE based on the given PHB
 * @phb: PCI controller
 *
 * The overall PEs form a hierarchy tree. The first layer of the
 * tree is composed of PHB PEs. The function is used to retrieve
 * the PHB PE corresponding to the given PHB.
 */
struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
{
	struct eeh_pe *pe;

	list_for_each_entry(pe, &eeh_phb_pe, child) {
		/*
		 * Actually, we needn't check the type since
		 * the PE for the PHB was determined when it
		 * was created.
		 */
		if ((pe->type & EEH_PE_PHB) && pe->phb == phb)
			return pe;
	}

	return NULL;
}

/**
 * eeh_pe_next - Retrieve the next PE in the tree
 * @pe: current PE
 * @root: root PE
 *
 * The function is used to retrieve the next PE in the
 * hierarchy PE tree.
 */
struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root)
{
	struct list_head *next = pe->child_list.next;

	if (next == &pe->child_list) {
		while (1) {
			if (pe == root)
				return NULL;
			next = pe->child.next;
			if (next != &pe->parent->child_list)
				break;
			pe = pe->parent;
		}
	}

	return list_entry(next, struct eeh_pe, child);
}
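
/*
 * eeh_pe_next() implements a pre-order walk: it first descends to the
 * current PE's first child, and once a subtree is exhausted it climbs back
 * towards @root looking for the next sibling.  Combined with the
 * eeh_for_each_pe() macro this visits @root and every PE below it exactly
 * once.  A hedged sketch (the loop body is illustrative only):
 *
 *	struct eeh_pe *pe;
 *
 *	eeh_for_each_pe(root, pe)
 *		pr_debug("visiting PE#%x\n", pe->addr);
 */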

/**
 * eeh_pe_traverse - Traverse PEs in the specified PHB
 * @root: root PE
 * @fn: callback
 * @flag: extra parameter to callback
 *
 * The function is used to traverse the specified PE and its
 * child PEs. The traversal terminates once the callback returns
 * something other than NULL, or when there are no more PEs to
 * traverse.
 */
void *eeh_pe_traverse(struct eeh_pe *root,
		      eeh_pe_traverse_func fn, void *flag)
{
	struct eeh_pe *pe;
	void *ret;

	eeh_for_each_pe(root, pe) {
		ret = fn(pe, flag);
		if (ret)
			return ret;
	}

	return NULL;
}

/**
 * eeh_pe_dev_traverse - Traverse the devices from the PE
 * @root: EEH PE
 * @fn: function callback
 * @flag: extra parameter to callback
 *
 * The function is used to traverse the devices of the specified
 * PE and its child PEs.
 */
void eeh_pe_dev_traverse(struct eeh_pe *root,
			 eeh_edev_traverse_func fn, void *flag)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;

	if (!root) {
		pr_warn("%s: Invalid PE %p\n",
			__func__, root);
		return;
	}

	/* Traverse root PE */
	eeh_for_each_pe(root, pe)
		eeh_pe_for_each_dev(pe, edev, tmp)
			fn(edev, flag);
}
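
/*
 * Both traversal helpers take a callback plus an opaque "flag" cookie.  As a
 * hedged illustration (this helper is hypothetical, not part of the kernel),
 * a caller could count the devices below a PE like this:
 *
 *	static void count_edev(struct eeh_dev *edev, void *flag)
 *	{
 *		(*(int *)flag)++;
 *	}
 *
 *	int nr = 0;
 *	eeh_pe_dev_traverse(pe, count_edev, &nr);
 */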

/**
 * __eeh_pe_get - Check the PE address
 * @pe: EEH PE
 * @flag: lookup key (struct eeh_pe_get_flag)
 *
 * One particular PE can be identified by its PE address or by its
 * traditional BDF address, which is composed of the Bus/Device/Function
 * number. The extra data referred to by @flag indicates which type of
 * address should be used.
 */
struct eeh_pe_get_flag {
	int pe_no;
	int config_addr;
};

static void *__eeh_pe_get(struct eeh_pe *pe, void *flag)
{
	struct eeh_pe_get_flag *tmp = (struct eeh_pe_get_flag *) flag;

	/* Unexpected PHB PE */
	if (pe->type & EEH_PE_PHB)
		return NULL;

	/*
	 * We prefer the PE address. For most cases, we should
	 * have a non-zero PE address.
	 */
	if (eeh_has_flag(EEH_VALID_PE_ZERO)) {
		if (tmp->pe_no == pe->addr)
			return pe;
	} else {
		if (tmp->pe_no &&
		    (tmp->pe_no == pe->addr))
			return pe;
	}

	/* Try BDF address */
	if (tmp->config_addr &&
	    (tmp->config_addr == pe->config_addr))
		return pe;

	return NULL;
}

/**
 * eeh_pe_get - Search PE based on the given address
 * @phb: PCI controller
 * @pe_no: PE number
 * @config_addr: Config address
 *
 * Search for the corresponding PE based on the specified address,
 * which is included in the EEH device. The function is used to check
 * whether the associated PE has already been created for the PE
 * address. Note that the PE address has 2 formats: the traditional
 * address composed of the PCI bus/device/function number, or the
 * unified PE address.
 */
struct eeh_pe *eeh_pe_get(struct pci_controller *phb,
			  int pe_no, int config_addr)
{
	struct eeh_pe *root = eeh_phb_pe_get(phb);
	struct eeh_pe_get_flag tmp = { pe_no, config_addr };
	struct eeh_pe *pe;

	pe = eeh_pe_traverse(root, __eeh_pe_get, &tmp);

	return pe;
}
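
/*
 * Hedged usage sketch: probe code checks for an existing PE before creating
 * one, roughly as eeh_pe_tree_insert() below does:
 *
 *	pe = eeh_pe_get(hose, edev->pe_config_addr, edev->bdfn);
 *	if (!pe)
 *		pe = eeh_pe_alloc(hose, EEH_PE_DEVICE);
 */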

/**
 * eeh_pe_tree_insert - Add EEH device to parent PE
 * @edev: EEH device
 * @new_pe_parent: PE to create additional PEs under
 *
 * Add EEH device to the PE in edev->pe_config_addr. If a PE already
 * exists with that address then @edev is added to that PE. Otherwise
 * a new PE is created and inserted into the PE tree as a child of
 * @new_pe_parent.
 *
 * If @new_pe_parent is NULL then the new PE will be inserted directly
 * under the PHB.
 */
int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent)
{
	struct pci_controller *hose = edev->controller;
	struct eeh_pe *pe, *parent;

	/* Check if the PE number is valid */
	if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) {
		eeh_edev_err(edev, "PE#0 is invalid for this PHB!\n");
		return -EINVAL;
	}

	/*
	 * Check whether the PE already exists according to the PE
	 * address. If it does, the PE is composed of a PCI bus and
	 * its subordinate components.
	 */
	pe = eeh_pe_get(hose, edev->pe_config_addr, edev->bdfn);
	if (pe) {
		if (pe->type & EEH_PE_INVALID) {
			list_add_tail(&edev->entry, &pe->edevs);
			edev->pe = pe;
			/*
			 * We get here because of a PCI hotplug caused by
			 * EEH recovery. We need to clear EEH_PE_INVALID
			 * all the way up to the top of the tree.
			 */
			parent = pe;
			while (parent) {
				if (!(parent->type & EEH_PE_INVALID))
					break;
				parent->type &= ~EEH_PE_INVALID;
				parent = parent->parent;
			}

			eeh_edev_dbg(edev, "Added to existing PE (parent: PE#%x)\n",
				     pe->parent->addr);
		} else {
			/* Mark the PE as type of PCI bus */
			pe->type = EEH_PE_BUS;
			edev->pe = pe;

			/* Put the edev to PE */
			list_add_tail(&edev->entry, &pe->edevs);
			eeh_edev_dbg(edev, "Added to bus PE\n");
		}
		return 0;
	}

	/* Create a new EEH PE */
	if (edev->physfn)
		pe = eeh_pe_alloc(hose, EEH_PE_VF);
	else
		pe = eeh_pe_alloc(hose, EEH_PE_DEVICE);
	if (!pe) {
		pr_err("%s: out of memory!\n", __func__);
		return -ENOMEM;
	}
	pe->addr = edev->pe_config_addr;
	pe->config_addr = edev->bdfn;

	/*
	 * Put the new EEH PE into the hierarchy tree. If the parent
	 * can't be found, the newly created PE will be attached to
	 * the PHB directly. Otherwise, we have to associate the PE
	 * with its parent.
	 */
	if (!new_pe_parent) {
		new_pe_parent = eeh_phb_pe_get(hose);
		if (!new_pe_parent) {
			pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
			       __func__, hose->global_number);
			edev->pe = NULL;
			kfree(pe);
			return -EEXIST;
		}
	}

	/* Link the new PE into the tree */
	pe->parent = new_pe_parent;
	list_add_tail(&pe->child, &new_pe_parent->child_list);

	/*
	 * Put the newly created PE into the child list and
	 * link the EEH device accordingly.
	 */
	list_add_tail(&edev->entry, &pe->edevs);
	edev->pe = pe;
	eeh_edev_dbg(edev, "Added to new (parent: PE#%x)\n",
		     new_pe_parent->addr);

	return 0;
}
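
/*
 * Hedged usage sketch (the call sites are paraphrased, not quoted): during
 * probing the EEH core attaches a freshly created eeh_dev either directly
 * under its PHB PE or under a known parent PE:
 *
 *	eeh_pe_tree_insert(edev, NULL);		- parent defaults to the PHB PE
 *	eeh_pe_tree_insert(edev, parent_pe);	- explicit parent PE
 */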

/**
 * eeh_pe_tree_remove - Remove one EEH device from the associated PE
 * @edev: EEH device
 *
 * The PE hierarchy tree might be changed when doing PCI hotplug.
 * Also, the PCI devices or buses could be removed from the system
 * during EEH recovery. So we have to call this function to remove
 * the corresponding PE when necessary.
 */
int eeh_pe_tree_remove(struct eeh_dev *edev)
{
	struct eeh_pe *pe, *parent, *child;
	bool keep, recover;
	int cnt;

	pe = eeh_dev_to_pe(edev);
	if (!pe) {
		eeh_edev_dbg(edev, "No PE found for device.\n");
		return -EEXIST;
	}

	/* Remove the EEH device */
	edev->pe = NULL;
	list_del(&edev->entry);

	/*
	 * Check if the parent PE includes any EEH devices.
	 * If not, we should delete it. Also, we should
	 * delete the parent PE if it doesn't have associated
	 * child PEs and EEH devices.
	 */
	while (1) {
		parent = pe->parent;

		/* PHB PEs should never be removed */
		if (pe->type & EEH_PE_PHB)
			break;

		/*
		 * XXX: KEEP is set while resetting a PE. I don't think it's
		 * ever set without RECOVERING also being set. I could
		 * be wrong though so catch that with a WARN.
		 */
		keep = !!(pe->state & EEH_PE_KEEP);
		recover = !!(pe->state & EEH_PE_RECOVERING);
		WARN_ON(keep && !recover);

		if (!keep && !recover) {
			if (list_empty(&pe->edevs) &&
			    list_empty(&pe->child_list)) {
				list_del(&pe->child);
				kfree(pe);
			} else {
				break;
			}
		} else {
			/*
			 * Mark the PE as invalid. At the end of the recovery
			 * process any invalid PEs will be garbage collected.
			 *
			 * We need to delay the free()ing of them since we can
			 * remove edev's while traversing the PE tree which
			 * might trigger the removal of a PE and we can't
			 * deal with that (yet).
			 */
			if (list_empty(&pe->edevs)) {
				cnt = 0;
				list_for_each_entry(child, &pe->child_list, child) {
					if (!(child->type & EEH_PE_INVALID)) {
						cnt++;
						break;
					}
				}

				if (!cnt)
					pe->type |= EEH_PE_INVALID;
				else
					break;
			}
		}

		pe = parent;
	}

	return 0;
}

/**
 * eeh_pe_update_time_stamp - Update PE's frozen time stamp
 * @pe: EEH PE
 *
 * Each PE carries a time stamp used to track how often it has been
 * frozen within the last hour. The function should be called to update
 * the time stamp on the first error of the specific PE; errors that
 * happened more than an hour ago are not accounted for.
 */
void eeh_pe_update_time_stamp(struct eeh_pe *pe)
{
	time64_t tstamp;

	if (!pe)
		return;

	if (pe->freeze_count <= 0) {
		pe->freeze_count = 0;
		pe->tstamp = ktime_get_seconds();
	} else {
		tstamp = ktime_get_seconds();
		if (tstamp - pe->tstamp > 3600) {
			pe->tstamp = tstamp;
			pe->freeze_count = 0;
		}
	}
}

/**
 * eeh_pe_state_mark - Mark specified state for PE and its associated devices
 * @root: EEH PE
 * @state: state bits to set
 *
 * An EEH error affects the current PE and its child PEs. The function
 * is used to mark the appropriate state for the affected PEs and the
 * associated devices.
 */
void eeh_pe_state_mark(struct eeh_pe *root, int state)
{
	struct eeh_pe *pe;

	eeh_for_each_pe(root, pe)
		if (!(pe->state & EEH_PE_REMOVED))
			pe->state |= state;
}
EXPORT_SYMBOL_GPL(eeh_pe_state_mark);

/**
 * eeh_pe_mark_isolated - Mark a PE and its child PEs as isolated
 * @root: EEH PE
 *
 * Record that a PE has been isolated by marking the PE and its children as
 * EEH_PE_ISOLATED (and EEH_PE_CFG_BLOCKED, if required) and their PCI devices
 * as pci_channel_io_frozen.
 */
void eeh_pe_mark_isolated(struct eeh_pe *root)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev;
	struct pci_dev *pdev;

	eeh_pe_state_mark(root, EEH_PE_ISOLATED);
	eeh_for_each_pe(root, pe) {
		list_for_each_entry(edev, &pe->edevs, entry) {
			pdev = eeh_dev_to_pci_dev(edev);
			if (pdev)
				pdev->error_state = pci_channel_io_frozen;
		}
		/* Block PCI config access if required */
		if (pe->state & EEH_PE_CFG_RESTRICTED)
			pe->state |= EEH_PE_CFG_BLOCKED;
	}
}
EXPORT_SYMBOL_GPL(eeh_pe_mark_isolated);

static void __eeh_pe_dev_mode_mark(struct eeh_dev *edev, void *flag)
{
	int mode = *((int *)flag);

	edev->mode |= mode;
}

/**
 * eeh_pe_dev_mode_mark - Mark a mode for all devices under the PE
 * @pe: EEH PE
 * @mode: EEH device mode bits to set
 *
 * Mark the specified mode for all devices of the PE and its child PEs.
 */
void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode)
{
	eeh_pe_dev_traverse(pe, __eeh_pe_dev_mode_mark, &mode);
}
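
/*
 * Hedged usage sketch: the recovery core uses this helper to tag every
 * device under a PE with a mode flag, for example (flag name from
 * asm/eeh.h, call site paraphrased):
 *
 *	eeh_pe_dev_mode_mark(pe, EEH_DEV_NO_HANDLER);
 */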

/**
 * eeh_pe_state_clear - Clear state for the PE
 * @root: EEH PE
 * @state: state bits to clear
 * @include_passed: include passed-through devices?
 *
 * The function is used to clear the indicated state from the
 * given PE. We also clear the check count of the PE.
 */
void eeh_pe_state_clear(struct eeh_pe *root, int state, bool include_passed)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;

	eeh_for_each_pe(root, pe) {
		/* Keep the state of permanently removed PE intact */
		if (pe->state & EEH_PE_REMOVED)
			continue;

		if (!include_passed && eeh_pe_passed(pe))
			continue;

		pe->state &= ~state;

		/*
		 * Special treatment on clearing isolated state. Clear
		 * the check count since the last isolation and put all
		 * affected devices back into normal state.
		 */
		if (!(state & EEH_PE_ISOLATED))
			continue;

		pe->check_count = 0;
		eeh_pe_for_each_dev(pe, edev, tmp) {
			pdev = eeh_dev_to_pci_dev(edev);
			if (!pdev)
				continue;

			pdev->error_state = pci_channel_io_normal;
		}

		/* Unblock PCI config access if required */
		if (pe->state & EEH_PE_CFG_RESTRICTED)
			pe->state &= ~EEH_PE_CFG_BLOCKED;
	}
}

/*
 * Some PCI bridges (e.g. PLX bridges) have primary/secondary
 * buses assigned explicitly by firmware, and we probably have
 * lost that after reset. So we have to delay the check until
 * the PCI-CFG registers have been restored for the parent
 * bridge.
 *
 * Don't use the normal PCI-CFG accessors, which have probably
 * been blocked on the normal path during this stage. Instead,
 * use the EEH operations, which are always permitted.
 */
static void eeh_bridge_check_link(struct eeh_dev *edev)
{
	int cap;
	uint32_t val;
	int timeout = 0;

	/*
	 * We only check root ports and downstream ports of
	 * PCIe switches
	 */
	if (!(edev->mode & (EEH_DEV_ROOT_PORT | EEH_DEV_DS_PORT)))
		return;

	eeh_edev_dbg(edev, "Checking PCIe link...\n");

	/* Check slot status */
	cap = edev->pcie_cap;
	eeh_ops->read_config(edev, cap + PCI_EXP_SLTSTA, 2, &val);
	if (!(val & PCI_EXP_SLTSTA_PDS)) {
		eeh_edev_dbg(edev, "No card in the slot (0x%04x) !\n", val);
		return;
	}

	/* Check power status if we have the capability */
	eeh_ops->read_config(edev, cap + PCI_EXP_SLTCAP, 2, &val);
	if (val & PCI_EXP_SLTCAP_PCP) {
		eeh_ops->read_config(edev, cap + PCI_EXP_SLTCTL, 2, &val);
		if (val & PCI_EXP_SLTCTL_PCC) {
			eeh_edev_dbg(edev, "In power-off state, power it on ...\n");
			val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
			val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
			eeh_ops->write_config(edev, cap + PCI_EXP_SLTCTL, 2, val);
			msleep(2 * 1000);
		}
	}

	/* Enable link */
	eeh_ops->read_config(edev, cap + PCI_EXP_LNKCTL, 2, &val);
	val &= ~PCI_EXP_LNKCTL_LD;
	eeh_ops->write_config(edev, cap + PCI_EXP_LNKCTL, 2, val);

	/* Check link */
	eeh_ops->read_config(edev, cap + PCI_EXP_LNKCAP, 4, &val);
	if (!(val & PCI_EXP_LNKCAP_DLLLARC)) {
		eeh_edev_dbg(edev, "No link reporting capability (0x%08x) \n", val);
		msleep(1000);
		return;
	}

	/* Wait until the link is up or we time out (5s) */
	timeout = 0;
	while (timeout < 5000) {
		msleep(20);
		timeout += 20;

		eeh_ops->read_config(edev, cap + PCI_EXP_LNKSTA, 2, &val);
		if (val & PCI_EXP_LNKSTA_DLLLA)
			break;
	}

	if (val & PCI_EXP_LNKSTA_DLLLA)
		eeh_edev_dbg(edev, "Link up (%s)\n",
			     (val & PCI_EXP_LNKSTA_CLS_2_5GB) ? "2.5GB" : "5GB");
	else
		eeh_edev_dbg(edev, "Link not ready (0x%04x)\n", val);
}

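/*
 * The saved words in edev->config_space[] are indexed by 32-bit config
 * offset (word OFF/4), and, assuming the big-endian layout the original
 * code was written for, the byte at config offset OFF ends up at byte
 * 3 - OFF%4 of that word.  BYTE_SWAP() folds this into a single byte index
 * and SAVED_BYTE() applies it to the saved buffer; e.g. PCI_LATENCY_TIMER
 * (offset 0x0d) maps to byte BYTE_SWAP(0x0d) == 14 of the byte view.
 */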
#define BYTE_SWAP(OFF)	(8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF)	(((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])

static void eeh_restore_bridge_bars(struct eeh_dev *edev)
{
	int i;

	/*
	 * Device BARs: 0x10 - 0x18
	 * Bus numbers and windows: 0x18 - 0x30
	 */
	for (i = 4; i < 13; i++)
		eeh_ops->write_config(edev, i*4, 4, edev->config_space[i]);
	/* Rom: 0x38 */
	eeh_ops->write_config(edev, 14*4, 4, edev->config_space[14]);

	/* Cache line & Latency timer: 0xC 0xD */
	eeh_ops->write_config(edev, PCI_CACHE_LINE_SIZE, 1,
			      SAVED_BYTE(PCI_CACHE_LINE_SIZE));
	eeh_ops->write_config(edev, PCI_LATENCY_TIMER, 1,
			      SAVED_BYTE(PCI_LATENCY_TIMER));
	/* Max latency, min grant, interrupt pin and line: 0x3C */
	eeh_ops->write_config(edev, 15*4, 4, edev->config_space[15]);

	/* PCI Command: 0x4 */
	eeh_ops->write_config(edev, PCI_COMMAND, 4, edev->config_space[1] |
			      PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	/* Check the PCIe link is ready */
	eeh_bridge_check_link(edev);
}

static void eeh_restore_device_bars(struct eeh_dev *edev)
{
	int i;
	u32 cmd;

	for (i = 4; i < 10; i++)
		eeh_ops->write_config(edev, i*4, 4, edev->config_space[i]);
	/* 12 == Expansion ROM Address */
	eeh_ops->write_config(edev, 12*4, 4, edev->config_space[12]);

	eeh_ops->write_config(edev, PCI_CACHE_LINE_SIZE, 1,
			      SAVED_BYTE(PCI_CACHE_LINE_SIZE));
	eeh_ops->write_config(edev, PCI_LATENCY_TIMER, 1,
			      SAVED_BYTE(PCI_LATENCY_TIMER));

	/* max latency, min grant, interrupt pin and line */
	eeh_ops->write_config(edev, 15*4, 4, edev->config_space[15]);

	/*
	 * Restore PERR & SERR bits, some devices require it,
	 * don't touch the other command bits
	 */
	eeh_ops->read_config(edev, PCI_COMMAND, 4, &cmd);
	if (edev->config_space[1] & PCI_COMMAND_PARITY)
		cmd |= PCI_COMMAND_PARITY;
	else
		cmd &= ~PCI_COMMAND_PARITY;
	if (edev->config_space[1] & PCI_COMMAND_SERR)
		cmd |= PCI_COMMAND_SERR;
	else
		cmd &= ~PCI_COMMAND_SERR;
	eeh_ops->write_config(edev, PCI_COMMAND, 4, cmd);
}

/**
 * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
 * @edev: EEH device
 * @flag: Unused
 *
 * Loads the PCI configuration space base address registers,
 * the expansion ROM base address, the latency timer, etc.
 * from the saved values in the device node.
 */
static void eeh_restore_one_device_bars(struct eeh_dev *edev, void *flag)
{
	/* Do special restore for bridges */
	if (edev->mode & EEH_DEV_BRIDGE)
		eeh_restore_bridge_bars(edev);
	else
		eeh_restore_device_bars(edev);

	if (eeh_ops->restore_config)
		eeh_ops->restore_config(edev);
}

/**
 * eeh_pe_restore_bars - Restore the PCI config space info
 * @pe: EEH PE
 *
 * This routine performs a recursive walk to the children
 * of this device as well.
 */
void eeh_pe_restore_bars(struct eeh_pe *pe)
{
	/*
	 * We needn't take the EEH lock since eeh_pe_dev_traverse()
	 * will take it.
	 */
	eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
}

/**
 * eeh_pe_loc_get - Retrieve location code binding to the given PE
 * @pe: EEH PE
 *
 * Retrieve the location code of the given PE. If the primary PE bus
 * is the root bus, we grab the location code from the PHB device tree
 * node or the root port. Otherwise, the upstream bridge's device tree
 * node of the primary PE bus is checked for the location code.
 */
const char *eeh_pe_loc_get(struct eeh_pe *pe)
{
	struct pci_bus *bus = eeh_pe_bus_get(pe);
	struct device_node *dn;
	const char *loc = NULL;

	while (bus) {
		dn = pci_bus_to_OF_node(bus);
		if (!dn) {
			bus = bus->parent;
			continue;
		}

		if (pci_is_root_bus(bus))
			loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
		else
			loc = of_get_property(dn, "ibm,slot-location-code",
					      NULL);

		if (loc)
			return loc;

		bus = bus->parent;
	}

	return "N/A";
}

/**
 * eeh_pe_bus_get - Retrieve PCI bus according to the given PE
 * @pe: EEH PE
 *
 * Retrieve the PCI bus according to the given PE. Basically,
 * there are 3 types of PEs: PHB/Bus/Device. For a PHB PE, the
 * primary PCI bus is retrieved. The parent bus is returned for
 * a BUS PE. However, there is no associated PCI bus for a
 * DEVICE PE.
 */
struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
{
	struct eeh_dev *edev;
	struct pci_dev *pdev;

	if (pe->type & EEH_PE_PHB)
		return pe->phb->bus;

	/* The primary bus might be cached during probe time */
	if (pe->state & EEH_PE_PRI_BUS)
		return pe->bus;

	/* Retrieve the parent PCI bus of first (top) PCI device */
	edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
	pdev = eeh_dev_to_pci_dev(edev);
	if (pdev)
		return pdev->bus;

	return NULL;
}