xref: /openbmc/linux/arch/powerpc/kernel/eeh_driver.c (revision af958a38)
1 /*
2  * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
3  * Copyright IBM Corp. 2004 2005
4  * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
5  *
6  * All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or (at
11  * your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
16  * NON INFRINGEMENT.  See the GNU General Public License for more
17  * details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  *
23  * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
24  */
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/irq.h>
28 #include <linux/module.h>
29 #include <linux/pci.h>
30 #include <asm/eeh.h>
31 #include <asm/eeh_event.h>
32 #include <asm/ppc-pci.h>
33 #include <asm/pci-bridge.h>
34 #include <asm/prom.h>
35 #include <asm/rtas.h>
36 
37 /**
38  * eeh_pcid_name - Retrieve name of PCI device driver
39  * @pdev: PCI device
40  *
41  * This routine is used to retrieve the name of the PCI device
42  * driver, if one is bound; otherwise an empty string is returned.
43  */
44 static inline const char *eeh_pcid_name(struct pci_dev *pdev)
45 {
46 	if (pdev && pdev->dev.driver)
47 		return pdev->dev.driver->name;
48 	return "";
49 }
50 
51 /**
52  * eeh_pcid_get - Get the PCI device driver
53  * @pdev: PCI device
54  *
55  * The function is used to retrieve the PCI device driver for
56  * the indicated PCI device. It also takes a reference on the
57  * driver's module to prevent it from being unloaded on the
58  * fly; otherwise a kernel crash could result.
59  */
60 static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
61 {
62 	if (!pdev || !pdev->driver)
63 		return NULL;
64 
65 	if (!try_module_get(pdev->driver->driver.owner))
66 		return NULL;
67 
68 	return pdev->driver;
69 }
70 
71 /**
72  * eeh_pcid_put - Drop the reference on the PCI device driver
73  * @pdev: PCI device
74  *
75  * The function drops the module reference taken by eeh_pcid_get()
76  * on the driver of the indicated PCI device.
77  */
78 static inline void eeh_pcid_put(struct pci_dev *pdev)
79 {
80 	if (!pdev || !pdev->driver)
81 		return;
82 
83 	module_put(pdev->driver->driver.owner);
84 }
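/*
 * Illustrative usage sketch (not called anywhere in this file): the
 * get/put pairing that the eeh_report_*() callbacks below follow. The
 * callback name used here is hypothetical.
 *
 *	static void example_report_cb(struct pci_dev *dev)
 *	{
 *		struct pci_driver *driver = eeh_pcid_get(dev);
 *
 *		if (!driver)
 *			return;
 *
 *		if (driver->err_handler)
 *			;	// invoke the relevant err_handler hook here
 *
 *		eeh_pcid_put(dev);	// balance the module reference
 *	}
 */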
85 
86 #if 0
87 static void print_device_node_tree(struct pci_dn *pdn, int dent)
88 {
89 	int i;
90 	struct device_node *pc;
91 
92 	if (!pdn)
93 		return;
94 	for (i = 0; i < dent; i++)
95 		printk(" ");
96 	printk("dn=%s mode=%x \tcfg_addr=%x pe_addr=%x \tfull=%s\n",
97 		pdn->node->name, pdn->eeh_mode, pdn->eeh_config_addr,
98 		pdn->eeh_pe_config_addr, pdn->node->full_name);
99 	dent += 3;
100 	pc = pdn->node->child;
101 	while (pc) {
102 		print_device_node_tree(PCI_DN(pc), dent);
103 		pc = pc->sibling;
104 	}
105 }
106 #endif
107 
108 /**
109  * eeh_disable_irq - Disable interrupt for the recovering device
110  * @dev: PCI device
111  *
112  * This routine must be called when reporting a temporary or permanent
113  * error to a particular PCI device, to disable that device's interrupt.
114  * If the device has MSI or MSI-X enabled, no real work is needed,
115  * because EEH freezes DMA transfers for PCI devices hit by an EEH
116  * error, which effectively disables MSI and MSI-X as well.
117  */
118 static void eeh_disable_irq(struct pci_dev *dev)
119 {
120 	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
121 
122 	/* Don't disable MSI and MSI-X interrupts. They are
123 	 * effectively disabled by the DMA Stopped state
124 	 * when an EEH error occurs.
125 	 */
126 	if (dev->msi_enabled || dev->msix_enabled)
127 		return;
128 
129 	if (!irq_has_action(dev->irq))
130 		return;
131 
132 	edev->mode |= EEH_DEV_IRQ_DISABLED;
133 	disable_irq_nosync(dev->irq);
134 }
135 
136 /**
137  * eeh_enable_irq - Enable interrupt for the recovering device
138  * @dev: PCI device
139  *
140  * This routine must be called to re-enable the interrupt when the
141  * failed device is about to be resumed.
142  */
143 static void eeh_enable_irq(struct pci_dev *dev)
144 {
145 	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
146 
147 	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
148 		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
149 		/*
150 		 * FIXME !!!!!
151 		 *
152 		 * This is just ass backwards. This maze has
153 		 * unbalanced irq_enable/disable calls. So instead of
154 		 * finding the root cause it works around the warning
155 		 * in the irq_enable code by conditionally calling
156 		 * into it.
157 		 *
158 		 * That's just wrong. The warning in the core code is
159 		 * there to tell people to fix their asymmetries in
160 		 * their own code, not by abusing the core information
161 		 * to avoid it.
162 		 *
163 		 * I so wish that the asymmetry would be the other way
164 		 * round and a few more irq_disable calls render that
165 		 * shit unusable forever.
166 		 *
167 		 *	tglx
168 		 */
169 		if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
170 			enable_irq(dev->irq);
171 	}
172 }
173 
174 static bool eeh_dev_removed(struct eeh_dev *edev)
175 {
176 	/* EEH device removed ? */
177 	if (!edev || (edev->mode & EEH_DEV_REMOVED))
178 		return true;
179 
180 	return false;
181 }
182 
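/*
 * Note on the eeh_report_*() callbacks below: each one merges the
 * driver's pci_ers_result into *res (the "userdata" pointer), so that
 * PCI_ERS_RESULT_NONE is always overridden and PCI_ERS_RESULT_NEED_RESET
 * takes precedence over a plain "recovered" answer. Roughly, the
 * strongest request from any driver on the PE decides the next step
 * taken by eeh_handle_normal_event().
 */
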
183 /**
184  * eeh_report_error - Report pci error to each device driver
185  * @data: eeh device
186  * @userdata: return value
187  *
188  * Report an EEH error to each device driver, collect up and
189  * merge the device driver responses. Cumulative response
190  * passed back in "userdata".
191  */
192 static void *eeh_report_error(void *data, void *userdata)
193 {
194 	struct eeh_dev *edev = (struct eeh_dev *)data;
195 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
196 	enum pci_ers_result rc, *res = userdata;
197 	struct pci_driver *driver;
198 
199 	if (!dev || eeh_dev_removed(edev))
200 		return NULL;
201 	dev->error_state = pci_channel_io_frozen;
202 
203 	driver = eeh_pcid_get(dev);
204 	if (!driver) return NULL;
205 
206 	eeh_disable_irq(dev);
207 
208 	if (!driver->err_handler ||
209 	    !driver->err_handler->error_detected) {
210 		eeh_pcid_put(dev);
211 		return NULL;
212 	}
213 
214 	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
215 
216 	/* A driver that needs a reset trumps all others */
217 	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
218 	if (*res == PCI_ERS_RESULT_NONE) *res = rc;
219 
220 	eeh_pcid_put(dev);
221 	return NULL;
222 }
223 
224 /**
225  * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
226  * @data: eeh device
227  * @userdata: return value
228  *
229  * Tells each device driver that IO ports, MMIO and config space I/O
230  * are now enabled. Collects up and merges the device driver responses.
231  * Cumulative response passed back in "userdata".
232  */
233 static void *eeh_report_mmio_enabled(void *data, void *userdata)
234 {
235 	struct eeh_dev *edev = (struct eeh_dev *)data;
236 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
237 	enum pci_ers_result rc, *res = userdata;
238 	struct pci_driver *driver;
239 
240 	if (!dev || eeh_dev_removed(edev))
241 		return NULL;
242 
243 	driver = eeh_pcid_get(dev);
244 	if (!driver) return NULL;
245 
246 	if (!driver->err_handler ||
247 	    !driver->err_handler->mmio_enabled ||
248 	    (edev->mode & EEH_DEV_NO_HANDLER)) {
249 		eeh_pcid_put(dev);
250 		return NULL;
251 	}
252 
253 	rc = driver->err_handler->mmio_enabled(dev);
254 
255 	/* A driver that needs a reset trumps all others */
256 	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
257 	if (*res == PCI_ERS_RESULT_NONE) *res = rc;
258 
259 	eeh_pcid_put(dev);
260 	return NULL;
261 }
262 
263 /**
264  * eeh_report_reset - Tell device that slot has been reset
265  * @data: eeh device
266  * @userdata: return value
267  *
268  * This routine must be called while EEH tries to reset a particular
269  * PCI device, so that the associated PCI device driver can take
270  * some actions, usually saving data the driver needs, so that the
271  * driver can work again once the device is recovered.
272  */
273 static void *eeh_report_reset(void *data, void *userdata)
274 {
275 	struct eeh_dev *edev = (struct eeh_dev *)data;
276 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
277 	enum pci_ers_result rc, *res = userdata;
278 	struct pci_driver *driver;
279 
280 	if (!dev || eeh_dev_removed(edev))
281 		return NULL;
282 	dev->error_state = pci_channel_io_normal;
283 
284 	driver = eeh_pcid_get(dev);
285 	if (!driver) return NULL;
286 
287 	eeh_enable_irq(dev);
288 
289 	if (!driver->err_handler ||
290 	    !driver->err_handler->slot_reset ||
291 	    (edev->mode & EEH_DEV_NO_HANDLER)) {
292 		eeh_pcid_put(dev);
293 		return NULL;
294 	}
295 
296 	rc = driver->err_handler->slot_reset(dev);
297 	if ((*res == PCI_ERS_RESULT_NONE) ||
298 	    (*res == PCI_ERS_RESULT_RECOVERED)) *res = rc;
299 	if (*res == PCI_ERS_RESULT_DISCONNECT &&
300 	     rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
301 
302 	eeh_pcid_put(dev);
303 	return NULL;
304 }
305 
306 /**
307  * eeh_report_resume - Tell device to resume normal operations
308  * @data: eeh device
309  * @userdata: return value
310  *
311  * This routine must be called to notify the device driver that it
312  * may resume operations, so that the driver can do whatever
313  * initialization is needed to make the recovered device work again.
314  */
315 static void *eeh_report_resume(void *data, void *userdata)
316 {
317 	struct eeh_dev *edev = (struct eeh_dev *)data;
318 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
319 	struct pci_driver *driver;
320 
321 	if (!dev || eeh_dev_removed(edev))
322 		return NULL;
323 	dev->error_state = pci_channel_io_normal;
324 
325 	driver = eeh_pcid_get(dev);
326 	if (!driver) return NULL;
327 
328 	eeh_enable_irq(dev);
329 
330 	if (!driver->err_handler ||
331 	    !driver->err_handler->resume ||
332 	    (edev->mode & EEH_DEV_NO_HANDLER)) {
333 		edev->mode &= ~EEH_DEV_NO_HANDLER;
334 		eeh_pcid_put(dev);
335 		return NULL;
336 	}
337 
338 	driver->err_handler->resume(dev);
339 
340 	eeh_pcid_put(dev);
341 	return NULL;
342 }
343 
344 /**
345  * eeh_report_failure - Tell device driver that device is dead.
346  * @data: eeh device
347  * @userdata: return value
348  *
349  * This informs the device driver that the device is permanently
350  * dead, and that no further recovery attempts will be made on it.
351  */
352 static void *eeh_report_failure(void *data, void *userdata)
353 {
354 	struct eeh_dev *edev = (struct eeh_dev *)data;
355 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
356 	struct pci_driver *driver;
357 
358 	if (!dev || eeh_dev_removed(edev))
359 		return NULL;
360 	dev->error_state = pci_channel_io_perm_failure;
361 
362 	driver = eeh_pcid_get(dev);
363 	if (!driver) return NULL;
364 
365 	eeh_disable_irq(dev);
366 
367 	if (!driver->err_handler ||
368 	    !driver->err_handler->error_detected) {
369 		eeh_pcid_put(dev);
370 		return NULL;
371 	}
372 
373 	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
374 
375 	eeh_pcid_put(dev);
376 	return NULL;
377 }
378 
379 static void *eeh_rmv_device(void *data, void *userdata)
380 {
381 	struct pci_driver *driver;
382 	struct eeh_dev *edev = (struct eeh_dev *)data;
383 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
384 	int *removed = (int *)userdata;
385 
386 	/*
387 	 * Actually, we should remove the PCI bridges as well.
388 	 * However, doing so adds a lot of complexity, particularly
389 	 * since some of the devices under the bridge might support
390 	 * EEH. So we just care about PCI devices here for
391 	 * simplicity.
392 	 */
393 	if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
394 		return NULL;
395 
396 	/*
397 	 * We rely on the reference-counted pcibios_release_device()
398 	 * to detach permanently offlined PEs. Unfortunately, that's
399 	 * not reliable enough: permanently offlined PEs might still
400 	 * be attached, but we needn't take care of them or their
401 	 * child devices.
402 	 */
403 	if (eeh_dev_removed(edev))
404 		return NULL;
405 
406 	driver = eeh_pcid_get(dev);
407 	if (driver) {
408 		eeh_pcid_put(dev);
409 		if (driver->err_handler)
410 			return NULL;
411 	}
412 
413 	/* Remove it from PCI subsystem */
414 	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
415 		 pci_name(dev));
416 	edev->bus = dev->bus;
417 	edev->mode |= EEH_DEV_DISCONNECTED;
418 	(*removed)++;
419 
420 	pci_lock_rescan_remove();
421 	pci_stop_and_remove_bus_device(dev);
422 	pci_unlock_rescan_remove();
423 
424 	return NULL;
425 }
426 
427 static void *eeh_pe_detach_dev(void *data, void *userdata)
428 {
429 	struct eeh_pe *pe = (struct eeh_pe *)data;
430 	struct eeh_dev *edev, *tmp;
431 
432 	eeh_pe_for_each_dev(pe, edev, tmp) {
433 		if (!(edev->mode & EEH_DEV_DISCONNECTED))
434 			continue;
435 
436 		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
437 		eeh_rmv_from_parent_pe(edev);
438 	}
439 
440 	return NULL;
441 }
442 
443 /*
444  * Explicitly clear the PE's frozen state on PowerNV, where the
445  * PE is kept frozen until the BAR restore is completed. It's
446  * harmless to clear it on pSeries. To be consistent with the PE
447  * reset (tried 3 times), we try to clear the frozen state up to
448  * 3 times as well.
449  */
450 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
451 {
452 	struct eeh_pe *pe = (struct eeh_pe *)data;
453 	int i, rc;
454 
455 	for (i = 0; i < 3; i++) {
456 		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
457 		if (rc)
458 			continue;
459 		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
460 		if (!rc)
461 			break;
462 	}
463 
464 	/* The PE has been isolated, clear it */
465 	if (rc) {
466 		pr_warn("%s: Can't clear frozen PHB#%x-PE#%x (%d)\n",
467 			__func__, pe->phb->global_number, pe->addr, rc);
468 		return (void *)pe;
469 	}
470 
471 	return NULL;
472 }
473 
474 static int eeh_clear_pe_frozen_state(struct eeh_pe *pe)
475 {
476 	void *rc;
477 
478 	rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, NULL);
479 	if (!rc)
480 		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
481 
482 	return rc ? -EIO : 0;
483 }
484 
485 /**
486  * eeh_reset_device - Perform actual reset of a pci slot
487  * @pe: EEH PE
488  * @bus: PCI bus corresponding to the isolated slot
489  *
490  * This routine must be called to do reset on the indicated PE.
491  * During the reset, udev might be invoked because the affected
492  * PCI devices will be removed and then re-added.
493  */
494 static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
495 {
496 	struct pci_bus *frozen_bus = eeh_pe_bus_get(pe);
497 	struct timeval tstamp;
498 	int cnt, rc, removed = 0;
499 
500 	/* pcibios will clear the counter; save the value */
501 	cnt = pe->freeze_count;
502 	tstamp = pe->tstamp;
503 
504 	/*
505 	 * We don't remove the corresponding PE instances because
506 	 * we need the information afterwards. The attached EEH
507 	 * devices are expected to be re-attached soon when calling
508 	 * into pcibios_add_pci_devices().
509 	 */
510 	eeh_pe_state_mark(pe, EEH_PE_KEEP);
511 	if (bus) {
512 		pci_lock_rescan_remove();
513 		pcibios_remove_pci_devices(bus);
514 		pci_unlock_rescan_remove();
515 	} else if (frozen_bus) {
516 		eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed);
517 	}
518 
519 	/*
520 	 * Reset the pci controller. (Asserts RST#; resets config space).
521 	 * Reconfigure bridges and devices. Don't try to bring the system
522 	 * up if the reset failed for some reason.
523 	 *
524 	 * During the reset, it's very dangerous to have uncontrolled PCI
525 	 * config accesses. So we prefer to block them. However, controlled
526 	 * PCI config accesses initiated from EEH itself are allowed.
527 	 */
528 	eeh_pe_state_mark(pe, EEH_PE_RESET);
529 	rc = eeh_reset_pe(pe);
530 	if (rc) {
531 		eeh_pe_state_clear(pe, EEH_PE_RESET);
532 		return rc;
533 	}
534 
535 	pci_lock_rescan_remove();
536 
537 	/* Restore PE */
538 	eeh_ops->configure_bridge(pe);
539 	eeh_pe_restore_bars(pe);
540 	eeh_pe_state_clear(pe, EEH_PE_RESET);
541 
542 	/* Clear frozen state */
543 	rc = eeh_clear_pe_frozen_state(pe);
544 	if (rc)
545 		return rc;
546 
547 	/* Give the system 5 seconds to finish running the user-space
548 	 * hotplug shutdown scripts, e.g. ifdown for ethernet.  Yes,
549 	 * this is a hack, but if we don't do this, and try to bring
550 	 * the device up before the scripts have taken it down,
551 	 * potentially weird things happen.
552 	 */
553 	if (bus) {
554 		pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
555 		ssleep(5);
556 
557 		/*
558 		 * The EEH device is still connected with its parent
559 		 * PE. We should disconnect it so the binding can be
560 		 * rebuilt when adding PCI devices.
561 		 */
562 		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
563 		pcibios_add_pci_devices(bus);
564 	} else if (frozen_bus && removed) {
565 		pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
566 		ssleep(5);
567 
568 		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
569 		pcibios_add_pci_devices(frozen_bus);
570 	}
571 	eeh_pe_state_clear(pe, EEH_PE_KEEP);
572 
573 	pe->tstamp = tstamp;
574 	pe->freeze_count = cnt;
575 
576 	pci_unlock_rescan_remove();
577 	return 0;
578 }
579 
580 /* The longest amount of time to wait for a PCI device
581  * to come back online, in seconds.
582  */
583 #define MAX_WAIT_FOR_RECOVERY 300
584 
585 static void eeh_handle_normal_event(struct eeh_pe *pe)
586 {
587 	struct pci_bus *frozen_bus;
588 	int rc = 0;
589 	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
590 
591 	frozen_bus = eeh_pe_bus_get(pe);
592 	if (!frozen_bus) {
593 		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
594 			__func__, pe->phb->global_number, pe->addr);
595 		return;
596 	}
597 
598 	eeh_pe_update_time_stamp(pe);
599 	pe->freeze_count++;
600 	if (pe->freeze_count > EEH_MAX_ALLOWED_FREEZES)
601 		goto excess_failures;
602 	pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
603 		pe->freeze_count);
604 
605 	/* Walk the various device drivers attached to this slot through
606 	 * a reset sequence, giving each an opportunity to do what it needs
607 	 * to accomplish the reset.  Each child gets a report of the
608 	 * status ... if any child can't handle the reset, then the entire
609 	 * slot is dlpar removed and added.
610 	 */
611 	pr_info("EEH: Notify device drivers to shutdown\n");
612 	eeh_pe_dev_traverse(pe, eeh_report_error, &result);
613 
614 	/* Get the current PCI slot state. This can take a long time,
615 	 * sometimes over 3 seconds for certain systems.
616 	 */
617 	rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
618 	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
619 		pr_warn("EEH: Permanent failure\n");
620 		goto hard_fail;
621 	}
622 
623 	/* Since rtas may enable MMIO when posting the error log,
624 	 * don't post the error log until after all dev drivers
625 	 * have been informed.
626 	 */
627 	pr_info("EEH: Collect temporary log\n");
628 	eeh_slot_error_detail(pe, EEH_LOG_TEMP);
629 
630 	/* If all device drivers were EEH-unaware, then shut
631 	 * down all of the device drivers, and hope they
632 	 * go down willingly, without panicking the system.
633 	 */
634 	if (result == PCI_ERS_RESULT_NONE) {
635 		pr_info("EEH: Reset with hotplug activity\n");
636 		rc = eeh_reset_device(pe, frozen_bus);
637 		if (rc) {
638 			pr_warn("%s: Unable to reset, err=%d\n",
639 				__func__, rc);
640 			goto hard_fail;
641 		}
642 	}
643 
644 	/* If all devices reported they can proceed, then re-enable MMIO */
645 	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
646 		pr_info("EEH: Enable I/O for affected devices\n");
647 		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
648 
649 		if (rc < 0)
650 			goto hard_fail;
651 		if (rc) {
652 			result = PCI_ERS_RESULT_NEED_RESET;
653 		} else {
654 			pr_info("EEH: Notify device drivers to resume I/O\n");
655 			eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
656 		}
657 	}
658 
659 	/* If all devices reported they can proceed, then re-enable DMA */
660 	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
661 		pr_info("EEH: Enable DMA for affected devices\n");
662 		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
663 
664 		if (rc < 0)
665 			goto hard_fail;
666 		if (rc) {
667 			result = PCI_ERS_RESULT_NEED_RESET;
668 		} else {
669 			/*
670 			 * We didn't do a PE reset in this case, so the
671 			 * PE is still in the frozen state. Clear it
672 			 * before resuming the PE.
673 			 */
674 			eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
675 			result = PCI_ERS_RESULT_RECOVERED;
676 		}
677 	}
678 
679 	/* If any device has a hard failure, then shut off everything. */
680 	if (result == PCI_ERS_RESULT_DISCONNECT) {
681 		pr_warn("EEH: Device driver gave up\n");
682 		goto hard_fail;
683 	}
684 
685 	/* If any device called out for a reset, then reset the slot */
686 	if (result == PCI_ERS_RESULT_NEED_RESET) {
687 		pr_info("EEH: Reset without hotplug activity\n");
688 		rc = eeh_reset_device(pe, NULL);
689 		if (rc) {
690 			pr_warn("%s: Cannot reset, err=%d\n",
691 				__func__, rc);
692 			goto hard_fail;
693 		}
694 
695 		pr_info("EEH: Notify device drivers "
696 			"of the completion of reset\n");
697 		result = PCI_ERS_RESULT_NONE;
698 		eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
699 	}
700 
701 	/* All devices should claim they have recovered by now. */
702 	if ((result != PCI_ERS_RESULT_RECOVERED) &&
703 	    (result != PCI_ERS_RESULT_NONE)) {
704 		pr_warn("EEH: Not recovered\n");
705 		goto hard_fail;
706 	}
707 
708 	/* Tell all device drivers that they can resume operations */
709 	pr_info("EEH: Notify device drivers to resume\n");
710 	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
711 
712 	return;
713 
714 excess_failures:
715 	/*
716 	 * About 90% of all real-life EEH failures in the field
717 	 * are due to poorly seated PCI cards. Only 10% or so are
718 	 * due to actual, failed cards.
719 	 */
720 	pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
721 	       "last hour and has been permanently disabled.\n"
722 	       "Please try reseating or replacing it.\n",
723 		pe->phb->global_number, pe->addr,
724 		pe->freeze_count);
725 	goto perm_error;
726 
727 hard_fail:
728 	pr_err("EEH: Unable to recover from failure of PHB#%d-PE#%x.\n"
729 	       "Please try reseating or replacing it.\n",
730 		pe->phb->global_number, pe->addr);
731 
732 perm_error:
733 	eeh_slot_error_detail(pe, EEH_LOG_PERM);
734 
735 	/* Notify all devices that they're about to go down. */
736 	eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
737 
738 	/* Mark the PE to be removed permanently */
739 	pe->freeze_count = EEH_MAX_ALLOWED_FREEZES + 1;
740 
741 	/*
742 	 * Shut down the device drivers for good. We mark
743 	 * all removed devices correctly to avoid accessing
744 	 * their PCI config space any more.
745 	 */
746 	if (frozen_bus) {
747 		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
748 
749 		pci_lock_rescan_remove();
750 		pcibios_remove_pci_devices(frozen_bus);
751 		pci_unlock_rescan_remove();
752 	}
753 }
754 
755 static void eeh_handle_special_event(void)
756 {
757 	struct eeh_pe *pe, *phb_pe;
758 	struct pci_bus *bus;
759 	struct pci_controller *hose;
760 	unsigned long flags;
761 	int rc;
762 
763 
764 	do {
765 		rc = eeh_ops->next_error(&pe);
766 
767 		switch (rc) {
768 		case EEH_NEXT_ERR_DEAD_IOC:
769 			/* Mark all PHBs in dead state */
770 			eeh_serialize_lock(&flags);
771 
772 			/* Purge all events */
773 			eeh_remove_event(NULL, true);
774 
775 			list_for_each_entry(hose, &hose_list, list_node) {
776 				phb_pe = eeh_phb_pe_get(hose);
777 				if (!phb_pe) continue;
778 
779 				eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
780 			}
781 
782 			eeh_serialize_unlock(flags);
783 
784 			break;
785 		case EEH_NEXT_ERR_FROZEN_PE:
786 		case EEH_NEXT_ERR_FENCED_PHB:
787 		case EEH_NEXT_ERR_DEAD_PHB:
788 			/* Mark the PE in fenced state */
789 			eeh_serialize_lock(&flags);
790 
791 			/* Purge all events of the PHB */
792 			eeh_remove_event(pe, true);
793 
794 			if (rc == EEH_NEXT_ERR_DEAD_PHB)
795 				eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
796 			else
797 				eeh_pe_state_mark(pe,
798 					EEH_PE_ISOLATED | EEH_PE_RECOVERING);
799 
800 			eeh_serialize_unlock(flags);
801 
802 			break;
803 		case EEH_NEXT_ERR_NONE:
804 			return;
805 		default:
806 			pr_warn("%s: Invalid value %d from next_error()\n",
807 				__func__, rc);
808 			return;
809 		}
810 
811 		/*
812 		 * A fenced PHB or a frozen PE is handled as a normal
813 		 * event. For a dead PHB or a dead IOC, we have to
814 		 * remove the affected PHBs instead.
815 		 */
816 		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
817 		    rc == EEH_NEXT_ERR_FENCED_PHB) {
818 			eeh_handle_normal_event(pe);
819 			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
820 		} else {
821 			pci_lock_rescan_remove();
822 			list_for_each_entry(hose, &hose_list, list_node) {
823 				phb_pe = eeh_phb_pe_get(hose);
824 				if (!phb_pe ||
825 				    !(phb_pe->state & EEH_PE_ISOLATED) ||
826 				    (phb_pe->state & EEH_PE_RECOVERING))
827 					continue;
828 
829 				/* Notify all devices to be down */
830 				bus = eeh_pe_bus_get(phb_pe);
831 				eeh_pe_dev_traverse(phb_pe,
832 					eeh_report_failure, NULL);
833 				pcibios_remove_pci_devices(bus);
834 			}
835 			pci_unlock_rescan_remove();
836 		}
837 
838 		/*
839 		 * If we have detected dead IOC, we needn't proceed
840 		 * any more since all PHBs would have been removed
841 		 */
842 		if (rc == EEH_NEXT_ERR_DEAD_IOC)
843 			break;
844 	} while (rc != EEH_NEXT_ERR_NONE);
845 }
846 
847 /**
848  * eeh_handle_event - Reset a PCI device after hard lockup.
849  * @pe: EEH PE
850  *
851  * When the PHB detects address or data parity errors on a particular
852  * PCI slot, the associated PE will be frozen. Besides, DMA to wild
853  * addresses (which usually happens due to bugs in device drivers or
854  * in PCI adapter firmware) can cause EEH errors. #SERR, #PERR or
855  * other miscellaneous PCI-related errors can also trigger EEH errors.
856  *
857  * The recovery process consists of unplugging the device driver
858  * (which generates hotplug events to userspace), then issuing a PCI
859  * #RST to the device, then reconfiguring the PCI config space for
860  * all bridges & devices under this slot, and finally restarting the
861  * device drivers (which causes a second set of hotplug events to go
862  * out to userspace).
863  */
864 void eeh_handle_event(struct eeh_pe *pe)
865 {
866 	if (pe)
867 		eeh_handle_normal_event(pe);
868 	else
869 		eeh_handle_special_event();
870 }
871
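/*
 * Caller-side usage sketch (illustrative only, derived from the two
 * branches above; not an actual caller in this file):
 *
 *	eeh_handle_event(pe);	// recover a specific frozen PE
 *	eeh_handle_event(NULL);	// handle "special" events (dead PHB/IOC)
 */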