xref: /openbmc/linux/arch/powerpc/kernel/eeh_driver.c (revision 80483c3a)
1 /*
2  * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
3  * Copyright IBM Corp. 2004 2005
4  * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
5  *
6  * All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or (at
11  * your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
16  * NON INFRINGEMENT.  See the GNU General Public License for more
17  * details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  *
23  * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
24  */
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/irq.h>
28 #include <linux/module.h>
29 #include <linux/pci.h>
30 #include <asm/eeh.h>
31 #include <asm/eeh_event.h>
32 #include <asm/ppc-pci.h>
33 #include <asm/pci-bridge.h>
34 #include <asm/prom.h>
35 #include <asm/rtas.h>
36 
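/*
 * Book-keeping for devices hot-removed during recovery: hot-removed VFs
 * are queued on edev_list so they can be added back once the parent PF
 * has recovered, and "removed" counts how many devices were taken out.
 */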
37 struct eeh_rmv_data {
38 	struct list_head edev_list;
39 	int removed;
40 };
41 
42 /**
43  * eeh_pcid_name - Retrieve name of PCI device driver
44  * @pdev: PCI device
45  *
46  * This routine returns the name of the driver bound to the PCI
47  * device, or an empty string if no driver is bound.
48  */
49 static inline const char *eeh_pcid_name(struct pci_dev *pdev)
50 {
51 	if (pdev && pdev->dev.driver)
52 		return pdev->dev.driver->name;
53 	return "";
54 }
55 
56 /**
57  * eeh_pcid_get - Get the PCI device driver
58  * @pdev: PCI device
59  *
60  * The function is used to retrieve the PCI device driver for
61  * the indicated PCI device. In addition, it takes a reference on
62  * the driver's module to prevent the driver from being unloaded
63  * while it is in use; otherwise a kernel crash could result.
64  */
65 static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
66 {
67 	if (!pdev || !pdev->driver)
68 		return NULL;
69 
70 	if (!try_module_get(pdev->driver->driver.owner))
71 		return NULL;
72 
73 	return pdev->driver;
74 }
75 
76 /**
77  * eeh_pcid_put - Dereference on the PCI device driver
78  * @pdev: PCI device
79  *
80  * The function drops the module reference taken by eeh_pcid_get()
81  * on the driver of the indicated PCI device.
82  */
83 static inline void eeh_pcid_put(struct pci_dev *pdev)
84 {
85 	if (!pdev || !pdev->driver)
86 		return;
87 
88 	module_put(pdev->driver->driver.owner);
89 }
90 
91 /**
92  * eeh_disable_irq - Disable interrupt for the recovering device
93  * @dev: PCI device
94  *
95  * This routine must be called when reporting a temporary or permanent
96  * error to a particular PCI device, in order to disable that device's
97  * interrupt. If the device uses MSI or MSI-X interrupts, there is no
98  * real work to do because EEH freezes DMA for devices hitting EEH
99  * errors, which effectively blocks MSI and MSI-X delivery as well.
100  */
101 static void eeh_disable_irq(struct pci_dev *dev)
102 {
103 	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
104 
105 	/* Don't disable MSI and MSI-X interrupts. They are
106 	 * effectively disabled by the DMA Stopped state
107 	 * when an EEH error occurs.
108 	 */
109 	if (dev->msi_enabled || dev->msix_enabled)
110 		return;
111 
112 	if (!irq_has_action(dev->irq))
113 		return;
114 
115 	edev->mode |= EEH_DEV_IRQ_DISABLED;
116 	disable_irq_nosync(dev->irq);
117 }
118 
119 /**
120  * eeh_enable_irq - Enable interrupt for the recovering device
121  * @dev: PCI device
122  *
123  * This routine must be called to re-enable the interrupt when the
124  * failed device is about to be resumed.
125  */
126 static void eeh_enable_irq(struct pci_dev *dev)
127 {
128 	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
129 
130 	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
131 		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
132 		/*
133 		 * FIXME !!!!!
134 		 *
135 		 * This is just ass backwards. This maze has
136 		 * unbalanced irq_enable/disable calls. So instead of
137 		 * finding the root cause it works around the warning
138 		 * in the irq_enable code by conditionally calling
139 		 * into it.
140 		 *
141 		 * That's just wrong. The warning in the core code is
142 		 * there to tell people to fix their asymmetries in
143 		 * their own code, not by abusing the core information
144 		 * to avoid it.
145 		 *
146 		 * I so wish that the asymmetry would be the other way
147 		 * round and a few more irq_disable calls render that
148 		 * shit unusable forever.
149 		 *
150 		 *	tglx
151 		 */
152 		if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
153 			enable_irq(dev->irq);
154 	}
155 }
156 
157 static bool eeh_dev_removed(struct eeh_dev *edev)
158 {
159 	/* EEH device removed ? */
160 	if (!edev || (edev->mode & EEH_DEV_REMOVED))
161 		return true;
162 
163 	return false;
164 }
165 
166 static void *eeh_dev_save_state(void *data, void *userdata)
167 {
168 	struct eeh_dev *edev = data;
169 	struct pci_dev *pdev;
170 
171 	if (!edev)
172 		return NULL;
173 
174 	/*
175 	 * The config space of some adapters cannot be accessed
176 	 * without causing a fenced PHB. For those we don't save
177 	 * the current config space content; the initial config
178 	 * space saved when the EEH device was created will be
179 	 * restored instead.
180 	 */
181 	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
182 		return NULL;
183 
184 	pdev = eeh_dev_to_pci_dev(edev);
185 	if (!pdev)
186 		return NULL;
187 
188 	pci_save_state(pdev);
189 	return NULL;
190 }
191 
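/*
 * The eeh_report_*() callbacks below drive the struct pci_error_handlers
 * hooks that an EEH-aware driver registers through the .err_handler field
 * of its struct pci_driver. As a rough, illustrative sketch only (the
 * "foo" names are hypothetical and not part of this file, and the other
 * callbacks are elided), such a driver might provide:
 *
 *	static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
 *						   enum pci_channel_state state)
 *	{
 *		if (state == pci_channel_io_perm_failure)
 *			return PCI_ERS_RESULT_DISCONNECT;
 *		return PCI_ERS_RESULT_NEED_RESET;
 *	}
 *
 *	static const struct pci_error_handlers foo_err_handler = {
 *		.error_detected	= foo_error_detected,
 *		.slot_reset	= foo_slot_reset,
 *		.resume		= foo_resume,
 *	};
 */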
192 /**
193  * eeh_report_error - Report pci error to each device driver
194  * @data: eeh device
195  * @userdata: return value
196  *
197  * Report an EEH error to each device driver, collect up and
198  * merge the device driver responses. Cumulative response
199  * passed back in "userdata".
200  */
201 static void *eeh_report_error(void *data, void *userdata)
202 {
203 	struct eeh_dev *edev = (struct eeh_dev *)data;
204 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
205 	enum pci_ers_result rc, *res = userdata;
206 	struct pci_driver *driver;
207 
208 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
209 		return NULL;
210 	dev->error_state = pci_channel_io_frozen;
211 
212 	driver = eeh_pcid_get(dev);
213 	if (!driver) return NULL;
214 
215 	eeh_disable_irq(dev);
216 
217 	if (!driver->err_handler ||
218 	    !driver->err_handler->error_detected) {
219 		eeh_pcid_put(dev);
220 		return NULL;
221 	}
222 
223 	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
224 
225 	/* A driver that needs a reset trumps all others */
226 	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
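	/* Otherwise, if no result has been recorded yet, take this one */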
227 	if (*res == PCI_ERS_RESULT_NONE) *res = rc;
228 
229 	edev->in_error = true;
230 	eeh_pcid_put(dev);
231 	return NULL;
232 }
233 
234 /**
235  * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
236  * @data: eeh device
237  * @userdata: return value
238  *
239  * Tells each device driver that IO ports, MMIO and config space I/O
240  * are now enabled. Collects up and merges the device driver responses.
241  * Cumulative response passed back in "userdata".
242  */
243 static void *eeh_report_mmio_enabled(void *data, void *userdata)
244 {
245 	struct eeh_dev *edev = (struct eeh_dev *)data;
246 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
247 	enum pci_ers_result rc, *res = userdata;
248 	struct pci_driver *driver;
249 
250 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
251 		return NULL;
252 
253 	driver = eeh_pcid_get(dev);
254 	if (!driver) return NULL;
255 
256 	if (!driver->err_handler ||
257 	    !driver->err_handler->mmio_enabled ||
258 	    (edev->mode & EEH_DEV_NO_HANDLER)) {
259 		eeh_pcid_put(dev);
260 		return NULL;
261 	}
262 
263 	rc = driver->err_handler->mmio_enabled(dev);
264 
265 	/* A driver that needs a reset trumps all others */
266 	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
267 	if (*res == PCI_ERS_RESULT_NONE) *res = rc;
268 
269 	eeh_pcid_put(dev);
270 	return NULL;
271 }
272 
273 /**
274  * eeh_report_reset - Tell device that slot has been reset
275  * @data: eeh device
276  * @userdata: return value
277  *
278  * This routine must be called while EEH resets the particular
279  * PCI device so that the associated PCI device driver can take
280  * some action, usually saving the data it needs so that the
281  * driver can work again once the device is recovered.
282  */
283 static void *eeh_report_reset(void *data, void *userdata)
284 {
285 	struct eeh_dev *edev = (struct eeh_dev *)data;
286 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
287 	enum pci_ers_result rc, *res = userdata;
288 	struct pci_driver *driver;
289 
290 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
291 		return NULL;
292 	dev->error_state = pci_channel_io_normal;
293 
294 	driver = eeh_pcid_get(dev);
295 	if (!driver) return NULL;
296 
297 	eeh_enable_irq(dev);
298 
299 	if (!driver->err_handler ||
300 	    !driver->err_handler->slot_reset ||
301 	    (edev->mode & EEH_DEV_NO_HANDLER) ||
302 	    (!edev->in_error)) {
303 		eeh_pcid_put(dev);
304 		return NULL;
305 	}
306 
307 	rc = driver->err_handler->slot_reset(dev);
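	/*
	 * Merge the driver's answer into the cumulative result: take it
	 * if nothing has been recorded yet or the slot looked recovered
	 * so far, and let a request for another reset override a
	 * disconnect verdict.
	 */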
308 	if ((*res == PCI_ERS_RESULT_NONE) ||
309 	    (*res == PCI_ERS_RESULT_RECOVERED)) *res = rc;
310 	if (*res == PCI_ERS_RESULT_DISCONNECT &&
311 	     rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
312 
313 	eeh_pcid_put(dev);
314 	return NULL;
315 }
316 
317 static void *eeh_dev_restore_state(void *data, void *userdata)
318 {
319 	struct eeh_dev *edev = data;
320 	struct pci_dev *pdev;
321 
322 	if (!edev)
323 		return NULL;
324 
325 	/*
326 	 * The config space content wasn't saved for adapters whose
327 	 * config space is blocked. For those we have to restore the
328 	 * initial config space that was saved when the EEH device
329 	 * was created.
330 	 */
331 	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
332 		if (list_is_last(&edev->list, &edev->pe->edevs))
333 			eeh_pe_restore_bars(edev->pe);
334 
335 		return NULL;
336 	}
337 
338 	pdev = eeh_dev_to_pci_dev(edev);
339 	if (!pdev)
340 		return NULL;
341 
342 	pci_restore_state(pdev);
343 	return NULL;
344 }
345 
346 /**
347  * eeh_report_resume - Tell device to resume normal operations
348  * @data: eeh device
349  * @userdata: return value
350  *
351  * This routine must be called to notify the device driver that it
352  * may resume operations, so that it can do whatever initialization
353  * is needed to make the recovered device work again.
354  */
355 static void *eeh_report_resume(void *data, void *userdata)
356 {
357 	struct eeh_dev *edev = (struct eeh_dev *)data;
358 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
359 	bool was_in_error;
360 	struct pci_driver *driver;
361 
362 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
363 		return NULL;
364 	dev->error_state = pci_channel_io_normal;
365 
366 	driver = eeh_pcid_get(dev);
367 	if (!driver) return NULL;
368 
369 	was_in_error = edev->in_error;
370 	edev->in_error = false;
371 	eeh_enable_irq(dev);
372 
373 	if (!driver->err_handler ||
374 	    !driver->err_handler->resume ||
375 	    (edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) {
376 		edev->mode &= ~EEH_DEV_NO_HANDLER;
377 		eeh_pcid_put(dev);
378 		return NULL;
379 	}
380 
381 	driver->err_handler->resume(dev);
382 
383 	eeh_pcid_put(dev);
384 	return NULL;
385 }
386 
387 /**
388  * eeh_report_failure - Tell device driver that device is dead.
389  * @data: eeh device
390  * @userdata: return value
391  *
392  * This informs the device driver that the device is permanently
393  * dead, and that no further recovery attempts will be made on it.
394  */
395 static void *eeh_report_failure(void *data, void *userdata)
396 {
397 	struct eeh_dev *edev = (struct eeh_dev *)data;
398 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
399 	struct pci_driver *driver;
400 
401 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
402 		return NULL;
403 	dev->error_state = pci_channel_io_perm_failure;
404 
405 	driver = eeh_pcid_get(dev);
406 	if (!driver) return NULL;
407 
408 	eeh_disable_irq(dev);
409 
410 	if (!driver->err_handler ||
411 	    !driver->err_handler->error_detected) {
412 		eeh_pcid_put(dev);
413 		return NULL;
414 	}
415 
416 	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
417 
418 	eeh_pcid_put(dev);
419 	return NULL;
420 }
421 
422 static void *eeh_add_virt_device(void *data, void *userdata)
423 {
424 	struct pci_driver *driver;
425 	struct eeh_dev *edev = (struct eeh_dev *)data;
426 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
427 	struct pci_dn *pdn = eeh_dev_to_pdn(edev);
428 
429 	if (!(edev->physfn)) {
430 		pr_warn("%s: EEH dev %04x:%02x:%02x.%01x not for VF\n",
431 			__func__, edev->phb->global_number, pdn->busno,
432 			PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
433 		return NULL;
434 	}
435 
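	/*
	 * If a driver with EEH error handlers is still bound to the VF,
	 * the VF was not hot-removed during recovery and doesn't need to
	 * be re-created here.
	 */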
436 	driver = eeh_pcid_get(dev);
437 	if (driver) {
438 		eeh_pcid_put(dev);
439 		if (driver->err_handler)
440 			return NULL;
441 	}
442 
443 #ifdef CONFIG_PPC_POWERNV
444 	pci_iov_add_virtfn(edev->physfn, pdn->vf_index, 0);
445 #endif
446 	return NULL;
447 }
448 
449 static void *eeh_rmv_device(void *data, void *userdata)
450 {
451 	struct pci_driver *driver;
452 	struct eeh_dev *edev = (struct eeh_dev *)data;
453 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
454 	struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;
455 	int *removed = rmv_data ? &rmv_data->removed : NULL;
456 
457 	/*
458 	 * Actually, we should remove the PCI bridges as well.
459 	 * However, doing so adds a lot of complexity, particularly
460 	 * because some of the devices under the bridge might
461 	 * support EEH. So we only care about PCI devices for
462 	 * simplicity here.
463 	 */
464 	if (!dev || (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
465 		return NULL;
466 
467 	/*
468 	 * We rely on count-based pcibios_release_device() to
469 	 * detach permanently offlined PEs. Unfortunately, that is
470 	 * not reliable enough: the permanently offlined PEs may
471 	 * still be attached, but neither they nor their child
472 	 * devices need any handling here.
473 	 */
474 	if (eeh_dev_removed(edev))
475 		return NULL;
476 
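	/*
	 * During a partial hotplug (removed != NULL), keep the device if
	 * its PE has been passed through to a guest or if the bound
	 * driver implements the EEH error handlers; such devices can
	 * recover in place without being unplugged.
	 */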
477 	driver = eeh_pcid_get(dev);
478 	if (driver) {
479 		eeh_pcid_put(dev);
480 		if (removed &&
481 		    eeh_pe_passed(edev->pe))
482 			return NULL;
483 		if (removed &&
484 		    driver->err_handler &&
485 		    driver->err_handler->error_detected &&
486 		    driver->err_handler->slot_reset)
487 			return NULL;
488 	}
489 
490 	/* Remove it from PCI subsystem */
491 	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
492 		 pci_name(dev));
493 	edev->bus = dev->bus;
494 	edev->mode |= EEH_DEV_DISCONNECTED;
495 	if (removed)
496 		(*removed)++;
497 
498 	if (edev->physfn) {
499 #ifdef CONFIG_PPC_POWERNV
500 		struct pci_dn *pdn = eeh_dev_to_pdn(edev);
501 
502 		pci_iov_remove_virtfn(edev->physfn, pdn->vf_index, 0);
503 		edev->pdev = NULL;
504 
505 		/*
506 		 * The VF PE number has to be set to an invalid value so
507 		 * that the VF can be plugged back in successfully.
508 		 */
509 		pdn->pe_number = IODA_INVALID_PE;
510 #endif
511 		if (rmv_data)
512 			list_add(&edev->rmv_list, &rmv_data->edev_list);
513 	} else {
514 		pci_lock_rescan_remove();
515 		pci_stop_and_remove_bus_device(dev);
516 		pci_unlock_rescan_remove();
517 	}
518 
519 	return NULL;
520 }
521 
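/*
 * Detach the devices that eeh_rmv_device() marked as disconnected from
 * their parent PE, so that the PE/device binding can be rebuilt when
 * the devices are added back.
 */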
522 static void *eeh_pe_detach_dev(void *data, void *userdata)
523 {
524 	struct eeh_pe *pe = (struct eeh_pe *)data;
525 	struct eeh_dev *edev, *tmp;
526 
527 	eeh_pe_for_each_dev(pe, edev, tmp) {
528 		if (!(edev->mode & EEH_DEV_DISCONNECTED))
529 			continue;
530 
531 		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
532 		eeh_rmv_from_parent_pe(edev);
533 	}
534 
535 	return NULL;
536 }
537 
538 /*
539  * Explicitly clear the PE's frozen state on PowerNV, where the
540  * PE stays frozen until the BAR restore has completed. It is
541  * harmless to clear it on pSeries. To be consistent with the PE
542  * reset (which is retried up to 3 times), we try to clear the
543  * frozen state up to 3 times as well.
544  */
545 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
546 {
547 	struct eeh_pe *pe = (struct eeh_pe *)data;
548 	bool *clear_sw_state = flag;
549 	int i, rc = 1;
550 
551 	for (i = 0; rc && i < 3; i++)
552 		rc = eeh_unfreeze_pe(pe, clear_sw_state);
553 
554 	/* Stop immediately on any errors */
555 	if (rc) {
556 		pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
557 			__func__, rc, pe->phb->global_number, pe->addr);
558 		return (void *)pe;
559 	}
560 
561 	return NULL;
562 }
563 
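/*
 * Clear the frozen state of every PE in the tree rooted at @pe; the
 * ISOLATED flag is only dropped once all of them have been unfrozen.
 */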
564 static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,
565 				     bool clear_sw_state)
566 {
567 	void *rc;
568 
569 	rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
570 	if (!rc)
571 		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
572 
573 	return rc ? -EIO : 0;
574 }
575 
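/*
 * Reset and recover the given PE without calling into the device
 * drivers: save the device state, reset the PE, clear its frozen
 * state and finally restore the saved state.
 */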
576 int eeh_pe_reset_and_recover(struct eeh_pe *pe)
577 {
578 	int ret;
579 
580 	/* Bail if the PE is being recovered */
581 	if (pe->state & EEH_PE_RECOVERING)
582 		return 0;
583 
584 	/* Put the PE into recovery mode */
585 	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
586 
587 	/* Save states */
588 	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);
589 
590 	/* Issue reset */
591 	ret = eeh_reset_pe(pe);
592 	if (ret) {
593 		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
594 		return ret;
595 	}
596 
597 	/* Unfreeze the PE */
598 	ret = eeh_clear_pe_frozen_state(pe, true);
599 	if (ret) {
600 		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
601 		return ret;
602 	}
603 
604 	/* Restore device state */
605 	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);
606 
607 	/* Clear recovery mode */
608 	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
609 
610 	return 0;
611 }
612 
613 /**
614  * eeh_reset_device - Perform actual reset of a pci slot
615  * @pe: EEH PE
616  * @bus: PCI bus corresponding to the isolated slot
617  *
618  * This routine must be called to reset the indicated PE. During
619  * the reset, udev events may be generated because the affected
620  * PCI devices will be removed and then re-added.
621  */
622 static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
623 				struct eeh_rmv_data *rmv_data)
624 {
625 	struct pci_bus *frozen_bus = eeh_pe_bus_get(pe);
626 	struct timeval tstamp;
627 	int cnt, rc;
628 	struct eeh_dev *edev;
629 
630 	/* pcibios will clear the counter; save the value */
631 	cnt = pe->freeze_count;
632 	tstamp = pe->tstamp;
633 
634 	/*
635 	 * We don't remove the corresponding PE instances because
636 	 * we need the information afterwards. The EEH devices are
637 	 * expected to be re-attached shortly, when pci_hp_add_devices()
638 	 * is called.
639 	 */
640 	eeh_pe_state_mark(pe, EEH_PE_KEEP);
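	/*
	 * With a hotplug bus, do a full hotplug: tear down the VFs
	 * directly for a VF PE, or remove every device under the bus
	 * otherwise. Without one, only remove the EEH-unaware devices
	 * on the frozen bus (partial hotplug).
	 */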
641 	if (bus) {
642 		if (pe->type & EEH_PE_VF) {
643 			eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
644 		} else {
645 			pci_lock_rescan_remove();
646 			pci_hp_remove_devices(bus);
647 			pci_unlock_rescan_remove();
648 		}
649 	} else if (frozen_bus) {
650 		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
651 	}
652 
653 	/*
654 	 * Reset the pci controller. (Asserts RST#; resets config space).
655 	 * Reconfigure bridges and devices. Don't try to bring the system
656 	 * up if the reset failed for some reason.
657 	 *
658 	 * During the reset, it's very dangerous to have uncontrolled PCI
659 	 * config accesses. So we prefer to block them. However, controlled
660 	 * PCI config accesses initiated from EEH itself are allowed.
661 	 */
662 	rc = eeh_reset_pe(pe);
663 	if (rc)
664 		return rc;
665 
666 	pci_lock_rescan_remove();
667 
668 	/* Restore PE */
669 	eeh_ops->configure_bridge(pe);
670 	eeh_pe_restore_bars(pe);
671 
672 	/* Clear frozen state */
673 	rc = eeh_clear_pe_frozen_state(pe, false);
674 	if (rc)
675 		return rc;
676 
677 	/* Give the system 5 seconds to finish running the user-space
678 	 * hotplug shutdown scripts, e.g. ifdown for ethernet.  Yes,
679 	 * this is a hack, but if we don't do this, and try to bring
680 	 * the device up before the scripts have taken it down,
681 	 * potentially weird things happen.
682 	 */
683 	if (bus) {
684 		pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
685 		ssleep(5);
686 
687 		/*
688 		 * The EEH device is still connected with its parent
689 		 * PE. We should disconnect it so the binding can be
690 		 * rebuilt when adding PCI devices.
691 		 */
692 		edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
693 		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
694 		if (pe->type & EEH_PE_VF) {
695 			eeh_add_virt_device(edev, NULL);
696 		} else {
697 			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
698 			pci_hp_add_devices(bus);
699 		}
700 	} else if (frozen_bus && rmv_data->removed) {
701 		pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
702 		ssleep(5);
703 
704 		edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
705 		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
706 		if (pe->type & EEH_PE_VF)
707 			eeh_add_virt_device(edev, NULL);
708 		else
709 			pci_hp_add_devices(frozen_bus);
710 	}
711 	eeh_pe_state_clear(pe, EEH_PE_KEEP);
712 
713 	pe->tstamp = tstamp;
714 	pe->freeze_count = cnt;
715 
716 	pci_unlock_rescan_remove();
717 	return 0;
718 }
719 
720 /* The longest amount of time to wait for a pci device
721  * to come back on line, in seconds.
722  */
723 #define MAX_WAIT_FOR_RECOVERY 300
724 
725 static void eeh_handle_normal_event(struct eeh_pe *pe)
726 {
727 	struct pci_bus *frozen_bus;
728 	struct eeh_dev *edev, *tmp;
729 	int rc = 0;
730 	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
731 	struct eeh_rmv_data rmv_data = {LIST_HEAD_INIT(rmv_data.edev_list), 0};
732 
733 	frozen_bus = eeh_pe_bus_get(pe);
734 	if (!frozen_bus) {
735 		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
736 			__func__, pe->phb->global_number, pe->addr);
737 		return;
738 	}
739 
740 	eeh_pe_update_time_stamp(pe);
741 	pe->freeze_count++;
742 	if (pe->freeze_count > eeh_max_freezes)
743 		goto excess_failures;
744 	pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
745 		pe->freeze_count);
746 
747 	/* Walk the various device drivers attached to this slot through
748 	 * a reset sequence, giving each an opportunity to do what it needs
749 	 * to accomplish the reset.  Each child gets a report of the
750 	 * status ... if any child can't handle the reset, then the entire
751 	 * slot is dlpar removed and added.
752 	 *
753 	 * When the PHB is fenced, we have to issue a reset to recover from
754 	 * the error. Override the result if necessary so that a partial
755 	 * hotplug is done for this case.
756 	 */
757 	pr_info("EEH: Notify device drivers to shutdown\n");
758 	eeh_pe_dev_traverse(pe, eeh_report_error, &result);
759 	if ((pe->type & EEH_PE_PHB) &&
760 	    result != PCI_ERS_RESULT_NONE &&
761 	    result != PCI_ERS_RESULT_NEED_RESET)
762 		result = PCI_ERS_RESULT_NEED_RESET;
763 
764 	/* Get the current PCI slot state. This can take a long time,
765 	 * sometimes over 300 seconds for certain systems.
766 	 */
767 	rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
768 	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
769 		pr_warn("EEH: Permanent failure\n");
770 		goto hard_fail;
771 	}
772 
773 	/* Since rtas may enable MMIO when posting the error log,
774 	 * don't post the error log until after all dev drivers
775 	 * have been informed.
776 	 */
777 	pr_info("EEH: Collect temporary log\n");
778 	eeh_slot_error_detail(pe, EEH_LOG_TEMP);
779 
780 	/* If all device drivers were EEH-unaware, then shut
781 	 * down all of the device drivers, and hope they
782 	 * go down willingly, without panicking the system.
783 	 */
784 	if (result == PCI_ERS_RESULT_NONE) {
785 		pr_info("EEH: Reset with hotplug activity\n");
786 		rc = eeh_reset_device(pe, frozen_bus, NULL);
787 		if (rc) {
788 			pr_warn("%s: Unable to reset, err=%d\n",
789 				__func__, rc);
790 			goto hard_fail;
791 		}
792 	}
793 
794 	/* If all devices reported they can proceed, then re-enable MMIO */
795 	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
796 		pr_info("EEH: Enable I/O for affected devices\n");
797 		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
798 
799 		if (rc < 0)
800 			goto hard_fail;
801 		if (rc) {
802 			result = PCI_ERS_RESULT_NEED_RESET;
803 		} else {
804 			pr_info("EEH: Notify device drivers to resume I/O\n");
805 			eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
806 		}
807 	}
808 
809 	/* If all devices reported they can proceed, then re-enable DMA */
810 	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
811 		pr_info("EEH: Enabled DMA for affected devices\n");
812 		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
813 
814 		if (rc < 0)
815 			goto hard_fail;
816 		if (rc) {
817 			result = PCI_ERS_RESULT_NEED_RESET;
818 		} else {
819 			/*
820 			 * No PE reset was done in this case, so the PE
821 			 * is still in the frozen state. Clear it before
822 			 * resuming the PE.
823 			 */
824 			eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
825 			result = PCI_ERS_RESULT_RECOVERED;
826 		}
827 	}
828 
829 	/* If any device has a hard failure, then shut off everything. */
830 	if (result == PCI_ERS_RESULT_DISCONNECT) {
831 		pr_warn("EEH: Device driver gave up\n");
832 		goto hard_fail;
833 	}
834 
835 	/* If any device called out for a reset, then reset the slot */
836 	if (result == PCI_ERS_RESULT_NEED_RESET) {
837 		pr_info("EEH: Reset without hotplug activity\n");
838 		rc = eeh_reset_device(pe, NULL, &rmv_data);
839 		if (rc) {
840 			pr_warn("%s: Cannot reset, err=%d\n",
841 				__func__, rc);
842 			goto hard_fail;
843 		}
844 
845 		pr_info("EEH: Notify device drivers "
846 			"of the completion of reset\n");
847 		result = PCI_ERS_RESULT_NONE;
848 		eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
849 	}
850 
851 	/* All devices should claim they have recovered by now. */
852 	if ((result != PCI_ERS_RESULT_RECOVERED) &&
853 	    (result != PCI_ERS_RESULT_NONE)) {
854 		pr_warn("EEH: Not recovered\n");
855 		goto hard_fail;
856 	}
857 
858 	/*
859 	 * The hot-removed VFs should be added back after the PF has
860 	 * been recovered properly.
861 	 */
862 	list_for_each_entry_safe(edev, tmp, &rmv_data.edev_list, rmv_list) {
863 		eeh_add_virt_device(edev, NULL);
864 		list_del(&edev->rmv_list);
865 	}
866 
867 	/* Tell all device drivers that they can resume operations */
868 	pr_info("EEH: Notify device driver to resume\n");
869 	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
870 
871 	return;
872 
873 excess_failures:
874 	/*
875 	 * About 90% of all real-life EEH failures in the field
876 	 * are due to poorly seated PCI cards. Only 10% or so are
877 	 * due to actual, failed cards.
878 	 */
879 	pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
880 	       "last hour and has been permanently disabled.\n"
881 	       "Please try reseating or replacing it.\n",
882 		pe->phb->global_number, pe->addr,
883 		pe->freeze_count);
884 	goto perm_error;
885 
886 hard_fail:
887 	pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n"
888 	       "Please try reseating or replacing it\n",
889 		pe->phb->global_number, pe->addr);
890 
891 perm_error:
892 	eeh_slot_error_detail(pe, EEH_LOG_PERM);
893 
894 	/* Notify all devices that they're about to go down. */
895 	eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
896 
897 	/* Mark the PE to be removed permanently */
898 	eeh_pe_state_mark(pe, EEH_PE_REMOVED);
899 
900 	/*
901 	 * Shut down the device drivers for good. We mark
902 	 * all removed devices correctly so that their PCI config
903 	 * space is not accessed any more.
904 	 */
905 	if (frozen_bus) {
906 		if (pe->type & EEH_PE_VF) {
907 			eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
908 			eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
909 		} else {
910 			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
911 			eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
912 
913 			pci_lock_rescan_remove();
914 			pci_hp_remove_devices(frozen_bus);
915 			pci_unlock_rescan_remove();
916 		}
917 	}
918 }
919 
920 static void eeh_handle_special_event(void)
921 {
922 	struct eeh_pe *pe, *phb_pe;
923 	struct pci_bus *bus;
924 	struct pci_controller *hose;
925 	unsigned long flags;
926 	int rc;
927 
928 
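	/*
	 * Keep pulling events from the platform until no further errors
	 * are reported. Frozen PEs and fenced PHBs go through the normal
	 * recovery path; dead PHBs and a dead IOC have their buses
	 * removed outright.
	 */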
929 	do {
930 		rc = eeh_ops->next_error(&pe);
931 
932 		switch (rc) {
933 		case EEH_NEXT_ERR_DEAD_IOC:
934 			/* Mark all PHBs in dead state */
935 			eeh_serialize_lock(&flags);
936 
937 			/* Purge all events */
938 			eeh_remove_event(NULL, true);
939 
940 			list_for_each_entry(hose, &hose_list, list_node) {
941 				phb_pe = eeh_phb_pe_get(hose);
942 				if (!phb_pe) continue;
943 
944 				eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
945 			}
946 
947 			eeh_serialize_unlock(flags);
948 
949 			break;
950 		case EEH_NEXT_ERR_FROZEN_PE:
951 		case EEH_NEXT_ERR_FENCED_PHB:
952 		case EEH_NEXT_ERR_DEAD_PHB:
953 			/* Mark the PE in fenced state */
954 			eeh_serialize_lock(&flags);
955 
956 			/* Purge all events of the PHB */
957 			eeh_remove_event(pe, true);
958 
959 			if (rc == EEH_NEXT_ERR_DEAD_PHB)
960 				eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
961 			else
962 				eeh_pe_state_mark(pe,
963 					EEH_PE_ISOLATED | EEH_PE_RECOVERING);
964 
965 			eeh_serialize_unlock(flags);
966 
967 			break;
968 		case EEH_NEXT_ERR_NONE:
969 			return;
970 		default:
971 			pr_warn("%s: Invalid value %d from next_error()\n",
972 				__func__, rc);
973 			return;
974 		}
975 
976 		/*
977 		 * A fenced PHB or frozen PE is handled as a normal
978 		 * event. For a dead PHB or a dead IOC we have to
979 		 * remove the affected PHBs instead.
980 		 */
981 		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
982 		    rc == EEH_NEXT_ERR_FENCED_PHB) {
983 			eeh_handle_normal_event(pe);
984 			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
985 		} else {
986 			pci_lock_rescan_remove();
987 			list_for_each_entry(hose, &hose_list, list_node) {
988 				phb_pe = eeh_phb_pe_get(hose);
989 				if (!phb_pe ||
990 				    !(phb_pe->state & EEH_PE_ISOLATED) ||
991 				    (phb_pe->state & EEH_PE_RECOVERING))
992 					continue;
993 
994 				/* Notify all devices to be down */
995 				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
996 				bus = eeh_pe_bus_get(phb_pe);
997 				eeh_pe_dev_traverse(pe,
998 					eeh_report_failure, NULL);
999 				pci_hp_remove_devices(bus);
1000 			}
1001 			pci_unlock_rescan_remove();
1002 		}
1003 
1004 		/*
1005 		 * If a dead IOC has been detected, there is no need to
1006 		 * proceed further since all PHBs have been removed.
1007 		 */
1008 		if (rc == EEH_NEXT_ERR_DEAD_IOC)
1009 			break;
1010 	} while (rc != EEH_NEXT_ERR_NONE);
1011 }
1012 
1013 /**
1014  * eeh_handle_event - Reset a PCI device after hard lockup.
1015  * @pe: EEH PE
1016  *
1017  * When the PHB detects address or data parity errors on a particular
1018  * PCI slot, the associated PE is frozen. In addition, DMA to wild
1019  * addresses (which usually happens due to bugs in device drivers or
1020  * in PCI adapter firmware) can cause EEH errors. #SERR, #PERR or
1021  * other miscellaneous PCI-related errors can also trigger EEH errors.
1022  *
1023  * The recovery process consists of unplugging the device driver (which
1024  * generates hotplug events to userspace), then issuing a PCI #RST to
1025  * the device, then reconfiguring the PCI config space for all bridges
1026  * & devices under this slot, and then finally restarting the device
1027  * drivers (which cause a second set of hotplug events to go out to
1028  * userspace).
1029  */
1030 void eeh_handle_event(struct eeh_pe *pe)
1031 {
1032 	if (pe)
1033 		eeh_handle_normal_event(pe);
1034 	else
1035 		eeh_handle_special_event();
1036 }
1037