/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/rtas.h>

struct eeh_rmv_data {
	struct list_head edev_list;
	int removed;
};

/**
 * eeh_pcid_name - Retrieve name of PCI device driver
 * @pdev: PCI device
 *
 * This routine is used to retrieve the name of the PCI device
 * driver, if one is bound.
 */
static inline const char *eeh_pcid_name(struct pci_dev *pdev)
{
	if (pdev && pdev->dev.driver)
		return pdev->dev.driver->name;
	return "";
}

/**
 * eeh_pcid_get - Get the PCI device driver
 * @pdev: PCI device
 *
 * The function retrieves the PCI device driver for the indicated
 * PCI device. It also takes a reference on the driver's module to
 * prevent it from being unloaded on the fly; otherwise a kernel
 * crash could result.
 */
static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return NULL;

	if (!try_module_get(pdev->driver->driver.owner))
		return NULL;

	return pdev->driver;
}

/**
 * eeh_pcid_put - Drop the reference on the PCI device driver
 * @pdev: PCI device
 *
 * The function drops the module reference taken by eeh_pcid_get()
 * on the driver of the indicated PCI device.
 */
static inline void eeh_pcid_put(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return;

	module_put(pdev->driver->driver.owner);
}

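/*
 * Typical usage of the two helpers above (an illustrative sketch,
 * not a call site in this file):
 *
 *	driver = eeh_pcid_get(dev);
 *	if (!driver)
 *		return NULL;
 *	... invoke driver->err_handler callbacks ...
 *	eeh_pcid_put(dev);
 *
 * The get/put pair pins the driver's module so its error handlers
 * cannot be unloaded while EEH is calling into them.
 */
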
/**
 * eeh_disable_irq - Disable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called when reporting a temporary or permanent
 * error to a PCI device, to disable that device's interrupt. If the
 * device has MSI or MSI-X enabled, no real work is needed: EEH freezes
 * DMA transfers for devices hit by EEH errors, which effectively
 * disables MSI and MSI-X delivery as well.
 */
static void eeh_disable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	/* Don't disable MSI and MSI-X interrupts. They are
	 * effectively disabled by the DMA Stopped state
	 * when an EEH error occurs.
	 */
	if (dev->msi_enabled || dev->msix_enabled)
		return;

	if (!irq_has_action(dev->irq))
		return;

	edev->mode |= EEH_DEV_IRQ_DISABLED;
	disable_irq_nosync(dev->irq);
}

/**
 * eeh_enable_irq - Enable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called to re-enable the interrupt when the
 * failed device is about to be resumed.
 */
static void eeh_enable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	if (edev->mode & EEH_DEV_IRQ_DISABLED) {
		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
		/*
		 * FIXME !!!!!
		 *
		 * This is just ass backwards. This maze has
		 * unbalanced irq_enable/disable calls. So instead of
		 * finding the root cause it works around the warning
		 * in the irq_enable code by conditionally calling
		 * into it.
		 *
		 * That's just wrong. The warning in the core code is
		 * there to tell people to fix their asymmetries in
		 * their own code, not by abusing the core information
		 * to avoid it.
		 *
		 * I so wish that the asymmetry would be the other way
		 * round and a few more irq_disable calls render that
		 * shit unusable forever.
		 *
		 *	tglx
		 */
		if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
			enable_irq(dev->irq);
	}
}

static bool eeh_dev_removed(struct eeh_dev *edev)
{
	/* EEH device removed ? */
	if (!edev || (edev->mode & EEH_DEV_REMOVED))
		return true;

	return false;
}

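/*
 * Save the PCI config space of one device during a PE traversal so
 * that eeh_dev_restore_state() can restore it after the PE reset.
 */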
static void *eeh_dev_save_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_save_state(pdev);
	return NULL;
}

/**
 * eeh_report_error - Report pci error to each device driver
 * @data: eeh device
 * @userdata: return value
 *
 * Report an EEH error to each device driver, and collect and merge
 * the device driver responses. The cumulative response is passed
 * back in "userdata".
 */
static void *eeh_report_error(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;
	dev->error_state = pci_channel_io_frozen;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET)
		*res = rc;
	if (*res == PCI_ERS_RESULT_NONE)
		*res = rc;

	edev->in_error = true;
	eeh_pcid_put(dev);
	return NULL;
}

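/*
 * Note on result merging in eeh_report_error() above and
 * eeh_report_mmio_enabled() below: PCI_ERS_RESULT_NEED_RESET from
 * any single driver trumps everything else, and otherwise the first
 * concrete answer replaces PCI_ERS_RESULT_NONE. eeh_report_reset()
 * merges differently, since after a reset a request for another
 * reset (PCI_ERS_RESULT_NEED_RESET) overrides even a disconnect.
 */
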
/**
 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 * @data: eeh device
 * @userdata: return value
 *
 * Tells each device driver that IO ports, MMIO and config space I/O
 * are now enabled. Collects and merges the device driver responses.
 * The cumulative response is passed back in "userdata".
 */
static void *eeh_report_mmio_enabled(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	if (!driver->err_handler ||
	    !driver->err_handler->mmio_enabled ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->mmio_enabled(dev);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET)
		*res = rc;
	if (*res == PCI_ERS_RESULT_NONE)
		*res = rc;

	eeh_pcid_put(dev);
	return NULL;
}

/**
 * eeh_report_reset - Tell device that slot has been reset
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called while EEH resets the particular PCI
 * device, so that the associated PCI device driver can take whatever
 * actions it needs, usually saving the data the driver requires so
 * that it can get the device working again once it is recovered.
 */
static void *eeh_report_reset(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->slot_reset ||
	    (edev->mode & EEH_DEV_NO_HANDLER) ||
	    (!edev->in_error)) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->slot_reset(dev);
	if ((*res == PCI_ERS_RESULT_NONE) ||
	    (*res == PCI_ERS_RESULT_RECOVERED))
		*res = rc;
	if (*res == PCI_ERS_RESULT_DISCONNECT &&
	    rc == PCI_ERS_RESULT_NEED_RESET)
		*res = rc;

	eeh_pcid_put(dev);
	return NULL;
}

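/*
 * Restore the PCI config space that eeh_dev_save_state() saved for
 * one device during a PE traversal.
 */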
static void *eeh_dev_restore_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_restore_state(pdev);
	return NULL;
}

/**
 * eeh_report_resume - Tell device to resume normal operations
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called to notify the device driver that it
 * may resume operations, so that it can reinitialize the recovered
 * device and make it work again.
 */
static void *eeh_report_resume(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	bool was_in_error;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	was_in_error = edev->in_error;
	edev->in_error = false;
	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->resume ||
	    (edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) {
		edev->mode &= ~EEH_DEV_NO_HANDLER;
		eeh_pcid_put(dev);
		return NULL;
	}

	driver->err_handler->resume(dev);

	eeh_pcid_put(dev);
	return NULL;
}

/**
 * eeh_report_failure - Tell device driver that device is dead.
 * @data: eeh device
 * @userdata: return value
 *
 * This informs the device driver that the device is permanently
 * dead, and that no further recovery attempts will be made on it.
 */
static void *eeh_report_failure(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;
	dev->error_state = pci_channel_io_perm_failure;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected) {
		eeh_pcid_put(dev);
		return NULL;
	}

	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);

	eeh_pcid_put(dev);
	return NULL;
}

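/*
 * Add back a VF (virtual function) that was removed during error
 * recovery. VFs whose drivers provide EEH-aware error handlers are
 * left in place by eeh_rmv_device() and recovered through the normal
 * callbacks, so they are skipped here.
 */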
static void *eeh_add_virt_device(void *data, void *userdata)
{
	struct pci_driver *driver;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	if (!edev->physfn) {
		pr_warn("%s: EEH dev %04x:%02x:%02x.%01x not for VF\n",
			__func__, edev->phb->global_number, pdn->busno,
			PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
		return NULL;
	}

	driver = eeh_pcid_get(dev);
	if (driver) {
		eeh_pcid_put(dev);
		if (driver->err_handler)
			return NULL;
	}

#ifdef CONFIG_PPC_POWERNV
	pci_iov_add_virtfn(edev->physfn, pdn->vf_index, 0);
#endif
	return NULL;
}

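/*
 * Remove a device that lacks an EEH-aware driver so that it can be
 * re-probed after the PE reset. A removed VF is unplugged
 * individually and queued on rmv_data->edev_list for re-adding
 * later; any other device is removed from the PCI subsystem
 * entirely.
 */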
static void *eeh_rmv_device(void *data, void *userdata)
{
	struct pci_driver *driver;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;
	int *removed = rmv_data ? &rmv_data->removed : NULL;

	/*
	 * Strictly speaking, we should remove the PCI bridges as well.
	 * However, that adds a lot of complexity, particularly as some
	 * of the devices under a bridge might themselves support EEH.
	 * So for simplicity we only care about ordinary PCI devices here.
	 */
	if (!dev || (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
		return NULL;

	/*
	 * We rely on the refcount-based pcibios_release_device() to
	 * detach permanently offlined PEs. Unfortunately, that's not
	 * reliable enough: we might still have permanently offlined
	 * PEs attached, but we need not take care of them or their
	 * child devices.
	 */
	if (eeh_dev_removed(edev))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (driver) {
		eeh_pcid_put(dev);
		if (removed &&
		    eeh_pe_passed(edev->pe))
			return NULL;
		if (removed &&
		    driver->err_handler &&
		    driver->err_handler->error_detected &&
		    driver->err_handler->slot_reset)
			return NULL;
	}

	/* Remove it from PCI subsystem */
	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
		 pci_name(dev));
	edev->bus = dev->bus;
	edev->mode |= EEH_DEV_DISCONNECTED;
	if (removed)
		(*removed)++;

	if (edev->physfn) {
#ifdef CONFIG_PPC_POWERNV
		struct pci_dn *pdn = eeh_dev_to_pdn(edev);

		pci_iov_remove_virtfn(edev->physfn, pdn->vf_index, 0);
		edev->pdev = NULL;

		/*
		 * We have to set the VF's PE number to an invalid one,
		 * which is required to re-plug the VF successfully.
		 */
		pdn->pe_number = IODA_INVALID_PE;
#endif
		if (rmv_data)
			list_add(&edev->rmv_list, &rmv_data->edev_list);
	} else {
		pci_lock_rescan_remove();
		pci_stop_and_remove_bus_device(dev);
		pci_unlock_rescan_remove();
	}

	return NULL;
}

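/*
 * Detach the EEH devices that eeh_rmv_device() flagged as
 * EEH_DEV_DISCONNECTED from their parent PE, so the binding can be
 * rebuilt when the devices are added back.
 */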
static void *eeh_pe_detach_dev(void *data, void *userdata)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	struct eeh_dev *edev, *tmp;

	eeh_pe_for_each_dev(pe, edev, tmp) {
		if (!(edev->mode & EEH_DEV_DISCONNECTED))
			continue;

		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
		eeh_rmv_from_parent_pe(edev);
	}

	return NULL;
}

/*
 * Explicitly clear the PE's frozen state on PowerNV, where the PE
 * stays frozen until BAR restore is completed. It's harmless to
 * clear it on pSeries. To be consistent with the PE reset (tried up
 * to 3 times), we try to clear the frozen state up to 3 times as well.
 */
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	bool *clear_sw_state = flag;
	int i, rc = 1;

	for (i = 0; rc && i < 3; i++)
		rc = eeh_unfreeze_pe(pe, clear_sw_state);

	/* Stop immediately on any errors */
	if (rc) {
		pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
			__func__, rc, pe->phb->global_number, pe->addr);
		return (void *)pe;
	}

	return NULL;
}

static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,
				     bool clear_sw_state)
{
	void *rc;

	rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
	if (!rc)
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);

	return rc ? -EIO : 0;
}

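/**
 * eeh_pe_reset_and_recover - Reset and recover the indicated PE
 * @pe: EEH PE
 *
 * Run the recovery sequence on the PE: save the device state, report
 * the error, reset and unfreeze the PE, then restore the device state
 * and resume the devices. Returns 0 on success, or a non-zero error
 * code if the reset or the unfreeze fails.
 */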
int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
	int result, ret;

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)
		return 0;

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Save states */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	/* Report error */
	eeh_pe_dev_traverse(pe, eeh_report_error, &result);

	/* Issue reset */
	ret = eeh_reset_pe(pe);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Notify completion of reset */
	eeh_pe_dev_traverse(pe, eeh_report_reset, &result);

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/* Resume */
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);

	return 0;
}

/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 * @rmv_data: bookkeeping for the devices removed during a partial hotplug
 *
 * This routine must be called to do the reset on the indicated PE.
 * During the reset, udev might be invoked because the affected
 * PCI devices will be removed and then re-added.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
				struct eeh_rmv_data *rmv_data)
{
	struct pci_bus *frozen_bus = eeh_pe_bus_get(pe);
	struct timeval tstamp;
	int cnt, rc;
	struct eeh_dev *edev;

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;
	tstamp = pe->tstamp;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwards. The attached EEH
	 * devices are expected to be re-attached soon after
	 * pcibios_add_pci_devices() is called.
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (bus) {
		if (pe->type & EEH_PE_VF) {
			eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
		} else {
			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
			pci_lock_rescan_remove();
			pcibios_remove_pci_devices(bus);
			pci_unlock_rescan_remove();
		}
	} else if (frozen_bus) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
	}

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses, so we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	rc = eeh_reset_pe(pe);
	if (rc)
		return rc;

	pci_lock_rescan_remove();

	/* Restore PE */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc) {
		pci_unlock_rescan_remove();
		return rc;
	}

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet.  Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (bus) {
		pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
		ssleep(5);

		/*
		 * The EEH device is still connected with its parent
		 * PE. We should disconnect it so the binding can be
		 * rebuilt when adding PCI devices.
		 */
		edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		if (pe->type & EEH_PE_VF)
			eeh_add_virt_device(edev, NULL);
		else
			pcibios_add_pci_devices(bus);
	} else if (frozen_bus && rmv_data->removed) {
		pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
		ssleep(5);

		edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		if (pe->type & EEH_PE_VF)
			eeh_add_virt_device(edev, NULL);
		else
			pcibios_add_pci_devices(frozen_bus);
	}
	eeh_pe_state_clear(pe, EEH_PE_KEEP);

	pe->tstamp = tstamp;
	pe->freeze_count = cnt;

	pci_unlock_rescan_remove();
	return 0;
}

/* The longest amount of time to wait for a PCI device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300

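/**
 * eeh_handle_normal_event - Handle EEH events on a specific PE
 * @pe: EEH PE
 *
 * Attempts to recover the given PE: the device drivers are notified,
 * the PE is reset if any driver (or a fenced PHB) requires it, and
 * the devices are then resumed. After too many failures, or when
 * recovery fails, the devices are permanently removed instead.
 */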
static void eeh_handle_normal_event(struct eeh_pe *pe)
{
	struct pci_bus *frozen_bus;
	struct eeh_dev *edev, *tmp;
	int rc = 0;
	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
	struct eeh_rmv_data rmv_data = {LIST_HEAD_INIT(rmv_data.edev_list), 0};

	frozen_bus = eeh_pe_bus_get(pe);
	if (!frozen_bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
			__func__, pe->phb->global_number, pe->addr);
		return;
	}

	eeh_pe_update_time_stamp(pe);
	pe->freeze_count++;
	if (pe->freeze_count > eeh_max_freezes)
		goto excess_failures;
	pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
		pe->freeze_count);

	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset.  Each child gets a report of the
	 * status ... if any child can't handle the reset, then the entire
	 * slot is dlpar removed and added.
	 *
	 * When the PHB is fenced, we have to issue a reset to recover from
	 * the error. Override the result if necessary so that a partial
	 * hotplug is done in this case.
	 */
	pr_info("EEH: Notify device drivers to shutdown\n");
	eeh_pe_dev_traverse(pe, eeh_report_error, &result);
	if ((pe->type & EEH_PE_PHB) &&
	    result != PCI_ERS_RESULT_NONE &&
	    result != PCI_ERS_RESULT_NEED_RESET)
		result = PCI_ERS_RESULT_NEED_RESET;

	/* Get the current PCI slot state. This can take a long time,
	 * sometimes over 300 seconds for certain systems.
	 */
	rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
		pr_warn("EEH: Permanent failure\n");
		goto hard_fail;
	}

	/* Since rtas may enable MMIO when posting the error log,
	 * don't post the error log until after all dev drivers
	 * have been informed.
	 */
	pr_info("EEH: Collect temporary log\n");
	eeh_slot_error_detail(pe, EEH_LOG_TEMP);

	/* If all device drivers were EEH-unaware, then shut
	 * down all of the device drivers, and hope they
	 * go down willingly, without panicking the system.
	 */
	if (result == PCI_ERS_RESULT_NONE) {
		pr_info("EEH: Reset with hotplug activity\n");
		rc = eeh_reset_device(pe, frozen_bus, NULL);
		if (rc) {
			pr_warn("%s: Unable to reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}
	}

	/* If all devices reported they can proceed, then re-enable MMIO */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable I/O for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			pr_info("EEH: Notify device drivers to resume I/O\n");
			eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
		}
	}

	/* If all devices reported they can proceed, then re-enable DMA */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable DMA for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			/*
			 * We didn't do a PE reset in this case, so the
			 * PE is still in the frozen state. Clear that
			 * before resuming the PE.
			 */
			eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
			result = PCI_ERS_RESULT_RECOVERED;
		}
	}

	/* If any device has a hard failure, then shut off everything. */
	if (result == PCI_ERS_RESULT_DISCONNECT) {
		pr_warn("EEH: Device driver gave up\n");
		goto hard_fail;
	}

	/* If any device called out for a reset, then reset the slot */
	if (result == PCI_ERS_RESULT_NEED_RESET) {
		pr_info("EEH: Reset without hotplug activity\n");
		rc = eeh_reset_device(pe, NULL, &rmv_data);
		if (rc) {
			pr_warn("%s: Cannot reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}

		pr_info("EEH: Notify device drivers "
			"of the completion of reset\n");
		result = PCI_ERS_RESULT_NONE;
		eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
	}

	/* All devices should claim they have recovered by now. */
	if ((result != PCI_ERS_RESULT_RECOVERED) &&
	    (result != PCI_ERS_RESULT_NONE)) {
		pr_warn("EEH: Not recovered\n");
		goto hard_fail;
	}

	/*
	 * For the hot-removed VFs, we should add them back after the
	 * PF gets recovered properly.
	 */
	list_for_each_entry_safe(edev, tmp, &rmv_data.edev_list, rmv_list) {
		eeh_add_virt_device(edev, NULL);
		list_del(&edev->rmv_list);
	}

	/* Tell all device drivers that they can resume operations */
	pr_info("EEH: Notify device drivers to resume\n");
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	return;

excess_failures:
	/*
	 * About 90% of all real-life EEH failures in the field
	 * are due to poorly seated PCI cards. Only 10% or so are
	 * due to actual, failed cards.
	 */
	pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
	       "last hour and has been permanently disabled.\n"
	       "Please try reseating or replacing it.\n",
		pe->phb->global_number, pe->addr,
		pe->freeze_count);
	goto perm_error;

hard_fail:
	pr_err("EEH: Unable to recover from failure of PHB#%d-PE#%x.\n"
	       "Please try reseating or replacing it\n",
		pe->phb->global_number, pe->addr);

perm_error:
	eeh_slot_error_detail(pe, EEH_LOG_PERM);

	/* Notify all devices that they're about to go down. */
	eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);

	/* Mark the PE to be removed permanently */
	eeh_pe_state_mark(pe, EEH_PE_REMOVED);

	/*
	 * Shut down the device drivers for good. We mark all removed
	 * devices correctly to avoid accessing their PCI config space
	 * any more.
	 */
	if (frozen_bus) {
		if (pe->type & EEH_PE_VF) {
			eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
			eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
		} else {
			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
			eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

			pci_lock_rescan_remove();
			pcibios_remove_pci_devices(frozen_bus);
			pci_unlock_rescan_remove();
		}
	}
}

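/**
 * eeh_handle_special_event - Handle EEH events without a specific failing PE
 *
 * Polls eeh_ops->next_error() for platform-level errors. A frozen PE
 * or fenced PHB is recovered like a normal event, while the devices
 * behind a dead PHB or a dead IOC are reported as failed and removed
 * outright.
 */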
static void eeh_handle_special_event(void)
{
	struct eeh_pe *pe, *phb_pe;
	struct pci_bus *bus;
	struct pci_controller *hose;
	unsigned long flags;
	int rc;

	do {
		rc = eeh_ops->next_error(&pe);

		switch (rc) {
		case EEH_NEXT_ERR_DEAD_IOC:
			/* Mark all PHBs in dead state */
			eeh_serialize_lock(&flags);

			/* Purge all events */
			eeh_remove_event(NULL, true);

			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe)
					continue;

				eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
			}

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_FROZEN_PE:
		case EEH_NEXT_ERR_FENCED_PHB:
		case EEH_NEXT_ERR_DEAD_PHB:
			/* Mark the PE in fenced state */
			eeh_serialize_lock(&flags);

			/* Purge all events of the PHB */
			eeh_remove_event(pe, true);

			if (rc == EEH_NEXT_ERR_DEAD_PHB)
				eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
			else
				eeh_pe_state_mark(pe,
					EEH_PE_ISOLATED | EEH_PE_RECOVERING);

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_NONE:
			return;
		default:
			pr_warn("%s: Invalid value %d from next_error()\n",
				__func__, rc);
			return;
		}

		/*
		 * A fenced PHB or frozen PE is handled as a normal
		 * event, while for a dead PHB or a dead IOC we have
		 * to remove the affected PHBs instead.
		 */
		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
		    rc == EEH_NEXT_ERR_FENCED_PHB) {
			eeh_handle_normal_event(pe);
			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		} else {
			pci_lock_rescan_remove();
			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe ||
				    !(phb_pe->state & EEH_PE_ISOLATED) ||
				    (phb_pe->state & EEH_PE_RECOVERING))
					continue;

				/* Notify all devices to be down */
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
				bus = eeh_pe_bus_get(phb_pe);
				eeh_pe_dev_traverse(pe,
					eeh_report_failure, NULL);
				pcibios_remove_pci_devices(bus);
			}
			pci_unlock_rescan_remove();
		}

		/*
		 * If we have detected a dead IOC, we needn't proceed
		 * any further since all PHBs would have been removed.
		 */
		if (rc == EEH_NEXT_ERR_DEAD_IOC)
			break;
	} while (rc != EEH_NEXT_ERR_NONE);
}

/**
 * eeh_handle_event - Reset a PCI device after hard lockup.
 * @pe: EEH PE
 *
 * When a PHB detects address or data parity errors on a particular
 * PCI slot, the associated PE is frozen. In addition, DMAs to wild
 * addresses (which usually happen due to bugs in device drivers or
 * in PCI adapter firmware) can cause EEH errors; #SERR, #PERR and
 * other miscellaneous PCI-related errors can also trigger them.
 *
 * The recovery process consists of unplugging the device driver
 * (which generates hotplug events to userspace), issuing a PCI #RST
 * to the device, reconfiguring the PCI config space for all bridges
 * and devices under this slot, and finally restarting the device
 * drivers (which causes a second set of hotplug events to go out to
 * userspace).
 */
void eeh_handle_event(struct eeh_pe *pe)
{
	if (pe)
		eeh_handle_normal_event(pe);
	else
		eeh_handle_special_event();
}
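
/*
 * Illustrative call path (a sketch based on eeh_event.c, not code in
 * this file): the EEH event kthread dequeues struct eeh_event entries
 * and calls eeh_handle_event(event->pe), where a NULL PE indicates a
 * special (IOC- or PHB-level) event.
 */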
1022