xref: /openbmc/linux/arch/powerpc/kernel/eeh_driver.c (revision 6d99a79c)
1 /*
2  * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
3  * Copyright IBM Corp. 2004 2005
4  * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
5  *
6  * All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or (at
11  * your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
16  * NON INFRINGEMENT.  See the GNU General Public License for more
17  * details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  *
23  * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
24  */
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/irq.h>
28 #include <linux/module.h>
29 #include <linux/pci.h>
30 #include <asm/eeh.h>
31 #include <asm/eeh_event.h>
32 #include <asm/ppc-pci.h>
33 #include <asm/pci-bridge.h>
34 #include <asm/prom.h>
35 #include <asm/rtas.h>
36 
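/*
 * Book-keeping for devices removed while a PE is recovered: hot-removed
 * VFs are kept on removed_vf_list so they can be re-added once the PF
 * has recovered, and removed_dev_count tracks how many devices were
 * unplugged in total.
 */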
37 struct eeh_rmv_data {
38 	struct list_head removed_vf_list;
39 	int removed_dev_count;
40 };
41 
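/*
 * Rank a pci_ers_result by how much recovery action it demands; the larger
 * value wins when results from several drivers are merged, so (for example)
 * NEED_RESET overrides CAN_RECOVER, which overrides RECOVERED.
 */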
42 static int eeh_result_priority(enum pci_ers_result result)
43 {
44 	switch (result) {
45 	case PCI_ERS_RESULT_NONE:
46 		return 1;
47 	case PCI_ERS_RESULT_NO_AER_DRIVER:
48 		return 2;
49 	case PCI_ERS_RESULT_RECOVERED:
50 		return 3;
51 	case PCI_ERS_RESULT_CAN_RECOVER:
52 		return 4;
53 	case PCI_ERS_RESULT_DISCONNECT:
54 		return 5;
55 	case PCI_ERS_RESULT_NEED_RESET:
56 		return 6;
57 	default:
58 		WARN_ONCE(1, "Unknown pci_ers_result value: %d\n", (int)result);
59 		return 0;
60 	}
61 }
62 
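/* Human-readable name of a pci_ers_result, for use in log messages. */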
63 const char *pci_ers_result_name(enum pci_ers_result result)
64 {
65 	switch (result) {
66 	case PCI_ERS_RESULT_NONE:
67 		return "none";
68 	case PCI_ERS_RESULT_CAN_RECOVER:
69 		return "can recover";
70 	case PCI_ERS_RESULT_NEED_RESET:
71 		return "need reset";
72 	case PCI_ERS_RESULT_DISCONNECT:
73 		return "disconnect";
74 	case PCI_ERS_RESULT_RECOVERED:
75 		return "recovered";
76 	case PCI_ERS_RESULT_NO_AER_DRIVER:
77 		return "no AER driver";
78 	default:
79 		WARN_ONCE(1, "Unknown result type: %d\n", (int)result);
80 		return "unknown";
81 	}
82 }
83 
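/* Log helper: prefix the message with the PE number and PCI address of @edev. */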
84 static __printf(2, 3) void eeh_edev_info(const struct eeh_dev *edev,
85 					 const char *fmt, ...)
86 {
87 	struct va_format vaf;
88 	va_list args;
89 
90 	va_start(args, fmt);
91 
92 	vaf.fmt = fmt;
93 	vaf.va = &args;
94 
95 	printk(KERN_INFO "EEH: PE#%x (PCI %s): %pV\n", edev->pe_config_addr,
96 	       edev->pdev ? dev_name(&edev->pdev->dev) : "none", &vaf);
97 
98 	va_end(args);
99 }
100 
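/*
 * Merge two driver-reported results, keeping whichever one demands the
 * stronger recovery action (see eeh_result_priority()).
 */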
101 static enum pci_ers_result pci_ers_merge_result(enum pci_ers_result old,
102 						enum pci_ers_result new)
103 {
104 	if (eeh_result_priority(new) > eeh_result_priority(old))
105 		return new;
106 	return old;
107 }
108 
109 static bool eeh_dev_removed(struct eeh_dev *edev)
110 {
111 	return !edev || (edev->mode & EEH_DEV_REMOVED);
112 }
113 
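/*
 * A device can be acted on (reported to its driver, reset, ...) only if a
 * pci_dev is still attached, the device hasn't been removed, and its PE
 * hasn't been passed through to a guest.
 */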
114 static bool eeh_edev_actionable(struct eeh_dev *edev)
115 {
116 	return (edev->pdev && !eeh_dev_removed(edev) &&
117 		!eeh_pe_passed(edev->pe));
118 }
119 
120 /**
121  * eeh_pcid_get - Get the PCI device driver
122  * @pdev: PCI device
123  *
124  * The function is used to retrieve the PCI device driver for
125  * the indicated PCI device. Besides, we will increase the reference
126  * of the PCI device driver to prevent that being unloaded on
127  * the fly. Otherwise, kernel crash would be seen.
128  */
129 static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
130 {
131 	if (!pdev || !pdev->driver)
132 		return NULL;
133 
134 	if (!try_module_get(pdev->driver->driver.owner))
135 		return NULL;
136 
137 	return pdev->driver;
138 }
139 
140 /**
141  * eeh_pcid_put - Dereference on the PCI device driver
142  * @pdev: PCI device
143  *
144  * The function is called to do dereference on the PCI device
145  * driver of the indicated PCI device.
146  */
147 static inline void eeh_pcid_put(struct pci_dev *pdev)
148 {
149 	if (!pdev || !pdev->driver)
150 		return;
151 
152 	module_put(pdev->driver->driver.owner);
153 }
154 
155 /**
156  * eeh_disable_irq - Disable interrupt for the recovering device
157  * @edev: EEH device
158  *
159  * This routine must be called when reporting a temporary or permanent
160  * error to a PCI device, to disable that device's interrupt. If the
161  * device has MSI or MSI-X enabled, there is nothing to do here: EEH
162  * freezes DMA for devices that hit an EEH error, which effectively
163  * disables MSI and MSI-X as well.
164  */
165 static void eeh_disable_irq(struct eeh_dev *edev)
166 {
167 	/* Don't disable MSI and MSI-X interrupts. They are
168 	 * effectively disabled by the DMA Stopped state
169 	 * when an EEH error occurs.
170 	 */
171 	if (edev->pdev->msi_enabled || edev->pdev->msix_enabled)
172 		return;
173 
174 	if (!irq_has_action(edev->pdev->irq))
175 		return;
176 
177 	edev->mode |= EEH_DEV_IRQ_DISABLED;
178 	disable_irq_nosync(edev->pdev->irq);
179 }
180 
181 /**
182  * eeh_enable_irq - Enable interrupt for the recovering device
183  * @edev: EEH device
184  *
185  * This routine must be called to re-enable the interrupt once the
186  * failed device can be resumed.
187  */
188 static void eeh_enable_irq(struct eeh_dev *edev)
189 {
190 	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
191 		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
192 		/*
193 		 * FIXME !!!!!
194 		 *
195 		 * This is just ass backwards. This maze has
196 		 * unbalanced irq_enable/disable calls. So instead of
197 		 * finding the root cause it works around the warning
198 		 * in the irq_enable code by conditionally calling
199 		 * into it.
200 		 *
201 		 * That's just wrong. The warning in the core code is
202 		 * there to tell people to fix their asymmetries in
203 		 * their own code, not by abusing the core information
204 		 * to avoid it.
205 		 *
206 		 * I so wish that the asymmetry would be the other way
207 		 * round and a few more irq_disable calls render that
208 		 * shit unusable forever.
209 		 *
210 		 *	tglx
211 		 */
212 		if (irqd_irq_disabled(irq_get_irq_data(edev->pdev->irq)))
213 			enable_irq(edev->pdev->irq);
214 	}
215 }
216 
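/*
 * Traversal callback: save a device's PCI config state ahead of a reset,
 * to be restored later by eeh_dev_restore_state().
 */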
217 static void *eeh_dev_save_state(struct eeh_dev *edev, void *userdata)
218 {
219 	struct pci_dev *pdev;
220 
221 	if (!edev)
222 		return NULL;
223 
224 	/*
225 	 * Some adapters' config space cannot be accessed without
226 	 * causing a fenced PHB, so the contents of their config space
227 	 * are not saved here; instead, they are restored from the
228 	 * initial config space that was saved when the EEH device
229 	 * was created.
230 	 */
231 	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
232 		return NULL;
233 
234 	pdev = eeh_dev_to_pci_dev(edev);
235 	if (!pdev)
236 		return NULL;
237 
238 	pci_save_state(pdev);
239 	return NULL;
240 }
241 
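/*
 * Set the pci_channel_state of every actionable device in the PE subtree
 * rooted at @root.
 */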
242 static void eeh_set_channel_state(struct eeh_pe *root, enum pci_channel_state s)
243 {
244 	struct eeh_pe *pe;
245 	struct eeh_dev *edev, *tmp;
246 
247 	eeh_for_each_pe(root, pe)
248 		eeh_pe_for_each_dev(pe, edev, tmp)
249 			if (eeh_edev_actionable(edev))
250 				edev->pdev->error_state = s;
251 }
252 
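/*
 * Enable or disable the interrupt of every actionable device in the PE
 * subtree rooted at @root, holding a driver reference across the change.
 */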
253 static void eeh_set_irq_state(struct eeh_pe *root, bool enable)
254 {
255 	struct eeh_pe *pe;
256 	struct eeh_dev *edev, *tmp;
257 
258 	eeh_for_each_pe(root, pe) {
259 		eeh_pe_for_each_dev(pe, edev, tmp) {
260 			if (!eeh_edev_actionable(edev))
261 				continue;
262 
263 			if (!eeh_pcid_get(edev->pdev))
264 				continue;
265 
266 			if (enable)
267 				eeh_enable_irq(edev);
268 			else
269 				eeh_disable_irq(edev);
270 
271 			eeh_pcid_put(edev->pdev);
272 		}
273 	}
274 }
275 
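/*
 * Invoke one error-reporting callback (error_detected, slot_reset, resume,
 * ...) on a single device and fold the driver's answer into the aggregate
 * result. eeh_pe_report() below runs the callback across a whole PE subtree.
 */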
276 typedef enum pci_ers_result (*eeh_report_fn)(struct eeh_dev *,
277 					     struct pci_driver *);
278 static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn,
279 			       enum pci_ers_result *result)
280 {
281 	struct pci_driver *driver;
282 	enum pci_ers_result new_result;
283 
284 	if (!edev->pdev) {
285 		eeh_edev_info(edev, "no device");
286 		return;
287 	}
288 	device_lock(&edev->pdev->dev);
289 	if (eeh_edev_actionable(edev)) {
290 		driver = eeh_pcid_get(edev->pdev);
291 
292 		if (!driver)
293 			eeh_edev_info(edev, "no driver");
294 		else if (!driver->err_handler)
295 			eeh_edev_info(edev, "driver not EEH aware");
296 		else if (edev->mode & EEH_DEV_NO_HANDLER)
297 			eeh_edev_info(edev, "driver bound too late");
298 		else {
299 			new_result = fn(edev, driver);
300 			eeh_edev_info(edev, "%s driver reports: '%s'",
301 				      driver->name,
302 				      pci_ers_result_name(new_result));
303 			if (result)
304 				*result = pci_ers_merge_result(*result,
305 							       new_result);
306 		}
307 		if (driver)
308 			eeh_pcid_put(edev->pdev);
309 	} else {
310 		eeh_edev_info(edev, "not actionable (%d,%d,%d)", !!edev->pdev,
311 			      !eeh_dev_removed(edev), !eeh_pe_passed(edev->pe));
312 	}
313 	device_unlock(&edev->pdev->dev);
314 }
315 
316 static void eeh_pe_report(const char *name, struct eeh_pe *root,
317 			  eeh_report_fn fn, enum pci_ers_result *result)
318 {
319 	struct eeh_pe *pe;
320 	struct eeh_dev *edev, *tmp;
321 
322 	pr_info("EEH: Beginning: '%s'\n", name);
323 	eeh_for_each_pe(root, pe) eeh_pe_for_each_dev(pe, edev, tmp)
324 		eeh_pe_report_edev(edev, fn, result);
325 	if (result)
326 		pr_info("EEH: Finished:'%s' with aggregate recovery state:'%s'\n",
327 			name, pci_ers_result_name(*result));
328 	else
329 		pr_info("EEH: Finished:'%s'\n", name);
330 }
331 
332 /**
333  * eeh_report_error - Report pci error to each device driver
334  * @edev: eeh device
335  * @driver: device's PCI driver
336  *
337  * Report an EEH error to each device driver.
338  */
339 static enum pci_ers_result eeh_report_error(struct eeh_dev *edev,
340 					    struct pci_driver *driver)
341 {
342 	enum pci_ers_result rc;
343 	struct pci_dev *dev = edev->pdev;
344 
345 	if (!driver->err_handler->error_detected)
346 		return PCI_ERS_RESULT_NONE;
347 
348 	eeh_edev_info(edev, "Invoking %s->error_detected(IO frozen)",
349 		      driver->name);
350 	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
351 
352 	edev->in_error = true;
353 	pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
354 	return rc;
355 }
356 
357 /**
358  * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
359  * @edev: eeh device
360  * @driver: device's PCI driver
361  *
362  * Tells each device driver that IO ports, MMIO and config space I/O
363  * are now enabled.
364  */
365 static enum pci_ers_result eeh_report_mmio_enabled(struct eeh_dev *edev,
366 						   struct pci_driver *driver)
367 {
368 	if (!driver->err_handler->mmio_enabled)
369 		return PCI_ERS_RESULT_NONE;
370 	eeh_edev_info(edev, "Invoking %s->mmio_enabled()", driver->name);
371 	return driver->err_handler->mmio_enabled(edev->pdev);
372 }
373 
374 /**
375  * eeh_report_reset - Tell device that slot has been reset
376  * @edev: eeh device
377  * @driver: device's PCI driver
378  *
379  * This routine must be called while EEH is resetting the particular
380  * PCI device, so that the associated device driver can take whatever
381  * action it needs, usually saving the data it requires so that it
382  * can work again once the device has been recovered.
383  */
384 static enum pci_ers_result eeh_report_reset(struct eeh_dev *edev,
385 					    struct pci_driver *driver)
386 {
387 	if (!driver->err_handler->slot_reset || !edev->in_error)
388 		return PCI_ERS_RESULT_NONE;
389 	eeh_edev_info(edev, "Invoking %s->slot_reset()", driver->name);
390 	return driver->err_handler->slot_reset(edev->pdev);
391 }
392 
393 static void *eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
394 {
395 	struct pci_dev *pdev;
396 
397 	if (!edev)
398 		return NULL;
399 
400 	/*
401 	 * The contents of the config space aren't saved here because
402 	 * config space access is blocked on some adapters (see
403 	 * eeh_dev_save_state()); for those, restore the initial config
404 	 * space that was saved when the EEH device was created.
405 	 */
406 	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
407 		if (list_is_last(&edev->entry, &edev->pe->edevs))
408 			eeh_pe_restore_bars(edev->pe);
409 
410 		return NULL;
411 	}
412 
413 	pdev = eeh_dev_to_pci_dev(edev);
414 	if (!pdev)
415 		return NULL;
416 
417 	pci_restore_state(pdev);
418 	return NULL;
419 }
420 
421 /**
422  * eeh_report_resume - Tell device to resume normal operations
423  * @edev: eeh device
424  * @driver: device's PCI driver
425  *
426  * This routine must be called to notify the device driver that it
427  * may resume operations, so that it can do whatever re-initialization
428  * is needed to make the recovered device work again.
429  */
430 static enum pci_ers_result eeh_report_resume(struct eeh_dev *edev,
431 					     struct pci_driver *driver)
432 {
433 	if (!driver->err_handler->resume || !edev->in_error)
434 		return PCI_ERS_RESULT_NONE;
435 
436 	eeh_edev_info(edev, "Invoking %s->resume()", driver->name);
437 	driver->err_handler->resume(edev->pdev);
438 
439 	pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_RECOVERED);
440 #ifdef CONFIG_PCI_IOV
441 	if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
442 		eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
443 #endif
444 	return PCI_ERS_RESULT_NONE;
445 }
446 
447 /**
448  * eeh_report_failure - Tell device driver that device is dead.
449  * @edev: eeh device
450  * @driver: device's PCI driver
451  *
452  * This informs the device driver that the device is permanently
453  * dead, and that no further recovery attempts will be made on it.
454  */
455 static enum pci_ers_result eeh_report_failure(struct eeh_dev *edev,
456 					      struct pci_driver *driver)
457 {
458 	enum pci_ers_result rc;
459 
460 	if (!driver->err_handler->error_detected)
461 		return PCI_ERS_RESULT_NONE;
462 
463 	eeh_edev_info(edev, "Invoking %s->error_detected(permanent failure)",
464 		      driver->name);
465 	rc = driver->err_handler->error_detected(edev->pdev,
466 						 pci_channel_io_perm_failure);
467 
468 	pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_DISCONNECT);
469 	return rc;
470 }
471 
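/*
 * Re-add a removed VF under its parent PF. If the VF is still bound to a
 * driver that provides error handlers, nothing needs to be done.
 */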
472 static void *eeh_add_virt_device(struct eeh_dev *edev)
473 {
474 	struct pci_driver *driver;
475 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
476 	struct pci_dn *pdn = eeh_dev_to_pdn(edev);
477 
478 	if (!(edev->physfn)) {
479 		pr_warn("%s: EEH dev %04x:%02x:%02x.%01x not for VF\n",
480 			__func__, pdn->phb->global_number, pdn->busno,
481 			PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
482 		return NULL;
483 	}
484 
485 	driver = eeh_pcid_get(dev);
486 	if (driver) {
487 		if (driver->err_handler) {
488 			eeh_pcid_put(dev);
489 			return NULL;
490 		}
491 		eeh_pcid_put(dev);
492 	}
493 
494 #ifdef CONFIG_PCI_IOV
495 	pci_iov_add_virtfn(edev->physfn, pdn->vf_index);
496 #endif
497 	return NULL;
498 }
499 
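/*
 * Traversal callback: unplug a device ahead of (or instead of) recovery.
 * When called with rmv_data, devices whose driver implements both
 * error_detected and slot_reset are left alone; everything else is removed.
 * VFs are unplugged individually (and recorded on rmv_data->removed_vf_list
 * when rmv_data is given), while other devices are removed from the PCI
 * subsystem entirely.
 */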
500 static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata)
501 {
502 	struct pci_driver *driver;
503 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
504 	struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;
505 
506 	/*
507 	 * Ideally the PCI bridges would be removed as well, but doing
508 	 * that adds a lot of complexity, particularly because some of
509 	 * the devices under a bridge might support EEH. So only
510 	 * non-bridge PCI devices are handled here, for the sake of
511 	 * simplicity.
512 	 */
513 	if (!dev || (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
514 		return NULL;
515 
516 	/*
517 	 * We rely on the reference-counted pcibios_release_device() to
518 	 * detach permanently offlined PEs. Unfortunately, that isn't
519 	 * entirely reliable: permanently offlined PEs may still be
520 	 * attached, but neither they nor their child devices need any
521 	 * handling here.
522 	 */
523 	if (eeh_dev_removed(edev))
524 		return NULL;
525 
526 	if (rmv_data) {
527 		if (eeh_pe_passed(edev->pe))
528 			return NULL;
529 		driver = eeh_pcid_get(dev);
530 		if (driver) {
531 			if (driver->err_handler &&
532 			    driver->err_handler->error_detected &&
533 			    driver->err_handler->slot_reset) {
534 				eeh_pcid_put(dev);
535 				return NULL;
536 			}
537 			eeh_pcid_put(dev);
538 		}
539 	}
540 
541 	/* Remove it from PCI subsystem */
542 	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
543 		 pci_name(dev));
544 	edev->mode |= EEH_DEV_DISCONNECTED;
545 	if (rmv_data)
546 		rmv_data->removed_dev_count++;
547 
548 	if (edev->physfn) {
549 #ifdef CONFIG_PCI_IOV
550 		struct pci_dn *pdn = eeh_dev_to_pdn(edev);
551 
552 		pci_iov_remove_virtfn(edev->physfn, pdn->vf_index);
553 		edev->pdev = NULL;
554 
555 		/*
556 		 * The VF's PE number has to be set to the invalid value,
557 		 * which is required to re-plug the VF successfully.
558 		 */
559 		pdn->pe_number = IODA_INVALID_PE;
560 #endif
561 		if (rmv_data)
562 			list_add(&edev->rmv_entry, &rmv_data->removed_vf_list);
563 	} else {
564 		pci_lock_rescan_remove();
565 		pci_stop_and_remove_bus_device(dev);
566 		pci_unlock_rescan_remove();
567 	}
568 
569 	return NULL;
570 }
571 
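/*
 * Traversal callback: detach any device marked EEH_DEV_DISCONNECTED from
 * its parent PE, so the binding can be rebuilt when devices are re-added.
 */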
572 static void *eeh_pe_detach_dev(struct eeh_pe *pe, void *userdata)
573 {
574 	struct eeh_dev *edev, *tmp;
575 
576 	eeh_pe_for_each_dev(pe, edev, tmp) {
577 		if (!(edev->mode & EEH_DEV_DISCONNECTED))
578 			continue;
579 
580 		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
581 		eeh_rmv_from_parent_pe(edev);
582 	}
583 
584 	return NULL;
585 }
586 
587 /*
588  * Explicitly clear the PE's frozen state on PowerNV, where the PE is
589  * kept frozen until the BAR restore has completed. Clearing it is
590  * harmless on pSeries. To be consistent with the PE reset (which is
591  * attempted up to 3 times), clearing the frozen state is attempted
592  * up to 3 times as well.
593  */
594 static void *__eeh_clear_pe_frozen_state(struct eeh_pe *pe, void *flag)
595 {
596 	bool clear_sw_state = *(bool *)flag;
597 	int i, rc = 1;
598 
599 	for (i = 0; rc && i < 3; i++)
600 		rc = eeh_unfreeze_pe(pe, clear_sw_state);
601 
602 	/* Stop immediately on any errors */
603 	if (rc) {
604 		pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
605 			__func__, rc, pe->phb->global_number, pe->addr);
606 		return (void *)pe;
607 	}
608 
609 	return NULL;
610 }
611 
612 static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,
613 				     bool clear_sw_state)
614 {
615 	void *rc;
616 
617 	rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
618 	if (!rc)
619 		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
620 
621 	return rc ? -EIO : 0;
622 }
623 
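/*
 * Reset and recover a PE without involving the device drivers: save each
 * device's config state, issue a full PE reset, clear the frozen state and
 * restore the saved state, with the PE held in EEH_PE_RECOVERING throughout.
 */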
624 int eeh_pe_reset_and_recover(struct eeh_pe *pe)
625 {
626 	int ret;
627 
628 	/* Bail if the PE is being recovered */
629 	if (pe->state & EEH_PE_RECOVERING)
630 		return 0;
631 
632 	/* Put the PE into recovery mode */
633 	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
634 
635 	/* Save states */
636 	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);
637 
638 	/* Issue reset */
639 	ret = eeh_pe_reset_full(pe);
640 	if (ret) {
641 		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
642 		return ret;
643 	}
644 
645 	/* Unfreeze the PE */
646 	ret = eeh_clear_pe_frozen_state(pe, true);
647 	if (ret) {
648 		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
649 		return ret;
650 	}
651 
652 	/* Restore device state */
653 	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);
654 
655 	/* Clear recovery mode */
656 	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
657 
658 	return 0;
659 }
660 
661 /**
662  * eeh_reset_device - Perform actual reset of a PCI slot
663  * @pe: EEH PE
664  * @bus: PCI bus corresponding to the isolated slot
665  * @rmv_data: Optional, list to record removed devices
666  * @driver_eeh_aware: Does the device's driver provide EEH support?
667  *
668  * This routine must be called to reset the indicated PE. During the
669  * reset, udev events may be generated because the affected PCI devices
670  * will be removed and then re-added.
671  */
672 static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
673 			    struct eeh_rmv_data *rmv_data,
674 			    bool driver_eeh_aware)
675 {
676 	time64_t tstamp;
677 	int cnt, rc;
678 	struct eeh_dev *edev;
679 
680 	/* pcibios will clear the counter; save the value */
681 	cnt = pe->freeze_count;
682 	tstamp = pe->tstamp;
683 
684 	/*
685 	 * Don't remove the corresponding PE instances, because the
686 	 * information is still needed afterwards. The attached EEH
687 	 * devices are expected to be re-attached shortly, when
688 	 * pci_hp_add_devices() is called.
689 	 */
690 	eeh_pe_state_mark(pe, EEH_PE_KEEP);
691 	if (driver_eeh_aware || (pe->type & EEH_PE_VF)) {
692 		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
693 	} else {
694 		pci_lock_rescan_remove();
695 		pci_hp_remove_devices(bus);
696 		pci_unlock_rescan_remove();
697 	}
698 
699 	/*
700 	 * Reset the pci controller. (Asserts RST#; resets config space).
701 	 * Reconfigure bridges and devices. Don't try to bring the system
702 	 * up if the reset failed for some reason.
703 	 *
704 	 * During the reset, it's very dangerous to have uncontrolled PCI
705 	 * config accesses. So we prefer to block them. However, controlled
706 	 * PCI config accesses initiated from EEH itself are allowed.
707 	 */
708 	rc = eeh_pe_reset_full(pe);
709 	if (rc)
710 		return rc;
711 
712 	pci_lock_rescan_remove();
713 
714 	/* Restore PE */
715 	eeh_ops->configure_bridge(pe);
716 	eeh_pe_restore_bars(pe);
717 
718 	/* Clear frozen state */
719 	rc = eeh_clear_pe_frozen_state(pe, false);
720 	if (rc) {
721 		pci_unlock_rescan_remove();
722 		return rc;
723 	}
724 
725 	/* Give the system 5 seconds to finish running the user-space
726 	 * hotplug shutdown scripts, e.g. ifdown for ethernet.  Yes,
727 	 * this is a hack, but if we don't do this, and try to bring
728 	 * the device up before the scripts have taken it down,
729 	 * potentially weird things happen.
730 	 */
731 	if (!driver_eeh_aware || rmv_data->removed_dev_count) {
732 		pr_info("EEH: Sleep 5s ahead of %s hotplug\n",
733 			(driver_eeh_aware ? "partial" : "complete"));
734 		ssleep(5);
735 
736 		/*
737 		 * The EEH device is still connected with its parent
738 		 * PE. We should disconnect it so the binding can be
739 		 * rebuilt when adding PCI devices.
740 		 */
741 		edev = list_first_entry(&pe->edevs, struct eeh_dev, entry);
742 		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
743 		if (pe->type & EEH_PE_VF) {
744 			eeh_add_virt_device(edev);
745 		} else {
746 			if (!driver_eeh_aware)
747 				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
748 			pci_hp_add_devices(bus);
749 		}
750 	}
751 	eeh_pe_state_clear(pe, EEH_PE_KEEP);
752 
753 	pe->tstamp = tstamp;
754 	pe->freeze_count = cnt;
755 
756 	pci_unlock_rescan_remove();
757 	return 0;
758 }
759 
760 /* The longest amount of time to wait for a PCI device
761  * to come back online, in seconds.
762  */
763 #define MAX_WAIT_FOR_RECOVERY 300
764 
765 /**
766  * eeh_handle_normal_event - Handle EEH events on a specific PE
767  * @pe: EEH PE - which should not be used after we return, as it may
768  * have been invalidated.
769  *
770  * Attempts to recover the given PE.  If recovery fails or the PE has failed
771  * too many times, remove the PE.
772  *
773  * When the PHB detects address or data parity errors on a particular
774  * PCI slot, the associated PE is frozen. DMA to wild addresses (which
775  * usually happens because of bugs in device drivers or in PCI adapter
776  * firmware) can also cause EEH errors, as can #SERR, #PERR and other
777  * miscellaneous PCI-related errors.
778  *
779  * The recovery process consists of unplugging the device driver (which
780  * generates hotplug events to userspace), issuing a PCI #RST to the
781  * device, reconfiguring the PCI config space for all bridges and
782  * devices under this slot, and finally restarting the device drivers
783  * (which causes a second set of hotplug events to go out to
784  * userspace).
785  */
786 void eeh_handle_normal_event(struct eeh_pe *pe)
787 {
788 	struct pci_bus *bus;
789 	struct eeh_dev *edev, *tmp;
790 	struct eeh_pe *tmp_pe;
791 	int rc = 0;
792 	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
793 	struct eeh_rmv_data rmv_data =
794 		{LIST_HEAD_INIT(rmv_data.removed_vf_list), 0};
795 
796 	bus = eeh_pe_bus_get(pe);
797 	if (!bus) {
798 		pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
799 			__func__, pe->phb->global_number, pe->addr);
800 		return;
801 	}
802 
803 	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
804 
805 	eeh_pe_update_time_stamp(pe);
806 	pe->freeze_count++;
807 	if (pe->freeze_count > eeh_max_freezes) {
808 		pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n",
809 		       pe->phb->global_number, pe->addr,
810 		       pe->freeze_count);
811 		result = PCI_ERS_RESULT_DISCONNECT;
812 	}
813 
814 	/* Walk the various device drivers attached to this slot through
815 	 * a reset sequence, giving each an opportunity to do what it needs
816 	 * to accomplish the reset.  Each child gets a report of the
817 	 * status ... if any child can't handle the reset, then the entire
818 	 * slot is dlpar removed and added.
819 	 *
820 	 * When the PHB is fenced, we have to issue a reset to recover from
821 	 * the error. Override the result if necessary so that only a
822 	 * partial hotplug is done in this case.
823 	 */
824 	if (result != PCI_ERS_RESULT_DISCONNECT) {
825 		pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
826 			pe->freeze_count, eeh_max_freezes);
827 		pr_info("EEH: Notify device drivers to shutdown\n");
828 		eeh_set_channel_state(pe, pci_channel_io_frozen);
829 		eeh_set_irq_state(pe, false);
830 		eeh_pe_report("error_detected(IO frozen)", pe,
831 			      eeh_report_error, &result);
832 		if ((pe->type & EEH_PE_PHB) &&
833 		    result != PCI_ERS_RESULT_NONE &&
834 		    result != PCI_ERS_RESULT_NEED_RESET)
835 			result = PCI_ERS_RESULT_NEED_RESET;
836 	}
837 
838 	/* Get the current PCI slot state. This can take a long time,
839 	 * sometimes over 300 seconds for certain systems.
840 	 */
841 	if (result != PCI_ERS_RESULT_DISCONNECT) {
842 		rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
843 		if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
844 			pr_warn("EEH: Permanent failure\n");
845 			result = PCI_ERS_RESULT_DISCONNECT;
846 		}
847 	}
848 
849 	/* Since rtas may enable MMIO when posting the error log,
850 	 * don't post the error log until after all dev drivers
851 	 * have been informed.
852 	 */
853 	if (result != PCI_ERS_RESULT_DISCONNECT) {
854 		pr_info("EEH: Collect temporary log\n");
855 		eeh_slot_error_detail(pe, EEH_LOG_TEMP);
856 	}
857 
858 	/* If all device drivers were EEH-unaware, then shut
859 	 * down all of the device drivers, and hope they
860 	 * go down willingly, without panicking the system.
861 	 */
862 	if (result == PCI_ERS_RESULT_NONE) {
863 		pr_info("EEH: Reset with hotplug activity\n");
864 		rc = eeh_reset_device(pe, bus, NULL, false);
865 		if (rc) {
866 			pr_warn("%s: Unable to reset, err=%d\n",
867 				__func__, rc);
868 			result = PCI_ERS_RESULT_DISCONNECT;
869 		}
870 	}
871 
872 	/* If all devices reported they can proceed, then re-enable MMIO */
873 	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
874 		pr_info("EEH: Enable I/O for affected devices\n");
875 		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
876 
877 		if (rc < 0) {
878 			result = PCI_ERS_RESULT_DISCONNECT;
879 		} else if (rc) {
880 			result = PCI_ERS_RESULT_NEED_RESET;
881 		} else {
882 			pr_info("EEH: Notify device drivers to resume I/O\n");
883 			eeh_pe_report("mmio_enabled", pe,
884 				      eeh_report_mmio_enabled, &result);
885 		}
886 	}
887 
888 	/* If all devices reported they can proceed, then re-enable DMA */
889 	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
890 		pr_info("EEH: Enable DMA for affected devices\n");
891 		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
892 
893 		if (rc < 0) {
894 			result = PCI_ERS_RESULT_DISCONNECT;
895 		} else if (rc) {
896 			result = PCI_ERS_RESULT_NEED_RESET;
897 		} else {
898 			/*
899 			 * No PE reset was done in this case, so the PE is
900 			 * still in the frozen state. Clear it before
901 			 * resuming the PE.
902 			 */
903 			eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
904 			result = PCI_ERS_RESULT_RECOVERED;
905 		}
906 	}
907 
908 	/* If any device called out for a reset, then reset the slot */
909 	if (result == PCI_ERS_RESULT_NEED_RESET) {
910 		pr_info("EEH: Reset without hotplug activity\n");
911 		rc = eeh_reset_device(pe, bus, &rmv_data, true);
912 		if (rc) {
913 			pr_warn("%s: Cannot reset, err=%d\n",
914 				__func__, rc);
915 			result = PCI_ERS_RESULT_DISCONNECT;
916 		} else {
917 			result = PCI_ERS_RESULT_NONE;
918 			eeh_set_channel_state(pe, pci_channel_io_normal);
919 			eeh_set_irq_state(pe, true);
920 			eeh_pe_report("slot_reset", pe, eeh_report_reset,
921 				      &result);
922 		}
923 	}
924 
925 	if ((result == PCI_ERS_RESULT_RECOVERED) ||
926 	    (result == PCI_ERS_RESULT_NONE)) {
927 		/*
928 		 * Hot-removed VFs should be added back once the PF has
929 		 * been recovered properly.
930 		 */
931 		list_for_each_entry_safe(edev, tmp, &rmv_data.removed_vf_list,
932 					 rmv_entry) {
933 			eeh_add_virt_device(edev);
934 			list_del(&edev->rmv_entry);
935 		}
936 
937 		/* Tell all device drivers that they can resume operations */
938 		pr_info("EEH: Notify device driver to resume\n");
939 		eeh_set_channel_state(pe, pci_channel_io_normal);
940 		eeh_set_irq_state(pe, true);
941 		eeh_pe_report("resume", pe, eeh_report_resume, NULL);
942 		eeh_for_each_pe(pe, tmp_pe) {
943 			eeh_pe_for_each_dev(tmp_pe, edev, tmp) {
944 				edev->mode &= ~EEH_DEV_NO_HANDLER;
945 				edev->in_error = false;
946 			}
947 		}
948 
949 		pr_info("EEH: Recovery successful.\n");
950 	} else  {
951 		/*
952 		 * About 90% of all real-life EEH failures in the field
953 		 * are due to poorly seated PCI cards. Only 10% or so are
954 		 * due to actual, failed cards.
955 		 */
956 		pr_err("EEH: Unable to recover from failure of PHB#%x-PE#%x.\n"
957 		       "Please try reseating or replacing it\n",
958 			pe->phb->global_number, pe->addr);
959 
960 		eeh_slot_error_detail(pe, EEH_LOG_PERM);
961 
962 		/* Notify all devices that they're about to go down. */
963 		eeh_set_channel_state(pe, pci_channel_io_perm_failure);
964 		eeh_set_irq_state(pe, false);
965 		eeh_pe_report("error_detected(permanent failure)", pe,
966 			      eeh_report_failure, NULL);
967 
968 		/* Mark the PE to be removed permanently */
969 		eeh_pe_state_mark(pe, EEH_PE_REMOVED);
970 
971 		/*
972 		 * Shut down the device drivers for good. All removed
973 		 * devices are marked appropriately so that their PCI
974 		 * config space is not accessed any more.
975 		 */
976 		if (pe->type & EEH_PE_VF) {
977 			eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
978 			eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
979 		} else {
980 			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
981 			eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
982 
983 			pci_lock_rescan_remove();
984 			pci_hp_remove_devices(bus);
985 			pci_unlock_rescan_remove();
986 			/* The passed PE should no longer be used */
987 			return;
988 		}
989 	}
990 	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
991 }
992 
993 /**
994  * eeh_handle_special_event - Handle EEH events without a specific failing PE
995  *
996  * Called when an EEH event is detected but can't be narrowed down to a
997  * specific PE.  Iterates through possible failures and handles them as
998  * necessary.
999  */
1000 void eeh_handle_special_event(void)
1001 {
1002 	struct eeh_pe *pe, *phb_pe;
1003 	struct pci_bus *bus;
1004 	struct pci_controller *hose;
1005 	unsigned long flags;
1006 	int rc;
1007 
1008 
1009 	do {
1010 		rc = eeh_ops->next_error(&pe);
1011 
1012 		switch (rc) {
1013 		case EEH_NEXT_ERR_DEAD_IOC:
1014 			/* Mark all PHBs in dead state */
1015 			eeh_serialize_lock(&flags);
1016 
1017 			/* Purge all events */
1018 			eeh_remove_event(NULL, true);
1019 
1020 			list_for_each_entry(hose, &hose_list, list_node) {
1021 				phb_pe = eeh_phb_pe_get(hose);
1022 				if (!phb_pe) continue;
1023 
1024 				eeh_pe_mark_isolated(phb_pe);
1025 			}
1026 
1027 			eeh_serialize_unlock(flags);
1028 
1029 			break;
1030 		case EEH_NEXT_ERR_FROZEN_PE:
1031 		case EEH_NEXT_ERR_FENCED_PHB:
1032 		case EEH_NEXT_ERR_DEAD_PHB:
1033 			/* Mark the PE in fenced state */
1034 			eeh_serialize_lock(&flags);
1035 
1036 			/* Purge all events of the PHB */
1037 			eeh_remove_event(pe, true);
1038 
1039 			if (rc != EEH_NEXT_ERR_DEAD_PHB)
1040 				eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
1041 			eeh_pe_mark_isolated(pe);
1042 
1043 			eeh_serialize_unlock(flags);
1044 
1045 			break;
1046 		case EEH_NEXT_ERR_NONE:
1047 			return;
1048 		default:
1049 			pr_warn("%s: Invalid value %d from next_error()\n",
1050 				__func__, rc);
1051 			return;
1052 		}
1053 
1054 		/*
1055 		 * A fenced PHB or frozen PE is handled as a normal
1056 		 * event. For a dead PHB or IOC, the affected PHBs have
1057 		 * to be removed.
1058 		 */
1059 		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
1060 		    rc == EEH_NEXT_ERR_FENCED_PHB) {
1061 			eeh_handle_normal_event(pe);
1062 		} else {
1063 			pci_lock_rescan_remove();
1064 			list_for_each_entry(hose, &hose_list, list_node) {
1065 				phb_pe = eeh_phb_pe_get(hose);
1066 				if (!phb_pe ||
1067 				    !(phb_pe->state & EEH_PE_ISOLATED) ||
1068 				    (phb_pe->state & EEH_PE_RECOVERING))
1069 					continue;
1070 
1071 				/* Notify all devices to be down */
1072 				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
1073 				eeh_set_channel_state(pe, pci_channel_io_perm_failure);
1074 				eeh_pe_report(
1075 					"error_detected(permanent failure)", pe,
1076 					eeh_report_failure, NULL);
1077 				bus = eeh_pe_bus_get(phb_pe);
1078 				if (!bus) {
1079 					pr_err("%s: Cannot find PCI bus for "
1080 					       "PHB#%x-PE#%x\n",
1081 					       __func__,
1082 					       pe->phb->global_number,
1083 					       pe->addr);
1084 					break;
1085 				}
1086 				pci_hp_remove_devices(bus);
1087 			}
1088 			pci_unlock_rescan_remove();
1089 		}
1090 
1091 		/*
1092 		 * If a dead IOC has been detected, there is no need to
1093 		 * proceed any further since all PHBs have been removed.
1094 		 */
1095 		if (rc == EEH_NEXT_ERR_DEAD_IOC)
1096 			break;
1097 	} while (rc != EEH_NEXT_ERR_NONE);
1098 }
1099