xref: /openbmc/linux/arch/powerpc/kernel/eeh.c (revision 3471b9f7)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright IBM Corporation 2001, 2005, 2006
4  * Copyright Dave Engebretsen & Todd Inglett 2001
5  * Copyright Linas Vepstas 2005, 2006
6  * Copyright 2001-2012 IBM Corporation.
7  *
8  * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com>
9  */
10 
11 #include <linux/delay.h>
12 #include <linux/sched.h>
13 #include <linux/init.h>
14 #include <linux/list.h>
15 #include <linux/pci.h>
16 #include <linux/iommu.h>
17 #include <linux/proc_fs.h>
18 #include <linux/rbtree.h>
19 #include <linux/reboot.h>
20 #include <linux/seq_file.h>
21 #include <linux/spinlock.h>
22 #include <linux/export.h>
23 #include <linux/of.h>
24 
25 #include <linux/atomic.h>
26 #include <asm/debugfs.h>
27 #include <asm/eeh.h>
28 #include <asm/eeh_event.h>
29 #include <asm/io.h>
30 #include <asm/iommu.h>
31 #include <asm/machdep.h>
32 #include <asm/ppc-pci.h>
33 #include <asm/rtas.h>
34 #include <asm/pte-walk.h>
35 
36 
37 /** Overview:
38  *  EEH, or "Enhanced Error Handling" is a PCI bridge technology for
39  *  dealing with PCI bus errors that can't be dealt with within the
40  *  usual PCI framework, except by check-stopping the CPU.  Systems
41  *  that are designed for high-availability/reliability cannot afford
42  *  to crash due to a "mere" PCI error, thus the need for EEH.
43  *  An EEH-capable bridge operates by converting a detected error
44  *  into a "slot freeze", taking the PCI adapter off-line, making
45  *  the slot behave, from the OS's point of view, as if the slot
46  *  were "empty": all reads return 0xff's and all writes are silently
47  *  ignored.  EEH slot isolation events can be triggered by parity
48  *  errors on the address or data busses (e.g. during posted writes),
49  *  which in turn might be caused by low voltage on the bus, dust,
50  *  vibration, humidity, radioactivity or plain-old failed hardware.
51  *
52  *  Note, however, that one of the leading causes of EEH slot
53  *  freeze events is buggy device drivers, buggy device microcode,
54  *  or buggy device hardware.  This is because any attempt by the
55  *  device to bus-master data to a memory address that is not
56  *  assigned to the device will trigger a slot freeze.   (The idea
57  *  is to prevent devices-gone-wild from corrupting system memory).
58  *  Buggy hardware/drivers will have a miserable time co-existing
59  *  with EEH.
60  *
61  *  Ideally, a PCI device driver, when suspecting that an isolation
62  *  event has occurred (e.g. by reading 0xff's), will then ask EEH
63  *  whether this is the case, and then take appropriate steps to
64  *  reset the PCI slot, the PCI device, and then resume operations.
65  *  However, until that day,  the checking is done here, with the
66  *  eeh_check_failure() routine embedded in the MMIO macros.  If
67  *  the slot is found to be isolated, an "EEH Event" is synthesized
68  *  and sent out for processing.
69  */
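/*
 * Illustrative sketch (hypothetical driver code, not part of this
 * file): an MMIO read returning all-ones can be cross-checked with
 * eeh_check_failure() before concluding the hardware really returned
 * ~0. "regs" and "STATUS_OFF" are assumed names for this example:
 *
 *	val = ioread32(regs + STATUS_OFF);
 *	if (val == 0xffffffff && eeh_check_failure(regs + STATUS_OFF))
 *		return -EIO;	(slot frozen; recovery event queued)
 */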
70 
71 /* If a device driver keeps reading an MMIO register in an interrupt
72  * handler after a slot isolation event, it might be broken.
73  * This sets the threshold for how many read attempts we allow
74  * before printing an error message.
75  */
76 #define EEH_MAX_FAILS	2100000
77 
78 /* Time to wait for a PCI slot to report status, in milliseconds */
79 #define PCI_BUS_RESET_WAIT_MSEC (5*60*1000)
80 
81 /*
82  * EEH probe mode, which is part of the flags, exists to
83  * support multiple platforms. Some platforms, such as
84  * pSeries, enumerate PCI devices from the device tree,
85  * while others, such as powernv, probe them directly from
86  * hardware. The flag distinguishes the two cases. In either
87  * case, struct eeh_ops::probe is invoked for the particular
88  * OF node or PCI device so that the corresponding PE is
89  * created there.
90  */
91 int eeh_subsystem_flags;
92 EXPORT_SYMBOL(eeh_subsystem_flags);
93 
94 /*
95  * Maximum number of allowed PE freezes. If a particular PE's
96  * frozen count in the last hour exceeds this limit, the PE is
97  * forced offline permanently.
98  */
99 u32 eeh_max_freezes = 5;
100 
101 /*
102  * Controls whether a recovery event should be scheduled when an
103  * isolated device is discovered. This is only really useful for
104  * debugging problems with the EEH core.
105  */
106 bool eeh_debugfs_no_recover;
107 
108 /* Platform dependent EEH operations */
109 struct eeh_ops *eeh_ops = NULL;
110 
111 /* Lock to avoid races due to multiple reports of an error */
112 DEFINE_RAW_SPINLOCK(confirm_error_lock);
113 EXPORT_SYMBOL_GPL(confirm_error_lock);
114 
115 /* Lock to protect passed flags */
116 static DEFINE_MUTEX(eeh_dev_mutex);
117 
118 /* Buffer for reporting pci register dumps. It's here in BSS, and
119  * not dynamically allocated, so that it ends up in RMO where RTAS
120  * can access it.
121  */
122 #define EEH_PCI_REGS_LOG_LEN 8192
123 static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];
124 
125 /*
126  * The struct maintains the global EEH statistic
127  * information. The EEH global statistics are also
128  * exported to user space through procfs.
129  */
130 struct eeh_stats {
131 	u64 no_device;		/* PCI device not found		*/
132 	u64 no_dn;		/* OF node not found		*/
133 	u64 no_cfg_addr;	/* Config address not found	*/
134 	u64 ignored_check;	/* EEH check skipped		*/
135 	u64 total_mmio_ffs;	/* Total EEH checks		*/
136 	u64 false_positives;	/* Unnecessary EEH checks	*/
137 	u64 slot_resets;	/* PE reset			*/
138 };
139 
140 static struct eeh_stats eeh_stats;
141 
142 static int __init eeh_setup(char *str)
143 {
144 	if (!strcmp(str, "off"))
145 		eeh_add_flag(EEH_FORCE_DISABLED);
146 	else if (!strcmp(str, "early_log"))
147 		eeh_add_flag(EEH_EARLY_DUMP_LOG);
148 
149 	return 1;
150 }
151 __setup("eeh=", eeh_setup);
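/*
 * Example (assumed typical usage): EEH behaviour can be tuned from
 * the kernel command line; only the two values handled by eeh_setup()
 * above are recognised:
 *
 *	eeh=off		force-disable EEH recovery
 *	eeh=early_log	dump the EEH log early
 */
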
152 
153 void eeh_show_enabled(void)
154 {
155 	if (eeh_has_flag(EEH_FORCE_DISABLED))
156 		pr_info("EEH: Recovery disabled by kernel parameter.\n");
157 	else if (eeh_has_flag(EEH_ENABLED))
158 		pr_info("EEH: Capable adapter found: recovery enabled.\n");
159 	else
160 		pr_info("EEH: No capable adapters found: recovery disabled.\n");
161 }
162 
163 /*
164  * This routine captures assorted PCI configuration space data
165  * for the indicated PCI device, and puts them into a buffer
166  * for RTAS error logging.
167  */
168 static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
169 {
170 	struct pci_dn *pdn = eeh_dev_to_pdn(edev);
171 	u32 cfg;
172 	int cap, i;
173 	int n = 0, l = 0;
174 	char buffer[128];
175 
176 	if (!pdn) {
177 		pr_warn("EEH: Note: No error log for absent device.\n");
178 		return 0;
179 	}
180 
181 	n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
182 		       pdn->phb->global_number, pdn->busno,
183 		       PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
184 	pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n",
185 		pdn->phb->global_number, pdn->busno,
186 		PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
187 
188 	eeh_ops->read_config(pdn, PCI_VENDOR_ID, 4, &cfg);
189 	n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
190 	pr_warn("EEH: PCI device/vendor: %08x\n", cfg);
191 
192 	eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cfg);
193 	n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
194 	pr_warn("EEH: PCI cmd/status register: %08x\n", cfg);
195 
196 	/* Gather bridge-specific registers */
197 	if (edev->mode & EEH_DEV_BRIDGE) {
198 		eeh_ops->read_config(pdn, PCI_SEC_STATUS, 2, &cfg);
199 		n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
200 		pr_warn("EEH: Bridge secondary status: %04x\n", cfg);
201 
202 		eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &cfg);
203 		n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
204 		pr_warn("EEH: Bridge control: %04x\n", cfg);
205 	}
206 
207 	/* Dump out the PCI-X command and status regs */
208 	cap = edev->pcix_cap;
209 	if (cap) {
210 		eeh_ops->read_config(pdn, cap, 4, &cfg);
211 		n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
212 		pr_warn("EEH: PCI-X cmd: %08x\n", cfg);
213 
214 		eeh_ops->read_config(pdn, cap+4, 4, &cfg);
215 		n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
216 		pr_warn("EEH: PCI-X status: %08x\n", cfg);
217 	}
218 
219 	/* If PCI-E capable, dump PCI-E cap 10 */
220 	cap = edev->pcie_cap;
221 	if (cap) {
222 		n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
223 		pr_warn("EEH: PCI-E capabilities and status follow:\n");
224 
225 		for (i=0; i<=8; i++) {
226 			eeh_ops->read_config(pdn, cap+4*i, 4, &cfg);
227 			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
228 
229 			if ((i % 4) == 0) {
230 				if (i != 0)
231 					pr_warn("%s\n", buffer);
232 
233 				l = scnprintf(buffer, sizeof(buffer),
234 					      "EEH: PCI-E %02x: %08x ",
235 					      4*i, cfg);
236 			} else {
237 				l += scnprintf(buffer+l, sizeof(buffer)-l,
238 					       "%08x ", cfg);
239 			}
240 
241 		}
242 
243 		pr_warn("%s\n", buffer);
244 	}
245 
246 	/* If AER capable, dump it */
247 	cap = edev->aer_cap;
248 	if (cap) {
249 		n += scnprintf(buf+n, len-n, "pci-e AER:\n");
250 		pr_warn("EEH: PCI-E AER capability register set follows:\n");
251 
252 		for (i=0; i<=13; i++) {
253 			eeh_ops->read_config(pdn, cap+4*i, 4, &cfg);
254 			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
255 
256 			if ((i % 4) == 0) {
257 				if (i != 0)
258 					pr_warn("%s\n", buffer);
259 
260 				l = scnprintf(buffer, sizeof(buffer),
261 					      "EEH: PCI-E AER %02x: %08x ",
262 					      4*i, cfg);
263 			} else {
264 				l += scnprintf(buffer+l, sizeof(buffer)-l,
265 					       "%08x ", cfg);
266 			}
267 		}
268 
269 		pr_warn("%s\n", buffer);
270 	}
271 
272 	return n;
273 }
274 
275 static void *eeh_dump_pe_log(struct eeh_pe *pe, void *flag)
276 {
277 	struct eeh_dev *edev, *tmp;
278 	size_t *plen = flag;
279 
280 	eeh_pe_for_each_dev(pe, edev, tmp)
281 		*plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen,
282 					  EEH_PCI_REGS_LOG_LEN - *plen);
283 
284 	return NULL;
285 }
286 
287 /**
288  * eeh_slot_error_detail - Generate combined log including driver log and error log
289  * @pe: EEH PE
290  * @severity: temporary or permanent error log
291  *
292  * This routine should be called to generate the combined log, which
293  * consists of the driver log and the error log. The driver log is
294  * gathered from the config space of the corresponding PCI device,
295  * while the error log is fetched through a platform dependent call.
296  */
297 void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
298 {
299 	size_t loglen = 0;
300 
301 	/*
302 	 * When the PHB is fenced or dead, it's pointless to collect
303 	 * the data from PCI config space because it should return
304 	 * 0xFF's. For ER, we still retrieve the data from the PCI
305 	 * config space.
306 	 *
307 	 * For pHyp, we have to enable IO for log retrieval. Otherwise,
308 	 * 0xFF's is always returned from PCI config space.
309 	 *
310 	 * When the @severity is EEH_LOG_PERM, the PE is going to be
311 	 * removed. Prior to that, the drivers for devices included in
312 	 * the PE will be closed. The drivers rely on working IO path
313 	 * to bring the devices to a quiet state. Otherwise, PCI traffic
314 	 * from those devices after they are removed is likely to cause
315 	 * another unexpected EEH error.
316 	 */
317 	if (!(pe->type & EEH_PE_PHB)) {
318 		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
319 		    severity == EEH_LOG_PERM)
320 			eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
321 
322 		/*
323 		 * The config space of some PCI devices can't be accessed
324 		 * when their PEs are in frozen state. Otherwise, fenced
325 		 * PHB might be seen. Those PEs are identified with flag
326 		 * EEH_PE_CFG_RESTRICTED, indicating EEH_PE_CFG_BLOCKED
327 		 * is set automatically when the PE is put to EEH_PE_ISOLATED.
328 		 *
329 		 * Restoring BARs possibly triggers PCI config access in
330 		 * (OPAL) firmware and then causes fenced PHB. If the
331 		 * PCI config is blocked with flag EEH_PE_CFG_BLOCKED, it's
332 		 * pointless to restore BARs and dump config space.
333 		 */
334 		eeh_ops->configure_bridge(pe);
335 		if (!(pe->state & EEH_PE_CFG_BLOCKED)) {
336 			eeh_pe_restore_bars(pe);
337 
338 			pci_regs_buf[0] = 0;
339 			eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
340 		}
341 	}
342 
343 	eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
344 }
345 
346 /**
347  * eeh_token_to_phys - Convert EEH address token to phys address
348  * @token: I/O token, should be address in the form 0xA....
349  *
350  * This routine should be called to convert a virtual I/O address
351  * to a physical one.
352  */
353 static inline unsigned long eeh_token_to_phys(unsigned long token)
354 {
355 	pte_t *ptep;
356 	unsigned long pa;
357 	int hugepage_shift;
358 
359 	/*
360 	 * We won't find hugepages here (this is iomem). Hence we are not
361 	 * worried about _PAGE_SPLITTING/collapse. Also we will not hit
362 	 * page table free, because of init_mm.
363 	 */
364 	ptep = find_init_mm_pte(token, &hugepage_shift);
365 	if (!ptep)
366 		return token;
367 
368 	pa = pte_pfn(*ptep);
369 
370 	/* On radix we can do hugepage mappings for io, so handle that */
371 	if (hugepage_shift) {
372 		pa <<= hugepage_shift;
373 		pa |= token & ((1ul << hugepage_shift) - 1);
374 	} else {
375 		pa <<= PAGE_SHIFT;
376 		pa |= token & (PAGE_SIZE - 1);
377 	}
378 
379 	return pa;
380 }
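
/*
 * Worked example for the math above (illustrative numbers only): with
 * 64K pages (PAGE_SHIFT = 16), a token whose PTE yields PFN 0xabcd and
 * whose low bits are 0x3456 resolves to (0xabcd << 16) | 0x3456 =
 * 0xabcd3456. Hugepage mappings compose the same way, but with
 * hugepage_shift in place of PAGE_SHIFT.
 */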
381 
382 /*
383  * On the PowerNV platform, we might already have a fenced PHB.
384  * In that case, it's meaningless to recover the frozen PE. Instead,
385  * we have to handle the fenced PHB first.
386  */
387 static int eeh_phb_check_failure(struct eeh_pe *pe)
388 {
389 	struct eeh_pe *phb_pe;
390 	unsigned long flags;
391 	int ret;
392 
393 	if (!eeh_has_flag(EEH_PROBE_MODE_DEV))
394 		return -EPERM;
395 
396 	/* Find the PHB PE */
397 	phb_pe = eeh_phb_pe_get(pe->phb);
398 	if (!phb_pe) {
399 		pr_warn("%s: Can't find PE for PHB#%x\n",
400 			__func__, pe->phb->global_number);
401 		return -EEXIST;
402 	}
403 
404 	/* If the PHB is already in a problematic state */
405 	eeh_serialize_lock(&flags);
406 	if (phb_pe->state & EEH_PE_ISOLATED) {
407 		ret = 0;
408 		goto out;
409 	}
410 
411 	/* Check PHB state */
412 	ret = eeh_ops->get_state(phb_pe, NULL);
413 	if ((ret < 0) ||
414 	    (ret == EEH_STATE_NOT_SUPPORT) || eeh_state_active(ret)) {
415 		ret = 0;
416 		goto out;
417 	}
418 
419 	/* Isolate the PHB and send event */
420 	eeh_pe_mark_isolated(phb_pe);
421 	eeh_serialize_unlock(flags);
422 
423 	pr_debug("EEH: PHB#%x failure detected, location: %s\n",
424 		phb_pe->phb->global_number, eeh_pe_loc_get(phb_pe));
425 	eeh_send_failure_event(phb_pe);
426 	return 1;
427 out:
428 	eeh_serialize_unlock(flags);
429 	return ret;
430 }
431 
432 /**
433  * eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze
434  * @edev: eeh device
435  *
436  * Check for an EEH failure for the given device.  Call this
437  * routine if the result of a read was all 0xff's and you want to
438  * find out if this is due to an EEH slot freeze.  This routine
439  * will query firmware for the EEH status.
440  *
441  * Returns 0 if there has not been an EEH error; otherwise returns
442  * a non-zero value and queues up a slot isolation event notification.
443  *
444  * It is safe to call this routine in an interrupt context.
445  */
446 int eeh_dev_check_failure(struct eeh_dev *edev)
447 {
448 	int ret;
449 	unsigned long flags;
450 	struct device_node *dn;
451 	struct pci_dev *dev;
452 	struct eeh_pe *pe, *parent_pe;
453 	int rc = 0;
454 	const char *location = NULL;
455 
456 	eeh_stats.total_mmio_ffs++;
457 
458 	if (!eeh_enabled())
459 		return 0;
460 
461 	if (!edev) {
462 		eeh_stats.no_dn++;
463 		return 0;
464 	}
465 	dev = eeh_dev_to_pci_dev(edev);
466 	pe = eeh_dev_to_pe(edev);
467 
468 	/* Access to IO BARs might get this far and still not want checking. */
469 	if (!pe) {
470 		eeh_stats.ignored_check++;
471 		eeh_edev_dbg(edev, "Ignored check\n");
472 		return 0;
473 	}
474 
475 	if (!pe->addr && !pe->config_addr) {
476 		eeh_stats.no_cfg_addr++;
477 		return 0;
478 	}
479 
480 	/*
481 	 * On the PowerNV platform, we might already have a fenced PHB
482 	 * there and we need to take care of that first.
483 	 */
484 	ret = eeh_phb_check_failure(pe);
485 	if (ret > 0)
486 		return ret;
487 
488 	/*
489 	 * If the PE isn't owned by us, we shouldn't check the
490 	 * state. Instead, let the owner handle it if the PE has
491 	 * been frozen.
492 	 */
493 	if (eeh_pe_passed(pe))
494 		return 0;
495 
496 	/* If we already have a pending isolation event for this
497 	 * slot, we know it's bad already, we don't need to check.
498 	 * Do this checking under a lock; as multiple PCI devices
499 	 * in one slot might report errors simultaneously, and we
500 	 * only want one error recovery routine running.
501 	 */
502 	eeh_serialize_lock(&flags);
503 	rc = 1;
504 	if (pe->state & EEH_PE_ISOLATED) {
505 		pe->check_count++;
506 		if (pe->check_count == EEH_MAX_FAILS) {
507 			dn = pci_device_to_OF_node(dev);
508 			if (dn)
509 				location = of_get_property(dn, "ibm,loc-code",
510 						NULL);
511 			eeh_edev_err(edev, "%d reads ignored for recovering device at location=%s driver=%s\n",
512 				pe->check_count,
513 				location ? location : "unknown",
514 				eeh_driver_name(dev));
515 			eeh_edev_err(edev, "Might be infinite loop in %s driver\n",
516 				eeh_driver_name(dev));
517 			dump_stack();
518 		}
519 		goto dn_unlock;
520 	}
521 
522 	/*
523 	 * Now test for an EEH failure.  This is VERY expensive.
524 	 * Note that the eeh_config_addr may be a parent device
525 	 * in the case of a device behind a bridge, or it may be
526 	 * function zero of a multi-function device.
527 	 * In any case they must share a common PHB.
528 	 */
529 	ret = eeh_ops->get_state(pe, NULL);
530 
531 	/* Note that config-io to empty slots may fail;
532 	 * they are empty when they don't have children.
533 	 * We will punt with the following conditions: Failure to get
534 	 * PE's state, EEH not support and Permanently unavailable
535 	 * state, PE is in good state.
536 	 */
537 	if ((ret < 0) ||
538 	    (ret == EEH_STATE_NOT_SUPPORT) || eeh_state_active(ret)) {
539 		eeh_stats.false_positives++;
540 		pe->false_positives++;
541 		rc = 0;
542 		goto dn_unlock;
543 	}
544 
545 	/*
546 	 * It's a corner case that the parent PE has been
547 	 * put into frozen state as well. We should take care
548 	 * of that first.
549 	 */
550 	parent_pe = pe->parent;
551 	while (parent_pe) {
552 		/* Hit the ceiling ? */
553 		if (parent_pe->type & EEH_PE_PHB)
554 			break;
555 
556 		/* Frozen parent PE ? */
557 		ret = eeh_ops->get_state(parent_pe, NULL);
558 		if (ret > 0 && !eeh_state_active(ret)) {
559 			pe = parent_pe;
560 			pr_err("EEH: Failure of PHB#%x-PE#%x will be handled at parent PHB#%x-PE#%x.\n",
561 			       pe->phb->global_number, pe->addr,
562 			       pe->phb->global_number, parent_pe->addr);
563 		}
564 
565 		/* Next parent level */
566 		parent_pe = parent_pe->parent;
567 	}
568 
569 	eeh_stats.slot_resets++;
570 
571 	/* Avoid repeated reports of this failure, including problems
572 	 * with other functions on this device, and functions under
573 	 * bridges.
574 	 */
575 	eeh_pe_mark_isolated(pe);
576 	eeh_serialize_unlock(flags);
577 
578 	/* Most EEH events are due to device driver bugs.  Having
579 	 * a stack trace will help the device-driver authors figure
580 	 * out what happened.  So print that out.
581 	 */
582 	pr_debug("EEH: %s: Frozen PHB#%x-PE#%x detected\n",
583 		__func__, pe->phb->global_number, pe->addr);
584 	eeh_send_failure_event(pe);
585 
586 	return 1;
587 
588 dn_unlock:
589 	eeh_serialize_unlock(flags);
590 	return rc;
591 }
592 
593 EXPORT_SYMBOL_GPL(eeh_dev_check_failure);
594 
595 /**
596  * eeh_check_failure - Check if all 1's data is due to EEH slot freeze
597  * @token: I/O address
598  *
599  * Check for an EEH failure at the given I/O address. Call this
600  * routine if the result of a read was all 0xff's and you want to
601  * find out if this is due to an EEH slot freeze event. This routine
602  * will query firmware for the EEH status.
603  *
604  * Note this routine is safe to call in an interrupt context.
605  */
606 int eeh_check_failure(const volatile void __iomem *token)
607 {
608 	unsigned long addr;
609 	struct eeh_dev *edev;
610 
611 	/* Finding the phys addr + pci device; this is pretty quick. */
612 	addr = eeh_token_to_phys((unsigned long __force) token);
613 	edev = eeh_addr_cache_get_dev(addr);
614 	if (!edev) {
615 		eeh_stats.no_device++;
616 		return 0;
617 	}
618 
619 	return eeh_dev_check_failure(edev);
620 }
621 EXPORT_SYMBOL(eeh_check_failure);
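
/*
 * A minimal sketch of how the two check entry points relate: a driver
 * that already holds its pci_dev can skip the address-cache lookup
 * and call eeh_dev_check_failure() directly ("pdev" is hypothetical):
 *
 *	struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
 *
 *	if (edev && eeh_dev_check_failure(edev))
 *		(the PE is frozen and a recovery event has been queued)
 */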
622 
623 
624 /**
625  * eeh_pci_enable - Enable MMIO or DMA transfers for this slot
626  * @pe: EEH PE
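 * @function: EEH option to apply (e.g. EEH_OPT_THAW_MMIO, EEH_OPT_THAW_DMA)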
627  *
628  * This routine should be called to reenable frozen MMIO or DMA
629  * so that they work correctly again. It's useful while doing
630  * recovery or log collection on the indicated device.
631  */
632 int eeh_pci_enable(struct eeh_pe *pe, int function)
633 {
634 	int active_flag, rc;
635 
636 	/*
637 	 * pHyp doesn't allow enabling IO or DMA on an unfrozen PE,
638 	 * and it's pointless to enable them on one anyway. So
639 	 * we have to check before enabling IO or DMA.
640 	 */
641 	switch (function) {
642 	case EEH_OPT_THAW_MMIO:
643 		active_flag = EEH_STATE_MMIO_ACTIVE | EEH_STATE_MMIO_ENABLED;
644 		break;
645 	case EEH_OPT_THAW_DMA:
646 		active_flag = EEH_STATE_DMA_ACTIVE;
647 		break;
648 	case EEH_OPT_DISABLE:
649 	case EEH_OPT_ENABLE:
650 	case EEH_OPT_FREEZE_PE:
651 		active_flag = 0;
652 		break;
653 	default:
654 		pr_warn("%s: Invalid function %d\n",
655 			__func__, function);
656 		return -EINVAL;
657 	}
658 
659 	/*
660 	 * Check if IO or DMA has been enabled before
661 	 * enabling them.
662 	 */
663 	if (active_flag) {
664 		rc = eeh_ops->get_state(pe, NULL);
665 		if (rc < 0)
666 			return rc;
667 
668 		/* Needn't enable it at all */
669 		if (rc == EEH_STATE_NOT_SUPPORT)
670 			return 0;
671 
672 		/* It's already enabled */
673 		if (rc & active_flag)
674 			return 0;
675 	}
676 
677 
678 	/* Issue the request */
679 	rc = eeh_ops->set_option(pe, function);
680 	if (rc)
681 		pr_warn("%s: Unexpected state change %d on "
682 			"PHB#%x-PE#%x, err=%d\n",
683 			__func__, function, pe->phb->global_number,
684 			pe->addr, rc);
685 
686 	/* Check if the request is finished successfully */
687 	if (active_flag) {
688 		rc = eeh_wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
689 		if (rc < 0)
690 			return rc;
691 
692 		if (rc & active_flag)
693 			return 0;
694 
695 		return -EIO;
696 	}
697 
698 	return rc;
699 }
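
/*
 * Sketch of a typical caller (this mirrors the log-collection path in
 * eeh_slot_error_detail() above): thaw MMIO on a frozen PE so config
 * and log reads return real data instead of 0xFF's:
 *
 *	if (eeh_pci_enable(pe, EEH_OPT_THAW_MMIO) < 0)
 *		(leave the PE frozen and skip the register dump)
 */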
700 
701 static void eeh_disable_and_save_dev_state(struct eeh_dev *edev,
702 					    void *userdata)
703 {
704 	struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
705 	struct pci_dev *dev = userdata;
706 
707 	/*
708 	 * The caller should have disabled and saved the
709 	 * state for the specified device
710 	 */
711 	if (!pdev || pdev == dev)
712 		return;
713 
714 	/* Ensure we have D0 power state */
715 	pci_set_power_state(pdev, PCI_D0);
716 
717 	/* Save device state */
718 	pci_save_state(pdev);
719 
720 	/*
721 	 * Disable device to avoid any DMA traffic and
722 	 * interrupt from the device
723 	 */
724 	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
725 }
726 
727 static void eeh_restore_dev_state(struct eeh_dev *edev, void *userdata)
728 {
729 	struct pci_dn *pdn = eeh_dev_to_pdn(edev);
730 	struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
731 	struct pci_dev *dev = userdata;
732 
733 	if (!pdev)
734 		return;
735 
736 	/* Apply customization from firmware */
737 	if (pdn && eeh_ops->restore_config)
738 		eeh_ops->restore_config(pdn);
739 
740 	/* The caller should restore state for the specified device */
741 	if (pdev != dev)
742 		pci_restore_state(pdev);
743 }
744 
745 int eeh_restore_vf_config(struct pci_dn *pdn)
746 {
747 	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
748 	u32 devctl, cmd, cap2, aer_capctl;
749 	int old_mps;
750 
751 	if (edev->pcie_cap) {
752 		/* Restore MPS */
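		/*
		 * Worked example (illustrative): pdn->mps caches the payload
		 * size in bytes while DEVCTL wants the encoded field, so for
		 * mps = 256: ffs(256) = 9 and (9 - 8) << 5 = 0x20, which is
		 * the DEVCTL encoding of a 256-byte MPS.
		 */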
753 		old_mps = (ffs(pdn->mps) - 8) << 5;
754 		eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
755 				     2, &devctl);
756 		devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
757 		devctl |= old_mps;
758 		eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
759 				      2, devctl);
760 
761 		/* Disable Completion Timeout if possible */
762 		eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP2,
763 				     4, &cap2);
764 		if (cap2 & PCI_EXP_DEVCAP2_COMP_TMOUT_DIS) {
765 			eeh_ops->read_config(pdn,
766 					     edev->pcie_cap + PCI_EXP_DEVCTL2,
767 					     4, &cap2);
768 			cap2 |= PCI_EXP_DEVCTL2_COMP_TMOUT_DIS;
769 			eeh_ops->write_config(pdn,
770 					      edev->pcie_cap + PCI_EXP_DEVCTL2,
771 					      4, cap2);
772 		}
773 	}
774 
775 	/* Enable SERR and parity checking */
776 	eeh_ops->read_config(pdn, PCI_COMMAND, 2, &cmd);
777 	cmd |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
778 	eeh_ops->write_config(pdn, PCI_COMMAND, 2, cmd);
779 
780 	/* Enable reporting of various errors */
781 	if (edev->pcie_cap) {
782 		eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
783 				     2, &devctl);
784 		devctl &= ~PCI_EXP_DEVCTL_CERE;
785 		devctl |= (PCI_EXP_DEVCTL_NFERE |
786 			   PCI_EXP_DEVCTL_FERE |
787 			   PCI_EXP_DEVCTL_URRE);
788 		eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
789 				      2, devctl);
790 	}
791 
792 	/* Enable ECRC generation and check */
793 	if (edev->pcie_cap && edev->aer_cap) {
794 		eeh_ops->read_config(pdn, edev->aer_cap + PCI_ERR_CAP,
795 				     4, &aer_capctl);
796 		aer_capctl |= (PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
797 		eeh_ops->write_config(pdn, edev->aer_cap + PCI_ERR_CAP,
798 				      4, aer_capctl);
799 	}
800 
801 	return 0;
802 }
803 
804 /**
805  * pcibios_set_pcie_reset_state - Set PCI-E reset state
806  * @dev: pci device struct
807  * @state: reset state to enter
808  *
809  * Return value:
810  * 	0 if success
811  */
812 int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
813 {
814 	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
815 	struct eeh_pe *pe = eeh_dev_to_pe(edev);
816 
817 	if (!pe) {
818 		pr_err("%s: No PE found on PCI device %s\n",
819 			__func__, pci_name(dev));
820 		return -EINVAL;
821 	}
822 
823 	switch (state) {
824 	case pcie_deassert_reset:
825 		eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
826 		eeh_unfreeze_pe(pe);
827 		if (!(pe->type & EEH_PE_VF))
828 			eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true);
829 		eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev);
830 		eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
831 		break;
832 	case pcie_hot_reset:
833 		eeh_pe_mark_isolated(pe);
834 		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true);
835 		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
836 		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
837 		if (!(pe->type & EEH_PE_VF))
838 			eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
839 		eeh_ops->reset(pe, EEH_RESET_HOT);
840 		break;
841 	case pcie_warm_reset:
842 		eeh_pe_mark_isolated(pe);
843 		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true);
844 		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
845 		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
846 		if (!(pe->type & EEH_PE_VF))
847 			eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
848 		eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
849 		break;
850 	default:
851 		eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED, true);
852 		return -EINVAL;
853 	}
854 
855 	return 0;
856 }
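
/*
 * Sketch of how drivers reach this hook: they normally go through the
 * generic wrapper rather than calling this function directly ("pdev"
 * is hypothetical):
 *
 *	pci_set_pcie_reset_state(pdev, pcie_hot_reset);
 *	(device re-initialisation goes here)
 *	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
 */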
857 
858 /**
859  * eeh_set_dev_freset - Check the required reset for the indicated device
860  * @edev: EEH device
861  * @flag: return value
862  *
863  * Each device might have its preferred reset type: fundamental or
864  * hot reset. The routine is used to collect the information for
865  * the indicated device and its children so that the whole group of
866  * devices can be reset properly.
867  */
868 static void eeh_set_dev_freset(struct eeh_dev *edev, void *flag)
869 {
870 	struct pci_dev *dev;
871 	unsigned int *freset = (unsigned int *)flag;
872 
873 	dev = eeh_dev_to_pci_dev(edev);
874 	if (dev)
875 		*freset |= dev->needs_freset;
876 }
877 
878 static void eeh_pe_refreeze_passed(struct eeh_pe *root)
879 {
880 	struct eeh_pe *pe;
881 	int state;
882 
883 	eeh_for_each_pe(root, pe) {
884 		if (eeh_pe_passed(pe)) {
885 			state = eeh_ops->get_state(pe, NULL);
886 			if (state &
887 			   (EEH_STATE_MMIO_ACTIVE | EEH_STATE_MMIO_ENABLED)) {
888 				pr_info("EEH: Passed-through PE PHB#%x-PE#%x was thawed by reset, re-freezing for safety.\n",
889 					pe->phb->global_number, pe->addr);
890 				eeh_pe_set_option(pe, EEH_OPT_FREEZE_PE);
891 			}
892 		}
893 	}
894 }
895 
896 /**
897  * eeh_pe_reset_full - Complete a full reset process on the indicated PE
898  * @pe: EEH PE
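 * @include_passed: also reset PEs that are passed through to a guest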
899  *
900  * This function executes a full reset procedure on a PE, including setting
901  * the appropriate flags, performing a fundamental or hot reset, and then
902  * deactivating the reset status.  It is designed to be used within the EEH
903  * subsystem, as opposed to eeh_pe_reset which is exported to drivers and
904  * only performs a single operation at a time.
905  *
906  * This function will attempt to reset a PE three times before failing.
907  */
908 int eeh_pe_reset_full(struct eeh_pe *pe, bool include_passed)
909 {
910 	int reset_state = (EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
911 	int type = EEH_RESET_HOT;
912 	unsigned int freset = 0;
913 	int i, state = 0, ret;
914 
915 	/*
916 	 * Determine the type of reset to perform - hot or fundamental.
917 	 * Hot reset is the default operation, unless any device under the
918 	 * PE requires a fundamental reset.
919 	 */
920 	eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset);
921 
922 	if (freset)
923 		type = EEH_RESET_FUNDAMENTAL;
924 
925 	/* Mark the PE as in reset state and block config space accesses */
926 	eeh_pe_state_mark(pe, reset_state);
927 
928 	/* Make three attempts at resetting the bus */
929 	for (i = 0; i < 3; i++) {
930 		ret = eeh_pe_reset(pe, type, include_passed);
931 		if (!ret)
932 			ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE,
933 					   include_passed);
934 		if (ret) {
935 			pr_warn("EEH: Failure %d resetting PHB#%x-PE#%x (attempt %d)\n",
936 				ret, pe->phb->global_number, pe->addr, i + 1);
937 			ret = -EIO;
938 			continue;
939 		}
940 		if (i)
941 			pr_warn("EEH: PHB#%x-PE#%x: Successful reset (attempt %d)\n",
942 				pe->phb->global_number, pe->addr, i + 1);
943 
944 		/* Wait until the PE is in a functioning state */
945 		state = eeh_wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
946 		if (state < 0) {
947 			pr_warn("EEH: Unrecoverable slot failure on PHB#%x-PE#%x",
948 				pe->phb->global_number, pe->addr);
949 			ret = -ENOTRECOVERABLE;
950 			break;
951 		}
952 		if (eeh_state_active(state))
953 			break;
954 		else
955 			pr_warn("EEH: PHB#%x-PE#%x: Slot inactive after reset: 0x%x (attempt %d)\n",
956 				pe->phb->global_number, pe->addr, state, i + 1);
957 	}
958 
959 	/* Resetting the PE may have unfrozen child PEs. If those PEs have been
960 	 * (potentially) passed through to a guest, re-freeze them:
961 	 */
962 	if (!include_passed)
963 		eeh_pe_refreeze_passed(pe);
964 
965 	eeh_pe_state_clear(pe, reset_state, true);
966 	return ret;
967 }
968 
969 /**
970  * eeh_save_bars - Save device bars
971  * @edev: PCI device associated EEH device
972  *
973  * Save the values of the device bars. Unlike the restore
974  * routine, this routine is *not* recursive. This is because
975  * PCI devices are added individually; but, for the restore,
976  * an entire slot is reset at a time.
977  */
978 void eeh_save_bars(struct eeh_dev *edev)
979 {
980 	struct pci_dn *pdn;
981 	int i;
982 
983 	pdn = eeh_dev_to_pdn(edev);
984 	if (!pdn)
985 		return;
986 
987 	for (i = 0; i < 16; i++)
988 		eeh_ops->read_config(pdn, i * 4, 4, &edev->config_space[i]);
989 
990 	/*
991 	 * For PCI bridges, including the root port, we need to enable
992 	 * bus master explicitly. Otherwise, they can't fetch IODA table
993 	 * entries correctly. So we cache the bit in advance so that
994 	 * we can restore it after reset, whether PHB range or PE range.
995 	 */
996 	if (edev->mode & EEH_DEV_BRIDGE)
997 		edev->config_space[1] |= PCI_COMMAND_MASTER;
998 }
999 
1000 /**
1001  * eeh_ops_register - Register platform dependent EEH operations
1002  * @ops: platform dependent EEH operations
1003  *
1004  * Register the platform dependent EEH operation callback
1005  * functions. The platform should call this function before
1006  * any other EEH operations.
1007  */
1008 int __init eeh_ops_register(struct eeh_ops *ops)
1009 {
1010 	if (!ops->name) {
1011 		pr_warn("%s: Invalid EEH ops name for %p\n",
1012 			__func__, ops);
1013 		return -EINVAL;
1014 	}
1015 
1016 	if (eeh_ops && eeh_ops != ops) {
1017 		pr_warn("%s: EEH ops of platform %s already registered (%s)\n",
1018 			__func__, eeh_ops->name, ops->name);
1019 		return -EEXIST;
1020 	}
1021 
1022 	eeh_ops = ops;
1023 
1024 	return 0;
1025 }
1026 
1027 /**
1028  * eeh_ops_unregister - Unregister platform dependent EEH operations
1029  * @name: name of EEH platform operations
1030  *
1031  * Unregister the platform dependent EEH operation callback
1032  * functions.
1033  */
1034 int __exit eeh_ops_unregister(const char *name)
1035 {
1036 	if (!name || !strlen(name)) {
1037 		pr_warn("%s: Invalid EEH ops name\n",
1038 			__func__);
1039 		return -EINVAL;
1040 	}
1041 
1042 	if (eeh_ops && !strcmp(eeh_ops->name, name)) {
1043 		eeh_ops = NULL;
1044 		return 0;
1045 	}
1046 
1047 	return -EEXIST;
1048 }
1049 
1050 static int eeh_reboot_notifier(struct notifier_block *nb,
1051 			       unsigned long action, void *unused)
1052 {
1053 	eeh_clear_flag(EEH_ENABLED);
1054 	return NOTIFY_DONE;
1055 }
1056 
1057 static struct notifier_block eeh_reboot_nb = {
1058 	.notifier_call = eeh_reboot_notifier,
1059 };
1060 
1061 /**
1062  * eeh_init - EEH initialization
1063  *
1064  * Initialize EEH by trying to enable it for all of the adapters in the system.
1065  * As a side effect we can determine here if EEH is supported at all.
1066  * Note that we leave EEH on so failed config cycles won't cause a machine
1067  * check.  If a user turns off EEH for a particular adapter they are really
1068  * telling Linux to ignore errors.  Some hardware (e.g. POWER5) won't
1069  * grant access to a slot if EEH isn't enabled, and so we always enable
1070  * EEH for all slots/all devices.
1071  *
1072  * The eeh-force-off option disables EEH checking globally, for all slots.
1073  * Even if force-off is set, the EEH hardware is still enabled, so that
1074  * newer systems can boot.
1075  */
1076 static int eeh_init(void)
1077 {
1078 	struct pci_controller *hose, *tmp;
1079 	int ret = 0;
1080 
1081 	/* Register reboot notifier */
1082 	ret = register_reboot_notifier(&eeh_reboot_nb);
1083 	if (ret) {
1084 		pr_warn("%s: Failed to register notifier (%d)\n",
1085 			__func__, ret);
1086 		return ret;
1087 	}
1088 
1089 	/* call platform initialization function */
1090 	if (!eeh_ops) {
1091 		pr_warn("%s: Platform EEH operation not found\n",
1092 			__func__);
1093 		return -EEXIST;
1094 	} else if ((ret = eeh_ops->init()))
1095 		return ret;
1096 
1097 	/* Initialize PHB PEs */
1098 	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1099 		eeh_dev_phb_init_dynamic(hose);
1100 
1101 	eeh_addr_cache_init();
1102 
1103 	/* Initialize EEH event */
1104 	return eeh_event_init();
1105 }
1106 
1107 core_initcall_sync(eeh_init);
1108 
1109 static int eeh_device_notifier(struct notifier_block *nb,
1110 			       unsigned long action, void *data)
1111 {
1112 	struct device *dev = data;
1113 
1114 	switch (action) {
1115 	/*
1116 	 * Note: It's not possible to perform EEH device addition (i.e.
1117 	 * {pseries,pnv}_pcibios_bus_add_device()) here because it depends on
1118 	 * the device's resources, which have not yet been set up.
1119 	 */
1120 	case BUS_NOTIFY_DEL_DEVICE:
1121 		eeh_remove_device(to_pci_dev(dev));
1122 		break;
1123 	default:
1124 		break;
1125 	}
1126 	return NOTIFY_DONE;
1127 }
1128 
1129 static struct notifier_block eeh_device_nb = {
1130 	.notifier_call = eeh_device_notifier,
1131 };
1132 
1133 static __init int eeh_set_bus_notifier(void)
1134 {
1135 	bus_register_notifier(&pci_bus_type, &eeh_device_nb);
1136 	return 0;
1137 }
1138 arch_initcall(eeh_set_bus_notifier);
1139 
1140 /**
1141  * eeh_probe_device() - Perform EEH initialization for the indicated pci device
1142  * @dev: pci device for which to set up EEH
1143  *
1144  * This routine must be used to complete EEH initialization for PCI
1145  * devices that were added after system boot (e.g. hotplug, dlpar).
1146  */
1147 void eeh_probe_device(struct pci_dev *dev)
1148 {
1149 	struct eeh_dev *edev;
1150 
1151 	pr_debug("EEH: Adding device %s\n", pci_name(dev));
1152 
1153 	/*
1154 	 * pci_dev_to_eeh_dev() can only work if eeh_probe_device() was
1155 	 * already called for this device.
1156 	 */
1157 	if (WARN_ON_ONCE(pci_dev_to_eeh_dev(dev))) {
1158 		pci_dbg(dev, "Already bound to an eeh_dev!\n");
1159 		return;
1160 	}
1161 
1162 	edev = eeh_ops->probe(dev);
1163 	if (!edev) {
1164 		pr_debug("EEH: Adding device failed\n");
1165 		return;
1166 	}
1167 
1168 	/*
1169 	 * FIXME: We rely on pcibios_release_device() to remove the
1170 	 * existing EEH state. The release function is only called if
1171 	 * the pci_dev's refcount drops to zero so if something is
1172 	 * keeping a ref to a device (e.g. a filesystem) we need to
1173 	 * remove the old EEH state.
1174 	 *
1175 	 * FIXME: HEY MA, LOOK AT ME, NO LOCKING!
1176 	 */
1177 	if (edev->pdev && edev->pdev != dev) {
1178 		eeh_rmv_from_parent_pe(edev);
1179 		eeh_addr_cache_rmv_dev(edev->pdev);
1180 		eeh_sysfs_remove_device(edev->pdev);
1181 
1182 		/*
1183 		 * The PCI device should have been removed already,
1184 		 * even though that didn't happen cleanly. So we
1185 		 * needn't call into the error handler afterwards.
1186 		 */
1187 		edev->mode |= EEH_DEV_NO_HANDLER;
1188 	}
1189 
1190 	/* bind the pdev and the edev together */
1191 	edev->pdev = dev;
1192 	dev->dev.archdata.edev = edev;
1193 	eeh_addr_cache_insert_dev(dev);
1194 	eeh_sysfs_add_device(dev);
1195 }
1196 
1197 /**
1198  * eeh_remove_device - Undo EEH setup for the indicated pci device
1199  * @dev: pci device to be removed
1200  *
1201  * This routine should be called when a device is removed from
1202  * a running system (e.g. by hotplug or dlpar).  It unregisters
1203  * the PCI device from the EEH subsystem.  I/O errors affecting
1204  * this device will no longer be detected after this call; thus,
1205  * i/o errors affecting this slot may leave this device unusable.
1206  */
1207 void eeh_remove_device(struct pci_dev *dev)
1208 {
1209 	struct eeh_dev *edev;
1210 
1211 	if (!dev || !eeh_enabled())
1212 		return;
1213 	edev = pci_dev_to_eeh_dev(dev);
1214 
1215 	/* Unregister the device with the EEH/PCI address search system */
1216 	dev_dbg(&dev->dev, "EEH: Removing device\n");
1217 
1218 	if (!edev || !edev->pdev || !edev->pe) {
1219 		dev_dbg(&dev->dev, "EEH: Device not referenced!\n");
1220 		return;
1221 	}
1222 
1223 	/*
1224 	 * During the hotplug for EEH error recovery, we need the EEH
1225 	 * device attached to the parent PE so that its BARs can be
1226 	 * restored a bit later. So we keep it on the parent PE and
1227 	 * only remove it from there during the BAR restore.
1228 	 */
1229 	edev->pdev = NULL;
1230 
1231 	/*
1232 	 * eeh_sysfs_remove_device() uses pci_dev_to_eeh_dev() so we need to
1233 	 * remove the sysfs files before clearing dev.archdata.edev
1234 	 */
1235 	if (edev->mode & EEH_DEV_SYSFS)
1236 		eeh_sysfs_remove_device(dev);
1237 
1238 	/*
1239 	 * The device is being removed from the PCI subsystem, which
1240 	 * means the PCI device driver doesn't support EEH, or not
1241 	 * well. So we rely completely on hotplug to do recovery
1242 	 * for this specific PCI device.
1243 	 */
1244 	edev->mode |= EEH_DEV_NO_HANDLER;
1245 
1246 	eeh_addr_cache_rmv_dev(dev);
1247 
1248 	/*
1249 	 * The flag "in_error" is used to trace EEH devices for VFs
1250 	 * in error state or not. It's set in eeh_report_error(). If
1251 	 * it's not set, eeh_report_{reset,resume}() won't be called
1252 	 * for the VF EEH device.
1253 	 */
1254 	edev->in_error = false;
1255 	dev->dev.archdata.edev = NULL;
1256 	if (!(edev->pe->state & EEH_PE_KEEP))
1257 		eeh_rmv_from_parent_pe(edev);
1258 	else
1259 		edev->mode |= EEH_DEV_DISCONNECTED;
1260 }
1261 
1262 int eeh_unfreeze_pe(struct eeh_pe *pe)
1263 {
1264 	int ret;
1265 
1266 	ret = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
1267 	if (ret) {
1268 		pr_warn("%s: Failure %d enabling IO on PHB#%x-PE#%x\n",
1269 			__func__, ret, pe->phb->global_number, pe->addr);
1270 		return ret;
1271 	}
1272 
1273 	ret = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
1274 	if (ret) {
1275 		pr_warn("%s: Failure %d enabling DMA on PHB#%x-PE#%x\n",
1276 			__func__, ret, pe->phb->global_number, pe->addr);
1277 		return ret;
1278 	}
1279 
1280 	return ret;
1281 }
1282 
1283 
1284 static struct pci_device_id eeh_reset_ids[] = {
1285 	{ PCI_DEVICE(0x19a2, 0x0710) },	/* Emulex, BE     */
1286 	{ PCI_DEVICE(0x10df, 0xe220) },	/* Emulex, Lancer */
1287 	{ PCI_DEVICE(0x14e4, 0x1657) }, /* Broadcom BCM5719 */
1288 	{ 0 }
1289 };
1290 
1291 static int eeh_pe_change_owner(struct eeh_pe *pe)
1292 {
1293 	struct eeh_dev *edev, *tmp;
1294 	struct pci_dev *pdev;
1295 	struct pci_device_id *id;
1296 	int ret;
1297 
1298 	/* Check PE state */
1299 	ret = eeh_ops->get_state(pe, NULL);
1300 	if (ret < 0 || ret == EEH_STATE_NOT_SUPPORT)
1301 		return 0;
1302 
1303 	/* Unfrozen PE, nothing to do */
1304 	if (eeh_state_active(ret))
1305 		return 0;
1306 
1307 	/* Frozen PE, check if it needs PE level reset */
1308 	eeh_pe_for_each_dev(pe, edev, tmp) {
1309 		pdev = eeh_dev_to_pci_dev(edev);
1310 		if (!pdev)
1311 			continue;
1312 
1313 		for (id = &eeh_reset_ids[0]; id->vendor != 0; id++) {
1314 			if (id->vendor != PCI_ANY_ID &&
1315 			    id->vendor != pdev->vendor)
1316 				continue;
1317 			if (id->device != PCI_ANY_ID &&
1318 			    id->device != pdev->device)
1319 				continue;
1320 			if (id->subvendor != PCI_ANY_ID &&
1321 			    id->subvendor != pdev->subsystem_vendor)
1322 				continue;
1323 			if (id->subdevice != PCI_ANY_ID &&
1324 			    id->subdevice != pdev->subsystem_device)
1325 				continue;
1326 
1327 			return eeh_pe_reset_and_recover(pe);
1328 		}
1329 	}
1330 
1331 	ret = eeh_unfreeze_pe(pe);
1332 	if (!ret)
1333 		eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
1334 	return ret;
1335 }
1336 
1337 /**
1338  * eeh_dev_open - Increase count of pass through devices for PE
1339  * @pdev: PCI device
1340  *
1341  * Increase count of passed through devices for the indicated
1342  * PE. As a result, the EEH errors detected on the PE won't be
1343  * reported. The PE owner will be responsible for detection
1344  * and recovery.
1345  */
1346 int eeh_dev_open(struct pci_dev *pdev)
1347 {
1348 	struct eeh_dev *edev;
1349 	int ret = -ENODEV;
1350 
1351 	mutex_lock(&eeh_dev_mutex);
1352 
1353 	/* No PCI device ? */
1354 	if (!pdev)
1355 		goto out;
1356 
1357 	/* No EEH device or PE ? */
1358 	edev = pci_dev_to_eeh_dev(pdev);
1359 	if (!edev || !edev->pe)
1360 		goto out;
1361 
1362 	/*
1363 	 * The PE might have been put into frozen state, but we
1364 	 * didn't detect that yet. The passed through PCI devices
1365 	 * in frozen PE won't work properly. Clear the frozen state
1366 	 * in advance.
1367 	 */
1368 	ret = eeh_pe_change_owner(edev->pe);
1369 	if (ret)
1370 		goto out;
1371 
1372 	/* Increase PE's pass through count */
1373 	atomic_inc(&edev->pe->pass_dev_cnt);
1374 	mutex_unlock(&eeh_dev_mutex);
1375 
1376 	return 0;
1377 out:
1378 	mutex_unlock(&eeh_dev_mutex);
1379 	return ret;
1380 }
1381 EXPORT_SYMBOL_GPL(eeh_dev_open);
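
/*
 * Sketch of the intended pairing (e.g. by a pass-through backend such
 * as VFIO; "pdev" is hypothetical): while the count is non-zero,
 * eeh_pe_passed() is true and the kernel stops reporting EEH errors
 * on the PE:
 *
 *	if (!eeh_dev_open(pdev))
 *		(guest now owns error detection and recovery)
 *	...
 *	eeh_dev_release(pdev);
 */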
1382 
1383 /**
1384  * eeh_dev_release - Decrease count of pass through devices for PE
1385  * @pdev: PCI device
1386  *
1387  * Decrease count of pass through devices for the indicated PE. If
1388  * there is no passed through device in PE, the EEH errors detected
1389  * on the PE will be reported and handled as usual.
1390  */
1391 void eeh_dev_release(struct pci_dev *pdev)
1392 {
1393 	struct eeh_dev *edev;
1394 
1395 	mutex_lock(&eeh_dev_mutex);
1396 
1397 	/* No PCI device ? */
1398 	if (!pdev)
1399 		goto out;
1400 
1401 	/* No EEH device ? */
1402 	edev = pci_dev_to_eeh_dev(pdev);
1403 	if (!edev || !edev->pe || !eeh_pe_passed(edev->pe))
1404 		goto out;
1405 
1406 	/* Decrease PE's pass through count */
1407 	WARN_ON(atomic_dec_if_positive(&edev->pe->pass_dev_cnt) < 0);
1408 	eeh_pe_change_owner(edev->pe);
1409 out:
1410 	mutex_unlock(&eeh_dev_mutex);
1411 }
1412 EXPORT_SYMBOL(eeh_dev_release);
1413 
1414 #ifdef CONFIG_IOMMU_API
1415 
1416 static int dev_has_iommu_table(struct device *dev, void *data)
1417 {
1418 	struct pci_dev *pdev = to_pci_dev(dev);
1419 	struct pci_dev **ppdev = data;
1420 
1421 	if (!dev)
1422 		return 0;
1423 
1424 	if (device_iommu_mapped(dev)) {
1425 		*ppdev = pdev;
1426 		return 1;
1427 	}
1428 
1429 	return 0;
1430 }
1431 
1432 /**
1433  * eeh_iommu_group_to_pe - Convert IOMMU group to EEH PE
1434  * @group: IOMMU group
1435  *
1436  * The routine is called to convert an IOMMU group to an EEH PE.
1437  */
1438 struct eeh_pe *eeh_iommu_group_to_pe(struct iommu_group *group)
1439 {
1440 	struct pci_dev *pdev = NULL;
1441 	struct eeh_dev *edev;
1442 	int ret;
1443 
1444 	/* No IOMMU group ? */
1445 	if (!group)
1446 		return NULL;
1447 
1448 	ret = iommu_group_for_each_dev(group, &pdev, dev_has_iommu_table);
1449 	if (!ret || !pdev)
1450 		return NULL;
1451 
1452 	/* No EEH device or PE ? */
1453 	edev = pci_dev_to_eeh_dev(pdev);
1454 	if (!edev || !edev->pe)
1455 		return NULL;
1456 
1457 	return edev->pe;
1458 }
1459 EXPORT_SYMBOL_GPL(eeh_iommu_group_to_pe);
1460 
1461 #endif /* CONFIG_IOMMU_API */
1462 
1463 /**
1464  * eeh_pe_set_option - Set options for the indicated PE
1465  * @pe: EEH PE
1466  * @option: requested option
1467  *
1468  * The routine is called to enable or disable EEH functionality
1469  * on the indicated PE, to enable IO or DMA for the frozen PE.
1470  */
1471 int eeh_pe_set_option(struct eeh_pe *pe, int option)
1472 {
1473 	int ret = 0;
1474 
1475 	/* Invalid PE ? */
1476 	if (!pe)
1477 		return -ENODEV;
1478 
1479 	/*
1480 	 * EEH functionality could possibly be disabled; just
1481 	 * return an error in that case. The EEH functionality
1482 	 * isn't expected to be disabled on one specific PE.
1483 	 */
1484 	switch (option) {
1485 	case EEH_OPT_ENABLE:
1486 		if (eeh_enabled()) {
1487 			ret = eeh_pe_change_owner(pe);
1488 			break;
1489 		}
1490 		ret = -EIO;
1491 		break;
1492 	case EEH_OPT_DISABLE:
1493 		break;
1494 	case EEH_OPT_THAW_MMIO:
1495 	case EEH_OPT_THAW_DMA:
1496 	case EEH_OPT_FREEZE_PE:
1497 		if (!eeh_ops || !eeh_ops->set_option) {
1498 			ret = -ENOENT;
1499 			break;
1500 		}
1501 
1502 		ret = eeh_pci_enable(pe, option);
1503 		break;
1504 	default:
1505 		pr_debug("%s: Option %d out of range (%d, %d)\n",
1506 			__func__, option, EEH_OPT_DISABLE, EEH_OPT_THAW_DMA);
1507 		ret = -EINVAL;
1508 	}
1509 
1510 	return ret;
1511 }
1512 EXPORT_SYMBOL_GPL(eeh_pe_set_option);
1513 
1514 /**
1515  * eeh_pe_get_state - Retrieve PE's state
1516  * @pe: EEH PE
1517  *
1518  * Retrieve the PE's state, which includes 3 aspects: enabled
1519  * DMA, enabled IO and asserted reset.
1520  */
1521 int eeh_pe_get_state(struct eeh_pe *pe)
1522 {
1523 	int result, ret = 0;
1524 	bool rst_active, dma_en, mmio_en;
1525 
1526 	/* Existing PE ? */
1527 	if (!pe)
1528 		return -ENODEV;
1529 
1530 	if (!eeh_ops || !eeh_ops->get_state)
1531 		return -ENOENT;
1532 
1533 	/*
1534 	 * If the parent PE is owned by the host kernel and is undergoing
1535 	 * error recovery, we should return the PE state as temporarily
1536 	 * unavailable so that the error recovery on the guest is suspended
1537 	 * until the recovery completes on the host.
1538 	 */
1539 	if (pe->parent &&
1540 	    !(pe->state & EEH_PE_REMOVED) &&
1541 	    (pe->parent->state & (EEH_PE_ISOLATED | EEH_PE_RECOVERING)))
1542 		return EEH_PE_STATE_UNAVAIL;
1543 
1544 	result = eeh_ops->get_state(pe, NULL);
1545 	rst_active = !!(result & EEH_STATE_RESET_ACTIVE);
1546 	dma_en = !!(result & EEH_STATE_DMA_ENABLED);
1547 	mmio_en = !!(result & EEH_STATE_MMIO_ENABLED);
1548 
1549 	if (rst_active)
1550 		ret = EEH_PE_STATE_RESET;
1551 	else if (dma_en && mmio_en)
1552 		ret = EEH_PE_STATE_NORMAL;
1553 	else if (!dma_en && !mmio_en)
1554 		ret = EEH_PE_STATE_STOPPED_IO_DMA;
1555 	else if (!dma_en && mmio_en)
1556 		ret = EEH_PE_STATE_STOPPED_DMA;
1557 	else
1558 		ret = EEH_PE_STATE_UNAVAIL;
1559 
1560 	return ret;
1561 }
1562 EXPORT_SYMBOL_GPL(eeh_pe_get_state);
1563 
1564 static int eeh_pe_reenable_devices(struct eeh_pe *pe, bool include_passed)
1565 {
1566 	struct eeh_dev *edev, *tmp;
1567 	struct pci_dev *pdev;
1568 	int ret = 0;
1569 
1570 	eeh_pe_restore_bars(pe);
1571 
1572 	/*
1573 	 * Reenable PCI devices as the devices passed
1574 	 * through are always enabled before the reset.
1575 	 */
1576 	eeh_pe_for_each_dev(pe, edev, tmp) {
1577 		pdev = eeh_dev_to_pci_dev(edev);
1578 		if (!pdev)
1579 			continue;
1580 
1581 		ret = pci_reenable_device(pdev);
1582 		if (ret) {
1583 			pr_warn("%s: Failure %d reenabling %s\n",
1584 				__func__, ret, pci_name(pdev));
1585 			return ret;
1586 		}
1587 	}
1588 
1589 	/* The PE is still in frozen state */
1590 	if (include_passed || !eeh_pe_passed(pe)) {
1591 		ret = eeh_unfreeze_pe(pe);
1592 	} else {
1593 		pr_info("EEH: Note: Leaving passthrough PHB#%x-PE#%x frozen.\n",
1594 			pe->phb->global_number, pe->addr);
	}
1595 	if (!ret)
1596 		eeh_pe_state_clear(pe, EEH_PE_ISOLATED, include_passed);
1597 	return ret;
1598 }
1599 
1600 
1601 /**
1602  * eeh_pe_reset - Issue PE reset according to specified type
1603  * @pe: EEH PE
1604  * @option: reset type
1605  *
1606  * The routine is called to reset the specified PE with the
1607  * indicated type, either fundamental reset or hot reset.
1608  * PE reset is the most important part for error recovery.
1609  */
1610 int eeh_pe_reset(struct eeh_pe *pe, int option, bool include_passed)
1611 {
1612 	int ret = 0;
1613 
1614 	/* Invalid PE ? */
1615 	if (!pe)
1616 		return -ENODEV;
1617 
1618 	if (!eeh_ops || !eeh_ops->set_option || !eeh_ops->reset)
1619 		return -ENOENT;
1620 
1621 	switch (option) {
1622 	case EEH_RESET_DEACTIVATE:
1623 		ret = eeh_ops->reset(pe, option);
1624 		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, include_passed);
1625 		if (ret)
1626 			break;
1627 
1628 		ret = eeh_pe_reenable_devices(pe, include_passed);
1629 		break;
1630 	case EEH_RESET_HOT:
1631 	case EEH_RESET_FUNDAMENTAL:
1632 		/*
1633 		 * Proactively freeze the PE to drop all MMIO access
1634 		 * during reset, which should be banned as it always
1635 		 * causes a recursive EEH error.
1636 		 */
1637 		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
1638 
1639 		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
1640 		ret = eeh_ops->reset(pe, option);
1641 		break;
1642 	default:
1643 		pr_debug("%s: Unsupported option %d\n",
1644 			__func__, option);
1645 		ret = -EINVAL;
1646 	}
1647 
1648 	return ret;
1649 }
1650 EXPORT_SYMBOL_GPL(eeh_pe_reset);
1651 
1652 /**
1653  * eeh_pe_configure - Configure PCI bridges after PE reset
1654  * @pe: EEH PE
1655  *
1656  * The routine is called to restore the PCI config space for
1657  * those PCI devices, especially PCI bridges affected by PE
1658  * reset issued previously.
1659  */
1660 int eeh_pe_configure(struct eeh_pe *pe)
1661 {
1662 	int ret = 0;
1663 
1664 	/* Invalid PE ? */
1665 	if (!pe)
1666 		return -ENODEV;
1667 
1668 	return ret;
1669 }
1670 EXPORT_SYMBOL_GPL(eeh_pe_configure);
1671 
1672 /**
1673  * eeh_pe_inject_err - Inject the specified PCI error into the indicated PE
1674  * @pe: the indicated PE
1675  * @type: error type
1676  * @func: error function
1677  * @addr: address
1678  * @mask: address mask
1679  *
1680  * The routine is called to inject the specified PCI error, which
1681  * is determined by @type and @function, to the indicated PE for
1682  * testing purpose.
1683  */
1684 int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
1685 		      unsigned long addr, unsigned long mask)
1686 {
1687 	/* Invalid PE ? */
1688 	if (!pe)
1689 		return -ENODEV;
1690 
1691 	/* Unsupported operation ? */
1692 	if (!eeh_ops || !eeh_ops->err_inject)
1693 		return -ENOENT;
1694 
1695 	/* Check on PCI error type */
1696 	if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64)
1697 		return -EINVAL;
1698 
1699 	/* Check on PCI error function */
1700 	if (func < EEH_ERR_FUNC_MIN || func > EEH_ERR_FUNC_MAX)
1701 		return -EINVAL;
1702 
1703 	return eeh_ops->err_inject(pe, type, func, addr, mask);
1704 }
1705 EXPORT_SYMBOL_GPL(eeh_pe_inject_err);
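
/*
 * A minimal sketch, assuming the platform implements err_inject (e.g.
 * OPAL on powernv): inject a 32-bit MMIO load address error at "addr"
 * to exercise recovery; constants are from asm/eeh.h and the exact
 * mask semantics are platform defined:
 *
 *	eeh_pe_inject_err(pe, EEH_ERR_TYPE_32, EEH_ERR_FUNC_LD_MEM_ADDR,
 *			  addr, mask);
 */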
1706 
1707 static int proc_eeh_show(struct seq_file *m, void *v)
1708 {
1709 	if (!eeh_enabled()) {
1710 		seq_printf(m, "EEH Subsystem is globally disabled\n");
1711 		seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
1712 	} else {
1713 		seq_printf(m, "EEH Subsystem is enabled\n");
1714 		seq_printf(m,
1715 				"no device=%llu\n"
1716 				"no device node=%llu\n"
1717 				"no config address=%llu\n"
1718 				"check not wanted=%llu\n"
1719 				"eeh_total_mmio_ffs=%llu\n"
1720 				"eeh_false_positives=%llu\n"
1721 				"eeh_slot_resets=%llu\n",
1722 				eeh_stats.no_device,
1723 				eeh_stats.no_dn,
1724 				eeh_stats.no_cfg_addr,
1725 				eeh_stats.ignored_check,
1726 				eeh_stats.total_mmio_ffs,
1727 				eeh_stats.false_positives,
1728 				eeh_stats.slot_resets);
1729 	}
1730 
1731 	return 0;
1732 }
1733 
1734 #ifdef CONFIG_DEBUG_FS
1735 static int eeh_enable_dbgfs_set(void *data, u64 val)
1736 {
1737 	if (val)
1738 		eeh_clear_flag(EEH_FORCE_DISABLED);
1739 	else
1740 		eeh_add_flag(EEH_FORCE_DISABLED);
1741 
1742 	return 0;
1743 }
1744 
1745 static int eeh_enable_dbgfs_get(void *data, u64 *val)
1746 {
1747 	if (eeh_enabled())
1748 		*val = 0x1ul;
1749 	else
1750 		*val = 0x0ul;
1751 	return 0;
1752 }
1753 
1754 DEFINE_DEBUGFS_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get,
1755 			 eeh_enable_dbgfs_set, "0x%llx\n");
1756 
1757 static ssize_t eeh_force_recover_write(struct file *filp,
1758 				const char __user *user_buf,
1759 				size_t count, loff_t *ppos)
1760 {
1761 	struct pci_controller *hose;
1762 	uint32_t phbid, pe_no;
1763 	struct eeh_pe *pe;
1764 	char buf[20];
1765 	int ret;
1766 
1767 	ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
1768 	if (!ret)
1769 		return -EFAULT;
1770 
1771 	/*
1772 	 * When PE is NULL the event is a "special" event. Rather than
1773 	 * recovering a specific PE it forces the EEH core to scan for failed
1774 	 * PHBs and recovers each. This needs to be done before any device
1775 	 * recoveries can occur.
1776 	 */
1777 	if (!strncmp(buf, "hwcheck", 7)) {
1778 		__eeh_send_failure_event(NULL);
1779 		return count;
1780 	}
1781 
1782 	ret = sscanf(buf, "%x:%x", &phbid, &pe_no);
1783 	if (ret != 2)
1784 		return -EINVAL;
1785 
1786 	hose = pci_find_controller_for_domain(phbid);
1787 	if (!hose)
1788 		return -ENODEV;
1789 
1790 	/* Retrieve PE */
1791 	pe = eeh_pe_get(hose, pe_no, 0);
1792 	if (!pe)
1793 		return -ENODEV;
1794 
1795 	/*
1796 	 * We don't do any state checking here since the detection
1797 	 * process is async to the recovery process. The recovery
1798 	 * thread *should* not break even if we schedule a recovery
1799 	 * from an odd state (e.g. PE removed, or recovery of a
1800 	 * non-isolated PE)
1801 	 */
1802 	__eeh_send_failure_event(pe);
1803 
1804 	return ret < 0 ? ret : count;
1805 }
1806 
1807 static const struct file_operations eeh_force_recover_fops = {
1808 	.open	= simple_open,
1809 	.llseek	= no_llseek,
1810 	.write	= eeh_force_recover_write,
1811 };
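
/*
 * Example debugfs usage (paths assume the standard powerpc debugfs
 * root): recover a specific PE by "<phb>:<pe>", or scan for failed
 * PHBs with the special "hwcheck" keyword:
 *
 *	echo 0:1 > /sys/kernel/debug/powerpc/eeh_force_recover
 *	echo hwcheck > /sys/kernel/debug/powerpc/eeh_force_recover
 */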
1812 
1813 static ssize_t eeh_debugfs_dev_usage(struct file *filp,
1814 				char __user *user_buf,
1815 				size_t count, loff_t *ppos)
1816 {
1817 	static const char usage[] = "input format: <domain>:<bus>:<dev>.<fn>\n";
1818 
1819 	return simple_read_from_buffer(user_buf, count, ppos,
1820 				       usage, sizeof(usage) - 1);
1821 }
1822 
1823 static ssize_t eeh_dev_check_write(struct file *filp,
1824 				const char __user *user_buf,
1825 				size_t count, loff_t *ppos)
1826 {
1827 	uint32_t domain, bus, dev, fn;
1828 	struct pci_dev *pdev;
1829 	struct eeh_dev *edev;
1830 	char buf[20];
1831 	int ret;
1832 
1833 	memset(buf, 0, sizeof(buf));
1834 	ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count);
1835 	if (!ret)
1836 		return -EFAULT;
1837 
1838 	ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn);
1839 	if (ret != 4) {
1840 		pr_err("%s: expected 4 args, got %d\n", __func__, ret);
1841 		return -EINVAL;
1842 	}
1843 
1844 	pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn);
1845 	if (!pdev)
1846 		return -ENODEV;
1847 
1848 	edev = pci_dev_to_eeh_dev(pdev);
1849 	if (!edev) {
1850 		pci_err(pdev, "No eeh_dev for this device!\n");
1851 		pci_dev_put(pdev);
1852 		return -ENODEV;
1853 	}
1854 
1855 	ret = eeh_dev_check_failure(edev);
1856 	pci_info(pdev, "eeh_dev_check_failure(%04x:%02x:%02x.%01x) = %d\n",
1857 			domain, bus, dev, fn, ret);
1858 
1859 	pci_dev_put(pdev);
1860 
1861 	return count;
1862 }
1863 
1864 static const struct file_operations eeh_dev_check_fops = {
1865 	.open	= simple_open,
1866 	.llseek	= no_llseek,
1867 	.write	= eeh_dev_check_write,
1868 	.read   = eeh_debugfs_dev_usage,
1869 };
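
/*
 * Example debugfs usage: ask the EEH core whether a device's PE is
 * frozen, using the <domain>:<bus>:<dev>.<fn> format reported by
 * eeh_debugfs_dev_usage() above:
 *
 *	echo 0000:01:00.0 > /sys/kernel/debug/powerpc/eeh_dev_check
 */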
1870 
1871 static int eeh_debugfs_break_device(struct pci_dev *pdev)
1872 {
1873 	struct resource *bar = NULL;
1874 	void __iomem *mapped;
1875 	u16 old, bit;
1876 	int i, pos;
1877 
1878 	/* Do we have an MMIO BAR to disable? */
1879 	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
1880 		struct resource *r = &pdev->resource[i];
1881 
1882 		if (!r->flags || !r->start)
1883 			continue;
1884 		if (r->flags & IORESOURCE_IO)
1885 			continue;
1886 		if (r->flags & IORESOURCE_UNSET)
1887 			continue;
1888 
1889 		bar = r;
1890 		break;
1891 	}
1892 
1893 	if (!bar) {
1894 		pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n");
1895 		return -ENXIO;
1896 	}
1897 
1898 	pci_err(pdev, "Going to break: %pR\n", bar);
1899 
1900 	if (pdev->is_virtfn) {
1901 #ifndef CONFIG_PCI_IOV
1902 		return -ENXIO;
1903 #else
1904 		/*
1905 		 * VFs don't have a per-function COMMAND register, so the best
1906 		 * we can do is clear the Memory Space Enable bit in the PF's
1907 		 * SRIOV control reg.
1908 		 *
1909 		 * Unfortunately, this requires that we have a PF (i.e. doesn't
1910 		 * work for a passed-through VF) and it has the potential side
1911 		 * effect of also causing an EEH on every other VF under the
1912 		 * PF. Oh well.
1913 		 */
1914 		pdev = pdev->physfn;
1915 		if (!pdev)
1916 			return -ENXIO; /* passed through VFs have no PF */
1917 
1918 		pos  = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1919 		pos += PCI_SRIOV_CTRL;
1920 		bit  = PCI_SRIOV_CTRL_MSE;
1921 #endif /* !CONFIG_PCI_IOV */
1922 	} else {
1923 		bit = PCI_COMMAND_MEMORY;
1924 		pos = PCI_COMMAND;
1925 	}
1926 
1927 	/*
1928 	 * Process here is:
1929 	 *
1930 	 * 1. Disable Memory space.
1931 	 *
1932 	 * 2. Perform an MMIO to the device. This should result in an error
1933 	 *    (CA / UR) being raised by the device, which results in an EEH
1934 	 *    PE freeze. Using the in_8() accessor skips the EEH detection
1935 	 *    hook, so the EEH detection machinery won't be triggered
1936 	 *    here. This is to match the usual behaviour of EEH, where
1937 	 *    the HW will asynchronously freeze a PE and it's up to
1938 	 *    the kernel to notice and deal with it.
1939 	 *
1940 	 * 3. Turn Memory space back on. This is more important for VFs
1941 	 *    since recovery will probably fail if we don't. For normal devices
1942 	 *    the COMMAND register is reset as a part of re-initialising
1943 	 *    the device.
1944 	 *
1945 	 * Breaking stuff is the point so who cares if it's racy ;)
1946 	 */
1947 	pci_read_config_word(pdev, pos, &old);
1948 
1949 	mapped = ioremap(bar->start, PAGE_SIZE);
1950 	if (!mapped) {
1951 		pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar);
1952 		return -ENXIO;
1953 	}
1954 
1955 	pci_write_config_word(pdev, pos, old & ~bit);
1956 	in_8(mapped);
1957 	pci_write_config_word(pdev, pos, old);
1958 
1959 	iounmap(mapped);
1960 
1961 	return 0;
1962 }
1963 
1964 static ssize_t eeh_dev_break_write(struct file *filp,
1965 				const char __user *user_buf,
1966 				size_t count, loff_t *ppos)
1967 {
1968 	uint32_t domain, bus, dev, fn;
1969 	struct pci_dev *pdev;
1970 	char buf[20];
1971 	int ret;
1972 
1973 	memset(buf, 0, sizeof(buf));
1974 	ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count);
1975 	if (!ret)
1976 		return -EFAULT;
1977 
1978 	ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn);
1979 	if (ret != 4) {
1980 		pr_err("%s: expected 4 args, got %d\n", __func__, ret);
1981 		return -EINVAL;
1982 	}
1983 
1984 	pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn);
1985 	if (!pdev)
1986 		return -ENODEV;
1987 
1988 	ret = eeh_debugfs_break_device(pdev);
1989 	pci_dev_put(pdev);
1990 
1991 	if (ret < 0)
1992 		return ret;
1993 
1994 	return count;
1995 }
1996 
1997 static const struct file_operations eeh_dev_break_fops = {
1998 	.open	= simple_open,
1999 	.llseek	= no_llseek,
2000 	.write	= eeh_dev_break_write,
2001 	.read   = eeh_debugfs_dev_usage,
2002 };
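
/*
 * Example debugfs usage (testing only - this intentionally freezes
 * the device's PE by doing an MMIO load with Memory space disabled):
 *
 *	echo 0000:01:00.0 > /sys/kernel/debug/powerpc/eeh_dev_break
 */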
2003 
2004 #endif
2005 
2006 static int __init eeh_init_proc(void)
2007 {
2008 	if (machine_is(pseries) || machine_is(powernv)) {
2009 		proc_create_single("powerpc/eeh", 0, NULL, proc_eeh_show);
2010 #ifdef CONFIG_DEBUG_FS
2011 		debugfs_create_file_unsafe("eeh_enable", 0600,
2012 					   powerpc_debugfs_root, NULL,
2013 					   &eeh_enable_dbgfs_ops);
2014 		debugfs_create_u32("eeh_max_freezes", 0600,
2015 				powerpc_debugfs_root, &eeh_max_freezes);
2016 		debugfs_create_bool("eeh_disable_recovery", 0600,
2017 				powerpc_debugfs_root,
2018 				&eeh_debugfs_no_recover);
2019 		debugfs_create_file_unsafe("eeh_dev_check", 0600,
2020 				powerpc_debugfs_root, NULL,
2021 				&eeh_dev_check_fops);
2022 		debugfs_create_file_unsafe("eeh_dev_break", 0600,
2023 				powerpc_debugfs_root, NULL,
2024 				&eeh_dev_break_fops);
2025 		debugfs_create_file_unsafe("eeh_force_recover", 0600,
2026 				powerpc_debugfs_root, NULL,
2027 				&eeh_force_recover_fops);
2028 		eeh_cache_debugfs_init();
2029 #endif
2030 	}
2031 
2032 	return 0;
2033 }
2034 __initcall(eeh_init_proc);
2035