1 /*
 * This file implements the platform dependent EEH operations on the
 * powernv platform, where the kernel runs natively on top of OPAL
 * firmware.
5  *
6  * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  */
13 
14 #include <linux/atomic.h>
15 #include <linux/debugfs.h>
16 #include <linux/delay.h>
17 #include <linux/export.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/msi.h>
21 #include <linux/of.h>
22 #include <linux/pci.h>
23 #include <linux/proc_fs.h>
24 #include <linux/rbtree.h>
25 #include <linux/sched.h>
26 #include <linux/seq_file.h>
27 #include <linux/spinlock.h>
28 
29 #include <asm/eeh.h>
30 #include <asm/eeh_event.h>
31 #include <asm/firmware.h>
32 #include <asm/io.h>
33 #include <asm/iommu.h>
34 #include <asm/machdep.h>
35 #include <asm/msi_bitmap.h>
36 #include <asm/opal.h>
37 #include <asm/ppc-pci.h>
38 
39 #include "powernv.h"
40 #include "pci.h"
41 
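/* Guards against registering the OPAL event notifier more than once */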
static bool pnv_eeh_nb_init;
43 
44 /**
45  * pnv_eeh_init - EEH platform dependent initialization
46  *
47  * EEH platform dependent initialization on powernv
48  */
49 static int pnv_eeh_init(void)
50 {
51 	struct pci_controller *hose;
52 	struct pnv_phb *phb;
53 
54 	/* We require OPALv3 */
55 	if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
		pr_warn("%s: OPALv3 is required!\n", __func__);
58 		return -EINVAL;
59 	}
60 
61 	/* Set probe mode */
62 	eeh_add_flag(EEH_PROBE_MODE_DEV);
63 
64 	/*
65 	 * P7IOC blocks PCI config access to frozen PE, but PHB3
66 	 * doesn't do that. So we have to selectively enable I/O
67 	 * prior to collecting error log.
68 	 */
69 	list_for_each_entry(hose, &hose_list, list_node) {
70 		phb = hose->private_data;
71 
72 		if (phb->model == PNV_PHB_MODEL_P7IOC)
73 			eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);
74 
75 		/*
76 		 * PE#0 should be regarded as valid by EEH core
77 		 * if it's not the reserved one. Currently, we
78 		 * have the reserved PE#0 and PE#127 for PHB3
79 		 * and P7IOC separately. So we should regard
80 		 * PE#0 as valid for P7IOC.
81 		 */
82 		if (phb->ioda.reserved_pe != 0)
83 			eeh_add_flag(EEH_VALID_PE_ZERO);
84 
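		/*
		 * All PHBs are assumed to share the same model, so
		 * checking the first one is enough
		 */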
85 		break;
86 	}
87 
88 	return 0;
89 }
90 
91 static int pnv_eeh_event(struct notifier_block *nb,
92 			 unsigned long events, void *change)
93 {
94 	uint64_t changed_evts = (uint64_t)change;
95 
96 	/*
97 	 * We simply send special EEH event if EEH has
98 	 * been enabled, or clear pending events in
99 	 * case that we enable EEH soon
100 	 */
101 	if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
102 	    !(events & OPAL_EVENT_PCI_ERROR))
103 		return 0;
104 
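	/*
	 * A NULL PE asks the EEH core to probe all PHBs for the
	 * offending PE via the next_error() callback
	 */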
105 	if (eeh_enabled())
106 		eeh_send_failure_event(NULL);
107 	else
108 		opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
109 
110 	return 0;
111 }
112 
113 static struct notifier_block pnv_eeh_nb = {
114 	.notifier_call	= pnv_eeh_event,
115 	.next		= NULL,
116 	.priority	= 0
117 };
118 
119 #ifdef CONFIG_DEBUG_FS
120 static ssize_t pnv_eeh_ei_write(struct file *filp,
121 				const char __user *user_buf,
122 				size_t count, loff_t *ppos)
123 {
124 	struct pci_controller *hose = filp->private_data;
125 	struct eeh_dev *edev;
126 	struct eeh_pe *pe;
127 	int pe_no, type, func;
128 	unsigned long addr, mask;
129 	char buf[50];
130 	int ret;
131 
132 	if (!eeh_ops || !eeh_ops->err_inject)
133 		return -ENXIO;
134 
	/* Copy over the argument buffer and NUL-terminate it */
	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
	if (ret <= 0)
		return -EFAULT;
	buf[ret] = 0;
139 
	/*
	 * Retrieve parameters. The expected format is five hex
	 * fields: "pe_no:type:func:addr:mask"
	 */
141 	ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
142 		     &pe_no, &type, &func, &addr, &mask);
143 	if (ret != 5)
144 		return -EINVAL;
145 
	/*
	 * Retrieve the PE. A temporary EEH device is enough to
	 * look the PE up by its config address
	 */
147 	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
148 	if (!edev)
149 		return -ENOMEM;
150 	edev->phb = hose;
151 	edev->pe_config_addr = pe_no;
152 	pe = eeh_pe_get(edev);
153 	kfree(edev);
154 	if (!pe)
155 		return -ENODEV;
156 
157 	/* Do error injection */
158 	ret = eeh_ops->err_inject(pe, type, func, addr, mask);
159 	return ret < 0 ? ret : count;
160 }
161 
162 static const struct file_operations pnv_eeh_ei_fops = {
163 	.open	= simple_open,
164 	.llseek	= no_llseek,
165 	.write	= pnv_eeh_ei_write,
166 };
167 
168 static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
169 {
170 	struct pci_controller *hose = data;
171 	struct pnv_phb *phb = hose->private_data;
172 
173 	out_be64(phb->regs + offset, val);
174 	return 0;
175 }
176 
177 static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
178 {
179 	struct pci_controller *hose = data;
180 	struct pnv_phb *phb = hose->private_data;
181 
182 	*val = in_be64(phb->regs + offset);
183 	return 0;
184 }
185 
186 static int pnv_eeh_outb_dbgfs_set(void *data, u64 val)
187 {
188 	return pnv_eeh_dbgfs_set(data, 0xD10, val);
189 }
190 
191 static int pnv_eeh_outb_dbgfs_get(void *data, u64 *val)
192 {
193 	return pnv_eeh_dbgfs_get(data, 0xD10, val);
194 }
195 
196 static int pnv_eeh_inbA_dbgfs_set(void *data, u64 val)
197 {
198 	return pnv_eeh_dbgfs_set(data, 0xD90, val);
199 }
200 
201 static int pnv_eeh_inbA_dbgfs_get(void *data, u64 *val)
202 {
203 	return pnv_eeh_dbgfs_get(data, 0xD90, val);
204 }
205 
206 static int pnv_eeh_inbB_dbgfs_set(void *data, u64 val)
207 {
208 	return pnv_eeh_dbgfs_set(data, 0xE10, val);
209 }
210 
211 static int pnv_eeh_inbB_dbgfs_get(void *data, u64 *val)
212 {
213 	return pnv_eeh_dbgfs_get(data, 0xE10, val);
214 }
215 
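/*
 * Expose the (presumed) outbound (0xD10), inbound A (0xD90) and
 * inbound B (0xE10) error injection registers of the PHB as simple
 * debugfs attributes; the register roles are inferred from the
 * helper names above.
 */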
216 DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_outb_dbgfs_ops, pnv_eeh_outb_dbgfs_get,
217 			pnv_eeh_outb_dbgfs_set, "0x%llx\n");
218 DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbA_dbgfs_ops, pnv_eeh_inbA_dbgfs_get,
219 			pnv_eeh_inbA_dbgfs_set, "0x%llx\n");
220 DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbB_dbgfs_ops, pnv_eeh_inbB_dbgfs_get,
221 			pnv_eeh_inbB_dbgfs_set, "0x%llx\n");
222 #endif /* CONFIG_DEBUG_FS */
223 
224 /**
225  * pnv_eeh_post_init - EEH platform dependent post initialization
226  *
 * EEH platform dependent post initialization on powernv. When
 * this function is called, the EEH PEs and devices should have
 * been built. Once the I/O cache has been built as well, EEH is
 * ready to provide service.
231  */
232 static int pnv_eeh_post_init(void)
233 {
234 	struct pci_controller *hose;
235 	struct pnv_phb *phb;
236 	int ret = 0;
237 
238 	/* Register OPAL event notifier */
239 	if (!pnv_eeh_nb_init) {
240 		ret = opal_notifier_register(&pnv_eeh_nb);
241 		if (ret) {
242 			pr_warn("%s: Can't register OPAL event notifier (%d)\n",
243 				__func__, ret);
244 			return ret;
245 		}
246 
247 		pnv_eeh_nb_init = true;
248 	}
249 
250 	list_for_each_entry(hose, &hose_list, list_node) {
251 		phb = hose->private_data;
252 
253 		/*
254 		 * If EEH is enabled, we're going to rely on that.
255 		 * Otherwise, we restore to conventional mechanism
256 		 * to clear frozen PE during PCI config access.
257 		 */
258 		if (eeh_enabled())
259 			phb->flags |= PNV_PHB_FLAG_EEH;
260 		else
261 			phb->flags &= ~PNV_PHB_FLAG_EEH;
262 
263 		/* Create debugfs entries */
264 #ifdef CONFIG_DEBUG_FS
265 		if (phb->has_dbgfs || !phb->dbgfs)
266 			continue;
267 
268 		phb->has_dbgfs = 1;
269 		debugfs_create_file("err_injct", 0200,
270 				    phb->dbgfs, hose,
271 				    &pnv_eeh_ei_fops);
272 
273 		debugfs_create_file("err_injct_outbound", 0600,
274 				    phb->dbgfs, hose,
275 				    &pnv_eeh_outb_dbgfs_ops);
276 		debugfs_create_file("err_injct_inboundA", 0600,
277 				    phb->dbgfs, hose,
278 				    &pnv_eeh_inbA_dbgfs_ops);
279 		debugfs_create_file("err_injct_inboundB", 0600,
280 				    phb->dbgfs, hose,
281 				    &pnv_eeh_inbB_dbgfs_ops);
282 #endif /* CONFIG_DEBUG_FS */
283 	}
284 
286 	return ret;
287 }
288 
289 /**
290  * pnv_eeh_dev_probe - Do probe on PCI device
291  * @dev: PCI device
292  * @flag: unused
293  *
 * When the EEH module is installed during system boot, all PCI
 * devices are checked one by one to see if they support EEH. This
 * function is introduced for that purpose. By default, EEH is
 * enabled on all PCI devices, which is to say we only need to do
 * the necessary initialization on the corresponding eeh device and
 * create the PE accordingly.
 *
 * Note that it's unsafe to retrieve the EEH device through the
 * corresponding PCI device. During a PCI device hotplug, which may
 * have been triggered by the EEH core, the binding between the EEH
 * device and the PCI device isn't built yet.
305  */
306 static int pnv_eeh_dev_probe(struct pci_dev *dev, void *flag)
307 {
308 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
309 	struct pnv_phb *phb = hose->private_data;
310 	struct device_node *dn = pci_device_to_OF_node(dev);
311 	struct eeh_dev *edev = of_node_to_eeh_dev(dn);
312 	int ret;
313 
314 	/*
315 	 * When probing the root bridge, which doesn't have any
316 	 * subordinate PCI devices. We don't have OF node for
317 	 * the root bridge. So it's not reasonable to continue
318 	 * the probing.
319 	 */
320 	if (!dn || !edev || edev->pe)
321 		return 0;
322 
323 	/* Skip for PCI-ISA bridge */
324 	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
325 		return 0;
326 
327 	/* Initialize eeh device */
328 	edev->class_code = dev->class;
329 	edev->mode	&= 0xFFFFFF00;
330 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
331 		edev->mode |= EEH_DEV_BRIDGE;
332 	edev->pcix_cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
333 	if (pci_is_pcie(dev)) {
334 		edev->pcie_cap = pci_pcie_cap(dev);
335 
336 		if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
337 			edev->mode |= EEH_DEV_ROOT_PORT;
338 		else if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
339 			edev->mode |= EEH_DEV_DS_PORT;
340 
341 		edev->aer_cap = pci_find_ext_capability(dev,
342 							PCI_EXT_CAP_ID_ERR);
343 	}
344 
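	/* Traditional PCI config address: (bus << 8) | devfn */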
345 	edev->config_addr	= ((dev->bus->number << 8) | dev->devfn);
346 	edev->pe_config_addr	= phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff);
347 
348 	/* Create PE */
349 	ret = eeh_add_to_parent_pe(edev);
350 	if (ret) {
351 		pr_warn("%s: Can't add PCI dev %s to parent PE (%d)\n",
352 			__func__, pci_name(dev), ret);
353 		return ret;
354 	}
355 
356 	/*
357 	 * If the PE contains any one of following adapters, the
358 	 * PCI config space can't be accessed when dumping EEH log.
359 	 * Otherwise, we will run into fenced PHB caused by shortage
360 	 * of outbound credits in the adapter. The PCI config access
361 	 * should be blocked until PE reset. MMIO access is dropped
362 	 * by hardware certainly. In order to drop PCI config requests,
363 	 * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
364 	 * will be checked in the backend for PE state retrival. If
365 	 * the PE becomes frozen for the first time and the flag has
366 	 * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
367 	 * that PE to block its config space.
368 	 *
369 	 * Broadcom Austin 4-ports NICs (14e4:1657)
370 	 * Broadcom Shiner 2-ports 10G NICs (14e4:168e)
371 	 */
372 	if ((dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x1657) ||
373 	    (dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x168e))
374 		edev->pe->state |= EEH_PE_CFG_RESTRICTED;
375 
376 	/*
377 	 * Cache the PE primary bus, which can't be fetched when
378 	 * full hotplug is in progress. In that case, all child
379 	 * PCI devices of the PE are expected to be removed prior
380 	 * to PE reset.
381 	 */
382 	if (!edev->pe->bus)
383 		edev->pe->bus = dev->bus;
384 
385 	/*
386 	 * Enable EEH explicitly so that we will do EEH check
387 	 * while accessing I/O stuff
388 	 */
389 	eeh_add_flag(EEH_ENABLED);
390 
391 	/* Save memory bars */
392 	eeh_save_bars(edev);
393 
394 	return 0;
395 }
396 
397 /**
398  * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
399  * @pe: EEH PE
400  * @option: operation to be issued
401  *
 * This function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
405  */
406 static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
407 {
408 	struct pci_controller *hose = pe->phb;
409 	struct pnv_phb *phb = hose->private_data;
410 	int ret = -EEXIST;
411 
412 	/*
413 	 * What we need do is pass it down for hardware
414 	 * implementation to handle it.
415 	 */
416 	if (phb->eeh_ops && phb->eeh_ops->set_option)
417 		ret = phb->eeh_ops->set_option(pe, option);
418 
419 	return ret;
420 }
421 
422 /**
423  * pnv_eeh_get_pe_addr - Retrieve PE address
424  * @pe: EEH PE
425  *
 * Retrieve the PE address according to the given traditional
 * PCI BDF (Bus/Device/Function) address.
428  */
429 static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
430 {
431 	return pe->addr;
432 }
433 
434 /**
435  * pnv_eeh_get_state - Retrieve PE state
436  * @pe: EEH PE
437  * @delay: delay while PE state is temporarily unavailable
438  *
 * Retrieve the state of the specified PE. On an IODA-compatible
 * platform, the state should be retrieved from the IODA table.
 * Therefore, we prefer passing it down to the hardware
 * implementation to handle.
443  */
444 static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
445 {
446 	struct pci_controller *hose = pe->phb;
447 	struct pnv_phb *phb = hose->private_data;
448 	int ret = EEH_STATE_NOT_SUPPORT;
449 
450 	if (phb->eeh_ops && phb->eeh_ops->get_state) {
451 		ret = phb->eeh_ops->get_state(pe);
452 
453 		/*
454 		 * If the PE state is temporarily unavailable,
455 		 * to inform the EEH core delay for default
456 		 * period (1 second)
457 		 */
458 		if (delay) {
459 			*delay = 0;
460 			if (ret & EEH_STATE_UNAVAILABLE)
461 				*delay = 1000;
462 		}
463 	}
464 
465 	return ret;
466 }
467 
468 /**
469  * pnv_eeh_reset - Reset the specified PE
470  * @pe: EEH PE
471  * @option: reset option
472  *
473  * Reset the specified PE
474  */
475 static int pnv_eeh_reset(struct eeh_pe *pe, int option)
476 {
477 	struct pci_controller *hose = pe->phb;
478 	struct pnv_phb *phb = hose->private_data;
479 	int ret = -EEXIST;
480 
481 	if (phb->eeh_ops && phb->eeh_ops->reset)
482 		ret = phb->eeh_ops->reset(pe, option);
483 
484 	return ret;
485 }
486 
487 /**
488  * pnv_eeh_wait_state - Wait for PE state
489  * @pe: EEH PE
 * @max_wait: maximal wait period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
494  */
495 static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
496 {
497 	int ret;
498 	int mwait;
499 
500 	while (1) {
501 		ret = pnv_eeh_get_state(pe, &mwait);
502 
503 		/*
504 		 * If the PE's state is temporarily unavailable,
505 		 * we have to wait for the specified time. Otherwise,
506 		 * the PE's state will be returned immediately.
507 		 */
508 		if (ret != EEH_STATE_UNAVAILABLE)
509 			return ret;
510 
511 		max_wait -= mwait;
512 		if (max_wait <= 0) {
513 			pr_warn("%s: Timeout getting PE#%x's state (%d)\n",
514 				__func__, pe->addr, max_wait);
515 			return EEH_STATE_NOT_SUPPORT;
516 		}
517 
518 		msleep(mwait);
519 	}
520 
521 	return EEH_STATE_NOT_SUPPORT;
522 }
523 
524 /**
525  * pnv_eeh_get_log - Retrieve error log
526  * @pe: EEH PE
527  * @severity: temporary or permanent error log
528  * @drv_log: driver log to be combined with retrieved error log
529  * @len: length of driver log
530  *
 * Retrieve the temporary or permanent error log from the PE.
532  */
533 static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
534 			   char *drv_log, unsigned long len)
535 {
536 	if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
537 		pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
538 
539 	return 0;
540 }
541 
542 /**
543  * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
544  * @pe: EEH PE
545  *
 * This function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be
 * recovered.
549  */
550 static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
551 {
552 	struct pci_controller *hose = pe->phb;
553 	struct pnv_phb *phb = hose->private_data;
554 	int ret = 0;
555 
556 	if (phb->eeh_ops && phb->eeh_ops->configure_bridge)
557 		ret = phb->eeh_ops->configure_bridge(pe);
558 
559 	return ret;
560 }
561 
562 /**
 * pnv_eeh_err_inject - Inject specified error to the indicated PE
564  * @pe: the indicated PE
565  * @type: error type
566  * @func: specific error type
567  * @addr: address
568  * @mask: address mask
569  *
 * This routine is called to inject the specified error, which is
 * determined by @type and @func, to the indicated PE for testing
 * purposes.
573  */
574 static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
575 			      unsigned long addr, unsigned long mask)
576 {
577 	struct pci_controller *hose = pe->phb;
578 	struct pnv_phb *phb = hose->private_data;
579 	s64 rc;
580 
581 	/* Sanity check on error type */
582 	if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
583 	    type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
584 		pr_warn("%s: Invalid error type %d\n",
585 			__func__, type);
586 		return -ERANGE;
587 	}
588 
589 	if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
590 	    func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
591 		pr_warn("%s: Invalid error function %d\n",
592 			__func__, func);
593 		return -ERANGE;
594 	}
595 
	/* Does the firmware support error injection? */
597 	if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
598 		pr_warn("%s: Firmware doesn't support error injection\n",
599 			__func__);
600 		return -ENXIO;
601 	}
602 
603 	/* Do error injection */
604 	rc = opal_pci_err_inject(phb->opal_id, pe->addr,
605 				 type, func, addr, mask);
606 	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld injecting error %d-%d to PHB#%x-PE#%x\n",
			__func__, rc, type, func,
			hose->global_number, pe->addr);
611 		return -EIO;
612 	}
613 
614 	return 0;
615 }
616 
617 static inline bool pnv_eeh_cfg_blocked(struct device_node *dn)
618 {
619 	struct eeh_dev *edev = of_node_to_eeh_dev(dn);
620 
621 	if (!edev || !edev->pe)
622 		return false;
623 
624 	if (edev->pe->state & EEH_PE_CFG_BLOCKED)
625 		return true;
626 
627 	return false;
628 }
629 
630 static int pnv_eeh_read_config(struct device_node *dn,
631 			       int where, int size, u32 *val)
632 {
633 	if (pnv_eeh_cfg_blocked(dn)) {
634 		*val = 0xFFFFFFFF;
635 		return PCIBIOS_SET_FAILED;
636 	}
637 
638 	return pnv_pci_cfg_read(dn, where, size, val);
639 }
640 
641 static int pnv_eeh_write_config(struct device_node *dn,
642 				int where, int size, u32 val)
643 {
644 	if (pnv_eeh_cfg_blocked(dn))
645 		return PCIBIOS_SET_FAILED;
646 
647 	return pnv_pci_cfg_write(dn, where, size, val);
648 }
649 
650 /**
651  * pnv_eeh_next_error - Retrieve next EEH error to handle
652  * @pe: Affected PE
653  *
 * Use the OPAL API to retrieve the next EEH error for the EEH core
 * to handle
655  */
656 static int pnv_eeh_next_error(struct eeh_pe **pe)
657 {
658 	struct pci_controller *hose;
659 	struct pnv_phb *phb = NULL;
660 
661 	list_for_each_entry(hose, &hose_list, list_node) {
662 		phb = hose->private_data;
663 		break;
664 	}
665 
666 	if (phb && phb->eeh_ops->next_error)
667 		return phb->eeh_ops->next_error(pe);
668 
669 	return -EEXIST;
670 }
671 
672 static int pnv_eeh_restore_config(struct device_node *dn)
673 {
674 	struct eeh_dev *edev = of_node_to_eeh_dev(dn);
675 	struct pnv_phb *phb;
676 	s64 ret;
677 
678 	if (!edev)
679 		return -EEXIST;
680 
681 	phb = edev->phb->private_data;
682 	ret = opal_pci_reinit(phb->opal_id,
683 			      OPAL_REINIT_PCI_DEV, edev->config_addr);
684 	if (ret) {
685 		pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
686 			__func__, edev->config_addr, ret);
687 		return -EIO;
688 	}
689 
690 	return 0;
691 }
692 
693 static struct eeh_ops pnv_eeh_ops = {
694 	.name                   = "powernv",
695 	.init                   = pnv_eeh_init,
696 	.post_init              = pnv_eeh_post_init,
697 	.of_probe               = NULL,
698 	.dev_probe              = pnv_eeh_dev_probe,
699 	.set_option             = pnv_eeh_set_option,
700 	.get_pe_addr            = pnv_eeh_get_pe_addr,
701 	.get_state              = pnv_eeh_get_state,
702 	.reset                  = pnv_eeh_reset,
703 	.wait_state             = pnv_eeh_wait_state,
704 	.get_log                = pnv_eeh_get_log,
705 	.configure_bridge       = pnv_eeh_configure_bridge,
706 	.err_inject		= pnv_eeh_err_inject,
707 	.read_config            = pnv_eeh_read_config,
708 	.write_config           = pnv_eeh_write_config,
709 	.next_error		= pnv_eeh_next_error,
710 	.restore_config		= pnv_eeh_restore_config
711 };
712 
713 /**
714  * eeh_powernv_init - Register platform dependent EEH operations
715  *
716  * EEH initialization on powernv platform. This function should be
717  * called before any EEH related functions.
718  */
719 static int __init eeh_powernv_init(void)
720 {
	int ret;
722 
723 	eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
724 	ret = eeh_ops_register(&pnv_eeh_ops);
725 	if (!ret)
726 		pr_info("EEH: PowerNV platform initialized\n");
727 	else
728 		pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret);
729 
730 	return ret;
731 }
732 machine_early_initcall(powernv, eeh_powernv_init);
733