1 /*
 * This file implements the platform-dependent EEH operations on the
 * powernv platform. The powernv platform was created to provide full
 * hypervisor support.
5  *
6  * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  */
13 
14 #include <linux/atomic.h>
15 #include <linux/debugfs.h>
16 #include <linux/delay.h>
17 #include <linux/export.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/msi.h>
21 #include <linux/of.h>
22 #include <linux/pci.h>
23 #include <linux/proc_fs.h>
24 #include <linux/rbtree.h>
25 #include <linux/sched.h>
26 #include <linux/seq_file.h>
27 #include <linux/spinlock.h>
28 
29 #include <asm/eeh.h>
30 #include <asm/eeh_event.h>
31 #include <asm/firmware.h>
32 #include <asm/io.h>
33 #include <asm/iommu.h>
34 #include <asm/machdep.h>
35 #include <asm/msi_bitmap.h>
36 #include <asm/opal.h>
37 #include <asm/ppc-pci.h>
38 
39 #include "powernv.h"
40 #include "pci.h"
41 
/*
 * Tracks whether the OPAL event notifier has already been registered;
 * set once in pnv_eeh_post_init() so repeated calls don't re-register.
 */
static bool pnv_eeh_nb_init = false;
43 
/**
 * pnv_eeh_init - EEH platform dependent initialization
 *
 * EEH platform dependent initialization on powernv: requires OPALv3
 * firmware, selects device-based probe mode, and raises per-model
 * quirk flags based on the first PHB found.
 *
 * Returns 0 on success, -EINVAL when OPALv3 isn't available.
 */
static int pnv_eeh_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;

	/* We require OPALv3 */
	if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
		pr_warn("%s: OPALv3 is required !\n",
			__func__);
		return -EINVAL;
	}

	/* Set probe mode */
	eeh_add_flag(EEH_PROBE_MODE_DEV);

	/*
	 * P7IOC blocks PCI config access to frozen PE, but PHB3
	 * doesn't do that. So we have to selectively enable I/O
	 * prior to collecting error log.
	 */
	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		if (phb->model == PNV_PHB_MODEL_P7IOC)
			eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);

		/*
		 * PE#0 should be regarded as valid by EEH core
		 * if it's not the reserved one. Currently, we
		 * have the reserved PE#0 and PE#127 for PHB3
		 * and P7IOC separately. So we should regard
		 * PE#0 as valid for P7IOC.
		 */
		if (phb->ioda.reserved_pe != 0)
			eeh_add_flag(EEH_VALID_PE_ZERO);

		/* Only the first PHB is inspected — presumably all
		 * PHBs in a system share the same model; confirm.
		 */
		break;
	}

	return 0;
}
90 
/*
 * pnv_eeh_event - OPAL event notifier callback for PCI errors
 * @nb: registered notifier block (unused)
 * @events: currently pending OPAL events
 * @change: bitmap of changed events, passed by value in the pointer
 *
 * Always returns 0.
 */
static int pnv_eeh_event(struct notifier_block *nb,
			 unsigned long events, void *change)
{
	/* The changed-event bitmap is passed by value through the pointer */
	uint64_t changed_evts = (uint64_t)change;

	/*
	 * We simply send special EEH event if EEH has
	 * been enabled, or clear pending events in
	 * case that we enable EEH soon
	 */
	if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
	    !(events & OPAL_EVENT_PCI_ERROR))
		return 0;

	if (eeh_enabled())
		eeh_send_failure_event(NULL);
	else
		opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);

	return 0;
}
112 
/* Notifier block hooked into the OPAL event notification chain */
static struct notifier_block pnv_eeh_nb = {
	.notifier_call	= pnv_eeh_event,
	.next		= NULL,
	.priority	= 0
};
118 
119 #ifdef CONFIG_DEBUG_FS
120 static ssize_t pnv_eeh_ei_write(struct file *filp,
121 				const char __user *user_buf,
122 				size_t count, loff_t *ppos)
123 {
124 	struct pci_controller *hose = filp->private_data;
125 	struct eeh_dev *edev;
126 	struct eeh_pe *pe;
127 	int pe_no, type, func;
128 	unsigned long addr, mask;
129 	char buf[50];
130 	int ret;
131 
132 	if (!eeh_ops || !eeh_ops->err_inject)
133 		return -ENXIO;
134 
135 	/* Copy over argument buffer */
136 	ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
137 	if (!ret)
138 		return -EFAULT;
139 
140 	/* Retrieve parameters */
141 	ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
142 		     &pe_no, &type, &func, &addr, &mask);
143 	if (ret != 5)
144 		return -EINVAL;
145 
146 	/* Retrieve PE */
147 	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
148 	if (!edev)
149 		return -ENOMEM;
150 	edev->phb = hose;
151 	edev->pe_config_addr = pe_no;
152 	pe = eeh_pe_get(edev);
153 	kfree(edev);
154 	if (!pe)
155 		return -ENODEV;
156 
157 	/* Do error injection */
158 	ret = eeh_ops->err_inject(pe, type, func, addr, mask);
159 	return ret < 0 ? ret : count;
160 }
161 
/* File operations for the write-only "err_injct" debugfs entry */
static const struct file_operations pnv_eeh_ei_fops = {
	.open	= simple_open,
	.llseek	= no_llseek,
	.write	= pnv_eeh_ei_write,
};
167 
/* Write @val to the PHB register at @offset (debugfs helper) */
static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
{
	struct pci_controller *hose = data;
	struct pnv_phb *phb = hose->private_data;

	out_be64(phb->regs + offset, val);
	return 0;
}
176 
/* Read the PHB register at @offset into *@val (debugfs helper) */
static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
{
	struct pci_controller *hose = data;
	struct pnv_phb *phb = hose->private_data;

	*val = in_be64(phb->regs + offset);
	return 0;
}
185 
/* debugfs write hook: PHB register at offset 0xD10 (outbound) */
static int pnv_eeh_outb_dbgfs_set(void *data, u64 val)
{
	return pnv_eeh_dbgfs_set(data, 0xD10, val);
}
190 
/* debugfs read hook: PHB register at offset 0xD10 (outbound) */
static int pnv_eeh_outb_dbgfs_get(void *data, u64 *val)
{
	return pnv_eeh_dbgfs_get(data, 0xD10, val);
}
195 
/* debugfs write hook: PHB register at offset 0xD90 (inbound A) */
static int pnv_eeh_inbA_dbgfs_set(void *data, u64 val)
{
	return pnv_eeh_dbgfs_set(data, 0xD90, val);
}
200 
/* debugfs read hook: PHB register at offset 0xD90 (inbound A) */
static int pnv_eeh_inbA_dbgfs_get(void *data, u64 *val)
{
	return pnv_eeh_dbgfs_get(data, 0xD90, val);
}
205 
/* debugfs write hook: PHB register at offset 0xE10 (inbound B) */
static int pnv_eeh_inbB_dbgfs_set(void *data, u64 val)
{
	return pnv_eeh_dbgfs_set(data, 0xE10, val);
}
210 
/* debugfs read hook: PHB register at offset 0xE10 (inbound B) */
static int pnv_eeh_inbB_dbgfs_get(void *data, u64 *val)
{
	return pnv_eeh_dbgfs_get(data, 0xE10, val);
}
215 
/* 64-bit get/set attributes backing the err_injct_* debugfs files */
DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_outb_dbgfs_ops, pnv_eeh_outb_dbgfs_get,
			pnv_eeh_outb_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbA_dbgfs_ops, pnv_eeh_inbA_dbgfs_get,
			pnv_eeh_inbA_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbB_dbgfs_ops, pnv_eeh_inbB_dbgfs_get,
			pnv_eeh_inbB_dbgfs_set, "0x%llx\n");
222 #endif /* CONFIG_DEBUG_FS */
223 
/**
 * pnv_eeh_post_init - EEH platform dependent post initialization
 *
 * EEH platform dependent post initialization on powernv. When
 * the function is called, the EEH PEs and devices should have
 * been built. If the I/O cache staff has been built, EEH is
 * ready to supply service.
 *
 * Registers the OPAL event notifier (once), propagates the EEH
 * enablement state into each PHB's flags, and creates the per-PHB
 * debugfs error-injection files.
 *
 * Returns 0 on success, or the notifier registration error.
 */
static int pnv_eeh_post_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	int ret = 0;

	/* Register OPAL event notifier */
	if (!pnv_eeh_nb_init) {
		ret = opal_notifier_register(&pnv_eeh_nb);
		if (ret) {
			pr_warn("%s: Can't register OPAL event notifier (%d)\n",
				__func__, ret);
			return ret;
		}

		pnv_eeh_nb_init = true;
	}

	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		/*
		 * If EEH is enabled, we're going to rely on that.
		 * Otherwise, we restore to conventional mechanism
		 * to clear frozen PE during PCI config access.
		 */
		if (eeh_enabled())
			phb->flags |= PNV_PHB_FLAG_EEH;
		else
			phb->flags &= ~PNV_PHB_FLAG_EEH;

		/* Create debugfs entries, at most once per PHB */
#ifdef CONFIG_DEBUG_FS
		if (phb->has_dbgfs || !phb->dbgfs)
			continue;

		phb->has_dbgfs = 1;
		debugfs_create_file("err_injct", 0200,
				    phb->dbgfs, hose,
				    &pnv_eeh_ei_fops);

		debugfs_create_file("err_injct_outbound", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_outb_dbgfs_ops);
		debugfs_create_file("err_injct_inboundA", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_inbA_dbgfs_ops);
		debugfs_create_file("err_injct_inboundB", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_inbB_dbgfs_ops);
#endif /* CONFIG_DEBUG_FS */
	}


	return ret;
}
288 
/**
 * pnv_eeh_dev_probe - Do probe on PCI device
 * @dev: PCI device
 * @flag: unused
 *
 * When EEH module is installed during system boot, all PCI devices
 * are checked one by one to see if it supports EEH. The function
 * is introduced for the purpose. By default, EEH has been enabled
 * on all PCI devices. That's to say, we only need do necessary
 * initialization on the corresponding eeh device and create PE
 * accordingly.
 *
 * It's notable that's unsafe to retrieve the EEH device through
 * the corresponding PCI device. During the PCI device hotplug, which
 * was possibly triggered by EEH core, the binding between EEH device
 * and the PCI device isn't built yet.
 *
 * Returns 0 on success (or when the device is skipped), or the
 * error from eeh_add_to_parent_pe().
 */
static int pnv_eeh_dev_probe(struct pci_dev *dev, void *flag)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct device_node *dn = pci_device_to_OF_node(dev);
	struct eeh_dev *edev = of_node_to_eeh_dev(dn);
	int ret;

	/*
	 * When probing the root bridge, which doesn't have any
	 * subordinate PCI devices. We don't have OF node for
	 * the root bridge. So it's not reasonable to continue
	 * the probing. Also skip devices already bound to a PE.
	 */
	if (!dn || !edev || edev->pe)
		return 0;

	/* Skip for PCI-ISA bridge */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
		return 0;

	/* Initialize eeh device, caching capability offsets */
	edev->class_code = dev->class;
	edev->mode	&= 0xFFFFFF00;
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		edev->mode |= EEH_DEV_BRIDGE;
	edev->pcix_cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pci_is_pcie(dev)) {
		edev->pcie_cap = pci_pcie_cap(dev);

		if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
			edev->mode |= EEH_DEV_ROOT_PORT;
		else if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
			edev->mode |= EEH_DEV_DS_PORT;

		edev->aer_cap = pci_find_ext_capability(dev,
							PCI_EXT_CAP_ID_ERR);
	}

	edev->config_addr	= ((dev->bus->number << 8) | dev->devfn);
	edev->pe_config_addr	= phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff);

	/* Create PE */
	ret = eeh_add_to_parent_pe(edev);
	if (ret) {
		pr_warn("%s: Can't add PCI dev %s to parent PE (%d)\n",
			__func__, pci_name(dev), ret);
		return ret;
	}

	/*
	 * If the PE contains any one of following adapters, the
	 * PCI config space can't be accessed when dumping EEH log.
	 * Otherwise, we will run into fenced PHB caused by shortage
	 * of outbound credits in the adapter. The PCI config access
	 * should be blocked until PE reset. MMIO access is dropped
	 * by hardware certainly. In order to drop PCI config requests,
	 * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
	 * will be checked in the backend for PE state retrieval. If
	 * the PE becomes frozen for the first time and the flag has
	 * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
	 * that PE to block its config space.
	 *
	 * Broadcom Austin 4-ports NICs (14e4:1657)
	 * Broadcom Shiner 2-ports 10G NICs (14e4:168e)
	 */
	if ((dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x1657) ||
	    (dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x168e))
		edev->pe->state |= EEH_PE_CFG_RESTRICTED;

	/*
	 * Cache the PE primary bus, which can't be fetched when
	 * full hotplug is in progress. In that case, all child
	 * PCI devices of the PE are expected to be removed prior
	 * to PE reset.
	 */
	if (!edev->pe->bus)
		edev->pe->bus = dev->bus;

	/*
	 * Enable EEH explicitly so that we will do EEH check
	 * while accessing I/O stuff
	 */
	eeh_add_flag(EEH_ENABLED);

	/* Save memory bars */
	eeh_save_bars(edev);

	return 0;
}
396 
/**
 * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, following options are support according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
 *
 * Returns 0 on success, -EPERM for disable requests (not permitted),
 * -EINVAL for an unknown option, -EIO when firmware reports failure.
 */
static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	bool freeze_pe = false;
	int opt, ret = 0;
	s64 rc;

	/* Sanity check on option */
	switch (option) {
	case EEH_OPT_DISABLE:
		return -EPERM;
	case EEH_OPT_ENABLE:
		return 0;
	case EEH_OPT_THAW_MMIO:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
		break;
	case EEH_OPT_THAW_DMA:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
		break;
	case EEH_OPT_FREEZE_PE:
		freeze_pe = true;
		opt = OPAL_EEH_ACTION_SET_FREEZE_ALL;
		break;
	default:
		pr_warn("%s: Invalid option %d\n", __func__, option);
		return -EINVAL;
	}

	/*
	 * Prefer the PHB-specific freeze/unfreeze hooks when present
	 * (they handle compound PEs); otherwise call OPAL directly.
	 */
	if (freeze_pe) {
		if (phb->freeze_pe) {
			phb->freeze_pe(phb, pe->addr);
		} else {
			rc = opal_pci_eeh_freeze_set(phb->opal_id,
						     pe->addr, opt);
			if (rc != OPAL_SUCCESS) {
				pr_warn("%s: Failure %lld freezing "
					"PHB#%x-PE#%x\n",
					__func__, rc,
					phb->hose->global_number, pe->addr);
				ret = -EIO;
			}
		}
	} else {
		if (phb->unfreeze_pe) {
			ret = phb->unfreeze_pe(phb, pe->addr, opt);
		} else {
			rc = opal_pci_eeh_freeze_clear(phb->opal_id,
						       pe->addr, opt);
			if (rc != OPAL_SUCCESS) {
				pr_warn("%s: Failure %lld enable %d "
					"for PHB#%x-PE#%x\n",
					__func__, rc, option,
					phb->hose->global_number, pe->addr);
				ret = -EIO;
			}
		}
	}

	return ret;
}
468 
/**
 * pnv_eeh_get_pe_addr - Retrieve PE address
 * @pe: EEH PE
 *
 * Retrieve the PE address according to the given traditional
 * PCI BDF (Bus/Device/Function) address. On powernv the address
 * is already cached in the PE, so simply return it.
 */
static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
{
	return pe->addr;
}
480 
481 /**
482  * pnv_eeh_get_state - Retrieve PE state
483  * @pe: EEH PE
484  * @delay: delay while PE state is temporarily unavailable
485  *
486  * Retrieve the state of the specified PE. For IODA-compitable
487  * platform, it should be retrieved from IODA table. Therefore,
488  * we prefer passing down to hardware implementation to handle
489  * it.
490  */
491 static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
492 {
493 	struct pci_controller *hose = pe->phb;
494 	struct pnv_phb *phb = hose->private_data;
495 	int ret = EEH_STATE_NOT_SUPPORT;
496 
497 	if (phb->eeh_ops && phb->eeh_ops->get_state) {
498 		ret = phb->eeh_ops->get_state(pe);
499 
500 		/*
501 		 * If the PE state is temporarily unavailable,
502 		 * to inform the EEH core delay for default
503 		 * period (1 second)
504 		 */
505 		if (delay) {
506 			*delay = 0;
507 			if (ret & EEH_STATE_UNAVAILABLE)
508 				*delay = 1000;
509 		}
510 	}
511 
512 	return ret;
513 }
514 
515 /**
516  * pnv_eeh_reset - Reset the specified PE
517  * @pe: EEH PE
518  * @option: reset option
519  *
520  * Reset the specified PE
521  */
522 static int pnv_eeh_reset(struct eeh_pe *pe, int option)
523 {
524 	struct pci_controller *hose = pe->phb;
525 	struct pnv_phb *phb = hose->private_data;
526 	int ret = -EEXIST;
527 
528 	if (phb->eeh_ops && phb->eeh_ops->reset)
529 		ret = phb->eeh_ops->reset(pe, option);
530 
531 	return ret;
532 }
533 
534 /**
535  * pnv_eeh_wait_state - Wait for PE state
536  * @pe: EEH PE
537  * @max_wait: maximal period in microsecond
538  *
539  * Wait for the state of associated PE. It might take some time
540  * to retrieve the PE's state.
541  */
542 static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
543 {
544 	int ret;
545 	int mwait;
546 
547 	while (1) {
548 		ret = pnv_eeh_get_state(pe, &mwait);
549 
550 		/*
551 		 * If the PE's state is temporarily unavailable,
552 		 * we have to wait for the specified time. Otherwise,
553 		 * the PE's state will be returned immediately.
554 		 */
555 		if (ret != EEH_STATE_UNAVAILABLE)
556 			return ret;
557 
558 		max_wait -= mwait;
559 		if (max_wait <= 0) {
560 			pr_warn("%s: Timeout getting PE#%x's state (%d)\n",
561 				__func__, pe->addr, max_wait);
562 			return EEH_STATE_NOT_SUPPORT;
563 		}
564 
565 		msleep(mwait);
566 	}
567 
568 	return EEH_STATE_NOT_SUPPORT;
569 }
570 
/**
 * pnv_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log (unused here)
 * @drv_log: driver log to be combined with retrieved error log (unused)
 * @len: length of driver log (unused)
 *
 * Retrieve the temporary or permanent error from the PE. Dumps the
 * diag data held in pe->data, unless it was already dumped early at
 * detection time (EEH_EARLY_DUMP_LOG). Always returns 0.
 */
static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
			   char *drv_log, unsigned long len)
{
	if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
		pnv_pci_dump_phb_diag_data(pe->phb, pe->data);

	return 0;
}
588 
/**
 * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE would be recovered
 * again. No bridge reconfiguration is required on powernv, so this
 * is a no-op that reports success.
 */
static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
{
	return 0;
}
601 
/**
 * pnv_eeh_err_inject - Inject specified error to the indicated PE
 * @pe: the indicated PE
 * @type: error type
 * @func: specific error type
 * @addr: address
 * @mask: address mask
 *
 * The routine is called to inject specified error, which is
 * determined by @type and @func, to the indicated PE for
 * testing purpose.
 *
 * Returns 0 on success, -ERANGE for invalid @type/@func, -ENXIO
 * when firmware lacks the injection call, -EIO on firmware failure.
 */
static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
			      unsigned long addr, unsigned long mask)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	s64 rc;

	/* Sanity check on error type */
	if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
	    type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
		pr_warn("%s: Invalid error type %d\n",
			__func__, type);
		return -ERANGE;
	}

	if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
	    func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
		pr_warn("%s: Invalid error function %d\n",
			__func__, func);
		return -ERANGE;
	}

	/* Firmware supports error injection ? */
	if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
		pr_warn("%s: Firmware doesn't support error injection\n",
			__func__);
		return -ENXIO;
	}

	/* Do error injection */
	rc = opal_pci_err_inject(phb->opal_id, pe->addr,
				 type, func, addr, mask);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld injecting error "
			"%d-%d to PHB#%x-PE#%x\n",
			__func__, rc, type, func,
			hose->global_number, pe->addr);
		return -EIO;
	}

	return 0;
}
656 
657 static inline bool pnv_eeh_cfg_blocked(struct device_node *dn)
658 {
659 	struct eeh_dev *edev = of_node_to_eeh_dev(dn);
660 
661 	if (!edev || !edev->pe)
662 		return false;
663 
664 	if (edev->pe->state & EEH_PE_CFG_BLOCKED)
665 		return true;
666 
667 	return false;
668 }
669 
/*
 * EEH-aware PCI config read: while the device's PE has config access
 * blocked, return all-ones data and a failure code; otherwise pass
 * the read through to the normal accessor.
 */
static int pnv_eeh_read_config(struct device_node *dn,
			       int where, int size, u32 *val)
{
	if (pnv_eeh_cfg_blocked(dn)) {
		*val = 0xFFFFFFFF;
		return PCIBIOS_SET_FAILED;
	}

	return pnv_pci_cfg_read(dn, where, size, val);
}
680 
/*
 * EEH-aware PCI config write: dropped with a failure code while the
 * device's PE has config access blocked, otherwise passed through.
 */
static int pnv_eeh_write_config(struct device_node *dn,
				int where, int size, u32 val)
{
	if (pnv_eeh_cfg_blocked(dn))
		return PCIBIOS_SET_FAILED;

	return pnv_pci_cfg_write(dn, where, size, val);
}
689 
690 /**
691  * pnv_eeh_next_error - Retrieve next EEH error to handle
692  * @pe: Affected PE
693  *
694  * Using OPAL API, to retrieve next EEH error for EEH core to handle
695  */
696 static int pnv_eeh_next_error(struct eeh_pe **pe)
697 {
698 	struct pci_controller *hose;
699 	struct pnv_phb *phb = NULL;
700 
701 	list_for_each_entry(hose, &hose_list, list_node) {
702 		phb = hose->private_data;
703 		break;
704 	}
705 
706 	if (phb && phb->eeh_ops->next_error)
707 		return phb->eeh_ops->next_error(pe);
708 
709 	return -EEXIST;
710 }
711 
/*
 * Ask OPAL firmware to reinitialize the PCI device behind @dn
 * (used to restore the device's config space).
 *
 * Returns 0 on success, -EEXIST when no EEH device exists for the
 * node, -EIO when firmware reports failure.
 */
static int pnv_eeh_restore_config(struct device_node *dn)
{
	struct eeh_dev *edev = of_node_to_eeh_dev(dn);
	struct pnv_phb *phb;
	s64 ret;

	if (!edev)
		return -EEXIST;

	phb = edev->phb->private_data;
	ret = opal_pci_reinit(phb->opal_id,
			      OPAL_REINIT_PCI_DEV, edev->config_addr);
	if (ret) {
		pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
			__func__, edev->config_addr, ret);
		return -EIO;
	}

	return 0;
}
732 
/* Platform dependent EEH operations, registered with the EEH core */
static struct eeh_ops pnv_eeh_ops = {
	.name                   = "powernv",
	.init                   = pnv_eeh_init,
	.post_init              = pnv_eeh_post_init,
	.of_probe               = NULL,
	.dev_probe              = pnv_eeh_dev_probe,
	.set_option             = pnv_eeh_set_option,
	.get_pe_addr            = pnv_eeh_get_pe_addr,
	.get_state              = pnv_eeh_get_state,
	.reset                  = pnv_eeh_reset,
	.wait_state             = pnv_eeh_wait_state,
	.get_log                = pnv_eeh_get_log,
	.configure_bridge       = pnv_eeh_configure_bridge,
	.err_inject		= pnv_eeh_err_inject,
	.read_config            = pnv_eeh_read_config,
	.write_config           = pnv_eeh_write_config,
	.next_error		= pnv_eeh_next_error,
	.restore_config		= pnv_eeh_restore_config
};
752 
753 /**
754  * eeh_powernv_init - Register platform dependent EEH operations
755  *
756  * EEH initialization on powernv platform. This function should be
757  * called before any EEH related functions.
758  */
759 static int __init eeh_powernv_init(void)
760 {
761 	int ret = -EINVAL;
762 
763 	eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
764 	ret = eeh_ops_register(&pnv_eeh_ops);
765 	if (!ret)
766 		pr_info("EEH: PowerNV platform initialized\n");
767 	else
768 		pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret);
769 
770 	return ret;
771 }
772 machine_early_initcall(powernv, eeh_powernv_init);
773