/*
 * This file implements the platform-dependent EEH operations on the
 * powernv platform, which runs natively on OPAL firmware without a
 * hypervisor.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/opal.h>
#include <asm/ppc-pci.h>

#include "powernv.h"
#include "pci.h"

/**
 * powernv_eeh_init - EEH platform dependent initialization
 *
 * EEH platform dependent initialization on powernv.
 */
static int powernv_eeh_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;

	/* We require OPALv3 */
	if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
		pr_warn("%s: OPALv3 is required!\n",
			__func__);
		return -EINVAL;
	}

	/* Set probe mode */
	eeh_add_flag(EEH_PROBE_MODE_DEV);

	/*
	 * P7IOC blocks PCI config access to frozen PEs, but PHB3
	 * doesn't do that. So we have to selectively enable I/O
	 * prior to collecting the error log.
	 */
	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		if (phb->model == PNV_PHB_MODEL_P7IOC)
			eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);

		/*
		 * PE#0 should be regarded as valid by the EEH core
		 * if it's not the reserved one. Currently, PHB3
		 * reserves PE#0 and P7IOC reserves PE#127, so
		 * PE#0 should be regarded as valid for P7IOC.
		 */
		if (phb->ioda.reserved_pe != 0)
			eeh_add_flag(EEH_VALID_PE_ZERO);

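		/*
		 * All PHBs are assumed to be of the same model on a
		 * given system, so checking the first one is enough.
		 */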
		break;
	}

	return 0;
}

/**
 * powernv_eeh_post_init - EEH platform dependent post initialization
 *
 * EEH platform dependent post initialization on powernv. When
 * the function is called, the EEH PEs and devices should have
 * been built. If the I/O cache stuff has been built, EEH is
 * ready to provide service.
 */
static int powernv_eeh_post_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	int ret = 0;

	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		if (phb->eeh_ops && phb->eeh_ops->post_init) {
			ret = phb->eeh_ops->post_init(hose);
			if (ret)
				break;
		}
	}

	return ret;
}

/**
 * powernv_eeh_dev_probe - Do probe on PCI device
 * @dev: PCI device
 * @flag: unused
 *
 * When the EEH module is installed during system boot, all PCI devices
 * are checked one by one to see if they support EEH. This function
 * is introduced for that purpose. By default, EEH is enabled on all
 * PCI devices, so we only need to do the necessary initialization on
 * the corresponding EEH device and create the PE accordingly.
 *
 * Note that it's unsafe to retrieve the EEH device through the
 * corresponding PCI device: during PCI device hotplug, which might
 * have been triggered by the EEH core, the binding between the EEH
 * device and the PCI device isn't built yet.
 */
static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct device_node *dn = pci_device_to_OF_node(dev);
	struct eeh_dev *edev = of_node_to_eeh_dev(dn);
	int ret;

	/*
	 * We don't have an OF node for the root bridge, so it's not
	 * reasonable to continue probing it. Also skip the device if
	 * its EEH device hasn't been created yet, or if it has
	 * already been bound to a PE.
	 */
	if (!dn || !edev || edev->pe)
		return 0;

	/* Skip for PCI-ISA bridge */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
		return 0;

	/* Initialize eeh device */
	edev->class_code = dev->class;
	edev->mode	&= 0xFFFFFF00;
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		edev->mode |= EEH_DEV_BRIDGE;
	edev->pcix_cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pci_is_pcie(dev)) {
		edev->pcie_cap = pci_pcie_cap(dev);

		if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
			edev->mode |= EEH_DEV_ROOT_PORT;
		else if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
			edev->mode |= EEH_DEV_DS_PORT;

		edev->aer_cap = pci_find_ext_capability(dev,
							PCI_EXT_CAP_ID_ERR);
	}

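	/*
	 * The config address is the traditional PCI BDF, while the PE
	 * config address is the PE number obtained from the PHB's
	 * BDF-to-PE mapping.
	 */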
	edev->config_addr	= ((dev->bus->number << 8) | dev->devfn);
	edev->pe_config_addr	= phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff);

	/* Create PE */
	ret = eeh_add_to_parent_pe(edev);
	if (ret) {
		pr_warn("%s: Can't add PCI dev %s to parent PE (%d)\n",
			__func__, pci_name(dev), ret);
		return ret;
	}

	/*
	 * If the PE contains any one of the following adapters, the
	 * PCI config space can't be accessed when dumping the EEH log.
	 * Otherwise, we will run into a fenced PHB caused by a shortage
	 * of outbound credits in the adapter. PCI config access should
	 * be blocked until the PE is reset. MMIO accesses are dropped
	 * by the hardware anyway. In order to drop PCI config requests,
	 * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
	 * is checked in the backend for PE state retrieval. If the PE
	 * becomes frozen for the first time and the flag has been set
	 * for the PE, we will set EEH_PE_CFG_BLOCKED for that PE to
	 * block its config space.
	 *
	 * Broadcom Austin 4-port NICs (14e4:1657)
	 * Broadcom Shiner 2-port 10G NICs (14e4:168e)
	 */
	if ((dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x1657) ||
	    (dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x168e))
		edev->pe->state |= EEH_PE_CFG_RESTRICTED;

	/*
	 * Cache the PE primary bus, which can't be fetched when
	 * full hotplug is in progress. In that case, all child
	 * PCI devices of the PE are expected to be removed prior
	 * to PE reset.
	 */
	if (!edev->pe->bus)
		edev->pe->bus = dev->bus;

	/*
	 * Enable EEH explicitly so that we will do EEH check
	 * while accessing I/O stuff
	 */
	eeh_add_flag(EEH_ENABLED);

	/* Save memory bars */
	eeh_save_bars(edev);

	return 0;
}

/**
 * powernv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA.
 */
static int powernv_eeh_set_option(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	int ret = -EEXIST;

	/*
	 * All we need to do is pass it down to the hardware
	 * implementation to handle it.
	 */
	if (phb->eeh_ops && phb->eeh_ops->set_option)
		ret = phb->eeh_ops->set_option(pe, option);

	return ret;
}

/**
 * powernv_eeh_get_pe_addr - Retrieve PE address
 * @pe: EEH PE
 *
 * Retrieve the PE address according to the given traditional
 * PCI BDF (Bus/Device/Function) address.
 */
static int powernv_eeh_get_pe_addr(struct eeh_pe *pe)
{
	return pe->addr;
}

/**
 * powernv_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @delay: delay while PE state is temporarily unavailable
 *
 * Retrieve the state of the specified PE. For an IODA-compatible
 * platform, it should be retrieved from the IODA table. Therefore,
 * we prefer passing it down to the hardware implementation to
 * handle it.
 */
static int powernv_eeh_get_state(struct eeh_pe *pe, int *delay)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	int ret = EEH_STATE_NOT_SUPPORT;

	if (phb->eeh_ops && phb->eeh_ops->get_state) {
		ret = phb->eeh_ops->get_state(pe);

		/*
		 * If the PE state is temporarily unavailable,
		 * inform the EEH core to delay for the default
		 * period (1 second).
		 */
		if (delay) {
			*delay = 0;
			if (ret & EEH_STATE_UNAVAILABLE)
				*delay = 1000;
		}
	}

	return ret;
}

/**
 * powernv_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the specified PE
 */
static int powernv_eeh_reset(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	int ret = -EEXIST;

	if (phb->eeh_ops && phb->eeh_ops->reset)
		ret = phb->eeh_ops->reset(pe, option);

	return ret;
}

/**
 * powernv_eeh_wait_state - Wait for PE state
 * @pe: EEH PE
 * @max_wait: maximal period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
 */
static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
	int ret;
	int mwait;

	while (1) {
		ret = powernv_eeh_get_state(pe, &mwait);

		/*
		 * If the PE's state is temporarily unavailable,
		 * we have to wait for the specified time. Otherwise,
		 * the PE's state will be returned immediately.
		 */
		if (ret != EEH_STATE_UNAVAILABLE)
			return ret;

		max_wait -= mwait;
		if (max_wait <= 0) {
			pr_warn("%s: Timeout getting PE#%x's state (%d)\n",
				__func__, pe->addr, max_wait);
			return EEH_STATE_NOT_SUPPORT;
		}

		msleep(mwait);
	}

	return EEH_STATE_NOT_SUPPORT;
}

/**
 * powernv_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error log from the PE.
 */
static int powernv_eeh_get_log(struct eeh_pe *pe, int severity,
			       char *drv_log, unsigned long len)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	int ret = -EEXIST;

	if (phb->eeh_ops && phb->eeh_ops->get_log)
		ret = phb->eeh_ops->get_log(pe, severity, drv_log, len);

	return ret;
}

/**
 * powernv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be
 * recovered.
 */
static int powernv_eeh_configure_bridge(struct eeh_pe *pe)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	int ret = 0;

	if (phb->eeh_ops && phb->eeh_ops->configure_bridge)
		ret = phb->eeh_ops->configure_bridge(pe);

	return ret;
}

/**
 * powernv_eeh_err_inject - Inject specified error to the indicated PE
 * @pe: the indicated PE
 * @type: error type
 * @func: specific error type
 * @addr: address
 * @mask: address mask
 *
 * The routine is called to inject the specified error, which is
 * determined by @type and @func, to the indicated PE for
 * testing purposes.
 */
static int powernv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
				  unsigned long addr, unsigned long mask)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	int ret = -EEXIST;

	if (phb->eeh_ops && phb->eeh_ops->err_inject)
		ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask);

	return ret;
}

static inline bool powernv_eeh_cfg_blocked(struct device_node *dn)
{
	struct eeh_dev *edev = of_node_to_eeh_dev(dn);

	if (!edev || !edev->pe)
		return false;

	if (edev->pe->state & EEH_PE_CFG_BLOCKED)
		return true;

	return false;
}

static int powernv_eeh_read_config(struct device_node *dn,
				   int where, int size, u32 *val)
{
	if (powernv_eeh_cfg_blocked(dn)) {
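		/*
		 * Return all ones, as a read from a non-existent
		 * device would, since config access is blocked.
		 */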
		*val = 0xFFFFFFFF;
		return PCIBIOS_SET_FAILED;
	}

	return pnv_pci_cfg_read(dn, where, size, val);
}

static int powernv_eeh_write_config(struct device_node *dn,
				    int where, int size, u32 val)
{
	if (powernv_eeh_cfg_blocked(dn))
		return PCIBIOS_SET_FAILED;

	return pnv_pci_cfg_write(dn, where, size, val);
}

/**
 * powernv_eeh_next_error - Retrieve next EEH error to handle
 * @pe: Affected PE
 *
 * Retrieve the next EEH error for the EEH core to handle via the
 * OPAL API.
 */
static int powernv_eeh_next_error(struct eeh_pe **pe)
{
	struct pci_controller *hose;
	struct pnv_phb *phb = NULL;

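	/*
	 * The backend's next_error() is expected to scan all PHBs for
	 * pending errors, so calling it through the first PHB found
	 * is sufficient.
	 */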
	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;
		break;
	}

	if (phb && phb->eeh_ops->next_error)
		return phb->eeh_ops->next_error(pe);

	return -EEXIST;
}

static int powernv_eeh_restore_config(struct device_node *dn)
{
	struct eeh_dev *edev = of_node_to_eeh_dev(dn);
	struct pnv_phb *phb;
	s64 ret;

	if (!edev)
		return -EEXIST;

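	/*
	 * Ask the OPAL firmware to reinitialize the PCI device so that
	 * its config space is restored.
	 */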
	phb = edev->phb->private_data;
	ret = opal_pci_reinit(phb->opal_id,
			      OPAL_REINIT_PCI_DEV, edev->config_addr);
	if (ret) {
		pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
			__func__, edev->config_addr, ret);
		return -EIO;
	}

	return 0;
}

static struct eeh_ops powernv_eeh_ops = {
	.name                   = "powernv",
	.init                   = powernv_eeh_init,
	.post_init              = powernv_eeh_post_init,
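	/* of_probe is unused since we probe PCI devices directly (EEH_PROBE_MODE_DEV) */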
	.of_probe               = NULL,
	.dev_probe              = powernv_eeh_dev_probe,
	.set_option             = powernv_eeh_set_option,
	.get_pe_addr            = powernv_eeh_get_pe_addr,
	.get_state              = powernv_eeh_get_state,
	.reset                  = powernv_eeh_reset,
	.wait_state             = powernv_eeh_wait_state,
	.get_log                = powernv_eeh_get_log,
	.configure_bridge       = powernv_eeh_configure_bridge,
	.err_inject		= powernv_eeh_err_inject,
	.read_config            = powernv_eeh_read_config,
	.write_config           = powernv_eeh_write_config,
	.next_error		= powernv_eeh_next_error,
	.restore_config		= powernv_eeh_restore_config
};

/**
 * eeh_powernv_init - Register platform dependent EEH operations
 *
 * EEH initialization on the powernv platform. This function should be
 * called before any EEH-related functions.
 */
static int __init eeh_powernv_init(void)
{
	int ret = -EINVAL;

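	/* Reserve per-PE auxiliary space large enough for the PHB diag-data dump */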
	eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
	ret = eeh_ops_register(&powernv_eeh_ops);
	if (!ret)
		pr_info("EEH: PowerNV platform initialized\n");
	else
		pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret);

	return ret;
}
machine_early_initcall(powernv, eeh_powernv_init);