/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
#include <linux/sched/mm.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"

static DEFINE_MUTEX(tunnel_mutex);

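/**
 * pnv_pci_get_slot_id - Compute the OPAL slot ID for a device node
 * @np: PCI device node
 * @id: where to store the slot ID
 *
 * Walks up the device tree from @np to the enclosing "ibm,ioda2-phb"
 * node and combines that PHB's "ibm,opal-phbid" with the bdfn taken
 * from @np's "reg" property.
 *
 * Returns 0 on success, -ENXIO if a required property is missing, or
 * -ENODEV if no IODA2 PHB parent is found.
 */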
int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
	struct device_node *parent = np;
	u32 bdfn;
	u64 phbid;
	int ret;

	ret = of_property_read_u32(np, "reg", &bdfn);
	if (ret)
		return -ENXIO;

	bdfn = ((bdfn & 0x00ffff00) >> 8);
	while ((parent = of_get_parent(parent))) {
		if (!PCI_DN(parent)) {
			of_node_put(parent);
			break;
		}

		if (!of_device_is_compatible(parent, "ibm,ioda2-phb")) {
			of_node_put(parent);
			continue;
		}

		ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid);
		if (ret) {
			of_node_put(parent);
			return -ENXIO;
		}

		*id = PCI_SLOT_ID(phbid, bdfn);
		of_node_put(parent);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);

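/**
 * pnv_pci_get_device_tree - Fetch a device sub-tree from firmware
 * @phandle: phandle of the sub-tree's root node
 * @buf: buffer to receive the flattened device tree
 * @len: size of @buf in bytes
 *
 * Returns -ENXIO if firmware doesn't implement OPAL_GET_DEVICE_TREE,
 * -EIO if the call fails, otherwise OPAL's non-negative return value.
 */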
int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
{
	int64_t rc;

	if (!opal_check_token(OPAL_GET_DEVICE_TREE))
		return -ENXIO;

	rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
	if (rc < OPAL_SUCCESS)
		return -EIO;

	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);

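/**
 * pnv_pci_get_presence_state - Read a slot's presence state
 * @id: PCI slot ID
 * @state: where to store the presence state
 *
 * Returns 0 on success, -ENXIO if firmware doesn't implement
 * OPAL_PCI_GET_PRESENCE_STATE, or -EIO if the call fails.
 */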
int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
		return -ENXIO;

	rc = opal_pci_get_presence_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);

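/**
 * pnv_pci_get_power_state - Read a slot's power state
 * @id: PCI slot ID
 * @state: where to store the power state
 *
 * Returns 0 on success, -ENXIO if firmware doesn't implement
 * OPAL_PCI_GET_POWER_STATE, or -EIO if the call fails.
 */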
int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
		return -ENXIO;

	rc = opal_pci_get_power_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);

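/**
 * pnv_pci_set_power_state - Set a slot's power state
 * @id: PCI slot ID
 * @state: requested power state
 * @msg: optional buffer for the OPAL completion message
 *
 * The firmware call may complete asynchronously, in which case we wait
 * for its response. Returns 0 when the state was changed immediately,
 * 1 when an asynchronous completion message was copied to @msg, or a
 * negative error code on failure.
 */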
int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
{
	struct opal_msg m;
	int token, ret;
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
		return -ENXIO;

	token = opal_async_get_token_interruptible();
	if (unlikely(token < 0))
		return token;

	rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
	if (rc == OPAL_SUCCESS) {
		ret = 0;
		goto exit;
	} else if (rc != OPAL_ASYNC_COMPLETION) {
		ret = -EIO;
		goto exit;
	}

	ret = opal_async_wait_response(token, &m);
	if (ret < 0)
		goto exit;

	if (msg) {
		ret = 1;
		memcpy(msg, &m, sizeof(m));
	}

exit:
	opal_async_release_token(token);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);

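/*
 * Allocate an MSI from the PHB's MSI bitmap for each entry, map it to
 * a Linux interrupt and program the device via the PHB's msi_setup()
 * hook. On failure, the hwirq and the mapping of the failing entry are
 * released before returning.
 */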
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (!virq) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;
}

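/* Undo pnv_setup_msi_irqs(): unmap each MSI and return it to the bitmap. */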
void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->irq)
			continue;
		hwirq = virq_to_hw(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
	}
}

/* Nicely print the contents of the PE State Tables (PEST). */
static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
{
	u64 prevA = ULONG_MAX, prevB = ULONG_MAX;
	bool dup = false;
	int i;

	for (i = 0; i < pest_size; i++) {
		u64 peA = be64_to_cpu(pestA[i]);
		u64 peB = be64_to_cpu(pestB[i]);

		if (peA != prevA || peB != prevB) {
			if (dup) {
				pr_info("PE[..%03x] A/B: as above\n", i - 1);
				dup = false;
			}
			prevA = peA;
			prevB = peB;
			if (peA & PNV_IODA_STOPPED_STATE ||
			    peB & PNV_IODA_STOPPED_STATE)
				pr_info("PE[%03x] A/B: %016llx %016llx\n",
					i, peA, peB);
		} else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
				    peB & PNV_IODA_STOPPED_STATE)) {
			dup = true;
		}
	}
}

static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl:     %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts:      %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus   ||
	    data->linkStatus   || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus   || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts:  %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog:  %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts:      %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem:         %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;

	data = (struct OpalIoPhb3ErrorData *)common;
	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl:     %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts:      %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus   ||
	    data->linkStatus   || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts:  %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog:  %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir:        %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts:      %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem:         %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb4ErrorData *data;

	data = (struct OpalIoPhb4ErrorData *)common;
	pr_info("PHB4 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl:    %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->deviceStatus || data->slotStatus   ||
	    data->linkStatus   || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:    %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId)
		pr_info("sourceId:   %08x\n", be32_to_cpu(data->sourceId));
	if (data->nFir)
		pr_info("nFir:       %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts:     %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem:        %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:     %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->phbTxeErrorStatus)
		pr_info("PhbTxeErr:  %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbTxeErrorStatus),
			be64_to_cpu(data->phbTxeFirstErrorStatus),
			be64_to_cpu(data->phbTxeErrorLog0),
			be64_to_cpu(data->phbTxeErrorLog1));
	if (data->phbRxeArbErrorStatus)
		pr_info("RxeArbErr:  %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeArbErrorStatus),
			be64_to_cpu(data->phbRxeArbFirstErrorStatus),
			be64_to_cpu(data->phbRxeArbErrorLog0),
			be64_to_cpu(data->phbRxeArbErrorLog1));
	if (data->phbRxeMrgErrorStatus)
		pr_info("RxeMrgErr:  %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeMrgErrorStatus),
			be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
			be64_to_cpu(data->phbRxeMrgErrorLog0),
			be64_to_cpu(data->phbRxeMrgErrorLog1));
	if (data->phbRxeTceErrorStatus)
		pr_info("RxeTceErr:  %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeTceErrorStatus),
			be64_to_cpu(data->phbRxeTceFirstErrorStatus),
			be64_to_cpu(data->phbRxeTceErrorLog0),
			be64_to_cpu(data->phbRxeTceErrorLog1));

	if (data->phbPblErrorStatus)
		pr_info("PblErr:     %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPblErrorStatus),
			be64_to_cpu(data->phbPblFirstErrorStatus),
			be64_to_cpu(data->phbPblErrorLog0),
			be64_to_cpu(data->phbPblErrorLog1));
	if (data->phbPcieDlpErrorStatus)
		pr_info("PcieDlp:    %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPcieDlpErrorLog1),
			be64_to_cpu(data->phbPcieDlpErrorLog2),
			be64_to_cpu(data->phbPcieDlpErrorStatus));
	if (data->phbRegbErrorStatus)
		pr_info("RegbErr:    %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRegbErrorStatus),
			be64_to_cpu(data->phbRegbFirstErrorStatus),
			be64_to_cpu(data->phbRegbErrorLog0),
			be64_to_cpu(data->phbRegbErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
}

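/*
 * Dump PHB diag-data by dispatching on the ioType field of the common
 * header to the matching routine above.
 */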
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff)
{
	struct OpalIoPhbErrorCommon *common;

	if (!hose || !log_buff)
		return;

	common = (struct OpalIoPhbErrorCommon *)log_buff;
	switch (be32_to_cpu(common->ioType)) {
	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
		pnv_pci_dump_p7ioc_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
		pnv_pci_dump_phb3_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB4:
		pnv_pci_dump_phb4_diag_data(hose, common);
		break;
	default:
		pr_warn("%s: Unrecognized ioType %d\n",
			__func__, be32_to_cpu(common->ioType));
	}
}

static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
					 phb->diag_data_size);
	has_diag = (rc == OPAL_SUCCESS);

	/* If the PHB supports compound PEs, use its unfreeze hook */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					     pe_no,
					     OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we
	 * have proper EEH support. We need to make sure we don't pollute
	 * ourselves with the normal errors generated when probing empty
	 * slots.
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);

	spin_unlock_irqrestore(&phb->lock, flags);
}

static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8	fstate = 0;
	__be16	pcierr = 0;
	unsigned int pe_no;
	s64	rc;

	/*
	 * Get the PE#. During the PCI probe stage, it might not be set
	 * up yet, so map all ER errors to the reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE)
		pe_no = phb->ioda.reserved_pe_idx;

	/*
	 * Fetch the frozen state. If the PHB supports compound PEs,
	 * we need to handle that case.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE  ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If the PHB supports compound PEs, freeze the PE
		 * for consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}

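/*
 * Low-level config space accessors, backed by OPAL's config read/write
 * calls. Failed reads return all-ones.
 */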
int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;
		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		__be16 v16;
		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						   &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;
		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}

int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, val);
	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}

#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	struct eeh_dev *edev = NULL;
	struct pnv_phb *phb = pdn->phb->private_data;

	/* EEH not enabled? */
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		return true;

	/* PE reset or device removed? */
	edev = pdn->edev;
	if (edev) {
		if (edev->pe &&
		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
			return false;

		if (edev->mode & EEH_DEV_REMOVED)
			return false;
	}

	return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	return true;
}
#endif /* CONFIG_EEH */

static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	*val = 0xFFFFFFFF;
	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}

static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return ret;
}

struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};

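/* Allocate and initialise an iommu_table structure on the given NUMA node. */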
struct iommu_table *pnv_pci_table_alloc(int nid)
{
	struct iommu_table *tbl;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
	if (!tbl)
		return NULL;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);

	return tbl;
}

void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	/* Fix the VF pdn PE number */
	if (pdev->is_virtfn) {
		pdn = pci_get_pdn(pdev);
		WARN_ON(pdn->pe_number != IODA_INVALID_PE);
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			if (pe->rid == ((pdev->bus->number << 8) |
			    (pdev->devfn & 0xff))) {
				pdn->pe_number = pe->pe_number;
				pe->pdev = pdev;
				break;
			}
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (phb && phb->dma_dev_setup)
		phb->dma_dev_setup(phb, pdev);
}

void pnv_pci_dma_bus_setup(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;

	list_for_each_entry(pe, &phb->ioda.pe_list, list) {
		if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
			continue;

		if (!pe->pbus)
			continue;

		if (bus->number == ((pe->rid >> 8) & 0xFF)) {
			pe->pbus = bus;
			break;
		}
	}
}

struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	return of_node_get(hose->dn);
}
EXPORT_SYMBOL(pnv_pci_get_phb_node);

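/**
 * pnv_pci_set_tunnel_bar - Set or clear the PHB's tunnel BAR
 * @dev: PCI device wanting to use (or release) tunneled operations
 * @addr: address to set the tunnel BAR to
 * @enable: whether to set (1) or clear (0) the BAR
 *
 * Only one device per PHB can use the tunnel BAR at a time, so the
 * current value is read back first: setting the same address twice
 * succeeds, and clearing requires the currently set address.
 */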
int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
{
	__be64 val;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	u64 tunnel_bar;
	int rc;

	if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
		return -ENXIO;
	if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
		return -ENXIO;

	hose = pci_bus_to_host(dev->bus);
	phb = hose->private_data;

	mutex_lock(&tunnel_mutex);
	rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
	if (rc != OPAL_SUCCESS) {
		rc = -EIO;
		goto out;
	}
	tunnel_bar = be64_to_cpu(val);
	if (enable) {
		/*
		 * Only one device per PHB can use atomics.
		 * Our policy is first-come, first-served.
		 */
		if (tunnel_bar) {
			if (tunnel_bar != addr)
				rc = -EBUSY;
			else
				rc = 0;	/* Setting same address twice is ok */
			goto out;
		}
	} else {
		/*
		 * The device that owns atomics and wants to release
		 * them must pass the same address with enable == 0.
		 */
		if (tunnel_bar != addr) {
			rc = -EPERM;
			goto out;
		}
		addr = 0x0ULL;
	}
	rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr);
	rc = opal_error_code(rc);
out:
	mutex_unlock(&tunnel_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);

void pnv_pci_shutdown(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		if (hose->controller_ops.shutdown)
			hose->controller_ops.shutdown(hose);
}

/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);

void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* If we don't have OPAL, e.g. in a simulator, just skip PCI probe */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	/* Look for IODA IO-Hubs */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub")
		pnv_pci_init_ioda_hub(np);

	/* Look for ioda2 built-in PHB3s */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for ioda3 built-in PHB4s; we treat them as IODA2 */
	for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
		pnv_pci_init_npu_phb(np);

	/*
	 * Look for NPU2 PHBs which we treat mostly as NPU PHBs with
	 * the exception of TCE kill which requires an OPAL call.
	 */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
		pnv_pci_init_npu_phb(np);

	/* Look for NPU2 OpenCAPI PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
		pnv_pci_init_npu2_opencapi_phb(np);

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}

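/*
 * Bus notifier: attach devices to their PE's IOMMU group as they are
 * added to the bus, and detach them again on removal.
 */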
static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev;
	struct pci_dn *pdn;
	struct pnv_ioda_pe *pe;
	struct pci_controller *hose;
	struct pnv_phb *phb;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		pdev = to_pci_dev(dev);
		pdn = pci_get_pdn(pdev);
		hose = pci_bus_to_host(pdev->bus);
		phb = hose->private_data;

		WARN_ON_ONCE(!phb);
		if (!pdn || pdn->pe_number == IODA_INVALID_PE || !phb)
			return 0;

		pe = &phb->ioda.pe_array[pdn->pe_number];
		if (!pe->table_group.group)
			return 0;
		iommu_add_device(&pe->table_group, dev);
		return 0;
	case BUS_NOTIFY_DEL_DEVICE:
		iommu_del_device(dev);
		return 0;
	default:
		return 0;
	}
}

static struct notifier_block pnv_tce_iommu_bus_nb = {
	.notifier_call = pnv_tce_iommu_bus_notifier,
};

static int __init pnv_tce_iommu_bus_notifier_init(void)
{
	bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb);
	return 0;
}
machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init);