1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Support PCI/PCIe on PowerNV platforms
4  *
5  * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
6  */
7 
8 #undef DEBUG
9 
10 #include <linux/kernel.h>
11 #include <linux/pci.h>
12 #include <linux/crash_dump.h>
13 #include <linux/delay.h>
14 #include <linux/string.h>
15 #include <linux/init.h>
16 #include <linux/memblock.h>
17 #include <linux/irq.h>
18 #include <linux/io.h>
19 #include <linux/msi.h>
20 #include <linux/iommu.h>
21 #include <linux/rculist.h>
22 #include <linux/sizes.h>
23 
24 #include <asm/sections.h>
25 #include <asm/io.h>
26 #include <asm/prom.h>
27 #include <asm/pci-bridge.h>
28 #include <asm/machdep.h>
29 #include <asm/msi_bitmap.h>
30 #include <asm/ppc-pci.h>
31 #include <asm/opal.h>
32 #include <asm/iommu.h>
33 #include <asm/tce.h>
34 #include <asm/xics.h>
35 #include <asm/debugfs.h>
36 #include <asm/firmware.h>
37 #include <asm/pnv-pci.h>
38 #include <asm/mmzone.h>
39 
40 #include <misc/cxl-base.h>
41 
42 #include "powernv.h"
43 #include "pci.h"
44 #include "../../../../drivers/pci/pci.h"
45 
46 #define PNV_IODA1_M64_NUM	16	/* Number of M64 BARs	*/
47 #define PNV_IODA1_M64_SEGS	8	/* Segments per M64 BAR	*/
48 #define PNV_IODA1_DMA32_SEGSIZE	0x10000000
49 
50 static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_NVLINK",
51 					      "NPU_OCAPI" };
52 
53 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
54 
55 void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
56 			    const char *fmt, ...)
57 {
58 	struct va_format vaf;
59 	va_list args;
60 	char pfix[32];
61 
62 	va_start(args, fmt);
63 
64 	vaf.fmt = fmt;
65 	vaf.va = &args;
66 
67 	if (pe->flags & PNV_IODA_PE_DEV)
68 		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
69 	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
70 		sprintf(pfix, "%04x:%02x     ",
71 			pci_domain_nr(pe->pbus), pe->pbus->number);
72 #ifdef CONFIG_PCI_IOV
73 	else if (pe->flags & PNV_IODA_PE_VF)
74 		sprintf(pfix, "%04x:%02x:%2x.%d",
75 			pci_domain_nr(pe->parent_dev->bus),
76 			(pe->rid & 0xff00) >> 8,
77 			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
78 #endif /* CONFIG_PCI_IOV*/
79 
80 	printk("%spci %s: [PE# %.2x] %pV",
81 	       level, pfix, pe->pe_number, &vaf);
82 
83 	va_end(args);
84 }
85 
86 static bool pnv_iommu_bypass_disabled __read_mostly;
87 static bool pci_reset_phbs __read_mostly;
88 
89 static int __init iommu_setup(char *str)
90 {
91 	if (!str)
92 		return -EINVAL;
93 
94 	while (*str) {
95 		if (!strncmp(str, "nobypass", 8)) {
96 			pnv_iommu_bypass_disabled = true;
97 			pr_info("PowerNV: IOMMU bypass window disabled.\n");
98 			break;
99 		}
100 		str += strcspn(str, ",");
101 		if (*str == ',')
102 			str++;
103 	}
104 
105 	return 0;
106 }
107 early_param("iommu", iommu_setup);
108 
109 static int __init pci_reset_phbs_setup(char *str)
110 {
111 	pci_reset_phbs = true;
112 	return 0;
113 }
114 
115 early_param("ppc_pci_reset_phbs", pci_reset_phbs_setup);
116 
117 static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
118 {
119 	/*
120 	 * WARNING: We cannot rely on the resource flags. The Linux PCI
121 	 * allocation code sometimes decides to put a 64-bit prefetchable
122 	 * BAR in the 32-bit window, so we have to compare the addresses.
123 	 *
124 	 * For simplicity we only test resource start.
125 	 */
126 	return (r->start >= phb->ioda.m64_base &&
127 		r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
128 }
129 
130 static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
131 {
132 	unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
133 
134 	return (resource_flags & flags) == flags;
135 }
136 
137 static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
138 {
139 	s64 rc;
140 
141 	phb->ioda.pe_array[pe_no].phb = phb;
142 	phb->ioda.pe_array[pe_no].pe_number = pe_no;
143 
144 	/*
145 	 * Clear the PE frozen state as it might be put into frozen state
146 	 * in the last PCI remove path. It's not harmful to do so when the
147 	 * PE is already in unfrozen state.
148 	 */
149 	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
150 				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
151 	if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
152 		pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n",
153 			__func__, rc, phb->hose->global_number, pe_no);
154 
155 	return &phb->ioda.pe_array[pe_no];
156 }
157 
158 static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
159 {
160 	if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) {
161 		pr_warn("%s: Invalid PE %x on PHB#%x\n",
162 			__func__, pe_no, phb->hose->global_number);
163 		return;
164 	}
165 
166 	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
167 		pr_debug("%s: PE %x was reserved on PHB#%x\n",
168 			 __func__, pe_no, phb->hose->global_number);
169 
170 	pnv_ioda_init_pe(phb, pe_no);
171 }
172 
173 static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
174 {
175 	long pe;
176 
177 	for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
178 		if (!test_and_set_bit(pe, phb->ioda.pe_alloc))
179 			return pnv_ioda_init_pe(phb, pe);
180 	}
181 
182 	return NULL;
183 }
184 
185 static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
186 {
187 	struct pnv_phb *phb = pe->phb;
188 	unsigned int pe_num = pe->pe_number;
189 
190 	WARN_ON(pe->pdev);
191 	WARN_ON(pe->npucomp); /* NPUs are not supposed to be freed */
192 	kfree(pe->npucomp);
193 	memset(pe, 0, sizeof(struct pnv_ioda_pe));
194 	clear_bit(pe_num, phb->ioda.pe_alloc);
195 }
196 
197 /* The default M64 BAR is shared by all PEs */
198 static int pnv_ioda2_init_m64(struct pnv_phb *phb)
199 {
200 	const char *desc;
201 	struct resource *r;
202 	s64 rc;
203 
204 	/* Configure the default M64 BAR */
205 	rc = opal_pci_set_phb_mem_window(phb->opal_id,
206 					 OPAL_M64_WINDOW_TYPE,
207 					 phb->ioda.m64_bar_idx,
208 					 phb->ioda.m64_base,
209 					 0, /* unused */
210 					 phb->ioda.m64_size);
211 	if (rc != OPAL_SUCCESS) {
212 		desc = "configuring";
213 		goto fail;
214 	}
215 
216 	/* Enable the default M64 BAR */
217 	rc = opal_pci_phb_mmio_enable(phb->opal_id,
218 				      OPAL_M64_WINDOW_TYPE,
219 				      phb->ioda.m64_bar_idx,
220 				      OPAL_ENABLE_M64_SPLIT);
221 	if (rc != OPAL_SUCCESS) {
222 		desc = "enabling";
223 		goto fail;
224 	}
225 
226 	/*
227 	 * Exclude the segments for reserved and root bus PE, which
228 	 * are first or last two PEs.
229 	 */
230 	r = &phb->hose->mem_resources[1];
231 	if (phb->ioda.reserved_pe_idx == 0)
232 		r->start += (2 * phb->ioda.m64_segsize);
233 	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
234 		r->end -= (2 * phb->ioda.m64_segsize);
235 	else
236 		pr_warn("  Cannot strip M64 segment for reserved PE#%x\n",
237 			phb->ioda.reserved_pe_idx);
238 
239 	return 0;
240 
241 fail:
242 	pr_warn("  Failure %lld %s M64 BAR#%d\n",
243 		rc, desc, phb->ioda.m64_bar_idx);
244 	opal_pci_phb_mmio_enable(phb->opal_id,
245 				 OPAL_M64_WINDOW_TYPE,
246 				 phb->ioda.m64_bar_idx,
247 				 OPAL_DISABLE_M64);
248 	return -EIO;
249 }
250 
251 static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
252 					 unsigned long *pe_bitmap)
253 {
254 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
255 	struct pnv_phb *phb = hose->private_data;
256 	struct resource *r;
257 	resource_size_t base, sgsz, start, end;
258 	int segno, i;
259 
260 	base = phb->ioda.m64_base;
261 	sgsz = phb->ioda.m64_segsize;
262 	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
263 		r = &pdev->resource[i];
264 		if (!r->parent || !pnv_pci_is_m64(phb, r))
265 			continue;
266 
267 		start = _ALIGN_DOWN(r->start - base, sgsz);
268 		end = _ALIGN_UP(r->end - base, sgsz);
269 		for (segno = start / sgsz; segno < end / sgsz; segno++) {
270 			if (pe_bitmap)
271 				set_bit(segno, pe_bitmap);
272 			else
273 				pnv_ioda_reserve_pe(phb, segno);
274 		}
275 	}
276 }
277 
278 static int pnv_ioda1_init_m64(struct pnv_phb *phb)
279 {
280 	struct resource *r;
281 	int index;
282 
283 	/*
284 	 * There are 16 M64 BARs, each of which has 8 segments. So
285 	 * there are as many M64 segments as the maximum number of
286 	 * PEs, which is 128.
287 	 */
288 	for (index = 0; index < PNV_IODA1_M64_NUM; index++) {
289 		unsigned long base, segsz = phb->ioda.m64_segsize;
290 		int64_t rc;
291 
292 		base = phb->ioda.m64_base +
293 		       index * PNV_IODA1_M64_SEGS * segsz;
294 		rc = opal_pci_set_phb_mem_window(phb->opal_id,
295 				OPAL_M64_WINDOW_TYPE, index, base, 0,
296 				PNV_IODA1_M64_SEGS * segsz);
297 		if (rc != OPAL_SUCCESS) {
298 			pr_warn("  Error %lld setting M64 PHB#%x-BAR#%d\n",
299 				rc, phb->hose->global_number, index);
300 			goto fail;
301 		}
302 
303 		rc = opal_pci_phb_mmio_enable(phb->opal_id,
304 				OPAL_M64_WINDOW_TYPE, index,
305 				OPAL_ENABLE_M64_SPLIT);
306 		if (rc != OPAL_SUCCESS) {
307 			pr_warn("  Error %lld enabling M64 PHB#%x-BAR#%d\n",
308 				rc, phb->hose->global_number, index);
309 			goto fail;
310 		}
311 	}
312 
313 	/*
314 	 * Exclude the segments for reserved and root bus PE, which
315 	 * are first or last two PEs.
316 	 */
317 	r = &phb->hose->mem_resources[1];
318 	if (phb->ioda.reserved_pe_idx == 0)
319 		r->start += (2 * phb->ioda.m64_segsize);
320 	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
321 		r->end -= (2 * phb->ioda.m64_segsize);
322 	else
323 		WARN(1, "Wrong reserved PE#%x on PHB#%x\n",
324 		     phb->ioda.reserved_pe_idx, phb->hose->global_number);
325 
326 	return 0;
327 
328 fail:
329 	for ( ; index >= 0; index--)
330 		opal_pci_phb_mmio_enable(phb->opal_id,
331 			OPAL_M64_WINDOW_TYPE, index, OPAL_DISABLE_M64);
332 
333 	return -EIO;
334 }
335 
336 static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus,
337 				    unsigned long *pe_bitmap,
338 				    bool all)
339 {
340 	struct pci_dev *pdev;
341 
342 	list_for_each_entry(pdev, &bus->devices, bus_list) {
343 		pnv_ioda_reserve_dev_m64_pe(pdev, pe_bitmap);
344 
345 		if (all && pdev->subordinate)
346 			pnv_ioda_reserve_m64_pe(pdev->subordinate,
347 						pe_bitmap, all);
348 	}
349 }
350 
351 static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
352 {
353 	struct pci_controller *hose = pci_bus_to_host(bus);
354 	struct pnv_phb *phb = hose->private_data;
355 	struct pnv_ioda_pe *master_pe, *pe;
356 	unsigned long size, *pe_alloc;
357 	int i;
358 
359 	/* Root bus shouldn't use M64 */
360 	if (pci_is_root_bus(bus))
361 		return NULL;
362 
363 	/* Allocate bitmap */
364 	size = _ALIGN_UP(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
365 	pe_alloc = kzalloc(size, GFP_KERNEL);
366 	if (!pe_alloc) {
367 		pr_warn("%s: Out of memory !\n",
368 			__func__);
369 		return NULL;
370 	}
371 
372 	/* Figure out reserved PE numbers by the PE */
373 	pnv_ioda_reserve_m64_pe(bus, pe_alloc, all);
374 
375 	/*
376 	 * the current bus might not own M64 window and that's all
377 	 * contributed by its child buses. For the case, we needn't
378 	 * pick M64 dependent PE#.
379 	 */
380 	if (bitmap_empty(pe_alloc, phb->ioda.total_pe_num)) {
381 		kfree(pe_alloc);
382 		return NULL;
383 	}
384 
385 	/*
386 	 * Figure out the master PE and put all slave PEs to master
387 	 * PE's list to form compound PE.
388 	 */
389 	master_pe = NULL;
390 	i = -1;
391 	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe_num, i + 1)) <
392 		phb->ioda.total_pe_num) {
393 		pe = &phb->ioda.pe_array[i];
394 
395 		phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number;
396 		if (!master_pe) {
397 			pe->flags |= PNV_IODA_PE_MASTER;
398 			INIT_LIST_HEAD(&pe->slaves);
399 			master_pe = pe;
400 		} else {
401 			pe->flags |= PNV_IODA_PE_SLAVE;
402 			pe->master = master_pe;
403 			list_add_tail(&pe->list, &master_pe->slaves);
404 		}
405 
406 		/*
407 		 * P7IOC supports M64DT, which helps mapping M64 segment
408 		 * to one particular PE#. However, PHB3 has fixed mapping
409 		 * between M64 segment and PE#. In order to have same logic
410 		 * for P7IOC and PHB3, we enforce fixed mapping between M64
411 		 * segment and PE# on P7IOC.
412 		 */
413 		if (phb->type == PNV_PHB_IODA1) {
414 			int64_t rc;
415 
416 			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
417 					pe->pe_number, OPAL_M64_WINDOW_TYPE,
418 					pe->pe_number / PNV_IODA1_M64_SEGS,
419 					pe->pe_number % PNV_IODA1_M64_SEGS);
420 			if (rc != OPAL_SUCCESS)
421 				pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n",
422 					__func__, rc, phb->hose->global_number,
423 					pe->pe_number);
424 		}
425 	}
426 
427 	kfree(pe_alloc);
428 	return master_pe;
429 }
430 
431 static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
432 {
433 	struct pci_controller *hose = phb->hose;
434 	struct device_node *dn = hose->dn;
435 	struct resource *res;
436 	u32 m64_range[2], i;
437 	const __be32 *r;
438 	u64 pci_addr;
439 
440 	if (phb->type != PNV_PHB_IODA1 && phb->type != PNV_PHB_IODA2) {
441 		pr_info("  Not support M64 window\n");
442 		return;
443 	}
444 
445 	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
446 		pr_info("  Firmware too old to support M64 window\n");
447 		return;
448 	}
449 
450 	r = of_get_property(dn, "ibm,opal-m64-window", NULL);
451 	if (!r) {
452 		pr_info("  No <ibm,opal-m64-window> on %pOF\n",
453 			dn);
454 		return;
455 	}
456 
457 	/*
458 	 * Find the available M64 BAR range and pickup the last one for
459 	 * covering the whole 64-bits space. We support only one range.
460 	 */
461 	if (of_property_read_u32_array(dn, "ibm,opal-available-m64-ranges",
462 				       m64_range, 2)) {
463 		/* In absence of the property, assume 0..15 */
464 		m64_range[0] = 0;
465 		m64_range[1] = 16;
466 	}
467 	/* We only support 64 bits in our allocator */
468 	if (m64_range[1] > 63) {
469 		pr_warn("%s: Limiting M64 range to 63 (from %d) on PHB#%x\n",
470 			__func__, m64_range[1], phb->hose->global_number);
471 		m64_range[1] = 63;
472 	}
473 	/* Empty range, no m64 */
474 	if (m64_range[1] <= m64_range[0]) {
475 		pr_warn("%s: M64 empty, disabling M64 usage on PHB#%x\n",
476 			__func__, phb->hose->global_number);
477 		return;
478 	}
479 
480 	/* Configure M64 informations */
481 	res = &hose->mem_resources[1];
482 	res->name = dn->full_name;
483 	res->start = of_translate_address(dn, r + 2);
484 	res->end = res->start + of_read_number(r + 4, 2) - 1;
485 	res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
486 	pci_addr = of_read_number(r, 2);
487 	hose->mem_offset[1] = res->start - pci_addr;
488 
489 	phb->ioda.m64_size = resource_size(res);
490 	phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num;
491 	phb->ioda.m64_base = pci_addr;
492 
493 	/* This lines up nicely with the display from processing OF ranges */
494 	pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx (M64 #%d..%d)\n",
495 		res->start, res->end, pci_addr, m64_range[0],
496 		m64_range[0] + m64_range[1] - 1);
497 
498 	/* Mark all M64 used up by default */
499 	phb->ioda.m64_bar_alloc = (unsigned long)-1;
500 
501 	/* Use last M64 BAR to cover M64 window */
502 	m64_range[1]--;
503 	phb->ioda.m64_bar_idx = m64_range[0] + m64_range[1];
504 
505 	pr_info(" Using M64 #%d as default window\n", phb->ioda.m64_bar_idx);
506 
507 	/* Mark remaining ones free */
508 	for (i = m64_range[0]; i < m64_range[1]; i++)
509 		clear_bit(i, &phb->ioda.m64_bar_alloc);
510 
511 	/*
512 	 * Setup init functions for M64 based on IODA version, IODA3 uses
513 	 * the IODA2 code.
514 	 */
515 	if (phb->type == PNV_PHB_IODA1)
516 		phb->init_m64 = pnv_ioda1_init_m64;
517 	else
518 		phb->init_m64 = pnv_ioda2_init_m64;
519 }
520 
521 static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
522 {
523 	struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
524 	struct pnv_ioda_pe *slave;
525 	s64 rc;
526 
527 	/* Fetch master PE */
528 	if (pe->flags & PNV_IODA_PE_SLAVE) {
529 		pe = pe->master;
530 		if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
531 			return;
532 
533 		pe_no = pe->pe_number;
534 	}
535 
536 	/* Freeze master PE */
537 	rc = opal_pci_eeh_freeze_set(phb->opal_id,
538 				     pe_no,
539 				     OPAL_EEH_ACTION_SET_FREEZE_ALL);
540 	if (rc != OPAL_SUCCESS) {
541 		pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
542 			__func__, rc, phb->hose->global_number, pe_no);
543 		return;
544 	}
545 
546 	/* Freeze slave PEs */
547 	if (!(pe->flags & PNV_IODA_PE_MASTER))
548 		return;
549 
550 	list_for_each_entry(slave, &pe->slaves, list) {
551 		rc = opal_pci_eeh_freeze_set(phb->opal_id,
552 					     slave->pe_number,
553 					     OPAL_EEH_ACTION_SET_FREEZE_ALL);
554 		if (rc != OPAL_SUCCESS)
555 			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
556 				__func__, rc, phb->hose->global_number,
557 				slave->pe_number);
558 	}
559 }
560 
561 static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
562 {
563 	struct pnv_ioda_pe *pe, *slave;
564 	s64 rc;
565 
566 	/* Find master PE */
567 	pe = &phb->ioda.pe_array[pe_no];
568 	if (pe->flags & PNV_IODA_PE_SLAVE) {
569 		pe = pe->master;
570 		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
571 		pe_no = pe->pe_number;
572 	}
573 
574 	/* Clear frozen state for master PE */
575 	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
576 	if (rc != OPAL_SUCCESS) {
577 		pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
578 			__func__, rc, opt, phb->hose->global_number, pe_no);
579 		return -EIO;
580 	}
581 
582 	if (!(pe->flags & PNV_IODA_PE_MASTER))
583 		return 0;
584 
585 	/* Clear frozen state for slave PEs */
586 	list_for_each_entry(slave, &pe->slaves, list) {
587 		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
588 					     slave->pe_number,
589 					     opt);
590 		if (rc != OPAL_SUCCESS) {
591 			pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
592 				__func__, rc, opt, phb->hose->global_number,
593 				slave->pe_number);
594 			return -EIO;
595 		}
596 	}
597 
598 	return 0;
599 }
600 
601 static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
602 {
603 	struct pnv_ioda_pe *slave, *pe;
604 	u8 fstate = 0, state;
605 	__be16 pcierr = 0;
606 	s64 rc;
607 
608 	/* Sanity check on PE number */
609 	if (pe_no < 0 || pe_no >= phb->ioda.total_pe_num)
610 		return OPAL_EEH_STOPPED_PERM_UNAVAIL;
611 
612 	/*
613 	 * Fetch the master PE and the PE instance might be
614 	 * not initialized yet.
615 	 */
616 	pe = &phb->ioda.pe_array[pe_no];
617 	if (pe->flags & PNV_IODA_PE_SLAVE) {
618 		pe = pe->master;
619 		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
620 		pe_no = pe->pe_number;
621 	}
622 
623 	/* Check the master PE */
624 	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
625 					&state, &pcierr, NULL);
626 	if (rc != OPAL_SUCCESS) {
627 		pr_warn("%s: Failure %lld getting "
628 			"PHB#%x-PE#%x state\n",
629 			__func__, rc,
630 			phb->hose->global_number, pe_no);
631 		return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
632 	}
633 
634 	/* Check the slave PE */
635 	if (!(pe->flags & PNV_IODA_PE_MASTER))
636 		return state;
637 
638 	list_for_each_entry(slave, &pe->slaves, list) {
639 		rc = opal_pci_eeh_freeze_status(phb->opal_id,
640 						slave->pe_number,
641 						&fstate,
642 						&pcierr,
643 						NULL);
644 		if (rc != OPAL_SUCCESS) {
645 			pr_warn("%s: Failure %lld getting "
646 				"PHB#%x-PE#%x state\n",
647 				__func__, rc,
648 				phb->hose->global_number, slave->pe_number);
649 			return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
650 		}
651 
652 		/*
653 		 * Override the result based on the ascending
654 		 * priority.
655 		 */
656 		if (fstate > state)
657 			state = fstate;
658 	}
659 
660 	return state;
661 }
662 
663 struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
664 {
665 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
666 	struct pnv_phb *phb = hose->private_data;
667 	struct pci_dn *pdn = pci_get_pdn(dev);
668 
669 	if (!pdn)
670 		return NULL;
671 	if (pdn->pe_number == IODA_INVALID_PE)
672 		return NULL;
673 	return &phb->ioda.pe_array[pdn->pe_number];
674 }
675 
676 static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
677 				  struct pnv_ioda_pe *parent,
678 				  struct pnv_ioda_pe *child,
679 				  bool is_add)
680 {
681 	const char *desc = is_add ? "adding" : "removing";
682 	uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
683 			      OPAL_REMOVE_PE_FROM_DOMAIN;
684 	struct pnv_ioda_pe *slave;
685 	long rc;
686 
687 	/* Parent PE affects child PE */
688 	rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
689 				child->pe_number, op);
690 	if (rc != OPAL_SUCCESS) {
691 		pe_warn(child, "OPAL error %ld %s to parent PELTV\n",
692 			rc, desc);
693 		return -ENXIO;
694 	}
695 
696 	if (!(child->flags & PNV_IODA_PE_MASTER))
697 		return 0;
698 
699 	/* Compound case: parent PE affects slave PEs */
700 	list_for_each_entry(slave, &child->slaves, list) {
701 		rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
702 					slave->pe_number, op);
703 		if (rc != OPAL_SUCCESS) {
704 			pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
705 				rc, desc);
706 			return -ENXIO;
707 		}
708 	}
709 
710 	return 0;
711 }
712 
713 static int pnv_ioda_set_peltv(struct pnv_phb *phb,
714 			      struct pnv_ioda_pe *pe,
715 			      bool is_add)
716 {
717 	struct pnv_ioda_pe *slave;
718 	struct pci_dev *pdev = NULL;
719 	int ret;
720 
721 	/*
722 	 * Clear PE frozen state. If it's master PE, we need
723 	 * clear slave PE frozen state as well.
724 	 */
725 	if (is_add) {
726 		opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
727 					  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
728 		if (pe->flags & PNV_IODA_PE_MASTER) {
729 			list_for_each_entry(slave, &pe->slaves, list)
730 				opal_pci_eeh_freeze_clear(phb->opal_id,
731 							  slave->pe_number,
732 							  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
733 		}
734 	}
735 
736 	/*
737 	 * Associate PE in PELT. We need add the PE into the
738 	 * corresponding PELT-V as well. Otherwise, the error
739 	 * originated from the PE might contribute to other
740 	 * PEs.
741 	 */
742 	ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
743 	if (ret)
744 		return ret;
745 
746 	/* For compound PEs, any one affects all of them */
747 	if (pe->flags & PNV_IODA_PE_MASTER) {
748 		list_for_each_entry(slave, &pe->slaves, list) {
749 			ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
750 			if (ret)
751 				return ret;
752 		}
753 	}
754 
755 	if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
756 		pdev = pe->pbus->self;
757 	else if (pe->flags & PNV_IODA_PE_DEV)
758 		pdev = pe->pdev->bus->self;
759 #ifdef CONFIG_PCI_IOV
760 	else if (pe->flags & PNV_IODA_PE_VF)
761 		pdev = pe->parent_dev;
762 #endif /* CONFIG_PCI_IOV */
763 	while (pdev) {
764 		struct pci_dn *pdn = pci_get_pdn(pdev);
765 		struct pnv_ioda_pe *parent;
766 
767 		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
768 			parent = &phb->ioda.pe_array[pdn->pe_number];
769 			ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
770 			if (ret)
771 				return ret;
772 		}
773 
774 		pdev = pdev->bus->self;
775 	}
776 
777 	return 0;
778 }
779 
780 static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
781 {
782 	struct pci_dev *parent;
783 	uint8_t bcomp, dcomp, fcomp;
784 	int64_t rc;
785 	long rid_end, rid;
786 
787 	/* Currently, we just deconfigure VF PE. Bus PE will always there.*/
788 	if (pe->pbus) {
789 		int count;
790 
791 		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
792 		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
793 		parent = pe->pbus->self;
794 		if (pe->flags & PNV_IODA_PE_BUS_ALL)
795 			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
796 		else
797 			count = 1;
798 
799 		switch(count) {
800 		case  1: bcomp = OpalPciBusAll;         break;
801 		case  2: bcomp = OpalPciBus7Bits;       break;
802 		case  4: bcomp = OpalPciBus6Bits;       break;
803 		case  8: bcomp = OpalPciBus5Bits;       break;
804 		case 16: bcomp = OpalPciBus4Bits;       break;
805 		case 32: bcomp = OpalPciBus3Bits;       break;
806 		default:
807 			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
808 			        count);
809 			/* Do an exact match only */
810 			bcomp = OpalPciBusAll;
811 		}
812 		rid_end = pe->rid + (count << 8);
813 	} else {
814 #ifdef CONFIG_PCI_IOV
815 		if (pe->flags & PNV_IODA_PE_VF)
816 			parent = pe->parent_dev;
817 		else
818 #endif
819 			parent = pe->pdev->bus->self;
820 		bcomp = OpalPciBusAll;
821 		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
822 		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
823 		rid_end = pe->rid + 1;
824 	}
825 
826 	/* Clear the reverse map */
827 	for (rid = pe->rid; rid < rid_end; rid++)
828 		phb->ioda.pe_rmap[rid] = IODA_INVALID_PE;
829 
830 	/* Release from all parents PELT-V */
831 	while (parent) {
832 		struct pci_dn *pdn = pci_get_pdn(parent);
833 		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
834 			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
835 						pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
836 			/* XXX What to do in case of error ? */
837 		}
838 		parent = parent->bus->self;
839 	}
840 
841 	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
842 				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
843 
844 	/* Disassociate PE in PELT */
845 	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
846 				pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
847 	if (rc)
848 		pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc);
849 	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
850 			     bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
851 	if (rc)
852 		pe_err(pe, "OPAL error %lld trying to setup PELT table\n", rc);
853 
854 	pe->pbus = NULL;
855 	pe->pdev = NULL;
856 #ifdef CONFIG_PCI_IOV
857 	pe->parent_dev = NULL;
858 #endif
859 
860 	return 0;
861 }
862 
863 static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
864 {
865 	struct pci_dev *parent;
866 	uint8_t bcomp, dcomp, fcomp;
867 	long rc, rid_end, rid;
868 
869 	/* Bus validation ? */
870 	if (pe->pbus) {
871 		int count;
872 
873 		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
874 		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
875 		parent = pe->pbus->self;
876 		if (pe->flags & PNV_IODA_PE_BUS_ALL)
877 			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
878 		else
879 			count = 1;
880 
881 		switch(count) {
882 		case  1: bcomp = OpalPciBusAll;		break;
883 		case  2: bcomp = OpalPciBus7Bits;	break;
884 		case  4: bcomp = OpalPciBus6Bits;	break;
885 		case  8: bcomp = OpalPciBus5Bits;	break;
886 		case 16: bcomp = OpalPciBus4Bits;	break;
887 		case 32: bcomp = OpalPciBus3Bits;	break;
888 		default:
889 			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
890 			        count);
891 			/* Do an exact match only */
892 			bcomp = OpalPciBusAll;
893 		}
894 		rid_end = pe->rid + (count << 8);
895 	} else {
896 #ifdef CONFIG_PCI_IOV
897 		if (pe->flags & PNV_IODA_PE_VF)
898 			parent = pe->parent_dev;
899 		else
900 #endif /* CONFIG_PCI_IOV */
901 			parent = pe->pdev->bus->self;
902 		bcomp = OpalPciBusAll;
903 		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
904 		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
905 		rid_end = pe->rid + 1;
906 	}
907 
908 	/*
909 	 * Associate PE in PELT. We need add the PE into the
910 	 * corresponding PELT-V as well. Otherwise, the error
911 	 * originated from the PE might contribute to other
912 	 * PEs.
913 	 */
914 	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
915 			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
916 	if (rc) {
917 		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
918 		return -ENXIO;
919 	}
920 
921 	/*
922 	 * Configure PELTV. NPUs don't have a PELTV table so skip
923 	 * configuration on them.
924 	 */
925 	if (phb->type != PNV_PHB_NPU_NVLINK && phb->type != PNV_PHB_NPU_OCAPI)
926 		pnv_ioda_set_peltv(phb, pe, true);
927 
928 	/* Setup reverse map */
929 	for (rid = pe->rid; rid < rid_end; rid++)
930 		phb->ioda.pe_rmap[rid] = pe->pe_number;
931 
932 	/* Setup one MVTs on IODA1 */
933 	if (phb->type != PNV_PHB_IODA1) {
934 		pe->mve_number = 0;
935 		goto out;
936 	}
937 
938 	pe->mve_number = pe->pe_number;
939 	rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
940 	if (rc != OPAL_SUCCESS) {
941 		pe_err(pe, "OPAL error %ld setting up MVE %x\n",
942 		       rc, pe->mve_number);
943 		pe->mve_number = -1;
944 	} else {
945 		rc = opal_pci_set_mve_enable(phb->opal_id,
946 					     pe->mve_number, OPAL_ENABLE_MVE);
947 		if (rc) {
948 			pe_err(pe, "OPAL error %ld enabling MVE %x\n",
949 			       rc, pe->mve_number);
950 			pe->mve_number = -1;
951 		}
952 	}
953 
954 out:
955 	return 0;
956 }
957 
958 #ifdef CONFIG_PCI_IOV
959 static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
960 {
961 	struct pci_dn *pdn = pci_get_pdn(dev);
962 	int i;
963 	struct resource *res, res2;
964 	resource_size_t size;
965 	u16 num_vfs;
966 
967 	if (!dev->is_physfn)
968 		return -EINVAL;
969 
970 	/*
971 	 * "offset" is in VFs.  The M64 windows are sized so that when they
972 	 * are segmented, each segment is the same size as the IOV BAR.
973 	 * Each segment is in a separate PE, and the high order bits of the
974 	 * address are the PE number.  Therefore, each VF's BAR is in a
975 	 * separate PE, and changing the IOV BAR start address changes the
976 	 * range of PEs the VFs are in.
977 	 */
978 	num_vfs = pdn->num_vfs;
979 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
980 		res = &dev->resource[i + PCI_IOV_RESOURCES];
981 		if (!res->flags || !res->parent)
982 			continue;
983 
984 		/*
985 		 * The actual IOV BAR range is determined by the start address
986 		 * and the actual size for num_vfs VFs BAR.  This check is to
987 		 * make sure that after shifting, the range will not overlap
988 		 * with another device.
989 		 */
990 		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
991 		res2.flags = res->flags;
992 		res2.start = res->start + (size * offset);
993 		res2.end = res2.start + (size * num_vfs) - 1;
994 
995 		if (res2.end > res->end) {
996 			dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
997 				i, &res2, res, num_vfs, offset);
998 			return -EBUSY;
999 		}
1000 	}
1001 
1002 	/*
1003 	 * Since M64 BAR shares segments among all possible 256 PEs,
1004 	 * we have to shift the beginning of PF IOV BAR to make it start from
1005 	 * the segment which belongs to the PE number assigned to the first VF.
1006 	 * This creates a "hole" in the /proc/iomem which could be used for
1007 	 * allocating other resources so we reserve this area below and
1008 	 * release when IOV is released.
1009 	 */
1010 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
1011 		res = &dev->resource[i + PCI_IOV_RESOURCES];
1012 		if (!res->flags || !res->parent)
1013 			continue;
1014 
1015 		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
1016 		res2 = *res;
1017 		res->start += size * offset;
1018 
1019 		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
1020 			 i, &res2, res, (offset > 0) ? "En" : "Dis",
1021 			 num_vfs, offset);
1022 
1023 		if (offset < 0) {
1024 			devm_release_resource(&dev->dev, &pdn->holes[i]);
1025 			memset(&pdn->holes[i], 0, sizeof(pdn->holes[i]));
1026 		}
1027 
1028 		pci_update_resource(dev, i + PCI_IOV_RESOURCES);
1029 
1030 		if (offset > 0) {
1031 			pdn->holes[i].start = res2.start;
1032 			pdn->holes[i].end = res2.start + size * offset - 1;
1033 			pdn->holes[i].flags = IORESOURCE_BUS;
1034 			pdn->holes[i].name = "pnv_iov_reserved";
1035 			devm_request_resource(&dev->dev, res->parent,
1036 					&pdn->holes[i]);
1037 		}
1038 	}
1039 	return 0;
1040 }
1041 #endif /* CONFIG_PCI_IOV */
1042 
1043 static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
1044 {
1045 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
1046 	struct pnv_phb *phb = hose->private_data;
1047 	struct pci_dn *pdn = pci_get_pdn(dev);
1048 	struct pnv_ioda_pe *pe;
1049 
1050 	if (!pdn) {
1051 		pr_err("%s: Device tree node not associated properly\n",
1052 			   pci_name(dev));
1053 		return NULL;
1054 	}
1055 	if (pdn->pe_number != IODA_INVALID_PE)
1056 		return NULL;
1057 
1058 	pe = pnv_ioda_alloc_pe(phb);
1059 	if (!pe) {
1060 		pr_warn("%s: Not enough PE# available, disabling device\n",
1061 			pci_name(dev));
1062 		return NULL;
1063 	}
1064 
1065 	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
1066 	 * pointer in the PE data structure, both should be destroyed at the
1067 	 * same time. However, this needs to be looked at more closely again
1068 	 * once we actually start removing things (Hotplug, SR-IOV, ...)
1069 	 *
1070 	 * At some point we want to remove the PDN completely anyways
1071 	 */
1072 	pci_dev_get(dev);
1073 	pdn->pe_number = pe->pe_number;
1074 	pe->flags = PNV_IODA_PE_DEV;
1075 	pe->pdev = dev;
1076 	pe->pbus = NULL;
1077 	pe->mve_number = -1;
1078 	pe->rid = dev->bus->number << 8 | pdn->devfn;
1079 
1080 	pe_info(pe, "Associated device to PE\n");
1081 
1082 	if (pnv_ioda_configure_pe(phb, pe)) {
1083 		/* XXX What do we do here ? */
1084 		pnv_ioda_free_pe(pe);
1085 		pdn->pe_number = IODA_INVALID_PE;
1086 		pe->pdev = NULL;
1087 		pci_dev_put(dev);
1088 		return NULL;
1089 	}
1090 
1091 	/* Put PE to the list */
1092 	list_add_tail(&pe->list, &phb->ioda.pe_list);
1093 
1094 	return pe;
1095 }
1096 
1097 static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
1098 {
1099 	struct pci_dev *dev;
1100 
1101 	list_for_each_entry(dev, &bus->devices, bus_list) {
1102 		struct pci_dn *pdn = pci_get_pdn(dev);
1103 
1104 		if (pdn == NULL) {
1105 			pr_warn("%s: No device node associated with device !\n",
1106 				pci_name(dev));
1107 			continue;
1108 		}
1109 
1110 		/*
1111 		 * In partial hotplug case, the PCI device might be still
1112 		 * associated with the PE and needn't attach it to the PE
1113 		 * again.
1114 		 */
1115 		if (pdn->pe_number != IODA_INVALID_PE)
1116 			continue;
1117 
1118 		pe->device_count++;
1119 		pdn->pe_number = pe->pe_number;
1120 		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
1121 			pnv_ioda_setup_same_PE(dev->subordinate, pe);
1122 	}
1123 }
1124 
1125 /*
1126  * There're 2 types of PCI bus sensitive PEs: One that is compromised of
1127  * single PCI bus. Another one that contains the primary PCI bus and its
1128  * subordinate PCI devices and buses. The second type of PE is normally
1129  * orgiriated by PCIe-to-PCI bridge or PLX switch downstream ports.
1130  */
1131 static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
1132 {
1133 	struct pci_controller *hose = pci_bus_to_host(bus);
1134 	struct pnv_phb *phb = hose->private_data;
1135 	struct pnv_ioda_pe *pe = NULL;
1136 	unsigned int pe_num;
1137 
1138 	/*
1139 	 * In partial hotplug case, the PE instance might be still alive.
1140 	 * We should reuse it instead of allocating a new one.
1141 	 */
1142 	pe_num = phb->ioda.pe_rmap[bus->number << 8];
1143 	if (pe_num != IODA_INVALID_PE) {
1144 		pe = &phb->ioda.pe_array[pe_num];
1145 		pnv_ioda_setup_same_PE(bus, pe);
1146 		return NULL;
1147 	}
1148 
1149 	/* PE number for root bus should have been reserved */
1150 	if (pci_is_root_bus(bus) &&
1151 	    phb->ioda.root_pe_idx != IODA_INVALID_PE)
1152 		pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];
1153 
1154 	/* Check if PE is determined by M64 */
1155 	if (!pe)
1156 		pe = pnv_ioda_pick_m64_pe(bus, all);
1157 
1158 	/* The PE number isn't pinned by M64 */
1159 	if (!pe)
1160 		pe = pnv_ioda_alloc_pe(phb);
1161 
1162 	if (!pe) {
1163 		pr_warn("%s: Not enough PE# available for PCI bus %04x:%02x\n",
1164 			__func__, pci_domain_nr(bus), bus->number);
1165 		return NULL;
1166 	}
1167 
1168 	pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
1169 	pe->pbus = bus;
1170 	pe->pdev = NULL;
1171 	pe->mve_number = -1;
1172 	pe->rid = bus->busn_res.start << 8;
1173 
1174 	if (all)
1175 		pe_info(pe, "Secondary bus %pad..%pad associated with PE#%x\n",
1176 			&bus->busn_res.start, &bus->busn_res.end,
1177 			pe->pe_number);
1178 	else
1179 		pe_info(pe, "Secondary bus %pad associated with PE#%x\n",
1180 			&bus->busn_res.start, pe->pe_number);
1181 
1182 	if (pnv_ioda_configure_pe(phb, pe)) {
1183 		/* XXX What do we do here ? */
1184 		pnv_ioda_free_pe(pe);
1185 		pe->pbus = NULL;
1186 		return NULL;
1187 	}
1188 
1189 	/* Associate it with all child devices */
1190 	pnv_ioda_setup_same_PE(bus, pe);
1191 
1192 	/* Put PE to the list */
1193 	list_add_tail(&pe->list, &phb->ioda.pe_list);
1194 
1195 	return pe;
1196 }
1197 
1198 static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
1199 {
1200 	int pe_num, found_pe = false, rc;
1201 	long rid;
1202 	struct pnv_ioda_pe *pe;
1203 	struct pci_dev *gpu_pdev;
1204 	struct pci_dn *npu_pdn;
1205 	struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus);
1206 	struct pnv_phb *phb = hose->private_data;
1207 
1208 	/*
1209 	 * Due to a hardware errata PE#0 on the NPU is reserved for
1210 	 * error handling. This means we only have three PEs remaining
1211 	 * which need to be assigned to four links, implying some
1212 	 * links must share PEs.
1213 	 *
1214 	 * To achieve this we assign PEs such that NPUs linking the
1215 	 * same GPU get assigned the same PE.
1216 	 */
1217 	gpu_pdev = pnv_pci_get_gpu_dev(npu_pdev);
1218 	for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
1219 		pe = &phb->ioda.pe_array[pe_num];
1220 		if (!pe->pdev)
1221 			continue;
1222 
1223 		if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) {
1224 			/*
1225 			 * This device has the same peer GPU so should
1226 			 * be assigned the same PE as the existing
1227 			 * peer NPU.
1228 			 */
1229 			dev_info(&npu_pdev->dev,
1230 				"Associating to existing PE %x\n", pe_num);
1231 			pci_dev_get(npu_pdev);
1232 			npu_pdn = pci_get_pdn(npu_pdev);
1233 			rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
1234 			npu_pdn->pe_number = pe_num;
1235 			phb->ioda.pe_rmap[rid] = pe->pe_number;
1236 
1237 			/* Map the PE to this link */
1238 			rc = opal_pci_set_pe(phb->opal_id, pe_num, rid,
1239 					OpalPciBusAll,
1240 					OPAL_COMPARE_RID_DEVICE_NUMBER,
1241 					OPAL_COMPARE_RID_FUNCTION_NUMBER,
1242 					OPAL_MAP_PE);
1243 			WARN_ON(rc != OPAL_SUCCESS);
1244 			found_pe = true;
1245 			break;
1246 		}
1247 	}
1248 
1249 	if (!found_pe)
1250 		/*
1251 		 * Could not find an existing PE so allocate a new
1252 		 * one.
1253 		 */
1254 		return pnv_ioda_setup_dev_PE(npu_pdev);
1255 	else
1256 		return pe;
1257 }
1258 
1259 static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
1260 {
1261 	struct pci_dev *pdev;
1262 
1263 	list_for_each_entry(pdev, &bus->devices, bus_list)
1264 		pnv_ioda_setup_npu_PE(pdev);
1265 }
1266 
1267 static void pnv_pci_ioda_setup_PEs(void)
1268 {
1269 	struct pci_controller *hose;
1270 	struct pnv_phb *phb;
1271 	struct pci_bus *bus;
1272 	struct pci_dev *pdev;
1273 	struct pnv_ioda_pe *pe;
1274 
1275 	list_for_each_entry(hose, &hose_list, list_node) {
1276 		phb = hose->private_data;
1277 		if (phb->type == PNV_PHB_NPU_NVLINK) {
1278 			/* PE#0 is needed for error reporting */
1279 			pnv_ioda_reserve_pe(phb, 0);
1280 			pnv_ioda_setup_npu_PEs(hose->bus);
1281 			if (phb->model == PNV_PHB_MODEL_NPU2)
1282 				WARN_ON_ONCE(pnv_npu2_init(hose));
1283 		}
1284 		if (phb->type == PNV_PHB_NPU_OCAPI) {
1285 			bus = hose->bus;
1286 			list_for_each_entry(pdev, &bus->devices, bus_list)
1287 				pnv_ioda_setup_dev_PE(pdev);
1288 		}
1289 	}
1290 	list_for_each_entry(hose, &hose_list, list_node) {
1291 		phb = hose->private_data;
1292 		if (phb->type != PNV_PHB_IODA2)
1293 			continue;
1294 
1295 		list_for_each_entry(pe, &phb->ioda.pe_list, list)
1296 			pnv_npu2_map_lpar(pe, MSR_DR | MSR_PR | MSR_HV);
1297 	}
1298 }
1299 
1300 #ifdef CONFIG_PCI_IOV
1301 static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)
1302 {
1303 	struct pci_bus        *bus;
1304 	struct pci_controller *hose;
1305 	struct pnv_phb        *phb;
1306 	struct pci_dn         *pdn;
1307 	int                    i, j;
1308 	int                    m64_bars;
1309 
1310 	bus = pdev->bus;
1311 	hose = pci_bus_to_host(bus);
1312 	phb = hose->private_data;
1313 	pdn = pci_get_pdn(pdev);
1314 
1315 	if (pdn->m64_single_mode)
1316 		m64_bars = num_vfs;
1317 	else
1318 		m64_bars = 1;
1319 
1320 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
1321 		for (j = 0; j < m64_bars; j++) {
1322 			if (pdn->m64_map[j][i] == IODA_INVALID_M64)
1323 				continue;
1324 			opal_pci_phb_mmio_enable(phb->opal_id,
1325 				OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 0);
1326 			clear_bit(pdn->m64_map[j][i], &phb->ioda.m64_bar_alloc);
1327 			pdn->m64_map[j][i] = IODA_INVALID_M64;
1328 		}
1329 
1330 	kfree(pdn->m64_map);
1331 	return 0;
1332 }
1333 
1334 static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
1335 {
1336 	struct pci_bus        *bus;
1337 	struct pci_controller *hose;
1338 	struct pnv_phb        *phb;
1339 	struct pci_dn         *pdn;
1340 	unsigned int           win;
1341 	struct resource       *res;
1342 	int                    i, j;
1343 	int64_t                rc;
1344 	int                    total_vfs;
1345 	resource_size_t        size, start;
1346 	int                    pe_num;
1347 	int                    m64_bars;
1348 
1349 	bus = pdev->bus;
1350 	hose = pci_bus_to_host(bus);
1351 	phb = hose->private_data;
1352 	pdn = pci_get_pdn(pdev);
1353 	total_vfs = pci_sriov_get_totalvfs(pdev);
1354 
1355 	if (pdn->m64_single_mode)
1356 		m64_bars = num_vfs;
1357 	else
1358 		m64_bars = 1;
1359 
1360 	pdn->m64_map = kmalloc_array(m64_bars,
1361 				     sizeof(*pdn->m64_map),
1362 				     GFP_KERNEL);
1363 	if (!pdn->m64_map)
1364 		return -ENOMEM;
1365 	/* Initialize the m64_map to IODA_INVALID_M64 */
1366 	for (i = 0; i < m64_bars ; i++)
1367 		for (j = 0; j < PCI_SRIOV_NUM_BARS; j++)
1368 			pdn->m64_map[i][j] = IODA_INVALID_M64;
1369 
1370 
1371 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
1372 		res = &pdev->resource[i + PCI_IOV_RESOURCES];
1373 		if (!res->flags || !res->parent)
1374 			continue;
1375 
1376 		for (j = 0; j < m64_bars; j++) {
1377 			do {
1378 				win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
1379 						phb->ioda.m64_bar_idx + 1, 0);
1380 
1381 				if (win >= phb->ioda.m64_bar_idx + 1)
1382 					goto m64_failed;
1383 			} while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));
1384 
1385 			pdn->m64_map[j][i] = win;
1386 
1387 			if (pdn->m64_single_mode) {
1388 				size = pci_iov_resource_size(pdev,
1389 							PCI_IOV_RESOURCES + i);
1390 				start = res->start + size * j;
1391 			} else {
1392 				size = resource_size(res);
1393 				start = res->start;
1394 			}
1395 
1396 			/* Map the M64 here */
1397 			if (pdn->m64_single_mode) {
1398 				pe_num = pdn->pe_num_map[j];
1399 				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
1400 						pe_num, OPAL_M64_WINDOW_TYPE,
1401 						pdn->m64_map[j][i], 0);
1402 			}
1403 
1404 			rc = opal_pci_set_phb_mem_window(phb->opal_id,
1405 						 OPAL_M64_WINDOW_TYPE,
1406 						 pdn->m64_map[j][i],
1407 						 start,
1408 						 0, /* unused */
1409 						 size);
1410 
1411 
1412 			if (rc != OPAL_SUCCESS) {
1413 				dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
1414 					win, rc);
1415 				goto m64_failed;
1416 			}
1417 
1418 			if (pdn->m64_single_mode)
1419 				rc = opal_pci_phb_mmio_enable(phb->opal_id,
1420 				     OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 2);
1421 			else
1422 				rc = opal_pci_phb_mmio_enable(phb->opal_id,
1423 				     OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 1);
1424 
1425 			if (rc != OPAL_SUCCESS) {
1426 				dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",
1427 					win, rc);
1428 				goto m64_failed;
1429 			}
1430 		}
1431 	}
1432 	return 0;
1433 
1434 m64_failed:
1435 	pnv_pci_vf_release_m64(pdev, num_vfs);
1436 	return -EBUSY;
1437 }
1438 
1439 static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
1440 		int num);
1441 
1442 static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
1443 {
1444 	struct iommu_table    *tbl;
1445 	int64_t               rc;
1446 
1447 	tbl = pe->table_group.tables[0];
1448 	rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
1449 	if (rc)
1450 		pe_warn(pe, "OPAL error %lld release DMA window\n", rc);
1451 
1452 	pnv_pci_ioda2_set_bypass(pe, false);
1453 	if (pe->table_group.group) {
1454 		iommu_group_put(pe->table_group.group);
1455 		BUG_ON(pe->table_group.group);
1456 	}
1457 	iommu_tce_table_put(tbl);
1458 }
1459 
1460 static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
1461 {
1462 	struct pci_bus        *bus;
1463 	struct pci_controller *hose;
1464 	struct pnv_phb        *phb;
1465 	struct pnv_ioda_pe    *pe, *pe_n;
1466 	struct pci_dn         *pdn;
1467 
1468 	bus = pdev->bus;
1469 	hose = pci_bus_to_host(bus);
1470 	phb = hose->private_data;
1471 	pdn = pci_get_pdn(pdev);
1472 
1473 	if (!pdev->is_physfn)
1474 		return;
1475 
1476 	list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
1477 		if (pe->parent_dev != pdev)
1478 			continue;
1479 
1480 		pnv_pci_ioda2_release_dma_pe(pdev, pe);
1481 
1482 		/* Remove from list */
1483 		mutex_lock(&phb->ioda.pe_list_mutex);
1484 		list_del(&pe->list);
1485 		mutex_unlock(&phb->ioda.pe_list_mutex);
1486 
1487 		pnv_ioda_deconfigure_pe(phb, pe);
1488 
1489 		pnv_ioda_free_pe(pe);
1490 	}
1491 }
1492 
1493 void pnv_pci_sriov_disable(struct pci_dev *pdev)
1494 {
1495 	struct pci_bus        *bus;
1496 	struct pci_controller *hose;
1497 	struct pnv_phb        *phb;
1498 	struct pnv_ioda_pe    *pe;
1499 	struct pci_dn         *pdn;
1500 	u16                    num_vfs, i;
1501 
1502 	bus = pdev->bus;
1503 	hose = pci_bus_to_host(bus);
1504 	phb = hose->private_data;
1505 	pdn = pci_get_pdn(pdev);
1506 	num_vfs = pdn->num_vfs;
1507 
1508 	/* Release VF PEs */
1509 	pnv_ioda_release_vf_PE(pdev);
1510 
1511 	if (phb->type == PNV_PHB_IODA2) {
1512 		if (!pdn->m64_single_mode)
1513 			pnv_pci_vf_resource_shift(pdev, -*pdn->pe_num_map);
1514 
1515 		/* Release M64 windows */
1516 		pnv_pci_vf_release_m64(pdev, num_vfs);
1517 
1518 		/* Release PE numbers */
1519 		if (pdn->m64_single_mode) {
1520 			for (i = 0; i < num_vfs; i++) {
1521 				if (pdn->pe_num_map[i] == IODA_INVALID_PE)
1522 					continue;
1523 
1524 				pe = &phb->ioda.pe_array[pdn->pe_num_map[i]];
1525 				pnv_ioda_free_pe(pe);
1526 			}
1527 		} else
1528 			bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
1529 		/* Releasing pe_num_map */
1530 		kfree(pdn->pe_num_map);
1531 	}
1532 }
1533 
1534 static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
1535 				       struct pnv_ioda_pe *pe);
1536 #ifdef CONFIG_IOMMU_API
1537 static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe,
1538 		struct iommu_table_group *table_group, struct pci_bus *bus);
1539 
1540 #endif
1541 static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
1542 {
1543 	struct pci_bus        *bus;
1544 	struct pci_controller *hose;
1545 	struct pnv_phb        *phb;
1546 	struct pnv_ioda_pe    *pe;
1547 	int                    pe_num;
1548 	u16                    vf_index;
1549 	struct pci_dn         *pdn;
1550 
1551 	bus = pdev->bus;
1552 	hose = pci_bus_to_host(bus);
1553 	phb = hose->private_data;
1554 	pdn = pci_get_pdn(pdev);
1555 
1556 	if (!pdev->is_physfn)
1557 		return;
1558 
1559 	/* Reserve PE for each VF */
1560 	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
1561 		if (pdn->m64_single_mode)
1562 			pe_num = pdn->pe_num_map[vf_index];
1563 		else
1564 			pe_num = *pdn->pe_num_map + vf_index;
1565 
1566 		pe = &phb->ioda.pe_array[pe_num];
1567 		pe->pe_number = pe_num;
1568 		pe->phb = phb;
1569 		pe->flags = PNV_IODA_PE_VF;
1570 		pe->pbus = NULL;
1571 		pe->parent_dev = pdev;
1572 		pe->mve_number = -1;
1573 		pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
1574 			   pci_iov_virtfn_devfn(pdev, vf_index);
1575 
1576 		pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
1577 			hose->global_number, pdev->bus->number,
1578 			PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
1579 			PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
1580 
1581 		if (pnv_ioda_configure_pe(phb, pe)) {
1582 			/* XXX What do we do here ? */
1583 			pnv_ioda_free_pe(pe);
1584 			pe->pdev = NULL;
1585 			continue;
1586 		}
1587 
1588 		/* Put PE to the list */
1589 		mutex_lock(&phb->ioda.pe_list_mutex);
1590 		list_add_tail(&pe->list, &phb->ioda.pe_list);
1591 		mutex_unlock(&phb->ioda.pe_list_mutex);
1592 
1593 		pnv_pci_ioda2_setup_dma_pe(phb, pe);
1594 #ifdef CONFIG_IOMMU_API
1595 		iommu_register_group(&pe->table_group,
1596 				pe->phb->hose->global_number, pe->pe_number);
1597 		pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL);
1598 #endif
1599 	}
1600 }
1601 
1602 int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
1603 {
1604 	struct pci_bus        *bus;
1605 	struct pci_controller *hose;
1606 	struct pnv_phb        *phb;
1607 	struct pnv_ioda_pe    *pe;
1608 	struct pci_dn         *pdn;
1609 	int                    ret;
1610 	u16                    i;
1611 
1612 	bus = pdev->bus;
1613 	hose = pci_bus_to_host(bus);
1614 	phb = hose->private_data;
1615 	pdn = pci_get_pdn(pdev);
1616 
1617 	if (phb->type == PNV_PHB_IODA2) {
1618 		if (!pdn->vfs_expanded) {
1619 			dev_info(&pdev->dev, "don't support this SRIOV device"
1620 				" with non 64bit-prefetchable IOV BAR\n");
1621 			return -ENOSPC;
1622 		}
1623 
1624 		/*
1625 		 * When M64 BARs functions in Single PE mode, the number of VFs
1626 		 * could be enabled must be less than the number of M64 BARs.
1627 		 */
1628 		if (pdn->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) {
1629 			dev_info(&pdev->dev, "Not enough M64 BAR for VFs\n");
1630 			return -EBUSY;
1631 		}
1632 
1633 		/* Allocating pe_num_map */
1634 		if (pdn->m64_single_mode)
1635 			pdn->pe_num_map = kmalloc_array(num_vfs,
1636 							sizeof(*pdn->pe_num_map),
1637 							GFP_KERNEL);
1638 		else
1639 			pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map), GFP_KERNEL);
1640 
1641 		if (!pdn->pe_num_map)
1642 			return -ENOMEM;
1643 
1644 		if (pdn->m64_single_mode)
1645 			for (i = 0; i < num_vfs; i++)
1646 				pdn->pe_num_map[i] = IODA_INVALID_PE;
1647 
1648 		/* Calculate available PE for required VFs */
1649 		if (pdn->m64_single_mode) {
1650 			for (i = 0; i < num_vfs; i++) {
1651 				pe = pnv_ioda_alloc_pe(phb);
1652 				if (!pe) {
1653 					ret = -EBUSY;
1654 					goto m64_failed;
1655 				}
1656 
1657 				pdn->pe_num_map[i] = pe->pe_number;
1658 			}
1659 		} else {
1660 			mutex_lock(&phb->ioda.pe_alloc_mutex);
1661 			*pdn->pe_num_map = bitmap_find_next_zero_area(
1662 				phb->ioda.pe_alloc, phb->ioda.total_pe_num,
1663 				0, num_vfs, 0);
1664 			if (*pdn->pe_num_map >= phb->ioda.total_pe_num) {
1665 				mutex_unlock(&phb->ioda.pe_alloc_mutex);
1666 				dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
1667 				kfree(pdn->pe_num_map);
1668 				return -EBUSY;
1669 			}
1670 			bitmap_set(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
1671 			mutex_unlock(&phb->ioda.pe_alloc_mutex);
1672 		}
1673 		pdn->num_vfs = num_vfs;
1674 
1675 		/* Assign M64 window accordingly */
1676 		ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
1677 		if (ret) {
1678 			dev_info(&pdev->dev, "Not enough M64 window resources\n");
1679 			goto m64_failed;
1680 		}
1681 
1682 		/*
1683 		 * When using one M64 BAR to map one IOV BAR, we need to shift
1684 		 * the IOV BAR according to the PE# allocated to the VFs.
1685 		 * Otherwise, the PE# for the VF will conflict with others.
1686 		 */
1687 		if (!pdn->m64_single_mode) {
1688 			ret = pnv_pci_vf_resource_shift(pdev, *pdn->pe_num_map);
1689 			if (ret)
1690 				goto m64_failed;
1691 		}
1692 	}
1693 
1694 	/* Setup VF PEs */
1695 	pnv_ioda_setup_vf_PE(pdev, num_vfs);
1696 
1697 	return 0;
1698 
1699 m64_failed:
1700 	if (pdn->m64_single_mode) {
1701 		for (i = 0; i < num_vfs; i++) {
1702 			if (pdn->pe_num_map[i] == IODA_INVALID_PE)
1703 				continue;
1704 
1705 			pe = &phb->ioda.pe_array[pdn->pe_num_map[i]];
1706 			pnv_ioda_free_pe(pe);
1707 		}
1708 	} else
1709 		bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
1710 
1711 	/* Releasing pe_num_map */
1712 	kfree(pdn->pe_num_map);
1713 
1714 	return ret;
1715 }
1716 
1717 int pnv_pcibios_sriov_disable(struct pci_dev *pdev)
1718 {
1719 	pnv_pci_sriov_disable(pdev);
1720 
1721 	/* Release PCI data */
1722 	remove_dev_pci_data(pdev);
1723 	return 0;
1724 }
1725 
1726 int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
1727 {
1728 	/* Allocate PCI data */
1729 	add_dev_pci_data(pdev);
1730 
1731 	return pnv_pci_sriov_enable(pdev, num_vfs);
1732 }
1733 #endif /* CONFIG_PCI_IOV */
1734 
1735 static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
1736 {
1737 	struct pci_dn *pdn = pci_get_pdn(pdev);
1738 	struct pnv_ioda_pe *pe;
1739 
1740 	/*
1741 	 * The function can be called while the PE#
1742 	 * hasn't been assigned. Do nothing for the
1743 	 * case.
1744 	 */
1745 	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
1746 		return;
1747 
1748 	pe = &phb->ioda.pe_array[pdn->pe_number];
1749 	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
1750 	pdev->dev.archdata.dma_offset = pe->tce_bypass_base;
1751 	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
1752 	/*
1753 	 * Note: iommu_add_device() will fail here as
1754 	 * for physical PE: the device is already added by now;
1755 	 * for virtual PE: sysfs entries are not ready yet and
1756 	 * tce_iommu_bus_notifier will add the device to a group later.
1757 	 */
1758 }
1759 
1760 /*
1761  * Reconfigure TVE#0 to be usable as 64-bit DMA space.
1762  *
1763  * The first 4GB of virtual memory for a PE is reserved for 32-bit accesses.
1764  * Devices can only access more than that if bit 59 of the PCI address is set
1765  * by hardware, which indicates TVE#1 should be used instead of TVE#0.
1766  * Many PCI devices are not capable of addressing that many bits, and as a
1767  * result are limited to the 4GB of virtual memory made available to 32-bit
1768  * devices in TVE#0.
1769  *
1770  * In order to work around this, reconfigure TVE#0 to be suitable for 64-bit
1771  * devices by configuring the virtual memory past the first 4GB inaccessible
1772  * by 64-bit DMAs.  This should only be used by devices that want more than
1773  * 4GB, and only on PEs that have no 32-bit devices.
1774  *
1775  * Currently this will only work on PHB3 (POWER8).
1776  */
1777 static int pnv_pci_ioda_dma_64bit_bypass(struct pnv_ioda_pe *pe)
1778 {
1779 	u64 window_size, table_size, tce_count, addr;
1780 	struct page *table_pages;
1781 	u64 tce_order = 28; /* 256MB TCEs */
1782 	__be64 *tces;
1783 	s64 rc;
1784 
1785 	/*
1786 	 * Window size needs to be a power of two, but needs to account for
1787 	 * shifting memory by the 4GB offset required to skip 32bit space.
1788 	 */
1789 	window_size = roundup_pow_of_two(memory_hotplug_max() + (1ULL << 32));
1790 	tce_count = window_size >> tce_order;
1791 	table_size = tce_count << 3;
1792 
1793 	if (table_size < PAGE_SIZE)
1794 		table_size = PAGE_SIZE;
1795 
1796 	table_pages = alloc_pages_node(pe->phb->hose->node, GFP_KERNEL,
1797 				       get_order(table_size));
1798 	if (!table_pages)
1799 		goto err;
1800 
1801 	tces = page_address(table_pages);
1802 	if (!tces)
1803 		goto err;
1804 
1805 	memset(tces, 0, table_size);
1806 
1807 	for (addr = 0; addr < memory_hotplug_max(); addr += (1 << tce_order)) {
1808 		tces[(addr + (1ULL << 32)) >> tce_order] =
1809 			cpu_to_be64(addr | TCE_PCI_READ | TCE_PCI_WRITE);
1810 	}
1811 
1812 	rc = opal_pci_map_pe_dma_window(pe->phb->opal_id,
1813 					pe->pe_number,
1814 					/* reconfigure window 0 */
1815 					(pe->pe_number << 1) + 0,
1816 					1,
1817 					__pa(tces),
1818 					table_size,
1819 					1 << tce_order);
1820 	if (rc == OPAL_SUCCESS) {
1821 		pe_info(pe, "Using 64-bit DMA iommu bypass (through TVE#0)\n");
1822 		return 0;
1823 	}
1824 err:
1825 	pe_err(pe, "Error configuring 64-bit DMA bypass\n");
1826 	return -EIO;
1827 }
1828 
1829 static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev,
1830 		u64 dma_mask)
1831 {
1832 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
1833 	struct pnv_phb *phb = hose->private_data;
1834 	struct pci_dn *pdn = pci_get_pdn(pdev);
1835 	struct pnv_ioda_pe *pe;
1836 
1837 	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
1838 		return false;
1839 
1840 	pe = &phb->ioda.pe_array[pdn->pe_number];
1841 	if (pe->tce_bypass_enabled) {
1842 		u64 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
1843 		if (dma_mask >= top)
1844 			return true;
1845 	}
1846 
1847 	/*
1848 	 * If the device can't set the TCE bypass bit but still wants
1849 	 * to access 4GB or more, on PHB3 we can reconfigure TVE#0 to
1850 	 * bypass the 32-bit region and be usable for 64-bit DMAs.
1851 	 * The device needs to be able to address all of this space.
1852 	 */
1853 	if (dma_mask >> 32 &&
1854 	    dma_mask > (memory_hotplug_max() + (1ULL << 32)) &&
1855 	    /* pe->pdev should be set if it's a single device, pe->pbus if not */
1856 	    (pe->device_count == 1 || !pe->pbus) &&
1857 	    phb->model == PNV_PHB_MODEL_PHB3) {
1858 		/* Configure the bypass mode */
1859 		s64 rc = pnv_pci_ioda_dma_64bit_bypass(pe);
1860 		if (rc)
1861 			return false;
1862 		/* 4GB offset bypasses 32-bit space */
1863 		pdev->dev.archdata.dma_offset = (1ULL << 32);
1864 		return true;
1865 	}
1866 
1867 	return false;
1868 }
1869 
1870 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
1871 {
1872 	struct pci_dev *dev;
1873 
1874 	list_for_each_entry(dev, &bus->devices, bus_list) {
1875 		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
1876 		dev->dev.archdata.dma_offset = pe->tce_bypass_base;
1877 
1878 		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
1879 			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
1880 	}
1881 }
1882 
1883 static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb,
1884 						     bool real_mode)
1885 {
1886 	return real_mode ? (__be64 __iomem *)(phb->regs_phys + 0x210) :
1887 		(phb->regs + 0x210);
1888 }
1889 
1890 static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
1891 		unsigned long index, unsigned long npages, bool rm)
1892 {
1893 	struct iommu_table_group_link *tgl = list_first_entry_or_null(
1894 			&tbl->it_group_list, struct iommu_table_group_link,
1895 			next);
1896 	struct pnv_ioda_pe *pe = container_of(tgl->table_group,
1897 			struct pnv_ioda_pe, table_group);
1898 	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
1899 	unsigned long start, end, inc;
1900 
1901 	start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
1902 	end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
1903 			npages - 1);
1904 
1905 	/* p7ioc-style invalidation, 2 TCEs per write */
1906 	start |= (1ull << 63);
1907 	end |= (1ull << 63);
1908 	inc = 16;
1909         end |= inc - 1;	/* round up end to be different than start */
1910 
1911         mb(); /* Ensure above stores are visible */
1912         while (start <= end) {
1913 		if (rm)
1914 			__raw_rm_writeq_be(start, invalidate);
1915 		else
1916 			__raw_writeq_be(start, invalidate);
1917 
1918                 start += inc;
1919         }
1920 
1921 	/*
1922 	 * The iommu layer will do another mb() for us on build()
1923 	 * and we don't care on free()
1924 	 */
1925 }
1926 
1927 static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
1928 		long npages, unsigned long uaddr,
1929 		enum dma_data_direction direction,
1930 		unsigned long attrs)
1931 {
1932 	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
1933 			attrs);
1934 
1935 	if (!ret)
1936 		pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
1937 
1938 	return ret;
1939 }
1940 
1941 #ifdef CONFIG_IOMMU_API
1942 static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
1943 		unsigned long *hpa, enum dma_data_direction *direction)
1944 {
1945 	long ret = pnv_tce_xchg(tbl, index, hpa, direction, true);
1946 
1947 	if (!ret)
1948 		pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, false);
1949 
1950 	return ret;
1951 }
1952 
1953 static int pnv_ioda1_tce_xchg_rm(struct iommu_table *tbl, long index,
1954 		unsigned long *hpa, enum dma_data_direction *direction)
1955 {
1956 	long ret = pnv_tce_xchg(tbl, index, hpa, direction, false);
1957 
1958 	if (!ret)
1959 		pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, true);
1960 
1961 	return ret;
1962 }
1963 #endif
1964 
1965 static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
1966 		long npages)
1967 {
1968 	pnv_tce_free(tbl, index, npages);
1969 
1970 	pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
1971 }
1972 
1973 static struct iommu_table_ops pnv_ioda1_iommu_ops = {
1974 	.set = pnv_ioda1_tce_build,
1975 #ifdef CONFIG_IOMMU_API
1976 	.exchange = pnv_ioda1_tce_xchg,
1977 	.exchange_rm = pnv_ioda1_tce_xchg_rm,
1978 	.useraddrptr = pnv_tce_useraddrptr,
1979 #endif
1980 	.clear = pnv_ioda1_tce_free,
1981 	.get = pnv_tce_get,
1982 };
1983 
1984 #define PHB3_TCE_KILL_INVAL_ALL		PPC_BIT(0)
1985 #define PHB3_TCE_KILL_INVAL_PE		PPC_BIT(1)
1986 #define PHB3_TCE_KILL_INVAL_ONE		PPC_BIT(2)
1987 
1988 static void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
1989 {
1990 	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(phb, rm);
1991 	const unsigned long val = PHB3_TCE_KILL_INVAL_ALL;
1992 
1993 	mb(); /* Ensure previous TCE table stores are visible */
1994 	if (rm)
1995 		__raw_rm_writeq_be(val, invalidate);
1996 	else
1997 		__raw_writeq_be(val, invalidate);
1998 }
1999 
2000 static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
2001 {
2002 	/* 01xb - invalidate TCEs that match the specified PE# */
2003 	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
2004 	unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);
2005 
2006 	mb(); /* Ensure above stores are visible */
2007 	__raw_writeq_be(val, invalidate);
2008 }
2009 
2010 static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
2011 					unsigned shift, unsigned long index,
2012 					unsigned long npages)
2013 {
2014 	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
2015 	unsigned long start, end, inc;
2016 
2017 	/* We'll invalidate DMA address in PE scope */
2018 	start = PHB3_TCE_KILL_INVAL_ONE;
2019 	start |= (pe->pe_number & 0xFF);
2020 	end = start;
2021 
2022 	/* Figure out the start, end and step */
2023 	start |= (index << shift);
2024 	end |= ((index + npages - 1) << shift);
2025 	inc = (0x1ull << shift);
2026 	mb();
2027 
2028 	while (start <= end) {
2029 		if (rm)
2030 			__raw_rm_writeq_be(start, invalidate);
2031 		else
2032 			__raw_writeq_be(start, invalidate);
2033 		start += inc;
2034 	}
2035 }
2036 
2037 static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
2038 {
2039 	struct pnv_phb *phb = pe->phb;
2040 
2041 	if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
2042 		pnv_pci_phb3_tce_invalidate_pe(pe);
2043 	else
2044 		opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PE,
2045 				  pe->pe_number, 0, 0, 0);
2046 }
2047 
2048 static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
2049 		unsigned long index, unsigned long npages, bool rm)
2050 {
2051 	struct iommu_table_group_link *tgl;
2052 
2053 	list_for_each_entry_lockless(tgl, &tbl->it_group_list, next) {
2054 		struct pnv_ioda_pe *pe = container_of(tgl->table_group,
2055 				struct pnv_ioda_pe, table_group);
2056 		struct pnv_phb *phb = pe->phb;
2057 		unsigned int shift = tbl->it_page_shift;
2058 
2059 		/*
2060 		 * NVLink1 can use the TCE kill register directly as
2061 		 * it's the same as PHB3. NVLink2 is different and
2062 		 * should go via the OPAL call.
2063 		 */
2064 		if (phb->model == PNV_PHB_MODEL_NPU) {
2065 			/*
2066 			 * The NVLink hardware does not support TCE kill
2067 			 * per TCE entry so we have to invalidate
2068 			 * the entire cache for it.
2069 			 */
2070 			pnv_pci_phb3_tce_invalidate_entire(phb, rm);
2071 			continue;
2072 		}
2073 		if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
2074 			pnv_pci_phb3_tce_invalidate(pe, rm, shift,
2075 						    index, npages);
2076 		else
2077 			opal_pci_tce_kill(phb->opal_id,
2078 					  OPAL_PCI_TCE_KILL_PAGES,
2079 					  pe->pe_number, 1u << shift,
2080 					  index << shift, npages);
2081 	}
2082 }
2083 
2084 void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
2085 {
2086 	if (phb->model == PNV_PHB_MODEL_NPU || phb->model == PNV_PHB_MODEL_PHB3)
2087 		pnv_pci_phb3_tce_invalidate_entire(phb, rm);
2088 	else
2089 		opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL, 0, 0, 0, 0);
2090 }
2091 
2092 static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
2093 		long npages, unsigned long uaddr,
2094 		enum dma_data_direction direction,
2095 		unsigned long attrs)
2096 {
2097 	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
2098 			attrs);
2099 
2100 	if (!ret)
2101 		pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
2102 
2103 	return ret;
2104 }
2105 
2106 #ifdef CONFIG_IOMMU_API
2107 static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
2108 		unsigned long *hpa, enum dma_data_direction *direction)
2109 {
2110 	long ret = pnv_tce_xchg(tbl, index, hpa, direction, true);
2111 
2112 	if (!ret)
2113 		pnv_pci_ioda2_tce_invalidate(tbl, index, 1, false);
2114 
2115 	return ret;
2116 }
2117 
2118 static int pnv_ioda2_tce_xchg_rm(struct iommu_table *tbl, long index,
2119 		unsigned long *hpa, enum dma_data_direction *direction)
2120 {
2121 	long ret = pnv_tce_xchg(tbl, index, hpa, direction, false);
2122 
2123 	if (!ret)
2124 		pnv_pci_ioda2_tce_invalidate(tbl, index, 1, true);
2125 
2126 	return ret;
2127 }
2128 #endif
2129 
2130 static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
2131 		long npages)
2132 {
2133 	pnv_tce_free(tbl, index, npages);
2134 
2135 	pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
2136 }
2137 
2138 static struct iommu_table_ops pnv_ioda2_iommu_ops = {
2139 	.set = pnv_ioda2_tce_build,
2140 #ifdef CONFIG_IOMMU_API
2141 	.exchange = pnv_ioda2_tce_xchg,
2142 	.exchange_rm = pnv_ioda2_tce_xchg_rm,
2143 	.useraddrptr = pnv_tce_useraddrptr,
2144 #endif
2145 	.clear = pnv_ioda2_tce_free,
2146 	.get = pnv_tce_get,
2147 	.free = pnv_pci_ioda2_table_free_pages,
2148 };
2149 
2150 static int pnv_pci_ioda_dev_dma_weight(struct pci_dev *dev, void *data)
2151 {
2152 	unsigned int *weight = (unsigned int *)data;
2153 
2154 	/* This is quite simplistic. The "base" weight of a device
2155 	 * is 10. 0 means no DMA is to be accounted for it.
2156 	 */
2157 	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
2158 		return 0;
2159 
2160 	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
2161 	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
2162 	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
2163 		*weight += 3;
2164 	else if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
2165 		*weight += 15;
2166 	else
2167 		*weight += 10;
2168 
2169 	return 0;
2170 }
2171 
2172 static unsigned int pnv_pci_ioda_pe_dma_weight(struct pnv_ioda_pe *pe)
2173 {
2174 	unsigned int weight = 0;
2175 
2176 	/* SRIOV VF has same DMA32 weight as its PF */
2177 #ifdef CONFIG_PCI_IOV
2178 	if ((pe->flags & PNV_IODA_PE_VF) && pe->parent_dev) {
2179 		pnv_pci_ioda_dev_dma_weight(pe->parent_dev, &weight);
2180 		return weight;
2181 	}
2182 #endif
2183 
2184 	if ((pe->flags & PNV_IODA_PE_DEV) && pe->pdev) {
2185 		pnv_pci_ioda_dev_dma_weight(pe->pdev, &weight);
2186 	} else if ((pe->flags & PNV_IODA_PE_BUS) && pe->pbus) {
2187 		struct pci_dev *pdev;
2188 
2189 		list_for_each_entry(pdev, &pe->pbus->devices, bus_list)
2190 			pnv_pci_ioda_dev_dma_weight(pdev, &weight);
2191 	} else if ((pe->flags & PNV_IODA_PE_BUS_ALL) && pe->pbus) {
2192 		pci_walk_bus(pe->pbus, pnv_pci_ioda_dev_dma_weight, &weight);
2193 	}
2194 
2195 	return weight;
2196 }
2197 
2198 static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
2199 				       struct pnv_ioda_pe *pe)
2200 {
2201 
2202 	struct page *tce_mem = NULL;
2203 	struct iommu_table *tbl;
2204 	unsigned int weight, total_weight = 0;
2205 	unsigned int tce32_segsz, base, segs, avail, i;
2206 	int64_t rc;
2207 	void *addr;
2208 
2209 	/* XXX FIXME: Handle 64-bit only DMA devices */
2210 	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
2211 	/* XXX FIXME: Allocate multi-level tables on PHB3 */
2212 	weight = pnv_pci_ioda_pe_dma_weight(pe);
2213 	if (!weight)
2214 		return;
2215 
2216 	pci_walk_bus(phb->hose->bus, pnv_pci_ioda_dev_dma_weight,
2217 		     &total_weight);
2218 	segs = (weight * phb->ioda.dma32_count) / total_weight;
2219 	if (!segs)
2220 		segs = 1;
2221 
2222 	/*
2223 	 * Allocate contiguous DMA32 segments. We begin with the expected
2224 	 * number of segments. With one more attempt, the number of DMA32
2225 	 * segments to be allocated is decreased by one until one segment
2226 	 * is allocated successfully.
2227 	 */
2228 	do {
2229 		for (base = 0; base <= phb->ioda.dma32_count - segs; base++) {
2230 			for (avail = 0, i = base; i < base + segs; i++) {
2231 				if (phb->ioda.dma32_segmap[i] ==
2232 				    IODA_INVALID_PE)
2233 					avail++;
2234 			}
2235 
2236 			if (avail == segs)
2237 				goto found;
2238 		}
2239 	} while (--segs);
2240 
2241 	if (!segs) {
2242 		pe_warn(pe, "No available DMA32 segments\n");
2243 		return;
2244 	}
2245 
2246 found:
2247 	tbl = pnv_pci_table_alloc(phb->hose->node);
2248 	if (WARN_ON(!tbl))
2249 		return;
2250 
2251 	iommu_register_group(&pe->table_group, phb->hose->global_number,
2252 			pe->pe_number);
2253 	pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);
2254 
2255 	/* Grab a 32-bit TCE table */
2256 	pe_info(pe, "DMA weight %d (%d), assigned (%d) %d DMA32 segments\n",
2257 		weight, total_weight, base, segs);
2258 	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
2259 		base * PNV_IODA1_DMA32_SEGSIZE,
2260 		(base + segs) * PNV_IODA1_DMA32_SEGSIZE - 1);
2261 
2262 	/* XXX Currently, we allocate one big contiguous table for the
2263 	 * TCEs. We only really need one chunk per 256M of TCE space
2264 	 * (ie per segment) but that's an optimization for later, it
2265 	 * requires some added smarts with our get/put_tce implementation
2266 	 *
2267 	 * Each TCE page is 4KB in size and each TCE entry occupies 8
2268 	 * bytes
2269 	 */
2270 	tce32_segsz = PNV_IODA1_DMA32_SEGSIZE >> (IOMMU_PAGE_SHIFT_4K - 3);
2271 	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
2272 				   get_order(tce32_segsz * segs));
2273 	if (!tce_mem) {
2274 		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
2275 		goto fail;
2276 	}
2277 	addr = page_address(tce_mem);
2278 	memset(addr, 0, tce32_segsz * segs);
2279 
2280 	/* Configure HW */
2281 	for (i = 0; i < segs; i++) {
2282 		rc = opal_pci_map_pe_dma_window(phb->opal_id,
2283 					      pe->pe_number,
2284 					      base + i, 1,
2285 					      __pa(addr) + tce32_segsz * i,
2286 					      tce32_segsz, IOMMU_PAGE_SIZE_4K);
2287 		if (rc) {
2288 			pe_err(pe, " Failed to configure 32-bit TCE table, err %lld\n",
2289 			       rc);
2290 			goto fail;
2291 		}
2292 	}
2293 
2294 	/* Setup DMA32 segment mapping */
2295 	for (i = base; i < base + segs; i++)
2296 		phb->ioda.dma32_segmap[i] = pe->pe_number;
2297 
2298 	/* Setup linux iommu table */
2299 	pnv_pci_setup_iommu_table(tbl, addr, tce32_segsz * segs,
2300 				  base * PNV_IODA1_DMA32_SEGSIZE,
2301 				  IOMMU_PAGE_SHIFT_4K);
2302 
2303 	tbl->it_ops = &pnv_ioda1_iommu_ops;
2304 	pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
2305 	pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
2306 	iommu_init_table(tbl, phb->hose->node, 0, 0);
2307 
2308 	if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
2309 		pnv_ioda_setup_bus_dma(pe, pe->pbus);
2310 
2311 	return;
2312  fail:
2313 	/* XXX Failure: Try to fallback to 64-bit only ? */
2314 	if (tce_mem)
2315 		__free_pages(tce_mem, get_order(tce32_segsz * segs));
2316 	if (tbl) {
2317 		pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
2318 		iommu_tce_table_put(tbl);
2319 	}
2320 }
2321 
2322 static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
2323 		int num, struct iommu_table *tbl)
2324 {
2325 	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2326 			table_group);
2327 	struct pnv_phb *phb = pe->phb;
2328 	int64_t rc;
2329 	const unsigned long size = tbl->it_indirect_levels ?
2330 			tbl->it_level_size : tbl->it_size;
2331 	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
2332 	const __u64 win_size = tbl->it_size << tbl->it_page_shift;
2333 
2334 	pe_info(pe, "Setting up window#%d %llx..%llx pg=%lx\n",
2335 		num, start_addr, start_addr + win_size - 1,
2336 		IOMMU_PAGE_SIZE(tbl));
2337 
2338 	/*
2339 	 * Map TCE table through TVT. The TVE index is the PE number
2340 	 * shifted by 1 bit for 32-bits DMA space.
2341 	 */
2342 	rc = opal_pci_map_pe_dma_window(phb->opal_id,
2343 			pe->pe_number,
2344 			(pe->pe_number << 1) + num,
2345 			tbl->it_indirect_levels + 1,
2346 			__pa(tbl->it_base),
2347 			size << 3,
2348 			IOMMU_PAGE_SIZE(tbl));
2349 	if (rc) {
2350 		pe_err(pe, "Failed to configure TCE table, err %lld\n", rc);
2351 		return rc;
2352 	}
2353 
2354 	pnv_pci_link_table_and_group(phb->hose->node, num,
2355 			tbl, &pe->table_group);
2356 	pnv_pci_ioda2_tce_invalidate_pe(pe);
2357 
2358 	return 0;
2359 }
2360 
2361 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
2362 {
2363 	uint16_t window_id = (pe->pe_number << 1 ) + 1;
2364 	int64_t rc;
2365 
2366 	pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
2367 	if (enable) {
2368 		phys_addr_t top = memblock_end_of_DRAM();
2369 
2370 		top = roundup_pow_of_two(top);
2371 		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
2372 						     pe->pe_number,
2373 						     window_id,
2374 						     pe->tce_bypass_base,
2375 						     top);
2376 	} else {
2377 		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
2378 						     pe->pe_number,
2379 						     window_id,
2380 						     pe->tce_bypass_base,
2381 						     0);
2382 	}
2383 	if (rc)
2384 		pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
2385 	else
2386 		pe->tce_bypass_enabled = enable;
2387 }
2388 
2389 static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
2390 		int num, __u32 page_shift, __u64 window_size, __u32 levels,
2391 		bool alloc_userspace_copy, struct iommu_table **ptbl)
2392 {
2393 	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2394 			table_group);
2395 	int nid = pe->phb->hose->node;
2396 	__u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start;
2397 	long ret;
2398 	struct iommu_table *tbl;
2399 
2400 	tbl = pnv_pci_table_alloc(nid);
2401 	if (!tbl)
2402 		return -ENOMEM;
2403 
2404 	tbl->it_ops = &pnv_ioda2_iommu_ops;
2405 
2406 	ret = pnv_pci_ioda2_table_alloc_pages(nid,
2407 			bus_offset, page_shift, window_size,
2408 			levels, alloc_userspace_copy, tbl);
2409 	if (ret) {
2410 		iommu_tce_table_put(tbl);
2411 		return ret;
2412 	}
2413 
2414 	*ptbl = tbl;
2415 
2416 	return 0;
2417 }
2418 
2419 static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
2420 {
2421 	struct iommu_table *tbl = NULL;
2422 	long rc;
2423 	unsigned long res_start, res_end;
2424 
2425 	/*
2426 	 * crashkernel= specifies the kdump kernel's maximum memory at
2427 	 * some offset and there is no guaranteed the result is a power
2428 	 * of 2, which will cause errors later.
2429 	 */
2430 	const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max());
2431 
2432 	/*
2433 	 * In memory constrained environments, e.g. kdump kernel, the
2434 	 * DMA window can be larger than available memory, which will
2435 	 * cause errors later.
2436 	 */
2437 	const u64 maxblock = 1UL << (PAGE_SHIFT + MAX_ORDER - 1);
2438 
2439 	/*
2440 	 * We create the default window as big as we can. The constraint is
2441 	 * the max order of allocation possible. The TCE table is likely to
2442 	 * end up being multilevel and with on-demand allocation in place,
2443 	 * the initial use is not going to be huge as the default window aims
2444 	 * to support crippled devices (i.e. not fully 64bit DMAble) only.
2445 	 */
2446 	/* iommu_table::it_map uses 1 bit per IOMMU page, hence 8 */
2447 	const u64 window_size = min((maxblock * 8) << PAGE_SHIFT, max_memory);
2448 	/* Each TCE level cannot exceed maxblock so go multilevel if needed */
2449 	unsigned long tces_order = ilog2(window_size >> PAGE_SHIFT);
2450 	unsigned long tcelevel_order = ilog2(maxblock >> 3);
2451 	unsigned int levels = tces_order / tcelevel_order;
2452 
2453 	if (tces_order % tcelevel_order)
2454 		levels += 1;
2455 	/*
2456 	 * We try to stick to default levels (which is >1 at the moment) in
2457 	 * order to save memory by relying on on-demain TCE level allocation.
2458 	 */
2459 	levels = max_t(unsigned int, levels, POWERNV_IOMMU_DEFAULT_LEVELS);
2460 
2461 	rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, PAGE_SHIFT,
2462 			window_size, levels, false, &tbl);
2463 	if (rc) {
2464 		pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
2465 				rc);
2466 		return rc;
2467 	}
2468 
2469 	/* We use top part of 32bit space for MMIO so exclude it from DMA */
2470 	res_start = 0;
2471 	res_end = 0;
2472 	if (window_size > pe->phb->ioda.m32_pci_base) {
2473 		res_start = pe->phb->ioda.m32_pci_base >> tbl->it_page_shift;
2474 		res_end = min(window_size, SZ_4G) >> tbl->it_page_shift;
2475 	}
2476 	iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end);
2477 
2478 	rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
2479 	if (rc) {
2480 		pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
2481 				rc);
2482 		iommu_tce_table_put(tbl);
2483 		return rc;
2484 	}
2485 
2486 	if (!pnv_iommu_bypass_disabled)
2487 		pnv_pci_ioda2_set_bypass(pe, true);
2488 
2489 	/*
2490 	 * Set table base for the case of IOMMU DMA use. Usually this is done
2491 	 * from dma_dev_setup() which is not called when a device is returned
2492 	 * from VFIO so do it here.
2493 	 */
2494 	if (pe->pdev)
2495 		set_iommu_table_base(&pe->pdev->dev, tbl);
2496 
2497 	return 0;
2498 }
2499 
2500 #if defined(CONFIG_IOMMU_API) || defined(CONFIG_PCI_IOV)
2501 static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
2502 		int num)
2503 {
2504 	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2505 			table_group);
2506 	struct pnv_phb *phb = pe->phb;
2507 	long ret;
2508 
2509 	pe_info(pe, "Removing DMA window #%d\n", num);
2510 
2511 	ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
2512 			(pe->pe_number << 1) + num,
2513 			0/* levels */, 0/* table address */,
2514 			0/* table size */, 0/* page size */);
2515 	if (ret)
2516 		pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
2517 	else
2518 		pnv_pci_ioda2_tce_invalidate_pe(pe);
2519 
2520 	pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
2521 
2522 	return ret;
2523 }
2524 #endif
2525 
2526 #ifdef CONFIG_IOMMU_API
2527 unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
2528 		__u64 window_size, __u32 levels)
2529 {
2530 	unsigned long bytes = 0;
2531 	const unsigned window_shift = ilog2(window_size);
2532 	unsigned entries_shift = window_shift - page_shift;
2533 	unsigned table_shift = entries_shift + 3;
2534 	unsigned long tce_table_size = max(0x1000UL, 1UL << table_shift);
2535 	unsigned long direct_table_size;
2536 
2537 	if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS) ||
2538 			!is_power_of_2(window_size))
2539 		return 0;
2540 
2541 	/* Calculate a direct table size from window_size and levels */
2542 	entries_shift = (entries_shift + levels - 1) / levels;
2543 	table_shift = entries_shift + 3;
2544 	table_shift = max_t(unsigned, table_shift, PAGE_SHIFT);
2545 	direct_table_size =  1UL << table_shift;
2546 
2547 	for ( ; levels; --levels) {
2548 		bytes += _ALIGN_UP(tce_table_size, direct_table_size);
2549 
2550 		tce_table_size /= direct_table_size;
2551 		tce_table_size <<= 3;
2552 		tce_table_size = max_t(unsigned long,
2553 				tce_table_size, direct_table_size);
2554 	}
2555 
2556 	return bytes + bytes; /* one for HW table, one for userspace copy */
2557 }
2558 
2559 static long pnv_pci_ioda2_create_table_userspace(
2560 		struct iommu_table_group *table_group,
2561 		int num, __u32 page_shift, __u64 window_size, __u32 levels,
2562 		struct iommu_table **ptbl)
2563 {
2564 	long ret = pnv_pci_ioda2_create_table(table_group,
2565 			num, page_shift, window_size, levels, true, ptbl);
2566 
2567 	if (!ret)
2568 		(*ptbl)->it_allocated_size = pnv_pci_ioda2_get_table_size(
2569 				page_shift, window_size, levels);
2570 	return ret;
2571 }
2572 
2573 static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
2574 {
2575 	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2576 						table_group);
2577 	/* Store @tbl as pnv_pci_ioda2_unset_window() resets it */
2578 	struct iommu_table *tbl = pe->table_group.tables[0];
2579 
2580 	pnv_pci_ioda2_set_bypass(pe, false);
2581 	pnv_pci_ioda2_unset_window(&pe->table_group, 0);
2582 	if (pe->pbus)
2583 		pnv_ioda_setup_bus_dma(pe, pe->pbus);
2584 	else if (pe->pdev)
2585 		set_iommu_table_base(&pe->pdev->dev, NULL);
2586 	iommu_tce_table_put(tbl);
2587 }
2588 
2589 static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
2590 {
2591 	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2592 						table_group);
2593 
2594 	pnv_pci_ioda2_setup_default_config(pe);
2595 	if (pe->pbus)
2596 		pnv_ioda_setup_bus_dma(pe, pe->pbus);
2597 }
2598 
2599 static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
2600 	.get_table_size = pnv_pci_ioda2_get_table_size,
2601 	.create_table = pnv_pci_ioda2_create_table_userspace,
2602 	.set_window = pnv_pci_ioda2_set_window,
2603 	.unset_window = pnv_pci_ioda2_unset_window,
2604 	.take_ownership = pnv_ioda2_take_ownership,
2605 	.release_ownership = pnv_ioda2_release_ownership,
2606 };
2607 
2608 static void pnv_ioda_setup_bus_iommu_group_add_devices(struct pnv_ioda_pe *pe,
2609 		struct iommu_table_group *table_group,
2610 		struct pci_bus *bus)
2611 {
2612 	struct pci_dev *dev;
2613 
2614 	list_for_each_entry(dev, &bus->devices, bus_list) {
2615 		iommu_add_device(table_group, &dev->dev);
2616 
2617 		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
2618 			pnv_ioda_setup_bus_iommu_group_add_devices(pe,
2619 					table_group, dev->subordinate);
2620 	}
2621 }
2622 
2623 static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe,
2624 		struct iommu_table_group *table_group, struct pci_bus *bus)
2625 {
2626 
2627 	if (pe->flags & PNV_IODA_PE_DEV)
2628 		iommu_add_device(table_group, &pe->pdev->dev);
2629 
2630 	if ((pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) || bus)
2631 		pnv_ioda_setup_bus_iommu_group_add_devices(pe, table_group,
2632 				bus);
2633 }
2634 
2635 static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb);
2636 
2637 static void pnv_pci_ioda_setup_iommu_api(void)
2638 {
2639 	struct pci_controller *hose;
2640 	struct pnv_phb *phb;
2641 	struct pnv_ioda_pe *pe;
2642 
2643 	/*
2644 	 * There are 4 types of PEs:
2645 	 * - PNV_IODA_PE_BUS: a downstream port with an adapter,
2646 	 *   created from pnv_pci_setup_bridge();
2647 	 * - PNV_IODA_PE_BUS_ALL: a PCI-PCIX bridge with devices behind it,
2648 	 *   created from pnv_pci_setup_bridge();
2649 	 * - PNV_IODA_PE_VF: a SRIOV virtual function,
2650 	 *   created from pnv_pcibios_sriov_enable();
2651 	 * - PNV_IODA_PE_DEV: an NPU or OCAPI device,
2652 	 *   created from pnv_pci_ioda_fixup().
2653 	 *
2654 	 * Normally a PE is represented by an IOMMU group, however for
2655 	 * devices with side channels the groups need to be more strict.
2656 	 */
2657 	list_for_each_entry(hose, &hose_list, list_node) {
2658 		phb = hose->private_data;
2659 
2660 		if (phb->type == PNV_PHB_NPU_NVLINK ||
2661 		    phb->type == PNV_PHB_NPU_OCAPI)
2662 			continue;
2663 
2664 		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
2665 			struct iommu_table_group *table_group;
2666 
2667 			table_group = pnv_try_setup_npu_table_group(pe);
2668 			if (!table_group) {
2669 				if (!pnv_pci_ioda_pe_dma_weight(pe))
2670 					continue;
2671 
2672 				table_group = &pe->table_group;
2673 				iommu_register_group(&pe->table_group,
2674 						pe->phb->hose->global_number,
2675 						pe->pe_number);
2676 			}
2677 			pnv_ioda_setup_bus_iommu_group(pe, table_group,
2678 					pe->pbus);
2679 		}
2680 	}
2681 
2682 	/*
2683 	 * Now we have all PHBs discovered, time to add NPU devices to
2684 	 * the corresponding IOMMU groups.
2685 	 */
2686 	list_for_each_entry(hose, &hose_list, list_node) {
2687 		unsigned long  pgsizes;
2688 
2689 		phb = hose->private_data;
2690 
2691 		if (phb->type != PNV_PHB_NPU_NVLINK)
2692 			continue;
2693 
2694 		pgsizes = pnv_ioda_parse_tce_sizes(phb);
2695 		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
2696 			/*
2697 			 * IODA2 bridges get this set up from
2698 			 * pci_controller_ops::setup_bridge but NPU bridges
2699 			 * do not have this hook defined so we do it here.
2700 			 */
2701 			pe->table_group.pgsizes = pgsizes;
2702 			pnv_npu_compound_attach(pe);
2703 		}
2704 	}
2705 }
2706 #else /* !CONFIG_IOMMU_API */
2707 static void pnv_pci_ioda_setup_iommu_api(void) { };
2708 #endif
2709 
2710 static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb)
2711 {
2712 	struct pci_controller *hose = phb->hose;
2713 	struct device_node *dn = hose->dn;
2714 	unsigned long mask = 0;
2715 	int i, rc, count;
2716 	u32 val;
2717 
2718 	count = of_property_count_u32_elems(dn, "ibm,supported-tce-sizes");
2719 	if (count <= 0) {
2720 		mask = SZ_4K | SZ_64K;
2721 		/* Add 16M for POWER8 by default */
2722 		if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
2723 				!cpu_has_feature(CPU_FTR_ARCH_300))
2724 			mask |= SZ_16M | SZ_256M;
2725 		return mask;
2726 	}
2727 
2728 	for (i = 0; i < count; i++) {
2729 		rc = of_property_read_u32_index(dn, "ibm,supported-tce-sizes",
2730 						i, &val);
2731 		if (rc == 0)
2732 			mask |= 1ULL << val;
2733 	}
2734 
2735 	return mask;
2736 }
2737 
2738 static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
2739 				       struct pnv_ioda_pe *pe)
2740 {
2741 	int64_t rc;
2742 
2743 	if (!pnv_pci_ioda_pe_dma_weight(pe))
2744 		return;
2745 
2746 	/* TVE #1 is selected by PCI address bit 59 */
2747 	pe->tce_bypass_base = 1ull << 59;
2748 
2749 	/* The PE will reserve all possible 32-bits space */
2750 	pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
2751 		phb->ioda.m32_pci_base);
2752 
2753 	/* Setup linux iommu table */
2754 	pe->table_group.tce32_start = 0;
2755 	pe->table_group.tce32_size = phb->ioda.m32_pci_base;
2756 	pe->table_group.max_dynamic_windows_supported =
2757 			IOMMU_TABLE_GROUP_MAX_TABLES;
2758 	pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS;
2759 	pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb);
2760 #ifdef CONFIG_IOMMU_API
2761 	pe->table_group.ops = &pnv_pci_ioda2_ops;
2762 #endif
2763 
2764 	rc = pnv_pci_ioda2_setup_default_config(pe);
2765 	if (rc)
2766 		return;
2767 
2768 	if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
2769 		pnv_ioda_setup_bus_dma(pe, pe->pbus);
2770 }
2771 
2772 int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq)
2773 {
2774 	struct pnv_phb *phb = container_of(chip, struct pnv_phb,
2775 					   ioda.irq_chip);
2776 
2777 	return opal_pci_msi_eoi(phb->opal_id, hw_irq);
2778 }
2779 
2780 static void pnv_ioda2_msi_eoi(struct irq_data *d)
2781 {
2782 	int64_t rc;
2783 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
2784 	struct irq_chip *chip = irq_data_get_irq_chip(d);
2785 
2786 	rc = pnv_opal_pci_msi_eoi(chip, hw_irq);
2787 	WARN_ON_ONCE(rc);
2788 
2789 	icp_native_eoi(d);
2790 }
2791 
2792 
2793 void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
2794 {
2795 	struct irq_data *idata;
2796 	struct irq_chip *ichip;
2797 
2798 	/* The MSI EOI OPAL call is only needed on PHB3 */
2799 	if (phb->model != PNV_PHB_MODEL_PHB3)
2800 		return;
2801 
2802 	if (!phb->ioda.irq_chip_init) {
2803 		/*
2804 		 * First time we setup an MSI IRQ, we need to setup the
2805 		 * corresponding IRQ chip to route correctly.
2806 		 */
2807 		idata = irq_get_irq_data(virq);
2808 		ichip = irq_data_get_irq_chip(idata);
2809 		phb->ioda.irq_chip_init = 1;
2810 		phb->ioda.irq_chip = *ichip;
2811 		phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
2812 	}
2813 	irq_set_chip(virq, &phb->ioda.irq_chip);
2814 }
2815 
2816 /*
2817  * Returns true iff chip is something that we could call
2818  * pnv_opal_pci_msi_eoi for.
2819  */
2820 bool is_pnv_opal_msi(struct irq_chip *chip)
2821 {
2822 	return chip->irq_eoi == pnv_ioda2_msi_eoi;
2823 }
2824 EXPORT_SYMBOL_GPL(is_pnv_opal_msi);
2825 
2826 static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
2827 				  unsigned int hwirq, unsigned int virq,
2828 				  unsigned int is_64, struct msi_msg *msg)
2829 {
2830 	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
2831 	unsigned int xive_num = hwirq - phb->msi_base;
2832 	__be32 data;
2833 	int rc;
2834 
2835 	/* No PE assigned ? bail out ... no MSI for you ! */
2836 	if (pe == NULL)
2837 		return -ENXIO;
2838 
2839 	/* Check if we have an MVE */
2840 	if (pe->mve_number < 0)
2841 		return -ENXIO;
2842 
2843 	/* Force 32-bit MSI on some broken devices */
2844 	if (dev->no_64bit_msi)
2845 		is_64 = 0;
2846 
2847 	/* Assign XIVE to PE */
2848 	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
2849 	if (rc) {
2850 		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
2851 			pci_name(dev), rc, xive_num);
2852 		return -EIO;
2853 	}
2854 
2855 	if (is_64) {
2856 		__be64 addr64;
2857 
2858 		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
2859 				     &addr64, &data);
2860 		if (rc) {
2861 			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
2862 				pci_name(dev), rc);
2863 			return -EIO;
2864 		}
2865 		msg->address_hi = be64_to_cpu(addr64) >> 32;
2866 		msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
2867 	} else {
2868 		__be32 addr32;
2869 
2870 		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
2871 				     &addr32, &data);
2872 		if (rc) {
2873 			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
2874 				pci_name(dev), rc);
2875 			return -EIO;
2876 		}
2877 		msg->address_hi = 0;
2878 		msg->address_lo = be32_to_cpu(addr32);
2879 	}
2880 	msg->data = be32_to_cpu(data);
2881 
2882 	pnv_set_msi_irq_chip(phb, virq);
2883 
2884 	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
2885 		 " address=%x_%08x data=%x PE# %x\n",
2886 		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
2887 		 msg->address_hi, msg->address_lo, data, pe->pe_number);
2888 
2889 	return 0;
2890 }
2891 
2892 static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
2893 {
2894 	unsigned int count;
2895 	const __be32 *prop = of_get_property(phb->hose->dn,
2896 					     "ibm,opal-msi-ranges", NULL);
2897 	if (!prop) {
2898 		/* BML Fallback */
2899 		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
2900 	}
2901 	if (!prop)
2902 		return;
2903 
2904 	phb->msi_base = be32_to_cpup(prop);
2905 	count = be32_to_cpup(prop + 1);
2906 	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
2907 		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
2908 		       phb->hose->global_number);
2909 		return;
2910 	}
2911 
2912 	phb->msi_setup = pnv_pci_ioda_msi_setup;
2913 	phb->msi32_support = 1;
2914 	pr_info("  Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
2915 		count, phb->msi_base);
2916 }
2917 
2918 #ifdef CONFIG_PCI_IOV
2919 static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
2920 {
2921 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
2922 	struct pnv_phb *phb = hose->private_data;
2923 	const resource_size_t gate = phb->ioda.m64_segsize >> 2;
2924 	struct resource *res;
2925 	int i;
2926 	resource_size_t size, total_vf_bar_sz;
2927 	struct pci_dn *pdn;
2928 	int mul, total_vfs;
2929 
2930 	if (!pdev->is_physfn || pci_dev_is_added(pdev))
2931 		return;
2932 
2933 	pdn = pci_get_pdn(pdev);
2934 	pdn->vfs_expanded = 0;
2935 	pdn->m64_single_mode = false;
2936 
2937 	total_vfs = pci_sriov_get_totalvfs(pdev);
2938 	mul = phb->ioda.total_pe_num;
2939 	total_vf_bar_sz = 0;
2940 
2941 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2942 		res = &pdev->resource[i + PCI_IOV_RESOURCES];
2943 		if (!res->flags || res->parent)
2944 			continue;
2945 		if (!pnv_pci_is_m64_flags(res->flags)) {
2946 			dev_warn(&pdev->dev, "Don't support SR-IOV with"
2947 					" non M64 VF BAR%d: %pR. \n",
2948 				 i, res);
2949 			goto truncate_iov;
2950 		}
2951 
2952 		total_vf_bar_sz += pci_iov_resource_size(pdev,
2953 				i + PCI_IOV_RESOURCES);
2954 
2955 		/*
2956 		 * If bigger than quarter of M64 segment size, just round up
2957 		 * power of two.
2958 		 *
2959 		 * Generally, one M64 BAR maps one IOV BAR. To avoid conflict
2960 		 * with other devices, IOV BAR size is expanded to be
2961 		 * (total_pe * VF_BAR_size).  When VF_BAR_size is half of M64
2962 		 * segment size , the expanded size would equal to half of the
2963 		 * whole M64 space size, which will exhaust the M64 Space and
2964 		 * limit the system flexibility.  This is a design decision to
2965 		 * set the boundary to quarter of the M64 segment size.
2966 		 */
2967 		if (total_vf_bar_sz > gate) {
2968 			mul = roundup_pow_of_two(total_vfs);
2969 			dev_info(&pdev->dev,
2970 				"VF BAR Total IOV size %llx > %llx, roundup to %d VFs\n",
2971 				total_vf_bar_sz, gate, mul);
2972 			pdn->m64_single_mode = true;
2973 			break;
2974 		}
2975 	}
2976 
2977 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2978 		res = &pdev->resource[i + PCI_IOV_RESOURCES];
2979 		if (!res->flags || res->parent)
2980 			continue;
2981 
2982 		size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
2983 		/*
2984 		 * On PHB3, the minimum size alignment of M64 BAR in single
2985 		 * mode is 32MB.
2986 		 */
2987 		if (pdn->m64_single_mode && (size < SZ_32M))
2988 			goto truncate_iov;
2989 		dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
2990 		res->end = res->start + size * mul - 1;
2991 		dev_dbg(&pdev->dev, "                       %pR\n", res);
2992 		dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)",
2993 			 i, res, mul);
2994 	}
2995 	pdn->vfs_expanded = mul;
2996 
2997 	return;
2998 
2999 truncate_iov:
3000 	/* To save MMIO space, IOV BAR is truncated. */
3001 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
3002 		res = &pdev->resource[i + PCI_IOV_RESOURCES];
3003 		res->flags = 0;
3004 		res->end = res->start - 1;
3005 	}
3006 }
3007 #endif /* CONFIG_PCI_IOV */
3008 
3009 static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
3010 				  struct resource *res)
3011 {
3012 	struct pnv_phb *phb = pe->phb;
3013 	struct pci_bus_region region;
3014 	int index;
3015 	int64_t rc;
3016 
3017 	if (!res || !res->flags || res->start > res->end)
3018 		return;
3019 
3020 	if (res->flags & IORESOURCE_IO) {
3021 		region.start = res->start - phb->ioda.io_pci_base;
3022 		region.end   = res->end - phb->ioda.io_pci_base;
3023 		index = region.start / phb->ioda.io_segsize;
3024 
3025 		while (index < phb->ioda.total_pe_num &&
3026 		       region.start <= region.end) {
3027 			phb->ioda.io_segmap[index] = pe->pe_number;
3028 			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
3029 				pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
3030 			if (rc != OPAL_SUCCESS) {
3031 				pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n",
3032 				       __func__, rc, index, pe->pe_number);
3033 				break;
3034 			}
3035 
3036 			region.start += phb->ioda.io_segsize;
3037 			index++;
3038 		}
3039 	} else if ((res->flags & IORESOURCE_MEM) &&
3040 		   !pnv_pci_is_m64(phb, res)) {
3041 		region.start = res->start -
3042 			       phb->hose->mem_offset[0] -
3043 			       phb->ioda.m32_pci_base;
3044 		region.end   = res->end -
3045 			       phb->hose->mem_offset[0] -
3046 			       phb->ioda.m32_pci_base;
3047 		index = region.start / phb->ioda.m32_segsize;
3048 
3049 		while (index < phb->ioda.total_pe_num &&
3050 		       region.start <= region.end) {
3051 			phb->ioda.m32_segmap[index] = pe->pe_number;
3052 			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
3053 				pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
3054 			if (rc != OPAL_SUCCESS) {
3055 				pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x",
3056 				       __func__, rc, index, pe->pe_number);
3057 				break;
3058 			}
3059 
3060 			region.start += phb->ioda.m32_segsize;
3061 			index++;
3062 		}
3063 	}
3064 }
3065 
3066 /*
3067  * This function is supposed to be called on basis of PE from top
3068  * to bottom style. So the the I/O or MMIO segment assigned to
3069  * parent PE could be overridden by its child PEs if necessary.
3070  */
3071 static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
3072 {
3073 	struct pci_dev *pdev;
3074 	int i;
3075 
3076 	/*
3077 	 * NOTE: We only care PCI bus based PE for now. For PCI
3078 	 * device based PE, for example SRIOV sensitive VF should
3079 	 * be figured out later.
3080 	 */
3081 	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
3082 
3083 	list_for_each_entry(pdev, &pe->pbus->devices, bus_list) {
3084 		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
3085 			pnv_ioda_setup_pe_res(pe, &pdev->resource[i]);
3086 
3087 		/*
3088 		 * If the PE contains all subordinate PCI buses, the
3089 		 * windows of the child bridges should be mapped to
3090 		 * the PE as well.
3091 		 */
3092 		if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev))
3093 			continue;
3094 		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
3095 			pnv_ioda_setup_pe_res(pe,
3096 				&pdev->resource[PCI_BRIDGE_RESOURCES + i]);
3097 	}
3098 }
3099 
3100 #ifdef CONFIG_DEBUG_FS
3101 static int pnv_pci_diag_data_set(void *data, u64 val)
3102 {
3103 	struct pci_controller *hose;
3104 	struct pnv_phb *phb;
3105 	s64 ret;
3106 
3107 	if (val != 1ULL)
3108 		return -EINVAL;
3109 
3110 	hose = (struct pci_controller *)data;
3111 	if (!hose || !hose->private_data)
3112 		return -ENODEV;
3113 
3114 	phb = hose->private_data;
3115 
3116 	/* Retrieve the diag data from firmware */
3117 	ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
3118 					  phb->diag_data_size);
3119 	if (ret != OPAL_SUCCESS)
3120 		return -EIO;
3121 
3122 	/* Print the diag data to the kernel log */
3123 	pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);
3124 	return 0;
3125 }
3126 
3127 DEFINE_SIMPLE_ATTRIBUTE(pnv_pci_diag_data_fops, NULL,
3128 			pnv_pci_diag_data_set, "%llu\n");
3129 
3130 #endif /* CONFIG_DEBUG_FS */
3131 
3132 static void pnv_pci_ioda_create_dbgfs(void)
3133 {
3134 #ifdef CONFIG_DEBUG_FS
3135 	struct pci_controller *hose, *tmp;
3136 	struct pnv_phb *phb;
3137 	char name[16];
3138 
3139 	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
3140 		phb = hose->private_data;
3141 
3142 		/* Notify initialization of PHB done */
3143 		phb->initialized = 1;
3144 
3145 		sprintf(name, "PCI%04x", hose->global_number);
3146 		phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
3147 		if (!phb->dbgfs) {
3148 			pr_warn("%s: Error on creating debugfs on PHB#%x\n",
3149 				__func__, hose->global_number);
3150 			continue;
3151 		}
3152 
3153 		debugfs_create_file("dump_diag_regs", 0200, phb->dbgfs, hose,
3154 				    &pnv_pci_diag_data_fops);
3155 	}
3156 #endif /* CONFIG_DEBUG_FS */
3157 }
3158 
3159 static void pnv_pci_enable_bridge(struct pci_bus *bus)
3160 {
3161 	struct pci_dev *dev = bus->self;
3162 	struct pci_bus *child;
3163 
3164 	/* Empty bus ? bail */
3165 	if (list_empty(&bus->devices))
3166 		return;
3167 
3168 	/*
3169 	 * If there's a bridge associated with that bus enable it. This works
3170 	 * around races in the generic code if the enabling is done during
3171 	 * parallel probing. This can be removed once those races have been
3172 	 * fixed.
3173 	 */
3174 	if (dev) {
3175 		int rc = pci_enable_device(dev);
3176 		if (rc)
3177 			pci_err(dev, "Error enabling bridge (%d)\n", rc);
3178 		pci_set_master(dev);
3179 	}
3180 
3181 	/* Perform the same to child busses */
3182 	list_for_each_entry(child, &bus->children, node)
3183 		pnv_pci_enable_bridge(child);
3184 }
3185 
3186 static void pnv_pci_enable_bridges(void)
3187 {
3188 	struct pci_controller *hose;
3189 
3190 	list_for_each_entry(hose, &hose_list, list_node)
3191 		pnv_pci_enable_bridge(hose->bus);
3192 }
3193 
3194 static void pnv_pci_ioda_fixup(void)
3195 {
3196 	pnv_pci_ioda_setup_PEs();
3197 	pnv_pci_ioda_setup_iommu_api();
3198 	pnv_pci_ioda_create_dbgfs();
3199 
3200 	pnv_pci_enable_bridges();
3201 
3202 #ifdef CONFIG_EEH
3203 	pnv_eeh_post_init();
3204 #endif
3205 }
3206 
3207 /*
3208  * Returns the alignment for I/O or memory windows for P2P
3209  * bridges. That actually depends on how PEs are segmented.
3210  * For now, we return I/O or M32 segment size for PE sensitive
3211  * P2P bridges. Otherwise, the default values (4KiB for I/O,
3212  * 1MiB for memory) will be returned.
3213  *
3214  * The current PCI bus might be put into one PE, which was
3215  * create against the parent PCI bridge. For that case, we
3216  * needn't enlarge the alignment so that we can save some
3217  * resources.
3218  */
3219 static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
3220 						unsigned long type)
3221 {
3222 	struct pci_dev *bridge;
3223 	struct pci_controller *hose = pci_bus_to_host(bus);
3224 	struct pnv_phb *phb = hose->private_data;
3225 	int num_pci_bridges = 0;
3226 
3227 	bridge = bus->self;
3228 	while (bridge) {
3229 		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
3230 			num_pci_bridges++;
3231 			if (num_pci_bridges >= 2)
3232 				return 1;
3233 		}
3234 
3235 		bridge = bridge->bus->self;
3236 	}
3237 
3238 	/*
3239 	 * We fall back to M32 if M64 isn't supported. We enforce the M64
3240 	 * alignment for any 64-bit resource, PCIe doesn't care and
3241 	 * bridges only do 64-bit prefetchable anyway.
3242 	 */
3243 	if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
3244 		return phb->ioda.m64_segsize;
3245 	if (type & IORESOURCE_MEM)
3246 		return phb->ioda.m32_segsize;
3247 
3248 	return phb->ioda.io_segsize;
3249 }
3250 
3251 /*
3252  * We are updating root port or the upstream port of the
3253  * bridge behind the root port with PHB's windows in order
3254  * to accommodate the changes on required resources during
3255  * PCI (slot) hotplug, which is connected to either root
3256  * port or the downstream ports of PCIe switch behind the
3257  * root port.
3258  */
3259 static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
3260 					   unsigned long type)
3261 {
3262 	struct pci_controller *hose = pci_bus_to_host(bus);
3263 	struct pnv_phb *phb = hose->private_data;
3264 	struct pci_dev *bridge = bus->self;
3265 	struct resource *r, *w;
3266 	bool msi_region = false;
3267 	int i;
3268 
3269 	/* Check if we need apply fixup to the bridge's windows */
3270 	if (!pci_is_root_bus(bridge->bus) &&
3271 	    !pci_is_root_bus(bridge->bus->self->bus))
3272 		return;
3273 
3274 	/* Fixup the resources */
3275 	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
3276 		r = &bridge->resource[PCI_BRIDGE_RESOURCES + i];
3277 		if (!r->flags || !r->parent)
3278 			continue;
3279 
3280 		w = NULL;
3281 		if (r->flags & type & IORESOURCE_IO)
3282 			w = &hose->io_resource;
3283 		else if (pnv_pci_is_m64(phb, r) &&
3284 			 (type & IORESOURCE_PREFETCH) &&
3285 			 phb->ioda.m64_segsize)
3286 			w = &hose->mem_resources[1];
3287 		else if (r->flags & type & IORESOURCE_MEM) {
3288 			w = &hose->mem_resources[0];
3289 			msi_region = true;
3290 		}
3291 
3292 		r->start = w->start;
3293 		r->end = w->end;
3294 
3295 		/* The 64KB 32-bits MSI region shouldn't be included in
3296 		 * the 32-bits bridge window. Otherwise, we can see strange
3297 		 * issues. One of them is EEH error observed on Garrison.
3298 		 *
3299 		 * Exclude top 1MB region which is the minimal alignment of
3300 		 * 32-bits bridge window.
3301 		 */
3302 		if (msi_region) {
3303 			r->end += 0x10000;
3304 			r->end -= 0x100000;
3305 		}
3306 	}
3307 }
3308 
3309 static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
3310 {
3311 	struct pci_controller *hose = pci_bus_to_host(bus);
3312 	struct pnv_phb *phb = hose->private_data;
3313 	struct pci_dev *bridge = bus->self;
3314 	struct pnv_ioda_pe *pe;
3315 	bool all = (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);
3316 
3317 	/* Extend bridge's windows if necessary */
3318 	pnv_pci_fixup_bridge_resources(bus, type);
3319 
3320 	/* The PE for root bus should be realized before any one else */
3321 	if (!phb->ioda.root_pe_populated) {
3322 		pe = pnv_ioda_setup_bus_PE(phb->hose->bus, false);
3323 		if (pe) {
3324 			phb->ioda.root_pe_idx = pe->pe_number;
3325 			phb->ioda.root_pe_populated = true;
3326 		}
3327 	}
3328 
3329 	/* Don't assign PE to PCI bus, which doesn't have subordinate devices */
3330 	if (list_empty(&bus->devices))
3331 		return;
3332 
3333 	/* Reserve PEs according to used M64 resources */
3334 	pnv_ioda_reserve_m64_pe(bus, NULL, all);
3335 
3336 	/*
3337 	 * Assign PE. We might run here because of partial hotplug.
3338 	 * For the case, we just pick up the existing PE and should
3339 	 * not allocate resources again.
3340 	 */
3341 	pe = pnv_ioda_setup_bus_PE(bus, all);
3342 	if (!pe)
3343 		return;
3344 
3345 	pnv_ioda_setup_pe_seg(pe);
3346 	switch (phb->type) {
3347 	case PNV_PHB_IODA1:
3348 		pnv_pci_ioda1_setup_dma_pe(phb, pe);
3349 		break;
3350 	case PNV_PHB_IODA2:
3351 		pnv_pci_ioda2_setup_dma_pe(phb, pe);
3352 		break;
3353 	default:
3354 		pr_warn("%s: No DMA for PHB#%x (type %d)\n",
3355 			__func__, phb->hose->global_number, phb->type);
3356 	}
3357 }
3358 
3359 static resource_size_t pnv_pci_default_alignment(void)
3360 {
3361 	return PAGE_SIZE;
3362 }
3363 
3364 #ifdef CONFIG_PCI_IOV
3365 static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
3366 						      int resno)
3367 {
3368 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
3369 	struct pnv_phb *phb = hose->private_data;
3370 	struct pci_dn *pdn = pci_get_pdn(pdev);
3371 	resource_size_t align;
3372 
3373 	/*
3374 	 * On PowerNV platform, IOV BAR is mapped by M64 BAR to enable the
3375 	 * SR-IOV. While from hardware perspective, the range mapped by M64
3376 	 * BAR should be size aligned.
3377 	 *
3378 	 * When IOV BAR is mapped with M64 BAR in Single PE mode, the extra
3379 	 * powernv-specific hardware restriction is gone. But if just use the
3380 	 * VF BAR size as the alignment, PF BAR / VF BAR may be allocated with
3381 	 * in one segment of M64 #15, which introduces the PE conflict between
3382 	 * PF and VF. Based on this, the minimum alignment of an IOV BAR is
3383 	 * m64_segsize.
3384 	 *
3385 	 * This function returns the total IOV BAR size if M64 BAR is in
3386 	 * Shared PE mode or just VF BAR size if not.
3387 	 * If the M64 BAR is in Single PE mode, return the VF BAR size or
3388 	 * M64 segment size if IOV BAR size is less.
3389 	 */
3390 	align = pci_iov_resource_size(pdev, resno);
3391 	if (!pdn->vfs_expanded)
3392 		return align;
3393 	if (pdn->m64_single_mode)
3394 		return max(align, (resource_size_t)phb->ioda.m64_segsize);
3395 
3396 	return pdn->vfs_expanded * align;
3397 }
3398 #endif /* CONFIG_PCI_IOV */
3399 
3400 /* Prevent enabling devices for which we couldn't properly
3401  * assign a PE
3402  */
3403 static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
3404 {
3405 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
3406 	struct pnv_phb *phb = hose->private_data;
3407 	struct pci_dn *pdn;
3408 
3409 	/* The function is probably called while the PEs have
3410 	 * not be created yet. For example, resource reassignment
3411 	 * during PCI probe period. We just skip the check if
3412 	 * PEs isn't ready.
3413 	 */
3414 	if (!phb->initialized)
3415 		return true;
3416 
3417 	pdn = pci_get_pdn(dev);
3418 	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
3419 		return false;
3420 
3421 	return true;
3422 }
3423 
3424 static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group,
3425 				       int num)
3426 {
3427 	struct pnv_ioda_pe *pe = container_of(table_group,
3428 					      struct pnv_ioda_pe, table_group);
3429 	struct pnv_phb *phb = pe->phb;
3430 	unsigned int idx;
3431 	long rc;
3432 
3433 	pe_info(pe, "Removing DMA window #%d\n", num);
3434 	for (idx = 0; idx < phb->ioda.dma32_count; idx++) {
3435 		if (phb->ioda.dma32_segmap[idx] != pe->pe_number)
3436 			continue;
3437 
3438 		rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
3439 						idx, 0, 0ul, 0ul, 0ul);
3440 		if (rc != OPAL_SUCCESS) {
3441 			pe_warn(pe, "Failure %ld unmapping DMA32 segment#%d\n",
3442 				rc, idx);
3443 			return rc;
3444 		}
3445 
3446 		phb->ioda.dma32_segmap[idx] = IODA_INVALID_PE;
3447 	}
3448 
3449 	pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
3450 	return OPAL_SUCCESS;
3451 }
3452 
3453 static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
3454 {
3455 	unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
3456 	struct iommu_table *tbl = pe->table_group.tables[0];
3457 	int64_t rc;
3458 
3459 	if (!weight)
3460 		return;
3461 
3462 	rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0);
3463 	if (rc != OPAL_SUCCESS)
3464 		return;
3465 
3466 	pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false);
3467 	if (pe->table_group.group) {
3468 		iommu_group_put(pe->table_group.group);
3469 		WARN_ON(pe->table_group.group);
3470 	}
3471 
3472 	free_pages(tbl->it_base, get_order(tbl->it_size << 3));
3473 	iommu_tce_table_put(tbl);
3474 }
3475 
3476 static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
3477 {
3478 	struct iommu_table *tbl = pe->table_group.tables[0];
3479 	unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
3480 #ifdef CONFIG_IOMMU_API
3481 	int64_t rc;
3482 #endif
3483 
3484 	if (!weight)
3485 		return;
3486 
3487 #ifdef CONFIG_IOMMU_API
3488 	rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
3489 	if (rc)
3490 		pe_warn(pe, "OPAL error %lld release DMA window\n", rc);
3491 #endif
3492 
3493 	pnv_pci_ioda2_set_bypass(pe, false);
3494 	if (pe->table_group.group) {
3495 		iommu_group_put(pe->table_group.group);
3496 		WARN_ON(pe->table_group.group);
3497 	}
3498 
3499 	iommu_tce_table_put(tbl);
3500 }
3501 
3502 static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
3503 				 unsigned short win,
3504 				 unsigned int *map)
3505 {
3506 	struct pnv_phb *phb = pe->phb;
3507 	int idx;
3508 	int64_t rc;
3509 
3510 	for (idx = 0; idx < phb->ioda.total_pe_num; idx++) {
3511 		if (map[idx] != pe->pe_number)
3512 			continue;
3513 
3514 		if (win == OPAL_M64_WINDOW_TYPE)
3515 			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
3516 					phb->ioda.reserved_pe_idx, win,
3517 					idx / PNV_IODA1_M64_SEGS,
3518 					idx % PNV_IODA1_M64_SEGS);
3519 		else
3520 			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
3521 					phb->ioda.reserved_pe_idx, win, 0, idx);
3522 
3523 		if (rc != OPAL_SUCCESS)
3524 			pe_warn(pe, "Error %lld unmapping (%d) segment#%d\n",
3525 				rc, win, idx);
3526 
3527 		map[idx] = IODA_INVALID_PE;
3528 	}
3529 }
3530 
3531 static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe)
3532 {
3533 	struct pnv_phb *phb = pe->phb;
3534 
3535 	if (phb->type == PNV_PHB_IODA1) {
3536 		pnv_ioda_free_pe_seg(pe, OPAL_IO_WINDOW_TYPE,
3537 				     phb->ioda.io_segmap);
3538 		pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
3539 				     phb->ioda.m32_segmap);
3540 		pnv_ioda_free_pe_seg(pe, OPAL_M64_WINDOW_TYPE,
3541 				     phb->ioda.m64_segmap);
3542 	} else if (phb->type == PNV_PHB_IODA2) {
3543 		pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
3544 				     phb->ioda.m32_segmap);
3545 	}
3546 }
3547 
3548 static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
3549 {
3550 	struct pnv_phb *phb = pe->phb;
3551 	struct pnv_ioda_pe *slave, *tmp;
3552 
3553 	list_del(&pe->list);
3554 	switch (phb->type) {
3555 	case PNV_PHB_IODA1:
3556 		pnv_pci_ioda1_release_pe_dma(pe);
3557 		break;
3558 	case PNV_PHB_IODA2:
3559 		pnv_pci_ioda2_release_pe_dma(pe);
3560 		break;
3561 	default:
3562 		WARN_ON(1);
3563 	}
3564 
3565 	pnv_ioda_release_pe_seg(pe);
3566 	pnv_ioda_deconfigure_pe(pe->phb, pe);
3567 
3568 	/* Release slave PEs in the compound PE */
3569 	if (pe->flags & PNV_IODA_PE_MASTER) {
3570 		list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
3571 			list_del(&slave->list);
3572 			pnv_ioda_free_pe(slave);
3573 		}
3574 	}
3575 
3576 	/*
3577 	 * The PE for root bus can be removed because of hotplug in EEH
3578 	 * recovery for fenced PHB error. We need to mark the PE dead so
3579 	 * that it can be populated again in PCI hot add path. The PE
3580 	 * shouldn't be destroyed as it's the global reserved resource.
3581 	 */
3582 	if (phb->ioda.root_pe_populated &&
3583 	    phb->ioda.root_pe_idx == pe->pe_number)
3584 		phb->ioda.root_pe_populated = false;
3585 	else
3586 		pnv_ioda_free_pe(pe);
3587 }
3588 
3589 static void pnv_pci_release_device(struct pci_dev *pdev)
3590 {
3591 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
3592 	struct pnv_phb *phb = hose->private_data;
3593 	struct pci_dn *pdn = pci_get_pdn(pdev);
3594 	struct pnv_ioda_pe *pe;
3595 
3596 	if (pdev->is_virtfn)
3597 		return;
3598 
3599 	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
3600 		return;
3601 
3602 	/*
3603 	 * PCI hotplug can happen as part of EEH error recovery. The @pdn
3604 	 * isn't removed and added afterwards in this scenario. We should
3605 	 * set the PE number in @pdn to an invalid one. Otherwise, the PE's
3606 	 * device count is decreased on removing devices while failing to
3607 	 * be increased on adding devices. It leads to unbalanced PE's device
3608 	 * count and eventually make normal PCI hotplug path broken.
3609 	 */
3610 	pe = &phb->ioda.pe_array[pdn->pe_number];
3611 	pdn->pe_number = IODA_INVALID_PE;
3612 
3613 	WARN_ON(--pe->device_count < 0);
3614 	if (pe->device_count == 0)
3615 		pnv_ioda_release_pe(pe);
3616 }
3617 
3618 static void pnv_npu_disable_device(struct pci_dev *pdev)
3619 {
3620 	struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
3621 	struct eeh_pe *eehpe = edev ? edev->pe : NULL;
3622 
3623 	if (eehpe && eeh_ops && eeh_ops->reset)
3624 		eeh_ops->reset(eehpe, EEH_RESET_HOT);
3625 }
3626 
3627 static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
3628 {
3629 	struct pnv_phb *phb = hose->private_data;
3630 
3631 	opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
3632 		       OPAL_ASSERT_RESET);
3633 }
3634 
3635 static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
3636 	.dma_dev_setup		= pnv_pci_dma_dev_setup,
3637 	.dma_bus_setup		= pnv_pci_dma_bus_setup,
3638 	.iommu_bypass_supported	= pnv_pci_ioda_iommu_bypass_supported,
3639 	.setup_msi_irqs		= pnv_setup_msi_irqs,
3640 	.teardown_msi_irqs	= pnv_teardown_msi_irqs,
3641 	.enable_device_hook	= pnv_pci_enable_device_hook,
3642 	.release_device		= pnv_pci_release_device,
3643 	.window_alignment	= pnv_pci_window_alignment,
3644 	.setup_bridge		= pnv_pci_setup_bridge,
3645 	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
3646 	.shutdown		= pnv_pci_ioda_shutdown,
3647 };
3648 
3649 static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
3650 	.dma_dev_setup		= pnv_pci_dma_dev_setup,
3651 	.setup_msi_irqs		= pnv_setup_msi_irqs,
3652 	.teardown_msi_irqs	= pnv_teardown_msi_irqs,
3653 	.enable_device_hook	= pnv_pci_enable_device_hook,
3654 	.window_alignment	= pnv_pci_window_alignment,
3655 	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
3656 	.shutdown		= pnv_pci_ioda_shutdown,
3657 	.disable_device		= pnv_npu_disable_device,
3658 };
3659 
3660 static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = {
3661 	.enable_device_hook	= pnv_pci_enable_device_hook,
3662 	.window_alignment	= pnv_pci_window_alignment,
3663 	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
3664 	.shutdown		= pnv_pci_ioda_shutdown,
3665 };
3666 
3667 static void __init pnv_pci_init_ioda_phb(struct device_node *np,
3668 					 u64 hub_id, int ioda_type)
3669 {
3670 	struct pci_controller *hose;
3671 	struct pnv_phb *phb;
3672 	unsigned long size, m64map_off, m32map_off, pemap_off;
3673 	unsigned long iomap_off = 0, dma32map_off = 0;
3674 	struct resource r;
3675 	const __be64 *prop64;
3676 	const __be32 *prop32;
3677 	int len;
3678 	unsigned int segno;
3679 	u64 phb_id;
3680 	void *aux;
3681 	long rc;
3682 
3683 	if (!of_device_is_available(np))
3684 		return;
3685 
3686 	pr_info("Initializing %s PHB (%pOF)\n",	pnv_phb_names[ioda_type], np);
3687 
3688 	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
3689 	if (!prop64) {
3690 		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
3691 		return;
3692 	}
3693 	phb_id = be64_to_cpup(prop64);
3694 	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);
3695 
3696 	phb = memblock_alloc(sizeof(*phb), SMP_CACHE_BYTES);
3697 	if (!phb)
3698 		panic("%s: Failed to allocate %zu bytes\n", __func__,
3699 		      sizeof(*phb));
3700 
3701 	/* Allocate PCI controller */
3702 	phb->hose = hose = pcibios_alloc_controller(np);
3703 	if (!phb->hose) {
3704 		pr_err("  Can't allocate PCI controller for %pOF\n",
3705 		       np);
3706 		memblock_free(__pa(phb), sizeof(struct pnv_phb));
3707 		return;
3708 	}
3709 
3710 	spin_lock_init(&phb->lock);
3711 	prop32 = of_get_property(np, "bus-range", &len);
3712 	if (prop32 && len == 8) {
3713 		hose->first_busno = be32_to_cpu(prop32[0]);
3714 		hose->last_busno = be32_to_cpu(prop32[1]);
3715 	} else {
3716 		pr_warn("  Broken <bus-range> on %pOF\n", np);
3717 		hose->first_busno = 0;
3718 		hose->last_busno = 0xff;
3719 	}
3720 	hose->private_data = phb;
3721 	phb->hub_id = hub_id;
3722 	phb->opal_id = phb_id;
3723 	phb->type = ioda_type;
3724 	mutex_init(&phb->ioda.pe_alloc_mutex);
3725 
3726 	/* Detect specific models for error handling */
3727 	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
3728 		phb->model = PNV_PHB_MODEL_P7IOC;
3729 	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
3730 		phb->model = PNV_PHB_MODEL_PHB3;
3731 	else if (of_device_is_compatible(np, "ibm,power8-npu-pciex"))
3732 		phb->model = PNV_PHB_MODEL_NPU;
3733 	else if (of_device_is_compatible(np, "ibm,power9-npu-pciex"))
3734 		phb->model = PNV_PHB_MODEL_NPU2;
3735 	else
3736 		phb->model = PNV_PHB_MODEL_UNKNOWN;
3737 
3738 	/* Initialize diagnostic data buffer */
3739 	prop32 = of_get_property(np, "ibm,phb-diag-data-size", NULL);
3740 	if (prop32)
3741 		phb->diag_data_size = be32_to_cpup(prop32);
3742 	else
3743 		phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE;
3744 
3745 	phb->diag_data = memblock_alloc(phb->diag_data_size, SMP_CACHE_BYTES);
3746 	if (!phb->diag_data)
3747 		panic("%s: Failed to allocate %u bytes\n", __func__,
3748 		      phb->diag_data_size);
3749 
3750 	/* Parse 32-bit and IO ranges (if any) */
3751 	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
3752 
3753 	/* Get registers */
3754 	if (!of_address_to_resource(np, 0, &r)) {
3755 		phb->regs_phys = r.start;
3756 		phb->regs = ioremap(r.start, resource_size(&r));
3757 		if (phb->regs == NULL)
3758 			pr_err("  Failed to map registers !\n");
3759 	}
3760 
3761 	/* Initialize more IODA stuff */
3762 	phb->ioda.total_pe_num = 1;
3763 	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
3764 	if (prop32)
3765 		phb->ioda.total_pe_num = be32_to_cpup(prop32);
3766 	prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
3767 	if (prop32)
3768 		phb->ioda.reserved_pe_idx = be32_to_cpup(prop32);
3769 
3770 	/* Invalidate RID to PE# mapping */
3771 	for (segno = 0; segno < ARRAY_SIZE(phb->ioda.pe_rmap); segno++)
3772 		phb->ioda.pe_rmap[segno] = IODA_INVALID_PE;
3773 
3774 	/* Parse 64-bit MMIO range */
3775 	pnv_ioda_parse_m64_window(phb);
3776 
3777 	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
3778 	/* FW Has already off top 64k of M32 space (MSI space) */
3779 	phb->ioda.m32_size += 0x10000;
3780 
3781 	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe_num;
3782 	phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
3783 	phb->ioda.io_size = hose->pci_io_size;
3784 	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe_num;
3785 	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
3786 
3787 	/* Calculate how many 32-bit TCE segments we have */
3788 	phb->ioda.dma32_count = phb->ioda.m32_pci_base /
3789 				PNV_IODA1_DMA32_SEGSIZE;
3790 
3791 	/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
3792 	size = _ALIGN_UP(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
3793 			sizeof(unsigned long));
3794 	m64map_off = size;
3795 	size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]);
3796 	m32map_off = size;
3797 	size += phb->ioda.total_pe_num * sizeof(phb->ioda.m32_segmap[0]);
3798 	if (phb->type == PNV_PHB_IODA1) {
3799 		iomap_off = size;
3800 		size += phb->ioda.total_pe_num * sizeof(phb->ioda.io_segmap[0]);
3801 		dma32map_off = size;
3802 		size += phb->ioda.dma32_count *
3803 			sizeof(phb->ioda.dma32_segmap[0]);
3804 	}
3805 	pemap_off = size;
3806 	size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
3807 	aux = memblock_alloc(size, SMP_CACHE_BYTES);
3808 	if (!aux)
3809 		panic("%s: Failed to allocate %lu bytes\n", __func__, size);
3810 	phb->ioda.pe_alloc = aux;
3811 	phb->ioda.m64_segmap = aux + m64map_off;
3812 	phb->ioda.m32_segmap = aux + m32map_off;
3813 	for (segno = 0; segno < phb->ioda.total_pe_num; segno++) {
3814 		phb->ioda.m64_segmap[segno] = IODA_INVALID_PE;
3815 		phb->ioda.m32_segmap[segno] = IODA_INVALID_PE;
3816 	}
3817 	if (phb->type == PNV_PHB_IODA1) {
3818 		phb->ioda.io_segmap = aux + iomap_off;
3819 		for (segno = 0; segno < phb->ioda.total_pe_num; segno++)
3820 			phb->ioda.io_segmap[segno] = IODA_INVALID_PE;
3821 
3822 		phb->ioda.dma32_segmap = aux + dma32map_off;
3823 		for (segno = 0; segno < phb->ioda.dma32_count; segno++)
3824 			phb->ioda.dma32_segmap[segno] = IODA_INVALID_PE;
3825 	}
3826 	phb->ioda.pe_array = aux + pemap_off;
3827 
3828 	/*
3829 	 * Choose PE number for root bus, which shouldn't have
3830 	 * M64 resources consumed by its child devices. To pick
3831 	 * the PE number adjacent to the reserved one if possible.
3832 	 */
3833 	pnv_ioda_reserve_pe(phb, phb->ioda.reserved_pe_idx);
3834 	if (phb->ioda.reserved_pe_idx == 0) {
3835 		phb->ioda.root_pe_idx = 1;
3836 		pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
3837 	} else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) {
3838 		phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1;
3839 		pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
3840 	} else {
3841 		phb->ioda.root_pe_idx = IODA_INVALID_PE;
3842 	}
3843 
3844 	INIT_LIST_HEAD(&phb->ioda.pe_list);
3845 	mutex_init(&phb->ioda.pe_list_mutex);
3846 
3847 	/* Calculate how many 32-bit TCE segments we have */
3848 	phb->ioda.dma32_count = phb->ioda.m32_pci_base /
3849 				PNV_IODA1_DMA32_SEGSIZE;
3850 
3851 #if 0 /* We should really do that ... */
3852 	rc = opal_pci_set_phb_mem_window(opal->phb_id,
3853 					 window_type,
3854 					 window_num,
3855 					 starting_real_address,
3856 					 starting_pci_address,
3857 					 segment_size);
3858 #endif
3859 
3860 	pr_info("  %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
3861 		phb->ioda.total_pe_num, phb->ioda.reserved_pe_idx,
3862 		phb->ioda.m32_size, phb->ioda.m32_segsize);
3863 	if (phb->ioda.m64_size)
3864 		pr_info("                 M64: 0x%lx [segment=0x%lx]\n",
3865 			phb->ioda.m64_size, phb->ioda.m64_segsize);
3866 	if (phb->ioda.io_size)
3867 		pr_info("                  IO: 0x%x [segment=0x%x]\n",
3868 			phb->ioda.io_size, phb->ioda.io_segsize);
3869 
3870 
3871 	phb->hose->ops = &pnv_pci_ops;
3872 	phb->get_pe_state = pnv_ioda_get_pe_state;
3873 	phb->freeze_pe = pnv_ioda_freeze_pe;
3874 	phb->unfreeze_pe = pnv_ioda_unfreeze_pe;
3875 
3876 	/* Setup MSI support */
3877 	pnv_pci_init_ioda_msis(phb);
3878 
3879 	/*
3880 	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
3881 	 * to let the PCI core do resource assignment. It's supposed
3882 	 * that the PCI core will do correct I/O and MMIO alignment
3883 	 * for the P2P bridge bars so that each PCI bus (excluding
3884 	 * the child P2P bridges) can form individual PE.
3885 	 */
3886 	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
3887 
3888 	switch (phb->type) {
3889 	case PNV_PHB_NPU_NVLINK:
3890 		hose->controller_ops = pnv_npu_ioda_controller_ops;
3891 		break;
3892 	case PNV_PHB_NPU_OCAPI:
3893 		hose->controller_ops = pnv_npu_ocapi_ioda_controller_ops;
3894 		break;
3895 	default:
3896 		phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
3897 		hose->controller_ops = pnv_pci_ioda_controller_ops;
3898 	}
3899 
3900 	ppc_md.pcibios_default_alignment = pnv_pci_default_alignment;
3901 
3902 #ifdef CONFIG_PCI_IOV
3903 	ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
3904 	ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
3905 	ppc_md.pcibios_sriov_enable = pnv_pcibios_sriov_enable;
3906 	ppc_md.pcibios_sriov_disable = pnv_pcibios_sriov_disable;
3907 #endif
3908 
3909 	pci_add_flags(PCI_REASSIGN_ALL_RSRC);
3910 
3911 	/* Reset IODA tables to a clean state */
3912 	rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
3913 	if (rc)
3914 		pr_warn("  OPAL Error %ld performing IODA table reset !\n", rc);
3915 
3916 	/*
3917 	 * If we're running in kdump kernel, the previous kernel never
3918 	 * shutdown PCI devices correctly. We already got IODA table
3919 	 * cleaned out. So we have to issue PHB reset to stop all PCI
3920 	 * transactions from previous kernel. The ppc_pci_reset_phbs
3921 	 * kernel parameter will force this reset too. Additionally,
3922 	 * if the IODA reset above failed then use a bigger hammer.
3923 	 * This can happen if we get a PHB fatal error in very early
3924 	 * boot.
3925 	 */
3926 	if (is_kdump_kernel() || pci_reset_phbs || rc) {
3927 		pr_info("  Issue PHB reset ...\n");
3928 		pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
3929 		pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
3930 	}
3931 
3932 	/* Remove M64 resource if we can't configure it successfully */
3933 	if (!phb->init_m64 || phb->init_m64(phb))
3934 		hose->mem_resources[1].flags = 0;
3935 }
3936 
3937 void __init pnv_pci_init_ioda2_phb(struct device_node *np)
3938 {
3939 	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
3940 }
3941 
3942 void __init pnv_pci_init_npu_phb(struct device_node *np)
3943 {
3944 	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_NVLINK);
3945 }
3946 
3947 void __init pnv_pci_init_npu2_opencapi_phb(struct device_node *np)
3948 {
3949 	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_OCAPI);
3950 }
3951 
3952 static void pnv_npu2_opencapi_cfg_size_fixup(struct pci_dev *dev)
3953 {
3954 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
3955 	struct pnv_phb *phb = hose->private_data;
3956 
3957 	if (!machine_is(powernv))
3958 		return;
3959 
3960 	if (phb->type == PNV_PHB_NPU_OCAPI)
3961 		dev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
3962 }
3963 DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pnv_npu2_opencapi_cfg_size_fixup);
3964 
3965 void __init pnv_pci_init_ioda_hub(struct device_node *np)
3966 {
3967 	struct device_node *phbn;
3968 	const __be64 *prop64;
3969 	u64 hub_id;
3970 
3971 	pr_info("Probing IODA IO-Hub %pOF\n", np);
3972 
3973 	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
3974 	if (!prop64) {
3975 		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
3976 		return;
3977 	}
3978 	hub_id = be64_to_cpup(prop64);
3979 	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);
3980 
3981 	/* Count child PHBs */
3982 	for_each_child_of_node(np, phbn) {
3983 		/* Look for IODA1 PHBs */
3984 		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
3985 			pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
3986 	}
3987 }
3988