xref: /openbmc/linux/arch/s390/pci/pci.c (revision 79f08d9e)
1 /*
2  * Copyright IBM Corp. 2012
3  *
4  * Author(s):
5  *   Jan Glauber <jang@linux.vnet.ibm.com>
6  *
7  * The System z PCI code is a rewrite from a prototype by
8  * the following people (Kudoz!):
9  *   Alexander Schmidt
10  *   Christoph Raisch
11  *   Hannes Hering
12  *   Hoang-Nam Nguyen
13  *   Jan-Bernd Themann
14  *   Stefan Roscher
15  *   Thomas Klein
16  */
17 
18 #define COMPONENT "zPCI"
19 #define pr_fmt(fmt) COMPONENT ": " fmt
20 
21 #include <linux/kernel.h>
22 #include <linux/slab.h>
23 #include <linux/err.h>
24 #include <linux/export.h>
25 #include <linux/delay.h>
26 #include <linux/irq.h>
27 #include <linux/kernel_stat.h>
28 #include <linux/seq_file.h>
29 #include <linux/pci.h>
30 #include <linux/msi.h>
31 
32 #include <asm/isc.h>
33 #include <asm/airq.h>
34 #include <asm/facility.h>
35 #include <asm/pci_insn.h>
36 #include <asm/pci_clp.h>
37 #include <asm/pci_dma.h>
38 
39 #define DEBUG				/* enable pr_debug */
40 
41 #define	SIC_IRQ_MODE_ALL		0
42 #define	SIC_IRQ_MODE_SINGLE		1
43 
44 #define ZPCI_NR_DMA_SPACES		1
45 #define ZPCI_NR_DEVICES			CONFIG_PCI_NR_FUNCTIONS
46 
/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);		/* protects zpci_list */

static void zpci_enable_irq(struct irq_data *data);
static void zpci_disable_irq(struct irq_data *data);

/* irq_chip backing the per-MSI interrupts of all zPCI functions */
static struct irq_chip zpci_irq_chip = {
	.name = "zPCI",
	.irq_unmask = zpci_enable_irq,
	.irq_mask = zpci_disable_irq,
};

/* bitmap of allocated PCI domain numbers (one domain per zpci device) */
static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);	/* protects zpci_domain */

/* adapter summary indicator vector + per-device interrupt vector shortcuts */
static struct airq_iv *zpci_aisb_iv;
static struct airq_iv *zpci_aibv[ZPCI_NR_DEVICES];

/* Adapter interrupt definitions */
static void zpci_irq_handler(struct airq_struct *airq);

static struct airq_struct zpci_airq = {
	.handler = zpci_irq_handler,
	.isc = PCI_ISC,
};

/* I/O Map */
static DEFINE_SPINLOCK(zpci_iomap_lock);	/* protects zpci_iomap + entries */
static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

/* slab cache for function measurement blocks (created with 16-byte align) */
static struct kmem_cache *zdev_fmb_cache;
81 
82 struct zpci_dev *get_zdev(struct pci_dev *pdev)
83 {
84 	return (struct zpci_dev *) pdev->sysdata;
85 }
86 
87 struct zpci_dev *get_zdev_by_fid(u32 fid)
88 {
89 	struct zpci_dev *tmp, *zdev = NULL;
90 
91 	spin_lock(&zpci_list_lock);
92 	list_for_each_entry(tmp, &zpci_list, entry) {
93 		if (tmp->fid == fid) {
94 			zdev = tmp;
95 			break;
96 		}
97 	}
98 	spin_unlock(&zpci_list_lock);
99 	return zdev;
100 }
101 
102 static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
103 {
104 	return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
105 }
106 
107 int pci_domain_nr(struct pci_bus *bus)
108 {
109 	return ((struct zpci_dev *) bus->sysdata)->domain;
110 }
111 EXPORT_SYMBOL_GPL(pci_domain_nr);
112 
/* /proc/bus/pci uses the same numbering as the PCI domain. */
int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);
118 
/* Modify PCI: Register adapter interruptions */
static int zpci_set_airq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
	struct zpci_fib fib = {0};

	fib.isc = PCI_ISC;
	fib.sum = 1;		/* enable summary notifications */
	fib.noi = airq_iv_end(zdev->aibv);	/* number of interrupt bits */
	fib.aibv = (unsigned long) zdev->aibv->vector;
	fib.aibvo = 0;		/* each zdev has its own interrupt vector */
	/* summary bit location: byte within the shared aisb vector plus
	 * the bit offset within that byte */
	fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
	fib.aisbo = zdev->aisb & 63;

	return zpci_mod_fc(req, &fib);
}
135 
/* Argument bundle for mod_pci(); fields are copied 1:1 into the FIB. */
struct mod_pci_args {
	u64 base;	/* -> fib.pba, PCI base address */
	u64 limit;	/* -> fib.pal, PCI address limit */
	u64 iota;	/* -> fib.iota, I/O address translation anchor */
	u64 fmb_addr;	/* -> fib.fmb_addr, function measurement block */
};
142 
/*
 * Issue a Modify PCI Function Controls operation @fn for @zdev and DMA
 * address space @dmaas, with @args copied into the function information
 * block.  Returns the result of zpci_mod_fc().
 */
static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
	struct zpci_fib fib = {0};

	fib.pba = args->base;
	fib.pal = args->limit;
	fib.iota = args->iota;
	fib.fmb_addr = args->fmb_addr;

	return zpci_mod_fc(req, &fib);
}
155 
/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	struct mod_pci_args args = { base, limit, iota, 0 };

	/* the translation anchor must be 16K aligned; its low bits carry
	 * the region-table-top-origin flag instead */
	WARN_ON_ONCE(iota & 0x3fff);
	args.iota |= ZPCI_IOTA_RTTO_FLAG;
	return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
}
166 
/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	/* an all-zero argument block clears the translation registration */
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
}
174 
/* Modify PCI: Unregister adapter interruptions */
static int zpci_clear_airq(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
}
182 
/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	/* already enabled - a measurement block is present */
	if (zdev->fmb)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	/* the cache guarantees 16-byte alignment; complain if violated */
	WARN_ON((u64) zdev->fmb & 0xf);

	args.fmb_addr = virt_to_phys(zdev->fmb);
	return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
}
199 
/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };
	int rc;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);

	/* free the block even if the disable operation failed */
	kmem_cache_free(zdev_fmb_cache, zdev->fmb);
	zdev->fmb = NULL;
	return rc;
}
216 
#define ZPCI_PCIAS_CFGSPC	15	/* PCI address space: config space */

/*
 * Read @len bytes (1/2/4) at config-space @offset of @zdev into *@val.
 * On error *@val is set to all-ones, mirroring common PCI behavior.
 */
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = zpci_load(&data, req, offset);
	if (!rc) {
		/* left-align the len low-order bytes, then byte-swap so the
		 * little-endian config value ends up in the low word */
		data = data << ((8 - len) * 8);
		data = le64_to_cpu(data);
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}
234 
/*
 * Write @len bytes (1/2/4) of @val to config-space @offset of @zdev.
 * Inverse of zpci_cfg_load(): swap first, then right-align the bytes.
 */
static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data = cpu_to_le64(data);
	data = data >> ((8 - len) * 8);
	rc = zpci_store(data, req, offset);
	return rc;
}
246 
/*
 * Mask/unmask an MSI or MSI-X interrupt by writing its per-vector mask.
 * Returns 1 if a mask was written, 0 if the function has no mask support.
 */
static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
{
	int offset, pos;
	u32 mask_bits;

	if (msi->msi_attrib.is_msix) {
		/* MSI-X: vector control word lives in the mapped table */
		offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL;
		msi->masked = readl(msi->mask_base + offset);
		writel(flag, msi->mask_base + offset);
	} else if (msi->msi_attrib.maskbit) {
		/* MSI with per-vector masking: mask_base holds the config
		 * space position of the mask register */
		pos = (long) msi->mask_base;
		pci_read_config_dword(msi->dev, pos, &mask_bits);
		mask_bits &= ~(mask);
		mask_bits |= flag & mask;
		pci_write_config_dword(msi->dev, pos, mask_bits);
	} else
		return 0;

	/* NOTE(review): this overwrites the maskbit *capability* flag with
	 * the current mask *state*, so a masked-then-unmasked MSI vector can
	 * no longer take the maskbit branch above - looks suspect; confirm
	 * against the generic MSI code before relying on it */
	msi->msi_attrib.maskbit = !!flag;
	return 1;
}
269 
270 static void zpci_enable_irq(struct irq_data *data)
271 {
272 	struct msi_desc *msi = irq_get_msi_desc(data->irq);
273 
274 	zpci_msi_set_mask_bits(msi, 1, 0);
275 }
276 
277 static void zpci_disable_irq(struct irq_data *data)
278 {
279 	struct msi_desc *msi = irq_get_msi_desc(data->irq);
280 
281 	zpci_msi_set_mask_bits(msi, 1, 1);
282 }
283 
/* No bus fixups are needed on zPCI; the hook must still exist. */
void pcibios_fixup_bus(struct pci_bus *bus)
{
}
287 
/* Resource alignment is handled by firmware; report no constraint. */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}
294 
295 /* combine single writes by using store-block insn */
296 void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
297 {
298        zpci_memcpy_toio(to, from, count);
299 }
300 
/* Create a virtual mapping cookie for a PCI BAR */
void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
{
	struct zpci_dev *zdev =	get_zdev(pdev);
	u64 addr;
	int idx;

	/* only BARs 0..7 exist */
	if ((bar & 7) != bar)
		return NULL;

	/* publish the (fh, bar) pair in the iomap table so the PCI load/
	 * store instructions can resolve the cookie later */
	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	/* the cookie encodes the iomap index in bits 48+ of a fake address */
	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
	return (void __iomem *) addr;
}
EXPORT_SYMBOL_GPL(pci_iomap);
321 
/* Undo pci_iomap(): decode the index from the cookie and clear the slot. */
void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx;

	idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = 0;
	zpci_iomap_start[idx].bar = 0;
	spin_unlock(&zpci_iomap_lock);
}
EXPORT_SYMBOL_GPL(pci_iounmap);
333 
334 static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
335 		    int size, u32 *val)
336 {
337 	struct zpci_dev *zdev = get_zdev_by_bus(bus);
338 	int ret;
339 
340 	if (!zdev || devfn != ZPCI_DEVFN)
341 		ret = -ENODEV;
342 	else
343 		ret = zpci_cfg_load(zdev, where, val, size);
344 
345 	return ret;
346 }
347 
348 static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
349 		     int size, u32 val)
350 {
351 	struct zpci_dev *zdev = get_zdev_by_bus(bus);
352 	int ret;
353 
354 	if (!zdev || devfn != ZPCI_DEVFN)
355 		ret = -ENODEV;
356 	else
357 		ret = zpci_cfg_store(zdev, where, val, size);
358 
359 	return ret;
360 }
361 
/* Config-space accessors for the virtual root buses created below. */
static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
366 
/*
 * Adapter interrupt handler: walk the summary indicator vector to find
 * devices with pending interrupts, then walk each device's own vector
 * and dispatch the mapped Linux irqs.  A second full scan is done after
 * re-enabling interrupt delivery to close the race with new arrivals.
 */
static void zpci_irq_handler(struct airq_struct *airq)
{
	unsigned long si, ai;
	struct airq_iv *aibv;
	int irqs_on = 0;

	inc_irq_stat(IRQIO_PCI);
	for (si = 0;;) {
		/* Scan adapter summary indicator bit vector */
		si = airq_iv_scan(zpci_aisb_iv, si, airq_iv_end(zpci_aisb_iv));
		if (si == -1UL) {
			if (irqs_on++)
				/* End of second scan with interrupts on. */
				break;
			/* First scan complete, reenable interrupts. */
			zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
			si = 0;
			continue;
		}

		/* Scan the adapter interrupt vector for this device. */
		aibv = zpci_aibv[si];
		for (ai = 0;;) {
			ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
			if (ai == -1UL)
				break;
			inc_irq_stat(IRQIO_MSI);
			/* lock out concurrent teardown of this vector bit */
			airq_iv_lock(aibv, ai);
			generic_handle_irq(airq_iv_get_data(aibv, ai));
			airq_iv_unlock(aibv, ai);
		}
	}
}
400 
/*
 * Arch hook: set up @nvec MSI/MSI-X interrupts for @pdev.
 * Allocates a summary bit and a per-device interrupt vector, wires each
 * msi_desc to a freshly allocated Linux irq, then enables adapter
 * interrupts for the function.  Returns 0 on success, the (smaller)
 * number of vectors actually supported, or a negative error code.
 */
int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	unsigned int hwirq, irq, msi_vecs;
	unsigned long aisb;
	struct msi_desc *msi;
	struct msi_msg msg;
	int rc;

	if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
		return -EINVAL;
	/* clamp to both the hardware and the kernel configuration limit */
	msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX);
	msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI);

	/* Allocate adapter summary indicator bit */
	rc = -EIO;
	aisb = airq_iv_alloc_bit(zpci_aisb_iv);
	if (aisb == -1UL)
		goto out;
	zdev->aisb = aisb;

	/* Create adapter interrupt vector */
	rc = -ENOMEM;
	zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
	if (!zdev->aibv)
		goto out_si;

	/* Wire up shortcut pointer */
	zpci_aibv[aisb] = zdev->aibv;

	/* Request MSI interrupts */
	hwirq = 0;
	list_for_each_entry(msi, &pdev->msi_list, list) {
		rc = -EIO;
		irq = irq_alloc_desc(0);	/* Alloc irq on node 0 */
		if (irq == NO_IRQ)
			goto out_msi;
		rc = irq_set_msi_desc(irq, msi);
		if (rc)
			goto out_msi;
		irq_set_chip_and_handler(irq, &zpci_irq_chip,
					 handle_simple_irq);
		/* the MSI data word selects the bit in the device vector */
		msg.data = hwirq;
		msg.address_lo = zdev->msi_addr & 0xffffffff;
		msg.address_hi = zdev->msi_addr >> 32;
		write_msi_msg(irq, &msg);
		airq_iv_set_data(zdev->aibv, hwirq, irq);
		hwirq++;
	}

	/* Enable adapter interrupts */
	rc = zpci_set_airq(zdev);
	if (rc)
		goto out_msi;

	return (msi_vecs == nvec) ? 0 : msi_vecs;

out_msi:
	/* unwind only the hwirq descriptors that were actually set up */
	list_for_each_entry(msi, &pdev->msi_list, list) {
		if (hwirq-- == 0)
			break;
		irq_set_msi_desc(msi->irq, NULL);
		irq_free_desc(msi->irq);
		msi->msg.address_lo = 0;
		msi->msg.address_hi = 0;
		msi->msg.data = 0;
		msi->irq = 0;
	}
	zpci_aibv[aisb] = NULL;
	airq_iv_release(zdev->aibv);
out_si:
	airq_iv_free_bit(zpci_aisb_iv, aisb);
out:
	return rc;
}
476 
/*
 * Arch hook: tear down all MSI interrupts of @pdev.  Disables adapter
 * interrupts first, then masks and releases every vector and frees the
 * per-device interrupt vector and summary bit.
 */
void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct msi_desc *msi;
	int rc;

	/* Disable adapter interrupts */
	rc = zpci_clear_airq(zdev);
	if (rc)
		/* leave everything in place if the function is unreachable */
		return;

	/* Release MSI interrupts */
	list_for_each_entry(msi, &pdev->msi_list, list) {
		zpci_msi_set_mask_bits(msi, 1, 1);
		irq_set_msi_desc(msi->irq, NULL);
		irq_free_desc(msi->irq);
		msi->msg.address_lo = 0;
		msi->msg.address_hi = 0;
		msi->msg.data = 0;
		msi->irq = 0;
	}

	zpci_aibv[zdev->aisb] = NULL;
	airq_iv_release(zdev->aibv);
	airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
}
503 
/*
 * Replace each BAR resource range with its pci_iomap() cookie so that
 * subsequent ioremap-style accesses go through the zPCI instructions.
 */
static void zpci_map_resources(struct zpci_dev *zdev)
{
	struct pci_dev *pdev = zdev->pdev;
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}
}
518 
/* Undo zpci_map_resources(): release the iomap cookie of every BAR. */
static void zpci_unmap_resources(struct zpci_dev *zdev)
{
	struct pci_dev *pdev = zdev->pdev;
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap(pdev, (void *) pdev->resource[i].start);
	}
}
532 
/* Add the zPCI-specific sysfs attributes for a new PCI device. */
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return zpci_sysfs_add_device(&pdev->dev);
}
537 
/*
 * Register the PCI adapter interrupt, create the global summary
 * indicator vector and switch interrupt delivery on.
 */
static int __init zpci_irq_init(void)
{
	int rc;

	rc = register_adapter_interrupt(&zpci_airq);
	if (rc)
		goto out;
	/* Set summary to 1 to be called every time for the ISC. */
	*zpci_airq.lsi_ptr = 1;

	rc = -ENOMEM;
	zpci_aisb_iv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC);
	if (!zpci_aisb_iv)
		goto out_airq;

	zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
	return 0;

out_airq:
	unregister_adapter_interrupt(&zpci_airq);
out:
	return rc;
}
561 
/* Tear down what zpci_irq_init() set up, in reverse order. */
static void zpci_irq_exit(void)
{
	airq_iv_release(zpci_aisb_iv);
	unregister_adapter_interrupt(&zpci_airq);
}
567 
568 static int zpci_alloc_iomap(struct zpci_dev *zdev)
569 {
570 	int entry;
571 
572 	spin_lock(&zpci_iomap_lock);
573 	entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
574 	if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
575 		spin_unlock(&zpci_iomap_lock);
576 		return -ENOSPC;
577 	}
578 	set_bit(entry, zpci_iomap);
579 	spin_unlock(&zpci_iomap_lock);
580 	return entry;
581 }
582 
583 static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
584 {
585 	spin_lock(&zpci_iomap_lock);
586 	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
587 	clear_bit(entry, zpci_iomap);
588 	spin_unlock(&zpci_iomap_lock);
589 }
590 
591 static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
592 				    unsigned long size, unsigned long flags)
593 {
594 	struct resource *r;
595 
596 	r = kzalloc(sizeof(*r), GFP_KERNEL);
597 	if (!r)
598 		return NULL;
599 
600 	r->start = start;
601 	r->end = r->start + size - 1;
602 	r->flags = flags;
603 	r->name = zdev->res_name;
604 
605 	if (request_resource(&iomem_resource, r)) {
606 		kfree(r);
607 		return NULL;
608 	}
609 	return r;
610 }
611 
/*
 * Create an iomem resource and iomap slot for every populated BAR of
 * @zdev and append them to @resources for the upcoming bus scan.
 * Returns 0 on success or a negative error code; already-created
 * resources are left for zpci_cleanup_bus_resources() on failure.
 */
static int zpci_setup_bus_resources(struct zpci_dev *zdev,
				    struct list_head *resources)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		/* fake address: iomap index in bits 48+, as in pci_iomap() */
		addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);

		/* BAR size is stored as a power-of-two exponent */
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
		pci_add_resource(resources, res);
	}

	return 0;
}
652 
/* Release the iomap slots and iomem resources of all populated BARs. */
static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		if (!zdev->bars[i].size)
			continue;

		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		release_resource(zdev->bars[i].res);
		kfree(zdev->bars[i].res);
	}
}
666 
/*
 * Arch hook called for each newly discovered PCI device: link it to its
 * zpci device, map the BARs and claim the resulting resources.
 */
int pcibios_add_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct resource *res;
	int i;

	zdev->pdev = pdev;
	zpci_map_resources(zdev);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];
		/* skip unpopulated BARs and those already claimed */
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}
685 
/*
 * Arch hook for pci_enable_device(): start debugging/measurement for
 * the function, map its BARs and enable memory-space decoding in the
 * command register.  I/O-space BARs are rejected - zPCI is MMIO only.
 */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct resource *res;
	u16 cmd;
	int i;

	zdev->pdev = pdev;
	zpci_debug_init_device(zdev);
	zpci_fmb_enable_device(zdev);
	zpci_map_resources(zdev);

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];

		if (res->flags & IORESOURCE_IO)
			return -EINVAL;

		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	return 0;
}
711 
/* Arch hook for pci_disable_device(): undo pcibios_enable_device(). */
void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);

	zpci_unmap_resources(zdev);
	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
	zdev->pdev = NULL;
}
721 
722 #ifdef CONFIG_HIBERNATE_CALLBACKS
/*
 * Hibernation thaw/restore callback: re-enable the function, re-map its
 * BARs and re-register the DMA translation table, since the function
 * handle and mappings do not survive suspend.
 */
static int zpci_restore(struct device *dev)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	int ret = 0;

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		goto out;

	ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (ret)
		goto out;

	zpci_map_resources(zdev);
	zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
			   zdev->start_dma + zdev->iommu_size - 1,
			   (u64) zdev->dma_table);

out:
	return ret;
}
743 
/*
 * Hibernation freeze/poweroff callback: drop the DMA translation
 * registration and disable the function handle.
 */
static int zpci_freeze(struct device *dev)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		return 0;

	zpci_unregister_ioat(zdev, 0);
	return clp_disable_fh(zdev);
}
754 
/* PM callbacks used by the PCI core for hibernation transitions. */
struct dev_pm_ops pcibios_pm_ops = {
	.thaw_noirq = zpci_restore,
	.freeze_noirq = zpci_freeze,
	.restore_noirq = zpci_restore,
	.poweroff_noirq = zpci_freeze,
};
761 #endif /* CONFIG_HIBERNATE_CALLBACKS */
762 
763 static int zpci_alloc_domain(struct zpci_dev *zdev)
764 {
765 	spin_lock(&zpci_domain_lock);
766 	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
767 	if (zdev->domain == ZPCI_NR_DEVICES) {
768 		spin_unlock(&zpci_domain_lock);
769 		return -ENOSPC;
770 	}
771 	set_bit(zdev->domain, zpci_domain);
772 	spin_unlock(&zpci_domain_lock);
773 	return 0;
774 }
775 
776 static void zpci_free_domain(struct zpci_dev *zdev)
777 {
778 	spin_lock(&zpci_domain_lock);
779 	clear_bit(zdev->domain, zpci_domain);
780 	spin_unlock(&zpci_domain_lock);
781 }
782 
/*
 * Arch hook for bus removal: tear down hotplug slot, BAR resources and
 * domain, unlink the zpci device from the global list and free it.
 */
void pcibios_remove_bus(struct pci_bus *bus)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	zpci_exit_slot(zdev);
	zpci_cleanup_bus_resources(zdev);
	zpci_free_domain(zdev);

	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);

	kfree(zdev);
}
797 
/*
 * Create BAR resources for @zdev and scan a fresh virtual root bus for
 * its single function.  Returns 0 on success or a negative error code.
 */
static int zpci_scan_bus(struct zpci_dev *zdev)
{
	LIST_HEAD(resources);
	int ret;

	ret = zpci_setup_bus_resources(zdev, &resources);
	if (ret)
		return ret;

	zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
				      zdev, &resources);
	if (!zdev->bus) {
		zpci_cleanup_bus_resources(zdev);
		return -EIO;
	}

	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	return 0;
}
817 
/*
 * Enable the PCI function handle and initialize its DMA translation.
 * On success the device is marked online.  On DMA setup failure the
 * handle is disabled again.
 */
int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;

	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
839 
/* Tear down DMA translation and disable the PCI function handle. */
int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
846 
/*
 * Register a newly detected zpci device: allocate a domain, enable the
 * function if it is already configured, scan its bus, add it to the
 * global list and create its hotplug slot.  Unwinds on any failure.
 */
int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto out_free;
	}
	rc = zpci_scan_bus(zdev);
	if (rc)
		goto out_disable;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	zpci_init_slot(zdev);

	return 0;

out_disable:
	/* only disable if zpci_enable_device() above brought it online */
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
out_free:
	zpci_free_domain(zdev);
out:
	return rc;
}
880 
/* Stop DMA for a function that is going away (e.g. via SCLP). */
void zpci_stop_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	/*
	 * Note: SCLP disables fh via set-pci-fn so don't
	 * do that here.
	 */
}
EXPORT_SYMBOL_GPL(zpci_stop_device);
890 
891 static inline int barsize(u8 size)
892 {
893 	return (size) ? (1 << size) >> 10 : 0;
894 }
895 
896 static int zpci_mem_init(void)
897 {
898 	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
899 				16, 0, NULL);
900 	if (!zdev_fmb_cache)
901 		goto error_zdev;
902 
903 	/* TODO: use realloc */
904 	zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
905 				   GFP_KERNEL);
906 	if (!zpci_iomap_start)
907 		goto error_iomap;
908 	return 0;
909 
910 error_iomap:
911 	kmem_cache_destroy(zdev_fmb_cache);
912 error_zdev:
913 	return -ENOMEM;
914 }
915 
916 static void zpci_mem_exit(void)
917 {
918 	kfree(zpci_iomap_start);
919 	kmem_cache_destroy(zdev_fmb_cache);
920 }
921 
static unsigned int s390_pci_probe;	/* set by "pci=on" on the command line */
923 
924 char * __init pcibios_setup(char *str)
925 {
926 	if (!strcmp(str, "on")) {
927 		s390_pci_probe = 1;
928 		return NULL;
929 	}
930 	return str;
931 }
932 
/*
 * zPCI subsystem init: bail out silently unless probing was requested
 * and the required facilities are installed, then bring up debugging,
 * memory pools, interrupts, DMA and finally scan for PCI functions.
 * Unwinds in reverse order on failure.
 */
static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	/* facilities: 2 = EEE, 69/71/72 = PCI instruction support */
	if (!test_facility(2) || !test_facility(69)
	    || !test_facility(71) || !test_facility(72))
		return 0;

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;

	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);
978 
/* Trigger a CLP rescan to pick up newly available PCI functions. */
void zpci_rescan(void)
{
	clp_rescan_pci_devices_simple();
}
983