xref: /openbmc/linux/arch/s390/pci/pci.c (revision 8684014d)
1 /*
2  * Copyright IBM Corp. 2012
3  *
4  * Author(s):
5  *   Jan Glauber <jang@linux.vnet.ibm.com>
6  *
7  * The System z PCI code is a rewrite from a prototype by
8  * the following people (Kudoz!):
9  *   Alexander Schmidt
10  *   Christoph Raisch
11  *   Hannes Hering
12  *   Hoang-Nam Nguyen
13  *   Jan-Bernd Themann
14  *   Stefan Roscher
15  *   Thomas Klein
16  */
17 
18 #define KMSG_COMPONENT "zpci"
19 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
20 
21 #include <linux/kernel.h>
22 #include <linux/slab.h>
23 #include <linux/err.h>
24 #include <linux/export.h>
25 #include <linux/delay.h>
26 #include <linux/irq.h>
27 #include <linux/kernel_stat.h>
28 #include <linux/seq_file.h>
29 #include <linux/pci.h>
30 #include <linux/msi.h>
31 
32 #include <asm/isc.h>
33 #include <asm/airq.h>
34 #include <asm/facility.h>
35 #include <asm/pci_insn.h>
36 #include <asm/pci_clp.h>
37 #include <asm/pci_dma.h>
38 
39 #define DEBUG				/* enable pr_debug */
40 
41 #define	SIC_IRQ_MODE_ALL		0
42 #define	SIC_IRQ_MODE_SINGLE		1
43 
44 #define ZPCI_NR_DMA_SPACES		1
45 #define ZPCI_NR_DEVICES			CONFIG_PCI_NR_FUNCTIONS
46 
47 /* list of all detected zpci devices */
48 static LIST_HEAD(zpci_list);
49 static DEFINE_SPINLOCK(zpci_list_lock);
50 
51 static struct irq_chip zpci_irq_chip = {
52 	.name = "zPCI",
53 	.irq_unmask = pci_msi_unmask_irq,
54 	.irq_mask = pci_msi_mask_irq,
55 };
56 
57 static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
58 static DEFINE_SPINLOCK(zpci_domain_lock);
59 
60 static struct airq_iv *zpci_aisb_iv;
61 static struct airq_iv *zpci_aibv[ZPCI_NR_DEVICES];
62 
63 /* Adapter interrupt definitions */
64 static void zpci_irq_handler(struct airq_struct *airq);
65 
66 static struct airq_struct zpci_airq = {
67 	.handler = zpci_irq_handler,
68 	.isc = PCI_ISC,
69 };
70 
71 /* I/O Map */
72 static DEFINE_SPINLOCK(zpci_iomap_lock);
73 static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
74 struct zpci_iomap_entry *zpci_iomap_start;
75 EXPORT_SYMBOL_GPL(zpci_iomap_start);
76 
77 static struct kmem_cache *zdev_fmb_cache;
78 
/* Return the zpci device hung off a common PCI device's sysdata pointer. */
struct zpci_dev *get_zdev(struct pci_dev *pdev)
{
	return (struct zpci_dev *) pdev->sysdata;
}
83 
84 struct zpci_dev *get_zdev_by_fid(u32 fid)
85 {
86 	struct zpci_dev *tmp, *zdev = NULL;
87 
88 	spin_lock(&zpci_list_lock);
89 	list_for_each_entry(tmp, &zpci_list, entry) {
90 		if (tmp->fid == fid) {
91 			zdev = tmp;
92 			break;
93 		}
94 	}
95 	spin_unlock(&zpci_list_lock);
96 	return zdev;
97 }
98 
99 static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
100 {
101 	return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
102 }
103 
/* Each zpci device forms its own PCI domain; report the assigned number. */
int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);
109 
/* /proc/bus/pci uses the same numbering as the PCI domain. */
int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);
115 
/* Modify PCI: Register adapter interruptions */
/*
 * Build a function information block (FIB) describing this device's
 * per-device interrupt vector and its summary bit in the global aisb
 * vector, then register it via the modify-PCI-function-controls insn.
 */
static int zpci_set_airq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
	struct zpci_fib fib = {0};

	fib.isc = PCI_ISC;
	fib.sum = 1;		/* enable summary notifications */
	fib.noi = airq_iv_end(zdev->aibv);	/* number of interrupt bits */
	fib.aibv = (unsigned long) zdev->aibv->vector;
	fib.aibvo = 0;		/* each zdev has its own interrupt vector */
	/* byte address of this device's summary bit in the shared vector */
	fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
	fib.aisbo = zdev->aisb & 63;	/* bit offset within that byte */

	return zpci_mod_fc(req, &fib);
}
132 
/* Argument bundle for the modify-PCI-function-controls operations. */
struct mod_pci_args {
	u64 base;	/* PCI base address (start of DMA range) */
	u64 limit;	/* PCI address limit (end of DMA range) */
	u64 iota;	/* I/O address translation anchor */
	u64 fmb_addr;	/* physical address of the function measurement block */
};

/*
 * Issue a modify-PCI-function-controls operation @fn for DMA address
 * space @dmaas, copying the caller's arguments into a fresh FIB.
 */
static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
	struct zpci_fib fib = {0};

	fib.pba = args->base;
	fib.pal = args->limit;
	fib.iota = args->iota;
	fib.fmb_addr = args->fmb_addr;

	return zpci_mod_fc(req, &fib);
}
152 
/* Modify PCI: Register I/O address translation parameters */
/*
 * The IOTA must be 16k-aligned; the low bits carry the region table
 * type/origin flag (ZPCI_IOTA_RTTO_FLAG) expected by the hardware.
 */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	struct mod_pci_args args = { base, limit, iota, 0 };

	WARN_ON_ONCE(iota & 0x3fff);
	args.iota |= ZPCI_IOTA_RTTO_FLAG;
	return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
}
163 
/* Modify PCI: Unregister I/O address translation parameters */
/* All-zero arguments tear down the translation for DMA space @dmaas. */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
}
171 
/* Modify PCI: Unregister adapter interruptions */
static int zpci_clear_airq(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
}
179 
180 /* Modify PCI: Set PCI function measurement parameters */
181 int zpci_fmb_enable_device(struct zpci_dev *zdev)
182 {
183 	struct mod_pci_args args = { 0, 0, 0, 0 };
184 
185 	if (zdev->fmb)
186 		return -EINVAL;
187 
188 	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
189 	if (!zdev->fmb)
190 		return -ENOMEM;
191 	WARN_ON((u64) zdev->fmb & 0xf);
192 
193 	args.fmb_addr = virt_to_phys(zdev->fmb);
194 	return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
195 }
196 
/* Modify PCI: Disable PCI function measurement */
/*
 * Disable measurement by registering a zero FMB address, then free the
 * measurement block.  The block is freed even if the disable operation
 * fails, since the caller considers measurement gone either way.
 */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };
	int rc;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);

	kmem_cache_free(zdev_fmb_cache, zdev->fmb);
	zdev->fmb = NULL;
	return rc;
}
213 
214 #define ZPCI_PCIAS_CFGSPC	15
215 
/*
 * Read @len bytes of config space at @offset via the PCI-load insn.
 * The instruction returns the value right-aligned in a big-endian
 * doubleword; shift it left so the bytes of interest sit at the top,
 * then byte-swap to get a CPU-order value whose low @len*8 bits are
 * the config data.  On error *val is forced to all-ones, matching
 * what PCI config reads return for absent devices.
 */
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = zpci_load(&data, req, offset);
	if (!rc) {
		data = data << ((8 - len) * 8);
		data = le64_to_cpu(data);
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}
231 
/*
 * Write @len bytes of config space at @offset via the PCI-store insn.
 * Mirror image of zpci_cfg_load(): byte-swap the CPU-order value and
 * shift it down so the significant bytes are right-aligned as the
 * instruction expects.
 */
static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data = cpu_to_le64(data);
	data = data >> ((8 - len) * 8);
	rc = zpci_store(data, req, offset);
	return rc;
}
243 
/* No bus fixups are needed on s390. */
void pcibios_fixup_bus(struct pci_bus *bus)
{
}

/* Resources are pre-assigned by firmware; no extra alignment needed. */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}
254 
255 /* combine single writes by using store-block insn */
256 void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
257 {
258        zpci_memcpy_toio(to, from, count);
259 }
260 
/* Create a virtual mapping cookie for a PCI BAR */
/*
 * s390 has no real MMIO address space; instead a cookie is returned
 * that encodes the iomap table index in bits 48+ of a special address
 * range.  zpci_load/zpci_store later decode the index to find the
 * function handle and BAR.  NOTE(review): @max is ignored and the
 * BAR's existence/size is not checked here - presumably callers only
 * map BARs that zpci_setup_bus_resources() assigned; verify.
 */
void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
{
	struct zpci_dev *zdev =	get_zdev(pdev);
	u64 addr;
	int idx;

	/* only BARs 0-7 exist */
	if ((bar & 7) != bar)
		return NULL;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
	return (void __iomem *) addr;
}
EXPORT_SYMBOL_GPL(pci_iomap);
281 
/*
 * Undo pci_iomap(): recover the iomap table index from bits 48+ of the
 * cookie and clear the table entry.  The slot itself stays reserved
 * (freed only by zpci_free_iomap()).
 */
void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx;

	idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = 0;
	zpci_iomap_start[idx].bar = 0;
	spin_unlock(&zpci_iomap_lock);
}
EXPORT_SYMBOL_GPL(pci_iounmap);
293 
294 static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
295 		    int size, u32 *val)
296 {
297 	struct zpci_dev *zdev = get_zdev_by_bus(bus);
298 	int ret;
299 
300 	if (!zdev || devfn != ZPCI_DEVFN)
301 		ret = -ENODEV;
302 	else
303 		ret = zpci_cfg_load(zdev, where, val, size);
304 
305 	return ret;
306 }
307 
308 static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
309 		     int size, u32 val)
310 {
311 	struct zpci_dev *zdev = get_zdev_by_bus(bus);
312 	int ret;
313 
314 	if (!zdev || devfn != ZPCI_DEVFN)
315 		ret = -ENODEV;
316 	else
317 		ret = zpci_cfg_store(zdev, where, val, size);
318 
319 	return ret;
320 }
321 
/* Config space accessors used by every zpci root bus. */
static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
326 
/*
 * Adapter interrupt handler: scan the global summary bit vector for
 * devices with pending interrupts, then scan each such device's own
 * interrupt vector and dispatch the mapped Linux irqs.  To close the
 * race between scanning and new interrupts arriving, the summary
 * vector is rescanned once more after interrupts are re-enabled.
 */
static void zpci_irq_handler(struct airq_struct *airq)
{
	unsigned long si, ai;
	struct airq_iv *aibv;
	int irqs_on = 0;

	inc_irq_stat(IRQIO_PCI);
	for (si = 0;;) {
		/* Scan adapter summary indicator bit vector */
		si = airq_iv_scan(zpci_aisb_iv, si, airq_iv_end(zpci_aisb_iv));
		if (si == -1UL) {
			if (irqs_on++)
				/* End of second scan with interrupts on. */
				break;
			/* First scan complete, reenable interrupts. */
			zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
			si = 0;
			continue;
		}

		/* Scan the adapter interrupt vector for this device. */
		aibv = zpci_aibv[si];
		for (ai = 0;;) {
			ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
			if (ai == -1UL)
				break;
			inc_irq_stat(IRQIO_MSI);
			/* serialize against arch_teardown_msi_irqs() */
			airq_iv_lock(aibv, ai);
			generic_handle_irq(airq_iv_get_data(aibv, ai));
			airq_iv_unlock(aibv, ai);
		}
	}
}
360 
361 int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
362 {
363 	struct zpci_dev *zdev = get_zdev(pdev);
364 	unsigned int hwirq, msi_vecs;
365 	unsigned long aisb;
366 	struct msi_desc *msi;
367 	struct msi_msg msg;
368 	int rc, irq;
369 
370 	if (type == PCI_CAP_ID_MSI && nvec > 1)
371 		return 1;
372 	msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
373 
374 	/* Allocate adapter summary indicator bit */
375 	rc = -EIO;
376 	aisb = airq_iv_alloc_bit(zpci_aisb_iv);
377 	if (aisb == -1UL)
378 		goto out;
379 	zdev->aisb = aisb;
380 
381 	/* Create adapter interrupt vector */
382 	rc = -ENOMEM;
383 	zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
384 	if (!zdev->aibv)
385 		goto out_si;
386 
387 	/* Wire up shortcut pointer */
388 	zpci_aibv[aisb] = zdev->aibv;
389 
390 	/* Request MSI interrupts */
391 	hwirq = 0;
392 	list_for_each_entry(msi, &pdev->msi_list, list) {
393 		rc = -EIO;
394 		irq = irq_alloc_desc(0);	/* Alloc irq on node 0 */
395 		if (irq < 0)
396 			goto out_msi;
397 		rc = irq_set_msi_desc(irq, msi);
398 		if (rc)
399 			goto out_msi;
400 		irq_set_chip_and_handler(irq, &zpci_irq_chip,
401 					 handle_simple_irq);
402 		msg.data = hwirq;
403 		msg.address_lo = zdev->msi_addr & 0xffffffff;
404 		msg.address_hi = zdev->msi_addr >> 32;
405 		pci_write_msi_msg(irq, &msg);
406 		airq_iv_set_data(zdev->aibv, hwirq, irq);
407 		hwirq++;
408 	}
409 
410 	/* Enable adapter interrupts */
411 	rc = zpci_set_airq(zdev);
412 	if (rc)
413 		goto out_msi;
414 
415 	return (msi_vecs == nvec) ? 0 : msi_vecs;
416 
417 out_msi:
418 	list_for_each_entry(msi, &pdev->msi_list, list) {
419 		if (hwirq-- == 0)
420 			break;
421 		irq_set_msi_desc(msi->irq, NULL);
422 		irq_free_desc(msi->irq);
423 		msi->msg.address_lo = 0;
424 		msi->msg.address_hi = 0;
425 		msi->msg.data = 0;
426 		msi->irq = 0;
427 	}
428 	zpci_aibv[aisb] = NULL;
429 	airq_iv_release(zdev->aibv);
430 out_si:
431 	airq_iv_free_bit(zpci_aisb_iv, aisb);
432 out:
433 	return rc;
434 }
435 
/*
 * Tear down all MSI/MSI-X interrupts of @pdev: disable adapter
 * interrupts in the firmware, mask and free each Linux irq, and
 * release the per-device vector and summary bit.
 */
void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct msi_desc *msi;
	int rc;

	/* Disable adapter interrupts */
	rc = zpci_clear_airq(zdev);
	if (rc)
		return;

	/* Release MSI interrupts */
	list_for_each_entry(msi, &pdev->msi_list, list) {
		/* mask at the device so no interrupt fires while freeing */
		if (msi->msi_attrib.is_msix)
			__pci_msix_desc_mask_irq(msi, 1);
		else
			__pci_msi_desc_mask_irq(msi, 1, 1);
		irq_set_msi_desc(msi->irq, NULL);
		irq_free_desc(msi->irq);
		msi->msg.address_lo = 0;
		msi->msg.address_hi = 0;
		msi->msg.data = 0;
		msi->irq = 0;
	}

	zpci_aibv[zdev->aisb] = NULL;
	airq_iv_release(zdev->aibv);
	airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
}
465 
/*
 * Rewrite the device's BAR resources to the iomap cookies returned by
 * pci_iomap(), so generic resource handling works with s390's virtual
 * MMIO addressing.
 */
static void zpci_map_resources(struct zpci_dev *zdev)
{
	struct pci_dev *pdev = zdev->pdev;
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pdev->resource[i].start =
			(resource_size_t __force) pci_iomap(pdev, i, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}
}
481 
/* Undo zpci_map_resources(): unmap every BAR that has a length. */
static void zpci_unmap_resources(struct zpci_dev *zdev)
{
	struct pci_dev *pdev = zdev->pdev;
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap(pdev, (void __iomem __force *)
			    pdev->resource[i].start);
	}
}
496 
/*
 * Register the PCI adapter interrupt source, allocate the global
 * summary bit vector and enable adapter interrupt delivery for our ISC.
 * Returns 0 on success or a negative error code.
 */
static int __init zpci_irq_init(void)
{
	int rc;

	rc = register_adapter_interrupt(&zpci_airq);
	if (rc)
		goto out;
	/* Set summary to 1 to be called every time for the ISC. */
	*zpci_airq.lsi_ptr = 1;

	rc = -ENOMEM;
	zpci_aisb_iv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC);
	if (!zpci_aisb_iv)
		goto out_airq;

	zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
	return 0;

out_airq:
	unregister_adapter_interrupt(&zpci_airq);
out:
	return rc;
}
520 
/* Release the summary bit vector and the adapter interrupt source. */
static void zpci_irq_exit(void)
{
	airq_iv_release(zpci_aisb_iv);
	unregister_adapter_interrupt(&zpci_airq);
}
526 
527 static int zpci_alloc_iomap(struct zpci_dev *zdev)
528 {
529 	int entry;
530 
531 	spin_lock(&zpci_iomap_lock);
532 	entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
533 	if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
534 		spin_unlock(&zpci_iomap_lock);
535 		return -ENOSPC;
536 	}
537 	set_bit(entry, zpci_iomap);
538 	spin_unlock(&zpci_iomap_lock);
539 	return entry;
540 }
541 
542 static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
543 {
544 	spin_lock(&zpci_iomap_lock);
545 	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
546 	clear_bit(entry, zpci_iomap);
547 	spin_unlock(&zpci_iomap_lock);
548 }
549 
550 static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
551 				    unsigned long size, unsigned long flags)
552 {
553 	struct resource *r;
554 
555 	r = kzalloc(sizeof(*r), GFP_KERNEL);
556 	if (!r)
557 		return NULL;
558 
559 	r->start = start;
560 	r->end = r->start + size - 1;
561 	r->flags = flags;
562 	r->name = zdev->res_name;
563 
564 	if (request_resource(&iomem_resource, r)) {
565 		kfree(r);
566 		return NULL;
567 	}
568 	return r;
569 }
570 
/*
 * Build the bus resource list: for every populated BAR, reserve an
 * iomap slot, construct the corresponding virtual MMIO resource and
 * add it to @resources.  Returns 0 or a negative error code; partial
 * allocations from earlier iterations are NOT rolled back here -
 * NOTE(review): presumably the caller tears down via
 * zpci_cleanup_bus_resources(); verify error handling in callers.
 */
static int zpci_setup_bus_resources(struct zpci_dev *zdev,
				    struct list_head *resources)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		/* virtual address cookie: iomap index in bits 48+ */
		addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);

		/* bars[i].size holds the size as a power-of-two exponent */
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
		pci_add_resource(resources, res);
	}

	return 0;
}
611 
/* Release every BAR resource and iomap slot set up for this device. */
static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		if (!zdev->bars[i].size)
			continue;

		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		release_resource(zdev->bars[i].res);
		kfree(zdev->bars[i].res);
	}
}
625 
/*
 * Arch hook called when a PCI device is added: link the common device
 * to its zpci device, attach sysfs attribute groups, map the BARs and
 * claim the resulting resources.
 */
int pcibios_add_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct resource *res;
	int i;

	zdev->pdev = pdev;
	pdev->dev.groups = zpci_attr_groups;
	zpci_map_resources(zdev);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}
645 
/*
 * Arch hook for pci_enable_device(): set up debugging, function
 * measurement and BAR mappings, then enable the resources.
 * Errors from the debug/measurement helpers are intentionally ignored;
 * the device works without them.
 */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = get_zdev(pdev);

	zdev->pdev = pdev;
	zpci_debug_init_device(zdev);
	zpci_fmb_enable_device(zdev);
	zpci_map_resources(zdev);

	return pci_enable_resources(pdev, mask);
}
657 
/* Arch hook for pci_disable_device(): reverse pcibios_enable_device(). */
void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);

	zpci_unmap_resources(zdev);
	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
	zdev->pdev = NULL;
}
667 
668 #ifdef CONFIG_HIBERNATE_CALLBACKS
/*
 * Hibernation thaw/restore callback: re-enable the function handle,
 * re-map the BARs and re-register the DMA translation that was torn
 * down by zpci_freeze().  Offline functions need nothing.
 */
static int zpci_restore(struct device *dev)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	int ret = 0;

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		goto out;

	ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (ret)
		goto out;

	zpci_map_resources(zdev);
	zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
			   zdev->start_dma + zdev->iommu_size - 1,
			   (u64) zdev->dma_table);

out:
	return ret;
}
689 
/*
 * Hibernation freeze/poweroff callback: unregister DMA translation and
 * disable the function handle so the device is quiescent over suspend.
 */
static int zpci_freeze(struct device *dev)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		return 0;

	zpci_unregister_ioat(zdev, 0);
	return clp_disable_fh(zdev);
}
700 
/* Hibernation PM callbacks for all zpci devices (noirq phase). */
struct dev_pm_ops pcibios_pm_ops = {
	.thaw_noirq = zpci_restore,
	.freeze_noirq = zpci_freeze,
	.restore_noirq = zpci_restore,
	.poweroff_noirq = zpci_freeze,
};
707 #endif /* CONFIG_HIBERNATE_CALLBACKS */
708 
709 static int zpci_alloc_domain(struct zpci_dev *zdev)
710 {
711 	spin_lock(&zpci_domain_lock);
712 	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
713 	if (zdev->domain == ZPCI_NR_DEVICES) {
714 		spin_unlock(&zpci_domain_lock);
715 		return -ENOSPC;
716 	}
717 	set_bit(zdev->domain, zpci_domain);
718 	spin_unlock(&zpci_domain_lock);
719 	return 0;
720 }
721 
/* Return the device's domain number to the free pool. */
static void zpci_free_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}
728 
/*
 * Arch hook called when a root bus is removed: tear down the hotplug
 * slot, bus resources and domain, unlink the zpci device from the
 * global list and free it.
 */
void pcibios_remove_bus(struct pci_bus *bus)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	zpci_exit_slot(zdev);
	zpci_cleanup_bus_resources(zdev);
	zpci_free_domain(zdev);

	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);

	kfree(zdev);
}
743 
/*
 * Set up bus resources and scan a new root bus for this function.
 * On scan failure the resources are cleaned up again.
 */
static int zpci_scan_bus(struct zpci_dev *zdev)
{
	LIST_HEAD(resources);
	int ret;

	ret = zpci_setup_bus_resources(zdev, &resources);
	if (ret)
		return ret;

	zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
				      zdev, &resources);
	if (!zdev->bus) {
		zpci_cleanup_bus_resources(zdev);
		return -EIO;
	}

	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	return 0;
}
763 
/*
 * Enable the PCI function handle and initialize DMA for it, moving the
 * device to the ONLINE state.  The handle is disabled again if DMA
 * setup fails.
 */
int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;

	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);
785 
/* Tear down DMA and disable the PCI function handle. */
int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
792 
/*
 * Bring a newly discovered zpci function into the system: assign a
 * domain, enable it if firmware reports it configured, scan its root
 * bus, publish it on the global list and create the hotplug slot.
 * All steps are rolled back on failure.
 */
int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto out_free;
	}
	rc = zpci_scan_bus(zdev);
	if (rc)
		goto out_disable;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	zpci_init_slot(zdev);

	return 0;

out_disable:
	/* only disable what zpci_enable_device() above turned on */
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
out_free:
	zpci_free_domain(zdev);
out:
	return rc;
}
826 
/* Quiesce a function's DMA when it is being stopped. */
void zpci_stop_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	/*
	 * Note: SCLP disables fh via set-pci-fn so don't
	 * do that here.
	 */
}
EXPORT_SYMBOL_GPL(zpci_stop_device);
836 
/*
 * Convert a BAR size exponent (2^size bytes) to kilobytes; 0 means
 * the BAR is absent.  NOTE(review): no caller is visible in this
 * file - confirm it is still used before removing.
 */
static inline int barsize(u8 size)
{
	return (size) ? (1 << size) >> 10 : 0;
}
841 
/*
 * Allocate global memory pools: the FMB slab cache (16-byte aligned as
 * required by the hardware) and the iomap table.  Returns 0 or -ENOMEM.
 */
static int zpci_mem_init(void)
{
	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
				16, 0, NULL);
	if (!zdev_fmb_cache)
		goto error_zdev;

	/* TODO: use realloc */
	zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
				   GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;
	return 0;

error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_zdev:
	return -ENOMEM;
}
861 
/* Free the global pools allocated by zpci_mem_init(). */
static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}
867 
868 static unsigned int s390_pci_probe = 1;
869 static unsigned int s390_pci_initialized;
870 
871 char * __init pcibios_setup(char *str)
872 {
873 	if (!strcmp(str, "off")) {
874 		s390_pci_probe = 0;
875 		return NULL;
876 	}
877 	return str;
878 }
879 
/* True once pci_base_init() completed successfully. */
bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}
884 
/*
 * Subsystem init: bail out silently when PCI was disabled on the
 * command line or the required hardware facilities are missing, then
 * bring up debug, memory pools, interrupts and DMA before scanning
 * for functions.  Unwinds in reverse order on failure.
 */
static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	/* required facilities: 2, 69, 71, 72 (PCI instruction support) */
	if (!test_facility(2) || !test_facility(69)
	    || !test_facility(71) || !test_facility(72))
		return 0;

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);
931 
/* Re-scan for PCI functions, but only once the subsystem is up. */
void zpci_rescan(void)
{
	if (!zpci_is_enabled())
		return;

	clp_rescan_pci_devices_simple();
}
937