xref: /openbmc/linux/arch/s390/pci/pci.c (revision aeb64ff3)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

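/*
 * Look up a zpci_dev by its function ID (FID). Walks the global device
 * list under zpci_list_lock; the returned pointer is not reference
 * counted, so callers rely on the device not being freed concurrently.
 */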
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}

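/*
 * Remove all functions that went from standby to reserved state, i.e.
 * functions no longer available to this Linux instance. Candidates are
 * collected on a local list first so that the actual removal runs
 * without zpci_list_lock held.
 */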
void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_remove_device(zdev);
}

static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
	return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

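/*
 * The helpers below drive the Modify PCI Function Controls operation
 * via zpci_mod_fc(): a request word selects the function handle and
 * the operation, and a function information block (FIB) carries the
 * operation's parameters. A condition code of 3 means the function is
 * already gone, which the teardown paths treat as success.
 */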
/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 status;

	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;
	return cc ? -EIO : 0;
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

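/*
 * Config space accessors: __zpci_load and __zpci_store wrap the zPCI
 * load/store instructions on the configuration address space
 * (ZPCI_PCIAS_CFGSPC). The raw 64-bit value is byte-swapped from
 * little endian and the requested 1, 2 or 4 bytes sit at the most
 * significant end, hence the shift by (8 - len) * 8 bits.
 */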
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}

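/*
 * With MIO support (have_mio), BAR space behaves like normal MMIO and
 * gets a real page table mapping for the MIO address. Without MIO, the
 * "address" is an iomap cookie encoding a slot in zpci_iomap_start; it
 * needs no mapping and is passed through unchanged.
 */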
void __iomem *ioremap(unsigned long ioaddr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long offset;

	if (!size)
		return NULL;

	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *) ioaddr;

	offset = ioaddr & ~PAGE_MASK;
	ioaddr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	if (ioremap_page_range((unsigned long) area->addr,
			       (unsigned long) area->addr + size,
			       ioaddr, PAGE_KERNEL)) {
		vunmap(area->addr);
		return NULL;
	}
	return (void __iomem *) ((unsigned long) area->addr + offset);
}
EXPORT_SYMBOL(ioremap);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);

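/*
 * pci_iomap comes in two flavors: the _fh variants hand out a fake
 * address, ZPCI_ADDR(idx), encoding an index into the global
 * zpci_iomap_start table, which the I/O primitives later decode to
 * issue function-handle based zPCI load/store instructions. The _mio
 * variants ioremap the BAR's MIO address and return a real virtual
 * address (the plain variant uses the write-through address, the _wc
 * variant the write-back address).
 */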
/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

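/*
 * Root bus config accessors. Each zPCI function sits alone on its bus,
 * so only devfn ZPCI_DEVFN is valid; everything else gets -ENODEV.
 */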
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_load(zdev, where, val, size);

	return ret;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_store(zdev, where, val, size);

	return ret;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

#ifdef CONFIG_PCI_IOV
static struct resource iov_res = {
	.name	= "PCI IOV res",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
#endif

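/*
 * Fill in the resource start addresses for all BARs. These are
 * synthetic: with MIO they are the function's MIO write-back
 * addresses, otherwise they are iomap cookies from the legacy mapping
 * path. VF BARs are parented under the catch-all iov_res.
 */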
static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wb;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

#ifdef CONFIG_PCI_IOV
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		int bar = i + PCI_IOV_RESOURCES;

		len = pci_resource_len(pdev, bar);
		if (!len)
			continue;
		pdev->resource[bar].parent = &iov_res;
	}
#endif
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}

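/* Allocate a free slot in the global BAR mapping table. */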
static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}

static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

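/*
 * Create and register an iomem resource for every implemented BAR.
 * The BAR size is stored as log2, and the low BAR attribute bits are
 * translated to resource flags (bit 3: prefetchable, bit 2: 64-bit).
 */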
static int zpci_setup_bus_resources(struct zpci_dev *zdev,
				    struct list_head *resources)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wb;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
		pci_add_resource(resources, res);
	}

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size || !zdev->bars[i].res)
			continue;

		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		release_resource(zdev->bars[i].res);
		kfree(zdev->bars[i].res);
	}
}

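/*
 * Per-device setup hook, called when a pci_dev is created. VF scanning
 * is suppressed (no_vf_scan) since on s390 each VF is presented by the
 * machine as a separate zPCI function and discovered through CLP.
 */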
int pcibios_add_device(struct pci_dev *pdev)
{
	struct resource *res;
	int i;

	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	zpci_unmap_resources(pdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}

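/*
 * Hibernation support: on freeze/poweroff the function's DMA
 * translation is unregistered and the function disabled; on
 * thaw/restore it is re-enabled and the DMA translation tables are
 * registered again.
 */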
#ifdef CONFIG_HIBERNATE_CALLBACKS
static int zpci_restore(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct zpci_dev *zdev = to_zpci(pdev);
	int ret = 0;

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		goto out;

	ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (ret)
		goto out;

	zpci_map_resources(pdev);
	zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
			   (u64) zdev->dma_table);

out:
	return ret;
}

static int zpci_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct zpci_dev *zdev = to_zpci(pdev);

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		return 0;

	zpci_unregister_ioat(zdev, 0);
	zpci_unmap_resources(pdev);
	return clp_disable_fh(zdev);
}

struct dev_pm_ops pcibios_pm_ops = {
	.thaw_noirq = zpci_restore,
	.freeze_noirq = zpci_freeze,
	.restore_noirq = zpci_restore,
	.poweroff_noirq = zpci_freeze,
};
#endif /* CONFIG_HIBERNATE_CALLBACKS */

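/*
 * Domain allocation: with unique UIDs (zpci_unique_uid) the UID is
 * used as the domain number directly; UIDs beyond ZPCI_NR_DEVICES lie
 * outside the bitmap and are accepted as-is. Without unique UIDs the
 * first free bit in the domain bitmap is handed out.
 */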
static int zpci_alloc_domain(struct zpci_dev *zdev)
{
	if (zpci_unique_uid) {
		zdev->domain = (u16) zdev->uid;
		if (zdev->domain >= ZPCI_NR_DEVICES)
			return 0;

		spin_lock(&zpci_domain_lock);
		if (test_bit(zdev->domain, zpci_domain)) {
			spin_unlock(&zpci_domain_lock);
			pr_err("Adding PCI function %08x failed because domain %04x is already assigned\n",
				zdev->fid, zdev->domain);
			return -EEXIST;
		}
		set_bit(zdev->domain, zpci_domain);
		spin_unlock(&zpci_domain_lock);
		return 0;
	}

	spin_lock(&zpci_domain_lock);
	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	if (zdev->domain == ZPCI_NR_DEVICES) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Adding PCI function %08x failed because the configured limit of %d is reached\n",
			zdev->fid, ZPCI_NR_DEVICES);
		return -ENOSPC;
	}
	set_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return 0;
}

static void zpci_free_domain(struct zpci_dev *zdev)
{
	if (zdev->domain >= ZPCI_NR_DEVICES)
		return;

	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	zpci_exit_slot(zdev);
	zpci_cleanup_bus_resources(zdev);
	zpci_destroy_iommu(zdev);
	zpci_free_domain(zdev);

	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);

	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree(zdev);
}

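/*
 * Each zPCI function gets its own PCI root bus (always bus number
 * ZPCI_BUS_NR) with pci_root_ops as its config accessors.
 */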
static int zpci_scan_bus(struct zpci_dev *zdev)
{
	LIST_HEAD(resources);
	int ret;

	ret = zpci_setup_bus_resources(zdev, &resources);
	if (ret)
		goto error;

	zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
				      zdev, &resources);
	if (!zdev->bus) {
		ret = -EIO;
		goto error;
	}
	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	pci_bus_add_devices(zdev->bus);
	return 0;

error:
	zpci_cleanup_bus_resources(zdev);
	pci_free_resource_list(&resources);
	return ret;
}

int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;

	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

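/*
 * Bring a newly discovered function online: allocate a domain, set up
 * the IOMMU, enable the function if it is already configured, scan its
 * root bus, add it to the global device list and register its hotplug
 * slot. Error paths unwind in reverse order.
 */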
int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto out_free;

	mutex_init(&zdev->lock);
	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto out_destroy_iommu;
	}
	rc = zpci_scan_bus(zdev);
	if (rc)
		goto out_disable;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	zpci_init_slot(zdev);

	return 0;

out_disable:
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
out_destroy_iommu:
	zpci_destroy_iommu(zdev);
out_free:
	zpci_free_domain(zdev);
out:
	return rc;
}

void zpci_remove_device(struct zpci_dev *zdev)
{
	if (!zdev->bus)
		return;

	pci_stop_root_bus(zdev->bus);
	pci_remove_root_bus(zdev->bus);
}

int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

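/*
 * Allocate the global data structures: the FMB cache (FMBs must be
 * suitably aligned, see the WARN_ON in zpci_fmb_enable_device), the
 * BAR mapping table and its allocation bitmap.
 */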
static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
static unsigned int s390_pci_no_mio __initdata;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

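/*
 * Handle the "pci=" kernel parameter: "off" disables PCI support,
 * "nomio" sticks with the function-handle based instructions even when
 * MIO is available, and "force_floating" selects floating rather than
 * directed interrupt delivery.
 */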
char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		s390_pci_no_mio = 1;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	return str;
}

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}

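/*
 * Base initialization: bail out silently if PCI is disabled or the
 * required zPCI facilities (bits 69 and 71) are missing. If facility
 * 153 (MIO instruction support) is present and not disabled on the
 * command line, flip the have_mio static key and set control register
 * 2 bit 5 so the MIO instructions can be used.
 */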
static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71))
		return 0;

	if (test_facility(153) && !s390_pci_no_mio) {
		static_branch_enable(&have_mio);
		ctl_set_bit(2, 5);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);

void zpci_rescan(void)
{
	if (zpci_is_enabled())
		clp_rescan_pci_devices_simple();
}