xref: /openbmc/linux/arch/s390/pci/pci.c (revision c1d45424)
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/pci.h>
#include <linux/msi.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#define DEBUG				/* enable pr_debug */

#define	SIC_IRQ_MODE_ALL		0
#define	SIC_IRQ_MODE_SINGLE		1

#define ZPCI_NR_DMA_SPACES		1
#define ZPCI_MSI_VEC_BITS		6
#define ZPCI_NR_DEVICES			CONFIG_PCI_NR_FUNCTIONS

/* list of all detected zpci devices */
LIST_HEAD(zpci_list);
EXPORT_SYMBOL_GPL(zpci_list);
DEFINE_MUTEX(zpci_list_lock);
EXPORT_SYMBOL_GPL(zpci_list_lock);

static struct pci_hp_callback_ops *hotplug_ops;

static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

struct callback {
	irq_handler_t	handler;
	void		*data;
};

struct zdev_irq_map {
	unsigned long	aibv;		/* AI bit vector */
	int		msi_vecs;	/* consecutive MSI-vectors used */
	int		__unused;
	struct callback	cb[ZPCI_NR_MSI_VECS]; /* callback handler array */
	spinlock_t	lock;		/* protect callbacks against de-reg */
};

struct intr_bucket {
	/* allocation map of adapters, one bit per device, corresponds to one irq nr */
	unsigned long	*alloc;
	/* AI summary bit, global page for all devices */
	unsigned long	*aisb;
	/* pointer to aibv and callback data in zdev */
	struct zdev_irq_map *imap[ZPCI_NR_DEVICES];
	/* protects the whole bucket struct */
	spinlock_t	lock;
};

static struct intr_bucket *bucket;

/* Adapter interrupt definitions */
static void zpci_irq_handler(struct airq_struct *airq);

static struct airq_struct zpci_airq = {
	.handler = zpci_irq_handler,
	.isc = PCI_ISC,
};

/* I/O Map */
static DEFINE_SPINLOCK(zpci_iomap_lock);
static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

/* highest irq summary bit */
static int __read_mostly aisb_max;

static struct kmem_cache *zdev_irq_cache;
static struct kmem_cache *zdev_fmb_cache;

static inline int irq_to_msi_nr(unsigned int irq)
{
	return irq & ZPCI_MSI_MASK;
}

static inline int irq_to_dev_nr(unsigned int irq)
{
	return irq >> ZPCI_MSI_VEC_BITS;
}

static inline struct zdev_irq_map *get_imap(unsigned int irq)
{
	return bucket->imap[irq_to_dev_nr(irq)];
}
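
/*
 * An irq number packs the adapter summary bit (which doubles as the
 * device index) and the MSI vector number into one integer:
 *
 *	irq = (dev_nr << ZPCI_MSI_VEC_BITS) | msi_nr
 *
 * e.g. with ZPCI_MSI_VEC_BITS == 6, irq 0x83 decodes to device 2,
 * vector 3.  The helpers above undo this packing; the encoding side is
 * in zpci_setup_msi().
 */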

struct zpci_dev *get_zdev(struct pci_dev *pdev)
{
	return (struct zpci_dev *) pdev->sysdata;
}

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	mutex_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			break;
		}
	}
	mutex_unlock(&zpci_list_lock);
	return zdev;
}

bool zpci_fid_present(u32 fid)
{
	return get_zdev_by_fid(fid) != NULL;
}

static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
	return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register adapter interruptions */
static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
			      u64 aibv)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
	struct zpci_fib *fib;
	int rc;

	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	fib->isc = PCI_ISC;
	fib->noi = zdev->irq_map->msi_vecs;
	fib->sum = 1;		/* enable summary notifications */
	fib->aibv = aibv;
	fib->aibvo = 0;		/* every function has its own page */
	fib->aisb = (u64) bucket->aisb + aisb / 8;
	fib->aisbo = aisb & ZPCI_MSI_MASK;

	rc = s390pci_mod_fc(req, fib);
	pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);

	free_page((unsigned long) fib);
	return rc;
}

struct mod_pci_args {
	u64 base;
	u64 limit;
	u64 iota;
	u64 fmb_addr;
};

static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
	struct zpci_fib *fib;
	int rc;

	/* The FIB must be available even if it's not used */
	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	fib->pba = args->base;
	fib->pal = args->limit;
	fib->iota = args->iota;
	fib->fmb_addr = args->fmb_addr;

	rc = s390pci_mod_fc(req, fib);
	free_page((unsigned long) fib);
	return rc;
}
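
/*
 * All of the "Modify PCI" wrappers below funnel through mod_pci(); they
 * differ only in the function-control code and in which FIB fields they
 * fill in:
 *
 *	zpci_register_ioat()	-> ZPCI_MOD_FC_REG_IOAT   (base/limit/iota)
 *	zpci_unregister_ioat()	-> ZPCI_MOD_FC_DEREG_IOAT
 *	zpci_unregister_airq()	-> ZPCI_MOD_FC_DEREG_INT
 *	zpci_fmb_*_device()	-> ZPCI_MOD_FC_SET_MEASURE (fmb_addr)
 */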

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	struct mod_pci_args args = { base, limit, iota, 0 };

	WARN_ON_ONCE(iota & 0x3fff);
	args.iota |= ZPCI_IOTA_RTTO_FLAG;
	return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister adapter interruptions */
static int zpci_unregister_airq(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	if (zdev->fmb)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	args.fmb_addr = virt_to_phys(zdev->fmb);
	return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };
	int rc;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);

	kmem_cache_free(zdev_fmb_cache, zdev->fmb);
	zdev->fmb = NULL;
	return rc;
}

#define ZPCI_PCIAS_CFGSPC	15

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = s390pci_load(&data, req, offset);
	if (!rc) {
		data = data << ((8 - len) * 8);
		data = le64_to_cpu(data);
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data = cpu_to_le64(data);
	data = data >> ((8 - len) * 8);
	rc = s390pci_store(data, req, offset);
	return rc;
}
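
/*
 * Worked example for the shift/swap dance above (s390 is big-endian,
 * PCI config space little-endian), assuming s390pci_load() returns the
 * raw config-space bytes right-aligned: a 2-byte read of the value
 * 0x1234 yields data == 0x3412; shifting left by (8 - 2) * 8 bits gives
 * 0x3412000000000000, and le64_to_cpu() byte-swaps that to 0x1234 for
 * *val.  zpci_cfg_store() performs the same steps in reverse.
 */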

void enable_irq(unsigned int irq)
{
	struct msi_desc *msi = irq_get_msi_desc(irq);

	zpci_msi_set_mask_bits(msi, 1, 0);
}
EXPORT_SYMBOL_GPL(enable_irq);

void disable_irq(unsigned int irq)
{
	struct msi_desc *msi = irq_get_msi_desc(irq);

	zpci_msi_set_mask_bits(msi, 1, 1);
}
EXPORT_SYMBOL_GPL(disable_irq);

void pcibios_fixup_bus(struct pci_bus *bus)
{
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}

/* Create a virtual mapping cookie for a PCI BAR */
void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	u64 addr;
	int idx;

	if ((bar & 7) != bar)
		return NULL;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
	return (void __iomem *) addr;
}
EXPORT_SYMBOL_GPL(pci_iomap);
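
/*
 * Note that the cookie returned above is not a CPU mapping at all:
 * ZPCI_IOMAP_ADDR_BASE is ORed with the iomap table index shifted into
 * bits 48+.  The zpci I/O access helpers (e.g. zpci_memcpy_toio())
 * decode the index again and issue PCI load/store instructions against
 * the fh/bar pair stored in zpci_iomap_start[idx]; pci_iounmap() below
 * undoes the encoding the same way.
 */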

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx;

	idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = 0;
	zpci_iomap_start[idx].bar = 0;
	spin_unlock(&zpci_iomap_lock);
}
EXPORT_SYMBOL_GPL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_load(zdev, where, val, size);

	return ret;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_store(zdev, where, val, size);

	return ret;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

/* store the last handled bit to implement fair scheduling of devices */
static DEFINE_PER_CPU(unsigned long, next_sbit);
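/*
 * Scan strategy (as implemented below): resume at the per-cpu next_sbit,
 * sweep the summary-bit vector to the end, then wrap around once for the
 * bits that were skipped.  Before returning, interrupts are re-enabled
 * and the vector is checked one more time so that a summary bit set in
 * the meantime is not lost.
 */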
static void zpci_irq_handler(struct airq_struct *airq)
{
	unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
	int rescan = 0, max = aisb_max;
	struct zdev_irq_map *imap;

	inc_irq_stat(IRQIO_PCI);
	sbit = start;

scan:
	/* find summary_bit */
	for_each_set_bit_left_cont(sbit, bucket->aisb, max) {
		clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6));
		last = sbit;

		/* find vector bit */
		imap = bucket->imap[sbit];
		for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
			inc_irq_stat(IRQIO_MSI);
			clear_bit(63 - mbit, &imap->aibv);

			spin_lock(&imap->lock);
			if (imap->cb[mbit].handler)
				imap->cb[mbit].handler(mbit,
					imap->cb[mbit].data);
			spin_unlock(&imap->lock);
		}
	}

	if (rescan)
		goto out;

	/* scan the skipped bits */
	if (start > 0) {
		sbit = 0;
		max = start;
		start = 0;
		goto scan;
	}

	/* enable interrupts again */
	set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);

	/* check again to not lose initiative */
	rmb();
	max = aisb_max;
	sbit = find_first_bit_left(bucket->aisb, max);
	if (sbit != max) {
		rescan++;
		goto scan;
	}
out:
	/* store next device bit to scan */
	__get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
}

/* msi_vecs - number of requested interrupts; 0 puts the function into the error state */
static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	unsigned int aisb, msi_nr;
	struct msi_desc *msi;
	int rc;

	/* store the number of used MSI vectors */
	zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS);

	spin_lock(&bucket->lock);
	aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE);
	/* alloc map exhausted? */
	if (aisb == PAGE_SIZE) {
		spin_unlock(&bucket->lock);
		return -EIO;
	}
	set_bit(aisb, bucket->alloc);
	spin_unlock(&bucket->lock);

	zdev->aisb = aisb;
	if (aisb + 1 > aisb_max)
		aisb_max = aisb + 1;

	/* wire up IRQ shortcut pointer */
	bucket->imap[zdev->aisb] = zdev->irq_map;
	pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map);

	/* TODO: irq number 0 won't be found if we return fewer than the
	 * requested MSIs. Ignore it for now and fix in common code.
	 */
	msi_nr = aisb << ZPCI_MSI_VEC_BITS;

	list_for_each_entry(msi, &pdev->msi_list, list) {
		rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
					  aisb << ZPCI_MSI_VEC_BITS);
		if (rc)
			return rc;
		msi_nr++;
	}

	rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv);
	if (rc) {
		clear_bit(aisb, bucket->alloc);
		dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
		return rc;
	}
	return (zdev->irq_map->msi_vecs == msi_vecs) ?
		0 : zdev->irq_map->msi_vecs;
}

static void zpci_teardown_msi(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct msi_desc *msi;
	int aisb, rc;

	rc = zpci_unregister_airq(zdev);
	if (rc) {
		dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
		return;
	}

	msi = list_first_entry(&pdev->msi_list, struct msi_desc, list);
	aisb = irq_to_dev_nr(msi->irq);

	list_for_each_entry(msi, &pdev->msi_list, list)
		zpci_teardown_msi_irq(zdev, msi);

	clear_bit(aisb, bucket->alloc);
	if (aisb + 1 == aisb_max)
		aisb_max--;
}

int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	pr_debug("%s: requesting %d MSI interrupts...", __func__, nvec);
	if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
		return -EINVAL;
	return zpci_setup_msi(pdev, nvec);
}

void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
	pr_info("%s: on pdev: %p\n", __func__, pdev);
	zpci_teardown_msi(pdev);
}

static void zpci_map_resources(struct zpci_dev *zdev)
{
	struct pci_dev *pdev = zdev->pdev;
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
		pr_debug("BAR%i: -> start: %Lx  end: %Lx\n",
			i, pdev->resource[i].start, pdev->resource[i].end);
	}
}
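
/*
 * Because pci_iomap() returns a cookie rather than a real mapping, the
 * resource start/end values set above are iomap cookies too, not CPU
 * addresses; zpci_unmap_resources() hands them back to pci_iounmap()
 * unchanged.
 */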

static void zpci_unmap_resources(struct zpci_dev *zdev)
{
	struct pci_dev *pdev = zdev->pdev;
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap(pdev, (void __iomem *) pdev->resource[i].start);
	}
}

struct zpci_dev *zpci_alloc_device(void)
{
	struct zpci_dev *zdev;

	/* Alloc memory for our private pci device data */
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* Alloc aibv & callback space */
	zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
	if (!zdev->irq_map)
		goto error;
	WARN_ON((u64) zdev->irq_map & 0xff);
	return zdev;

error:
	kfree(zdev);
	return ERR_PTR(-ENOMEM);
}

void zpci_free_device(struct zpci_dev *zdev)
{
	kmem_cache_free(zdev_irq_cache, zdev->irq_map);
	kfree(zdev);
}

/*
 * Too late for any s390-specific setup: interrupts must already be set
 * up, which in turn requires DMA setup, and the PCI scan accesses config
 * space, which only works once the function handle is enabled.
 */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct resource *res;
	u16 cmd;
	int i;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];

		if (res->flags & IORESOURCE_IO)
			return -EINVAL;

		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	return 0;
}

int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return zpci_sysfs_add_device(&pdev->dev);
}

int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
{
	int msi_nr = irq_to_msi_nr(irq);
	struct zdev_irq_map *imap;
	struct msi_desc *msi;

	msi = irq_get_msi_desc(irq);
	if (!msi)
		return -EIO;

	imap = get_imap(irq);
	spin_lock_init(&imap->lock);

	pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__,
		irq_to_dev_nr(irq), msi_nr);
	imap->cb[msi_nr].handler = handler;
	imap->cb[msi_nr].data = data;

	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
	 */
	zpci_msi_set_mask_bits(msi, 1, 0);
	return 0;
}

void zpci_free_irq(unsigned int irq)
{
	struct zdev_irq_map *imap = get_imap(irq);
	int msi_nr = irq_to_msi_nr(irq);
	unsigned long flags;

	pr_debug("%s: for irq: %d\n", __func__, irq);

	spin_lock_irqsave(&imap->lock, flags);
	imap->cb[msi_nr].handler = NULL;
	imap->cb[msi_nr].data = NULL;
	spin_unlock_irqrestore(&imap->lock, flags);
}

int request_irq(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *devname, void *dev_id)
{
	pr_debug("%s: irq: %d  handler: %p  flags: %lx  dev: %s\n",
		__func__, irq, handler, irqflags, devname);

	return zpci_request_irq(irq, handler, dev_id);
}
EXPORT_SYMBOL_GPL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
{
	zpci_free_irq(irq);
}
EXPORT_SYMBOL_GPL(free_irq);
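
/*
 * Note: this file provides its own request_irq()/free_irq() (and the
 * enable_irq()/disable_irq() pair further up) instead of the generic
 * kernel implementations, because zPCI interrupts bypass the common IRQ
 * layer entirely and are dispatched straight from zpci_irq_handler()
 * via the per-device callback array.
 */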

static int __init zpci_irq_init(void)
{
	int cpu, rc;

	bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
	if (!bucket)
		return -ENOMEM;

	bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!bucket->aisb) {
		rc = -ENOMEM;
		goto out_aisb;
	}

	bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!bucket->alloc) {
		rc = -ENOMEM;
		goto out_alloc;
	}

	rc = register_adapter_interrupt(&zpci_airq);
	if (rc)
		goto out_ai;
	/* Set the local summary indicator to 1 so we are called every time
	 * an interrupt arrives for the ISC. */
	*zpci_airq.lsi_ptr = 1;

	for_each_online_cpu(cpu)
		per_cpu(next_sbit, cpu) = 0;

	spin_lock_init(&bucket->lock);
	set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
	return 0;

out_ai:
	free_page((unsigned long) bucket->alloc);
out_alloc:
	free_page((unsigned long) bucket->aisb);
out_aisb:
	kfree(bucket);
	return rc;
}

static void zpci_irq_exit(void)
{
	free_page((unsigned long) bucket->alloc);
	free_page((unsigned long) bucket->aisb);
	unregister_adapter_interrupt(&zpci_airq);
	kfree(bucket);
}

static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
						unsigned long flags, int domain)
{
	struct resource *r;
	char *name;
	int rc;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return ERR_PTR(-ENOMEM);
	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->parent = &iomem_resource;
	name = kmalloc(18, GFP_KERNEL);
	if (!name) {
		kfree(r);
		return ERR_PTR(-ENOMEM);
	}
	sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
	r->name = name;

	rc = request_resource(&iomem_resource, r);
	if (rc)
		pr_debug("request resource %pR failed\n", r);
	return r;
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	int entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
	if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap);
	spin_unlock(&zpci_iomap_lock);
}

int pcibios_add_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);

	zdev->pdev = pdev;
	zpci_debug_init_device(zdev);
	zpci_fmb_enable_device(zdev);
	zpci_map_resources(zdev);

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);

	zpci_unmap_resources(zdev);
	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
	zdev->pdev = NULL;
}

static int zpci_scan_bus(struct zpci_dev *zdev)
{
	struct resource *res;
	LIST_HEAD(resources);
	int i;

	/* allocate a mapping entry for each used BAR */
	for (i = 0; i < PCI_BAR_COUNT; i++) {
		unsigned long addr, size, flags;
		int entry;

		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);

		size = 1UL << zdev->bars[i].size;

		res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
		if (IS_ERR(res)) {
			zpci_free_iomap(zdev, entry);
			return PTR_ERR(res);
		}
		pci_add_resource(&resources, res);
	}

	zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
				      zdev, &resources);
	if (!zdev->bus)
		return -EIO;

	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	return 0;
}
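
/*
 * Each zPCI function gets a PCI domain and a root bus of its own: the
 * bus is scanned at ZPCI_BUS_NR with a single devfn, ZPCI_DEVFN
 * (enforced in pci_read()/pci_write() above).  Assuming both constants
 * are 0, as in this tree, a function appears to the common PCI core as
 * <domain>:00:00.0.
 */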

static int zpci_alloc_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	if (zdev->domain == ZPCI_NR_DEVICES) {
		spin_unlock(&zpci_domain_lock);
		return -ENOSPC;
	}
	set_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return 0;
}

static void zpci_free_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;
	pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto out_free;

		zdev->state = ZPCI_FN_STATE_ONLINE;
	}
	rc = zpci_scan_bus(zdev);
	if (rc)
		goto out_disable;

	mutex_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	if (hotplug_ops)
		hotplug_ops->create_slot(zdev);
	mutex_unlock(&zpci_list_lock);

	return 0;

out_disable:
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
out_free:
	zpci_free_domain(zdev);
out:
	return rc;
}

void zpci_stop_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	/*
	 * Note: SCLP disables fh via set-pci-fn so don't
	 * do that here.
	 */
}
EXPORT_SYMBOL_GPL(zpci_stop_device);

static inline int barsize(u8 size)
{
	return (size) ? (1 << size) >> 10 : 0;
}
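
/*
 * barsize() converts the BAR size exponent (log2 of the size in bytes,
 * as also used in zpci_scan_bus() above) to KiB: e.g. size == 20 yields
 * (1 << 20) >> 10 == 1024 KiB, i.e. 1 MiB.
 */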

static int zpci_mem_init(void)
{
	zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
				L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
	if (!zdev_irq_cache)
		goto error_zdev;

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
				16, 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	/* TODO: use realloc */
	zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
				   GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;
	return 0;

error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	kmem_cache_destroy(zdev_irq_cache);
error_zdev:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_irq_cache);
	kmem_cache_destroy(zdev_fmb_cache);
}

void zpci_register_hp_ops(struct pci_hp_callback_ops *ops)
{
	mutex_lock(&zpci_list_lock);
	hotplug_ops = ops;
	mutex_unlock(&zpci_list_lock);
}
EXPORT_SYMBOL_GPL(zpci_register_hp_ops);

void zpci_deregister_hp_ops(void)
{
	mutex_lock(&zpci_list_lock);
	hotplug_ops = NULL;
	mutex_unlock(&zpci_list_lock);
}
EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);

unsigned int s390_pci_probe;
EXPORT_SYMBOL_GPL(s390_pci_probe);

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "on")) {
		s390_pci_probe = 1;
		return NULL;
	}
	return str;
}
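
/*
 * PCI probing is opt-in here: pcibios_setup() above is handed each
 * "pci=" option from the kernel command line, so booting with "pci=on"
 * sets s390_pci_probe and lets pci_base_init() below run.
 */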

static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(2) || !test_facility(69) ||
	    !test_facility(71) || !test_facility(72))
		return 0;

	pr_info("Probing PCI hardware: PCI:%d  SID:%d  AEN:%d\n",
		test_facility(69), test_facility(70),
		test_facility(71));

	rc = zpci_debug_init();
	if (rc)
		return rc;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_msihash_init();
	if (rc)
		goto out_hash;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_find_pci_devices();
	if (rc)
		goto out_find;

	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_msihash_exit();
out_hash:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
	return rc;
}
subsys_initcall(pci_base_init);