/*
 * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
 *
 * Author: Tony Li <tony.li@freescale.com>
 *	   Jason Jin <Jason.jin@freescale.com>
 *
 * The hwirq alloc and free code is reused from sysdev/mpic_msi.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2 of the
 * License.
 *
 */
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <sysdev/fsl_soc.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
#include <asm/fsl_hcalls.h>

#include "fsl_msi.h"
#include "fsl_pci.h"

#define MSIIR_OFFSET_MASK	0xfffff
#define MSIIR_IBS_SHIFT		0
#define MSIIR_SRS_SHIFT		5
#define MSIIR1_IBS_SHIFT	4
#define MSIIR1_SRS_SHIFT	0
#define MSI_SRS_MASK		0xf
#define MSI_IBS_MASK		0x1f

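/*
 * Encode an MSIR register index and a bit (interrupt) index within that
 * register into a single hwirq number, using the per-controller shifts
 * selected in fsl_of_msi_probe() (MSIIR vs. MSIIR1 register layouts).
 */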
#define msi_hwirq(msi, msir_index, intr_index) \
		((msir_index) << (msi)->srs_shift | \
		 ((intr_index) << (msi)->ibs_shift))

static LIST_HEAD(msi_head);

struct fsl_msi_feature {
	u32 fsl_pic_ip;
	u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */
};

struct fsl_msi_cascade_data {
	struct fsl_msi *msi_data;
	int index;
	int virq;
};

static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

/*
 * Nothing to do here: the MSIR register was already read in the cascade
 * handler, which acknowledged this MSI interrupt.
 */
static void fsl_msi_end_irq(struct irq_data *d)
{
}

static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
{
	struct fsl_msi *msi_data = irqd->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
	int cascade_virq, srs;

	srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK;
	cascade_virq = msi_data->cascade_array[srs]->virq;

	seq_printf(p, " fsl-msi-%d", cascade_virq);
}

static struct irq_chip fsl_msi_chip = {
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_ack	= fsl_msi_end_irq,
	.irq_print_chip = fsl_msi_print_chip,
};

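/*
 * irq_domain map callback: attach the MSI irq_chip and an edge handler to
 * each virq, and stash the owning fsl_msi as chip data so it can be
 * recovered at teardown time.
 */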
static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
				irq_hw_number_t hw)
{
	struct fsl_msi *msi_data = h->host_data;
	struct irq_chip *chip = &fsl_msi_chip;

	irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);

	irq_set_chip_data(virq, msi_data);
	irq_set_chip_and_handler(virq, chip, handle_edge_irq);

	return 0;
}

static const struct irq_domain_ops fsl_msi_host_ops = {
	.map = fsl_msi_host_map,
};

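/*
 * Create the hwirq bitmap and mark every hwirq as reserved; the ones that
 * are actually backed by an MSIR register are released again in
 * fsl_msi_setup_hwirq().
 */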
static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
{
	int rc, hwirq;

	rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
			      irq_domain_get_of_node(msi_data->irqhost));
	if (rc)
		return rc;

	/*
	 * Reserve all the hwirqs
	 * The available hwirqs will be released in fsl_msi_setup_hwirq()
	 */
	for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++)
		msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq);

	return 0;
}

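/*
 * Undo fsl_setup_msi_irqs(): drop the virq->msi_desc link, dispose of the
 * mapping and return each hwirq to the owning controller's bitmap.
 */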
static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	struct fsl_msi *msi_data;
	irq_hw_number_t hwirq;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->irq)
			continue;
		hwirq = virq_to_hw(entry->irq);
		msi_data = irq_get_chip_data(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
	}
}

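/*
 * Build the MSI message for a hwirq: the address is the physical MSIIR
 * register (taken from the "msi-address-64" property when present,
 * otherwise derived from the PCI controller's IMMR base plus the MSIIR
 * offset), and the data is the hwirq value itself, byte-swapped on chips
 * with erratum PIC1.
 */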
static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
				struct msi_msg *msg,
				struct fsl_msi *fsl_msi_data)
{
	struct fsl_msi *msi_data = fsl_msi_data;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	u64 address; /* Physical address of the MSIIR */
	int len;
	const __be64 *reg;

	/* If the msi-address-64 property exists, then use it */
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64)))
		address = be64_to_cpup(reg);
	else
		address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;

	msg->address_lo = lower_32_bits(address);
	msg->address_hi = upper_32_bits(address);

	/*
	 * MPIC version 2.0 has erratum PIC1, which breaks both MSI and
	 * MSI-X. Byte-swapping the message data is a workaround that lets
	 * MSI-X work on the affected chips; plain MSI is rejected in
	 * fsl_setup_msi_irqs() instead.
	 */
	if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
		msg->data = __swab32(hwirq);
	else
		msg->data = hwirq;

	pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
		 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
		 (hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK);
}

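/*
 * pci_controller_ops hook: for every MSI entry of the device, allocate a
 * hwirq from a matching MSI controller (restricted by the PCI node's
 * "fsl,msi" phandle when present), map it to a virq and program the
 * message into the device.
 */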
static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct device_node *np;
	phandle phandle = 0;
	int rc, hwirq = -ENOMEM;
	unsigned int virq;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct fsl_msi *msi_data;

	if (type == PCI_CAP_ID_MSI) {
		/*
		 * MPIC version 2.0 has erratum PIC1, which prevents plain
		 * MSI from working, so refuse MSI on any board with this
		 * erratum.
		 */
		list_for_each_entry(msi_data, &msi_head, list)
			if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
				return -EINVAL;
	}

	/*
	 * If the PCI node has an fsl,msi property, then we need to use it
	 * to find the specific MSI.
	 */
	np = of_parse_phandle(hose->dn, "fsl,msi", 0);
	if (np) {
		if (of_device_is_compatible(np, "fsl,mpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi-v4.3"))
			phandle = np->phandle;
		else {
			dev_err(&pdev->dev,
				"node %s has an invalid fsl,msi phandle %u\n",
				hose->dn->full_name, np->phandle);
			return -EINVAL;
		}
	}

	for_each_pci_msi_entry(entry, pdev) {
		/*
		 * Loop over all the MSI controllers until we find one that
		 * has an available interrupt.
		 */
		list_for_each_entry(msi_data, &msi_head, list) {
			/*
			 * If the PCI node has an fsl,msi property, then we
			 * restrict our search to the corresponding MSI node.
			 * The simplest way is to skip over MSI nodes with the
			 * wrong phandle. Under the Freescale hypervisor, this
			 * has the additional benefit of skipping over MSI
			 * nodes that are not mapped in the PAMU.
			 */
			if (phandle && (phandle != msi_data->phandle))
				continue;

			hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
			if (hwirq >= 0)
				break;
		}

		if (hwirq < 0) {
			rc = hwirq;
			dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
			goto out_free;
		}

		virq = irq_create_mapping(msi_data->irqhost, hwirq);

		if (!virq) {
			dev_err(&pdev->dev, "failed to map hwirq %i\n", hwirq);
			msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
			rc = -ENOSPC;
			goto out_free;
		}
		/* chip_data is msi_data via host->hostdata in host->map() */
		irq_set_msi_desc(virq, entry);

		fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;

out_free:
	/* freed by the caller of this function */
	return rc;
}

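/*
 * Cascade handler for one MSIR register: read the register (directly, or
 * under the Freescale hypervisor via the fh_vmpic_get_msir() hypercall)
 * and call the handler of every MSI whose bit is set.
 */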
static irqreturn_t fsl_msi_cascade(int irq, void *data)
{
	unsigned int cascade_irq;
	struct fsl_msi *msi_data;
	int msir_index = -1;
	u32 msir_value = 0;
	u32 intr_index;
	u32 have_shift = 0;
	struct fsl_msi_cascade_data *cascade_data = data;
	irqreturn_t ret = IRQ_NONE;

	msi_data = cascade_data->msi_data;

	msir_index = cascade_data->index;

	if (msir_index >= NR_MSI_REG_MAX)
		return IRQ_NONE;

	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case FSL_PIC_IP_MPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs,
			msir_index * 0x10);
		break;
	case FSL_PIC_IP_IPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
		break;
#ifdef CONFIG_EPAPR_PARAVIRT
	case FSL_PIC_IP_VMPIC: {
		unsigned int err;

		err = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
		if (err) {
			pr_err("fsl-msi: fh_vmpic_get_msir() failed for irq %u (ret=%u)\n",
			       irq, err);
			msir_value = 0;
		}
		break;
	}
#endif
	}

	while (msir_value) {
		intr_index = ffs(msir_value) - 1;

		cascade_irq = irq_linear_revmap(msi_data->irqhost,
				msi_hwirq(msi_data, msir_index,
					  intr_index + have_shift));
		if (cascade_irq) {
			generic_handle_irq(cascade_irq);
			ret = IRQ_HANDLED;
		}
		have_shift += intr_index + 1;
		msir_value = msir_value >> (intr_index + 1);
	}

	return ret;
}

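/*
 * Tear down a controller: free the cascade IRQs and their per-register
 * data, release the hwirq bitmap and unmap the registers (never mapped in
 * the VMPIC case). Also used as the error path of fsl_of_msi_probe().
 */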
static int fsl_of_msi_remove(struct platform_device *ofdev)
{
	struct fsl_msi *msi = platform_get_drvdata(ofdev);
	int virq, i;

	if (msi->list.prev != NULL)
		list_del(&msi->list);
	for (i = 0; i < NR_MSI_REG_MAX; i++) {
		if (msi->cascade_array[i]) {
			virq = msi->cascade_array[i]->virq;

			BUG_ON(!virq);

			free_irq(virq, msi->cascade_array[i]);
			kfree(msi->cascade_array[i]);
			irq_dispose_mapping(virq);
		}
	}
	if (msi->bitmap.bitmap)
		msi_bitmap_free(&msi->bitmap);
	if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
		iounmap(msi->msi_regs);
	kfree(msi);

	return 0;
}

static struct lock_class_key fsl_msi_irq_class;

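/*
 * Wire up one MSIR register: map and request its cascade interrupt, then
 * release the corresponding hwirqs to the allocator so that
 * fsl_setup_msi_irqs() can hand them out.
 */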
static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
			       int offset, int irq_index)
{
	struct fsl_msi_cascade_data *cascade_data = NULL;
	int virt_msir, i, ret;

	virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
	if (!virt_msir) {
		dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
			__func__, irq_index);
		return 0;
	}

	cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
	if (!cascade_data) {
		dev_err(&dev->dev, "No memory for MSI cascade data\n");
		return -ENOMEM;
	}
	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class);
	cascade_data->index = offset;
	cascade_data->msi_data = msi;
	cascade_data->virq = virt_msir;
	msi->cascade_array[irq_index] = cascade_data;

	ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD,
			  "fsl-msi-cascade", cascade_data);
	if (ret) {
		dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n",
			virt_msir, ret);
		return ret;
	}

	/* Release the hwirqs corresponding to this MSI register */
	for (i = 0; i < IRQS_PER_MSI_REG; i++)
		msi_bitmap_free_hwirqs(&msi->bitmap,
				       msi_hwirq(msi, offset, i), 1);

	return 0;
}

static const struct of_device_id fsl_of_msi_ids[];
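/*
 * Probe one MSI controller node: map its registers (or detect the
 * hypervisor VMPIC case), pick the MSIIR or MSIIR1 register layout based
 * on the compatible string, set up the cascade interrupts described by
 * "msi-available-ranges" (or all of them by default), and install the MSI
 * ops on every PCI controller.
 */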
static int fsl_of_msi_probe(struct platform_device *dev)
{
	const struct of_device_id *match;
	struct fsl_msi *msi;
	struct resource res, msiir;
	int err, i, j, irq_index, count;
	const u32 *p;
	const struct fsl_msi_feature *features;
	int len;
	u32 offset;
	struct pci_controller *phb;

	match = of_match_device(fsl_of_msi_ids, &dev->dev);
	if (!match)
		return -EINVAL;
	features = match->data;

	printk(KERN_DEBUG "Setting up Freescale MSI support\n");

	msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
	if (!msi) {
		dev_err(&dev->dev, "No memory for MSI structure\n");
		return -ENOMEM;
	}
	platform_set_drvdata(dev, msi);

	msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
				      NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);

	if (msi->irqhost == NULL) {
		dev_err(&dev->dev, "No memory for MSI irqhost\n");
		err = -ENOMEM;
		goto error_out;
	}

	/*
	 * Under the Freescale hypervisor, the msi nodes don't have a 'reg'
	 * property.  Instead, we use hypercalls to access the MSI.
	 */
	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
		err = of_address_to_resource(dev->dev.of_node, 0, &res);
		if (err) {
			dev_err(&dev->dev, "invalid resource for node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}

		msi->msi_regs = ioremap(res.start, resource_size(&res));
		if (!msi->msi_regs) {
			err = -ENOMEM;
			dev_err(&dev->dev, "could not map node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}
		msi->msiir_offset =
			features->msiir_offset + (res.start & 0xfffff);

		/*
		 * First read the MSIIR/MSIIR1 offset from the dts;
		 * on failure, use the hardcoded MSIIR offset.
		 */
		if (of_address_to_resource(dev->dev.of_node, 1, &msiir))
			msi->msiir_offset = features->msiir_offset +
					    (res.start & MSIIR_OFFSET_MASK);
		else
			msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK;
	}

	msi->feature = features->fsl_pic_ip;

	/* For erratum PIC1 on MPIC version 2.0 */
	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) == FSL_PIC_IP_MPIC
			&& (fsl_mpic_primary_get_version() == 0x0200))
		msi->feature |= MSI_HW_ERRATA_ENDIAN;

	/*
	 * Remember the phandle, so that we can match with any PCI nodes
	 * that have an "fsl,msi" property.
	 */
	msi->phandle = dev->dev.of_node->phandle;

	err = fsl_msi_init_allocator(msi);
	if (err) {
		dev_err(&dev->dev, "Error allocating MSI bitmap\n");
		goto error_out;
	}

	p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);

	if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3") ||
	    of_device_is_compatible(dev->dev.of_node, "fsl,vmpic-msi-v4.3")) {
		msi->srs_shift = MSIIR1_SRS_SHIFT;
		msi->ibs_shift = MSIIR1_IBS_SHIFT;
		if (p)
			dev_warn(&dev->dev, "%s: does not support the msi-available-ranges property\n",
				__func__);

		for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1;
		     irq_index++) {
			err = fsl_msi_setup_hwirq(msi, dev,
						  irq_index, irq_index);
			if (err)
				goto error_out;
		}
	} else {
		static const u32 all_avail[] =
			{ 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG };

		msi->srs_shift = MSIIR_SRS_SHIFT;
		msi->ibs_shift = MSIIR_IBS_SHIFT;

		if (p && len % (2 * sizeof(u32)) != 0) {
			dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
				__func__);
			err = -EINVAL;
			goto error_out;
		}

		if (!p) {
			p = all_avail;
			len = sizeof(all_avail);
		}

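		/*
		 * Each msi-available-ranges entry is a <first count> pair of
		 * hwirq numbers; both values must be multiples of
		 * IRQS_PER_MSI_REG so that whole MSIR registers can be
		 * handed out below.
		 */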
		for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
			if (p[i * 2] % IRQS_PER_MSI_REG ||
			    p[i * 2 + 1] % IRQS_PER_MSI_REG) {
				pr_warn("%s: %s: msi available range of %u at %u is not IRQ-aligned\n",
				       __func__, dev->dev.of_node->full_name,
				       p[i * 2 + 1], p[i * 2]);
				err = -EINVAL;
				goto error_out;
			}

			offset = p[i * 2] / IRQS_PER_MSI_REG;
			count = p[i * 2 + 1] / IRQS_PER_MSI_REG;

			for (j = 0; j < count; j++, irq_index++) {
				err = fsl_msi_setup_hwirq(msi, dev, offset + j,
							  irq_index);
				if (err)
					goto error_out;
			}
		}
	}

	list_add_tail(&msi->list, &msi_head);

	/*
	 * Apply the MSI ops to all the controllers.
	 * It doesn't hurt to reassign the same ops,
	 * but bail out if we find another MSI driver.
	 */
	list_for_each_entry(phb, &hose_list, list_node) {
		if (!phb->controller_ops.setup_msi_irqs) {
			phb->controller_ops.setup_msi_irqs = fsl_setup_msi_irqs;
			phb->controller_ops.teardown_msi_irqs = fsl_teardown_msi_irqs;
		} else if (phb->controller_ops.setup_msi_irqs != fsl_setup_msi_irqs) {
			dev_err(&dev->dev, "Different MSI driver already installed!\n");
			err = -ENODEV;
			goto error_out;
		}
	}
	return 0;
error_out:
	fsl_of_msi_remove(dev);
	return err;
}

static const struct fsl_msi_feature mpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_MPIC,
	.msiir_offset = 0x140,
};

static const struct fsl_msi_feature ipic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_IPIC,
	.msiir_offset = 0x38,
};

static const struct fsl_msi_feature vmpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
	.msiir_offset = 0,
};

static const struct of_device_id fsl_of_msi_ids[] = {
	{
		.compatible = "fsl,mpic-msi",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,mpic-msi-v4.3",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,ipic-msi",
		.data = &ipic_msi_feature,
	},
#ifdef CONFIG_EPAPR_PARAVIRT
	{
		.compatible = "fsl,vmpic-msi",
		.data = &vmpic_msi_feature,
	},
	{
		.compatible = "fsl,vmpic-msi-v4.3",
		.data = &vmpic_msi_feature,
	},
#endif
	{}
};

static struct platform_driver fsl_of_msi_driver = {
	.driver = {
		.name = "fsl-msi",
		.of_match_table = fsl_of_msi_ids,
	},
	.probe = fsl_of_msi_probe,
	.remove = fsl_of_msi_remove,
};

static __init int fsl_of_msi_init(void)
{
	return platform_driver_register(&fsl_of_msi_driver);
}

subsys_initcall(fsl_of_msi_init);