/*
 * Copyright 2007, Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */


#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/of_platform.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <asm/dcr.h>
#include <asm/machdep.h>
#include <asm/prom.h>


/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG	0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG	0x3
#define MSIC_BASE_ADDR_LO_REG	0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG	0x5
#define MSIC_WRITE_OFFSET_REG	0x6


/* MSIC control register flags */
#define MSIC_CTRL_ENABLE		0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE	0x0002
#define MSIC_CTRL_IRQ_ENABLE		0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE	0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT	16
#define MSIC_FIFO_SIZE_BYTES	(1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
 * 8-9 of the MSIC control reg.
 */
#define MSIC_CTRL_FIFO_SIZE	(((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)
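/* With the 64KB FIFO (shift 16) used here, this evaluates to 0x100. */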

/*
 * We need to mask the read/write offsets to make sure they stay within
 * the bounds of the FIFO. Also they should always be 16-byte aligned.
 */
#define MSIC_FIFO_SIZE_MASK	((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)
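/* With the 64KB FIFO this mask is 0xFFF0. */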

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE	0x10


struct axon_msic {
	struct irq_domain *irq_domain;
	__le32 *fifo_virt;
	dma_addr_t fifo_phys;
	dcr_host_t dcr_host;
	u32 read_offset;
#ifdef DEBUG
	u32 __iomem *trigger;
#endif
};

#ifdef DEBUG
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
#else
static inline void axon_msi_debug_setup(struct device_node *dn,
					struct axon_msic *msic) { }
#endif


static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
{
	pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", dcr_n, val);

	dcr_write(msic->dcr_host, dcr_n, val);
}

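/*
 * Chained handler for the MSIC's cascade interrupt.  Each MSI is DMAed
 * into the FIFO as a 16-byte entry whose low halfword is the virq we
 * stashed in the MSI message data, so drain the FIFO, dispatching each
 * entry with generic_handle_irq(), until the read offset catches up with
 * the hardware write offset.  Stale entries (DMA not yet visible) are
 * retried briefly before we give up.
 */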
static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct axon_msic *msic = irq_get_handler_data(irq);
	u32 write_offset, msi;
	int idx;
	int retry = 0;

	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
	pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);

	/* write_offset doesn't wrap properly, so we have to mask it */
	write_offset &= MSIC_FIFO_SIZE_MASK;

	while (msic->read_offset != write_offset && retry < 100) {
		idx  = msic->read_offset / sizeof(__le32);
		msi  = le32_to_cpu(msic->fifo_virt[idx]);
		msi &= 0xFFFF;

		pr_devel("axon_msi: woff %x roff %x msi %x\n",
			  write_offset, msic->read_offset, msi);

		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
			generic_handle_irq(msi);
			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
		} else {
			/*
			 * Reading the MSIC_WRITE_OFFSET_REG does not
			 * reliably flush the outstanding DMA to the
			 * FIFO buffer. Here we were reading stale
			 * data, so we need to retry.
			 */
			udelay(1);
			retry++;
			pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
			continue;
		}

		if (retry) {
			pr_devel("axon_msi: late irq 0x%x, retry %d\n",
				 msi, retry);
			retry = 0;
		}

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	if (retry) {
		printk(KERN_WARNING "axon_msi: irq timed out\n");

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	chip->irq_eoi(&desc->irq_data);
}

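/*
 * Walk up the device tree from the PCI device looking for an
 * "msi-translator" property, then follow that phandle to the MSIC node
 * and return the axon_msic hanging off its irq_domain.
 */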
static struct axon_msic *find_msi_translator(struct pci_dev *dev)
{
	struct irq_domain *irq_domain;
	struct device_node *dn, *tmp;
	const phandle *ph;
	struct axon_msic *msic = NULL;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return NULL;
	}

	for (; dn; dn = of_get_next_parent(dn)) {
		ph = of_get_property(dn, "msi-translator", NULL);
		if (ph)
			break;
	}

	if (!ph) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-translator property found\n");
		goto out_error;
	}

	tmp = dn;
	dn = of_find_node_by_phandle(*ph);
	of_node_put(tmp);
	if (!dn) {
		dev_dbg(&dev->dev,
			"axon_msi: msi-translator doesn't point to a node\n");
		goto out_error;
	}

	irq_domain = irq_find_host(dn);
	if (!irq_domain) {
		dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n",
			dn->full_name);
		goto out_error;
	}

	msic = irq_domain->host_data;

out_error:
	of_node_put(dn);

	return msic;
}

static int axon_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	if (!find_msi_translator(dev))
		return -ENODEV;

	return 0;
}

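/*
 * Build the MSI message address from the "msi-address-64" or
 * "msi-address-32" property of the device's node (or one of its parents).
 * The message data is filled in later, once a virq has been allocated.
 */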
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
	struct device_node *dn;
	struct msi_desc *entry;
	int len;
	const u32 *prop;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return -ENODEV;
	}

	entry = list_first_entry(&dev->msi_list, struct msi_desc, list);

	for (; dn; dn = of_get_next_parent(dn)) {
		if (entry->msi_attrib.is_64) {
			prop = of_get_property(dn, "msi-address-64", &len);
			if (prop)
				break;
		}

		prop = of_get_property(dn, "msi-address-32", &len);
		if (prop)
			break;
	}

	if (!prop) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-address-(32|64) properties found\n");
		return -ENOENT;
	}

	switch (len) {
	case 8:
		msg->address_hi = prop[0];
		msg->address_lo = prop[1];
		break;
	case 4:
		msg->address_hi = 0;
		msg->address_lo = prop[0];
		break;
	default:
		dev_dbg(&dev->dev,
			"axon_msi: malformed msi-address-(32|64) property\n");
		of_node_put(dn);
		return -EINVAL;
	}

	of_node_put(dn);

	return 0;
}

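/*
 * Allocate a virq for each MSI on the device and write the MSI message
 * with the virq number as its data; the cascade handler uses that number
 * to dispatch the interrupt when the entry pops out of the FIFO.
 */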
static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int virq;
	int rc;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct axon_msic *msic;

	msic = find_msi_translator(dev);
	if (!msic)
		return -ENODEV;

	rc = setup_msi_msg_address(dev, &msg);
	if (rc)
		return rc;

	list_for_each_entry(entry, &dev->msi_list, list) {
		virq = irq_create_direct_mapping(msic->irq_domain);
		if (virq == NO_IRQ) {
			dev_warn(&dev->dev,
				 "axon_msi: virq allocation failed!\n");
			return -ENOSPC;
		}
		dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);

		irq_set_msi_desc(virq, entry);
		msg.data = virq;
		write_msi_msg(virq, &msg);
	}

	return 0;
}

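/*
 * Undo axon_msi_setup_msi_irqs(): drop the msi_desc and dispose of each
 * allocated virq.
 */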
static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;

		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
	}
}

static struct irq_chip msic_irq_chip = {
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
	.irq_shutdown	= mask_msi_irq,
	.name		= "AXON-MSI",
};

static int msic_host_map(struct irq_domain *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);

	return 0;
}

static const struct irq_domain_ops msic_host_ops = {
	.map	= msic_host_map,
};

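/* Quiesce the MSIC on system shutdown by clearing its enable bits. */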
static void axon_msi_shutdown(struct platform_device *device)
{
	struct axon_msic *msic = dev_get_drvdata(&device->dev);
	u32 tmp;

	pr_devel("axon_msi: disabling %s\n",
		  msic->irq_domain->of_node->full_name);
	tmp  = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
	tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
	msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
}

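/*
 * Set up one MSIC: map its DCRs, allocate the DMA FIFO, create an
 * irq_domain, hook up the cascade handler, enable the hardware and
 * install the ppc_md MSI callbacks.
 */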
static int axon_msi_probe(struct platform_device *device)
{
	struct device_node *dn = device->dev.of_node;
	struct axon_msic *msic;
	unsigned int virq;
	int dcr_base, dcr_len;

	pr_devel("axon_msi: setting up dn %s\n", dn->full_name);

	msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL);
	if (!msic) {
		printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n",
		       dn->full_name);
		goto out;
	}

	dcr_base = dcr_resource_start(dn, 0);
	dcr_len = dcr_resource_len(dn, 0);

	if (dcr_base == 0 || dcr_len == 0) {
		printk(KERN_ERR
		       "axon_msi: couldn't parse dcr properties on %s\n",
			dn->full_name);
		goto out_free_msic;
	}

	msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
	if (!DCR_MAP_OK(msic->dcr_host)) {
		printk(KERN_ERR "axon_msi: dcr_map failed for %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
					     &msic->fifo_phys, GFP_KERNEL);
	if (!msic->fifo_virt) {
		printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	virq = irq_of_parse_and_map(dn, 0);
	if (virq == NO_IRQ) {
		printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",
		       dn->full_name);
		goto out_free_fifo;
	}
	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);

	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
	if (!msic->irq_domain) {
		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
		       dn->full_name);
		goto out_free_fifo;
	}

	irq_set_handler_data(virq, msic);
	irq_set_chained_handler(virq, axon_msi_cascade);
	pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);

	/* Enable the MSIC hardware */
	msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
	msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
				  msic->fifo_phys & 0xFFFFFFFF);
	msic_dcr_write(msic, MSIC_CTRL_REG,
			MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
			MSIC_CTRL_FIFO_SIZE);

	msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
				& MSIC_FIFO_SIZE_MASK;

	dev_set_drvdata(&device->dev, msic);

	ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
	ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
	ppc_md.msi_check_device = axon_msi_check_device;

	axon_msi_debug_setup(dn, msic);

	printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);

	return 0;

out_free_fifo:
	dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
			  msic->fifo_phys);
out_free_msic:
	kfree(msic);
out:

	return -1;
}

static const struct of_device_id axon_msi_device_id[] = {
	{
		.compatible	= "ibm,axon-msic"
	},
	{}
};

static struct platform_driver axon_msi_driver = {
	.probe		= axon_msi_probe,
	.shutdown	= axon_msi_shutdown,
	.driver = {
		.name = "axon-msi",
		.owner = THIS_MODULE,
		.of_match_table = axon_msi_device_id,
	},
};

static int __init axon_msi_init(void)
{
	return platform_driver_register(&axon_msi_driver);
}
subsys_initcall(axon_msi_init);


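/*
 * Debug-only helper: a debugfs file whose written value is poked into the
 * MSIC's memory-mapped register (msic->trigger), intended to let an MSI
 * be injected by hand for testing.
 */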
#ifdef DEBUG
static int msic_set(void *data, u64 val)
{
	struct axon_msic *msic = data;
	out_le32(msic->trigger, val);
	return 0;
}

static int msic_get(void *data, u64 *val)
{
	*val = 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");

void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
{
	char name[8];
	u64 addr;

	addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
	if (addr == OF_BAD_ADDR) {
		pr_devel("axon_msi: couldn't translate reg property\n");
		return;
	}

	msic->trigger = ioremap(addr, 0x4);
	if (!msic->trigger) {
		pr_devel("axon_msi: ioremap failed\n");
		return;
	}

	snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));

	if (!debugfs_create_file(name, 0600, powerpc_debugfs_root,
				 msic, &fops_msic)) {
		pr_devel("axon_msi: debugfs_create_file failed!\n");
		return;
	}
}
#endif /* DEBUG */