/*
 * Copyright 2007, Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */


#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/of_platform.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <asm/dcr.h>
#include <asm/machdep.h>
#include <asm/prom.h>

#include "cell.h"

/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG	0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG	0x3
#define MSIC_BASE_ADDR_LO_REG	0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG	0x5
#define MSIC_WRITE_OFFSET_REG	0x6


/* MSIC control register flags */
#define MSIC_CTRL_ENABLE		0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE	0x0002
#define MSIC_CTRL_IRQ_ENABLE		0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE	0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT	16
#define MSIC_FIFO_SIZE_BYTES	(1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
 * 8-9 of the MSIC control reg.
 */
#define MSIC_CTRL_FIFO_SIZE	(((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)
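/* For the 64KB FIFO used here this works out to (16 - 15) << 8 = 0x100. */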

/*
 * We need to mask the read/write offsets to make sure they stay within
 * the bounds of the FIFO. Also they should always be 16-byte aligned.
 */
#define MSIC_FIFO_SIZE_MASK	((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)
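/* For the 64KB FIFO this mask is 0xFFF0. */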

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE	0x10


struct axon_msic {
	struct irq_domain *irq_domain;
	__le32 *fifo_virt;
	dma_addr_t fifo_phys;
	dcr_host_t dcr_host;
	u32 read_offset;
#ifdef DEBUG
	u32 __iomem *trigger;
#endif
};

#ifdef DEBUG
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
#else
static inline void axon_msi_debug_setup(struct device_node *dn,
					struct axon_msic *msic) { }
#endif

static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
{
	pr_devel("axon_msi: dcr_write(dcr_n=0x%x, val=0x%x)\n", dcr_n, val);

	dcr_write(msic->dcr_host, dcr_n, val);
}

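/*
 * Chained handler for the MSIC's upstream interrupt. The hardware DMAs each
 * incoming MSI into the FIFO; drain every entry between our cached read
 * offset and the hardware's write offset, dispatching the low 16 bits of
 * each entry as a Linux irq number.
 */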
static void axon_msi_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct axon_msic *msic = irq_desc_get_handler_data(desc);
	u32 write_offset, msi;
	int idx;
	int retry = 0;

	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
	pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);

	/* write_offset doesn't wrap properly, so we have to mask it */
	write_offset &= MSIC_FIFO_SIZE_MASK;

	while (msic->read_offset != write_offset && retry < 100) {
		idx  = msic->read_offset / sizeof(__le32);
		msi  = le32_to_cpu(msic->fifo_virt[idx]);
		msi &= 0xFFFF;

		pr_devel("axon_msi: woff %x roff %x msi %x\n",
			  write_offset, msic->read_offset, msi);

		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
			generic_handle_irq(msi);
			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
		} else {
			/*
			 * Reading the MSIC_WRITE_OFFSET_REG does not
			 * reliably flush the outstanding DMA to the
			 * FIFO buffer. Here we were reading stale
			 * data, so we need to retry.
			 */
			udelay(1);
			retry++;
			pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
			continue;
		}

		if (retry) {
			pr_devel("axon_msi: late irq 0x%x, retry %d\n",
				 msi, retry);
			retry = 0;
		}

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	if (retry) {
		printk(KERN_WARNING "axon_msi: irq timed out\n");

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	chip->irq_eoi(&desc->irq_data);
}

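/*
 * Walk up the device tree from a PCI device looking for an "msi-translator"
 * phandle, and resolve it to the axon_msic registered as that node's
 * irq_domain host data.
 */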
static struct axon_msic *find_msi_translator(struct pci_dev *dev)
{
	struct irq_domain *irq_domain;
	struct device_node *dn, *tmp;
	const phandle *ph;
	struct axon_msic *msic = NULL;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return NULL;
	}

	for (; dn; dn = of_get_next_parent(dn)) {
		ph = of_get_property(dn, "msi-translator", NULL);
		if (ph)
			break;
	}

	if (!ph) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-translator property found\n");
		goto out_error;
	}

	tmp = dn;
	dn = of_find_node_by_phandle(*ph);
	of_node_put(tmp);
	if (!dn) {
		dev_dbg(&dev->dev,
			"axon_msi: msi-translator doesn't point to a node\n");
		goto out_error;
	}

	irq_domain = irq_find_host(dn);
	if (!irq_domain) {
		dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n",
			dn->full_name);
		goto out_error;
	}

	msic = irq_domain->host_data;

out_error:
	of_node_put(dn);

	return msic;
}

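/*
 * Fill in the MSI message address for a device by searching up the device
 * tree for an "msi-address-64" (64-bit capable devices) or "msi-address-32"
 * property.
 */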
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
	struct device_node *dn;
	struct msi_desc *entry;
	int len;
	const u32 *prop;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return -ENODEV;
	}

	entry = first_pci_msi_entry(dev);

	for (; dn; dn = of_get_next_parent(dn)) {
		if (entry->msi_attrib.is_64) {
			prop = of_get_property(dn, "msi-address-64", &len);
			if (prop)
				break;
		}

		prop = of_get_property(dn, "msi-address-32", &len);
		if (prop)
			break;
	}

	if (!prop) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-address-(32|64) properties found\n");
		return -ENOENT;
	}

	switch (len) {
	case 8:
		msg->address_hi = prop[0];
		msg->address_lo = prop[1];
		break;
	case 4:
		msg->address_hi = 0;
		msg->address_lo = prop[0];
		break;
	default:
		dev_dbg(&dev->dev,
			"axon_msi: malformed msi-address-(32|64) property\n");
		of_node_put(dn);
		return -EINVAL;
	}

	of_node_put(dn);

	return 0;
}

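/*
 * Allocate a virq in the MSIC's nomap irq_domain for each MSI descriptor and
 * program the device so that the MSI data word is the virq number; the
 * cascade handler reads that number back out of the FIFO to dispatch the
 * interrupt.
 */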
static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int virq;
	int rc;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct axon_msic *msic;

	msic = find_msi_translator(dev);
	if (!msic)
		return -ENODEV;

	rc = setup_msi_msg_address(dev, &msg);
	if (rc)
		return rc;

	for_each_pci_msi_entry(entry, dev) {
		virq = irq_create_direct_mapping(msic->irq_domain);
		if (virq == NO_IRQ) {
			dev_warn(&dev->dev,
				 "axon_msi: virq allocation failed!\n");
			return -1;
		}
		dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);

		irq_set_msi_desc(virq, entry);
		msg.data = virq;
		pci_write_msi_msg(virq, &msg);
	}

	return 0;
}

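/* Undo axon_msi_setup_msi_irqs(): detach and dispose of each mapping. */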
static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");

	for_each_pci_msi_entry(entry, dev) {
		if (entry->irq == NO_IRQ)
			continue;

		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
	}
}

static struct irq_chip msic_irq_chip = {
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_shutdown	= pci_msi_mask_irq,
	.name		= "AXON-MSI",
};

static int msic_host_map(struct irq_domain *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);

	return 0;
}

static const struct irq_domain_ops msic_host_ops = {
	.map	= msic_host_map,
};

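/*
 * Shutdown hook: disable the MSIC by clearing the enable and interrupt-enable
 * bits in its control register.
 */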
static void axon_msi_shutdown(struct platform_device *device)
{
	struct axon_msic *msic = dev_get_drvdata(&device->dev);
	u32 tmp;

	pr_devel("axon_msi: disabling %s\n",
		 irq_domain_get_of_node(msic->irq_domain)->full_name);
	tmp  = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
	tmp &= ~(MSIC_CTRL_ENABLE | MSIC_CTRL_IRQ_ENABLE);
	msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
}

static int axon_msi_probe(struct platform_device *device)
{
	struct device_node *dn = device->dev.of_node;
	struct axon_msic *msic;
	unsigned int virq;
	int dcr_base, dcr_len;

	pr_devel("axon_msi: setting up dn %s\n", dn->full_name);

	msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL);
	if (!msic) {
		printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n",
		       dn->full_name);
		goto out;
	}

	dcr_base = dcr_resource_start(dn, 0);
	dcr_len = dcr_resource_len(dn, 0);

	if (dcr_base == 0 || dcr_len == 0) {
		printk(KERN_ERR
		       "axon_msi: couldn't parse dcr properties on %s\n",
			dn->full_name);
		goto out_free_msic;
	}

	msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
	if (!DCR_MAP_OK(msic->dcr_host)) {
		printk(KERN_ERR "axon_msi: dcr_map failed for %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
					     &msic->fifo_phys, GFP_KERNEL);
	if (!msic->fifo_virt) {
		printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	virq = irq_of_parse_and_map(dn, 0);
	if (virq == NO_IRQ) {
		printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",
		       dn->full_name);
		goto out_free_fifo;
	}
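	/*
	 * Mark every FIFO entry invalid; axon_msi_cascade() likewise writes
	 * 0xffffffff back over each entry once it has been handled.
	 */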
	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);

	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
	if (!msic->irq_domain) {
		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
		       dn->full_name);
		goto out_free_fifo;
	}

	irq_set_handler_data(virq, msic);
	irq_set_chained_handler(virq, axon_msi_cascade);
	pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);

	/* Enable the MSIC hardware */
	msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
	msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
				  msic->fifo_phys & 0xFFFFFFFF);
	msic_dcr_write(msic, MSIC_CTRL_REG,
			MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
			MSIC_CTRL_FIFO_SIZE);

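	/*
	 * Prime our cached read offset from the hardware's current write
	 * offset so the FIFO starts out logically empty.
	 */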
	msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
				& MSIC_FIFO_SIZE_MASK;

	dev_set_drvdata(&device->dev, msic);

	cell_pci_controller_ops.setup_msi_irqs = axon_msi_setup_msi_irqs;
	cell_pci_controller_ops.teardown_msi_irqs = axon_msi_teardown_msi_irqs;

	axon_msi_debug_setup(dn, msic);

	printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);

	return 0;

out_free_fifo:
	dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
			  msic->fifo_phys);
out_free_msic:
	kfree(msic);
out:

	return -1;
}

static const struct of_device_id axon_msi_device_id[] = {
	{
		.compatible	= "ibm,axon-msic"
	},
	{}
};

static struct platform_driver axon_msi_driver = {
	.probe		= axon_msi_probe,
	.shutdown	= axon_msi_shutdown,
	.driver = {
		.name = "axon-msi",
		.of_match_table = axon_msi_device_id,
	},
};

static int __init axon_msi_init(void)
{
	return platform_driver_register(&axon_msi_driver);
}
subsys_initcall(axon_msi_init);

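/*
 * With DEBUG defined we expose a debugfs file per MSIC whose write handler
 * pokes the written value into the MSIC's memory-mapped trigger register,
 * presumably so an MSI can be injected by hand for testing.
 */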
#ifdef DEBUG
static int msic_set(void *data, u64 val)
{
	struct axon_msic *msic = data;
	out_le32(msic->trigger, val);
	return 0;
}

static int msic_get(void *data, u64 *val)
{
	*val = 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");

void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
{
	char name[8];
	u64 addr;

	addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
	if (addr == OF_BAD_ADDR) {
		pr_devel("axon_msi: couldn't translate reg property\n");
		return;
	}

	msic->trigger = ioremap(addr, 0x4);
	if (!msic->trigger) {
		pr_devel("axon_msi: ioremap failed\n");
		return;
	}

	snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));

	if (!debugfs_create_file(name, 0600, powerpc_debugfs_root,
				 msic, &fops_msic)) {
		pr_devel("axon_msi: debugfs_create_file failed!\n");
		return;
	}
}
#endif /* DEBUG */