// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2007, Michael Ellerman, IBM Corporation.
 */


#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include <asm/dcr.h>
#include <asm/machdep.h>

#include "cell.h"

/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG	0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG	0x3
#define MSIC_BASE_ADDR_LO_REG	0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG	0x5
#define MSIC_WRITE_OFFSET_REG	0x6


/* MSIC control register flags */
#define MSIC_CTRL_ENABLE		0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE	0x0002
#define MSIC_CTRL_IRQ_ENABLE		0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE	0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT	16
#define MSIC_FIFO_SIZE_BYTES	(1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
 * 8-9 of the MSIC control reg.
 */
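/*
 * For the 64KB FIFO used here n = 16, so the value defined below works out
 * to (16 - 15) << 8 = 0x100.
 */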
#define MSIC_CTRL_FIFO_SIZE	(((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)

/*
 * We need to mask the read/write offsets to make sure they stay within
 * the bounds of the FIFO. Also they should always be 16-byte aligned.
 */
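/* For the 64KB FIFO the mask below works out to 0xFFF0. */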
#define MSIC_FIFO_SIZE_MASK	((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE	0x10

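/*
 * Per-MSIC state: the nomap irq_domain that MSI virqs are allocated from,
 * the DMA-coherent FIFO the hardware writes MSI entries into (CPU and bus
 * addresses), the mapped DCR range used to program the MSIC, and our
 * current read offset into the FIFO.
 */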
struct axon_msic {
	struct irq_domain *irq_domain;
	__le32 *fifo_virt;
	dma_addr_t fifo_phys;
	dcr_host_t dcr_host;
	u32 read_offset;
#ifdef DEBUG
	u32 __iomem *trigger;
#endif
};

#ifdef DEBUG
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
#else
static inline void axon_msi_debug_setup(struct device_node *dn,
					struct axon_msic *msic) { }
#endif

static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
{
	pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", dcr_n, val);

	dcr_write(msic->dcr_host, dcr_n, val);
}

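/*
 * Chained handler for the MSIC's upstream interrupt.  Drain the FIFO from
 * our read offset up to the hardware's write offset, treating the low
 * 16 bits of each entry as the Linux virq to hand to generic_handle_irq().
 * Entries that read back stale (the DMA not yet visible) are retried for a
 * while; if that never resolves we warn and step past one entry.
 */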
static void axon_msi_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct axon_msic *msic = irq_desc_get_handler_data(desc);
	u32 write_offset, msi;
	int idx;
	int retry = 0;

	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
	pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);

	/* write_offset doesn't wrap properly, so we have to mask it */
	write_offset &= MSIC_FIFO_SIZE_MASK;

	while (msic->read_offset != write_offset && retry < 100) {
		idx  = msic->read_offset / sizeof(__le32);
		msi  = le32_to_cpu(msic->fifo_virt[idx]);
		msi &= 0xFFFF;

		pr_devel("axon_msi: woff %x roff %x msi %x\n",
			  write_offset, msic->read_offset, msi);

		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
			generic_handle_irq(msi);
			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
		} else {
			/*
			 * Reading the MSIC_WRITE_OFFSET_REG does not
			 * reliably flush the outstanding DMA to the
			 * FIFO buffer. Here we were reading stale
			 * data, so we need to retry.
			 */
			udelay(1);
			retry++;
			pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
			continue;
		}

		if (retry) {
			pr_devel("axon_msi: late irq 0x%x, retry %d\n",
				 msi, retry);
			retry = 0;
		}

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	if (retry) {
		printk(KERN_WARNING "axon_msi: irq timed out\n");

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	chip->irq_eoi(&desc->irq_data);
}

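/*
 * Find the MSIC that serves a PCI device: walk up from the device's OF node
 * looking for an "msi-translator" phandle, resolve it, and return the
 * axon_msic hanging off the matching irq_domain (or NULL on failure).
 */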
static struct axon_msic *find_msi_translator(struct pci_dev *dev)
{
	struct irq_domain *irq_domain;
	struct device_node *dn, *tmp;
	const phandle *ph;
	struct axon_msic *msic = NULL;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return NULL;
	}

	for (; dn; dn = of_get_next_parent(dn)) {
		ph = of_get_property(dn, "msi-translator", NULL);
		if (ph)
			break;
	}

	if (!ph) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-translator property found\n");
		goto out_error;
	}

	tmp = dn;
	dn = of_find_node_by_phandle(*ph);
	of_node_put(tmp);
	if (!dn) {
		dev_dbg(&dev->dev,
			"axon_msi: msi-translator doesn't point to a node\n");
		goto out_error;
	}

	irq_domain = irq_find_host(dn);
	if (!irq_domain) {
		dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %pOF\n",
			dn);
		goto out_error;
	}

	msic = irq_domain->host_data;

out_error:
	of_node_put(dn);

	return msic;
}

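/*
 * Fill in the MSI address from the "msi-address-64" or "msi-address-32"
 * property found on the device's OF node or one of its parents.  The 64-bit
 * address is only considered if the device can actually generate one.
 */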
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
	struct device_node *dn;
	int len;
	const u32 *prop;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return -ENODEV;
	}

	for (; dn; dn = of_get_next_parent(dn)) {
		if (!dev->no_64bit_msi) {
			prop = of_get_property(dn, "msi-address-64", &len);
			if (prop)
				break;
		}

		prop = of_get_property(dn, "msi-address-32", &len);
		if (prop)
			break;
	}

	if (!prop) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-address-(32|64) properties found\n");
		of_node_put(dn);
		return -ENOENT;
	}

	switch (len) {
	case 8:
		msg->address_hi = prop[0];
		msg->address_lo = prop[1];
		break;
	case 4:
		msg->address_hi = 0;
		msg->address_lo = prop[0];
		break;
	default:
		dev_dbg(&dev->dev,
			"axon_msi: malformed msi-address-(32|64) property\n");
		of_node_put(dn);
		return -EINVAL;
	}

	of_node_put(dn);

	return 0;
}

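/*
 * Allocate a direct-mapped virq for every unassociated MSI descriptor and
 * program the device to send that virq number as its MSI data; the cascade
 * handler reads the number straight back out of the FIFO.
 */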
static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int virq;
	int rc;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct axon_msic *msic;

	msic = find_msi_translator(dev);
	if (!msic)
		return -ENODEV;

	rc = setup_msi_msg_address(dev, &msg);
	if (rc)
		return rc;

	msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) {
		virq = irq_create_direct_mapping(msic->irq_domain);
		if (!virq) {
			dev_warn(&dev->dev,
				 "axon_msi: virq allocation failed!\n");
			return -1;
		}
		dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);

		irq_set_msi_desc(virq, entry);
		msg.data = virq;
		pci_write_msi_msg(virq, &msg);
	}

	return 0;
}

static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");

	msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) {
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
	}
}

static struct irq_chip msic_irq_chip = {
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_shutdown	= pci_msi_mask_irq,
	.name		= "AXON-MSI",
};

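/* Wire up the MSIC irq_chip and a simple flow handler on each new virq. */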
static int msic_host_map(struct irq_domain *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);

	return 0;
}

static const struct irq_domain_ops msic_host_ops = {
	.map	= msic_host_map,
};

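/* Quiesce the MSIC at shutdown by clearing its enable and irq-enable bits. */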
static void axon_msi_shutdown(struct platform_device *device)
{
	struct axon_msic *msic = dev_get_drvdata(&device->dev);
	u32 tmp;

	pr_devel("axon_msi: disabling %pOF\n",
		 irq_domain_get_of_node(msic->irq_domain));
	tmp  = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
	tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
	msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
}

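/*
 * Set up one MSIC: map its DCRs, allocate the 64KB DMA FIFO, create the
 * nomap irq_domain, chain the cascade handler onto the MSIC's upstream
 * interrupt, then point the hardware at the FIFO and enable it.  On success
 * the cell PCI controller ops are switched over to our MSI setup/teardown
 * routines.
 */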
static int axon_msi_probe(struct platform_device *device)
{
	struct device_node *dn = device->dev.of_node;
	struct axon_msic *msic;
	unsigned int virq;
	int dcr_base, dcr_len;

	pr_devel("axon_msi: setting up dn %pOF\n", dn);

	msic = kzalloc(sizeof(*msic), GFP_KERNEL);
	if (!msic) {
		printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n",
		       dn);
		goto out;
	}

	dcr_base = dcr_resource_start(dn, 0);
	dcr_len = dcr_resource_len(dn, 0);

	if (dcr_base == 0 || dcr_len == 0) {
		printk(KERN_ERR
		       "axon_msi: couldn't parse dcr properties on %pOF\n",
			dn);
		goto out_free_msic;
	}

	msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
	if (!DCR_MAP_OK(msic->dcr_host)) {
		printk(KERN_ERR "axon_msi: dcr_map failed for %pOF\n",
		       dn);
		goto out_free_msic;
	}

	msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
					     &msic->fifo_phys, GFP_KERNEL);
	if (!msic->fifo_virt) {
		printk(KERN_ERR "axon_msi: couldn't allocate fifo for %pOF\n",
		       dn);
		goto out_free_msic;
	}

	virq = irq_of_parse_and_map(dn, 0);
	if (!virq) {
		printk(KERN_ERR "axon_msi: irq parse and map failed for %pOF\n",
		       dn);
		goto out_free_fifo;
	}
	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);

	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
	if (!msic->irq_domain) {
		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %pOF\n",
		       dn);
		goto out_free_fifo;
	}

	irq_set_handler_data(virq, msic);
	irq_set_chained_handler(virq, axon_msi_cascade);
	pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);

	/* Enable the MSIC hardware */
	msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
	msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
				  msic->fifo_phys & 0xFFFFFFFF);
	msic_dcr_write(msic, MSIC_CTRL_REG,
			MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
			MSIC_CTRL_FIFO_SIZE);

	msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
				& MSIC_FIFO_SIZE_MASK;

	dev_set_drvdata(&device->dev, msic);

	cell_pci_controller_ops.setup_msi_irqs = axon_msi_setup_msi_irqs;
	cell_pci_controller_ops.teardown_msi_irqs = axon_msi_teardown_msi_irqs;

	axon_msi_debug_setup(dn, msic);

	printk(KERN_DEBUG "axon_msi: setup MSIC on %pOF\n", dn);

	return 0;

out_free_fifo:
	dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
			  msic->fifo_phys);
out_free_msic:
	kfree(msic);
out:

	return -1;
}

static const struct of_device_id axon_msi_device_id[] = {
	{
		.compatible	= "ibm,axon-msic"
	},
	{}
};

static struct platform_driver axon_msi_driver = {
	.probe		= axon_msi_probe,
	.shutdown	= axon_msi_shutdown,
	.driver = {
		.name = "axon-msi",
		.of_match_table = axon_msi_device_id,
	},
};

static int __init axon_msi_init(void)
{
	return platform_driver_register(&axon_msi_driver);
}
subsys_initcall(axon_msi_init);

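/*
 * DEBUG-only support: expose a debugfs file whose writes go straight to the
 * MSIC's memory-mapped trigger register, presumably so MSIs can be injected
 * by hand for testing.
 */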
#ifdef DEBUG
static int msic_set(void *data, u64 val)
{
	struct axon_msic *msic = data;
	out_le32(msic->trigger, val);
	return 0;
}

static int msic_get(void *data, u64 *val)
{
	*val = 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");

void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
{
	char name[8];
	const __be32 *prop;
	u64 addr;

	prop = of_get_property(dn, "reg", NULL);
	if (!prop) {
		pr_devel("axon_msi: no reg property found\n");
		return;
	}

	addr = of_translate_address(dn, prop);
	if (addr == OF_BAD_ADDR) {
		pr_devel("axon_msi: couldn't translate reg property\n");
		return;
	}

	msic->trigger = ioremap(addr, 0x4);
	if (!msic->trigger) {
		pr_devel("axon_msi: ioremap failed\n");
		return;
	}

	snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));

	debugfs_create_file(name, 0600, arch_debugfs_dir, msic, &fops_msic);
}
#endif /* DEBUG */