// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas IRQC Driver
 *
 *  Copyright (C) 2013 Magnus Damm
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#define IRQC_IRQ_MAX	32	/* maximum 32 interrupts per driver instance */

#define IRQC_REQ_STS	0x00	/* Interrupt Request Status Register */
#define IRQC_EN_STS	0x04	/* Interrupt Enable Status Register */
#define IRQC_EN_SET	0x08	/* Interrupt Enable Set Register */
#define IRQC_INT_CPU_BASE(n) (0x000 + ((n) * 0x10))
				/* SYS-CPU vs. RT-CPU */
#define DETECT_STATUS	0x100	/* IRQn Detect Status Register */
#define MONITOR		0x104	/* IRQn Signal Level Monitor Register */
#define HLVL_STS	0x108	/* IRQn High Level Detect Status Register */
#define LLVL_STS	0x10c	/* IRQn Low Level Detect Status Register */
#define S_R_EDGE_STS	0x110	/* IRQn Sync Rising Edge Detect Status Reg. */
#define S_F_EDGE_STS	0x114	/* IRQn Sync Falling Edge Detect Status Reg. */
#define A_R_EDGE_STS	0x118	/* IRQn Async Rising Edge Detect Status Reg. */
#define A_F_EDGE_STS	0x11c	/* IRQn Async Falling Edge Detect Status Reg. */
#define CHTEN_STS	0x120	/* Chattering Reduction Status Register */
#define IRQC_CONFIG(n) (0x180 + ((n) * 0x04))
				/* IRQn Configuration Register */

struct irqc_irq {
	int hw_irq;
	int requested_irq;
	struct irqc_priv *p;
};

struct irqc_priv {
	void __iomem *iomem;
	void __iomem *cpu_int_base;
	struct irqc_irq irq[IRQC_IRQ_MAX];
	unsigned int number_of_irqs;
	struct device *dev;
	struct irq_chip_generic *gc;
	struct irq_domain *irq_domain;
	atomic_t wakeup_path;
};

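/*
 * The driver private data is stored as the irq domain's host_data when the
 * domain is created in irqc_probe(), so it can be recovered from any
 * irq_data belonging to this domain.
 */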
static struct irqc_priv *irq_data_to_priv(struct irq_data *data)
{
	return data->domain->host_data;
}

static void irqc_dbg(struct irqc_irq *i, char *str)
{
	dev_dbg(i->p->dev, "%s (%d:%d)\n", str, i->requested_irq, i->hw_irq);
}

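/*
 * Map each supported IRQ_TYPE_* sense value to the detection mode bits
 * written into the low bits of the per-IRQ IRQC_CONFIG register.  A zero
 * entry means the trigger type is not supported and is rejected with
 * -EINVAL in irqc_irq_set_type().
 */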
static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_LEVEL_LOW]	= 0x01,
	[IRQ_TYPE_LEVEL_HIGH]	= 0x02,
	[IRQ_TYPE_EDGE_FALLING]	= 0x04,	/* Synchronous */
	[IRQ_TYPE_EDGE_RISING]	= 0x08,	/* Synchronous */
	[IRQ_TYPE_EDGE_BOTH]	= 0x0c,	/* Synchronous */
};

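/*
 * Configure the trigger type of one external IRQ line by rewriting the
 * sense field (low bits) of its IRQC_CONFIG register while leaving the
 * remaining configuration bits untouched.
 */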
static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct irqc_priv *p = irq_data_to_priv(d);
	int hw_irq = irqd_to_hwirq(d);
	unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK];
	u32 tmp;

	irqc_dbg(&p->irq[hw_irq], "sense");

	if (!value)
		return -EINVAL;

	tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq));
	tmp &= ~0x3f;
	tmp |= value;
	iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq));
	return 0;
}

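/*
 * Propagate the wake configuration to the parent interrupt and keep a count
 * of active wakeup sources, so irqc_suspend() can flag the device as part
 * of the wakeup path.
 */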
static int irqc_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct irqc_priv *p = irq_data_to_priv(d);
	int hw_irq = irqd_to_hwirq(d);

	irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
	if (on)
		atomic_inc(&p->wakeup_path);
	else
		atomic_dec(&p->wakeup_path);

	return 0;
}

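/*
 * Each external IRQ line has its own parent interrupt, requested in
 * irqc_probe().  On a parent interrupt, check and acknowledge the per-IRQ
 * detect status bit, then forward the event to the virtual interrupt mapped
 * in this driver's irq domain.
 */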
static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
{
	struct irqc_irq *i = dev_id;
	struct irqc_priv *p = i->p;
	u32 bit = BIT(i->hw_irq);

	irqc_dbg(i, "demux1");

	if (ioread32(p->iomem + DETECT_STATUS) & bit) {
		iowrite32(bit, p->iomem + DETECT_STATUS);
		irqc_dbg(i, "demux2");
		generic_handle_irq(irq_find_mapping(p->irq_domain, i->hw_irq));
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

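/*
 * Probe: collect the parent interrupts provided as platform IRQ resources,
 * map the register block, create a linear irq domain with one generic chip
 * covering all lines, and finally request the parent interrupts.
 */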
static int irqc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const char *name = dev_name(dev);
	struct irqc_priv *p;
	struct resource *irq;
	int ret;
	int k;

	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->dev = dev;
	platform_set_drvdata(pdev, p);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	/* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
	for (k = 0; k < IRQC_IRQ_MAX; k++) {
		irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
		if (!irq)
			break;

		p->irq[k].p = p;
		p->irq[k].hw_irq = k;
		p->irq[k].requested_irq = irq->start;
	}

	p->number_of_irqs = k;
	if (p->number_of_irqs < 1) {
		dev_err(dev, "not enough IRQ resources\n");
		ret = -EINVAL;
		goto err_runtime_pm_disable;
	}

	/* ioremap the IRQC register block */
	p->iomem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(p->iomem)) {
		ret = PTR_ERR(p->iomem);
		goto err_runtime_pm_disable;
	}

	p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */

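	/*
	 * hwirq numbers 0..number_of_irqs-1 correspond directly to the
	 * external IRQ lines, so a simple linear domain is sufficient.
	 */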
	p->irq_domain = irq_domain_add_linear(dev->of_node, p->number_of_irqs,
					      &irq_generic_chip_ops, p);
	if (!p->irq_domain) {
		ret = -ENXIO;
		dev_err(dev, "cannot initialize irq domain\n");
		goto err_runtime_pm_disable;
	}

	ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs,
					     1, "irqc", handle_level_irq,
					     0, 0, IRQ_GC_INIT_NESTED_LOCK);
	if (ret) {
		dev_err(dev, "cannot allocate generic chip\n");
		goto err_remove_domain;
	}

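	/*
	 * Wire the generic irq chip to the SYS-CPU register bank:
	 * irq_gc_unmask_enable_reg writes the interrupt's bit to IRQC_EN_SET
	 * to enable a line, and irq_gc_mask_disable_reg writes the same bit
	 * to IRQC_EN_STS to disable it again.  Trigger type and wake control
	 * use the driver callbacks defined above.
	 */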
	p->gc = irq_get_domain_generic_chip(p->irq_domain, 0);
	p->gc->reg_base = p->cpu_int_base;
	p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
	p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
	p->gc->chip_types[0].chip.parent_device = dev;
	p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
	p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
	p->gc->chip_types[0].chip.irq_set_type	= irqc_irq_set_type;
	p->gc->chip_types[0].chip.irq_set_wake	= irqc_irq_set_wake;
	p->gc->chip_types[0].chip.flags	= IRQCHIP_MASK_ON_SUSPEND;

	/* request interrupts one by one */
	for (k = 0; k < p->number_of_irqs; k++) {
		if (devm_request_irq(dev, p->irq[k].requested_irq,
				     irqc_irq_handler, 0, name, &p->irq[k])) {
			dev_err(dev, "failed to request IRQ\n");
			ret = -ENOENT;
			goto err_remove_domain;
		}
	}

	dev_info(dev, "driving %d irqs\n", p->number_of_irqs);

	return 0;

err_remove_domain:
	irq_domain_remove(p->irq_domain);
err_runtime_pm_disable:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	return ret;
}

static int irqc_remove(struct platform_device *pdev)
{
	struct irqc_priv *p = platform_get_drvdata(pdev);

	irq_domain_remove(p->irq_domain);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

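/*
 * If any IRQ has been configured for wakeup, mark the device as part of the
 * wakeup path so that it is not powered down across system suspend.
 */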
static int __maybe_unused irqc_suspend(struct device *dev)
{
	struct irqc_priv *p = dev_get_drvdata(dev);

	if (atomic_read(&p->wakeup_path))
		device_set_wakeup_path(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(irqc_pm_ops, irqc_suspend, NULL);

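/*
 * Matching is on the generic "renesas,irqc" compatible string.  Purely as an
 * illustrative sketch (the SoC-specific compatible, unit address, register
 * size and interrupt list below are placeholders; the renesas,irqc DT
 * binding document is authoritative), a node bound by this driver looks
 * roughly like:
 *
 *	irqc0: interrupt-controller@e61c0000 {
 *		compatible = "renesas,irqc-r8a7790", "renesas,irqc";
 *		#interrupt-cells = <2>;
 *		interrupt-controller;
 *		reg = <0 0xe61c0000 0 0x200>;
 *		interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
 *			     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */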
static const struct of_device_id irqc_dt_ids[] = {
	{ .compatible = "renesas,irqc", },
	{},
};
MODULE_DEVICE_TABLE(of, irqc_dt_ids);

static struct platform_driver irqc_device_driver = {
	.probe		= irqc_probe,
	.remove		= irqc_remove,
	.driver		= {
		.name	= "renesas_irqc",
		.of_match_table	= irqc_dt_ids,
		.pm	= &irqc_pm_ops,
	}
};

static int __init irqc_init(void)
{
	return platform_driver_register(&irqc_device_driver);
}
postcore_initcall(irqc_init);

static void __exit irqc_exit(void)
{
	platform_driver_unregister(&irqc_device_driver);
}
module_exit(irqc_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas IRQC Driver");
MODULE_LICENSE("GPL v2");