// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/G2L IRQC Driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation.
 *
 * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
 */

#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>

#define IRQC_IRQ_START 1
#define IRQC_IRQ_COUNT 8
#define IRQC_TINT_START (IRQC_IRQ_START + IRQC_IRQ_COUNT)
#define IRQC_TINT_COUNT 32
#define IRQC_NUM_IRQ (IRQC_TINT_START + IRQC_TINT_COUNT)

#define ISCR 0x10
#define IITSR 0x14
#define TSCR 0x20
#define TITSR(n) (0x24 + (n) * 4)
#define TITSR0_MAX_INT 16
#define TITSEL_WIDTH 0x2
#define TSSR(n) (0x30 + ((n) * 4))
#define TIEN BIT(7)
#define TSSEL_SHIFT(n) (8 * (n))
#define TSSEL_MASK GENMASK(7, 0)
#define IRQ_MASK 0x3

#define TSSR_OFFSET(n) ((n) % 4)
#define TSSR_INDEX(n) ((n) / 4)

#define TITSR_TITSEL_EDGE_RISING 0
#define TITSR_TITSEL_EDGE_FALLING 1
#define TITSR_TITSEL_LEVEL_HIGH 2
#define TITSR_TITSEL_LEVEL_LOW 3

#define IITSR_IITSEL(n, sense) ((sense) << ((n) * 2))
#define IITSR_IITSEL_LEVEL_LOW 0
#define IITSR_IITSEL_EDGE_FALLING 1
#define IITSR_IITSEL_EDGE_RISING 2
#define IITSR_IITSEL_EDGE_BOTH 3
#define IITSR_IITSEL_MASK(n) IITSR_IITSEL((n), 3)

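/*
 * For TINT interrupts the pinctrl driver encodes both values in
 * fwspec->param[0]: the hwirq in bits 15:0 and the GPIOINT number in
 * bits 31:16 (see rzg2l_irqc_alloc()).
 */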
#define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
#define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))

/**
 * struct rzg2l_irqc_reg_cache - registers cache (necessary for suspend/resume)
 * @iitsr: IITSR register
 * @titsr: TITSR registers
 */
struct rzg2l_irqc_reg_cache {
	u32 iitsr;
	u32 titsr[2];
};

/**
 * struct rzg2l_irqc_priv - IRQ controller private data structure
 * @base: Controller's base address
 * @fwspec: IRQ firmware specific data
 * @lock: Lock to serialize access to hardware registers
 * @cache: Registers cache for suspend/resume
 */
static struct rzg2l_irqc_priv {
	void __iomem *base;
	struct irq_fwspec fwspec[IRQC_NUM_IRQ];
	raw_spinlock_t lock;
	struct rzg2l_irqc_reg_cache cache;
} *rzg2l_irqc_data;

static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
{
	return data->domain->host_data;
}

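/*
 * Clear a pending IRQ0-7 detection in ISCR, but only when the configured
 * sense is an edge type. Callers hold priv->lock.
 */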
static void rzg2l_clear_irq_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
	unsigned int hw_irq = hwirq - IRQC_IRQ_START;
	u32 bit = BIT(hw_irq);
	u32 iitsr, iscr;

	iscr = readl_relaxed(priv->base + ISCR);
	iitsr = readl_relaxed(priv->base + IITSR);

	/*
	 * ISCR can only be cleared if the type is falling-edge, rising-edge or
	 * falling/rising-edge.
	 */
	if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq))) {
		writel_relaxed(iscr & ~bit, priv->base + ISCR);
		/*
		 * Make sure the posted write is flushed so that the interrupt
		 * which was just handled is not raised again.
		 */
		readl_relaxed(priv->base + ISCR);
	}
}

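/*
 * Clear a pending TINT detection in TSCR. Callers hold priv->lock.
 */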
static void rzg2l_clear_tint_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_TINT_START);
	u32 reg;

	reg = readl_relaxed(priv->base + TSCR);
	if (reg & bit) {
		writel_relaxed(reg & ~bit, priv->base + TSCR);
		/*
		 * Make sure the posted write is flushed so that the interrupt
		 * which was just handled is not raised again.
		 */
		readl_relaxed(priv->base + TSCR);
	}
}

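/*
 * Clear the detection status of the interrupt being completed before
 * forwarding the EOI to the parent interrupt controller.
 */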
static void rzg2l_irqc_eoi(struct irq_data *d)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hw_irq = irqd_to_hwirq(d);

	raw_spin_lock(&priv->lock);
	if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
		rzg2l_clear_irq_int(priv, hw_irq);
	else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
		rzg2l_clear_tint_int(priv, hw_irq);
	raw_spin_unlock(&priv->lock);
	irq_chip_eoi_parent(d);
}

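/*
 * For TINT interrupts, clear the TIEN bit in the corresponding TSSR byte to
 * disable the TINT source before disabling the parent interrupt.
 */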
static void rzg2l_irqc_irq_disable(struct irq_data *d)
{
	unsigned int hw_irq = irqd_to_hwirq(d);

	if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
		struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
		u32 offset = hw_irq - IRQC_TINT_START;
		u32 tssr_offset = TSSR_OFFSET(offset);
		u8 tssr_index = TSSR_INDEX(offset);
		u32 reg;

		raw_spin_lock(&priv->lock);
		reg = readl_relaxed(priv->base + TSSR(tssr_index));
		reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
		writel_relaxed(reg, priv->base + TSSR(tssr_index));
		raw_spin_unlock(&priv->lock);
	}
	irq_chip_disable_parent(d);
}

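/*
 * For TINT interrupts, set the TIEN bit in the corresponding TSSR byte to
 * enable the TINT source, then enable the parent interrupt.
 */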
static void rzg2l_irqc_irq_enable(struct irq_data *d)
{
	unsigned int hw_irq = irqd_to_hwirq(d);

	if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
		struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
		u32 offset = hw_irq - IRQC_TINT_START;
		u32 tssr_offset = TSSR_OFFSET(offset);
		u8 tssr_index = TSSR_INDEX(offset);
		u32 reg;

		raw_spin_lock(&priv->lock);
		reg = readl_relaxed(priv->base + TSSR(tssr_index));
		reg |= TIEN << TSSEL_SHIFT(tssr_offset);
		writel_relaxed(reg, priv->base + TSSR(tssr_index));
		raw_spin_unlock(&priv->lock);
	}
	irq_chip_enable_parent(d);
}

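/*
 * Program the sense selection for IRQ0-7 in IITSR. For the edge trigger
 * types, any stale detection status in ISCR is cleared before the new sense
 * value is written.
 */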
static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);
	u32 iitseln = hwirq - IRQC_IRQ_START;
	bool clear_irq_int = false;
	u16 sense, tmp;

	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_LEVEL_LOW:
		sense = IITSR_IITSEL_LEVEL_LOW;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		sense = IITSR_IITSEL_EDGE_FALLING;
		clear_irq_int = true;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = IITSR_IITSEL_EDGE_RISING;
		clear_irq_int = true;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		sense = IITSR_IITSEL_EDGE_BOTH;
		clear_irq_int = true;
		break;

	default:
		return -EINVAL;
	}

	raw_spin_lock(&priv->lock);
	tmp = readl_relaxed(priv->base + IITSR);
	tmp &= ~IITSR_IITSEL_MASK(iitseln);
	tmp |= IITSR_IITSEL(iitseln, sense);
	if (clear_irq_int)
		rzg2l_clear_irq_int(priv, hwirq);
	writel_relaxed(tmp, priv->base + IITSR);
	raw_spin_unlock(&priv->lock);

	return 0;
}

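/*
 * Write the TINT source selection with TIEN cleared, i.e. with the TINT
 * temporarily disabled. The returned value is the new TSSR contents with the
 * original TIEN state merged back in, so the caller can restore it once the
 * trigger type has been reprogrammed.
 */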
static u32 rzg2l_disable_tint_and_set_tint_source(struct irq_data *d, struct rzg2l_irqc_priv *priv,
						  u32 reg, u32 tssr_offset, u8 tssr_index)
{
	u32 tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
	u32 tien = reg & (TIEN << TSSEL_SHIFT(tssr_offset));

	/* Clear the relevant byte in reg */
	reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
	/* Set TINT and leave TIEN clear */
	reg |= tint << TSSEL_SHIFT(tssr_offset);
	writel_relaxed(reg, priv->base + TSSR(tssr_index));

	return reg | tien;
}

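/*
 * Reprogram the trigger edge of a TINT: temporarily disable the TINT source,
 * update the corresponding TITSEL field in TITSR0/1, clear any stale
 * detection status and finally restore the TSSR (including TIEN).
 */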
static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);
	u32 titseln = hwirq - IRQC_TINT_START;
	u32 tssr_offset = TSSR_OFFSET(titseln);
	u8 tssr_index = TSSR_INDEX(titseln);
	u8 index, sense;
	u32 reg, tssr;

	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_RISING:
		sense = TITSR_TITSEL_EDGE_RISING;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		sense = TITSR_TITSEL_EDGE_FALLING;
		break;

	default:
		return -EINVAL;
	}

	index = 0;
	if (titseln >= TITSR0_MAX_INT) {
		titseln -= TITSR0_MAX_INT;
		index = 1;
	}

	raw_spin_lock(&priv->lock);
	tssr = readl_relaxed(priv->base + TSSR(tssr_index));
	tssr = rzg2l_disable_tint_and_set_tint_source(d, priv, tssr, tssr_offset, tssr_index);
	reg = readl_relaxed(priv->base + TITSR(index));
	reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
	reg |= sense << (titseln * TITSEL_WIDTH);
	writel_relaxed(reg, priv->base + TITSR(index));
	rzg2l_clear_tint_int(priv, hwirq);
	writel_relaxed(tssr, priv->base + TSSR(tssr_index));
	raw_spin_unlock(&priv->lock);

	return 0;
}

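/*
 * Configure the trigger type of an IRQ or TINT. The parent interrupt is
 * always requested as level-high; the configured sense is handled by the
 * IRQC itself.
 */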
static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int hw_irq = irqd_to_hwirq(d);
	int ret = -EINVAL;

	if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
		ret = rzg2l_irq_set_type(d, type);
	else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
		ret = rzg2l_tint_set_edge(d, type);
	if (ret)
		return ret;

	return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}

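/*
 * Save the interrupt type registers (IITSR and TITSR0/1) across a system
 * suspend. TSSRx is deliberately not saved here; see rzg2l_irqc_irq_resume().
 */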
static int rzg2l_irqc_irq_suspend(void)
{
	struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
	void __iomem *base = rzg2l_irqc_data->base;

	cache->iitsr = readl_relaxed(base + IITSR);
	for (u8 i = 0; i < 2; i++)
		cache->titsr[i] = readl_relaxed(base + TITSR(i));

	return 0;
}

static void rzg2l_irqc_irq_resume(void)
{
	struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
	void __iomem *base = rzg2l_irqc_data->base;

	/*
	 * Restore only the interrupt type. TSSRx will be restored at the
	 * request of the pin controller to avoid spurious interrupts due
	 * to invalid PIN states.
	 */
	for (u8 i = 0; i < 2; i++)
		writel_relaxed(cache->titsr[i], base + TITSR(i));
	writel_relaxed(cache->iitsr, base + IITSR);
}

static struct syscore_ops rzg2l_irqc_syscore_ops = {
	.suspend = rzg2l_irqc_irq_suspend,
	.resume = rzg2l_irqc_irq_resume,
};

static const struct irq_chip irqc_chip = {
	.name = "rzg2l-irqc",
	.irq_eoi = rzg2l_irqc_eoi,
	.irq_mask = irq_chip_mask_parent,
	.irq_unmask = irq_chip_unmask_parent,
	.irq_disable = rzg2l_irqc_irq_disable,
	.irq_enable = rzg2l_irqc_irq_enable,
	.irq_get_irqchip_state = irq_chip_get_parent_state,
	.irq_set_irqchip_state = irq_chip_set_parent_state,
	.irq_retrigger = irq_chip_retrigger_hierarchy,
	.irq_set_type = rzg2l_irqc_set_type,
	.flags = IRQCHIP_MASK_ON_SUSPEND |
		 IRQCHIP_SET_TYPE_MASKED |
		 IRQCHIP_SKIP_SET_WAKE,
};

static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct rzg2l_irqc_priv *priv = domain->host_data;
	unsigned long tint = 0;
	irq_hw_number_t hwirq;
	unsigned int type;
	int ret;

	ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	/*
	 * For TINT interrupts, i.e. where the pinctrl driver is a child of the
	 * IRQC domain, the hwirq and the TINT are both encoded in
	 * fwspec->param[0]: the hwirq (range 9-40) in bits 0-15 and the TINT
	 * in bits 16-31. The TINT provided by the pinctrl driver needs to be
	 * programmed into the IRQC registers to enable a given GPIO pin as an
	 * interrupt.
	 */
	if (hwirq > IRQC_IRQ_COUNT) {
		tint = TINT_EXTRACT_GPIOINT(hwirq);
		hwirq = TINT_EXTRACT_HWIRQ(hwirq);

		if (hwirq < IRQC_TINT_START)
			return -EINVAL;
	}

	if (hwirq > (IRQC_NUM_IRQ - 1))
		return -EINVAL;

	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &irqc_chip,
					    (void *)(uintptr_t)tint);
	if (ret)
		return ret;

	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]);
}

static const struct irq_domain_ops rzg2l_irqc_domain_ops = {
	.alloc = rzg2l_irqc_alloc,
	.free = irq_domain_free_irqs_common,
	.translate = irq_domain_translate_twocell,
};

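/*
 * Cache the parent fwspec of every interrupt listed in the IRQC device node,
 * so that rzg2l_irqc_alloc() can pass the matching parent interrupt down the
 * hierarchy when one of them is mapped.
 */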
static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
				       struct device_node *np)
{
	struct of_phandle_args map;
	unsigned int i;
	int ret;

	for (i = 0; i < IRQC_NUM_IRQ; i++) {
		ret = of_irq_parse_one(np, i, &map);
		if (ret)
			return ret;
		of_phandle_args_to_fwspec(np, map.args, map.args_count,
					  &priv->fwspec[i]);
	}

	return 0;
}

static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
{
	struct platform_device *pdev = of_find_device_by_node(node);
	struct device *dev __free(put_device) = pdev ? &pdev->dev : NULL;
	struct irq_domain *irq_domain, *parent_domain;
	struct reset_control *resetn;
	int ret;

	if (!pdev)
		return -ENODEV;

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		dev_err(&pdev->dev, "cannot find parent domain\n");
		return -ENODEV;
	}

	rzg2l_irqc_data = devm_kzalloc(&pdev->dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
	if (!rzg2l_irqc_data)
		return -ENOMEM;

	rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
	if (IS_ERR(rzg2l_irqc_data->base))
		return PTR_ERR(rzg2l_irqc_data->base);

	ret = rzg2l_irqc_parse_interrupts(rzg2l_irqc_data, node);
	if (ret) {
		dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
		return ret;
	}

	resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(resetn))
		return PTR_ERR(resetn);

	ret = reset_control_deassert(resetn);
	if (ret) {
		dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
		return ret;
	}

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
		goto pm_disable;
	}

	raw_spin_lock_init(&rzg2l_irqc_data->lock);

	irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ,
					      node, &rzg2l_irqc_domain_ops,
					      rzg2l_irqc_data);
	if (!irq_domain) {
		dev_err(&pdev->dev, "failed to add irq domain\n");
		ret = -ENOMEM;
		goto pm_put;
	}

	register_syscore_ops(&rzg2l_irqc_syscore_ops);

	/*
	 * Prevent the cleanup function from invoking put_device by assigning
	 * NULL to dev.
	 *
	 * make coccicheck will complain about missing put_device calls, but
	 * those are false positives, as dev will be automatically "put" via
	 * __free_put_device on the failing path.
	 * On the successful path we don't actually want to "put" dev.
	 */
	dev = NULL;

	return 0;

pm_put:
	pm_runtime_put(&pdev->dev);
pm_disable:
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(resetn);
	return ret;
}

IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc)
IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init)
IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc)
MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver");