// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Maxime Coquelin 2015
 * Copyright (C) STMicroelectronics 2017
 * Author:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/hwspinlock.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>

#include <dt-bindings/interrupt-controller/arm-gic.h>

#define IRQS_PER_BANK 32

#define HWSPNLCK_TIMEOUT	1000 /* usec */
#define HWSPNLCK_RETRY_DELAY	100  /* usec */

struct stm32_exti_bank {
	u32 imr_ofst;
	u32 emr_ofst;
	u32 rtsr_ofst;
	u32 ftsr_ofst;
	u32 swier_ofst;
	u32 rpr_ofst;
	u32 fpr_ofst;
};

#define UNDEF_REG ~0

enum stm32_exti_hwspinlock {
	HWSPINLOCK_UNKNOWN,
	HWSPINLOCK_NONE,
	HWSPINLOCK_READY,
};

struct stm32_desc_irq {
	u32 exti;
	u32 irq_parent;
};

struct stm32_exti_drv_data {
	const struct stm32_exti_bank **exti_banks;
	const struct stm32_desc_irq *desc_irqs;
	u32 bank_nr;
	u32 irq_nr;
};

struct stm32_exti_chip_data {
	struct stm32_exti_host_data *host_data;
	const struct stm32_exti_bank *reg_bank;
	struct raw_spinlock rlock;
	u32 wake_active;
	u32 mask_cache;
	u32 rtsr_cache;
	u32 ftsr_cache;
};

struct stm32_exti_host_data {
	void __iomem *base;
	struct stm32_exti_chip_data *chips_data;
	const struct stm32_exti_drv_data *drv_data;
	struct device_node *node;
	enum stm32_exti_hwspinlock hwlock_state;
	struct hwspinlock *hwlock;
};

static struct stm32_exti_host_data *stm32_host_data;

static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
	.imr_ofst	= 0x00,
	.emr_ofst	= 0x04,
	.rtsr_ofst	= 0x08,
	.ftsr_ofst	= 0x0C,
	.swier_ofst	= 0x10,
	.rpr_ofst	= 0x14,
	.fpr_ofst	= UNDEF_REG,
};

static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = {
	&stm32f4xx_exti_b1,
};

static const struct stm32_exti_drv_data stm32f4xx_drv_data = {
	.exti_banks = stm32f4xx_exti_banks,
	.bank_nr = ARRAY_SIZE(stm32f4xx_exti_banks),
};

static const struct stm32_exti_bank stm32h7xx_exti_b1 = {
	.imr_ofst	= 0x80,
	.emr_ofst	= 0x84,
	.rtsr_ofst	= 0x00,
	.ftsr_ofst	= 0x04,
	.swier_ofst	= 0x08,
	.rpr_ofst	= 0x88,
	.fpr_ofst	= UNDEF_REG,
};

static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
	.imr_ofst	= 0x90,
	.emr_ofst	= 0x94,
	.rtsr_ofst	= 0x20,
	.ftsr_ofst	= 0x24,
	.swier_ofst	= 0x28,
	.rpr_ofst	= 0x98,
	.fpr_ofst	= UNDEF_REG,
};

static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
	.imr_ofst	= 0xA0,
	.emr_ofst	= 0xA4,
	.rtsr_ofst	= 0x40,
	.ftsr_ofst	= 0x44,
	.swier_ofst	= 0x48,
	.rpr_ofst	= 0xA8,
	.fpr_ofst	= UNDEF_REG,
};

static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = {
	&stm32h7xx_exti_b1,
	&stm32h7xx_exti_b2,
	&stm32h7xx_exti_b3,
};

static const struct stm32_exti_drv_data stm32h7xx_drv_data = {
	.exti_banks = stm32h7xx_exti_banks,
	.bank_nr = ARRAY_SIZE(stm32h7xx_exti_banks),
};

static const struct stm32_exti_bank stm32mp1_exti_b1 = {
	.imr_ofst	= 0x80,
	.emr_ofst	= 0x84,
	.rtsr_ofst	= 0x00,
	.ftsr_ofst	= 0x04,
	.swier_ofst	= 0x08,
	.rpr_ofst	= 0x0C,
	.fpr_ofst	= 0x10,
};

static const struct stm32_exti_bank stm32mp1_exti_b2 = {
	.imr_ofst	= 0x90,
	.emr_ofst	= 0x94,
	.rtsr_ofst	= 0x20,
	.ftsr_ofst	= 0x24,
	.swier_ofst	= 0x28,
	.rpr_ofst	= 0x2C,
	.fpr_ofst	= 0x30,
};

static const struct stm32_exti_bank stm32mp1_exti_b3 = {
	.imr_ofst	= 0xA0,
	.emr_ofst	= 0xA4,
	.rtsr_ofst	= 0x40,
	.ftsr_ofst	= 0x44,
	.swier_ofst	= 0x48,
	.rpr_ofst	= 0x4C,
	.fpr_ofst	= 0x50,
};

static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
	&stm32mp1_exti_b1,
	&stm32mp1_exti_b2,
	&stm32mp1_exti_b3,
};

static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
	{ .exti = 0, .irq_parent = 6 },
	{ .exti = 1, .irq_parent = 7 },
	{ .exti = 2, .irq_parent = 8 },
	{ .exti = 3, .irq_parent = 9 },
	{ .exti = 4, .irq_parent = 10 },
	{ .exti = 5, .irq_parent = 23 },
	{ .exti = 6, .irq_parent = 64 },
	{ .exti = 7, .irq_parent = 65 },
	{ .exti = 8, .irq_parent = 66 },
	{ .exti = 9, .irq_parent = 67 },
	{ .exti = 10, .irq_parent = 40 },
	{ .exti = 11, .irq_parent = 42 },
	{ .exti = 12, .irq_parent = 76 },
	{ .exti = 13, .irq_parent = 77 },
	{ .exti = 14, .irq_parent = 121 },
	{ .exti = 15, .irq_parent = 127 },
	{ .exti = 16, .irq_parent = 1 },
	{ .exti = 65, .irq_parent = 144 },
	{ .exti = 68, .irq_parent = 143 },
	{ .exti = 73, .irq_parent = 129 },
};

static const struct stm32_exti_drv_data stm32mp1_drv_data = {
	.exti_banks = stm32mp1_exti_banks,
	.bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
	.desc_irqs = stm32mp1_desc_irq,
	.irq_nr = ARRAY_SIZE(stm32mp1_desc_irq),
};

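/* Map an EXTI line number to its dedicated parent interrupt, if any. */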
static int stm32_exti_to_irq(const struct stm32_exti_drv_data *drv_data,
			     irq_hw_number_t hwirq)
{
	const struct stm32_desc_irq *desc_irq;
	int i;

	if (!drv_data->desc_irqs)
		return -EINVAL;

	for (i = 0; i < drv_data->irq_nr; i++) {
		desc_irq = &drv_data->desc_irqs[i];
		if (desc_irq->exti == hwirq)
			return desc_irq->irq_parent;
	}

	return -EINVAL;
}

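/*
 * Return the pending lines of a bank: the rising-edge pending bits, merged
 * with the falling-edge pending bits when the bank has a separate falling
 * pending register.
 */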
static unsigned long stm32_exti_pending(struct irq_chip_generic *gc)
{
	struct stm32_exti_chip_data *chip_data = gc->private;
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	unsigned long pending;

	pending = irq_reg_readl(gc, stm32_bank->rpr_ofst);
	if (stm32_bank->fpr_ofst != UNDEF_REG)
		pending |= irq_reg_readl(gc, stm32_bank->fpr_ofst);

	return pending;
}

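/*
 * Chained handler: walk every bank, demultiplex its pending lines and
 * invoke the mapped virtual interrupts until no line is pending anymore.
 */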
static void stm32_irq_handler(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int virq, nbanks = domain->gc->num_chips;
	struct irq_chip_generic *gc;
	unsigned long pending;
	int n, i, irq_base = 0;

	chained_irq_enter(chip, desc);

	for (i = 0; i < nbanks; i++, irq_base += IRQS_PER_BANK) {
		gc = irq_get_domain_generic_chip(domain, irq_base);

		while ((pending = stm32_exti_pending(gc))) {
			for_each_set_bit(n, &pending, IRQS_PER_BANK) {
				virq = irq_find_mapping(domain, irq_base + n);
				generic_handle_irq(virq);
			}
		}
	}

	chained_irq_exit(chip, desc);
}

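/* Compute the new rising/falling trigger masks; only edge types are valid. */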
static int stm32_exti_set_type(struct irq_data *d,
			       unsigned int type, u32 *rtsr, u32 *ftsr)
{
	u32 mask = BIT(d->hwirq % IRQS_PER_BANK);

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		*rtsr |= mask;
		*ftsr &= ~mask;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		*rtsr &= ~mask;
		*ftsr |= mask;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		*rtsr |= mask;
		*ftsr |= mask;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

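/*
 * Take the optional hwspinlock protecting the EXTI configuration registers.
 * The lock is looked up lazily on first use; when the device tree provides
 * no hwspinlock, the function degrades to a plain success. Polling is used
 * because callers may run with interrupts disabled.
 */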
static int stm32_exti_hwspin_lock(struct stm32_exti_chip_data *chip_data)
{
	struct stm32_exti_host_data *host_data = chip_data->host_data;
	struct hwspinlock *hwlock;
	int id, ret = 0, timeout = 0;

	/* first time, check for hwspinlock availability */
	if (unlikely(host_data->hwlock_state == HWSPINLOCK_UNKNOWN)) {
		id = of_hwspin_lock_get_id(host_data->node, 0);
		if (id >= 0) {
			hwlock = hwspin_lock_request_specific(id);
			if (hwlock) {
				/* found valid hwspinlock */
				host_data->hwlock_state = HWSPINLOCK_READY;
				host_data->hwlock = hwlock;
				pr_debug("%s hwspinlock = %d\n", __func__, id);
			} else {
				host_data->hwlock_state = HWSPINLOCK_NONE;
			}
		} else if (id != -EPROBE_DEFER) {
			host_data->hwlock_state = HWSPINLOCK_NONE;
		} else {
			/* hwspinlock driver shall be ready at that stage */
			ret = -EPROBE_DEFER;
		}
	}

	if (likely(host_data->hwlock_state == HWSPINLOCK_READY)) {
		/*
		 * Use the x_raw API since we are under spin_lock protection.
		 * Do not use the x_timeout API because we are under irq_disable
		 * mode (see __setup_irq())
		 */
		do {
			ret = hwspin_trylock_raw(host_data->hwlock);
			if (!ret)
				return 0;

			udelay(HWSPNLCK_RETRY_DELAY);
			timeout += HWSPNLCK_RETRY_DELAY;
		} while (timeout < HWSPNLCK_TIMEOUT);

		if (ret == -EBUSY)
			ret = -ETIMEDOUT;
	}

	if (ret)
		pr_err("%s can't get hwspinlock (%d)\n", __func__, ret);

	return ret;
}

static void stm32_exti_hwspin_unlock(struct stm32_exti_chip_data *chip_data)
{
	if (likely(chip_data->host_data->hwlock_state == HWSPINLOCK_READY))
		hwspin_unlock_raw(chip_data->host_data->hwlock);
}

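/* Trigger configuration for the generic-chip (non-hierarchical) variants. */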
static int stm32_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct stm32_exti_chip_data *chip_data = gc->private;
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	u32 rtsr, ftsr;
	int err;

	irq_gc_lock(gc);

	err = stm32_exti_hwspin_lock(chip_data);
	if (err)
		goto unlock;

	rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
	ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);

	err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
	if (err)
		goto unspinlock;

	irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
	irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);

unspinlock:
	stm32_exti_hwspin_unlock(chip_data);
unlock:
	irq_gc_unlock(gc);

	return err;
}

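/*
 * Suspend/resume helpers: cache or restore the trigger registers and
 * program the interrupt mask with either the wakeup-active lines (suspend)
 * or the cached mask (resume).
 */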
static void stm32_chip_suspend(struct stm32_exti_chip_data *chip_data,
			       u32 wake_active)
{
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	void __iomem *base = chip_data->host_data->base;

	/* save rtsr, ftsr registers */
	chip_data->rtsr_cache = readl_relaxed(base + stm32_bank->rtsr_ofst);
	chip_data->ftsr_cache = readl_relaxed(base + stm32_bank->ftsr_ofst);

	writel_relaxed(wake_active, base + stm32_bank->imr_ofst);
}

static void stm32_chip_resume(struct stm32_exti_chip_data *chip_data,
			      u32 mask_cache)
{
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	void __iomem *base = chip_data->host_data->base;

	/* restore rtsr, ftsr registers */
	writel_relaxed(chip_data->rtsr_cache, base + stm32_bank->rtsr_ofst);
	writel_relaxed(chip_data->ftsr_cache, base + stm32_bank->ftsr_ofst);

	writel_relaxed(mask_cache, base + stm32_bank->imr_ofst);
}

static void stm32_irq_suspend(struct irq_chip_generic *gc)
{
	struct stm32_exti_chip_data *chip_data = gc->private;

	irq_gc_lock(gc);
	stm32_chip_suspend(chip_data, gc->wake_active);
	irq_gc_unlock(gc);
}

static void stm32_irq_resume(struct irq_chip_generic *gc)
{
	struct stm32_exti_chip_data *chip_data = gc->private;

	irq_gc_lock(gc);
	stm32_chip_resume(chip_data, gc->mask_cache);
	irq_gc_unlock(gc);
}

static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq,
			    unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	irq_hw_number_t hwirq;

	hwirq = fwspec->param[0];

	irq_map_generic_chip(d, virq, hwirq);

	return 0;
}

static void stm32_exti_free(struct irq_domain *d, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(d, virq);

	irq_domain_reset_irq_data(data);
}

static const struct irq_domain_ops irq_exti_domain_ops = {
	.map	= irq_map_generic_chip,
	.alloc  = stm32_exti_alloc,
	.free	= stm32_exti_free,
};

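/*
 * Acknowledge a line by clearing its rising (and, when present, falling)
 * pending flags.
 */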
static void stm32_irq_ack(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct stm32_exti_chip_data *chip_data = gc->private;
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;

	irq_gc_lock(gc);

	irq_reg_writel(gc, d->mask, stm32_bank->rpr_ofst);
	if (stm32_bank->fpr_ofst != UNDEF_REG)
		irq_reg_writel(gc, d->mask, stm32_bank->fpr_ofst);

	irq_gc_unlock(gc);
}

static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	void __iomem *base = chip_data->host_data->base;
	u32 val;

	val = readl_relaxed(base + reg);
	val |= BIT(d->hwirq % IRQS_PER_BANK);
	writel_relaxed(val, base + reg);

	return val;
}

static inline u32 stm32_exti_clr_bit(struct irq_data *d, u32 reg)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	void __iomem *base = chip_data->host_data->base;
	u32 val;

	val = readl_relaxed(base + reg);
	val &= ~BIT(d->hwirq % IRQS_PER_BANK);
	writel_relaxed(val, base + reg);

	return val;
}

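/*
 * Callbacks of the hierarchical (stm32mp1) irqchip: the pending flags are
 * cleared locally and the operation is forwarded to the parent chip when
 * the line is wired to one.
 */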
static void stm32_exti_h_eoi(struct irq_data *d)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;

	raw_spin_lock(&chip_data->rlock);

	stm32_exti_set_bit(d, stm32_bank->rpr_ofst);
	if (stm32_bank->fpr_ofst != UNDEF_REG)
		stm32_exti_set_bit(d, stm32_bank->fpr_ofst);

	raw_spin_unlock(&chip_data->rlock);

	if (d->parent_data->chip)
		irq_chip_eoi_parent(d);
}

static void stm32_exti_h_mask(struct irq_data *d)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;

	raw_spin_lock(&chip_data->rlock);
	chip_data->mask_cache = stm32_exti_clr_bit(d, stm32_bank->imr_ofst);
	raw_spin_unlock(&chip_data->rlock);

	if (d->parent_data->chip)
		irq_chip_mask_parent(d);
}

static void stm32_exti_h_unmask(struct irq_data *d)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;

	raw_spin_lock(&chip_data->rlock);
	chip_data->mask_cache = stm32_exti_set_bit(d, stm32_bank->imr_ofst);
	raw_spin_unlock(&chip_data->rlock);

	if (d->parent_data->chip)
		irq_chip_unmask_parent(d);
}

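/*
 * Hierarchical variant of the trigger configuration, protected by the
 * per-bank spinlock and the optional hwspinlock.
 */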
static int stm32_exti_h_set_type(struct irq_data *d, unsigned int type)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	void __iomem *base = chip_data->host_data->base;
	u32 rtsr, ftsr;
	int err;

	raw_spin_lock(&chip_data->rlock);

	err = stm32_exti_hwspin_lock(chip_data);
	if (err)
		goto unlock;

	rtsr = readl_relaxed(base + stm32_bank->rtsr_ofst);
	ftsr = readl_relaxed(base + stm32_bank->ftsr_ofst);

	err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
	if (err)
		goto unspinlock;

	writel_relaxed(rtsr, base + stm32_bank->rtsr_ofst);
	writel_relaxed(ftsr, base + stm32_bank->ftsr_ofst);

unspinlock:
	stm32_exti_hwspin_unlock(chip_data);
unlock:
	raw_spin_unlock(&chip_data->rlock);

	return err;
}

static int stm32_exti_h_set_wake(struct irq_data *d, unsigned int on)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq % IRQS_PER_BANK);

	raw_spin_lock(&chip_data->rlock);

	if (on)
		chip_data->wake_active |= mask;
	else
		chip_data->wake_active &= ~mask;

	raw_spin_unlock(&chip_data->rlock);

	return 0;
}

static int stm32_exti_h_set_affinity(struct irq_data *d,
				     const struct cpumask *dest, bool force)
{
	if (d->parent_data->chip)
		return irq_chip_set_affinity_parent(d, dest, force);

	return -EINVAL;
}

#ifdef CONFIG_PM
static int stm32_exti_h_suspend(void)
{
	struct stm32_exti_chip_data *chip_data;
	int i;

	for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
		chip_data = &stm32_host_data->chips_data[i];
		raw_spin_lock(&chip_data->rlock);
		stm32_chip_suspend(chip_data, chip_data->wake_active);
		raw_spin_unlock(&chip_data->rlock);
	}

	return 0;
}

static void stm32_exti_h_resume(void)
{
	struct stm32_exti_chip_data *chip_data;
	int i;

	for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
		chip_data = &stm32_host_data->chips_data[i];
		raw_spin_lock(&chip_data->rlock);
		stm32_chip_resume(chip_data, chip_data->mask_cache);
		raw_spin_unlock(&chip_data->rlock);
	}
}

static struct syscore_ops stm32_exti_h_syscore_ops = {
	.suspend	= stm32_exti_h_suspend,
	.resume		= stm32_exti_h_resume,
};

static void stm32_exti_h_syscore_init(void)
{
	register_syscore_ops(&stm32_exti_h_syscore_ops);
}
#else
static inline void stm32_exti_h_syscore_init(void) {}
#endif

static struct irq_chip stm32_exti_h_chip = {
	.name			= "stm32-exti-h",
	.irq_eoi		= stm32_exti_h_eoi,
	.irq_mask		= stm32_exti_h_mask,
	.irq_unmask		= stm32_exti_h_unmask,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= stm32_exti_h_set_type,
	.irq_set_wake		= stm32_exti_h_set_wake,
	.flags			= IRQCHIP_MASK_ON_SUSPEND,
	.irq_set_affinity	= IS_ENABLED(CONFIG_SMP) ? stm32_exti_h_set_affinity : NULL,
};

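/*
 * Allocate a line in the hierarchical domain: bind it to stm32_exti_h_chip
 * and, when it has a dedicated parent interrupt, allocate the matching
 * level-high GIC SPI in the parent domain.
 */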
static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
				     unsigned int virq,
				     unsigned int nr_irqs, void *data)
{
	struct stm32_exti_host_data *host_data = dm->host_data;
	struct stm32_exti_chip_data *chip_data;
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec p_fwspec;
	irq_hw_number_t hwirq;
	int p_irq, bank;

	hwirq = fwspec->param[0];
	bank  = hwirq / IRQS_PER_BANK;
	chip_data = &host_data->chips_data[bank];

	irq_domain_set_hwirq_and_chip(dm, virq, hwirq,
				      &stm32_exti_h_chip, chip_data);

	p_irq = stm32_exti_to_irq(host_data->drv_data, hwirq);
	if (p_irq >= 0) {
		p_fwspec.fwnode = dm->parent->fwnode;
		p_fwspec.param_count = 3;
		p_fwspec.param[0] = GIC_SPI;
		p_fwspec.param[1] = p_irq;
		p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;

		return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
	}

	return 0;
}
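/*
 * Allocate the host data and the per-bank chip data array, and map the
 * controller registers. The node is kept for the lazy hwspinlock lookup.
 */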
static struct
stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
					   struct device_node *node)
{
	struct stm32_exti_host_data *host_data;

	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
	if (!host_data)
		return NULL;

	host_data->drv_data = dd;
	host_data->node = node;
	host_data->hwlock_state = HWSPINLOCK_UNKNOWN;
	host_data->chips_data = kcalloc(dd->bank_nr,
					sizeof(struct stm32_exti_chip_data),
					GFP_KERNEL);
	if (!host_data->chips_data)
		goto free_host_data;

	host_data->base = of_iomap(node, 0);
	if (!host_data->base) {
		pr_err("%pOF: Unable to map registers\n", node);
		goto free_chips_data;
	}

	stm32_host_data = host_data;

	return host_data;

free_chips_data:
	kfree(host_data->chips_data);
free_host_data:
	kfree(host_data);

	return NULL;
}

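/*
 * Initialize one register bank: hook it to the host data and clear its
 * mask, trigger and pending registers, as the block has no reset line.
 */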
static struct
stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
					   u32 bank_idx)
{
	const struct stm32_exti_bank *stm32_bank;
	struct stm32_exti_chip_data *chip_data;
	void __iomem *base = h_data->base;
	u32 irqs_mask;

	stm32_bank = h_data->drv_data->exti_banks[bank_idx];
	chip_data = &h_data->chips_data[bank_idx];
	chip_data->host_data = h_data;
	chip_data->reg_bank = stm32_bank;

	raw_spin_lock_init(&chip_data->rlock);

	/* Determine number of irqs supported */
	writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst);
	irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst);

	/*
	 * This IP has no reset, so after hot reboot we should
	 * clear registers to avoid residue
	 */
	writel_relaxed(0, base + stm32_bank->imr_ofst);
	writel_relaxed(0, base + stm32_bank->emr_ofst);
	writel_relaxed(0, base + stm32_bank->rtsr_ofst);
	writel_relaxed(0, base + stm32_bank->ftsr_ofst);
	writel_relaxed(~0UL, base + stm32_bank->rpr_ofst);
	if (stm32_bank->fpr_ofst != UNDEF_REG)
		writel_relaxed(~0UL, base + stm32_bank->fpr_ofst);

	pr_info("%pOF: bank%d\n", h_data->node, bank_idx);

	return chip_data;
}

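/*
 * Probe path of the non-hierarchical (stm32f4/stm32h7) variants: a linear
 * domain with one generic chip per bank, demultiplexed through the chained
 * handler installed on each parent interrupt.
 */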
static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
				  struct device_node *node)
{
	struct stm32_exti_host_data *host_data;
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int nr_irqs, ret, i;
	struct irq_chip_generic *gc;
	struct irq_domain *domain;

	host_data = stm32_exti_host_init(drv_data, node);
	if (!host_data)
		return -ENOMEM;

	domain = irq_domain_add_linear(node, drv_data->bank_nr * IRQS_PER_BANK,
				       &irq_exti_domain_ops, NULL);
	if (!domain) {
		pr_err("%pOFn: Could not register interrupt domain.\n",
		       node);
		ret = -ENOMEM;
		goto out_unmap;
	}

	ret = irq_alloc_domain_generic_chips(domain, IRQS_PER_BANK, 1, "exti",
					     handle_edge_irq, clr, 0, 0);
	if (ret) {
		pr_err("%pOF: Could not allocate generic interrupt chip.\n",
		       node);
		goto out_free_domain;
	}

	for (i = 0; i < drv_data->bank_nr; i++) {
		const struct stm32_exti_bank *stm32_bank;
		struct stm32_exti_chip_data *chip_data;

		stm32_bank = drv_data->exti_banks[i];
		chip_data = stm32_exti_chip_init(host_data, i);

		gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK);

		gc->reg_base = host_data->base;
		gc->chip_types->type = IRQ_TYPE_EDGE_BOTH;
		gc->chip_types->chip.irq_ack = stm32_irq_ack;
		gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit;
		gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit;
		gc->chip_types->chip.irq_set_type = stm32_irq_set_type;
		gc->chip_types->chip.irq_set_wake = irq_gc_set_wake;
		gc->suspend = stm32_irq_suspend;
		gc->resume = stm32_irq_resume;
		gc->wake_enabled = IRQ_MSK(IRQS_PER_BANK);

		gc->chip_types->regs.mask = stm32_bank->imr_ofst;
		gc->private = (void *)chip_data;
	}

	nr_irqs = of_irq_count(node);
	for (i = 0; i < nr_irqs; i++) {
		unsigned int irq = irq_of_parse_and_map(node, i);

		irq_set_handler_data(irq, domain);
		irq_set_chained_handler(irq, stm32_irq_handler);
	}

	return 0;

out_free_domain:
	irq_domain_remove(domain);
out_unmap:
	iounmap(host_data->base);
	kfree(host_data->chips_data);
	kfree(host_data);
	return ret;
}

static const struct irq_domain_ops stm32_exti_h_domain_ops = {
	.alloc	= stm32_exti_h_domain_alloc,
	.free	= irq_domain_free_irqs_common,
	.xlate = irq_domain_xlate_twocell,
};

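/*
 * Probe path of the hierarchical (stm32mp1) variant: the EXTI domain is
 * stacked on top of the parent (GIC) domain. Consumers reference a line
 * with two cells, line number and trigger type, e.g. (illustrative only)
 * interrupts-extended = <&exti 19 IRQ_TYPE_EDGE_RISING>.
 */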
static int
__init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data,
				 struct device_node *node,
				 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	struct stm32_exti_host_data *host_data;
	int ret, i;

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("interrupt-parent not found\n");
		return -EINVAL;
	}

	host_data = stm32_exti_host_init(drv_data, node);
	if (!host_data)
		return -ENOMEM;

	for (i = 0; i < drv_data->bank_nr; i++)
		stm32_exti_chip_init(host_data, i);

	domain = irq_domain_add_hierarchy(parent_domain, 0,
					  drv_data->bank_nr * IRQS_PER_BANK,
					  node, &stm32_exti_h_domain_ops,
					  host_data);

	if (!domain) {
		pr_err("%pOFn: Could not register exti domain.\n", node);
		ret = -ENOMEM;
		goto out_unmap;
	}

	stm32_exti_h_syscore_init();

	return 0;

out_unmap:
	iounmap(host_data->base);
	kfree(host_data->chips_data);
	kfree(host_data);
	return ret;
}

static int __init stm32f4_exti_of_init(struct device_node *np,
				       struct device_node *parent)
{
	return stm32_exti_init(&stm32f4xx_drv_data, np);
}

IRQCHIP_DECLARE(stm32f4_exti, "st,stm32-exti", stm32f4_exti_of_init);

static int __init stm32h7_exti_of_init(struct device_node *np,
				       struct device_node *parent)
{
	return stm32_exti_init(&stm32h7xx_drv_data, np);
}

IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init);

static int __init stm32mp1_exti_of_init(struct device_node *np,
					struct device_node *parent)
{
	return stm32_exti_hierarchy_init(&stm32mp1_drv_data, np, parent);
}

IRQCHIP_DECLARE(stm32mp1_exti, "st,stm32mp1-exti", stm32mp1_exti_of_init);