// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Youlin.Pei <youlin.pei@mediatek.com>
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

enum mtk_cirq_regoffs_index {
	CIRQ_STA,
	CIRQ_ACK,
	CIRQ_MASK_SET,
	CIRQ_MASK_CLR,
	CIRQ_SENS_SET,
	CIRQ_SENS_CLR,
	CIRQ_POL_SET,
	CIRQ_POL_CLR,
	CIRQ_CONTROL
};

static const u32 mtk_cirq_regoffs_v1[] = {
	[CIRQ_STA]	= 0x0,
	[CIRQ_ACK]	= 0x40,
	[CIRQ_MASK_SET]	= 0xc0,
	[CIRQ_MASK_CLR]	= 0x100,
	[CIRQ_SENS_SET]	= 0x180,
	[CIRQ_SENS_CLR]	= 0x1c0,
	[CIRQ_POL_SET]	= 0x240,
	[CIRQ_POL_CLR]	= 0x280,
	[CIRQ_CONTROL]	= 0x300,
};

static const u32 mtk_cirq_regoffs_v2[] = {
	[CIRQ_STA]	= 0x0,
	[CIRQ_ACK]	= 0x80,
	[CIRQ_MASK_SET]	= 0x180,
	[CIRQ_MASK_CLR]	= 0x200,
	[CIRQ_SENS_SET]	= 0x300,
	[CIRQ_SENS_CLR]	= 0x380,
	[CIRQ_POL_SET]	= 0x480,
	[CIRQ_POL_CLR]	= 0x500,
	[CIRQ_CONTROL]	= 0x600,
};

#define CIRQ_EN	0x1
#define CIRQ_EDGE	0x2
#define CIRQ_FLUSH	0x4

struct mtk_cirq_chip_data {
	void __iomem *base;
	unsigned int ext_irq_start;
	unsigned int ext_irq_end;
	const u32 *offsets;
	struct irq_domain *domain;
};

static struct mtk_cirq_chip_data *cirq_data;

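/*
 * Each CIRQ register group (status, ack, mask, sense, polarity) is a bank of
 * 32-bit words with one bit per external interrupt, so a given hwirq lives in
 * word (hwirq / 32), bit (hwirq % 32). mtk_cirq_reg() returns the start of a
 * register group; mtk_cirq_irq_reg() returns the word within that group which
 * covers @cirq_num.
 */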
static void __iomem *mtk_cirq_reg(struct mtk_cirq_chip_data *chip_data,
				  enum mtk_cirq_regoffs_index idx)
{
	return chip_data->base + chip_data->offsets[idx];
}

static void __iomem *mtk_cirq_irq_reg(struct mtk_cirq_chip_data *chip_data,
				      enum mtk_cirq_regoffs_index idx,
				      unsigned int cirq_num)
{
	return mtk_cirq_reg(chip_data, idx) + (cirq_num / 32) * 4;
}

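/*
 * Write the single bit corresponding to @data's hwirq into the set/clear
 * register group selected by @idx (e.g. CIRQ_MASK_SET or CIRQ_MASK_CLR).
 */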
static void mtk_cirq_write_mask(struct irq_data *data, enum mtk_cirq_regoffs_index idx)
{
	struct mtk_cirq_chip_data *chip_data = data->chip_data;
	unsigned int cirq_num = data->hwirq;
	u32 mask = 1 << (cirq_num % 32);

	writel_relaxed(mask, mtk_cirq_irq_reg(chip_data, idx, cirq_num));
}

static void mtk_cirq_mask(struct irq_data *data)
{
	mtk_cirq_write_mask(data, CIRQ_MASK_SET);
	irq_chip_mask_parent(data);
}

static void mtk_cirq_unmask(struct irq_data *data)
{
	mtk_cirq_write_mask(data, CIRQ_MASK_CLR);
	irq_chip_unmask_parent(data);
}

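/*
 * Program the CIRQ copy of the trigger type: SENS set/clear selects level vs.
 * edge, POL set/clear selects active-high/rising vs. active-low/falling. The
 * requested type is then forwarded to the parent interrupt controller.
 */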
static int mtk_cirq_set_type(struct irq_data *data, unsigned int type)
{
	int ret;

	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		mtk_cirq_write_mask(data, CIRQ_POL_CLR);
		mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
		break;
	case IRQ_TYPE_EDGE_RISING:
		mtk_cirq_write_mask(data, CIRQ_POL_SET);
		mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
		break;
	case IRQ_TYPE_LEVEL_LOW:
		mtk_cirq_write_mask(data, CIRQ_POL_CLR);
		mtk_cirq_write_mask(data, CIRQ_SENS_SET);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		mtk_cirq_write_mask(data, CIRQ_POL_SET);
		mtk_cirq_write_mask(data, CIRQ_SENS_SET);
		break;
	default:
		break;
	}

	data = data->parent_data;
	ret = data->chip->irq_set_type(data, type);
	return ret;
}

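/*
 * The CIRQ sits below the parent interrupt controller in the IRQ domain
 * hierarchy: mask/unmask/type updates are mirrored into the CIRQ registers
 * and then passed on to the parent chip.
 */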
static struct irq_chip mtk_cirq_chip = {
	.name			= "MT_CIRQ",
	.irq_mask		= mtk_cirq_mask,
	.irq_unmask		= mtk_cirq_unmask,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_type		= mtk_cirq_set_type,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

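/*
 * The devicetree specifier uses the three-cell GIC format: cell 0 must be 0
 * (SPI), cell 1 is the interrupt number, which must fall inside the
 * "mediatek,ext-irq-range" window, and cell 2 carries the trigger flags. The
 * CIRQ hwirq is the offset of cell 1 from ext_irq_start.
 */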
static int mtk_cirq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		/* check that the interrupt lies within the CIRQ supported range */
		if (fwspec->param[1] < cirq_data->ext_irq_start ||
		    fwspec->param[1] > cirq_data->ext_irq_end)
			return -EINVAL;

		*hwirq = fwspec->param[1] - cirq_data->ext_irq_start;
		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		return 0;
	}

	return -EINVAL;
}

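/*
 * Allocate a single interrupt: bind it to mtk_cirq_chip in this domain, then
 * allocate the same specifier in the parent domain so both levels of the
 * hierarchy are set up together.
 */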
static int mtk_cirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;
	struct irq_fwspec parent_fwspec = *fwspec;

	ret = mtk_cirq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	if (WARN_ON(nr_irqs != 1))
		return -EINVAL;

	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
				      &mtk_cirq_chip,
				      domain->host_data);

	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops cirq_domain_ops = {
	.translate	= mtk_cirq_domain_translate,
	.alloc		= mtk_cirq_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

#ifdef CONFIG_PM_SLEEP
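/*
 * While the system is suspended the parent interrupt controller may be
 * powered down. The suspend callback switches the CIRQ into edge-only
 * recording mode and enables it; the resume callback flushes the recorded
 * interrupts back to the parent and disables the CIRQ again.
 */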
static int mtk_cirq_suspend(void)
{
	void __iomem *reg;
	u32 value, mask;
	unsigned int irq, hwirq_num;
	bool pending, masked;
	int i, pendret, maskret;

	/*
	 * When an external interrupt fires, the CIRQ records its status even
	 * if the CIRQ is not enabled. When the flush command is executed, the
	 * CIRQ resends signals according to that recorded status, so stale
	 * status that is not cleared here would trigger spurious resends.
	 *
	 * arch_suspend_disable_irqs() is called before the CIRQ suspend
	 * callback. If all the status bits were simply cleared, external
	 * interrupts which fired between arch_suspend_disable_irqs() and this
	 * callback would be lost. Avoid that as follows:
	 *
	 * - Iterate over all the CIRQ supported interrupts;
	 * - For each interrupt, inspect its pending and masked status at GIC
	 *   level;
	 * - If it is pending and unmasked, it fired between
	 *   arch_suspend_disable_irqs() and this callback: don't ACK it.
	 *   Otherwise, ACK it.
	 */
	hwirq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
	for (i = 0; i < hwirq_num; i++) {
		irq = irq_find_mapping(cirq_data->domain, i);
		if (irq) {
			pendret = irq_get_irqchip_state(irq,
							IRQCHIP_STATE_PENDING,
							&pending);

			maskret = irq_get_irqchip_state(irq,
							IRQCHIP_STATE_MASKED,
							&masked);

			if (pendret == 0 && maskret == 0 &&
			    (pending && !masked))
				continue;
		}

		reg = mtk_cirq_irq_reg(cirq_data, CIRQ_ACK, i);
		mask = 1 << (i % 32);
		writel_relaxed(mask, reg);
	}

	/* set edge-only mode, record edge-triggered interrupts */
	/* enable cirq */
	reg = mtk_cirq_reg(cirq_data, CIRQ_CONTROL);
	value = readl_relaxed(reg);
	value |= (CIRQ_EDGE | CIRQ_EN);
	writel_relaxed(value, reg);

	return 0;
}

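/*
 * Replay anything the CIRQ recorded while suspended: the flush command
 * resends the latched interrupts to the parent controller, after which the
 * CIRQ is disabled until the next suspend cycle.
 */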
static void mtk_cirq_resume(void)
{
	void __iomem *reg = mtk_cirq_reg(cirq_data, CIRQ_CONTROL);
	u32 value;

	/* flush recorded interrupts, will send signals to parent controller */
	value = readl_relaxed(reg);
	writel_relaxed(value | CIRQ_FLUSH, reg);

	/* disable cirq */
	value = readl_relaxed(reg);
	value &= ~(CIRQ_EDGE | CIRQ_EN);
	writel_relaxed(value, reg);
}

static struct syscore_ops mtk_cirq_syscore_ops = {
	.suspend	= mtk_cirq_suspend,
	.resume		= mtk_cirq_resume,
};

static void mtk_cirq_syscore_init(void)
{
	register_syscore_ops(&mtk_cirq_syscore_ops);
}
#else
static inline void mtk_cirq_syscore_init(void) {}
#endif

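/*
 * Per-SoC match data selects the register layout: v1 and v2 provide the same
 * register groups, but v2 spaces them twice as far apart.
 */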
static const struct of_device_id mtk_cirq_of_match[] = {
	{ .compatible = "mediatek,mt2701-cirq", .data = &mtk_cirq_regoffs_v1 },
	{ .compatible = "mediatek,mt8135-cirq", .data = &mtk_cirq_regoffs_v1 },
	{ .compatible = "mediatek,mt8173-cirq", .data = &mtk_cirq_regoffs_v1 },
	{ .compatible = "mediatek,mt8192-cirq", .data = &mtk_cirq_regoffs_v2 },
	{ /* sentinel */ }
};

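/*
 * Probe the CIRQ from its devicetree node: map the registers, read the
 * "mediatek,ext-irq-range" window, pick the register layout from the match
 * data and stack a hierarchical IRQ domain on top of the parent controller.
 *
 * An illustrative node (addresses, range and parent are placeholders, not
 * taken from any particular dtsi) might look like:
 *
 *	cirq: interrupt-controller@10204000 {
 *		compatible = "mediatek,mt8173-cirq", "mediatek,mtk-cirq";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		interrupt-parent = <&sysirq>;
 *		reg = <0 0x10204000 0 0x400>;
 *		mediatek,ext-irq-range = <32 200>;
 *	};
 */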
static int __init mtk_cirq_of_init(struct device_node *node,
				   struct device_node *parent)
{
	struct irq_domain *domain, *domain_parent;
	const struct of_device_id *match;
	unsigned int irq_num;
	int ret;

	domain_parent = irq_find_host(parent);
	if (!domain_parent) {
		pr_err("mtk_cirq: interrupt-parent not found\n");
		return -EINVAL;
	}

	cirq_data = kzalloc(sizeof(*cirq_data), GFP_KERNEL);
	if (!cirq_data)
		return -ENOMEM;

	cirq_data->base = of_iomap(node, 0);
	if (!cirq_data->base) {
		pr_err("mtk_cirq: unable to map cirq register\n");
		ret = -ENXIO;
		goto out_free;
	}

	ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 0,
					 &cirq_data->ext_irq_start);
	if (ret)
		goto out_unmap;

	ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 1,
					 &cirq_data->ext_irq_end);
	if (ret)
		goto out_unmap;

	match = of_match_node(mtk_cirq_of_match, node);
	if (!match) {
		ret = -ENODEV;
		goto out_unmap;
	}
	cirq_data->offsets = match->data;

	irq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
	domain = irq_domain_add_hierarchy(domain_parent, 0,
					  irq_num, node,
					  &cirq_domain_ops, cirq_data);
	if (!domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}
	cirq_data->domain = domain;

	mtk_cirq_syscore_init();

	return 0;

out_unmap:
	iounmap(cirq_data->base);
out_free:
	kfree(cirq_data);
	return ret;
}

IRQCHIP_DECLARE(mtk_cirq, "mediatek,mtk-cirq", mtk_cirq_of_init);