/*
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Youlin.Pei <youlin.pei@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#define CIRQ_ACK	0x40
#define CIRQ_MASK_SET	0xc0
#define CIRQ_MASK_CLR	0x100
#define CIRQ_SENS_SET	0x180
#define CIRQ_SENS_CLR	0x1c0
#define CIRQ_POL_SET	0x240
#define CIRQ_POL_CLR	0x280
#define CIRQ_CONTROL	0x300

#define CIRQ_EN	0x1
#define CIRQ_EDGE	0x2
#define CIRQ_FLUSH	0x4

struct mtk_cirq_chip_data {
	void __iomem *base;
	unsigned int ext_irq_start;
	unsigned int ext_irq_end;
	struct irq_domain *domain;
};

static struct mtk_cirq_chip_data *cirq_data;

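/*
 * Each CIRQ register bank packs 32 interrupts, one bit per interrupt.
 * mtk_cirq_write_mask() writes a single-bit mask to the register of the
 * given bank (ACK, MASK_SET/CLR, SENS_SET/CLR or POL_SET/CLR) that covers
 * the hwirq of @data.
 */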
static void mtk_cirq_write_mask(struct irq_data *data, unsigned int offset)
{
	struct mtk_cirq_chip_data *chip_data = data->chip_data;
	unsigned int cirq_num = data->hwirq;
	u32 mask = 1 << (cirq_num % 32);

	writel_relaxed(mask, chip_data->base + offset + (cirq_num / 32) * 4);
}

static void mtk_cirq_mask(struct irq_data *data)
{
	mtk_cirq_write_mask(data, CIRQ_MASK_SET);
	irq_chip_mask_parent(data);
}

static void mtk_cirq_unmask(struct irq_data *data)
{
	mtk_cirq_write_mask(data, CIRQ_MASK_CLR);
	irq_chip_unmask_parent(data);
}

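/*
 * Trigger type is encoded in two bits per interrupt, as programmed below:
 * SENS set selects level trigger, SENS clear selects edge trigger;
 * POL set selects active-high/rising, POL clear selects active-low/falling.
 * The type is also forwarded to the parent domain.
 */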
static int mtk_cirq_set_type(struct irq_data *data, unsigned int type)
{
	int ret;

	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		mtk_cirq_write_mask(data, CIRQ_POL_CLR);
		mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
		break;
	case IRQ_TYPE_EDGE_RISING:
		mtk_cirq_write_mask(data, CIRQ_POL_SET);
		mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
		break;
	case IRQ_TYPE_LEVEL_LOW:
		mtk_cirq_write_mask(data, CIRQ_POL_CLR);
		mtk_cirq_write_mask(data, CIRQ_SENS_SET);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		mtk_cirq_write_mask(data, CIRQ_POL_SET);
		mtk_cirq_write_mask(data, CIRQ_SENS_SET);
		break;
	default:
		break;
	}

	data = data->parent_data;
	ret = data->chip->irq_set_type(data, type);
	return ret;
}

static struct irq_chip mtk_cirq_chip = {
	.name			= "MT_CIRQ",
	.irq_mask		= mtk_cirq_mask,
	.irq_unmask		= mtk_cirq_unmask,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_type		= mtk_cirq_set_type,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

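/*
 * The DT interrupt specifier uses the same three-cell layout as the GIC
 * parent: cell 0 must be 0 (SPI, no PPIs here), cell 1 is the external
 * interrupt number, cell 2 holds the trigger flags.  The hwirq in this
 * domain is the external interrupt number rebased to ext_irq_start.
 */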
static int mtk_cirq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		/* check that the irq number is within the CIRQ-supported range */
		if (fwspec->param[1] < cirq_data->ext_irq_start ||
		    fwspec->param[1] > cirq_data->ext_irq_end)
			return -EINVAL;

		*hwirq = fwspec->param[1] - cirq_data->ext_irq_start;
		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		return 0;
	}

	return -EINVAL;
}

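/*
 * Allocation installs mtk_cirq_chip for the virq in this domain and then
 * allocates the same specifier in the parent domain, with only the fwnode
 * swapped so the parent controller decodes it as its own.
 */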
static int mtk_cirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;
	struct irq_fwspec parent_fwspec = *fwspec;

	ret = mtk_cirq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	if (WARN_ON(nr_irqs != 1))
		return -EINVAL;

	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
				      &mtk_cirq_chip,
				      domain->host_data);

	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops cirq_domain_ops = {
	.translate	= mtk_cirq_domain_translate,
	.alloc		= mtk_cirq_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

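/*
 * Across suspend, CIRQ stands in for the parent controller: on suspend it
 * is switched to edge-only mode so it latches edge-triggered events while
 * the GIC may be powered down; on resume the recorded interrupts are
 * flushed back to the parent controller and CIRQ is disabled again.
 */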
#ifdef CONFIG_PM_SLEEP
static int mtk_cirq_suspend(void)
{
	u32 value, mask;
	unsigned int irq, hwirq_num;
	bool pending, masked;
	int i, pendret, maskret;

	/*
	 * When an external interrupt arrives, CIRQ records its status even
	 * if CIRQ is not enabled.  When the flush command is executed, CIRQ
	 * resends signals according to the recorded status, so stale status
	 * bits must be cleared here or CIRQ will resend spurious signals.
	 *
	 * arch_suspend_disable_irqs() is called before the CIRQ suspend
	 * callback.  If every status bit were simply cleared, external
	 * interrupts that arrived between arch_suspend_disable_irqs() and
	 * this callback would be lost.  Avoid that as follows:
	 *
	 * - Iterate over all the CIRQ supported interrupts;
	 * - For each interrupt, inspect its pending and masked status at GIC
	 *   level;
	 * - If it is pending and unmasked, it arrived between
	 *   arch_suspend_disable_irqs() and this callback, so don't ACK it.
	 *   Otherwise, ACK it.
	 */
	hwirq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
	for (i = 0; i < hwirq_num; i++) {
		irq = irq_find_mapping(cirq_data->domain, i);
		if (irq) {
			pendret = irq_get_irqchip_state(irq,
							IRQCHIP_STATE_PENDING,
							&pending);

			maskret = irq_get_irqchip_state(irq,
							IRQCHIP_STATE_MASKED,
							&masked);

			if (pendret == 0 && maskret == 0 &&
			    (pending && !masked))
				continue;
		}

		mask = 1 << (i % 32);
		writel_relaxed(mask, cirq_data->base + CIRQ_ACK + (i / 32) * 4);
	}

	/* enable cirq in edge-only mode to record edge-triggered interrupts */
	value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
	value |= (CIRQ_EDGE | CIRQ_EN);
	writel_relaxed(value, cirq_data->base + CIRQ_CONTROL);

	return 0;
}

static void mtk_cirq_resume(void)
{
	u32 value;

	/* flush recorded interrupts, resending their signals to the parent controller */
	value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
	writel_relaxed(value | CIRQ_FLUSH, cirq_data->base + CIRQ_CONTROL);

	/* disable cirq */
	value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
	value &= ~(CIRQ_EDGE | CIRQ_EN);
	writel_relaxed(value, cirq_data->base + CIRQ_CONTROL);
}

static struct syscore_ops mtk_cirq_syscore_ops = {
	.suspend	= mtk_cirq_suspend,
	.resume		= mtk_cirq_resume,
};

static void mtk_cirq_syscore_init(void)
{
	register_syscore_ops(&mtk_cirq_syscore_ops);
}
#else
static inline void mtk_cirq_syscore_init(void) {}
#endif

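/*
 * Initialization reads the register base from "reg", the parent domain from
 * the interrupt parent, and the covered parent interrupt numbers from the
 * two-cell "mediatek,ext-irq-range" property.  Illustrative device tree
 * node (label, addresses and range below are hypothetical, not taken from
 * a real SoC):
 *
 *	cirq: interrupt-controller@10204000 {
 *		compatible = "mediatek,mtk-cirq";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		interrupt-parent = <&gic>;
 *		reg = <0 0x10204000 0 0x400>;
 *		mediatek,ext-irq-range = <32 199>;
 *	};
 */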
static int __init mtk_cirq_of_init(struct device_node *node,
				   struct device_node *parent)
{
	struct irq_domain *domain, *domain_parent;
	unsigned int irq_num;
	int ret;

	domain_parent = irq_find_host(parent);
	if (!domain_parent) {
		pr_err("mtk_cirq: interrupt-parent not found\n");
		return -EINVAL;
	}

	cirq_data = kzalloc(sizeof(*cirq_data), GFP_KERNEL);
	if (!cirq_data)
		return -ENOMEM;

	cirq_data->base = of_iomap(node, 0);
	if (!cirq_data->base) {
		pr_err("mtk_cirq: unable to map cirq register\n");
		ret = -ENXIO;
		goto out_free;
	}

	ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 0,
					 &cirq_data->ext_irq_start);
	if (ret)
		goto out_unmap;

	ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 1,
					 &cirq_data->ext_irq_end);
	if (ret)
		goto out_unmap;

	irq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
	domain = irq_domain_add_hierarchy(domain_parent, 0,
					  irq_num, node,
					  &cirq_domain_ops, cirq_data);
	if (!domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}
	cirq_data->domain = domain;

	mtk_cirq_syscore_init();

	return 0;

out_unmap:
	iounmap(cirq_data->base);
out_free:
	kfree(cirq_data);
	return ret;
}

IRQCHIP_DECLARE(mtk_cirq, "mediatek,mtk-cirq", mtk_cirq_of_init);