// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017 NXP
 * Copyright (C) 2018 Pengutronix, Lucas Stach <kernel@pengutronix.de>
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

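/*
 * Register layout, as encoded by the macros below: CHANCTRL sits at
 * offset 0, followed by a bank of 32-bit mask registers, a bank of set
 * registers and a bank of status registers (each bank holding reg_num
 * words), with the master interrupt disable and master status registers
 * at the end.
 */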
#define CTRL_STRIDE_OFF(_t, _r)	(_t * 4 * _r)
#define CHANCTRL		0x0
#define CHANMASK(n, t)		(CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4)
#define CHANSET(n, t)		(CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4)
#define CHANSTATUS(n, t)	(CTRL_STRIDE_OFF(t, 2) + 0x4 * (n) + 0x4)
#define CHAN_MINTDIS(t)		(CTRL_STRIDE_OFF(t, 3) + 0x4)
#define CHAN_MASTRSTAT(t)	(CTRL_STRIDE_OFF(t, 3) + 0x8)

#define CHAN_MAX_OUTPUT_INT	0x8

struct irqsteer_data {
	void __iomem		*regs;
	struct clk		*ipg_clk;
	int			irq[CHAN_MAX_OUTPUT_INT];
	int			irq_count;
	raw_spinlock_t		lock;
	int			reg_num;
	int			channel;
	struct irq_domain	*domain;
	u32			*saved_reg;
};

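/*
 * The registers within each bank are ordered in reverse: inputs 0-31
 * live in the last (highest-offset) register of the bank, so the index
 * is counted back from reg_num.
 */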
static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
				      unsigned long irqnum)
{
	return (data->reg_num - irqnum / 32 - 1);
}

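/*
 * A set bit in CHANMASK enables the corresponding input: unmask sets
 * the bit, mask below clears it. The lock serialises the
 * read-modify-write between the two paths.
 */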
static void imx_irqsteer_irq_unmask(struct irq_data *d)
{
	struct irqsteer_data *data = d->chip_data;
	int idx = imx_irqsteer_get_reg_index(data, d->hwirq);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&data->lock, flags);
	val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
	val |= BIT(d->hwirq % 32);
	writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
	raw_spin_unlock_irqrestore(&data->lock, flags);
}

static void imx_irqsteer_irq_mask(struct irq_data *d)
{
	struct irqsteer_data *data = d->chip_data;
	int idx = imx_irqsteer_get_reg_index(data, d->hwirq);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&data->lock, flags);
	val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
	val &= ~BIT(d->hwirq % 32);
	writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
	raw_spin_unlock_irqrestore(&data->lock, flags);
}

static const struct irq_chip imx_irqsteer_irq_chip = {
	.name		= "irqsteer",
	.irq_mask	= imx_irqsteer_irq_mask,
	.irq_unmask	= imx_irqsteer_irq_unmask,
};

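/* All steered inputs are handled as level interrupts. */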
static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_status_flags(irq, IRQ_LEVEL);
	irq_set_chip_data(irq, h->host_data);
	irq_set_chip_and_handler(irq, &imx_irqsteer_irq_chip, handle_level_irq);

	return 0;
}

static const struct irq_domain_ops imx_irqsteer_domain_ops = {
	.map		= imx_irqsteer_irq_map,
	.xlate		= irq_domain_xlate_onecell,
};

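/*
 * Each output (parent) interrupt serves a group of 64 inputs; translate
 * the parent Linux IRQ back to the hwirq base of its group.
 */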
static int imx_irqsteer_get_hwirq_base(struct irqsteer_data *data, u32 irq)
{
	int i;

	for (i = 0; i < data->irq_count; i++) {
		if (data->irq[i] == irq)
			return i * 64;
	}

	return -EINVAL;
}

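/*
 * Chained handler for one output interrupt: walk the two status words
 * belonging to this group of 64 inputs and dispatch every pending input
 * into the irqsteer domain.
 */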
static void imx_irqsteer_irq_handler(struct irq_desc *desc)
{
	struct irqsteer_data *data = irq_desc_get_handler_data(desc);
	int hwirq;
	int irq, i;

	chained_irq_enter(irq_desc_get_chip(desc), desc);

	irq = irq_desc_get_irq(desc);
	hwirq = imx_irqsteer_get_hwirq_base(data, irq);
	if (hwirq < 0) {
		pr_warn("%s: unable to get hwirq base for irq %d\n",
			__func__, irq);
		return;
	}

	for (i = 0; i < 2; i++, hwirq += 32) {
		int idx = imx_irqsteer_get_reg_index(data, hwirq);
		unsigned long irqmap;
		int pos;

		if (hwirq >= data->reg_num * 32)
			break;

		irqmap = readl_relaxed(data->regs +
				       CHANSTATUS(idx, data->reg_num));

		for_each_set_bit(pos, &irqmap, 32)
			generic_handle_domain_irq(data->domain, pos + hwirq);
	}

	chained_irq_exit(irq_desc_get_chip(desc), desc);
}

static int imx_irqsteer_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct irqsteer_data *data;
	u32 irqs_num;
	int i, ret;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(data->regs)) {
		dev_err(&pdev->dev, "failed to initialize reg\n");
		return PTR_ERR(data->regs);
	}

	data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(data->ipg_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(data->ipg_clk),
				     "failed to get ipg clk\n");

	raw_spin_lock_init(&data->lock);

	ret = of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
	if (ret)
		return ret;
	ret = of_property_read_u32(np, "fsl,channel", &data->channel);
	if (ret)
		return ret;

	/*
	 * There is one output irq for each group of 64 inputs.
	 * One register bit map can represent 32 input interrupts.
	 */
	data->irq_count = DIV_ROUND_UP(irqs_num, 64);
	data->reg_num = irqs_num / 32;

	if (IS_ENABLED(CONFIG_PM)) {
		data->saved_reg = devm_kzalloc(&pdev->dev,
					sizeof(u32) * data->reg_num,
					GFP_KERNEL);
		if (!data->saved_reg)
			return -ENOMEM;
	}

	ret = clk_prepare_enable(data->ipg_clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
		return ret;
	}

	/* steer all IRQs into configured channel */
	writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);

	data->domain = irq_domain_add_linear(np, data->reg_num * 32,
					     &imx_irqsteer_domain_ops, data);
	if (!data->domain) {
		dev_err(&pdev->dev, "failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto out;
	}
	irq_domain_set_pm_device(data->domain, &pdev->dev);

	if (!data->irq_count || data->irq_count > CHAN_MAX_OUTPUT_INT) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < data->irq_count; i++) {
		data->irq[i] = irq_of_parse_and_map(np, i);
		if (!data->irq[i]) {
			ret = -EINVAL;
			goto out;
		}

		irq_set_chained_handler_and_data(data->irq[i],
						 imx_irqsteer_irq_handler,
						 data);
	}

	platform_set_drvdata(pdev, data);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;
out:
	clk_disable_unprepare(data->ipg_clk);
	return ret;
}

static int imx_irqsteer_remove(struct platform_device *pdev)
{
	struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < irqsteer_data->irq_count; i++)
		irq_set_chained_handler_and_data(irqsteer_data->irq[i],
						 NULL, NULL);

	irq_domain_remove(irqsteer_data->domain);

	clk_disable_unprepare(irqsteer_data->ipg_clk);

	return 0;
}

#ifdef CONFIG_PM
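/*
 * Runtime suspend gates the ipg clock and the block may lose its
 * configuration (e.g. if its power domain is shut off), so save the
 * mask registers here and rewrite them, together with the channel
 * selection, on resume.
 */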
static void imx_irqsteer_save_regs(struct irqsteer_data *data)
{
	int i;

	for (i = 0; i < data->reg_num; i++)
		data->saved_reg[i] = readl_relaxed(data->regs +
						CHANMASK(i, data->reg_num));
}

static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
{
	int i;

	writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
	for (i = 0; i < data->reg_num; i++)
		writel_relaxed(data->saved_reg[i],
			       data->regs + CHANMASK(i, data->reg_num));
}

static int imx_irqsteer_suspend(struct device *dev)
{
	struct irqsteer_data *irqsteer_data = dev_get_drvdata(dev);

	imx_irqsteer_save_regs(irqsteer_data);
	clk_disable_unprepare(irqsteer_data->ipg_clk);

	return 0;
}

static int imx_irqsteer_resume(struct device *dev)
{
	struct irqsteer_data *irqsteer_data = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(irqsteer_data->ipg_clk);
	if (ret) {
		dev_err(dev, "failed to enable ipg clk: %d\n", ret);
		return ret;
	}
	imx_irqsteer_restore_regs(irqsteer_data);

	return 0;
}
#endif

static const struct dev_pm_ops imx_irqsteer_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(imx_irqsteer_suspend,
			   imx_irqsteer_resume, NULL)
};

static const struct of_device_id imx_irqsteer_dt_ids[] = {
	{ .compatible = "fsl,imx-irqsteer", },
	{},
};

static struct platform_driver imx_irqsteer_driver = {
	.driver = {
		.name = "imx-irqsteer",
		.of_match_table = imx_irqsteer_dt_ids,
		.pm = &imx_irqsteer_pm_ops,
	},
	.probe = imx_irqsteer_probe,
	.remove = imx_irqsteer_remove,
};
builtin_platform_driver(imx_irqsteer_driver);