// SPDX-License-Identifier: GPL-2.0
// Copyright 2017 NXP

/* INTMUX Block Diagram
 *
 *                                 ________________
 *  interrupt source #  0   +---->|                |
 *                          |     |                |
 *  interrupt source #  1   +++-->|                |
 *            ...           | | | |   channel # 0  |--------->interrupt out # 0
 *            ...           | | | |                |
 *            ...           | | | |                |
 *  interrupt source # X-1  +++-->|________________|
 *                          | | |
 *                          | | |
 *                          | | |  ________________
 *                          +---->|                |
 *                          | | | |                |
 *                          | +-->|                |
 *                          | | | |   channel # 1  |--------->interrupt out # 1
 *                          | | +>|                |
 *                          | | | |                |
 *                          | | | |________________|
 *                          | | |
 *                          | | |
 *                          | | |        ...
 *                          | | |        ...
 *                          | | |
 *                          | | |  ________________
 *                          +---->|                |
 *                            | | |                |
 *                            +-->|                |
 *                              | |   channel # N  |--------->interrupt out # N
 *                              +>|                |
 *                                |                |
 *                                |________________|
 *
 *
 * N: Interrupt Channel Instance Number (N=7)
 * X: Interrupt Source Number for each channel (X=32)
 *
 * The INTMUX interrupt multiplexer has 8 channels; each channel receives 32
 * interrupt sources and generates one interrupt output.
 *
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>

/* per-channel Interrupt Enable and Interrupt Pending register offsets */
#define CHANIER(n)      (0x10 + (0x40 * (n)))
#define CHANIPR(n)      (0x20 + (0x40 * (n)))

#define CHAN_MAX_NUM    0x8
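
/*
 * Worked example, derived from the macros above and given for illustration
 * only: channel 2's enable register CHANIER(2) sits at 0x10 + 0x40 * 2 = 0x90
 * and its pending register CHANIPR(2) at 0x20 + 0x40 * 2 = 0xa0, i.e. each
 * channel occupies a 0x40-byte stride in the register map.
 */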

struct intmux_irqchip_data {
        struct irq_chip chip;
        u32 saved_reg;
        int chanidx;
        int irq;
        struct irq_domain *domain;
};

struct intmux_data {
        raw_spinlock_t lock;
        void __iomem *regs;
        struct clk *ipg_clk;
        int channum;
        struct intmux_irqchip_data irqchip_data[];
};

static void imx_intmux_irq_mask(struct irq_data *d)
{
        struct intmux_irqchip_data *irqchip_data = d->chip_data;
        int idx = irqchip_data->chanidx;
        struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
                                                irqchip_data[idx]);
        unsigned long flags;
        void __iomem *reg;
        u32 val;

        raw_spin_lock_irqsave(&data->lock, flags);
        reg = data->regs + CHANIER(idx);
        val = readl_relaxed(reg);
        /* disable the interrupt source of this channel */
        val &= ~BIT(d->hwirq);
        writel_relaxed(val, reg);
        raw_spin_unlock_irqrestore(&data->lock, flags);
}

static void imx_intmux_irq_unmask(struct irq_data *d)
{
        struct intmux_irqchip_data *irqchip_data = d->chip_data;
        int idx = irqchip_data->chanidx;
        struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
                                                irqchip_data[idx]);
        unsigned long flags;
        void __iomem *reg;
        u32 val;

        raw_spin_lock_irqsave(&data->lock, flags);
        reg = data->regs + CHANIER(idx);
        val = readl_relaxed(reg);
        /* enable the interrupt source of this channel */
        val |= BIT(d->hwirq);
        writel_relaxed(val, reg);
        raw_spin_unlock_irqrestore(&data->lock, flags);
}

static struct irq_chip imx_intmux_irq_chip = {
        .name = "intmux",
        .irq_mask = imx_intmux_irq_mask,
        .irq_unmask = imx_intmux_irq_unmask,
};

static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq,
                              irq_hw_number_t hwirq)
{
        struct intmux_irqchip_data *data = h->host_data;

        irq_set_chip_data(irq, data);
        irq_set_chip_and_handler(irq, &data->chip, handle_level_irq);

        return 0;
}

static int imx_intmux_irq_xlate(struct irq_domain *d, struct device_node *node,
                                const u32 *intspec, unsigned int intsize,
                                unsigned long *out_hwirq, unsigned int *out_type)
{
        struct intmux_irqchip_data *irqchip_data = d->host_data;
        int idx = irqchip_data->chanidx;
        struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
                                                irqchip_data[idx]);

        /*
         * two cells needed in interrupt specifier:
         * the 1st cell: hw interrupt number
         * the 2nd cell: channel index
         */
        if (WARN_ON(intsize != 2))
                return -EINVAL;

        if (WARN_ON(intspec[1] >= data->channum))
                return -EINVAL;

        *out_hwirq = intspec[0];
        *out_type = IRQ_TYPE_LEVEL_HIGH;

        return 0;
}

static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec,
                                 enum irq_domain_bus_token bus_token)
{
        struct intmux_irqchip_data *irqchip_data = d->host_data;

        /* Not for us */
        if (fwspec->fwnode != d->fwnode)
                return false;

        return irqchip_data->chanidx == fwspec->param[1];
}

static const struct irq_domain_ops imx_intmux_domain_ops = {
        .map = imx_intmux_irq_map,
        .xlate = imx_intmux_irq_xlate,
        .select = imx_intmux_irq_select,
};
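
/*
 * Illustrative devicetree fragment showing the two-cell specifier decoded by
 * the xlate/select callbacks above. This is a sketch only: node names, unit
 * addresses, parent interrupt numbers and clock phandles are made up, not
 * taken from any real board. The "fsl,imx-intmux" compatible, the "ipg"
 * clock name, the two interrupt cells and the one-parent-IRQ-per-channel
 * layout do come from this driver.
 *
 *	intmux: interrupt-controller@37400000 {
 *		compatible = "fsl,imx-intmux";
 *		reg = <0x37400000 0x1000>;
 *		// one parent interrupt per channel, up to 8 channels
 *		interrupts = <0 16 4>, <0 17 4>;
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *		clocks = <&some_clk>;
 *		clock-names = "ipg";
 *	};
 *
 *	some_device {
 *		// 1st cell: interrupt source within the channel (0..31)
 *		// 2nd cell: channel index
 *		interrupts-extended = <&intmux 5 0>;
 *	};
 */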

static void imx_intmux_irq_handler(struct irq_desc *desc)
{
        struct intmux_irqchip_data *irqchip_data = irq_desc_get_handler_data(desc);
        int idx = irqchip_data->chanidx;
        struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
                                                irqchip_data[idx]);
        unsigned long irqstat;
        int pos, virq;

        chained_irq_enter(irq_desc_get_chip(desc), desc);

        /* read the interrupt source pending status of this channel */
        irqstat = readl_relaxed(data->regs + CHANIPR(idx));

        for_each_set_bit(pos, &irqstat, 32) {
                virq = irq_find_mapping(irqchip_data->domain, pos);
                if (virq)
                        generic_handle_irq(virq);
        }

        chained_irq_exit(irq_desc_get_chip(desc), desc);
}

static int imx_intmux_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct irq_domain *domain;
        struct intmux_data *data;
        int channum;
        int i, ret;

        channum = platform_irq_count(pdev);
        if (channum == -EPROBE_DEFER) {
                return -EPROBE_DEFER;
        } else if (channum > CHAN_MAX_NUM) {
                dev_err(&pdev->dev, "supports up to %d multiplex channels\n",
                        CHAN_MAX_NUM);
                return -EINVAL;
        }

        data = devm_kzalloc(&pdev->dev,
                            struct_size(data, irqchip_data, channum), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(data->regs)) {
                dev_err(&pdev->dev, "failed to initialize reg\n");
                return PTR_ERR(data->regs);
        }

        data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(data->ipg_clk)) {
                ret = PTR_ERR(data->ipg_clk);
                if (ret != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
                return ret;
        }

        data->channum = channum;
        raw_spin_lock_init(&data->lock);

        pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        ret = clk_prepare_enable(data->ipg_clk);
        if (ret) {
                dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
                return ret;
        }

        for (i = 0; i < channum; i++) {
                data->irqchip_data[i].chip = imx_intmux_irq_chip;
                data->irqchip_data[i].chip.parent_device = &pdev->dev;
                data->irqchip_data[i].chanidx = i;

                data->irqchip_data[i].irq = irq_of_parse_and_map(np, i);
                if (data->irqchip_data[i].irq <= 0) {
                        ret = -EINVAL;
                        dev_err(&pdev->dev, "failed to get irq\n");
                        goto out;
                }

                domain = irq_domain_add_linear(np, 32, &imx_intmux_domain_ops,
                                               &data->irqchip_data[i]);
                if (!domain) {
                        ret = -ENOMEM;
                        dev_err(&pdev->dev, "failed to create IRQ domain\n");
                        goto out;
                }
                data->irqchip_data[i].domain = domain;

                /* disable all interrupt sources of this channel first */
                writel_relaxed(0, data->regs + CHANIER(i));

                irq_set_chained_handler_and_data(data->irqchip_data[i].irq,
                                                 imx_intmux_irq_handler,
                                                 &data->irqchip_data[i]);
        }

        platform_set_drvdata(pdev, data);

        /*
         * Let pm_runtime_put() disable the clock.
         * If CONFIG_PM is not enabled, the clock stays powered.
         */
        pm_runtime_put(&pdev->dev);

        return 0;
out:
        clk_disable_unprepare(data->ipg_clk);
        return ret;
}

static int imx_intmux_remove(struct platform_device *pdev)
{
        struct intmux_data *data = platform_get_drvdata(pdev);
        int i;

        for (i = 0; i < data->channum; i++) {
                /* disable all interrupt sources of this channel */
                writel_relaxed(0, data->regs + CHANIER(i));

                irq_set_chained_handler_and_data(data->irqchip_data[i].irq,
                                                 NULL, NULL);

                irq_domain_remove(data->irqchip_data[i].domain);
        }

        pm_runtime_disable(&pdev->dev);

        return 0;
}

#ifdef CONFIG_PM
static int imx_intmux_runtime_suspend(struct device *dev)
{
        struct intmux_data *data = dev_get_drvdata(dev);
        struct intmux_irqchip_data *irqchip_data;
        int i;

        /* save each channel's interrupt enable state before gating the clock */
        for (i = 0; i < data->channum; i++) {
                irqchip_data = &data->irqchip_data[i];
                irqchip_data->saved_reg = readl_relaxed(data->regs + CHANIER(i));
        }

        clk_disable_unprepare(data->ipg_clk);

        return 0;
}

static int imx_intmux_runtime_resume(struct device *dev)
{
        struct intmux_data *data = dev_get_drvdata(dev);
        struct intmux_irqchip_data *irqchip_data;
        int ret, i;

        ret = clk_prepare_enable(data->ipg_clk);
        if (ret) {
                dev_err(dev, "failed to enable ipg clk: %d\n", ret);
                return ret;
        }

        /* restore each channel's saved interrupt enable state */
        for (i = 0; i < data->channum; i++) {
                irqchip_data = &data->irqchip_data[i];
                writel_relaxed(irqchip_data->saved_reg, data->regs + CHANIER(i));
        }

        return 0;
}
#endif

static const struct dev_pm_ops imx_intmux_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                      pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(imx_intmux_runtime_suspend,
                           imx_intmux_runtime_resume, NULL)
};

static const struct of_device_id imx_intmux_id[] = {
        { .compatible = "fsl,imx-intmux", },
        { /* sentinel */ },
};

static struct platform_driver imx_intmux_driver = {
        .driver = {
                .name = "imx-intmux",
                .of_match_table = imx_intmux_id,
                .pm = &imx_intmux_pm_ops,
        },
        .probe = imx_intmux_probe,
        .remove = imx_intmux_remove,
};
builtin_platform_driver(imx_intmux_driver);