1 // SPDX-License-Identifier: GPL-2.0 2 // Copyright 2017 NXP 3 4 /* INTMUX Block Diagram 5 * 6 * ________________ 7 * interrupt source # 0 +---->| | 8 * | | | 9 * interrupt source # 1 +++-->| | 10 * ... | | | channel # 0 |--------->interrupt out # 0 11 * ... | | | | 12 * ... | | | | 13 * interrupt source # X-1 +++-->|________________| 14 * | | | 15 * | | | 16 * | | | ________________ 17 * +---->| | 18 * | | | | | 19 * | +-->| | 20 * | | | | channel # 1 |--------->interrupt out # 1 21 * | | +>| | 22 * | | | | | 23 * | | | |________________| 24 * | | | 25 * | | | 26 * | | | ... 27 * | | | ... 28 * | | | 29 * | | | ________________ 30 * +---->| | 31 * | | | | 32 * +-->| | 33 * | | channel # N |--------->interrupt out # N 34 * +>| | 35 * | | 36 * |________________| 37 * 38 * 39 * N: Interrupt Channel Instance Number (N=7) 40 * X: Interrupt Source Number for each channel (X=32) 41 * 42 * The INTMUX interrupt multiplexer has 8 channels, each channel receives 32 43 * interrupt sources and generates 1 interrupt output. 44 * 45 */ 46 47 #include <linux/clk.h> 48 #include <linux/interrupt.h> 49 #include <linux/irq.h> 50 #include <linux/irqchip/chained_irq.h> 51 #include <linux/irqdomain.h> 52 #include <linux/kernel.h> 53 #include <linux/of_irq.h> 54 #include <linux/of_platform.h> 55 #include <linux/spinlock.h> 56 #include <linux/pm_runtime.h> 57 58 #define CHANIER(n) (0x10 + (0x40 * n)) 59 #define CHANIPR(n) (0x20 + (0x40 * n)) 60 61 #define CHAN_MAX_NUM 0x8 62 63 struct intmux_irqchip_data { 64 struct irq_chip chip; 65 u32 saved_reg; 66 int chanidx; 67 int irq; 68 struct irq_domain *domain; 69 }; 70 71 struct intmux_data { 72 raw_spinlock_t lock; 73 void __iomem *regs; 74 struct clk *ipg_clk; 75 int channum; 76 struct intmux_irqchip_data irqchip_data[]; 77 }; 78 79 static void imx_intmux_irq_mask(struct irq_data *d) 80 { 81 struct intmux_irqchip_data *irqchip_data = d->chip_data; 82 int idx = irqchip_data->chanidx; 83 struct intmux_data *data = container_of(irqchip_data, struct intmux_data, 84 irqchip_data[idx]); 85 unsigned long flags; 86 void __iomem *reg; 87 u32 val; 88 89 raw_spin_lock_irqsave(&data->lock, flags); 90 reg = data->regs + CHANIER(idx); 91 val = readl_relaxed(reg); 92 /* disable the interrupt source of this channel */ 93 val &= ~BIT(d->hwirq); 94 writel_relaxed(val, reg); 95 raw_spin_unlock_irqrestore(&data->lock, flags); 96 } 97 98 static void imx_intmux_irq_unmask(struct irq_data *d) 99 { 100 struct intmux_irqchip_data *irqchip_data = d->chip_data; 101 int idx = irqchip_data->chanidx; 102 struct intmux_data *data = container_of(irqchip_data, struct intmux_data, 103 irqchip_data[idx]); 104 unsigned long flags; 105 void __iomem *reg; 106 u32 val; 107 108 raw_spin_lock_irqsave(&data->lock, flags); 109 reg = data->regs + CHANIER(idx); 110 val = readl_relaxed(reg); 111 /* enable the interrupt source of this channel */ 112 val |= BIT(d->hwirq); 113 writel_relaxed(val, reg); 114 raw_spin_unlock_irqrestore(&data->lock, flags); 115 } 116 117 static struct irq_chip imx_intmux_irq_chip = { 118 .name = "intmux", 119 .irq_mask = imx_intmux_irq_mask, 120 .irq_unmask = imx_intmux_irq_unmask, 121 }; 122 123 static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq, 124 irq_hw_number_t hwirq) 125 { 126 struct intmux_irqchip_data *data = h->host_data; 127 128 irq_set_chip_data(irq, data); 129 irq_set_chip_and_handler(irq, &data->chip, handle_level_irq); 130 131 return 0; 132 } 133 134 static int imx_intmux_irq_xlate(struct irq_domain *d, struct device_node *node, 135 const u32 *intspec, unsigned int intsize, 136 unsigned long *out_hwirq, unsigned int *out_type) 137 { 138 struct intmux_irqchip_data *irqchip_data = d->host_data; 139 int idx = irqchip_data->chanidx; 140 struct intmux_data *data = container_of(irqchip_data, struct intmux_data, 141 irqchip_data[idx]); 142 143 /* 144 * two cells needed in interrupt specifier: 145 * the 1st cell: hw interrupt number 146 * the 2nd cell: channel index 147 */ 148 if (WARN_ON(intsize != 2)) 149 return -EINVAL; 150 151 if (WARN_ON(intspec[1] >= data->channum)) 152 return -EINVAL; 153 154 *out_hwirq = intspec[0]; 155 *out_type = IRQ_TYPE_LEVEL_HIGH; 156 157 return 0; 158 } 159 160 static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec, 161 enum irq_domain_bus_token bus_token) 162 { 163 struct intmux_irqchip_data *irqchip_data = d->host_data; 164 165 /* Not for us */ 166 if (fwspec->fwnode != d->fwnode) 167 return false; 168 169 return irqchip_data->chanidx == fwspec->param[1]; 170 } 171 172 static const struct irq_domain_ops imx_intmux_domain_ops = { 173 .map = imx_intmux_irq_map, 174 .xlate = imx_intmux_irq_xlate, 175 .select = imx_intmux_irq_select, 176 }; 177 178 static void imx_intmux_irq_handler(struct irq_desc *desc) 179 { 180 struct intmux_irqchip_data *irqchip_data = irq_desc_get_handler_data(desc); 181 int idx = irqchip_data->chanidx; 182 struct intmux_data *data = container_of(irqchip_data, struct intmux_data, 183 irqchip_data[idx]); 184 unsigned long irqstat; 185 int pos, virq; 186 187 chained_irq_enter(irq_desc_get_chip(desc), desc); 188 189 /* read the interrupt source pending status of this channel */ 190 irqstat = readl_relaxed(data->regs + CHANIPR(idx)); 191 192 for_each_set_bit(pos, &irqstat, 32) { 193 virq = irq_find_mapping(irqchip_data->domain, pos); 194 if (virq) 195 generic_handle_irq(virq); 196 } 197 198 chained_irq_exit(irq_desc_get_chip(desc), desc); 199 } 200 201 static int imx_intmux_probe(struct platform_device *pdev) 202 { 203 struct device_node *np = pdev->dev.of_node; 204 struct irq_domain *domain; 205 struct intmux_data *data; 206 int channum; 207 int i, ret; 208 209 channum = platform_irq_count(pdev); 210 if (channum == -EPROBE_DEFER) { 211 return -EPROBE_DEFER; 212 } else if (channum > CHAN_MAX_NUM) { 213 dev_err(&pdev->dev, "supports up to %d multiplex channels\n", 214 CHAN_MAX_NUM); 215 return -EINVAL; 216 } 217 218 data = devm_kzalloc(&pdev->dev, struct_size(data, irqchip_data, channum), GFP_KERNEL); 219 if (!data) 220 return -ENOMEM; 221 222 data->regs = devm_platform_ioremap_resource(pdev, 0); 223 if (IS_ERR(data->regs)) { 224 dev_err(&pdev->dev, "failed to initialize reg\n"); 225 return PTR_ERR(data->regs); 226 } 227 228 data->ipg_clk = devm_clk_get(&pdev->dev, "ipg"); 229 if (IS_ERR(data->ipg_clk)) 230 return dev_err_probe(&pdev->dev, PTR_ERR(data->ipg_clk), 231 "failed to get ipg clk\n"); 232 233 data->channum = channum; 234 raw_spin_lock_init(&data->lock); 235 236 pm_runtime_get_noresume(&pdev->dev); 237 pm_runtime_set_active(&pdev->dev); 238 pm_runtime_enable(&pdev->dev); 239 240 ret = clk_prepare_enable(data->ipg_clk); 241 if (ret) { 242 dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret); 243 return ret; 244 } 245 246 for (i = 0; i < channum; i++) { 247 data->irqchip_data[i].chip = imx_intmux_irq_chip; 248 data->irqchip_data[i].chip.parent_device = &pdev->dev; 249 data->irqchip_data[i].chanidx = i; 250 251 data->irqchip_data[i].irq = irq_of_parse_and_map(np, i); 252 if (data->irqchip_data[i].irq <= 0) { 253 ret = -EINVAL; 254 dev_err(&pdev->dev, "failed to get irq\n"); 255 goto out; 256 } 257 258 domain = irq_domain_add_linear(np, 32, &imx_intmux_domain_ops, 259 &data->irqchip_data[i]); 260 if (!domain) { 261 ret = -ENOMEM; 262 dev_err(&pdev->dev, "failed to create IRQ domain\n"); 263 goto out; 264 } 265 data->irqchip_data[i].domain = domain; 266 267 /* disable all interrupt sources of this channel firstly */ 268 writel_relaxed(0, data->regs + CHANIER(i)); 269 270 irq_set_chained_handler_and_data(data->irqchip_data[i].irq, 271 imx_intmux_irq_handler, 272 &data->irqchip_data[i]); 273 } 274 275 platform_set_drvdata(pdev, data); 276 277 /* 278 * Let pm_runtime_put() disable clock. 279 * If CONFIG_PM is not enabled, the clock will stay powered. 280 */ 281 pm_runtime_put(&pdev->dev); 282 283 return 0; 284 out: 285 clk_disable_unprepare(data->ipg_clk); 286 return ret; 287 } 288 289 static int imx_intmux_remove(struct platform_device *pdev) 290 { 291 struct intmux_data *data = platform_get_drvdata(pdev); 292 int i; 293 294 for (i = 0; i < data->channum; i++) { 295 /* disable all interrupt sources of this channel */ 296 writel_relaxed(0, data->regs + CHANIER(i)); 297 298 irq_set_chained_handler_and_data(data->irqchip_data[i].irq, 299 NULL, NULL); 300 301 irq_domain_remove(data->irqchip_data[i].domain); 302 } 303 304 pm_runtime_disable(&pdev->dev); 305 306 return 0; 307 } 308 309 #ifdef CONFIG_PM 310 static int imx_intmux_runtime_suspend(struct device *dev) 311 { 312 struct intmux_data *data = dev_get_drvdata(dev); 313 struct intmux_irqchip_data *irqchip_data; 314 int i; 315 316 for (i = 0; i < data->channum; i++) { 317 irqchip_data = &data->irqchip_data[i]; 318 irqchip_data->saved_reg = readl_relaxed(data->regs + CHANIER(i)); 319 } 320 321 clk_disable_unprepare(data->ipg_clk); 322 323 return 0; 324 } 325 326 static int imx_intmux_runtime_resume(struct device *dev) 327 { 328 struct intmux_data *data = dev_get_drvdata(dev); 329 struct intmux_irqchip_data *irqchip_data; 330 int ret, i; 331 332 ret = clk_prepare_enable(data->ipg_clk); 333 if (ret) { 334 dev_err(dev, "failed to enable ipg clk: %d\n", ret); 335 return ret; 336 } 337 338 for (i = 0; i < data->channum; i++) { 339 irqchip_data = &data->irqchip_data[i]; 340 writel_relaxed(irqchip_data->saved_reg, data->regs + CHANIER(i)); 341 } 342 343 return 0; 344 } 345 #endif 346 347 static const struct dev_pm_ops imx_intmux_pm_ops = { 348 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 349 pm_runtime_force_resume) 350 SET_RUNTIME_PM_OPS(imx_intmux_runtime_suspend, 351 imx_intmux_runtime_resume, NULL) 352 }; 353 354 static const struct of_device_id imx_intmux_id[] = { 355 { .compatible = "fsl,imx-intmux", }, 356 { /* sentinel */ }, 357 }; 358 359 static struct platform_driver imx_intmux_driver = { 360 .driver = { 361 .name = "imx-intmux", 362 .of_match_table = imx_intmux_id, 363 .pm = &imx_intmux_pm_ops, 364 }, 365 .probe = imx_intmux_probe, 366 .remove = imx_intmux_remove, 367 }; 368 builtin_platform_driver(imx_intmux_driver); 369