/*
 * drivers/irqchip/irq-crossbar.c
 *
 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
 * Author: Sricharan R <r.sricharan@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>

#define IRQ_FREE	-1
#define IRQ_RESERVED	-2
#define IRQ_SKIP	-3
#define GIC_IRQ_START	32

/**
 * struct crossbar_device - crossbar device description
 * @lock: spinlock serializing access to @irq_map
 * @int_max: maximum number of supported interrupts
 * @safe_map: safe default value to initialize the crossbar
 * @max_crossbar_sources: maximum number of crossbar sources
 * @irq_map: array of interrupts to crossbar number mapping
 * @crossbar_base: crossbar base address
 * @register_offsets: offsets for each irq number
 * @write: register write function pointer
 */
struct crossbar_device {
	raw_spinlock_t lock;
	uint int_max;
	uint safe_map;
	uint max_crossbar_sources;
	uint *irq_map;
	void __iomem *crossbar_base;
	int *register_offsets;
	void (*write)(int, int);
};

static struct crossbar_device *cb;

static void crossbar_writel(int irq_no, int cb_no)
{
	writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

static void crossbar_writew(int irq_no, int cb_no)
{
	writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

static void crossbar_writeb(int irq_no, int cb_no)
{
	writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

static struct irq_chip crossbar_chip = {
	.name			= "CBAR",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= irq_chip_set_type_parent,
	.flags			= IRQCHIP_MASK_ON_SUSPEND |
				  IRQCHIP_SKIP_SET_WAKE,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

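/**
 * allocate_gic_irq - route a crossbar input to a free parent GIC SPI
 * @domain: crossbar domain the allocation is made for
 * @virq: linux irq number being set up
 * @hwirq: crossbar input (event source) number to route
 *
 * Descriptive comment added for clarity: scans @irq_map from the highest
 * output downwards for a free slot, reserves it for @hwirq, allocates the
 * matching SPI in the parent GIC domain and, on success, programs the
 * crossbar register so the input is routed to the chosen output. The slot
 * is released again if the parent allocation fails.
 */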
static int allocate_gic_irq(struct irq_domain *domain, unsigned virq,
			    irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	int i;
	int err;

	if (!irq_domain_get_of_node(domain->parent))
		return -EINVAL;

	raw_spin_lock(&cb->lock);
	for (i = cb->int_max - 1; i >= 0; i--) {
		if (cb->irq_map[i] == IRQ_FREE) {
			cb->irq_map[i] = hwirq;
			break;
		}
	}
	raw_spin_unlock(&cb->lock);

	if (i < 0)
		return -ENODEV;

	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 3;
	fwspec.param[0] = 0;	/* SPI */
	fwspec.param[1] = i;
	fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (err)
		cb->irq_map[i] = IRQ_FREE;
	else
		cb->write(i, hwirq);

	return err;
}

static int crossbar_domain_alloc(struct irq_domain *d, unsigned int virq,
				 unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	irq_hw_number_t hwirq;
	int i;

	if (fwspec->param_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (fwspec->param[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = fwspec->param[1];
	if ((hwirq + nr_irqs) > cb->max_crossbar_sources)
		return -EINVAL;	/* Can't deal with this */

	for (i = 0; i < nr_irqs; i++) {
		int err = allocate_gic_irq(d, virq + i, hwirq + i);

		if (err)
			return err;

		irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
					      &crossbar_chip, NULL);
	}

	return 0;
}

/**
 * crossbar_domain_free - unmap/free a crossbar<->irq connection
 * @domain: domain of irq to unmap
 * @virq: virq number
 * @nr_irqs: number of irqs to free
 *
 * We do not maintain a use count of the total number of map/unmap
 * calls for a particular irq to find out if the irq can really be
 * unmapped. This is because unmap is called during irq_dispose_mapping(irq),
 * after which the irq is anyway unusable. So an explicit map has to be
 * called after that.
 */
static void crossbar_domain_free(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	int i;

	raw_spin_lock(&cb->lock);
	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_domain_reset_irq_data(d);
		cb->irq_map[d->hwirq] = IRQ_FREE;
		cb->write(d->hwirq, cb->safe_map);
	}
	raw_spin_unlock(&cb->lock);
}

static int crossbar_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		return 0;
	}

	return -EINVAL;
}

static const struct irq_domain_ops crossbar_domain_ops = {
	.alloc		= crossbar_domain_alloc,
	.free		= crossbar_domain_free,
	.translate	= crossbar_domain_translate,
};

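/*
 * Illustrative device-tree fragment for the properties parsed by
 * crossbar_of_init() below. Only the compatible string and the "ti,*"
 * property names are taken from this driver; the node name, unit address
 * and all numeric values are made-up placeholders, and the standard
 * interrupt-controller/interrupt-parent wiring is shown only as an
 * assumption about how a hierarchical irqchip node would typically look.
 * Three interrupt cells match the param_count == 3 check enforced by
 * crossbar_domain_translate() and crossbar_domain_alloc() above.
 *
 *	crossbar: crossbar@48002000 {
 *		compatible = "ti,irq-crossbar";
 *		reg = <0x48002000 0x130>;
 *		interrupt-controller;
 *		interrupt-parent = <&gic>;
 *		#interrupt-cells = <3>;
 *		ti,max-irqs = <160>;
 *		ti,max-crossbar-sources = <400>;
 *		ti,reg-size = <2>;
 *		ti,irqs-reserved = <0 1 2 3>;
 *		ti,irqs-skip = <10 133>;
 *		ti,irqs-safe-map = <0>;
 *	};
 */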
static int __init crossbar_of_init(struct device_node *node)
{
	u32 max = 0, entry, reg_size;
	int i, size, reserved = 0;
	const __be32 *irqsr;
	int ret = -ENOMEM;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return ret;

	cb->crossbar_base = of_iomap(node, 0);
	if (!cb->crossbar_base)
		goto err_cb;

	of_property_read_u32(node, "ti,max-crossbar-sources",
			     &cb->max_crossbar_sources);
	if (!cb->max_crossbar_sources) {
		pr_err("missing 'ti,max-crossbar-sources' property\n");
		ret = -EINVAL;
		goto err_base;
	}

	of_property_read_u32(node, "ti,max-irqs", &max);
	if (!max) {
		pr_err("missing 'ti,max-irqs' property\n");
		ret = -EINVAL;
		goto err_base;
	}
	cb->irq_map = kcalloc(max, sizeof(int), GFP_KERNEL);
	if (!cb->irq_map)
		goto err_base;

	cb->int_max = max;

	for (i = 0; i < max; i++)
		cb->irq_map[i] = IRQ_FREE;

	/* Get and mark reserved irqs */
	irqsr = of_get_property(node, "ti,irqs-reserved", &size);
	if (irqsr) {
		size /= sizeof(__be32);

		for (i = 0; i < size; i++) {
			of_property_read_u32_index(node,
						   "ti,irqs-reserved",
						   i, &entry);
			if (entry >= max) {
				pr_err("Invalid reserved entry\n");
				ret = -EINVAL;
				goto err_irq_map;
			}
			cb->irq_map[entry] = IRQ_RESERVED;
		}
	}

	/* Skip irqs hardwired to bypass the crossbar */
	irqsr = of_get_property(node, "ti,irqs-skip", &size);
	if (irqsr) {
		size /= sizeof(__be32);

		for (i = 0; i < size; i++) {
			of_property_read_u32_index(node,
						   "ti,irqs-skip",
						   i, &entry);
			if (entry >= max) {
				pr_err("Invalid skip entry\n");
				ret = -EINVAL;
				goto err_irq_map;
			}
			cb->irq_map[entry] = IRQ_SKIP;
		}
	}

	cb->register_offsets = kcalloc(max, sizeof(int), GFP_KERNEL);
	if (!cb->register_offsets)
		goto err_irq_map;

	of_property_read_u32(node, "ti,reg-size", &reg_size);

	switch (reg_size) {
	case 1:
		cb->write = crossbar_writeb;
		break;
	case 2:
		cb->write = crossbar_writew;
		break;
	case 4:
		cb->write = crossbar_writel;
		break;
	default:
		pr_err("Invalid reg-size property\n");
		ret = -EINVAL;
		goto err_reg_offset;
	}

	/*
	 * Register offsets are not linear because of the
	 * reserved irqs, so find and store the offsets once.
	 */
	for (i = 0; i < max; i++) {
		if (cb->irq_map[i] == IRQ_RESERVED)
			continue;

		cb->register_offsets[i] = reserved;
		reserved += reg_size;
	}

	of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
	/* Initialize the crossbar with safe map to start with */
	for (i = 0; i < max; i++) {
		if (cb->irq_map[i] == IRQ_RESERVED ||
		    cb->irq_map[i] == IRQ_SKIP)
			continue;

		cb->write(i, cb->safe_map);
	}

	raw_spin_lock_init(&cb->lock);

	return 0;

err_reg_offset:
	kfree(cb->register_offsets);
err_irq_map:
	kfree(cb->irq_map);
err_base:
	iounmap(cb->crossbar_base);
err_cb:
	kfree(cb);

	cb = NULL;
	return ret;
}

static int __init irqcrossbar_init(struct device_node *node,
				   struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int err;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to obtain parent domain\n", node->full_name);
		return -ENXIO;
	}

	err = crossbar_of_init(node);
	if (err)
		return err;

	domain = irq_domain_add_hierarchy(parent_domain, 0,
					  cb->max_crossbar_sources,
					  node, &crossbar_domain_ops,
					  NULL);
	if (!domain) {
		pr_err("%s: failed to allocate domain\n", node->full_name);
		return -ENOMEM;
	}

	return 0;
}

IRQCHIP_DECLARE(ti_irqcrossbar, "ti,irq-crossbar", irqcrossbar_init);