// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments' K3 Interrupt Aggregator irqchip driver
 *
 * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <asm-generic/msi.h>

#define TI_SCI_DEV_ID_MASK	0xffff
#define TI_SCI_DEV_ID_SHIFT	16
#define TI_SCI_IRQ_ID_MASK	0xffff
#define TI_SCI_IRQ_ID_SHIFT	0
#define HWIRQ_TO_DEVID(hwirq)	(((hwirq) >> (TI_SCI_DEV_ID_SHIFT)) & \
				 (TI_SCI_DEV_ID_MASK))
#define HWIRQ_TO_IRQID(hwirq)	((hwirq) & (TI_SCI_IRQ_ID_MASK))
#define TO_HWIRQ(dev, index)	((((dev) & TI_SCI_DEV_ID_MASK) << \
				 TI_SCI_DEV_ID_SHIFT) | \
				((index) & TI_SCI_IRQ_ID_MASK))

#define MAX_EVENTS_PER_VINT	64
#define VINT_ENABLE_SET_OFFSET	0x0
#define VINT_ENABLE_CLR_OFFSET	0x8
#define VINT_STATUS_OFFSET	0x18
#define VINT_STATUS_MASKED_OFFSET	0x20

/**
 * struct ti_sci_inta_event_desc - Description of an event coming to
 *				   Interrupt Aggregator. This serves
 *				   as a mapping table for global event,
 *				   hwirq and vint bit.
 * @global_event:	Global event number corresponding to this event
 * @hwirq:		Hwirq of the incoming interrupt
 * @vint_bit:		Corresponding vint bit to which this event is attached.
 */
struct ti_sci_inta_event_desc {
	u16 global_event;
	u32 hwirq;
	u8 vint_bit;
};

/**
 * struct ti_sci_inta_vint_desc - Description of a virtual interrupt coming out
 *				  of Interrupt Aggregator.
 * @domain:		Pointer to IRQ domain to which this vint belongs.
 * @list:		List entry for the vint list
 * @event_map:		Bitmap to manage the allocation of events to vint.
 * @events:		Array of event descriptors assigned to this vint.
 * @parent_virq:	Linux IRQ number that gets attached to parent
 * @vint_id:		TISCI vint ID
 */
struct ti_sci_inta_vint_desc {
	struct irq_domain *domain;
	struct list_head list;
	DECLARE_BITMAP(event_map, MAX_EVENTS_PER_VINT);
	struct ti_sci_inta_event_desc events[MAX_EVENTS_PER_VINT];
	unsigned int parent_virq;
	u16 vint_id;
};

/**
 * struct ti_sci_inta_irq_domain - Structure representing a TISCI based
 *				   Interrupt Aggregator IRQ domain.
 * @sci:		Pointer to TISCI handle
 * @vint:		TISCI resource pointer representing IA interrupts.
 * @global_event:	TISCI resource pointer representing global events.
 * @vint_list:		List of the vints active in the system
 * @vint_mutex:		Mutex to protect vint_list
 * @base:		Base address of the memory mapped IO registers
 * @pdev:		Pointer to platform device.
 * @ti_sci_id:		TI-SCI device identifier
 * @unmapped_cnt:	Number of @unmapped_dev_ids entries
 * @unmapped_dev_ids:	Pointer to an array of TI-SCI device identifiers of
 *			unmapped event sources.
 *			Unmapped Events are not part of the Global Event Map and
 *			they are converted to Global event within INTA to be
 *			received by the same INTA to generate an interrupt.
 *			In case an interrupt request comes for a device which is
 *			generating Unmapped Event, we must use the INTA's TI-SCI
 *			device identifier in place of the source device
 *			identifier to let sysfw know where it has to program the
 *			Global Event number.
 */
struct ti_sci_inta_irq_domain {
	const struct ti_sci_handle *sci;
	struct ti_sci_resource *vint;
	struct ti_sci_resource *global_event;
	struct list_head vint_list;
	/* Mutex to protect vint list */
	struct mutex vint_mutex;
	void __iomem *base;
	struct platform_device *pdev;
	u32 ti_sci_id;

	int unmapped_cnt;
	u16 *unmapped_dev_ids;
};

#define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \
					events[i])

static u16 ti_sci_inta_get_dev_id(struct ti_sci_inta_irq_domain *inta, u32 hwirq)
{
	u16 dev_id = HWIRQ_TO_DEVID(hwirq);
	int i;

	if (inta->unmapped_cnt == 0)
		return dev_id;

	/*
	 * For devices sending Unmapped Events we must use the INTA's TI-SCI
	 * device identifier number to be able to convert it to a Global Event
	 * and map it to an interrupt.
	 */
	for (i = 0; i < inta->unmapped_cnt; i++) {
		if (dev_id == inta->unmapped_dev_ids[i]) {
			dev_id = inta->ti_sci_id;
			break;
		}
	}

	return dev_id;
}

/**
 * ti_sci_inta_irq_handler() - Chained IRQ handler for the vint irqs
 * @desc:	Pointer to irq_desc corresponding to the irq
 */
static void ti_sci_inta_irq_handler(struct irq_desc *desc)
{
	struct ti_sci_inta_vint_desc *vint_desc;
	struct ti_sci_inta_irq_domain *inta;
	struct irq_domain *domain;
	unsigned int virq, bit;
	unsigned long val;

	vint_desc = irq_desc_get_handler_data(desc);
	domain = vint_desc->domain;
	inta = domain->host_data;

	chained_irq_enter(irq_desc_get_chip(desc), desc);

	val = readq_relaxed(inta->base + vint_desc->vint_id * 0x1000 +
			    VINT_STATUS_MASKED_OFFSET);

	for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT) {
		virq = irq_find_mapping(domain, vint_desc->events[bit].hwirq);
		if (virq)
			generic_handle_irq(virq);
	}

	chained_irq_exit(irq_desc_get_chip(desc), desc);
}

/**
 * ti_sci_inta_xlate_irq() - Translate hwirq to parent's hwirq.
 * @inta:	IRQ domain corresponding to Interrupt Aggregator
 * @vint_id:	Hardware irq corresponding to the above irq domain
 *
 * Return parent irq number if translation is available else -ENOENT.
 */
static int ti_sci_inta_xlate_irq(struct ti_sci_inta_irq_domain *inta,
				 u16 vint_id)
{
	struct device_node *np = dev_of_node(&inta->pdev->dev);
	u32 base, parent_base, size;
	const __be32 *range;
	int len;

	range = of_get_property(np, "ti,interrupt-ranges", &len);
	if (!range)
		return vint_id;

	for (len /= sizeof(*range); len >= 3; len -= 3) {
		base = be32_to_cpu(*range++);
		parent_base = be32_to_cpu(*range++);
		size = be32_to_cpu(*range++);

		if (base <= vint_id && vint_id < base + size)
			return vint_id - base + parent_base;
	}

	return -ENOENT;
}

/**
 * ti_sci_inta_alloc_parent_irq() - Allocate parent irq to Interrupt aggregator
 * @domain:	IRQ domain corresponding to Interrupt Aggregator
 *
 * Return a pointer to the allocated vint_desc on success, else an ERR_PTR() value.
 */
static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_domain *domain)
{
	struct ti_sci_inta_irq_domain *inta = domain->host_data;
	struct ti_sci_inta_vint_desc *vint_desc;
	struct irq_fwspec parent_fwspec;
	struct device_node *parent_node;
	unsigned int parent_virq;
	int p_hwirq, ret;
	u16 vint_id;

	vint_id = ti_sci_get_free_resource(inta->vint);
	if (vint_id == TI_SCI_RESOURCE_NULL)
		return ERR_PTR(-EINVAL);

	p_hwirq = ti_sci_inta_xlate_irq(inta, vint_id);
	if (p_hwirq < 0) {
		ret = p_hwirq;
		goto free_vint;
	}

	vint_desc = kzalloc(sizeof(*vint_desc), GFP_KERNEL);
	if (!vint_desc) {
		ret = -ENOMEM;
		goto free_vint;
	}

	vint_desc->domain = domain;
	vint_desc->vint_id = vint_id;
	INIT_LIST_HEAD(&vint_desc->list);

	parent_node = of_irq_find_parent(dev_of_node(&inta->pdev->dev));
	parent_fwspec.fwnode = of_node_to_fwnode(parent_node);

	if (of_device_is_compatible(parent_node, "arm,gic-v3")) {
		/* Parent is GIC */
		parent_fwspec.param_count = 3;
		parent_fwspec.param[0] = 0;
		parent_fwspec.param[1] = p_hwirq - 32;
		parent_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
	} else {
		/* Parent is Interrupt Router */
		parent_fwspec.param_count = 1;
		parent_fwspec.param[0] = p_hwirq;
	}

	parent_virq = irq_create_fwspec_mapping(&parent_fwspec);
	if (parent_virq == 0) {
		dev_err(&inta->pdev->dev, "Parent IRQ allocation failed\n");
		ret = -EINVAL;
		goto free_vint_desc;
	}
	vint_desc->parent_virq = parent_virq;

	list_add_tail(&vint_desc->list, &inta->vint_list);
	irq_set_chained_handler_and_data(vint_desc->parent_virq,
					 ti_sci_inta_irq_handler, vint_desc);

	return vint_desc;
free_vint_desc:
	kfree(vint_desc);
free_vint:
	ti_sci_release_resource(inta->vint, vint_id);
	return ERR_PTR(ret);
}

/**
 * ti_sci_inta_alloc_event() - Attach an event to an IA vint.
 * @vint_desc:	Pointer to vint_desc to which the event gets attached
 * @free_bit:	Bit inside vint to which event gets attached
 * @hwirq:	hwirq of the input event
 *
 * Return event_desc pointer if all went ok else appropriate error value.
 */
static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_event(struct ti_sci_inta_vint_desc *vint_desc,
							      u16 free_bit,
							      u32 hwirq)
{
	struct ti_sci_inta_irq_domain *inta = vint_desc->domain->host_data;
	struct ti_sci_inta_event_desc *event_desc;
	u16 dev_id, dev_index;
	int err;

	dev_id = ti_sci_inta_get_dev_id(inta, hwirq);
	dev_index = HWIRQ_TO_IRQID(hwirq);

	event_desc = &vint_desc->events[free_bit];
	event_desc->hwirq = hwirq;
	event_desc->vint_bit = free_bit;
	event_desc->global_event = ti_sci_get_free_resource(inta->global_event);
	if (event_desc->global_event == TI_SCI_RESOURCE_NULL)
		return ERR_PTR(-EINVAL);

	err = inta->sci->ops.rm_irq_ops.set_event_map(inta->sci,
						      dev_id, dev_index,
						      inta->ti_sci_id,
						      vint_desc->vint_id,
						      event_desc->global_event,
						      free_bit);
	if (err)
		goto free_global_event;

	return event_desc;
free_global_event:
	ti_sci_release_resource(inta->global_event, event_desc->global_event);
	return ERR_PTR(err);
}

/**
 * ti_sci_inta_alloc_irq() - Allocate an irq within INTA domain
 * @domain:	irq_domain pointer corresponding to INTA
 * @hwirq:	hwirq of the input event
 *
 * Note: Allocation happens in the following manner:
 *	- Find a free bit available in any of the vints available in the list.
 *	- If not found, allocate a vint from the vint pool
 *	- Attach the free bit to input hwirq.
 * Return event_desc if all went ok else appropriate error value.
 */
static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *domain,
							     u32 hwirq)
{
	struct ti_sci_inta_irq_domain *inta = domain->host_data;
	struct ti_sci_inta_vint_desc *vint_desc = NULL;
	struct ti_sci_inta_event_desc *event_desc;
	u16 free_bit;

	mutex_lock(&inta->vint_mutex);
	list_for_each_entry(vint_desc, &inta->vint_list, list) {
		free_bit = find_first_zero_bit(vint_desc->event_map,
					       MAX_EVENTS_PER_VINT);
		if (free_bit != MAX_EVENTS_PER_VINT) {
			set_bit(free_bit, vint_desc->event_map);
			goto alloc_event;
		}
	}

	/* No free bits available. Allocate a new vint */
	vint_desc = ti_sci_inta_alloc_parent_irq(domain);
	if (IS_ERR(vint_desc)) {
		event_desc = ERR_CAST(vint_desc);
		goto unlock;
	}

	free_bit = find_first_zero_bit(vint_desc->event_map,
				       MAX_EVENTS_PER_VINT);
	set_bit(free_bit, vint_desc->event_map);

alloc_event:
	event_desc = ti_sci_inta_alloc_event(vint_desc, free_bit, hwirq);
	if (IS_ERR(event_desc))
		clear_bit(free_bit, vint_desc->event_map);

unlock:
	mutex_unlock(&inta->vint_mutex);
	return event_desc;
}

/**
 * ti_sci_inta_free_parent_irq() - Free a parent irq to INTA
 * @inta:	Pointer to inta domain.
 * @vint_desc:	Pointer to vint_desc that needs to be freed.
 */
static void ti_sci_inta_free_parent_irq(struct ti_sci_inta_irq_domain *inta,
					struct ti_sci_inta_vint_desc *vint_desc)
{
	if (find_first_bit(vint_desc->event_map, MAX_EVENTS_PER_VINT) == MAX_EVENTS_PER_VINT) {
		list_del(&vint_desc->list);
		ti_sci_release_resource(inta->vint, vint_desc->vint_id);
		irq_dispose_mapping(vint_desc->parent_virq);
		kfree(vint_desc);
	}
}

/**
 * ti_sci_inta_free_irq() - Free an IRQ within INTA domain
 * @event_desc:	Pointer to event_desc that needs to be freed.
 * @hwirq:	Hwirq number within INTA domain that needs to be freed
 */
static void ti_sci_inta_free_irq(struct ti_sci_inta_event_desc *event_desc,
				 u32 hwirq)
{
	struct ti_sci_inta_vint_desc *vint_desc;
	struct ti_sci_inta_irq_domain *inta;
	u16 dev_id;

	vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
	inta = vint_desc->domain->host_data;
	dev_id = ti_sci_inta_get_dev_id(inta, hwirq);
	/* free event irq */
	mutex_lock(&inta->vint_mutex);
	inta->sci->ops.rm_irq_ops.free_event_map(inta->sci,
						 dev_id, HWIRQ_TO_IRQID(hwirq),
						 inta->ti_sci_id,
						 vint_desc->vint_id,
						 event_desc->global_event,
						 event_desc->vint_bit);

	clear_bit(event_desc->vint_bit, vint_desc->event_map);
	ti_sci_release_resource(inta->global_event, event_desc->global_event);
	event_desc->global_event = TI_SCI_RESOURCE_NULL;
	event_desc->hwirq = 0;

	ti_sci_inta_free_parent_irq(inta, vint_desc);
	mutex_unlock(&inta->vint_mutex);
}

/**
 * ti_sci_inta_request_resources() - Allocate resources for input irq
 * @data:	Pointer to corresponding irq_data
 *
 * Note: This is the core api where the actual allocation happens for input
 *	 hwirq. This allocation involves creating a parent irq for vint.
 *	 If this is done in irq_domain_ops.alloc() then a deadlock is reached
 *	 for allocation. So this allocation is being done in request_resources()
 *
 * Return: 0 if all went well else corresponding error.
 */
static int ti_sci_inta_request_resources(struct irq_data *data)
{
	struct ti_sci_inta_event_desc *event_desc;

	event_desc = ti_sci_inta_alloc_irq(data->domain, data->hwirq);
	if (IS_ERR(event_desc))
		return PTR_ERR(event_desc);

	data->chip_data = event_desc;

	return 0;
}

/**
 * ti_sci_inta_release_resources - Release resources for input irq
 * @data:	Pointer to corresponding irq_data
 *
 * Note: Corresponding to request_resources(), all the unmapping and deletion
 *	 of parent vint irqs happens in this api.
 */
static void ti_sci_inta_release_resources(struct irq_data *data)
{
	struct ti_sci_inta_event_desc *event_desc;

	event_desc = irq_data_get_irq_chip_data(data);
	ti_sci_inta_free_irq(event_desc, data->hwirq);
}

/**
 * ti_sci_inta_manage_event() - Control the event based on the offset
 * @data:	Pointer to corresponding irq_data
 * @offset:	register offset using which event is controlled.
 */
static void ti_sci_inta_manage_event(struct irq_data *data, u32 offset)
{
	struct ti_sci_inta_event_desc *event_desc;
	struct ti_sci_inta_vint_desc *vint_desc;
	struct ti_sci_inta_irq_domain *inta;

	event_desc = irq_data_get_irq_chip_data(data);
	vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
	inta = data->domain->host_data;

	writeq_relaxed(BIT(event_desc->vint_bit),
		       inta->base + vint_desc->vint_id * 0x1000 + offset);
}

/**
 * ti_sci_inta_mask_irq() - Mask an event
 * @data:	Pointer to corresponding irq_data
 */
static void ti_sci_inta_mask_irq(struct irq_data *data)
{
	ti_sci_inta_manage_event(data, VINT_ENABLE_CLR_OFFSET);
}

/**
 * ti_sci_inta_unmask_irq() - Unmask an event
 * @data:	Pointer to corresponding irq_data
 */
static void ti_sci_inta_unmask_irq(struct irq_data *data)
{
	ti_sci_inta_manage_event(data, VINT_ENABLE_SET_OFFSET);
}

/**
 * ti_sci_inta_ack_irq() - Ack an event
 * @data:	Pointer to corresponding irq_data
 */
static void ti_sci_inta_ack_irq(struct irq_data *data)
{
	/*
	 * Do not clear the event if hardware is capable of sending
	 * a down event.
	 */
	if (irqd_get_trigger_type(data) != IRQF_TRIGGER_HIGH)
		ti_sci_inta_manage_event(data, VINT_STATUS_OFFSET);
}

static int ti_sci_inta_set_affinity(struct irq_data *d,
				    const struct cpumask *mask_val, bool force)
{
	return -EINVAL;
}

/**
 * ti_sci_inta_set_type() - Update the trigger type of the irq.
 * @data:	Pointer to corresponding irq_data
 * @type:	Trigger type as specified by user
 *
 * Note: This updates the handle_irq callback for level msi.
 *
 * Return 0 if all went well else appropriate error.
 */
static int ti_sci_inta_set_type(struct irq_data *data, unsigned int type)
{
	/*
	 * .alloc default sets handle_edge_irq. But if the user specifies
	 * that IRQ is level MSI, then update the handle to handle_level_irq
	 */
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQF_TRIGGER_HIGH:
		irq_set_handler_locked(data, handle_level_irq);
		return 0;
	case IRQF_TRIGGER_RISING:
		return 0;
	default:
		return -EINVAL;
	}
}

static struct irq_chip ti_sci_inta_irq_chip = {
	.name			= "INTA",
	.irq_ack		= ti_sci_inta_ack_irq,
	.irq_mask		= ti_sci_inta_mask_irq,
	.irq_set_type		= ti_sci_inta_set_type,
	.irq_unmask		= ti_sci_inta_unmask_irq,
	.irq_set_affinity	= ti_sci_inta_set_affinity,
	.irq_request_resources	= ti_sci_inta_request_resources,
	.irq_release_resources	= ti_sci_inta_release_resources,
};

/**
 * ti_sci_inta_irq_domain_free() - Free an IRQ from the IRQ domain
 * @domain:	Domain to which the irqs belong
 * @virq:	base linux virtual IRQ to be freed.
 * @nr_irqs:	Number of continuous irqs to be freed
 */
static void ti_sci_inta_irq_domain_free(struct irq_domain *domain,
					unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);

	irq_domain_reset_irq_data(data);
}

/**
 * ti_sci_inta_irq_domain_alloc() - Allocate Interrupt aggregator IRQs
 * @domain:	Pointer to the interrupt aggregator IRQ domain
 * @virq:	Corresponding Linux virtual IRQ number
 * @nr_irqs:	Continuous irqs to be allocated
 * @data:	Pointer to firmware specifier
 *
 * No actual allocation happens here.
 *
 * Return 0 if all went well else appropriate error value.
 */
static int ti_sci_inta_irq_domain_alloc(struct irq_domain *domain,
					unsigned int virq, unsigned int nr_irqs,
					void *data)
{
	msi_alloc_info_t *arg = data;

	irq_domain_set_info(domain, virq, arg->hwirq, &ti_sci_inta_irq_chip,
			    NULL, handle_edge_irq, NULL, NULL);

	return 0;
}

static const struct irq_domain_ops ti_sci_inta_irq_domain_ops = {
	.free		= ti_sci_inta_irq_domain_free,
	.alloc		= ti_sci_inta_irq_domain_alloc,
};

static struct irq_chip ti_sci_inta_msi_irq_chip = {
	.name		= "MSI-INTA",
	.flags		= IRQCHIP_SUPPORTS_LEVEL_MSI,
};

static void ti_sci_inta_msi_set_desc(msi_alloc_info_t *arg,
				     struct msi_desc *desc)
{
	struct platform_device *pdev = to_platform_device(desc->dev);

	arg->desc = desc;
	arg->hwirq = TO_HWIRQ(pdev->id, desc->inta.dev_index);
}

static struct msi_domain_ops ti_sci_inta_msi_ops = {
	.set_desc	= ti_sci_inta_msi_set_desc,
};

static struct msi_domain_info ti_sci_inta_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_LEVEL_CAPABLE),
	.ops	= &ti_sci_inta_msi_ops,
	.chip	= &ti_sci_inta_msi_irq_chip,
};

static int ti_sci_inta_get_unmapped_sources(struct ti_sci_inta_irq_domain *inta)
{
	struct device *dev = &inta->pdev->dev;
	struct device_node *node = dev_of_node(dev);
	struct of_phandle_iterator it;
	int count, err, ret, i;

	count = of_count_phandle_with_args(node, "ti,unmapped-event-sources", NULL);
	if (count <= 0)
		return 0;

	inta->unmapped_dev_ids = devm_kcalloc(dev, count,
					      sizeof(*inta->unmapped_dev_ids),
					      GFP_KERNEL);
	if (!inta->unmapped_dev_ids)
		return -ENOMEM;

	i = 0;
	of_for_each_phandle(&it, err, node, "ti,unmapped-event-sources", NULL, 0) {
		u32 dev_id;

		ret = of_property_read_u32(it.node, "ti,sci-dev-id", &dev_id);
		if (ret) {
			dev_err(dev, "ti,sci-dev-id read failure for %pOFf\n", it.node);
			of_node_put(it.node);
			return ret;
		}
		inta->unmapped_dev_ids[i++] = dev_id;
	}

	inta->unmapped_cnt = count;

	return 0;
}

static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
{
	struct irq_domain *parent_domain, *domain, *msi_domain;
	struct device_node *parent_node, *node;
	struct ti_sci_inta_irq_domain *inta;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	node = dev_of_node(dev);
	parent_node = of_irq_find_parent(node);
	if (!parent_node) {
		dev_err(dev, "Failed to get IRQ parent node\n");
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent_node);
	if (!parent_domain)
		return -EPROBE_DEFER;

	inta = devm_kzalloc(dev, sizeof(*inta), GFP_KERNEL);
	if (!inta)
		return -ENOMEM;

	inta->pdev = pdev;
	inta->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(inta->sci))
		return dev_err_probe(dev, PTR_ERR(inta->sci),
				     "ti,sci read fail\n");

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &inta->ti_sci_id);
	if (ret) {
		dev_err(dev, "missing 'ti,sci-dev-id' property\n");
		return -EINVAL;
	}

	inta->vint = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id,
					      TI_SCI_RESASG_SUBTYPE_IA_VINT);
	if (IS_ERR(inta->vint)) {
		dev_err(dev, "VINT resource allocation failed\n");
		return PTR_ERR(inta->vint);
	}

	inta->global_event = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id,
						      TI_SCI_RESASG_SUBTYPE_GLOBAL_EVENT_SEVT);
	if (IS_ERR(inta->global_event)) {
		dev_err(dev, "Global event resource allocation failed\n");
		return PTR_ERR(inta->global_event);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	inta->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(inta->base))
		return PTR_ERR(inta->base);

	ret = ti_sci_inta_get_unmapped_sources(inta);
	if (ret)
		return ret;

	domain = irq_domain_add_linear(dev_of_node(dev),
				       ti_sci_get_num_resources(inta->vint),
				       &ti_sci_inta_irq_domain_ops, inta);
	if (!domain) {
		dev_err(dev, "Failed to allocate IRQ domain\n");
		return -ENOMEM;
	}

	msi_domain = ti_sci_inta_msi_create_irq_domain(of_node_to_fwnode(node),
						       &ti_sci_inta_msi_domain_info,
						       domain);
	if (!msi_domain) {
		irq_domain_remove(domain);
		dev_err(dev, "Failed to allocate msi domain\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&inta->vint_list);
	mutex_init(&inta->vint_mutex);

	dev_info(dev, "Interrupt Aggregator domain %d created\n", inta->ti_sci_id);

	return 0;
}

static const struct of_device_id ti_sci_inta_irq_domain_of_match[] = {
	{ .compatible = "ti,sci-inta", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_inta_irq_domain_of_match);

static struct platform_driver ti_sci_inta_irq_domain_driver = {
	.probe = ti_sci_inta_irq_domain_probe,
	.driver = {
		.name = "ti-sci-inta",
		.of_match_table = ti_sci_inta_irq_domain_of_match,
	},
};
module_platform_driver(ti_sci_inta_irq_domain_driver);

MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>");
MODULE_DESCRIPTION("K3 Interrupt Aggregator driver over TI SCI protocol");
MODULE_LICENSE("GPL v2");
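
/*
 * Illustrative sketch: a minimal devicetree node this driver could bind to.
 * The property names are the ones the probe path above actually parses
 * ("ti,sci", "ti,sci-dev-id", "ti,interrupt-ranges", and the optional
 * "ti,unmapped-event-sources"); the node name, unit address, phandles and
 * numeric values below are placeholders, not taken from any particular SoC.
 *
 *	inta: msi-controller@33d00000 {
 *		compatible = "ti,sci-inta";
 *		reg = <0x0 0x33d00000 0x0 0x100000>;
 *		interrupt-controller;
 *		msi-controller;
 *		interrupt-parent = <&intr>;
 *		ti,sci = <&dmsc>;
 *		ti,sci-dev-id = <209>;
 *		ti,interrupt-ranges = <0 0 256>;
 *	};
 */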