// SPDX-License-Identifier: GPL-2.0
/*
 * Loongson Extended I/O Interrupt Controller support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#define pr_fmt(fmt) "eiointc: " fmt

#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
#include <asm/numa.h>

#define EIOINTC_REG_NODEMAP	0x14a0
#define EIOINTC_REG_IPMAP	0x14c0
#define EIOINTC_REG_ENABLE	0x1600
#define EIOINTC_REG_BOUNCE	0x1680
#define EIOINTC_REG_ISR		0x1800
#define EIOINTC_REG_ROUTE	0x1c00

#define VEC_REG_COUNT		4
#define VEC_COUNT_PER_REG	64
#define VEC_COUNT		(VEC_REG_COUNT * VEC_COUNT_PER_REG)
#define VEC_REG_IDX(irq_id)	((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id)	((irq_id) % VEC_COUNT_PER_REG)
#define EIOINTC_ALL_ENABLE	0xffffffff

#define MAX_EIO_NODES		(NR_CPUS / CORES_PER_EIO_NODE)

static int nr_pics;

struct eiointc_priv {
	u32			node;
	u32			vec_count;
	nodemask_t		node_map;
	cpumask_t		cpuspan_map;
	struct fwnode_handle	*domain_handle;
	struct irq_domain	*eiointc_domain;
};

static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];

static void eiointc_enable(void)
{
	uint64_t misc;

	misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
	misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
	iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
}

static int cpu_to_eio_node(int cpu)
{
	return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
}
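
/*
 * The ROUTE register block holds one route byte per interrupt vector:
 * bits [3:0] are a core bitmap inside the destination node and bits [7:4]
 * select the destination EIO node. csr_any_send() works on 32-bit words,
 * so the offset is aligned down to 4 bytes and the byte mask is set up so
 * that only the byte belonging to this vector is written.
 */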
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
	int i, node, cpu_node, route_node;
	unsigned char coremap;
	uint32_t pos_off, data, data_byte, data_mask;

	pos_off = pos & ~3;
	data_byte = pos & 3;
	data_mask = ~BIT_MASK(data_byte) & 0xf;

	/* Calculate node and coremap of target irq */
	cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
	coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);

	for_each_online_cpu(i) {
		node = cpu_to_eio_node(i);
		if (!node_isset(node, *node_map))
			continue;

		/* EIO node 0 is in charge of inter-node interrupt dispatch */
		route_node = (node == mnode) ? cpu_node : node;
		data = ((coremap | (route_node << 4)) << (data_byte * 8));
		csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
	}
}

static DEFINE_RAW_SPINLOCK(affinity_lock);

static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
{
	unsigned int cpu;
	unsigned long flags;
	uint32_t vector, regaddr;
	struct cpumask intersect_affinity;
	struct eiointc_priv *priv = d->domain->host_data;

	raw_spin_lock_irqsave(&affinity_lock, flags);

	cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
	cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);

	if (cpumask_empty(&intersect_affinity)) {
		raw_spin_unlock_irqrestore(&affinity_lock, flags);
		return -EINVAL;
	}
	cpu = cpumask_first(&intersect_affinity);

	vector = d->hwirq;
	regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);

	/* Mask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
		     0x0, priv->node * CORES_PER_EIO_NODE);

	/* Set route for target vector */
	eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);

	/* Unmask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
		     0x0, priv->node * CORES_PER_EIO_NODE);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	raw_spin_unlock_irqrestore(&affinity_lock, flags);

	return IRQ_SET_MASK_OK;
}

static int eiointc_index(int node)
{
	int i;

	for (i = 0; i < nr_pics; i++) {
		if (node_isset(node, eiointc_priv[i]->node_map))
			return i;
	}

	return -1;
}

static int eiointc_router_init(unsigned int cpu)
{
	int i, bit;
	uint32_t data;
	uint32_t node = cpu_to_eio_node(cpu);
	int index = eiointc_index(node);

	if (index < 0) {
		pr_err("Error: invalid nodemap!\n");
		return -1;
	}

	if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
		eiointc_enable();

		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
			data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
			iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 32 / 4; i++) {
			bit = BIT(1 + index); /* Route to IP[1 + index] */
			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) {
			/* Route to Node-0 Core-0 */
			if (index == 0)
				bit = BIT(cpu_logical_map(0));
			else
				bit = (eiointc_priv[index]->node << 4) | 1;

			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
			data = 0xffffffff;
			iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
			iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
		}
	}

	return 0;
}
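
/*
 * Chained handler for the parent CPU interrupt: scan the 64-bit ISR words,
 * acknowledge everything that is pending by writing the value back, then
 * hand each set bit to the corresponding virq in the eiointc domain.
 */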
static void eiointc_irq_dispatch(struct irq_desc *desc)
{
	int i;
	u64 pending;
	bool handled = false;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct eiointc_priv *priv = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);

	for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
		pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
		iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
		while (pending) {
			int bit = __ffs(pending);
			int irq = bit + VEC_COUNT_PER_REG * i;

			generic_handle_domain_irq(priv->eiointc_domain, irq);
			pending &= ~BIT(bit);
			handled = true;
		}
	}

	if (!handled)
		spurious_interrupt();

	chained_irq_exit(chip, desc);
}

static void eiointc_ack_irq(struct irq_data *d)
{
}

static void eiointc_mask_irq(struct irq_data *d)
{
}

static void eiointc_unmask_irq(struct irq_data *d)
{
}

static struct irq_chip eiointc_irq_chip = {
	.name			= "EIOINTC",
	.irq_ack		= eiointc_ack_irq,
	.irq_mask		= eiointc_mask_irq,
	.irq_unmask		= eiointc_unmask_irq,
	.irq_set_affinity	= eiointc_set_irq_affinity,
};

static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int ret;
	unsigned int i, type;
	unsigned long hwirq = 0;
	struct eiointc_priv *priv = domain->host_data;

	ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
				    priv, handle_edge_irq, NULL, NULL);
	}

	return 0;
}

static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops eiointc_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= eiointc_domain_alloc,
	.free		= eiointc_domain_free,
};

static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node) {
			vec_group[i].parent = parent;
			return;
		}
	}
}

static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node)
			return vec_group[i].parent;
	}
	return NULL;
}

static int eiointc_suspend(void)
{
	return 0;
}

static void eiointc_resume(void)
{
	int i, j;
	struct irq_desc *desc;
	struct irq_data *irq_data;

	eiointc_router_init(0);

	for (i = 0; i < nr_pics; i++) {
		for (j = 0; j < eiointc_priv[0]->vec_count; j++) {
			desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j);
			if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) {
				raw_spin_lock(&desc->lock);
				irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc));
				eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0);
				raw_spin_unlock(&desc->lock);
			}
		}
	}
}

static struct syscore_ops eiointc_syscore_ops = {
	.suspend	= eiointc_suspend,
	.resume		= eiointc_resume,
};

static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
				     const unsigned long end)
{
	struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
	unsigned int node = (pchpic_entry->address >> 44) & 0xf;
	struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);

	if (parent)
		return pch_pic_acpi_init(parent, pchpic_entry);

	return 0;
}
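
/*
 * When cpu_has_flatmode is set, the node used for the vector-group lookup
 * is derived from the EIO node's first core via early_cpu_to_node(), rather
 * than the raw EIO node number from the MADT entry.
 */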
static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
				     const unsigned long end)
{
	struct irq_domain *parent;
	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
	int node;

	if (cpu_has_flatmode)
		node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
	else
		node = eiointc_priv[nr_pics - 1]->node;

	parent = acpi_get_vec_parent(node, msi_group);

	if (parent)
		return pch_msi_acpi_init(parent, pchmsi_entry);

	return 0;
}

static int __init acpi_cascade_irqdomain_init(void)
{
	int r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, pch_pic_parse_madt, 0);
	if (r < 0)
		return r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
	if (r < 0)
		return r;

	return 0;
}

static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
			       u64 node_map)
{
	int i;

	node_map = node_map ? node_map : -1ULL;
	for_each_possible_cpu(i) {
		if (node_map & (1ULL << (cpu_to_eio_node(i)))) {
			node_set(cpu_to_eio_node(i), priv->node_map);
			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map,
				   cpumask_of(i));
		}
	}

	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle,
							priv->vec_count,
							&eiointc_domain_ops,
							priv);
	if (!priv->eiointc_domain) {
		pr_err("loongson-extioi: cannot add IRQ domain\n");
		return -ENOMEM;
	}

	eiointc_priv[nr_pics++] = priv;
	eiointc_router_init(0);
	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);

	if (nr_pics == 1) {
		register_syscore_ops(&eiointc_syscore_ops);
		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
					  "irqchip/loongarch/intc:starting",
					  eiointc_router_init, NULL);
	}

	return 0;
}

int __init eiointc_acpi_init(struct irq_domain *parent,
			     struct acpi_madt_eio_pic *acpi_eiointc)
{
	int parent_irq, ret;
	struct eiointc_priv *priv;
	int node;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
							       acpi_eiointc->node);
	if (!priv->domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		goto out_free_priv;
	}

	priv->vec_count = VEC_COUNT;
	priv->node = acpi_eiointc->node;

	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);

	ret = eiointc_init(priv, parent_irq, acpi_eiointc->node_map);
	if (ret < 0)
		goto out_free_handle;

	if (cpu_has_flatmode)
		node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
	else
		node = acpi_eiointc->node;
	acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
	acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);

	ret = acpi_cascade_irqdomain_init();
	if (ret < 0)
		goto out_free_handle;

	return ret;

out_free_handle:
	irq_domain_free_fwnode(priv->domain_handle);
	priv->domain_handle = NULL;
out_free_priv:
	kfree(priv);

	return -ENOMEM;
}
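
/*
 * Devicetree probe path (LS2K0500/LS2K2000): the cascade parent interrupt
 * comes from the node's first interrupt specifier and the vector count is
 * chosen from the compatible string.
 */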
static int __init eiointc_of_init(struct device_node *of_node,
				  struct device_node *parent)
{
	int parent_irq, ret;
	struct eiointc_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	parent_irq = irq_of_parse_and_map(of_node, 0);
	if (parent_irq <= 0) {
		ret = -ENODEV;
		goto out_free_priv;
	}

	ret = irq_set_handler_data(parent_irq, priv);
	if (ret < 0)
		goto out_free_priv;

	/*
	 * The LS2K0500 extended I/O interrupt controller only supports
	 * 128 interrupt vectors.
	 */
	if (of_device_is_compatible(of_node, "loongson,ls2k0500-eiointc"))
		priv->vec_count = 128;
	else
		priv->vec_count = VEC_COUNT;

	priv->node = 0;
	priv->domain_handle = of_node_to_fwnode(of_node);

	ret = eiointc_init(priv, parent_irq, 0);
	if (ret < 0)
		goto out_free_priv;

	return 0;

out_free_priv:
	kfree(priv);
	return ret;
}

IRQCHIP_DECLARE(loongson_ls2k0500_eiointc, "loongson,ls2k0500-eiointc", eiointc_of_init);
IRQCHIP_DECLARE(loongson_ls2k2000_eiointc, "loongson,ls2k2000-eiointc", eiointc_of_init);