// SPDX-License-Identifier: GPL-2.0
/*
 * Loongson Extend I/O Interrupt Controller support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#define pr_fmt(fmt) "eiointc: " fmt

#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>

#define EIOINTC_REG_NODEMAP	0x14a0
#define EIOINTC_REG_IPMAP	0x14c0
#define EIOINTC_REG_ENABLE	0x1600
#define EIOINTC_REG_BOUNCE	0x1680
#define EIOINTC_REG_ISR		0x1800
#define EIOINTC_REG_ROUTE	0x1c00

#define VEC_REG_COUNT		4
#define VEC_COUNT_PER_REG	64
#define VEC_COUNT		(VEC_REG_COUNT * VEC_COUNT_PER_REG)
#define VEC_REG_IDX(irq_id)	((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id)	((irq_id) % VEC_COUNT_PER_REG)
#define EIOINTC_ALL_ENABLE	0xffffffff

#define MAX_EIO_NODES		(NR_CPUS / CORES_PER_EIO_NODE)

static int nr_pics;

struct eiointc_priv {
	u32			node;
	u32			vec_count;
	nodemask_t		node_map;
	cpumask_t		cpuspan_map;
	struct fwnode_handle	*domain_handle;
	struct irq_domain	*eiointc_domain;
};

static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];

static void eiointc_enable(void)
{
	uint64_t misc;

	misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
	misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
	iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
}

static int cpu_to_eio_node(int cpu)
{
	return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
}

static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
	int i, node, cpu_node, route_node;
	unsigned char coremap;
	uint32_t pos_off, data, data_byte, data_mask;

	pos_off = pos & ~3;
	data_byte = pos & 3;
	data_mask = ~BIT_MASK(data_byte) & 0xf;

	/* Calculate node and coremap of target irq */
	cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
	coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);

	for_each_online_cpu(i) {
		node = cpu_to_eio_node(i);
		if (!node_isset(node, *node_map))
			continue;

		/* EIO node 0 is in charge of inter-node interrupt dispatch */
		route_node = (node == mnode) ? cpu_node : node;
		data = ((coremap | (route_node << 4)) << (data_byte * 8));
		csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
	}
}

static DEFINE_RAW_SPINLOCK(affinity_lock);

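/*
 * Changing the affinity of a vector touches both the per-node enable
 * registers and the route registers, so the sequence below masks the
 * vector, reprogrammes its route and then unmasks it again, all under
 * affinity_lock so concurrent updates cannot interleave.
 */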
static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
{
	unsigned int cpu;
	unsigned long flags;
	uint32_t vector, regaddr;
	struct cpumask intersect_affinity;
	struct eiointc_priv *priv = d->domain->host_data;

	raw_spin_lock_irqsave(&affinity_lock, flags);

	cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
	cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);

	if (cpumask_empty(&intersect_affinity)) {
		raw_spin_unlock_irqrestore(&affinity_lock, flags);
		return -EINVAL;
	}
	cpu = cpumask_first(&intersect_affinity);

	vector = d->hwirq;
	regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);

	/* Mask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
		     0x0, priv->node * CORES_PER_EIO_NODE);

	/* Set route for target vector */
	eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);

	/* Unmask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
		     0x0, priv->node * CORES_PER_EIO_NODE);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	raw_spin_unlock_irqrestore(&affinity_lock, flags);

	return IRQ_SET_MASK_OK;
}

static int eiointc_index(int node)
{
	int i;

	for (i = 0; i < nr_pics; i++) {
		if (node_isset(node, eiointc_priv[i]->node_map))
			return i;
	}

	return -1;
}

static int eiointc_router_init(unsigned int cpu)
{
	int i, bit;
	uint32_t data;
	uint32_t node = cpu_to_eio_node(cpu);
	int index = eiointc_index(node);	/* must be signed so the error check below can fire */

	if (index < 0) {
		pr_err("Error: invalid nodemap!\n");
		return -1;
	}

	if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
		eiointc_enable();

		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
			data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
			iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 32 / 4; i++) {
			bit = BIT(1 + index); /* Route to IP[1 + index] */
			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) {
			/* Route to Node-0 Core-0 */
			if (index == 0)
				bit = BIT(cpu_logical_map(0));
			else
				bit = (eiointc_priv[index]->node << 4) | 1;

			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
			data = 0xffffffff;
			iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
			iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
		}
	}

	return 0;
}

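/*
 * Chained handler for the parent (CPU) interrupt: scan every 64-bit ISR
 * word, write the pending bits back to acknowledge them, then demultiplex
 * each set bit into the eiointc IRQ domain.
 */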
static void eiointc_irq_dispatch(struct irq_desc *desc)
{
	int i;
	u64 pending;
	bool handled = false;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct eiointc_priv *priv = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);

	for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
		pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
		iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
		while (pending) {
			int bit = __ffs(pending);
			int irq = bit + VEC_COUNT_PER_REG * i;

			generic_handle_domain_irq(priv->eiointc_domain, irq);
			pending &= ~BIT(bit);
			handled = true;
		}
	}

	if (!handled)
		spurious_interrupt();

	chained_irq_exit(chip, desc);
}

static void eiointc_ack_irq(struct irq_data *d)
{
}

static void eiointc_mask_irq(struct irq_data *d)
{
}

static void eiointc_unmask_irq(struct irq_data *d)
{
}

static struct irq_chip eiointc_irq_chip = {
	.name			= "EIOINTC",
	.irq_ack		= eiointc_ack_irq,
	.irq_mask		= eiointc_mask_irq,
	.irq_unmask		= eiointc_unmask_irq,
	.irq_set_affinity	= eiointc_set_irq_affinity,
};

static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int ret;
	unsigned int i, type;
	unsigned long hwirq = 0;
	struct eiointc_priv *priv = domain->host_data;

	ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
				    priv, handle_edge_irq, NULL, NULL);
	}

	return 0;
}

static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops eiointc_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= eiointc_domain_alloc,
	.free		= eiointc_domain_free,
};

static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node) {
			vec_group[i].parent = parent;
			return;
		}
	}
}

static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node)
			return vec_group[i].parent;
	}
	return NULL;
}

static int eiointc_suspend(void)
{
	return 0;
}

static void eiointc_resume(void)
{
	int i, j;
	struct irq_desc *desc;
	struct irq_data *irq_data;

	eiointc_router_init(0);

	for (i = 0; i < nr_pics; i++) {
		for (j = 0; j < eiointc_priv[0]->vec_count; j++) {
			desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j);
			if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) {
				raw_spin_lock(&desc->lock);
				irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc));
				eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0);
				raw_spin_unlock(&desc->lock);
			}
		}
	}
}

static struct syscore_ops eiointc_syscore_ops = {
	.suspend = eiointc_suspend,
	.resume = eiointc_resume,
};

static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
				     const unsigned long end)
{
	struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
	unsigned int node = (pchpic_entry->address >> 44) & 0xf;
	struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);

	if (parent)
		return pch_pic_acpi_init(parent, pchpic_entry);

	return 0;
}

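/*
 * When cpu_has_flatmode is set, the EIO node id must first be translated
 * to a NUMA node via the node's first CPU before it can be matched
 * against the ACPI vector groups; otherwise the EIO node id is used
 * directly.
 */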
static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
				     const unsigned long end)
{
	struct irq_domain *parent;
	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
	int node;

	if (cpu_has_flatmode)
		node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
	else
		node = eiointc_priv[nr_pics - 1]->node;

	parent = acpi_get_vec_parent(node, msi_group);

	if (parent)
		return pch_msi_acpi_init(parent, pchmsi_entry);

	return 0;
}

static int __init acpi_cascade_irqdomain_init(void)
{
	int r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, pch_pic_parse_madt, 0);
	if (r < 0)
		return r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
	if (r < 0)
		return r;

	return 0;
}

static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
			       u64 node_map)
{
	int i;

	node_map = node_map ? node_map : -1ULL;
	for_each_possible_cpu(i) {
		if (node_map & (1ULL << (cpu_to_eio_node(i)))) {
			node_set(cpu_to_eio_node(i), priv->node_map);
			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map,
				   cpumask_of(i));
		}
	}

	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle,
							priv->vec_count,
							&eiointc_domain_ops,
							priv);
	if (!priv->eiointc_domain) {
		pr_err("loongson-extioi: cannot add IRQ domain\n");
		return -ENOMEM;
	}

	eiointc_priv[nr_pics++] = priv;
	eiointc_router_init(0);
	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);

	if (nr_pics == 1) {
		register_syscore_ops(&eiointc_syscore_ops);
		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
					  "irqchip/loongarch/intc:starting",
					  eiointc_router_init, NULL);
	}

	return 0;
}

int __init eiointc_acpi_init(struct irq_domain *parent,
			     struct acpi_madt_eio_pic *acpi_eiointc)
{
	int parent_irq, ret;
	struct eiointc_priv *priv;
	int node;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
							       acpi_eiointc->node);
	if (!priv->domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		goto out_free_priv;
	}

	priv->vec_count = VEC_COUNT;
	priv->node = acpi_eiointc->node;

	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);

	ret = eiointc_init(priv, parent_irq, acpi_eiointc->node_map);
	if (ret < 0)
		goto out_free_handle;

	if (cpu_has_flatmode)
		node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
	else
		node = acpi_eiointc->node;

	acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
	acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);

	ret = acpi_cascade_irqdomain_init();
	if (ret < 0)
		goto out_free_handle;

	return ret;

out_free_handle:
	irq_domain_free_fwnode(priv->domain_handle);
	priv->domain_handle = NULL;
out_free_priv:
	kfree(priv);

	return -ENOMEM;
}

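/*
 * DT probe path for the Loongson-2K SoCs: these are single EIO-node
 * parts, so the node is fixed to 0 and the node_map of 0 passed to
 * eiointc_init() makes it fall back to covering every possible CPU.
 */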
static int __init eiointc_of_init(struct device_node *of_node,
				  struct device_node *parent)
{
	int parent_irq, ret;
	struct eiointc_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	parent_irq = irq_of_parse_and_map(of_node, 0);
	if (parent_irq <= 0) {
		ret = -ENODEV;
		goto out_free_priv;
	}

	ret = irq_set_handler_data(parent_irq, priv);
	if (ret < 0)
		goto out_free_priv;

	/*
	 * The LS2K0500 extended I/O interrupt controller supports
	 * only 128 interrupt vectors.
	 */
	if (of_device_is_compatible(of_node, "loongson,ls2k0500-eiointc"))
		priv->vec_count = 128;
	else
		priv->vec_count = VEC_COUNT;

	priv->node = 0;
	priv->domain_handle = of_node_to_fwnode(of_node);

	ret = eiointc_init(priv, parent_irq, 0);
	if (ret < 0)
		goto out_free_priv;

	return 0;

out_free_priv:
	kfree(priv);
	return ret;
}

IRQCHIP_DECLARE(loongson_ls2k0500_eiointc, "loongson,ls2k0500-eiointc", eiointc_of_init);
IRQCHIP_DECLARE(loongson_ls2k2000_eiointc, "loongson,ls2k2000-eiointc", eiointc_of_init);