// SPDX-License-Identifier: GPL-2.0
/*
 * Loongson Extend I/O Interrupt Controller support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#define pr_fmt(fmt) "eiointc: " fmt

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#define EIOINTC_REG_NODEMAP     0x14a0
#define EIOINTC_REG_IPMAP       0x14c0
#define EIOINTC_REG_ENABLE      0x1600
#define EIOINTC_REG_BOUNCE      0x1680
#define EIOINTC_REG_ISR         0x1800
#define EIOINTC_REG_ROUTE       0x1c00

#define VEC_REG_COUNT           4
#define VEC_COUNT_PER_REG       64
#define VEC_COUNT               (VEC_REG_COUNT * VEC_COUNT_PER_REG)
#define VEC_REG_IDX(irq_id)     ((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id)     ((irq_id) % VEC_COUNT_PER_REG)
#define EIOINTC_ALL_ENABLE      0xffffffff

#define MAX_EIO_NODES           (NR_CPUS / CORES_PER_EIO_NODE)

static int nr_pics;

struct eiointc_priv {
        u32                     node;
        nodemask_t              node_map;
        cpumask_t               cpuspan_map;
        struct fwnode_handle    *domain_handle;
        struct irq_domain       *eiointc_domain;
};

static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];

static void eiointc_enable(void)
{
        uint64_t misc;

        misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
        misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
        iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
}

static int cpu_to_eio_node(int cpu)
{
        return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
}

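/*
 * Each 32-bit EIOINTC_REG_ROUTE word covers four vectors, one route byte
 * per vector. As encoded below, the low four bits of a byte form a bitmap
 * of target cores within a node and the high four bits select the target
 * node. Only the byte belonging to @pos is rewritten; the byte mask passed
 * to csr_any_send() is meant to leave the other three vectors untouched.
 */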
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
        int i, node, cpu_node, route_node;
        unsigned char coremap;
        uint32_t pos_off, data, data_byte, data_mask;

        pos_off = pos & ~3;
        data_byte = pos & 3;
        data_mask = ~BIT_MASK(data_byte) & 0xf;

        /* Calculate node and coremap of target irq */
        cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
        coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);

        for_each_online_cpu(i) {
                node = cpu_to_eio_node(i);
                if (!node_isset(node, *node_map))
                        continue;

                /* EIO node 0 is in charge of inter-node interrupt dispatch */
                route_node = (node == mnode) ? cpu_node : node;
                data = ((coremap | (route_node << 4)) << (data_byte * 8));
                csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
        }
}

static DEFINE_RAW_SPINLOCK(affinity_lock);

static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
{
        unsigned int cpu;
        unsigned long flags;
        uint32_t vector, regaddr;
        struct cpumask intersect_affinity;
        struct eiointc_priv *priv = d->domain->host_data;

        raw_spin_lock_irqsave(&affinity_lock, flags);

        cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
        cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);

        if (cpumask_empty(&intersect_affinity)) {
                raw_spin_unlock_irqrestore(&affinity_lock, flags);
                return -EINVAL;
        }
        cpu = cpumask_first(&intersect_affinity);

        vector = d->hwirq;
        regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);

        /* Mask target vector */
        csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), 0x0, 0);
        /* Set route for target vector */
        eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
        /* Unmask target vector */
        csr_any_send(regaddr, EIOINTC_ALL_ENABLE, 0x0, 0);

        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        raw_spin_unlock_irqrestore(&affinity_lock, flags);

        return IRQ_SET_MASK_OK;
}

static int eiointc_index(int node)
{
        int i;

        for (i = 0; i < nr_pics; i++) {
                if (node_isset(node, eiointc_priv[i]->node_map))
                        return i;
        }

        return -1;
}

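/*
 * Router setup, run on the boot CPU at probe time (eiointc_router_init(0))
 * and on each CPU as it comes online via the hotplug callback. Only the
 * first core of an EIO node programs the registers: it enables the extended
 * I/O interrupt feature, programs NODEMAP, points IPMAP at IP[1 + index],
 * gives every vector a default route (node 0 core 0 for the first
 * controller, the controller's own node otherwise) and sets all
 * ENABLE/BOUNCE bits.
 */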
static int eiointc_router_init(unsigned int cpu)
{
        int i, bit;
        uint32_t data;
        uint32_t node = cpu_to_eio_node(cpu);
        int index = eiointc_index(node);

        if (index < 0) {
                pr_err("Error: invalid nodemap!\n");
                return -1;
        }

        if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
                eiointc_enable();

                for (i = 0; i < VEC_COUNT / 32; i++) {
                        data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
                        iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
                }

                for (i = 0; i < VEC_COUNT / 32 / 4; i++) {
                        bit = BIT(1 + index); /* Route to IP[1 + index] */
                        data = bit | (bit << 8) | (bit << 16) | (bit << 24);
                        iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
                }

                for (i = 0; i < VEC_COUNT / 4; i++) {
                        /* Route to Node-0 Core-0 */
                        if (index == 0)
                                bit = BIT(cpu_logical_map(0));
                        else
                                bit = (eiointc_priv[index]->node << 4) | 1;

                        data = bit | (bit << 8) | (bit << 16) | (bit << 24);
                        iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
                }

                for (i = 0; i < VEC_COUNT / 32; i++) {
                        data = 0xffffffff;
                        iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
                        iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
                }
        }

        return 0;
}

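/*
 * Chained handler for the parent-domain interrupt this controller is
 * cascaded to. Each 64-bit ISR word is read and immediately written back,
 * which appears to acknowledge the pending bits write-1-to-clear style;
 * every set bit is then dispatched through the linear domain as a hwirq.
 */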
static void eiointc_irq_dispatch(struct irq_desc *desc)
{
        int i;
        u64 pending;
        bool handled = false;
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct eiointc_priv *priv = irq_desc_get_handler_data(desc);

        chained_irq_enter(chip, desc);

        for (i = 0; i < VEC_REG_COUNT; i++) {
                pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
                iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
                while (pending) {
                        int bit = __ffs(pending);
                        int irq = bit + VEC_COUNT_PER_REG * i;

                        generic_handle_domain_irq(priv->eiointc_domain, irq);
                        pending &= ~BIT(bit);
                        handled = true;
                }
        }

        if (!handled)
                spurious_interrupt();

        chained_irq_exit(chip, desc);
}

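/*
 * The ack/mask/unmask callbacks below are deliberately empty: all vectors
 * are left enabled by eiointc_router_init(), and the only per-vector
 * masking happens transiently in eiointc_set_irq_affinity() while a route
 * is rewritten. A non-NULL ->irq_ack is still required because the vectors
 * use handle_edge_irq(), which invokes it unconditionally.
 */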
static void eiointc_ack_irq(struct irq_data *d)
{
}

static void eiointc_mask_irq(struct irq_data *d)
{
}

static void eiointc_unmask_irq(struct irq_data *d)
{
}

static struct irq_chip eiointc_irq_chip = {
        .name                   = "EIOINTC",
        .irq_ack                = eiointc_ack_irq,
        .irq_mask               = eiointc_mask_irq,
        .irq_unmask             = eiointc_unmask_irq,
        .irq_set_affinity       = eiointc_set_irq_affinity,
};

static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
{
        int ret;
        unsigned int i, type;
        unsigned long hwirq = 0;
        struct eiointc_priv *priv = domain->host_data;

        ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
        if (ret)
                return ret;

        for (i = 0; i < nr_irqs; i++) {
                irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
                                    priv, handle_edge_irq, NULL, NULL);
        }

        return 0;
}

static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs)
{
        int i;

        for (i = 0; i < nr_irqs; i++) {
                struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

                irq_set_handler(virq + i, NULL);
                irq_domain_reset_irq_data(d);
        }
}

static const struct irq_domain_ops eiointc_domain_ops = {
        .translate      = irq_domain_translate_onecell,
        .alloc          = eiointc_domain_alloc,
        .free           = eiointc_domain_free,
};

static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
{
        int i;

        if (cpu_has_flatmode)
                node = cpu_to_node(node * CORES_PER_EIO_NODE);

        for (i = 0; i < MAX_IO_PICS; i++) {
                if (node == vec_group[i].node) {
                        vec_group[i].parent = parent;
                        return;
                }
        }
}

struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
        int i;

        for (i = 0; i < MAX_IO_PICS; i++) {
                if (node == vec_group[i].node)
                        return vec_group[i].parent;
        }
        return NULL;
}

static int __init
pch_pic_parse_madt(union acpi_subtable_headers *header,
                   const unsigned long end)
{
        struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
        unsigned int node = (pchpic_entry->address >> 44) & 0xf;
        struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);

        if (parent)
                return pch_pic_acpi_init(parent, pchpic_entry);

        return -EINVAL;
}

static int __init
pch_msi_parse_madt(union acpi_subtable_headers *header,
                   const unsigned long end)
{
        struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
        struct irq_domain *parent = acpi_get_vec_parent(eiointc_priv[nr_pics - 1]->node, msi_group);

        if (parent)
                return pch_msi_acpi_init(parent, pchmsi_entry);

        return -EINVAL;
}

static int __init acpi_cascade_irqdomain_init(void)
{
        acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC,
                              pch_pic_parse_madt, 0);
        acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC,
                              pch_msi_parse_madt, 1);
        return 0;
}

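/*
 * Probe one EIOINTC described by an ACPI MADT entry: work out which EIO
 * nodes and CPUs it spans, create a VEC_COUNT-sized linear IRQ domain,
 * perform the initial routing from the boot CPU, chain the controller onto
 * its parent's cascade vector, register the CPU hotplug callback, and
 * record this domain as the parent for the PCH PIC/MSI controllers parsed
 * from the MADT afterwards.
 */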
int __init eiointc_acpi_init(struct irq_domain *parent,
                             struct acpi_madt_eio_pic *acpi_eiointc)
{
        int i, parent_irq;
        unsigned long node_map;
        struct eiointc_priv *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_eiointc);
        if (!priv->domain_handle) {
                pr_err("Unable to allocate domain handle\n");
                goto out_free_priv;
        }

        priv->node = acpi_eiointc->node;
        node_map = acpi_eiointc->node_map ? : -1ULL;

        for_each_possible_cpu(i) {
                if (node_map & (1ULL << cpu_to_eio_node(i))) {
                        node_set(cpu_to_eio_node(i), priv->node_map);
                        cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i));
                }
        }

        /* Setup IRQ domain */
        priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT,
                                                        &eiointc_domain_ops, priv);
        if (!priv->eiointc_domain) {
                pr_err("loongson-eiointc: cannot add IRQ domain\n");
                goto out_free_handle;
        }

        eiointc_priv[nr_pics++] = priv;

        eiointc_router_init(0);

        parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
        irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);

        cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
                                  "irqchip/loongarch/intc:starting",
                                  eiointc_router_init, NULL);

        acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, pch_group);
        acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, msi_group);
        acpi_cascade_irqdomain_init();

        return 0;

out_free_handle:
        irq_domain_free_fwnode(priv->domain_handle);
        priv->domain_handle = NULL;
out_free_priv:
        kfree(priv);

        return -ENOMEM;
}