// SPDX-License-Identifier: GPL-2.0
/*
 * ip30-irq.c: Highlevel interrupt handling for IP30 architecture.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/tick.h>
#include <linux/types.h>

#include <asm/irq_cpu.h>
#include <asm/sgi/heart.h>

#include "ip30-common.h"

struct heart_irq_data {
	u64	*irq_mask;
	int	cpu;
};

static DECLARE_BITMAP(heart_irq_map, HEART_NUM_IRQS);

static DEFINE_PER_CPU(unsigned long, irq_enable_mask);

static inline int heart_alloc_int(void)
{
	int bit;

again:
	bit = find_first_zero_bit(heart_irq_map, HEART_NUM_IRQS);
	if (bit >= HEART_NUM_IRQS)
		return -ENOSPC;

	if (test_and_set_bit(bit, heart_irq_map))
		goto again;

	return bit;
}

static void ip30_error_irq(struct irq_desc *desc)
{
	u64 pending, mask, cause, error_irqs, err_reg;
	int cpu = smp_processor_id();
	int i;

	pending = heart_read(&heart_regs->isr);
	mask = heart_read(&heart_regs->imr[cpu]);
	cause = heart_read(&heart_regs->cause);
	error_irqs = (pending & HEART_L4_INT_MASK & mask);

	/* Bail if there's nothing to process (how did we get here, then?) */
	if (unlikely(!error_irqs))
		return;

	/* Prevent any of the error IRQs from firing again. */
	heart_write(mask & ~(pending), &heart_regs->imr[cpu]);

	/* Ack all error IRQs. */
	heart_write(HEART_L4_INT_MASK, &heart_regs->clear_isr);

	/*
	 * If we also have a cause value, then something happened, so loop
	 * through the error IRQs and report a "heart attack" for each one
	 * and print the value of the HEART cause register.  This is really
	 * primitive right now, but it should hopefully work until a more
	 * robust error handling routine can be put together.
	 *
	 * Refer to heart.h for the HC_* macros to work out the cause
	 * that got us here.
	 */
	if (cause) {
		pr_alert("IP30: CPU%d: HEART ATTACK! ISR = 0x%.16llx, IMR = 0x%.16llx, CAUSE = 0x%.16llx\n",
			 cpu, pending, mask, cause);

		if (cause & HC_COR_MEM_ERR) {
			err_reg = heart_read(&heart_regs->mem_err_addr);
			pr_alert("  HEART_MEMERR_ADDR = 0x%.16llx\n", err_reg);
		}

		/* i = 63; i >= 51; i-- */
		for (i = HEART_ERR_MASK_END; i >= HEART_ERR_MASK_START; i--)
			if ((pending >> i) & 1)
				pr_alert("  HEART Error IRQ #%d\n", i);

		/* XXX: Seems possible to loop forever here, so panic(). */
		panic("IP30: Fatal Error !\n");
	}

	/* Unmask the error IRQs. */
	heart_write(mask, &heart_regs->imr[cpu]);
}

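/*
 * Chained handler for the "normal" (level 0-2) HEART outputs: read the
 * pending bits, AND them with this CPU's interrupt mask, service IPIs
 * inline on SMP, and route anything else through the HEART irq_domain
 * (passed in as handler data) via its linear revmap.  Only the lowest
 * pending source is dispatched per invocation; any remaining sources
 * keep the line asserted and re-enter this handler.
 */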
static void ip30_normal_irq(struct irq_desc *desc)
{
	int cpu = smp_processor_id();
	struct irq_domain *domain;
	u64 pend, mask;
	int irq;

	pend = heart_read(&heart_regs->isr);
	mask = (heart_read(&heart_regs->imr[cpu]) &
		(HEART_L0_INT_MASK | HEART_L1_INT_MASK | HEART_L2_INT_MASK));

	pend &= mask;
	if (unlikely(!pend))
		return;

#ifdef CONFIG_SMP
	if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_0)) {
		heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0),
			    &heart_regs->clear_isr);
		scheduler_ipi();
	} else if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_1)) {
		heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_1),
			    &heart_regs->clear_isr);
		scheduler_ipi();
	} else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_0)) {
		heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0),
			    &heart_regs->clear_isr);
		generic_smp_call_function_interrupt();
	} else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_1)) {
		heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_1),
			    &heart_regs->clear_isr);
		generic_smp_call_function_interrupt();
	} else
#endif
	{
		domain = irq_desc_get_handler_data(desc);
		irq = irq_linear_revmap(domain, __ffs(pend));
		if (irq)
			generic_handle_irq(irq);
		else
			spurious_interrupt();
	}
}

static void ip30_ack_heart_irq(struct irq_data *d)
{
	heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
}

static void ip30_mask_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
}

static void ip30_mask_and_ack_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
	heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
}

static void ip30_unmask_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	set_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
}

static int ip30_set_heart_irq_affinity(struct irq_data *d,
				       const struct cpumask *mask, bool force)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);

	if (!hd)
		return -EINVAL;

	if (irqd_is_started(d))
		ip30_mask_and_ack_heart_irq(d);

	hd->cpu = cpumask_first_and(mask, cpu_online_mask);

	if (irqd_is_started(d))
		ip30_unmask_heart_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));

	return 0;
}

static struct irq_chip heart_irq_chip = {
	.name			= "HEART",
	.irq_ack		= ip30_ack_heart_irq,
	.irq_mask		= ip30_mask_heart_irq,
	.irq_mask_ack		= ip30_mask_and_ack_heart_irq,
	.irq_unmask		= ip30_unmask_heart_irq,
	.irq_set_affinity	= ip30_set_heart_irq_affinity,
};

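/*
 * Domain callbacks: heart_domain_alloc() reserves a free hwirq bit in
 * heart_irq_map (one HEART source per virq, hence nr_irqs must be 1)
 * and binds it to heart_irq_chip with handle_level_irq;
 * heart_domain_free() returns the bit to the map and frees the
 * per-IRQ chip data.
 */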
static int heart_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct heart_irq_data *hd;
	int hwirq;

	if (nr_irqs > 1 || !info)
		return -EINVAL;

	hd = kzalloc(sizeof(*hd), GFP_KERNEL);
	if (!hd)
		return -ENOMEM;

	hwirq = heart_alloc_int();
	if (hwirq < 0) {
		kfree(hd);
		return -EAGAIN;
	}
	irq_domain_set_info(domain, virq, hwirq, &heart_irq_chip, hd,
			    handle_level_irq, NULL, NULL);

	return 0;
}

static void heart_domain_free(struct irq_domain *domain,
			      unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irqd;

	if (nr_irqs > 1)
		return;

	irqd = irq_domain_get_irq_data(domain, virq);
	if (irqd) {
		clear_bit(irqd->hwirq, heart_irq_map);
		kfree(irqd->chip_data);
	}
}

static const struct irq_domain_ops heart_domain_ops = {
	.alloc = heart_domain_alloc,
	.free  = heart_domain_free,
};

void __init ip30_install_ipi(void)
{
	int cpu = smp_processor_id();
	unsigned long *mask = &per_cpu(irq_enable_mask, cpu);

	set_bit(HEART_L2_INT_RESCHED_CPU_0 + cpu, mask);
	heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0 + cpu),
		    &heart_regs->clear_isr);
	set_bit(HEART_L2_INT_CALL_CPU_0 + cpu, mask);
	heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0 + cpu),
		    &heart_regs->clear_isr);

	heart_write(*mask, &heart_regs->imr[cpu]);
}

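/*
 * Boot-time setup: mask and ack everything, pre-enable the per-CPU
 * error interrupts, reserve the fixed-purpose HEART bits so that
 * heart_alloc_int() never hands them out, then create the irq_domain
 * and chain the four HEART outputs (levels 0-2 plus errors) onto the
 * IP30_HEART_*_IRQ MIPS CPU interrupt lines from ip30-common.h.
 */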
void __init arch_init_irq(void)
{
	struct irq_domain *domain;
	struct fwnode_handle *fn;
	unsigned long *mask;
	int i;

	mips_cpu_irq_init();

	/* Mask all IRQs. */
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[0]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[1]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[2]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[3]);

	/* Ack everything. */
	heart_write(HEART_ACK_ALL_MASK, &heart_regs->clear_isr);

	/* Enable specific HEART error IRQs for each CPU. */
	mask = &per_cpu(irq_enable_mask, 0);
	*mask |= HEART_CPU0_ERR_MASK;
	heart_write(*mask, &heart_regs->imr[0]);
	mask = &per_cpu(irq_enable_mask, 1);
	*mask |= HEART_CPU1_ERR_MASK;
	heart_write(*mask, &heart_regs->imr[1]);

	/*
	 * Some HEART bits are reserved by hardware or by software convention.
	 * Mark these as reserved right away so they won't be accidentally
	 * used later.
	 */
	set_bit(HEART_L0_INT_GENERIC, heart_irq_map);
	set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_0, heart_irq_map);
	set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_1, heart_irq_map);
	set_bit(HEART_L2_INT_RESCHED_CPU_0, heart_irq_map);
	set_bit(HEART_L2_INT_RESCHED_CPU_1, heart_irq_map);
	set_bit(HEART_L2_INT_CALL_CPU_0, heart_irq_map);
	set_bit(HEART_L2_INT_CALL_CPU_1, heart_irq_map);
	set_bit(HEART_L3_INT_TIMER, heart_irq_map);

	/* Reserve the error interrupts (#51 to #63). */
	for (i = HEART_L4_INT_XWID_ERR_9; i <= HEART_L4_INT_HEART_EXCP; i++)
		set_bit(i, heart_irq_map);

	fn = irq_domain_alloc_named_fwnode("HEART");
	WARN_ON(fn == NULL);
	if (!fn)
		return;
	domain = irq_domain_create_linear(fn, HEART_NUM_IRQS,
					  &heart_domain_ops, NULL);
	WARN_ON(domain == NULL);
	if (!domain)
		return;

	irq_set_default_host(domain);

	irq_set_percpu_devid(IP30_HEART_L0_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L0_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_L1_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L1_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_L2_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L2_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_ERR_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_ERR_IRQ, ip30_error_irq,
					 domain);
}
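
/*
 * Usage sketch (illustrative only; nothing below is defined in this
 * file): a client such as the xtalk bridge allocates a HEART input
 * through the default domain installed above, e.g.:
 *
 *	struct irq_alloc_info info = {};
 *	int virq = irq_domain_alloc_irqs(irq_get_default_host(), 1,
 *					 NUMA_NO_NODE, &info);
 *
 * A negative return means heart_alloc_int() found no free bit
 * (-ENOSPC, reported as -EAGAIN by heart_domain_alloc()).  On success
 * the virq can be passed to request_irq(), and retargeting it via
 * /proc/irq/<virq>/smp_affinity lands in ip30_set_heart_irq_affinity().
 */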