/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
#include <asm/setup.h>

static DEFINE_RAW_SPINLOCK(mcip_lock);
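/*
 * All ARConnect commands funnel through a single aux register pair:
 * __mcip_cmd()/__mcip_cmd_data() (see soc/arc/mcip.h) write the
 * {param, cmd} tuple to ARC_REG_MCIP_CMD, and any result is then read
 * back from ARC_REG_MCIP_READBACK. @mcip_lock serializes those
 * multi-register sequences across cores. A minimal sketch of the
 * readback idiom used throughout this file (illustrative only):
 *
 *	raw_spin_lock_irqsave(&mcip_lock, flags);
 *	__mcip_cmd(CMD_GFRC_READ_CORE, 0);
 *	core_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
 *	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 */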
#ifdef CONFIG_SMP

static char smp_cpuinfo_buf[128];

/*
 * Set the mask to halt the GFRC if any online core in the SMP cluster
 * is halted. This only works on ARC HS v3.0+; on earlier versions it
 * has no effect.
 */
static void mcip_update_gfrc_halt_mask(int cpu)
{
	struct bcr_generic gfrc;
	unsigned long flags;
	u32 gfrc_halt_mask;

	READ_BCR(ARC_REG_GFRC_BUILD, gfrc);

	/*
	 * The CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were only
	 * added in GFRC version 0x3.
	 */
	if (gfrc.ver < 0x3)
		return;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	__mcip_cmd(CMD_GFRC_READ_CORE, 0);
	gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
	gfrc_halt_mask |= BIT(cpu);
	__mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_update_debug_halt_mask(int cpu)
{
	u32 mcip_mask = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * mcip_mask is the same for the CMD_DEBUG_SET_SELECT and
	 * CMD_DEBUG_SET_MASK commands, so read it once via
	 * CMD_DEBUG_READ_SELECT instead of reading both
	 * CMD_DEBUG_READ_MASK and CMD_DEBUG_READ_SELECT.
	 */
	__mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
	mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);

	mcip_mask |= BIT(cpu);

	__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
	/*
	 * The parameter selects which halt causes to react to:
	 * STATUS32[H]/actionpoint/breakpoint/self-halt.
	 * We choose all of them (0xF).
	 */
	__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_setup_per_cpu(int cpu)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	smp_ipi_irq_setup(cpu, IPI_IRQ);
	smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);

	/* Update GFRC halt mask as a new CPU came online */
	if (mp.gfrc)
		mcip_update_gfrc_halt_mask(cpu);

	/* Update MCIP debug mask as a new CPU came online */
	if (mp.dbg)
		mcip_update_debug_halt_mask(cpu);
}

static void mcip_ipi_send(int cpu)
{
	unsigned long flags;
	int ipi_was_pending;

	/* ARConnect can only send an IPI to other cores */
	if (unlikely(cpu == raw_smp_processor_id())) {
		arc_softirq_trigger(SOFTIRQ_IRQ);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * If the receiver already has a pending interrupt, elide sending
	 * this one. Linux cross-core calling works fine with concurrent
	 * IPIs coalesced into one:
	 * see arch/arc/kernel/smp.c: ipi_send_msg_one()
	 */
	__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
	ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
	if (!ipi_was_pending)
		__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_ipi_clear(int irq)
{
	unsigned int cpu, c;
	unsigned long flags;

	if (unlikely(irq == SOFTIRQ_IRQ)) {
		arc_softirq_clear(irq);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Who sent the IPI */
	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

	cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */

	/*
	 * In the rare case that multiple concurrent IPIs sent to the same
	 * target get coalesced by MCIP into one asserted IRQ, @cpu can be
	 * "vectored" (multiple bits set) as opposed to the typical single
	 * bit.
	 */
	do {
		c = __ffs(cpu);			/* 0,1,2,3 */
		__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
		cpu &= ~(1U << c);
	} while (cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_probe_n_setup(void)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	sprintf(smp_cpuinfo_buf,
		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
		mp.ver, mp.num_cores,
		IS_AVAIL1(mp.ipi, "IPI "),
		IS_AVAIL1(mp.idu, "IDU "),
		IS_AVAIL1(mp.dbg, "DEBUG "),
		IS_AVAIL1(mp.gfrc, "GFRC"));

	cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
}

struct plat_smp_ops plat_smp_ops = {
	.info		= smp_cpuinfo_buf,
	.init_early_smp	= mcip_probe_n_setup,
	.init_per_cpu	= mcip_setup_per_cpu,
	.ipi_send	= mcip_ipi_send,
	.ipi_clear	= mcip_ipi_clear,
};

#endif /* CONFIG_SMP */

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to the core intc, providing:
 *  - dynamic routing (IRQ affinity)
 *  - load balancing (Round Robin interrupt distribution)
 *  - 1:N distribution
 *
 * It physically resides in the MCIP hw block
 */

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
	__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}

static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
			 unsigned int distr)
{
	union {
		unsigned int word;
		struct {
			unsigned int distr:2, pad:2, lvl:1, pad2:27;
		};
	} data;

	data.word = 0;	/* don't send uninitialized pad bits to hw */
	data.distr = distr;
	data.lvl = lvl;
	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}
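/*
 * Layout of the CMD_IDU_SET_MODE data word built above (assuming the
 * little-endian bitfield allocation this union already relies on):
 * distr sits in bits [1:0] and lvl in bit [4]. E.g. an edge-triggered,
 * round-robin mode would encode as
 * (IDU_M_TRIG_EDGE << 4) | IDU_M_DISTRI_RR == 0x11.
 */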
static void idu_irq_mask_raw(irq_hw_number_t hwirq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask(struct irq_data *data)
{
	idu_irq_mask_raw(data->hwirq);
}

static void idu_irq_unmask(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
		     bool force)
{
	unsigned long flags;
	cpumask_t online;
	unsigned int destination_bits;
	unsigned int distribution_mode;

	/* Error out if no online CPU is left per @cpumask */
	if (!cpumask_and(&online, cpumask, cpu_online_mask))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	destination_bits = cpumask_bits(&online)[0];
	idu_set_dest(data->hwirq, destination_bits);

	/* Single destination CPU: fixed delivery; multiple: Round Robin */
	if (ffs(destination_bits) == fls(destination_bits))
		distribution_mode = IDU_M_DISTRI_DEST;
	else
		distribution_mode = IDU_M_DISTRI_RR;

	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return IRQ_SET_MASK_OK;
}

static void idu_irq_enable(struct irq_data *data)
{
	/*
	 * By default send all common interrupts to all available online CPUs.
	 * The affinity of common interrupts in the IDU must be set manually
	 * since in some cases the kernel will not call irq_set_affinity() by
	 * itself:
	 *   1. When the kernel is not configured with SMP support.
	 *   2. When the kernel is configured with SMP support but the upper
	 *      interrupt controller does not support setting affinity and
	 *      thus cannot propagate it to the IDU.
	 */
	idu_irq_set_affinity(data, cpu_online_mask, false);
	idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
	.name			= "MCIP IDU Intc",
	.irq_mask		= idu_irq_mask,
	.irq_unmask		= idu_irq_unmask,
	.irq_enable		= idu_irq_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity	= idu_irq_set_affinity,
#endif
};

static void idu_cascade_isr(struct irq_desc *desc)
{
	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
	struct irq_chip *core_chip = irq_desc_get_chip(desc);
	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
	irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;

	chained_irq_enter(core_chip, desc);
	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
	chained_irq_exit(core_chip, desc);
}

static int idu_irq_map(struct irq_domain *d, unsigned int virq,
		       irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

	return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
	.xlate	= irq_domain_xlate_onecell,
	.map	= idu_irq_map,
};

/*
 * Core intc IRQ numbering:
 * [16, 23]:   Statically assigned, always private-per-core (Timers, WDT, IPI)
 * [24, 23+C]: If C > 0 then "C" common IRQs (the IDU uplinks)
 * [24+C, N]:  Not statically assigned, private-per-core
 */
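/*
 * Worked example (illustrative): with C == 8 common IRQs, IDU hwirqs
 * 0..7 ride on core intc IRQs 24..31, i.e. core IRQ = IDU hwirq +
 * FIRST_EXT_IRQ, which is exactly the offset idu_cascade_isr() undoes.
 */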
static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *domain;
	int nr_irqs;
	int i, virq;
	struct mcip_bcr mp;
	struct mcip_idu_bcr idu_bcr;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	if (!mp.idu)
		panic("IDU not detected, but DeviceTree using it");

	READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
	nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);

	pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */

	for (i = 0; i < nr_irqs; i++) {
		/* Mask all common interrupts by default */
		idu_irq_mask_raw(i);

		/*
		 * Map the parent uplink IRQs (towards core intc): 24, 25, ...
		 * This has already been done before; we only repeat it here
		 * to get the parent virq, so that the IDU cascade handler
		 * can be installed as its first-level ISR.
		 */
		virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
		BUG_ON(!virq);
		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);
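/*
 * An illustrative (not normative) DeviceTree node for this controller,
 * assuming the one-cell binding implied by irq_domain_xlate_onecell
 * above, where the single cell is the common IRQ number:
 *
 *	idu_intc: idu-interrupt-controller {
 *		compatible = "snps,archs-idu-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *	};
 */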