/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
 */

#include <linux/interrupt.h>
#include <linux/ioport.h>

#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>

#include <lantiq_soc.h>
#include <irq.h>

/* register definitions */
#define LTQ_ICU_IM0_ISR		0x0000
#define LTQ_ICU_IM0_IER		0x0008
#define LTQ_ICU_IM0_IOSR	0x0010
#define LTQ_ICU_IM0_IRSR	0x0018
#define LTQ_ICU_IM0_IMR		0x0020
#define LTQ_ICU_IM1_ISR		0x0028
#define LTQ_ICU_OFFSET		(LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)

#define LTQ_EIU_EXIN_C		0x0000
#define LTQ_EIU_EXIN_INIC	0x0004
#define LTQ_EIU_EXIN_INEN	0x000C

/* irq numbers used by the external interrupt unit (EIU) */
#define LTQ_EIU_IR0		(INT_NUM_IM4_IRL0 + 30)
#define LTQ_EIU_IR1		(INT_NUM_IM3_IRL0 + 31)
#define LTQ_EIU_IR2		(INT_NUM_IM1_IRL0 + 26)
#define LTQ_EIU_IR3		INT_NUM_IM1_IRL0
#define LTQ_EIU_IR4		(INT_NUM_IM1_IRL0 + 1)
#define LTQ_EIU_IR5		(INT_NUM_IM1_IRL0 + 2)
#define LTQ_EIU_IR6		(INT_NUM_IM2_IRL0 + 30)

#define MAX_EIU			6

/* irqs generated by device attached to the EBU need to be acked in
 * a special manner
 */
#define LTQ_ICU_EBU_IRQ		22

#define ltq_icu_w32(x, y)	ltq_w32((x), ltq_icu_membase + (y))
#define ltq_icu_r32(x)		ltq_r32(ltq_icu_membase + (x))

#define ltq_eiu_w32(x, y)	ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x)		ltq_r32(ltq_eiu_membase + (x))

static unsigned short ltq_eiu_irq[MAX_EIU] = {
	LTQ_EIU_IR0,
	LTQ_EIU_IR1,
	LTQ_EIU_IR2,
	LTQ_EIU_IR3,
	LTQ_EIU_IR4,
	LTQ_EIU_IR5,
};

static struct resource ltq_icu_resource = {
	.name	= "icu",
	.start	= LTQ_ICU_BASE_ADDR,
	.end	= LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1,
	.flags	= IORESOURCE_MEM,
};

static struct resource ltq_eiu_resource = {
	.name	= "eiu",
	.start	= LTQ_EIU_BASE_ADDR,
	.end	= LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1,
	.flags	= IORESOURCE_MEM,
};

static void __iomem *ltq_icu_membase;
static void __iomem *ltq_eiu_membase;

void ltq_disable_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	int irq_nr = d->irq - INT_NUM_IRQ0;

	ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
	irq_nr %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
}

void ltq_mask_and_ack_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	u32 isr = LTQ_ICU_IM0_ISR;
	int irq_nr = d->irq - INT_NUM_IRQ0;

	ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
	isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
	irq_nr %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
	ltq_icu_w32((1 << irq_nr), isr);
}

static void ltq_ack_irq(struct irq_data *d)
{
	u32 isr = LTQ_ICU_IM0_ISR;
	int irq_nr = d->irq - INT_NUM_IRQ0;

	isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
	irq_nr %= INT_NUM_IM_OFFSET;
	ltq_icu_w32((1 << irq_nr), isr);
}

void ltq_enable_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	int irq_nr = d->irq - INT_NUM_IRQ0;

	ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
	irq_nr %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier);
}
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_enable_irq(d);
	for (i = 0; i < MAX_EIU; i++) {
		if (d->irq == ltq_eiu_irq[i]) {
			/* low level - we should really handle set_type */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
				(0x6 << (i * 4)), LTQ_EIU_EXIN_C);
			/* clear all pending */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i),
				LTQ_EIU_EXIN_INIC);
			/* enable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}

	return 0;
}

static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_disable_irq(d);
	for (i = 0; i < MAX_EIU; i++) {
		if (d->irq == ltq_eiu_irq[i]) {
			/* disable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}
}

static struct irq_chip ltq_irq_type = {
	.name = "icu",
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
};

static struct irq_chip ltq_eiu_type = {
	.name = "eiu",
	.irq_startup = ltq_startup_eiu_irq,
	.irq_shutdown = ltq_shutdown_eiu_irq,
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
};

static void ltq_hw_irqdispatch(int module)
{
	u32 irq;

	irq = ltq_icu_r32(LTQ_ICU_IM0_IOSR + (module * LTQ_ICU_OFFSET));
	if (irq == 0)
		return;

	/* silicon bug causes only the msb set to 1 to be valid. all
	 * other bits might be bogus
	 */
	irq = __fls(irq);
	do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module));

	/* if this is a EBU irq, we need to ack it or get a deadlock */
	if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0))
		ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
			LTQ_EBU_PCC_ISTAT);
}

#define DEFINE_HWx_IRQDISPATCH(x)					\
	static void ltq_hw ## x ## _irqdispatch(void)			\
	{								\
		ltq_hw_irqdispatch(x);					\
	}
DEFINE_HWx_IRQDISPATCH(0)
DEFINE_HWx_IRQDISPATCH(1)
DEFINE_HWx_IRQDISPATCH(2)
DEFINE_HWx_IRQDISPATCH(3)
DEFINE_HWx_IRQDISPATCH(4)

static void ltq_hw5_irqdispatch(void)
{
	do_IRQ(MIPS_CPU_TIMER_IRQ);
}

asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
	unsigned int i;

	if (pending & CAUSEF_IP7) {
		do_IRQ(MIPS_CPU_TIMER_IRQ);
		goto out;
	} else {
		for (i = 0; i < 5; i++) {
			if (pending & (CAUSEF_IP2 << i)) {
				ltq_hw_irqdispatch(i);
				goto out;
			}
		}
	}
	pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_cause());

out:
	return;
}

static struct irqaction cascade = {
	.handler = no_action,
	.name = "cascade",
};

void __init arch_init_irq(void)
{
	int i;

	if (insert_resource(&iomem_resource, &ltq_icu_resource) < 0)
		panic("Failed to insert icu memory");

	if (!request_mem_region(ltq_icu_resource.start,
			resource_size(&ltq_icu_resource), "icu"))
		panic("Failed to request icu memory");

	ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start,
				resource_size(&ltq_icu_resource));
	if (!ltq_icu_membase)
		panic("Failed to remap icu memory");
	if (insert_resource(&iomem_resource, &ltq_eiu_resource) < 0)
		panic("Failed to insert eiu memory");

	if (!request_mem_region(ltq_eiu_resource.start,
			resource_size(&ltq_eiu_resource), "eiu"))
		panic("Failed to request eiu memory");

	ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start,
				resource_size(&ltq_eiu_resource));
	if (!ltq_eiu_membase)
		panic("Failed to remap eiu memory");

	for (i = 0; i < 5; i++) {
		/* make sure all irqs are turned off by default */
		ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET));
		/* clear all possibly pending interrupts */
		ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
	}

	mips_cpu_irq_init();

	for (i = 2; i <= 6; i++)
		setup_irq(i, &cascade);

	if (cpu_has_vint) {
		pr_info("Setting up vectored interrupts\n");
		set_vi_handler(2, ltq_hw0_irqdispatch);
		set_vi_handler(3, ltq_hw1_irqdispatch);
		set_vi_handler(4, ltq_hw2_irqdispatch);
		set_vi_handler(5, ltq_hw3_irqdispatch);
		set_vi_handler(6, ltq_hw4_irqdispatch);
		set_vi_handler(7, ltq_hw5_irqdispatch);
	}

	for (i = INT_NUM_IRQ0;
		i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++)
		if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) ||
			(i == LTQ_EIU_IR2))
			irq_set_chip_and_handler(i, &ltq_eiu_type,
				handle_level_irq);
		/* EIU3-5 only exist on ar9 and vr9 */
		else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) ||
			(i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9()))
			irq_set_chip_and_handler(i, &ltq_eiu_type,
				handle_level_irq);
		else
			irq_set_chip_and_handler(i, &ltq_irq_type,
				handle_level_irq);

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
		IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#else
	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
		IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#endif
}

unsigned int __cpuinit get_c0_compare_int(void)
{
	return CP0_LEGACY_COMPARE_IRQ;
}