/*
 * OMAP WakeupGen Source file
 *
 * OMAP WakeupGen is the interrupt controller extension used along
 * with the ARM GIC to wake the CPU out from low power states on
 * external interrupts. It is responsible for generating wakeup
 * events from the incoming interrupts and enable bits. It is
 * implemented in the MPU always-ON power domain. During normal
 * operation, WakeupGen delivers external interrupts directly to
 * the GIC.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/cpu_pm.h>
#include <linux/irqchip/arm-gic.h>

#include "omap-wakeupgen.h"
#include "omap-secure.h"

#include "soc.h"
#include "omap4-sar-layout.h"
#include "common.h"

#define MAX_NR_REG_BANKS	5
#define MAX_IRQS		160
#define WKG_MASK_ALL		0x00000000
#define WKG_UNMASK_ALL		0xffffffff
#define CPU_ENA_OFFSET		0x400
#define CPU0_ID			0x0
#define CPU1_ID			0x1
#define OMAP4_NR_BANKS		4
#define OMAP4_NR_IRQS		128

static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
static unsigned int irq_target_cpu[MAX_IRQS];
static unsigned int irq_banks = MAX_NR_REG_BANKS;
static unsigned int max_irqs = MAX_IRQS;
static unsigned int omap_secure_apis;

/*
 * Static helper functions.
 */
static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
{
	return __raw_readl(wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
{
	__raw_writel(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void sar_writel(u32 val, u32 offset, u8 idx)
{
	__raw_writel(val, sar_base + offset + (idx * 4));
}

static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
{
	unsigned int spi_irq;

	/*
	 * PPIs and SGIs are not supported.
	 */
	if (irq < OMAP44XX_IRQ_GIC_START)
		return -EINVAL;

	/*
	 * Subtract the GIC offset.
	 */
	spi_irq = irq - OMAP44XX_IRQ_GIC_START;
	if (spi_irq >= MAX_IRQS) {
		pr_err("omap wakeupGen: Invalid IRQ%d\n", irq);
		return -EINVAL;
	}

	/*
	 * Each WakeupGen register controls 32 interrupts,
	 * i.e. 1 bit per SPI IRQ.
	 */
	*reg_index = spi_irq >> 5;
	*bit_posn = spi_irq % 32;

	return 0;
}
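/*
 * Worked example of the mapping above (illustration only, not used by
 * the code): an SPI offset of 41 resolves to register bank 1 (41 >> 5)
 * and bit position 9 (41 % 32), i.e. bit 9 of the second per-CPU
 * enable register.
 */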
static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val &= ~BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val |= BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

/*
 * Architecture specific Mask extension
 */
static void wakeupgen_mask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_clear(d->irq, irq_target_cpu[d->irq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}

/*
 * Architecture specific Unmask extension
 */
static void wakeupgen_unmask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_set(d->irq, irq_target_cpu[d->irq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);

static void _wakeupgen_save_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
}

static void _wakeupgen_restore_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
}

static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(reg, i, cpu);
}

/*
 * Mask or unmask all interrupts on given CPU.
 *	1 = Save the current masks and then mask all interrupts on 'cpu'
 *	0 = Unmask all interrupts on 'cpu' and restore the saved masks
 * Restoring the saved masks keeps the initial mask intact and is
 * faster than iterating through the GIC registers to rebuild the
 * correct masks.
 */
static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	if (set) {
		_wakeupgen_save_masks(cpu);
		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
	} else {
		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
		_wakeupgen_restore_masks(cpu);
	}
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}
#endif

#ifdef CONFIG_CPU_PM
static inline void omap4_irq_save_context(void)
{
	u32 i, val;

	if (omap_rev() == OMAP4430_REV_ES1_0)
		return;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 127 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);

		/*
		 * Disable the secure interrupts for CPUx. The restore
		 * code blindly restores secure and non-secure interrupt
		 * masks from SAR RAM. Secure interrupts are not supposed
		 * to be enabled from HLOS, so overwrite the SAR location
		 * so that the secure interrupts remain disabled.
		 */
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	__raw_writel(val, sar_base + AUXCOREBOOT0_OFFSET);
	val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	__raw_writel(val, sar_base + AUXCOREBOOT1_OFFSET);

	/* Save SyncReq generation logic */
	val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
	__raw_writel(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
	val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
	__raw_writel(val, sar_base + PTMSYNCREQ_EN_OFFSET);

	/* Set the Backup Bit Mask status */
	val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	__raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
}
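/*
 * OMAP5 variant of the WakeupGen context save: same scheme as the
 * OMAP4 path above, but it covers all five register banks (IRQ 0 to
 * 159) and uses the OMAP5 SAR RAM layout offsets. The secure enable
 * words are likewise overwritten with zero so that secure interrupts
 * stay disabled across the restore.
 */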
static inline void omap5_irq_save_context(void)
{
	u32 i, val;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 159 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	__raw_writel(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET);
	val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	__raw_writel(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET);

	/* Set the Backup Bit Mask status */
	val = __raw_readl(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	__raw_writel(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
}

/*
 * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
 * ROM code. The WakeupGen IP is integrated along with the GIC to
 * manage interrupt wakeups from CPU low power states. It manages
 * masking/unmasking of Shared Peripheral Interrupts (SPI), so the
 * interrupt enable/disable control must be kept in sync and
 * consistent between WakeupGen and GIC so that interrupts are not
 * lost.
 */
static void irq_save_context(void)
{
	if (!sar_base)
		sar_base = omap4_get_sar_ram_base();

	if (soc_is_omap54xx())
		omap5_irq_save_context();
	else
		omap4_irq_save_context();
}

/*
 * Clear WakeupGen SAR backup status.
 */
static void irq_sar_clear(void)
{
	u32 val;
	u32 offset = SAR_BACKUP_STATUS_OFFSET;

	if (soc_is_omap54xx())
		offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;

	val = __raw_readl(sar_base + offset);
	val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
	__raw_writel(val, sar_base + offset);
}
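/*
 * Note: the SAR_BACKUP_STATUS_WAKEUPGEN flag set in the save paths
 * above marks the SAR RAM copy as valid (presumably consumed by the
 * ROM restore code mentioned above); it is cleared again on cluster
 * PM exit via irq_sar_clear() so that a stale backup is not treated
 * as current on a later low power transition.
 */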
/*
 * Save GIC and WakeupGen interrupt context using the secure API
 * for HS/EMU devices.
 */
static void irq_save_secure_context(void)
{
	u32 ret;

	ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
				     FLAG_START_CRITICAL,
				     0, 0, 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK)
		pr_err("GIC and Wakeupgen context save failed\n");
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)hcpu;

	switch (action) {
	case CPU_ONLINE:
		wakeupgen_irqmask_all(cpu, 0);
		break;
	case CPU_DEAD:
		wakeupgen_irqmask_all(cpu, 1);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata irq_hotplug_notifier = {
	.notifier_call = irq_cpu_hotplug_notify,
};

static void __init irq_hotplug_init(void)
{
	register_hotcpu_notifier(&irq_hotplug_notifier);
}
#else
static void __init irq_hotplug_init(void)
{}
#endif

#ifdef CONFIG_CPU_PM
static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
			irq_save_context();
		else
			irq_save_secure_context();
		break;
	case CPU_CLUSTER_PM_EXIT:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
			irq_sar_clear();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block irq_notifier_block = {
	.notifier_call = irq_notifier,
};

static void __init irq_pm_init(void)
{
	/* FIXME: Remove this when MPU OSWR support is added */
	if (!soc_is_omap54xx())
		cpu_pm_register_notifier(&irq_notifier_block);
}
#else
static void __init irq_pm_init(void)
{}
#endif

void __iomem *omap_get_wakeupgen_base(void)
{
	return wakeupgen_base;
}

int omap_secure_apis_support(void)
{
	return omap_secure_apis;
}

/*
 * Initialise the WakeupGen module.
 */
int __init omap_wakeupgen_init(void)
{
	int i;
	unsigned int boot_cpu = smp_processor_id();

	/* Not supported on OMAP4 ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
		return -EPERM;
	}

	/* Static mapping, never released */
	wakeupgen_base = ioremap(OMAP_WKUPGEN_BASE, SZ_4K);
	if (WARN_ON(!wakeupgen_base))
		return -ENOMEM;

	if (cpu_is_omap44xx()) {
		irq_banks = OMAP4_NR_BANKS;
		max_irqs = OMAP4_NR_IRQS;
		omap_secure_apis = 1;
	}

	/* Clear all IRQ bitmasks at WakeupGen level */
	for (i = 0; i < irq_banks; i++) {
		wakeupgen_writel(0, i, CPU0_ID);
		wakeupgen_writel(0, i, CPU1_ID);
	}

	/*
	 * Override GIC architecture specific functions to add the
	 * OMAP WakeupGen interrupt controller along with the GIC
	 */
	gic_arch_extn.irq_mask = wakeupgen_mask;
	gic_arch_extn.irq_unmask = wakeupgen_unmask;
	gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;

	/*
	 * FIXME: Add support to set_smp_affinity() once the core
	 * GIC code has necessary hooks in place.
	 */

	/* Associate all the IRQs to the boot CPU like GIC init does. */
	for (i = 0; i < max_irqs; i++)
		irq_target_cpu[i] = boot_cpu;

	irq_hotplug_init();
	irq_pm_init();

	return 0;
}