/*
 * OMAP WakeupGen Source file
 *
 * OMAP WakeupGen is the interrupt controller extension used along
 * with the ARM GIC to wake the CPU from low-power states on
 * external interrupts. It is responsible for generating wakeup
 * events from the incoming interrupts and enable bits. It is
 * implemented in the MPU always-ON power domain. During normal operation,
 * WakeupGen delivers external interrupts directly to the GIC.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/cpu_pm.h>

#include "omap-wakeupgen.h"
#include "omap-secure.h"

#include "soc.h"
#include "omap4-sar-layout.h"
#include "common.h"
#include "pm.h"

#define AM43XX_NR_REG_BANKS	7
#define AM43XX_IRQS		224
#define MAX_NR_REG_BANKS	AM43XX_NR_REG_BANKS
#define MAX_IRQS		AM43XX_IRQS
#define DEFAULT_NR_REG_BANKS	5
#define DEFAULT_IRQS		160
#define WKG_MASK_ALL		0x00000000
#define WKG_UNMASK_ALL		0xffffffff
#define CPU_ENA_OFFSET		0x400
#define CPU0_ID			0x0
#define CPU1_ID			0x1
#define OMAP4_NR_BANKS		4
#define OMAP4_NR_IRQS		128

static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
static unsigned int irq_target_cpu[MAX_IRQS];
static unsigned int irq_banks = DEFAULT_NR_REG_BANKS;
static unsigned int max_irqs = DEFAULT_IRQS;
static unsigned int omap_secure_apis;

/*
 * Static helper functions.
 */
static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
{
	return readl_relaxed(wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
{
	writel_relaxed(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void sar_writel(u32 val, u32 offset, u8 idx)
{
	writel_relaxed(val, sar_base + offset + (idx * 4));
}
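
/*
 * Register layout used by the helpers above: each CPU has its own block
 * of enable registers starting at OMAP_WKG_ENB_A_0 + cpu * CPU_ENA_OFFSET,
 * with one 32-bit bank every 4 bytes. Worked example, purely illustrative:
 * SPI hwirq 37 lives in bank 37 >> 5 = 1, bit 37 % 32 = 5, so masking it
 * for CPU1 touches bit 5 of the register at
 * OMAP_WKG_ENB_A_0 + 0x400 + 0x4.
 */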
static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
{
	/*
	 * Each WakeupGen register controls 32 interrupts,
	 * i.e. one bit per SPI IRQ.
	 */
	*reg_index = irq >> 5;
	*bit_posn = irq % 32;

	return 0;
}

static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val &= ~BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val |= BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

/*
 * Architecture specific Mask extension
 */
static void wakeupgen_mask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_clear(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_mask_parent(d);
}

/*
 * Architecture specific Unmask extension
 */
static void wakeupgen_unmask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_set(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_unmask_parent(d);
}
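
/*
 * The wakeupgen_mask()/wakeupgen_unmask() callbacks above are used by an
 * irq_chip stacked on top of the GIC: they flip the wakeup enable bit for
 * the IRQ's target CPU and then forward the operation to the parent GIC
 * via irq_chip_mask_parent()/irq_chip_unmask_parent(), so the enable
 * state stays consistent at both levels (see the context save comments
 * further down).
 */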
#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);

static void _wakeupgen_save_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
}

static void _wakeupgen_restore_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
}

static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(reg, i, cpu);
}

/*
 * Mask or unmask all interrupts on given CPU.
 *	1 = Mask all interrupts on the 'cpu' (current masks are saved first)
 *	0 = Unmask all interrupts on the 'cpu' and restore the saved masks
 * Ensure that the initial mask is maintained. This is faster than
 * iterating through GIC registers to arrive at the correct masks.
 */
static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	if (set) {
		_wakeupgen_save_masks(cpu);
		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
	} else {
		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
		_wakeupgen_restore_masks(cpu);
	}
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}
#endif

#ifdef CONFIG_CPU_PM
static inline void omap4_irq_save_context(void)
{
	u32 i, val;

	if (omap_rev() == OMAP4430_REV_ES1_0)
		return;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 127 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);

		/*
		 * Disable the secure interrupts for CPUx. The restore
		 * code blindly restores secure and non-secure interrupt
		 * masks from SAR RAM. Secure interrupts are not supposed
		 * to be enabled from HLOS. So overwrite the SAR location
		 * so that the secure interrupt remains disabled.
		 */
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + AUXCOREBOOT1_OFFSET);

	/* Save SyncReq generation logic */
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
	writel_relaxed(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
	writel_relaxed(val, sar_base + PTMSYNCREQ_EN_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
}

static inline void omap5_irq_save_context(void)
{
	u32 i, val;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 159 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
}

/*
 * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
 * ROM code. The WakeupGen IP is integrated along with the GIC to manage
 * interrupt wakeups from CPU low power states. It handles
 * masking/unmasking of Shared Peripheral Interrupts (SPI). So the
 * interrupt enable/disable control should be in sync and consistent
 * at WakeupGen and GIC so that interrupts are not lost.
 */
static void irq_save_context(void)
{
	/* DRA7 has no SAR to save */
	if (soc_is_dra7xx())
		return;

	if (!sar_base)
		sar_base = omap4_get_sar_ram_base();

	if (soc_is_omap54xx())
		omap5_irq_save_context();
	else
		omap4_irq_save_context();
}

/*
 * Clear WakeupGen SAR backup status.
 */
static void irq_sar_clear(void)
{
	u32 val;
	u32 offset = SAR_BACKUP_STATUS_OFFSET;

	/* DRA7 has no SAR to save */
	if (soc_is_dra7xx())
		return;

	if (soc_is_omap54xx())
		offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;

	val = readl_relaxed(sar_base + offset);
	val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + offset);
}
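
/*
 * Summary of the low-power flow implemented here: on CPU_CLUSTER_PM_ENTER
 * the notifier below calls irq_save_context() on GP devices, so the
 * per-CPU mask banks land in SAR RAM and the backup status bit is set.
 * The boot ROM restores the WakeupGen registers from SAR RAM on wakeup,
 * and CPU_CLUSTER_PM_EXIT only needs irq_sar_clear() to drop the backup
 * status bit again.
 */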
/*
 * Save GIC and WakeupGen interrupt context using the secure API
 * for HS/EMU devices.
 */
static void irq_save_secure_context(void)
{
	u32 ret;

	ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
				     FLAG_START_CRITICAL,
				     0, 0, 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK)
		pr_err("GIC and Wakeupgen context save failed\n");
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
static int irq_cpu_hotplug_notify(struct notifier_block *self,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)hcpu;

	/*
	 * Corresponding FROZEN transitions do not have to be handled,
	 * they are handled at a higher level
	 * (drivers/cpuidle/coupled.c).
	 */
	switch (action) {
	case CPU_ONLINE:
		wakeupgen_irqmask_all(cpu, 0);
		break;
	case CPU_DEAD:
		wakeupgen_irqmask_all(cpu, 1);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block irq_hotplug_notifier = {
	.notifier_call = irq_cpu_hotplug_notify,
};

static void __init irq_hotplug_init(void)
{
	register_hotcpu_notifier(&irq_hotplug_notifier);
}
#else
static void __init irq_hotplug_init(void)
{}
#endif

#ifdef CONFIG_CPU_PM
static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
			irq_save_context();
		else
			irq_save_secure_context();
		break;
	case CPU_CLUSTER_PM_EXIT:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
			irq_sar_clear();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block irq_notifier_block = {
	.notifier_call = irq_notifier,
};

static void __init irq_pm_init(void)
{
	/* FIXME: Remove this when MPU OSWR support is added */
	if (!IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
		cpu_pm_register_notifier(&irq_notifier_block);
}
#else
static void __init irq_pm_init(void)
{}
#endif

void __iomem *omap_get_wakeupgen_base(void)
{
	return wakeupgen_base;
}

int omap_secure_apis_support(void)
{
	return omap_secure_apis;
}

static struct irq_chip wakeupgen_chip = {
	.name			= "WUGEN",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= wakeupgen_mask,
	.irq_unmask		= wakeupgen_unmask,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= irq_chip_set_type_parent,
	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

static int wakeupgen_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2];
		return 0;
	}

	return -EINVAL;
}
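
/*
 * A client device therefore describes its interrupt with the usual three
 * GIC cells, e.g. (hypothetical consumer, assuming the GIC_SPI/IRQ_TYPE
 * macros from dt-bindings):
 *
 *	interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
 *
 * which wakeupgen_domain_translate() turns into hwirq 41 of this domain.
 * PPIs (first cell != 0) are rejected and must target the GIC directly.
 */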
static int wakeupgen_domain_alloc(struct irq_domain *domain,
				  unsigned int virq,
				  unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	int i;

	if (fwspec->param_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (fwspec->param[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = fwspec->param[1];
	if (hwirq >= MAX_IRQS)
		return -EINVAL;	/* Can't deal with this */

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &wakeupgen_chip, NULL);

	parent_fwspec = *fwspec;
	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops wakeupgen_domain_ops = {
	.translate	= wakeupgen_domain_translate,
	.alloc		= wakeupgen_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

/*
 * Initialise the WakeupGen module.
 */
static int __init wakeupgen_init(struct device_node *node,
				 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int i;
	unsigned int boot_cpu = smp_processor_id();
	u32 val;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to obtain parent domain\n", node->full_name);
		return -ENXIO;
	}

	/* Not supported on OMAP4 ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
		return -EPERM;
	}

	/* Static mapping, never released */
	wakeupgen_base = of_iomap(node, 0);
	if (WARN_ON(!wakeupgen_base))
		return -ENOMEM;

	if (cpu_is_omap44xx()) {
		irq_banks = OMAP4_NR_BANKS;
		max_irqs = OMAP4_NR_IRQS;
		omap_secure_apis = 1;
	} else if (soc_is_am43xx()) {
		irq_banks = AM43XX_NR_REG_BANKS;
		max_irqs = AM43XX_IRQS;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs,
					  node, &wakeupgen_domain_ops,
					  NULL);
	if (!domain) {
		iounmap(wakeupgen_base);
		return -ENOMEM;
	}

	/* Clear all IRQ bitmasks at WakeupGen level */
	for (i = 0; i < irq_banks; i++) {
		wakeupgen_writel(0, i, CPU0_ID);
		if (!soc_is_am43xx())
			wakeupgen_writel(0, i, CPU1_ID);
	}

	/*
	 * FIXME: Add support to set_smp_affinity() once the core
	 * GIC code has necessary hooks in place.
	 */

	/* Associate all the IRQs to the boot CPU like GIC init does. */
	for (i = 0; i < max_irqs; i++)
		irq_target_cpu[i] = boot_cpu;

	/*
	 * Enable the OMAP5 ES2 PM mode using ES2_PM_MODE in AMBA_IF_MODE:
	 * 0x0: ES1 behavior, CPU cores enter and exit OFF mode together.
	 * 0x1: ES2 behavior, CPU cores are allowed to enter/exit OFF mode
	 *	independently.
	 * This only needs to be set once, since the WakeupGen is in the
	 * always-ON power domain.
	 *
	 * We do not support ES1 behavior anymore. OMAP5 is assumed to be
	 * ES2.0, and the same is applicable for DRA7.
	 */
	if (soc_is_omap54xx() || soc_is_dra7xx()) {
		val = __raw_readl(wakeupgen_base + OMAP_AMBA_IF_MODE);
		val |= BIT(5);
		omap_smc1(OMAP5_MON_AMBA_IF_INDEX, val);
	}

	irq_hotplug_init();
	irq_pm_init();

	return 0;
}
IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);
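
/*
 * For reference, a matching device tree node looks roughly like the
 * following (illustrative sketch in the OMAP4 layout; the unit address
 * and reg value differ on other SoCs, so consult the SoC dtsi):
 *
 *	wakeupgen: interrupt-controller@48281000 {
 *		compatible = "ti,omap4-wugen-mpu";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		reg = <0x48281000 0x1000>;
 *		interrupt-parent = <&gic>;
 *	};
 */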