/*
 * OMAP WakeupGen Source file
 *
 * OMAP WakeupGen is the interrupt controller extension used along
 * with the ARM GIC to wake the CPU out of low power states on
 * external interrupts. It is responsible for generating wakeup
 * events from the incoming interrupts and enable bits. It is
 * implemented in the MPU always-ON power domain. During normal operation,
 * WakeupGen delivers external interrupts directly to the GIC.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/cpu_pm.h>

#include "omap-wakeupgen.h"
#include "omap-secure.h"

#include "soc.h"
#include "omap4-sar-layout.h"
#include "common.h"
#include "pm.h"

#define AM43XX_NR_REG_BANKS	7
#define AM43XX_IRQS		224
#define MAX_NR_REG_BANKS	AM43XX_NR_REG_BANKS
#define MAX_IRQS		AM43XX_IRQS
#define DEFAULT_NR_REG_BANKS	5
#define DEFAULT_IRQS		160
#define WKG_MASK_ALL		0x00000000
#define WKG_UNMASK_ALL		0xffffffff
#define CPU_ENA_OFFSET		0x400
#define CPU0_ID			0x0
#define CPU1_ID			0x1
#define OMAP4_NR_BANKS		4
#define OMAP4_NR_IRQS		128

#define SYS_NIRQ1_EXT_SYS_IRQ_1	7
#define SYS_NIRQ2_EXT_SYS_IRQ_2	119

static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
static unsigned int irq_target_cpu[MAX_IRQS];
static unsigned int irq_banks = DEFAULT_NR_REG_BANKS;
static unsigned int max_irqs = DEFAULT_IRQS;
static unsigned int omap_secure_apis;

#ifdef CONFIG_CPU_PM
static unsigned int wakeupgen_context[MAX_NR_REG_BANKS];
#endif

struct omap_wakeupgen_ops {
	void (*save_context)(void);
	void (*restore_context)(void);
};

static struct omap_wakeupgen_ops *wakeupgen_ops;

/*
 * Static helper functions.
 */
static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
{
	return readl_relaxed(wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
{
	writel_relaxed(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void sar_writel(u32 val, u32 offset, u8 idx)
{
	writel_relaxed(val, sar_base + offset + (idx * 4));
}

static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
{
	/*
	 * Each WakeupGen register controls 32 interrupts,
	 * i.e. 1 bit per SPI IRQ.
	 */
	*reg_index = irq >> 5;
	*bit_posn = irq % 32;

	return 0;
}

static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val &= ~BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val |= BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

/*
 * Architecture specific Mask extension
 */
static void wakeupgen_mask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_clear(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_mask_parent(d);
}

/*
 * Architecture specific Unmask extension
 */
static void wakeupgen_unmask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_set(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_unmask_parent(d);
}

/*
 * The sys_nirq pins bypass peripheral modules and are wired directly
 * to the MPUSS WakeupGen. They get automatically inverted for the GIC.
 */
static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
{
	bool inverted = false;

	switch (type) {
	case IRQ_TYPE_LEVEL_LOW:
		type &= ~IRQ_TYPE_LEVEL_MASK;
		type |= IRQ_TYPE_LEVEL_HIGH;
		inverted = true;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		type &= ~IRQ_TYPE_EDGE_BOTH;
		type |= IRQ_TYPE_EDGE_RISING;
		inverted = true;
		break;
	default:
		break;
	}

	if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
	    d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
		pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
			d->hwirq);

	return irq_chip_set_type_parent(d, type);
}

#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);

static void _wakeupgen_save_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
}

static void _wakeupgen_restore_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
}

static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(reg, i, cpu);
}

/*
 * Mask or unmask all interrupts on given CPU.
 *	0 = Unmask all interrupts on the 'cpu' and restore the saved masks
 *	1 = Save the current masks and mask all interrupts on the 'cpu'
 * Ensure that the initial mask is maintained. This is faster than
 * iterating through GIC registers to arrive at the correct masks.
 */
static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	if (set) {
		_wakeupgen_save_masks(cpu);
		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
	} else {
		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
		_wakeupgen_restore_masks(cpu);
	}
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}
#endif

#ifdef CONFIG_CPU_PM
static inline void omap4_irq_save_context(void)
{
	u32 i, val;

	if (omap_rev() == OMAP4430_REV_ES1_0)
		return;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 127 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);

		/*
		 * Disable the secure interrupts for CPUx. The restore
		 * code blindly restores secure and non-secure interrupt
		 * masks from SAR RAM. Secure interrupts are not supposed
		 * to be enabled from HLOS. So overwrite the SAR location
		 * so that the secure interrupts remain disabled.
		 */
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + AUXCOREBOOT1_OFFSET);

	/* Save SyncReq generation logic */
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
	writel_relaxed(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
	writel_relaxed(val, sar_base + PTMSYNCREQ_EN_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
}

static inline void omap5_irq_save_context(void)
{
	u32 i, val;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 159 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
}

static inline void am43xx_irq_save_context(void)
{
	u32 i;

	for (i = 0; i < irq_banks; i++) {
		wakeupgen_context[i] = wakeupgen_readl(i, 0);
		wakeupgen_writel(0, i, CPU0_ID);
	}
}

/*
 * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
 * ROM code. WakeupGen IP is integrated along with GIC to manage the
 * interrupt wakeups from CPU low power states. It manages
 * masking/unmasking of Shared Peripheral Interrupts (SPI). So the
 * interrupt enable/disable control should be in sync and consistent
 * at WakeupGen and GIC so that interrupts are not lost.
 */
static void irq_save_context(void)
{
	/* DRA7 has no SAR to save */
	if (soc_is_dra7xx())
		return;

	if (wakeupgen_ops && wakeupgen_ops->save_context)
		wakeupgen_ops->save_context();
}

/*
 * Clear WakeupGen SAR backup status.
 */
static void irq_sar_clear(void)
{
	u32 val;
	u32 offset = SAR_BACKUP_STATUS_OFFSET;

	/* DRA7 has no SAR to save */
	if (soc_is_dra7xx())
		return;

	if (soc_is_omap54xx())
		offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;

	val = readl_relaxed(sar_base + offset);
	val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + offset);
}

static void am43xx_irq_restore_context(void)
{
	u32 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(wakeupgen_context[i], i, CPU0_ID);
}

static void irq_restore_context(void)
{
	if (wakeupgen_ops && wakeupgen_ops->restore_context)
		wakeupgen_ops->restore_context();
}

/*
 * Save GIC and Wakeupgen interrupt context using secure API
 * for HS/EMU devices.
 */
static void irq_save_secure_context(void)
{
	u32 ret;

	ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
				     FLAG_START_CRITICAL,
				     0, 0, 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK)
		pr_err("GIC and Wakeupgen context save failed\n");
}

/* Define ops for context save and restore for each SoC */
static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {
	.save_context = omap4_irq_save_context,
	.restore_context = irq_sar_clear,
};

static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {
	.save_context = omap5_irq_save_context,
	.restore_context = irq_sar_clear,
};

static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {
	.save_context = am43xx_irq_save_context,
	.restore_context = am43xx_irq_restore_context,
};
#else
static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {};
static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {};
static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {};
#endif

#ifdef CONFIG_HOTPLUG_CPU
static int omap_wakeupgen_cpu_online(unsigned int cpu)
{
	wakeupgen_irqmask_all(cpu, 0);
	return 0;
}

static int omap_wakeupgen_cpu_dead(unsigned int cpu)
{
	wakeupgen_irqmask_all(cpu, 1);
	return 0;
}

static void __init irq_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/omap-wake:online",
				  omap_wakeupgen_cpu_online, NULL);
	cpuhp_setup_state_nocalls(CPUHP_ARM_OMAP_WAKE_DEAD,
				  "arm/omap-wake:dead", NULL,
				  omap_wakeupgen_cpu_dead);
}
#else
static void __init irq_hotplug_init(void)
{}
#endif

#ifdef CONFIG_CPU_PM
static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP || soc_is_am43xx())
			irq_save_context();
		else
			irq_save_secure_context();
		break;
	case CPU_CLUSTER_PM_EXIT:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP || soc_is_am43xx())
			irq_restore_context();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block irq_notifier_block = {
	.notifier_call = irq_notifier,
};

static void __init irq_pm_init(void)
{
	/* FIXME: Remove this when MPU OSWR support is added */
	if (!IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
		cpu_pm_register_notifier(&irq_notifier_block);
}
#else
static void __init irq_pm_init(void)
{}
#endif

void __iomem *omap_get_wakeupgen_base(void)
{
	return wakeupgen_base;
}

int omap_secure_apis_support(void)
{
	return omap_secure_apis;
}

static struct irq_chip wakeupgen_chip = {
	.name			= "WUGEN",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= wakeupgen_mask,
	.irq_unmask		= wakeupgen_unmask,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= wakeupgen_irq_set_type,
	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

static int wakeupgen_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2];
		return 0;
	}

	return -EINVAL;
}

static int wakeupgen_domain_alloc(struct irq_domain *domain,
				  unsigned int virq,
				  unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	int i;

	if (fwspec->param_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (fwspec->param[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = fwspec->param[1];
	if (hwirq >= MAX_IRQS)
		return -EINVAL;	/* Can't deal with this */

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &wakeupgen_chip, NULL);

	parent_fwspec = *fwspec;
	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops wakeupgen_domain_ops = {
	.translate	= wakeupgen_domain_translate,
	.alloc		= wakeupgen_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

/*
 * Initialise the WakeupGen module.
 */
static int __init wakeupgen_init(struct device_node *node,
				 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int i;
	unsigned int boot_cpu = smp_processor_id();
	u32 val;

	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}

	/* Not supported on OMAP4 ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
		return -EPERM;
	}

	/* Static mapping, never released */
	wakeupgen_base = of_iomap(node, 0);
	if (WARN_ON(!wakeupgen_base))
		return -ENOMEM;

	if (cpu_is_omap44xx()) {
		irq_banks = OMAP4_NR_BANKS;
		max_irqs = OMAP4_NR_IRQS;
		omap_secure_apis = 1;
		wakeupgen_ops = &omap4_wakeupgen_ops;
	} else if (soc_is_omap54xx()) {
		wakeupgen_ops = &omap5_wakeupgen_ops;
	} else if (soc_is_am43xx()) {
		irq_banks = AM43XX_NR_REG_BANKS;
		max_irqs = AM43XX_IRQS;
		wakeupgen_ops = &am43xx_wakeupgen_ops;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs,
					  node, &wakeupgen_domain_ops,
					  NULL);
	if (!domain) {
		iounmap(wakeupgen_base);
		return -ENOMEM;
	}

	/* Clear all IRQ bitmasks at WakeupGen level */
	for (i = 0; i < irq_banks; i++) {
		wakeupgen_writel(0, i, CPU0_ID);
		if (!soc_is_am43xx())
			wakeupgen_writel(0, i, CPU1_ID);
	}

	/*
	 * FIXME: Add support to set_smp_affinity() once the core
	 * GIC code has necessary hooks in place.
	 */

	/* Associate all the IRQs to the boot CPU like GIC init does. */
	for (i = 0; i < max_irqs; i++)
		irq_target_cpu[i] = boot_cpu;

	/*
	 * Enable OMAP5 ES2 PM mode using ES2_PM_MODE in AMBA_IF_MODE
	 * 0x0: ES1 behavior, CPU cores would enter and exit OFF mode together.
	 * 0x1: ES2 behavior, CPU cores are allowed to enter/exit OFF mode
	 *	independently.
	 * This only needs to be set once, since the register is in the
	 * always-ON power domain.
	 *
	 * We do not support ES1 behavior anymore. OMAP5 is assumed to be
	 * ES2.0, and the same is applicable for DRA7.
	 */
	if (soc_is_omap54xx() || soc_is_dra7xx()) {
		val = __raw_readl(wakeupgen_base + OMAP_AMBA_IF_MODE);
		val |= BIT(5);
		omap_smc1(OMAP5_MON_AMBA_IF_INDEX, val);
	}

	irq_hotplug_init();
	irq_pm_init();

	sar_base = omap4_get_sar_ram_base();

	return 0;
}
IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);