// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpu_pm.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>

#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF

struct gpio_regs {
	u32 sysconfig;
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};

struct gpio_bank {
	void __iomem *base;
	const struct omap_gpio_reg_offs *regs;
	struct device *dev;

	int irq;
	u32 non_wakeup_gpios;
	u32 enabled_non_wakeup_gpios;
	struct gpio_regs context;
	u32 saved_datain;
	u32 level_mask;
	u32 toggle_mask;
	raw_spinlock_t lock;
	raw_spinlock_t wa_lock;
	struct gpio_chip chip;
	struct clk *dbck;
	struct notifier_block nb;
	unsigned int is_suspended:1;
	unsigned int needs_resume:1;
	u32 mod_usage;
	u32 irq_usage;
	u32 dbck_enable_mask;
	bool dbck_enabled;
	bool is_mpuio;
	bool dbck_flag;
	bool loses_context;
	bool context_valid;
	int stride;
	u32 width;
	int context_loss_count;

	void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);
};

#define GPIO_MOD_CTRL_BIT	BIT(0)

#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
#define LINE_USED(line, offset) (line & (BIT(offset)))

static void omap_gpio_unmask_irq(struct irq_data *d);

static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
	return gpiochip_get_data(chip);
}

static inline u32 omap_gpio_rmw(void __iomem *reg, u32 mask, bool set)
{
	u32 val = readl_relaxed(reg);

	if (set)
		val |= mask;
	else
		val &= ~mask;

	writel_relaxed(val, reg);

	return val;
}

static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
				    int is_input)
{
	bank->context.oe = omap_gpio_rmw(bank->base + bank->regs->direction,
					 BIT(gpio), is_input);
}

/* set data out value using the dedicated set/clear register */
static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
				      int enable)
{
	void __iomem *reg = bank->base;
	u32 l = BIT(offset);

	if (enable) {
		reg += bank->regs->set_dataout;
		bank->context.dataout |= l;
	} else {
		reg += bank->regs->clr_dataout;
		bank->context.dataout &= ~l;
	}

	writel_relaxed(l, reg);
}

/* set data out value using the mask register */
static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
				       int enable)
{
	bank->context.dataout = omap_gpio_rmw(bank->base + bank->regs->dataout,
					      BIT(offset), enable);
}

static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_enable(bank->dbck);
		bank->dbck_enabled = true;

		writel_relaxed(bank->dbck_enable_mask,
			       bank->base + bank->regs->debounce_en);
	}
}

static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		/*
		 * Disable debounce before cutting its clock. If debounce is
		 * enabled but the clock is not, the GPIO module seems to be
		 * unable to detect events and generate interrupts, at least
		 * on OMAP3.
		 */
		writel_relaxed(0, bank->base + bank->regs->debounce_en);

		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/**
 * omap2_set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps
 *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
 * so we need to convert and round up to the closest unit.
 *
 * Return: 0 on success, negative error otherwise.
 */
static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
				   unsigned debounce)
{
	u32 val;
	u32 l;
	bool enable = !!debounce;

	if (!bank->dbck_flag)
		return -ENOTSUPP;

	if (enable) {
		debounce = DIV_ROUND_UP(debounce, 31) - 1;
		if ((debounce & OMAP4_GPIO_DEBOUNCINGTIME_MASK) != debounce)
			return -EINVAL;
	}

	l = BIT(offset);

	clk_enable(bank->dbck);
	writel_relaxed(debounce, bank->base + bank->regs->debounce);

	val = omap_gpio_rmw(bank->base + bank->regs->debounce_en, l, enable);
	bank->dbck_enable_mask = val;

	clk_disable(bank->dbck);
	/*
	 * Enable the debounce clock per module.
	 * This call is mandatory because in omap_gpio_request(), when
	 * *_runtime_get_sync() is called, _gpio_dbck_enable() within the
	 * runtime callback fails to turn on dbck because dbck_enable_mask,
	 * used within _gpio_dbck_enable(), is still not initialized at
	 * that point. Therefore we have to enable dbck here.
	 */
	omap_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}

	return 0;
}

/**
 * omap_clear_gpio_debounce - clear debounce settings for a gpio
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 *
 * If a gpio is using debounce, then clear the debounce enable bit and if
 * this is the only gpio in this bank using debounce, then clear the debounce
 * time too. The debounce clock will also be disabled when calling this
 * function if this is the only gpio in the bank using debounce.
 */
static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
{
	u32 gpio_bit = BIT(offset);

	if (!bank->dbck_flag)
		return;

	if (!(bank->dbck_enable_mask & gpio_bit))
		return;

	bank->dbck_enable_mask &= ~gpio_bit;
	bank->context.debounce_en &= ~gpio_bit;
	writel_relaxed(bank->context.debounce_en,
		       bank->base + bank->regs->debounce_en);

	if (!bank->dbck_enable_mask) {
		bank->context.debounce = 0;
		writel_relaxed(bank->context.debounce, bank->base +
			       bank->regs->debounce);
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/*
 * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
 * See the TRM section on GPIO "Wake-Up Generation" for the list of GPIOs
 * in the wakeup domain. If bank->non_wakeup_gpios is not configured, assume
 * none are capable of waking up the system from off mode.
 */
static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
{
	u32 no_wake = bank->non_wakeup_gpios;

	if (no_wake)
		return !!(~no_wake & gpio_mask);

	return false;
}

static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
					 unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = BIT(gpio);

	omap_gpio_rmw(base + bank->regs->leveldetect0, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_LOW);
	omap_gpio_rmw(base + bank->regs->leveldetect1, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_HIGH);

	/*
	 * We need the edge detection enabled to allow the GPIO block
	 * to be woken from idle state. Set the appropriate edge detection
	 * in addition to the level detection.
	 */
	omap_gpio_rmw(base + bank->regs->risingdetect, gpio_bit,
		      trigger & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH));
	omap_gpio_rmw(base + bank->regs->fallingdetect, gpio_bit,
		      trigger & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW));

	bank->context.leveldetect0 =
		readl_relaxed(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
		readl_relaxed(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
		readl_relaxed(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
		readl_relaxed(bank->base + bank->regs->fallingdetect);

	bank->level_mask = bank->context.leveldetect0 |
			   bank->context.leveldetect1;

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes,
		 * to avoid losing the irq during PER RET/OFF mode.
		 * Applies to omap2 non-wakeup gpios and all omap3 gpios.
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}
}

/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once. For all other chips, this function is a noop.
 */
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	if (IS_ENABLED(CONFIG_ARCH_OMAP1) && bank->regs->irqctrl) {
		void __iomem *reg = bank->base + bank->regs->irqctrl;

		writel_relaxed(readl_relaxed(reg) ^ BIT(gpio), reg);
	}
}

static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
				    unsigned trigger)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		omap_set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = readl_relaxed(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= BIT(gpio);
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= BIT(gpio);
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(BIT(gpio));
		else
			return -EINVAL;

		writel_relaxed(l, reg);
	} else if (bank->regs->edgectrl1) {
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = readl_relaxed(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= BIT(gpio << 1);
		writel_relaxed(l, reg);
	}
	return 0;
}

static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static int omap_gpio_is_input(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return readl_relaxed(reg) & BIT(offset);
}

static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned offset)
{
	if (!LINE_USED(bank->mod_usage, offset)) {
		omap_enable_gpio_module(bank, offset);
		omap_set_gpio_direction(bank, offset, 1);
	}
	bank->irq_usage |= BIT(offset);
}

static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	int retval;
	unsigned long flags;
	unsigned offset = d->hwirq;

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	if (!bank->regs->leveldetect0 &&
	    (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	raw_spin_lock_irqsave(&bank->lock, flags);
	retval = omap_set_gpio_triggering(bank, offset, type);
	if (retval) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		goto error;
	}
	omap_gpio_init_irq(bank, offset);
	if (!omap_gpio_is_input(bank, offset)) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		retval = -EINVAL;
		goto error;
	}
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		/*
		 * Edge IRQs are already cleared/acked in the irq handler and
		 * do not need to be masked; as a result, the handle_edge_irq()
		 * logic would be redundant here and could cause interrupts to
		 * be lost. So just use handle_simple_irq.
		 */
		irq_set_handler_locked(d, handle_simple_irq);

	return 0;

error:
	return retval;
}

static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	writel_relaxed(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		writel_relaxed(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	readl_relaxed(reg);
}

static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank,
					     unsigned offset)
{
	omap_clear_gpio_irqbank(bank, BIT(offset));
}

static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (BIT(bank->width)) - 1;

	reg += bank->regs->irqenable;
	l = readl_relaxed(reg);
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}

static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
					   unsigned offset, int enable)
{
	void __iomem *reg = bank->base;
	u32 gpio_mask = BIT(offset);

	if (bank->regs->set_irqenable && bank->regs->clr_irqenable) {
		if (enable) {
			reg += bank->regs->set_irqenable;
			bank->context.irqenable1 |= gpio_mask;
		} else {
			reg += bank->regs->clr_irqenable;
			bank->context.irqenable1 &= ~gpio_mask;
		}
		writel_relaxed(gpio_mask, reg);
	} else {
		bank->context.irqenable1 =
			omap_gpio_rmw(reg + bank->regs->irqenable, gpio_mask,
				      enable ^ bank->regs->irqenable_inv);
	}

	/*
	 * Program GPIO wakeup along with IRQ enable to satisfy OMAP4430 TRM
	 * note requiring correlation between the IRQ enable registers and
	 * the wakeup registers. In any case, we want wakeup from idle
	 * enabled for the GPIOs which support this feature.
	 */
	if (bank->regs->wkup_en &&
	    (bank->regs->edgectrl1 || !(bank->non_wakeup_gpios & gpio_mask))) {
		bank->context.wake_en =
			omap_gpio_rmw(bank->base + bank->regs->wkup_en,
				      gpio_mask, enable);
	}
}

/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);

	return irq_set_irq_wake(bank->irq, enable);
}

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
{
	void __iomem *isr_reg = NULL;
	u32 enabled, isr, edge;
	unsigned int bit;
	struct gpio_bank *bank = gpiobank;
	unsigned long wa_lock_flags;
	unsigned long lock_flags;

	isr_reg = bank->base + bank->regs->irqstatus;
	if (WARN_ON(!isr_reg))
		goto exit;

	if (WARN_ONCE(!pm_runtime_active(bank->chip.parent),
		      "gpio irq%i while runtime suspended?\n", irq))
		return IRQ_NONE;

	while (1) {
		raw_spin_lock_irqsave(&bank->lock, lock_flags);

		enabled = omap_get_gpio_irqbank_mask(bank);
		isr = readl_relaxed(isr_reg) & enabled;

		/*
		 * Clear edge sensitive interrupts before calling handler(s)
		 * so subsequent edge transitions are not missed while the
		 * handlers are running.
		 */
		edge = isr & ~bank->level_mask;
		if (edge)
			omap_clear_gpio_irqbank(bank, edge);

		raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

		if (!isr)
			break;

		while (isr) {
			bit = __ffs(isr);
			isr &= ~(BIT(bit));

			raw_spin_lock_irqsave(&bank->lock, lock_flags);
			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time. If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (BIT(bit)))
				omap_toggle_gpio_edge_triggering(bank, bit);

			raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

			raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);

			generic_handle_domain_irq(bank->chip.irq.domain, bit);

			raw_spin_unlock_irqrestore(&bank->wa_lock,
						   wa_lock_flags);
		}
	}
exit:
	return IRQ_HANDLED;
}

static unsigned int omap_gpio_irq_startup(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);

	if (!LINE_USED(bank->mod_usage, offset))
		omap_set_gpio_direction(bank, offset, 1);
	omap_enable_gpio_module(bank, offset);
	bank->irq_usage |= BIT(offset);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
	omap_gpio_unmask_irq(d);

	return 0;
}

static void omap_gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->irq_usage &= ~(BIT(offset));
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	omap_clear_gpio_irqstatus(bank, offset);
	omap_set_gpio_irqenable(bank, offset, 0);
	if (!LINE_USED(bank->mod_usage, offset))
		omap_clear_gpio_debounce(bank, offset);
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_irq_bus_lock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	pm_runtime_get_sync(bank->chip.parent);
}

static void gpio_irq_bus_sync_unlock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	pm_runtime_put(bank->chip.parent);
}

static void omap_gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	omap_set_gpio_irqenable(bank, offset, 0);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	gpiochip_disable_irq(&bank->chip, offset);
}

static void omap_gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	gpiochip_enable_irq(&bank->chip, offset);
	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_irqenable(bank, offset, 1);

	/*
	 * For level-triggered GPIOs, clearing must be done after the source
	 * is cleared, thus after the handler has run. OMAP4 needs this done
	 * after enabling the interrupt to clear the wakeup status.
	 */
	if (bank->regs->leveldetect0 && bank->regs->wkup_en &&
	    trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
		omap_clear_gpio_irqstatus(bank, offset);

	if (trigger)
		omap_set_gpio_triggering(bank, offset, trigger);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);

	seq_printf(p, dev_name(bank->dev));
}

static const struct irq_chip omap_gpio_irq_chip = {
	.irq_startup = omap_gpio_irq_startup,
	.irq_shutdown = omap_gpio_irq_shutdown,
	.irq_mask = omap_gpio_mask_irq,
	.irq_unmask = omap_gpio_unmask_irq,
	.irq_set_type = omap_gpio_irq_type,
	.irq_set_wake = omap_gpio_wake_enable,
	.irq_bus_lock = omap_gpio_irq_bus_lock,
	.irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
	.irq_print_chip = omap_gpio_irq_print_chip,
	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

static const struct irq_chip omap_gpio_irq_chip_nowake = {
	.irq_startup = omap_gpio_irq_startup,
	.irq_shutdown = omap_gpio_irq_shutdown,
	.irq_mask = omap_gpio_mask_irq,
	.irq_unmask = omap_gpio_unmask_irq,
	.irq_set_type = omap_gpio_irq_type,
	.irq_bus_lock = omap_gpio_irq_bus_lock,
	.irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
	.irq_print_chip = omap_gpio_irq_print_chip,
	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

/*---------------------------------------------------------------------*/

static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/* use platform_driver for this. */
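/*
 * The "mpuio" platform_device/platform_driver pair below is registered from
 * omap_mpuio_init() when an MPUIO bank is probed; it exists so that the
 * noirq suspend/resume callbacks above run for that bank, masking everything
 * except the lines recorded in bank->context.wake_en across system suspend.
 */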
static struct platform_driver omap_mpuio_driver = {
	.driver = {
		.name = "mpuio",
		.pm = &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name = "mpuio",
	.id = -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

static inline void omap_mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

/*---------------------------------------------------------------------*/

static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	pm_runtime_get_sync(chip->parent);

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_enable_gpio_module(bank, offset);
	bank->mod_usage |= BIT(offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->mod_usage &= ~(BIT(offset));
	if (!LINE_USED(bank->irq_usage, offset)) {
		omap_set_gpio_direction(bank, offset, 1);
		omap_clear_gpio_debounce(bank, offset);
	}
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	pm_runtime_put(chip->parent);
}

static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);

	if (readl_relaxed(bank->base + bank->regs->direction) & BIT(offset))
		return GPIO_LINE_DIRECTION_IN;

	return GPIO_LINE_DIRECTION_OUT;
}

static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_direction(bank, offset, 1);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *reg;

	if (omap_gpio_is_input(bank, offset))
		reg = bank->base + bank->regs->datain;
	else
		reg = bank->base + bank->regs->dataout;

	return (readl_relaxed(reg) & BIT(offset)) != 0;
}

static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	omap_set_gpio_direction(bank, offset, 0);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int omap_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
				  unsigned long *bits)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *base = bank->base;
	u32 direction, m, val = 0;

	direction = readl_relaxed(base + bank->regs->direction);

	m = direction & *mask;
	if (m)
		val |= readl_relaxed(base + bank->regs->datain) & m;

	m = ~direction & *mask;
	if (m)
		val |= readl_relaxed(base + bank->regs->dataout) & m;

	*bits = val;

	return 0;
}

static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
			      unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;
	int ret;

	bank = gpiochip_get_data(chip);

	raw_spin_lock_irqsave(&bank->lock, flags);
	ret = omap2_set_gpio_debounce(bank, offset, debounce);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (ret)
		dev_info(chip->parent,
			 "Could not set line %u debounce to %u microseconds (%d)",
			 offset, debounce, ret);

	return ret;
}

static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset,
				unsigned long config)
{
	u32 debounce;
	int ret = -ENOTSUPP;

	switch (pinconf_to_config_param(config)) {
	case PIN_CONFIG_BIAS_DISABLE:
	case PIN_CONFIG_BIAS_PULL_UP:
	case PIN_CONFIG_BIAS_PULL_DOWN:
		ret = gpiochip_generic_config(chip, offset, config);
		break;
	case PIN_CONFIG_INPUT_DEBOUNCE:
		debounce = pinconf_to_config_argument(config);
		ret = omap_gpio_debounce(chip, offset, debounce);
		break;
	default:
		break;
	}

	return ret;
}

static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
				   unsigned long *bits)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *reg = bank->base + bank->regs->dataout;
	unsigned long flags;
	u32 l;

	raw_spin_lock_irqsave(&bank->lock, flags);
	l = (readl_relaxed(reg) & ~*mask) | (*bits & *mask);
	writel_relaxed(l, reg);
	bank->context.dataout = l;
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/

static void omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = readw_relaxed(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}

static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		writel_relaxed(l, bank->base + bank->regs->irqenable);
		return;
	}

	omap_gpio_rmw(base + bank->regs->irqenable, l,
		      bank->regs->irqenable_inv);
	omap_gpio_rmw(base + bank->regs->irqstatus, l,
		      !bank->regs->irqenable_inv);
	if (bank->regs->debounce_en)
		writel_relaxed(0, base + bank->regs->debounce_en);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
	/* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		writel_relaxed(0, base + bank->regs->ctrl);
}

static int omap_gpio_chip_init(struct gpio_bank *bank, struct device *pm_dev)
{
	struct gpio_irq_chip *irq;
	static int gpio;
	const char *label;
	int ret;

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.get_direction = omap_gpio_get_direction;
	bank->chip.direction_input = omap_gpio_input;
	bank->chip.get = omap_gpio_get;
	bank->chip.get_multiple = omap_gpio_get_multiple;
	bank->chip.direction_output = omap_gpio_output;
	bank->chip.set_config = omap_gpio_set_config;
	bank->chip.set = omap_gpio_set;
	bank->chip.set_multiple = omap_gpio_set_multiple;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.parent = &omap_mpuio_device.dev;
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		label = devm_kasprintf(bank->chip.parent, GFP_KERNEL, "gpio-%d-%d",
				       gpio, gpio + bank->width - 1);
		if (!label)
			return -ENOMEM;
		bank->chip.label = label;
		bank->chip.base = -1;
	}
	bank->chip.ngpio = bank->width;

	irq = &bank->chip.irq;
	/* MPUIO is a bit different, reading IRQ status clears it */
	if (bank->is_mpuio && !bank->regs->wkup_en)
		gpio_irq_chip_set_chip(irq, &omap_gpio_irq_chip_nowake);
	else
		gpio_irq_chip_set_chip(irq, &omap_gpio_irq_chip);
	irq->handler = handle_bad_irq;
	irq->default_type = IRQ_TYPE_NONE;
	irq->num_parents = 1;
	irq->parents = &bank->irq;

	ret = gpiochip_add_data(&bank->chip, bank);
	if (ret)
		return dev_err_probe(bank->chip.parent, ret, "Could not register gpio chip\n");

	irq_domain_set_pm_device(bank->chip.irq.domain, pm_dev);
	ret = devm_request_irq(bank->chip.parent, bank->irq,
			       omap_gpio_irq_handler,
			       0, dev_name(bank->chip.parent), bank);
	if (ret)
		gpiochip_remove(&bank->chip);

	if (!bank->is_mpuio)
		gpio += bank->width;

	return ret;
}

static void omap_gpio_init_context(struct gpio_bank *p)
{
	const struct omap_gpio_reg_offs *regs = p->regs;
	void __iomem *base = p->base;

	p->context.sysconfig = readl_relaxed(base + regs->sysconfig);
	p->context.ctrl = readl_relaxed(base + regs->ctrl);
	p->context.oe = readl_relaxed(base + regs->direction);
	p->context.wake_en = readl_relaxed(base + regs->wkup_en);
	p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
	p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
	p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
	p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
	p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
	p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);
	p->context.dataout = readl_relaxed(base + regs->dataout);

	p->context_valid = true;
}

static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	const struct omap_gpio_reg_offs *regs = bank->regs;
	void __iomem *base = bank->base;

	writel_relaxed(bank->context.sysconfig, base + regs->sysconfig);
	writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
	writel_relaxed(bank->context.ctrl, base + regs->ctrl);
	writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
	writel_relaxed(bank->context.leveldetect1, base + regs->leveldetect1);
	writel_relaxed(bank->context.risingdetect, base + regs->risingdetect);
	writel_relaxed(bank->context.fallingdetect, base + regs->fallingdetect);
	writel_relaxed(bank->context.dataout, base + regs->dataout);
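	/*
	 * Direction (OE) is restored after dataout so that lines which become
	 * outputs again immediately drive the values restored above.
	 */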
	writel_relaxed(bank->context.oe, base + regs->direction);

	if (bank->dbck_enable_mask) {
		writel_relaxed(bank->context.debounce, base + regs->debounce);
		writel_relaxed(bank->context.debounce_en,
			       base + regs->debounce_en);
	}

	writel_relaxed(bank->context.irqenable1, base + regs->irqenable);
	writel_relaxed(bank->context.irqenable2, base + regs->irqenable2);
}

static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
	struct device *dev = bank->chip.parent;
	void __iomem *base = bank->base;
	u32 mask, nowake;

	bank->saved_datain = readl_relaxed(base + bank->regs->datain);

	/* Save sysconfig; its runtime value can differ from the init value */
	if (bank->loses_context)
		bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig);

	if (!bank->enabled_non_wakeup_gpios)
		goto update_gpio_context_count;

	/* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
	mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
	mask &= ~bank->context.risingdetect;
	bank->saved_datain |= mask;

	/* Check for pending EDGE_RISING, ignore EDGE_BOTH */
	mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
	mask &= ~bank->context.fallingdetect;
	bank->saved_datain &= ~mask;

	if (!may_lose_context)
		goto update_gpio_context_count;

	/*
	 * If going to OFF, remove triggering for all wkup domain
	 * non-wakeup GPIOs. Otherwise spurious IRQs will be
	 * generated. See OMAP2420 Errata item 1.101.
	 */
	if (!bank->loses_context && bank->enabled_non_wakeup_gpios) {
		nowake = bank->enabled_non_wakeup_gpios;
		omap_gpio_rmw(base + bank->regs->fallingdetect, nowake, ~nowake);
		omap_gpio_rmw(base + bank->regs->risingdetect, nowake, ~nowake);
	}

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
			bank->get_context_loss_count(dev);

	omap_gpio_dbck_disable(bank);
}

static void omap_gpio_unidle(struct gpio_bank *bank)
{
	struct device *dev = bank->chip.parent;
	u32 l = 0, gen, gen0, gen1;
	int c;

	/*
	 * On the first resume during the probe, the context has not
	 * been initialised and so initialise it now. Also initialise
	 * the context loss count.
	 */
	if (bank->loses_context && !bank->context_valid) {
		omap_gpio_init_context(bank);

		if (bank->get_context_loss_count)
			bank->context_loss_count =
				bank->get_context_loss_count(dev);
	}

	omap_gpio_dbck_enable(bank);

	if (bank->loses_context) {
		if (!bank->get_context_loss_count) {
			omap_gpio_restore_context(bank);
		} else {
			c = bank->get_context_loss_count(dev);
			if (c != bank->context_loss_count) {
				omap_gpio_restore_context(bank);
			} else {
				return;
			}
		}
	} else {
		/* Restore changes done for OMAP2420 errata 1.101 */
		writel_relaxed(bank->context.fallingdetect,
			       bank->base + bank->regs->fallingdetect);
		writel_relaxed(bank->context.risingdetect,
			       bank->base + bank->regs->risingdetect);
	}

	l = readl_relaxed(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state. If so, generate an IRQ by software. This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->context.fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->context.risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->context.fallingdetect) &
		   ~(bank->context.risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
		old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);

		if (!bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | gen, bank->base +
				       bank->regs->leveldetect0);
			writel_relaxed(old1 | gen, bank->base +
				       bank->regs->leveldetect1);
		}

		if (bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | l, bank->base +
				       bank->regs->leveldetect0);
			writel_relaxed(old1 | l, bank->base +
				       bank->regs->leveldetect1);
		}
		writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
		writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
	}
}

static int gpio_omap_cpu_notifier(struct notifier_block *nb,
				  unsigned long cmd, void *v)
{
	struct gpio_bank *bank;
	unsigned long flags;
	int ret = NOTIFY_OK;
	u32 isr, mask;

	bank = container_of(nb, struct gpio_bank, nb);

	raw_spin_lock_irqsave(&bank->lock, flags);
	if (bank->is_suspended)
		goto out_unlock;

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		mask = omap_get_gpio_irqbank_mask(bank);
		isr = readl_relaxed(bank->base + bank->regs->irqstatus) & mask;
		if (isr) {
			ret = NOTIFY_BAD;
			break;
		}
		omap_gpio_idle(bank, true);
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		omap_gpio_unidle(bank);
		break;
	}

out_unlock:
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return ret;
}

static const struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision = OMAP24XX_GPIO_REVISION,
	.sysconfig = OMAP24XX_GPIO_SYSCONFIG,
	.direction = OMAP24XX_GPIO_OE,
	.datain = OMAP24XX_GPIO_DATAIN,
	.dataout = OMAP24XX_GPIO_DATAOUT,
	.set_dataout = OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus = OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable = OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 = OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce = OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl = OMAP24XX_GPIO_CTRL,
	.wkup_en = OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect = OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect = OMAP24XX_GPIO_FALLINGDETECT,
};

static const struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision = OMAP4_GPIO_REVISION,
	.sysconfig = OMAP4_GPIO_SYSCONFIG,
	.direction = OMAP4_GPIO_OE,
	.datain = OMAP4_GPIO_DATAIN,
	.dataout = OMAP4_GPIO_DATAOUT,
	.set_dataout = OMAP4_GPIO_SETDATAOUT,
	.clr_dataout = OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus = OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 = OMAP4_GPIO_IRQSTATUS1,
	.irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0,
	.irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1,
	.irqenable = OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 = OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable = OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce = OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en = OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl = OMAP4_GPIO_CTRL,
	.wkup_en = OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 = OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 = OMAP4_GPIO_LEVELDETECT1,
	.risingdetect = OMAP4_GPIO_RISINGDETECT,
	.fallingdetect = OMAP4_GPIO_FALLINGDETECT,
};

static const struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

static const struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);

static int omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct omap_gpio_platform_data *pdata;
	struct gpio_bank *bank;
	int ret;

	pdata = device_get_match_data(dev);

	pdata = pdata ?: dev_get_platdata(dev);
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
	if (!bank)
		return -ENOMEM;

	bank->dev = dev;

	bank->irq = platform_get_irq(pdev, 0);
	if (bank->irq <= 0) {
		if (!bank->irq)
			bank->irq = -ENXIO;
		return dev_err_probe(dev, bank->irq, "can't get irq resource\n");
	}

	bank->chip.parent = dev;
	bank->chip.owner = THIS_MODULE;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->regs = pdata->regs;

	if (node) {
		if (!of_property_read_bool(node, "ti,gpio-always-on"))
			bank->loses_context = true;
	} else {
		bank->loses_context = pdata->loses_context;

		if (bank->loses_context)
			bank->get_context_loss_count =
				pdata->get_context_loss_count;
	}

	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = omap_set_gpio_dataout_reg;
	else
		bank->set_dataout = omap_set_gpio_dataout_mask;

	raw_spin_lock_init(&bank->lock);
	raw_spin_lock_init(&bank->wa_lock);

	/* Static mapping, never released */
	bank->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(bank->base)) {
		return PTR_ERR(bank->base);
	}

	if (bank->dbck_flag) {
		bank->dbck = devm_clk_get(dev, "dbclk");
		if (IS_ERR(bank->dbck)) {
			dev_err(dev,
				"Could not get gpio dbck. Disable debounce\n");
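			/* carry on without debounce support rather than failing the probe */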
			bank->dbck_flag = false;
		} else {
			clk_prepare(bank->dbck);
		}
	}

	platform_set_drvdata(pdev, bank);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	if (bank->is_mpuio)
		omap_mpuio_init(bank);

	omap_gpio_mod_init(bank);

	ret = omap_gpio_chip_init(bank, dev);
	if (ret) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
		if (bank->dbck_flag)
			clk_unprepare(bank->dbck);
		return ret;
	}

	omap_gpio_show_rev(bank);

	bank->nb.notifier_call = gpio_omap_cpu_notifier;
	cpu_pm_register_notifier(&bank->nb);

	pm_runtime_put(dev);

	return 0;
}

static int omap_gpio_remove(struct platform_device *pdev)
{
	struct gpio_bank *bank = platform_get_drvdata(pdev);

	cpu_pm_unregister_notifier(&bank->nb);
	gpiochip_remove(&bank->chip);
	pm_runtime_disable(&pdev->dev);
	if (bank->dbck_flag)
		clk_unprepare(bank->dbck);

	return 0;
}

static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_gpio_idle(bank, true);
	bank->is_suspended = true;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_gpio_unidle(bank);
	bank->is_suspended = false;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int __maybe_unused omap_gpio_suspend(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);

	if (bank->is_suspended)
		return 0;

	bank->needs_resume = 1;

	return omap_gpio_runtime_suspend(dev);
}

static int __maybe_unused omap_gpio_resume(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);

	if (!bank->needs_resume)
		return 0;

	bank->needs_resume = 0;

	return omap_gpio_runtime_resume(dev);
}

static const struct dev_pm_ops gpio_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
			   NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
};

static struct platform_driver omap_gpio_driver = {
	.probe = omap_gpio_probe,
	.remove = omap_gpio_remove,
	.driver = {
		.name = "omap_gpio",
		.pm = &gpio_pm_ops,
		.of_match_table = omap_gpio_match,
	},
};

/*
 * gpio driver register needs to be done before
 * machine_init functions access gpio APIs.
 * Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);

static void __exit omap_gpio_exit(void)
{
	platform_driver_unregister(&omap_gpio_driver);
}
module_exit(omap_gpio_exit);

MODULE_DESCRIPTION("omap gpio driver");
MODULE_ALIAS("platform:gpio-omap");
MODULE_LICENSE("GPL v2");
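
/*
 * Illustrative device tree usage (a sketch, not taken from any particular
 * board file; reg, interrupts and other SoC specifics omitted). A node
 * matched by omap_gpio_match above may carry the optional
 * "ti,gpio-always-on" property, which omap_gpio_probe() checks to decide
 * whether the bank can lose context:
 *
 *	gpio0: gpio {
 *		compatible = "ti,omap4-gpio";
 *		gpio-controller;
 *		#gpio-cells = <2>;
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *		ti,gpio-always-on;
 *	};
 */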