// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpu_pm.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>

#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF

struct gpio_regs {
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};

struct gpio_bank {
	void __iomem *base;
	const struct omap_gpio_reg_offs *regs;

	int irq;
	u32 non_wakeup_gpios;
	u32 enabled_non_wakeup_gpios;
	struct gpio_regs context;
	u32 saved_datain;
	u32 level_mask;
	u32 toggle_mask;
	raw_spinlock_t lock;
	raw_spinlock_t wa_lock;
	struct gpio_chip chip;
	struct clk *dbck;
	struct notifier_block nb;
	unsigned int is_suspended:1;
	u32 mod_usage;
	u32 irq_usage;
	u32 dbck_enable_mask;
	bool dbck_enabled;
	bool is_mpuio;
	bool dbck_flag;
	bool loses_context;
	bool context_valid;
	int stride;
	u32 width;
	int context_loss_count;

	void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);
};

#define GPIO_MOD_CTRL_BIT	BIT(0)

#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
#define LINE_USED(line, offset) (line & (BIT(offset)))

static void omap_gpio_unmask_irq(struct irq_data *d);

static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
	return gpiochip_get_data(chip);
}

static inline u32 omap_gpio_rmw(void __iomem *reg, u32 mask, bool set)
{
	u32 val = readl_relaxed(reg);

	if (set)
		val |= mask;
	else
		val &= ~mask;

	writel_relaxed(val, reg);

	return val;
}

static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
				    int is_input)
{
	bank->context.oe = omap_gpio_rmw(bank->base + bank->regs->direction,
					 BIT(gpio), is_input);
}

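/*
 * Two flavours of dataout update follow: banks that provide dedicated
 * SETDATAOUT/CLEARDATAOUT registers can change a single line with one
 * posted write, while banks with only a DATAOUT register fall back to a
 * read-modify-write via omap_gpio_rmw(). The probe code selects the
 * variant based on which register offsets the bank describes.
 */
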
/* set data out value using dedicated set/clear register */
static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
				      int enable)
{
	void __iomem *reg = bank->base;
	u32 l = BIT(offset);

	if (enable) {
		reg += bank->regs->set_dataout;
		bank->context.dataout |= l;
	} else {
		reg += bank->regs->clr_dataout;
		bank->context.dataout &= ~l;
	}

	writel_relaxed(l, reg);
}

/* set data out value using mask register */
static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
				       int enable)
{
	bank->context.dataout = omap_gpio_rmw(bank->base + bank->regs->dataout,
					      BIT(offset), enable);
}

static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_enable(bank->dbck);
		bank->dbck_enabled = true;

		writel_relaxed(bank->dbck_enable_mask,
			       bank->base + bank->regs->debounce_en);
	}
}

static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		/*
		 * Disable debounce before cutting its clock. If debounce is
		 * enabled but the clock is not, the GPIO module seems to be
		 * unable to detect events and generate interrupts, at least
		 * on OMAP3.
		 */
		writel_relaxed(0, bank->base + bank->regs->debounce_en);

		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/**
 * omap2_set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps
 *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
 * so we need to convert and round up to the closest unit.
 *
 * Return: 0 on success, negative error otherwise.
 */
static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
				   unsigned debounce)
{
	u32 val;
	u32 l;
	bool enable = !!debounce;

	if (!bank->dbck_flag)
		return -ENOTSUPP;

	if (enable) {
		debounce = DIV_ROUND_UP(debounce, 31) - 1;
		if ((debounce & OMAP4_GPIO_DEBOUNCINGTIME_MASK) != debounce)
			return -EINVAL;
	}

	l = BIT(offset);

	clk_enable(bank->dbck);
	writel_relaxed(debounce, bank->base + bank->regs->debounce);

	val = omap_gpio_rmw(bank->base + bank->regs->debounce_en, l, enable);
	bank->dbck_enable_mask = val;

	clk_disable(bank->dbck);
	/*
	 * Enable debounce clock per module.
	 * This call is mandatory because in omap_gpio_request() when
	 * *_runtime_get_sync() is called, _gpio_dbck_enable() within the
	 * runtime callback fails to turn on dbck because dbck_enable_mask
	 * used within _gpio_dbck_enable() is still not initialized at
	 * that point. Therefore we have to enable dbck here.
	 */
	omap_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}

	return 0;
}
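
/*
 * Example of the conversion done in omap2_set_gpio_debounce(): a request
 * for a 100 us debounce time is encoded as DIV_ROUND_UP(100, 31) - 1 = 3,
 * which the hardware expands back to (3 + 1) * 31 = 124 us, i.e. the
 * requested time is rounded up to the next 31 us step. Requests above
 * (255 + 1) * 31 = 7936 us do not fit in the 8-bit DEBOUNCETIME field and
 * are rejected with -EINVAL.
 */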

/**
 * omap_clear_gpio_debounce - clear debounce settings for a gpio
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 *
 * If a gpio is using debounce, then clear the debounce enable bit and if
 * this is the only gpio in this bank using debounce, then clear the debounce
 * time too. The debounce clock will also be disabled when calling this
 * function if this is the only gpio in the bank using debounce.
 */
static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
{
	u32 gpio_bit = BIT(offset);

	if (!bank->dbck_flag)
		return;

	if (!(bank->dbck_enable_mask & gpio_bit))
		return;

	bank->dbck_enable_mask &= ~gpio_bit;
	bank->context.debounce_en &= ~gpio_bit;
	writel_relaxed(bank->context.debounce_en,
		       bank->base + bank->regs->debounce_en);

	if (!bank->dbck_enable_mask) {
		bank->context.debounce = 0;
		writel_relaxed(bank->context.debounce, bank->base +
			       bank->regs->debounce);
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/*
 * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
 * See the "Wake-Up Generation" section of the GPIO chapter in the TRM for
 * the list of GPIOs in the wakeup domain. If bank->non_wakeup_gpios is not
 * configured, assume none are capable of waking up the system from off mode.
 */
static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
{
	u32 no_wake = bank->non_wakeup_gpios;

	if (no_wake)
		return !!(~no_wake & gpio_mask);

	return false;
}

static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
					 unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = BIT(gpio);

	omap_gpio_rmw(base + bank->regs->leveldetect0, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_LOW);
	omap_gpio_rmw(base + bank->regs->leveldetect1, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_HIGH);

	/*
	 * We need the edge detection enabled to allow the GPIO block
	 * to be woken from idle state. Set the appropriate edge detection
	 * in addition to the level detection.
	 */
	omap_gpio_rmw(base + bank->regs->risingdetect, gpio_bit,
		      trigger & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH));
	omap_gpio_rmw(base + bank->regs->fallingdetect, gpio_bit,
		      trigger & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW));

	bank->context.leveldetect0 =
		readl_relaxed(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
		readl_relaxed(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
		readl_relaxed(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
		readl_relaxed(bank->base + bank->regs->fallingdetect);

	bank->level_mask = bank->context.leveldetect0 |
			   bank->context.leveldetect1;

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes
		 * to avoid irq lost during PER RET/OFF mode
		 * Applies for omap2 non-wakeup gpio and all omap3 gpios
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}
}

/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once. For all other chips, this function is a no-op.
 */
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	if (IS_ENABLED(CONFIG_ARCH_OMAP1) && bank->regs->irqctrl) {
		void __iomem *reg = bank->base + bank->regs->irqctrl;

		writel_relaxed(readl_relaxed(reg) ^ BIT(gpio), reg);
	}
}

static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
				    unsigned trigger)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		omap_set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = readl_relaxed(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= BIT(gpio);
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= BIT(gpio);
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(BIT(gpio));
		else
			return -EINVAL;

		writel_relaxed(l, reg);
	} else if (bank->regs->edgectrl1) {
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = readl_relaxed(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= BIT(gpio << 1);
		writel_relaxed(l, reg);
	}
	return 0;
}

static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static int omap_gpio_is_input(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return readl_relaxed(reg) & BIT(offset);
}

static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned offset)
{
	if (!LINE_USED(bank->mod_usage, offset)) {
		omap_enable_gpio_module(bank, offset);
		omap_set_gpio_direction(bank, offset, 1);
	}
	bank->irq_usage |= BIT(offset);
}

static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	int retval;
	unsigned long flags;
	unsigned offset = d->hwirq;

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	if (!bank->regs->leveldetect0 &&
	    (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	raw_spin_lock_irqsave(&bank->lock, flags);
	retval = omap_set_gpio_triggering(bank, offset, type);
	if (retval) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		goto error;
	}
	omap_gpio_init_irq(bank, offset);
	if (!omap_gpio_is_input(bank, offset)) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		retval = -EINVAL;
		goto error;
	}
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		/*
		 * Edge IRQs are already cleared/acked in the irq handler and
		 * do not need to be masked, so the handle_edge_irq() logic
		 * would be redundant here and may cause interrupts to be
		 * lost. Just use handle_simple_irq instead.
		 */
		irq_set_handler_locked(d, handle_simple_irq);

	return 0;

error:
	return retval;
}

static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	writel_relaxed(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		writel_relaxed(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	readl_relaxed(reg);
}

static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank,
					     unsigned offset)
{
	omap_clear_gpio_irqbank(bank, BIT(offset));
}

static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (BIT(bank->width)) - 1;

	reg += bank->regs->irqenable;
	l = readl_relaxed(reg);
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}

static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
					   unsigned offset, int enable)
{
	void __iomem *reg = bank->base;
	u32 gpio_mask = BIT(offset);

	if (bank->regs->set_irqenable && bank->regs->clr_irqenable) {
		if (enable) {
			reg += bank->regs->set_irqenable;
			bank->context.irqenable1 |= gpio_mask;
		} else {
			reg += bank->regs->clr_irqenable;
			bank->context.irqenable1 &= ~gpio_mask;
		}
		writel_relaxed(gpio_mask, reg);
	} else {
		bank->context.irqenable1 =
			omap_gpio_rmw(reg + bank->regs->irqenable, gpio_mask,
				      enable ^ bank->regs->irqenable_inv);
	}

	/*
	 * Program GPIO wakeup along with IRQ enable to satisfy OMAP4430 TRM
	 * note requiring correlation between the IRQ enable registers and
	 * the wakeup registers. In any case, we want wakeup from idle
	 * enabled for the GPIOs which support this feature.
	 */
	if (bank->regs->wkup_en &&
	    (bank->regs->edgectrl1 || !(bank->non_wakeup_gpios & gpio_mask))) {
		bank->context.wake_en =
			omap_gpio_rmw(bank->base + bank->regs->wkup_en,
				      gpio_mask, enable);
	}
}

/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);

	return irq_set_irq_wake(bank->irq, enable);
}

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
{
	void __iomem *isr_reg = NULL;
	u32 enabled, isr, edge;
	unsigned int bit;
	struct gpio_bank *bank = gpiobank;
	unsigned long wa_lock_flags;
	unsigned long lock_flags;

	isr_reg = bank->base + bank->regs->irqstatus;
	if (WARN_ON(!isr_reg))
		goto exit;

	if (WARN_ONCE(!pm_runtime_active(bank->chip.parent),
		      "gpio irq%i while runtime suspended?\n", irq))
		return IRQ_NONE;

	while (1) {
		raw_spin_lock_irqsave(&bank->lock, lock_flags);

		enabled = omap_get_gpio_irqbank_mask(bank);
		isr = readl_relaxed(isr_reg) & enabled;

		/*
		 * Clear edge sensitive interrupts before calling handler(s)
		 * so subsequent edge transitions are not missed while the
		 * handlers are running.
		 */
		edge = isr & ~bank->level_mask;
		if (edge)
			omap_clear_gpio_irqbank(bank, edge);

		raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

		if (!isr)
			break;

		while (isr) {
			bit = __ffs(isr);
			isr &= ~(BIT(bit));

			raw_spin_lock_irqsave(&bank->lock, lock_flags);
			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time. If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (BIT(bit)))
				omap_toggle_gpio_edge_triggering(bank, bit);

			raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

			raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);

			generic_handle_irq(irq_find_mapping(bank->chip.irq.domain,
							    bit));

			raw_spin_unlock_irqrestore(&bank->wa_lock,
						   wa_lock_flags);
		}
	}
exit:
	return IRQ_HANDLED;
}

static unsigned int omap_gpio_irq_startup(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);

	if (!LINE_USED(bank->mod_usage, offset))
		omap_set_gpio_direction(bank, offset, 1);
	omap_enable_gpio_module(bank, offset);
	bank->irq_usage |= BIT(offset);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
	omap_gpio_unmask_irq(d);

	return 0;
}

static void omap_gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->irq_usage &= ~(BIT(offset));
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	omap_clear_gpio_irqstatus(bank, offset);
	omap_set_gpio_irqenable(bank, offset, 0);
	if (!LINE_USED(bank->mod_usage, offset))
		omap_clear_gpio_debounce(bank, offset);
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

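/*
 * The irq_bus_lock/irq_bus_sync_unlock pair is used here to hold a runtime
 * PM reference on the bank while irqchip callbacks run, so that the register
 * accesses in the mask/unmask/set_type paths happen with the GPIO module
 * clocked.
 */
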
static void omap_gpio_irq_bus_lock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	pm_runtime_get_sync(bank->chip.parent);
}

static void gpio_irq_bus_sync_unlock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	pm_runtime_put(bank->chip.parent);
}

static void omap_gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	omap_set_gpio_irqenable(bank, offset, 0);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_irqenable(bank, offset, 1);

	/*
	 * For level-triggered GPIOs, clearing must be done after the source
	 * is cleared, thus after the handler has run. OMAP4 needs this done
	 * after enabling the interrupt to clear the wakeup status.
	 */
	if (bank->regs->leveldetect0 && bank->regs->wkup_en &&
	    trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
		omap_clear_gpio_irqstatus(bank, offset);

	if (trigger)
		omap_set_gpio_triggering(bank, offset, trigger);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/

static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver = {
		.name = "mpuio",
		.pm = &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name = "mpuio",
	.id = -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

static inline void omap_mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

/*---------------------------------------------------------------------*/

static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	pm_runtime_get_sync(chip->parent);

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_enable_gpio_module(bank, offset);
	bank->mod_usage |= BIT(offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->mod_usage &= ~(BIT(offset));
	if (!LINE_USED(bank->irq_usage, offset)) {
		omap_set_gpio_direction(bank, offset, 1);
		omap_clear_gpio_debounce(bank, offset);
	}
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	pm_runtime_put(chip->parent);
}

static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);

	if (readl_relaxed(bank->base + bank->regs->direction) & BIT(offset))
		return GPIO_LINE_DIRECTION_IN;

	return GPIO_LINE_DIRECTION_OUT;
}

static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_direction(bank, offset, 1);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *reg;

	if (omap_gpio_is_input(bank, offset))
		reg = bank->base + bank->regs->datain;
	else
		reg = bank->base + bank->regs->dataout;

	return (readl_relaxed(reg) & BIT(offset)) != 0;
}

static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	omap_set_gpio_direction(bank, offset, 0);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int omap_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
				  unsigned long *bits)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *base = bank->base;
	u32 direction, m, val = 0;

	direction = readl_relaxed(base + bank->regs->direction);

	m = direction & *mask;
	if (m)
		val |= readl_relaxed(base + bank->regs->datain) & m;

	m = ~direction & *mask;
	if (m)
		val |= readl_relaxed(base + bank->regs->dataout) & m;

	*bits = val;

	return 0;
}

static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
			      unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;
	int ret;

	bank = gpiochip_get_data(chip);

	raw_spin_lock_irqsave(&bank->lock, flags);
	ret = omap2_set_gpio_debounce(bank, offset, debounce);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (ret)
		dev_info(chip->parent,
			 "Could not set line %u debounce to %u microseconds (%d)",
			 offset, debounce, ret);

	return ret;
}

static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset,
				unsigned long config)
{
	u32 debounce;

	if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
		return -ENOTSUPP;

	debounce = pinconf_to_config_argument(config);
	return omap_gpio_debounce(chip, offset, debounce);
}

static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
				   unsigned long *bits)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *reg = bank->base + bank->regs->dataout;
	unsigned long flags;
	u32 l;

	raw_spin_lock_irqsave(&bank->lock, flags);
	l = (readl_relaxed(reg) & ~*mask) | (*bits & *mask);
	writel_relaxed(l, reg);
	bank->context.dataout = l;
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/

static void omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = readw_relaxed(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}

static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		writel_relaxed(l, bank->base + bank->regs->irqenable);
		return;
	}

	omap_gpio_rmw(base + bank->regs->irqenable, l,
		      bank->regs->irqenable_inv);
	omap_gpio_rmw(base + bank->regs->irqstatus, l,
		      !bank->regs->irqenable_inv);
	if (bank->regs->debounce_en)
		writel_relaxed(0, base + bank->regs->debounce_en);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
	/* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		writel_relaxed(0, base + bank->regs->ctrl);
}

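/*
 * Banks are registered into a contiguous legacy GPIO number space: a static
 * counter advances by bank->width per probed bank and provides both
 * chip.base and the "gpio-<first>-<last>" label. The MPUIO bank is the
 * exception and keeps its fixed OMAP_MPUIO(0) base.
 */
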
static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
{
	struct gpio_irq_chip *irq;
	static int gpio;
	const char *label;
	int irq_base = 0;
	int ret;

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.get_direction = omap_gpio_get_direction;
	bank->chip.direction_input = omap_gpio_input;
	bank->chip.get = omap_gpio_get;
	bank->chip.get_multiple = omap_gpio_get_multiple;
	bank->chip.direction_output = omap_gpio_output;
	bank->chip.set_config = omap_gpio_set_config;
	bank->chip.set = omap_gpio_set;
	bank->chip.set_multiple = omap_gpio_set_multiple;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.parent = &omap_mpuio_device.dev;
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		label = devm_kasprintf(bank->chip.parent, GFP_KERNEL, "gpio-%d-%d",
				       gpio, gpio + bank->width - 1);
		if (!label)
			return -ENOMEM;
		bank->chip.label = label;
		bank->chip.base = gpio;
	}
	bank->chip.ngpio = bank->width;

#ifdef CONFIG_ARCH_OMAP1
	/*
	 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
	 * irq_alloc_descs() since a base IRQ offset will no longer be needed.
	 */
	irq_base = devm_irq_alloc_descs(bank->chip.parent,
					-1, 0, bank->width, 0);
	if (irq_base < 0) {
		dev_err(bank->chip.parent, "Couldn't allocate IRQ numbers\n");
		return -ENODEV;
	}
#endif

	/* MPUIO is a bit different, reading IRQ status clears it */
	if (bank->is_mpuio && !bank->regs->wkup_en)
		irqc->irq_set_wake = NULL;

	irq = &bank->chip.irq;
	irq->chip = irqc;
	irq->handler = handle_bad_irq;
	irq->default_type = IRQ_TYPE_NONE;
	irq->num_parents = 1;
	irq->parents = &bank->irq;
	irq->first = irq_base;

	ret = gpiochip_add_data(&bank->chip, bank);
	if (ret) {
		dev_err(bank->chip.parent,
			"Could not register gpio chip %d\n", ret);
		return ret;
	}

	ret = devm_request_irq(bank->chip.parent, bank->irq,
			       omap_gpio_irq_handler,
			       0, dev_name(bank->chip.parent), bank);
	if (ret)
		gpiochip_remove(&bank->chip);

	if (!bank->is_mpuio)
		gpio += bank->width;

	return ret;
}

static void omap_gpio_init_context(struct gpio_bank *p)
{
	const struct omap_gpio_reg_offs *regs = p->regs;
	void __iomem *base = p->base;

	p->context.ctrl = readl_relaxed(base + regs->ctrl);
	p->context.oe = readl_relaxed(base + regs->direction);
	p->context.wake_en = readl_relaxed(base + regs->wkup_en);
	p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
	p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
	p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
	p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
	p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
	p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);
	p->context.dataout = readl_relaxed(base + regs->dataout);

	p->context_valid = true;
}

static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	const struct omap_gpio_reg_offs *regs = bank->regs;
	void __iomem *base = bank->base;

	writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
	writel_relaxed(bank->context.ctrl, base + regs->ctrl);
	writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
	writel_relaxed(bank->context.leveldetect1, base + regs->leveldetect1);
	writel_relaxed(bank->context.risingdetect, base + regs->risingdetect);
	writel_relaxed(bank->context.fallingdetect, base + regs->fallingdetect);
	writel_relaxed(bank->context.dataout, base + regs->dataout);
	writel_relaxed(bank->context.oe, base + regs->direction);

	if (bank->dbck_enable_mask) {
		writel_relaxed(bank->context.debounce, base + regs->debounce);
		writel_relaxed(bank->context.debounce_en,
			       base + regs->debounce_en);
	}

	writel_relaxed(bank->context.irqenable1, base + regs->irqenable);
	writel_relaxed(bank->context.irqenable2, base + regs->irqenable2);
}

static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
	struct device *dev = bank->chip.parent;
	void __iomem *base = bank->base;
	u32 mask, nowake;

	bank->saved_datain = readl_relaxed(base + bank->regs->datain);

	if (!bank->enabled_non_wakeup_gpios)
		goto update_gpio_context_count;

	/* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
	mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
	mask &= ~bank->context.risingdetect;
	bank->saved_datain |= mask;

	/* Check for pending EDGE_RISING, ignore EDGE_BOTH */
	mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
	mask &= ~bank->context.fallingdetect;
	bank->saved_datain &= ~mask;

	if (!may_lose_context)
		goto update_gpio_context_count;

	/*
	 * If going to OFF, remove triggering for all wkup domain
	 * non-wakeup GPIOs. Otherwise spurious IRQs will be
	 * generated. See OMAP2420 Errata item 1.101.
	 */
	if (!bank->loses_context && bank->enabled_non_wakeup_gpios) {
		nowake = bank->enabled_non_wakeup_gpios;
		omap_gpio_rmw(base + bank->regs->fallingdetect, nowake, ~nowake);
		omap_gpio_rmw(base + bank->regs->risingdetect, nowake, ~nowake);
	}

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
			bank->get_context_loss_count(dev);

	omap_gpio_dbck_disable(bank);
}

static void omap_gpio_unidle(struct gpio_bank *bank)
{
	struct device *dev = bank->chip.parent;
	u32 l = 0, gen, gen0, gen1;
	int c;

	/*
	 * On the first resume during the probe, the context has not
	 * been initialised and so initialise it now. Also initialise
	 * the context loss count.
	 */
	if (bank->loses_context && !bank->context_valid) {
		omap_gpio_init_context(bank);

		if (bank->get_context_loss_count)
			bank->context_loss_count =
				bank->get_context_loss_count(dev);
	}

	omap_gpio_dbck_enable(bank);

	if (bank->loses_context) {
		if (!bank->get_context_loss_count) {
			omap_gpio_restore_context(bank);
		} else {
			c = bank->get_context_loss_count(dev);
			if (c != bank->context_loss_count) {
				omap_gpio_restore_context(bank);
			} else {
				return;
			}
		}
	} else {
		/* Restore changes done for OMAP2420 errata 1.101 */
		writel_relaxed(bank->context.fallingdetect,
			       bank->base + bank->regs->fallingdetect);
		writel_relaxed(bank->context.risingdetect,
			       bank->base + bank->regs->risingdetect);
	}

	l = readl_relaxed(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state. If so, generate an IRQ by software. This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->context.fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->context.risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->context.fallingdetect) &
		   ~(bank->context.risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

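	/*
	 * To synthesize the missed interrupts, level detection is briefly
	 * forced on for the affected lines so that their status bits latch,
	 * and the saved LEVELDETECT values are then written back, leaving
	 * the latched status pending for the handler.
	 */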
	if (gen) {
		u32 old0, old1;

		old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
		old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);

		if (!bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | gen, bank->base +
				       bank->regs->leveldetect0);
			writel_relaxed(old1 | gen, bank->base +
				       bank->regs->leveldetect1);
		}

		if (bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | l, bank->base +
				       bank->regs->leveldetect0);
			writel_relaxed(old1 | l, bank->base +
				       bank->regs->leveldetect1);
		}
		writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
		writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
	}
}

static int gpio_omap_cpu_notifier(struct notifier_block *nb,
				  unsigned long cmd, void *v)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(nb, struct gpio_bank, nb);

	raw_spin_lock_irqsave(&bank->lock, flags);
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (bank->is_suspended)
			break;
		omap_gpio_idle(bank, true);
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		if (bank->is_suspended)
			break;
		omap_gpio_unidle(bank);
		break;
	}
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return NOTIFY_OK;
}

static const struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision = OMAP24XX_GPIO_REVISION,
	.direction = OMAP24XX_GPIO_OE,
	.datain = OMAP24XX_GPIO_DATAIN,
	.dataout = OMAP24XX_GPIO_DATAOUT,
	.set_dataout = OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus = OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable = OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 = OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce = OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl = OMAP24XX_GPIO_CTRL,
	.wkup_en = OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect = OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect = OMAP24XX_GPIO_FALLINGDETECT,
};

static const struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision = OMAP4_GPIO_REVISION,
	.direction = OMAP4_GPIO_OE,
	.datain = OMAP4_GPIO_DATAIN,
	.dataout = OMAP4_GPIO_DATAOUT,
	.set_dataout = OMAP4_GPIO_SETDATAOUT,
	.clr_dataout = OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus = OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 = OMAP4_GPIO_IRQSTATUS1,
	.irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0,
	.irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1,
	.irqenable = OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 = OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable = OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce = OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en = OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl = OMAP4_GPIO_CTRL,
	.wkup_en = OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 = OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 = OMAP4_GPIO_LEVELDETECT1,
	.risingdetect = OMAP4_GPIO_RISINGDETECT,
	.fallingdetect = OMAP4_GPIO_FALLINGDETECT,
};

static const struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

static const struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);

static int omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	const struct omap_gpio_platform_data *pdata;
	struct gpio_bank *bank;
	struct irq_chip *irqc;
	int ret;

	match = of_match_device(of_match_ptr(omap_gpio_match), dev);

	pdata = match ? match->data : dev_get_platdata(dev);
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
	if (!bank)
		return -ENOMEM;

	irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL);
	if (!irqc)
		return -ENOMEM;

	irqc->irq_startup = omap_gpio_irq_startup;
	irqc->irq_shutdown = omap_gpio_irq_shutdown;
	irqc->irq_ack = dummy_irq_chip.irq_ack;
	irqc->irq_mask = omap_gpio_mask_irq;
	irqc->irq_unmask = omap_gpio_unmask_irq;
	irqc->irq_set_type = omap_gpio_irq_type;
	irqc->irq_set_wake = omap_gpio_wake_enable;
	irqc->irq_bus_lock = omap_gpio_irq_bus_lock;
	irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock;
	irqc->name = dev_name(&pdev->dev);
	irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
	irqc->parent_device = dev;

	bank->irq = platform_get_irq(pdev, 0);
	if (bank->irq <= 0) {
		if (!bank->irq)
			bank->irq = -ENXIO;
		if (bank->irq != -EPROBE_DEFER)
			dev_err(dev,
				"can't get irq resource ret=%d\n", bank->irq);
		return bank->irq;
	}

	bank->chip.parent = dev;
	bank->chip.owner = THIS_MODULE;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->regs = pdata->regs;
#ifdef CONFIG_OF_GPIO
	bank->chip.of_node = of_node_get(node);
#endif

	if (node) {
		if (!of_property_read_bool(node, "ti,gpio-always-on"))
			bank->loses_context = true;
	} else {
		bank->loses_context = pdata->loses_context;

		if (bank->loses_context)
			bank->get_context_loss_count =
				pdata->get_context_loss_count;
	}

	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = omap_set_gpio_dataout_reg;
	else
		bank->set_dataout = omap_set_gpio_dataout_mask;

	raw_spin_lock_init(&bank->lock);
	raw_spin_lock_init(&bank->wa_lock);

	/* Static mapping, never released */
	bank->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(bank->base)) {
		return PTR_ERR(bank->base);
	}

	if (bank->dbck_flag) {
		bank->dbck = devm_clk_get(dev, "dbclk");
		if (IS_ERR(bank->dbck)) {
			dev_err(dev,
				"Could not get gpio dbck. Disable debounce\n");
			bank->dbck_flag = false;
		} else {
			clk_prepare(bank->dbck);
		}
	}

	platform_set_drvdata(pdev, bank);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	if (bank->is_mpuio)
		omap_mpuio_init(bank);

	omap_gpio_mod_init(bank);

	ret = omap_gpio_chip_init(bank, irqc);
	if (ret) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
		if (bank->dbck_flag)
			clk_unprepare(bank->dbck);
		return ret;
	}

	omap_gpio_show_rev(bank);

	bank->nb.notifier_call = gpio_omap_cpu_notifier;
	cpu_pm_register_notifier(&bank->nb);

	pm_runtime_put(dev);

	return 0;
}

static int omap_gpio_remove(struct platform_device *pdev)
{
	struct gpio_bank *bank = platform_get_drvdata(pdev);

	cpu_pm_unregister_notifier(&bank->nb);
	gpiochip_remove(&bank->chip);
	pm_runtime_disable(&pdev->dev);
	if (bank->dbck_flag)
		clk_unprepare(bank->dbck);

	return 0;
}

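/*
 * Runtime PM idles and reactivates the bank; the is_suspended flag lets the
 * CPU cluster PM notifier above skip banks that are already runtime
 * suspended, so context is not saved or restored twice.
 */
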
static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_gpio_idle(bank, true);
	bank->is_suspended = true;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_gpio_unidle(bank);
	bank->is_suspended = false;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops gpio_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
			   NULL)
};

static struct platform_driver omap_gpio_driver = {
	.probe = omap_gpio_probe,
	.remove = omap_gpio_remove,
	.driver = {
		.name = "omap_gpio",
		.pm = &gpio_pm_ops,
		.of_match_table = omap_gpio_match,
	},
};

/*
 * The gpio driver needs to be registered before machine_init functions
 * access the gpio APIs. Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);

static void __exit omap_gpio_exit(void)
{
	platform_driver_unregister(&omap_gpio_driver);
}
module_exit(omap_gpio_exit);

MODULE_DESCRIPTION("omap gpio driver");
MODULE_ALIAS("platform:gpio-omap");
MODULE_LICENSE("GPL v2");