1 /* 2 * Support functions for OMAP GPIO 3 * 4 * Copyright (C) 2003-2005 Nokia Corporation 5 * Written by Juha Yrjölä <juha.yrjola@nokia.com> 6 * 7 * Copyright (C) 2009 Texas Instruments 8 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License version 2 as 12 * published by the Free Software Foundation. 13 */ 14 15 #include <linux/init.h> 16 #include <linux/module.h> 17 #include <linux/interrupt.h> 18 #include <linux/syscore_ops.h> 19 #include <linux/err.h> 20 #include <linux/clk.h> 21 #include <linux/io.h> 22 #include <linux/device.h> 23 #include <linux/pm_runtime.h> 24 #include <linux/pm.h> 25 #include <linux/of.h> 26 #include <linux/of_device.h> 27 #include <linux/gpio.h> 28 #include <linux/bitops.h> 29 #include <linux/platform_data/gpio-omap.h> 30 31 #define OFF_MODE 1 32 33 static LIST_HEAD(omap_gpio_list); 34 35 struct gpio_regs { 36 u32 irqenable1; 37 u32 irqenable2; 38 u32 wake_en; 39 u32 ctrl; 40 u32 oe; 41 u32 leveldetect0; 42 u32 leveldetect1; 43 u32 risingdetect; 44 u32 fallingdetect; 45 u32 dataout; 46 u32 debounce; 47 u32 debounce_en; 48 }; 49 50 struct gpio_bank { 51 struct list_head node; 52 void __iomem *base; 53 u16 irq; 54 u32 non_wakeup_gpios; 55 u32 enabled_non_wakeup_gpios; 56 struct gpio_regs context; 57 u32 saved_datain; 58 u32 level_mask; 59 u32 toggle_mask; 60 spinlock_t lock; 61 struct gpio_chip chip; 62 struct clk *dbck; 63 u32 mod_usage; 64 u32 irq_usage; 65 u32 dbck_enable_mask; 66 bool dbck_enabled; 67 struct device *dev; 68 bool is_mpuio; 69 bool dbck_flag; 70 bool loses_context; 71 bool context_valid; 72 int stride; 73 u32 width; 74 int context_loss_count; 75 int power_mode; 76 bool workaround_enabled; 77 78 void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable); 79 int (*get_context_loss_count)(struct device *dev); 80 81 struct omap_gpio_reg_offs *regs; 82 }; 83 84 #define 
GPIO_INDEX(bank, gpio) (gpio % bank->width) 85 #define GPIO_BIT(bank, gpio) (BIT(GPIO_INDEX(bank, gpio))) 86 #define GPIO_MOD_CTRL_BIT BIT(0) 87 88 #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage) 89 #define LINE_USED(line, offset) (line & (BIT(offset))) 90 91 static void omap_gpio_unmask_irq(struct irq_data *d); 92 93 static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) 94 { 95 return bank->chip.base + gpio_irq; 96 } 97 98 static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d) 99 { 100 struct gpio_chip *chip = irq_data_get_irq_chip_data(d); 101 return container_of(chip, struct gpio_bank, chip); 102 } 103 104 static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio, 105 int is_input) 106 { 107 void __iomem *reg = bank->base; 108 u32 l; 109 110 reg += bank->regs->direction; 111 l = readl_relaxed(reg); 112 if (is_input) 113 l |= BIT(gpio); 114 else 115 l &= ~(BIT(gpio)); 116 writel_relaxed(l, reg); 117 bank->context.oe = l; 118 } 119 120 121 /* set data out value using dedicate set/clear register */ 122 static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, 123 int enable) 124 { 125 void __iomem *reg = bank->base; 126 u32 l = GPIO_BIT(bank, gpio); 127 128 if (enable) { 129 reg += bank->regs->set_dataout; 130 bank->context.dataout |= l; 131 } else { 132 reg += bank->regs->clr_dataout; 133 bank->context.dataout &= ~l; 134 } 135 136 writel_relaxed(l, reg); 137 } 138 139 /* set data out value using mask register */ 140 static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, 141 int enable) 142 { 143 void __iomem *reg = bank->base + bank->regs->dataout; 144 u32 gpio_bit = GPIO_BIT(bank, gpio); 145 u32 l; 146 147 l = readl_relaxed(reg); 148 if (enable) 149 l |= gpio_bit; 150 else 151 l &= ~gpio_bit; 152 writel_relaxed(l, reg); 153 bank->context.dataout = l; 154 } 155 156 static int omap_get_gpio_datain(struct gpio_bank *bank, int offset) 157 { 158 void __iomem *reg = 
bank->base + bank->regs->datain; 159 160 return (readl_relaxed(reg) & (BIT(offset))) != 0; 161 } 162 163 static int omap_get_gpio_dataout(struct gpio_bank *bank, int offset) 164 { 165 void __iomem *reg = bank->base + bank->regs->dataout; 166 167 return (readl_relaxed(reg) & (BIT(offset))) != 0; 168 } 169 170 static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set) 171 { 172 int l = readl_relaxed(base + reg); 173 174 if (set) 175 l |= mask; 176 else 177 l &= ~mask; 178 179 writel_relaxed(l, base + reg); 180 } 181 182 static inline void omap_gpio_dbck_enable(struct gpio_bank *bank) 183 { 184 if (bank->dbck_enable_mask && !bank->dbck_enabled) { 185 clk_prepare_enable(bank->dbck); 186 bank->dbck_enabled = true; 187 188 writel_relaxed(bank->dbck_enable_mask, 189 bank->base + bank->regs->debounce_en); 190 } 191 } 192 193 static inline void omap_gpio_dbck_disable(struct gpio_bank *bank) 194 { 195 if (bank->dbck_enable_mask && bank->dbck_enabled) { 196 /* 197 * Disable debounce before cutting it's clock. If debounce is 198 * enabled but the clock is not, GPIO module seems to be unable 199 * to detect events and generate interrupts at least on OMAP3. 200 */ 201 writel_relaxed(0, bank->base + bank->regs->debounce_en); 202 203 clk_disable_unprepare(bank->dbck); 204 bank->dbck_enabled = false; 205 } 206 } 207 208 /** 209 * omap2_set_gpio_debounce - low level gpio debounce time 210 * @bank: the gpio bank we're acting upon 211 * @gpio: the gpio number on this @gpio 212 * @debounce: debounce time to use 213 * 214 * OMAP's debounce time is in 31us steps so we need 215 * to convert and round up to the closest unit. 
216 */ 217 static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned gpio, 218 unsigned debounce) 219 { 220 void __iomem *reg; 221 u32 val; 222 u32 l; 223 224 if (!bank->dbck_flag) 225 return; 226 227 if (debounce < 32) 228 debounce = 0x01; 229 else if (debounce > 7936) 230 debounce = 0xff; 231 else 232 debounce = (debounce / 0x1f) - 1; 233 234 l = GPIO_BIT(bank, gpio); 235 236 clk_prepare_enable(bank->dbck); 237 reg = bank->base + bank->regs->debounce; 238 writel_relaxed(debounce, reg); 239 240 reg = bank->base + bank->regs->debounce_en; 241 val = readl_relaxed(reg); 242 243 if (debounce) 244 val |= l; 245 else 246 val &= ~l; 247 bank->dbck_enable_mask = val; 248 249 writel_relaxed(val, reg); 250 clk_disable_unprepare(bank->dbck); 251 /* 252 * Enable debounce clock per module. 253 * This call is mandatory because in omap_gpio_request() when 254 * *_runtime_get_sync() is called, _gpio_dbck_enable() within 255 * runtime callbck fails to turn on dbck because dbck_enable_mask 256 * used within _gpio_dbck_enable() is still not initialized at 257 * that point. Therefore we have to enable dbck here. 258 */ 259 omap_gpio_dbck_enable(bank); 260 if (bank->dbck_enable_mask) { 261 bank->context.debounce = debounce; 262 bank->context.debounce_en = val; 263 } 264 } 265 266 /** 267 * omap_clear_gpio_debounce - clear debounce settings for a gpio 268 * @bank: the gpio bank we're acting upon 269 * @gpio: the gpio number on this @gpio 270 * 271 * If a gpio is using debounce, then clear the debounce enable bit and if 272 * this is the only gpio in this bank using debounce, then clear the debounce 273 * time too. The debounce clock will also be disabled when calling this function 274 * if this is the only gpio in the bank using debounce. 
275 */ 276 static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio) 277 { 278 u32 gpio_bit = GPIO_BIT(bank, gpio); 279 280 if (!bank->dbck_flag) 281 return; 282 283 if (!(bank->dbck_enable_mask & gpio_bit)) 284 return; 285 286 bank->dbck_enable_mask &= ~gpio_bit; 287 bank->context.debounce_en &= ~gpio_bit; 288 writel_relaxed(bank->context.debounce_en, 289 bank->base + bank->regs->debounce_en); 290 291 if (!bank->dbck_enable_mask) { 292 bank->context.debounce = 0; 293 writel_relaxed(bank->context.debounce, bank->base + 294 bank->regs->debounce); 295 clk_disable_unprepare(bank->dbck); 296 bank->dbck_enabled = false; 297 } 298 } 299 300 static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, 301 unsigned trigger) 302 { 303 void __iomem *base = bank->base; 304 u32 gpio_bit = BIT(gpio); 305 306 omap_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit, 307 trigger & IRQ_TYPE_LEVEL_LOW); 308 omap_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit, 309 trigger & IRQ_TYPE_LEVEL_HIGH); 310 omap_gpio_rmw(base, bank->regs->risingdetect, gpio_bit, 311 trigger & IRQ_TYPE_EDGE_RISING); 312 omap_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit, 313 trigger & IRQ_TYPE_EDGE_FALLING); 314 315 bank->context.leveldetect0 = 316 readl_relaxed(bank->base + bank->regs->leveldetect0); 317 bank->context.leveldetect1 = 318 readl_relaxed(bank->base + bank->regs->leveldetect1); 319 bank->context.risingdetect = 320 readl_relaxed(bank->base + bank->regs->risingdetect); 321 bank->context.fallingdetect = 322 readl_relaxed(bank->base + bank->regs->fallingdetect); 323 324 if (likely(!(bank->non_wakeup_gpios & gpio_bit))) { 325 omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0); 326 bank->context.wake_en = 327 readl_relaxed(bank->base + bank->regs->wkup_en); 328 } 329 330 /* This part needs to be executed always for OMAP{34xx, 44xx} */ 331 if (!bank->regs->irqctrl) { 332 /* On omap24xx proceed only when valid GPIO bit is set */ 333 if 
(bank->non_wakeup_gpios) { 334 if (!(bank->non_wakeup_gpios & gpio_bit)) 335 goto exit; 336 } 337 338 /* 339 * Log the edge gpio and manually trigger the IRQ 340 * after resume if the input level changes 341 * to avoid irq lost during PER RET/OFF mode 342 * Applies for omap2 non-wakeup gpio and all omap3 gpios 343 */ 344 if (trigger & IRQ_TYPE_EDGE_BOTH) 345 bank->enabled_non_wakeup_gpios |= gpio_bit; 346 else 347 bank->enabled_non_wakeup_gpios &= ~gpio_bit; 348 } 349 350 exit: 351 bank->level_mask = 352 readl_relaxed(bank->base + bank->regs->leveldetect0) | 353 readl_relaxed(bank->base + bank->regs->leveldetect1); 354 } 355 356 #ifdef CONFIG_ARCH_OMAP1 357 /* 358 * This only applies to chips that can't do both rising and falling edge 359 * detection at once. For all other chips, this function is a noop. 360 */ 361 static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) 362 { 363 void __iomem *reg = bank->base; 364 u32 l = 0; 365 366 if (!bank->regs->irqctrl) 367 return; 368 369 reg += bank->regs->irqctrl; 370 371 l = readl_relaxed(reg); 372 if ((l >> gpio) & 1) 373 l &= ~(BIT(gpio)); 374 else 375 l |= BIT(gpio); 376 377 writel_relaxed(l, reg); 378 } 379 #else 380 static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {} 381 #endif 382 383 static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio, 384 unsigned trigger) 385 { 386 void __iomem *reg = bank->base; 387 void __iomem *base = bank->base; 388 u32 l = 0; 389 390 if (bank->regs->leveldetect0 && bank->regs->wkup_en) { 391 omap_set_gpio_trigger(bank, gpio, trigger); 392 } else if (bank->regs->irqctrl) { 393 reg += bank->regs->irqctrl; 394 395 l = readl_relaxed(reg); 396 if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) 397 bank->toggle_mask |= BIT(gpio); 398 if (trigger & IRQ_TYPE_EDGE_RISING) 399 l |= BIT(gpio); 400 else if (trigger & IRQ_TYPE_EDGE_FALLING) 401 l &= ~(BIT(gpio)); 402 else 403 return -EINVAL; 404 405 writel_relaxed(l, reg); 406 
} else if (bank->regs->edgectrl1) { 407 if (gpio & 0x08) 408 reg += bank->regs->edgectrl2; 409 else 410 reg += bank->regs->edgectrl1; 411 412 gpio &= 0x07; 413 l = readl_relaxed(reg); 414 l &= ~(3 << (gpio << 1)); 415 if (trigger & IRQ_TYPE_EDGE_RISING) 416 l |= 2 << (gpio << 1); 417 if (trigger & IRQ_TYPE_EDGE_FALLING) 418 l |= BIT(gpio << 1); 419 420 /* Enable wake-up during idle for dynamic tick */ 421 omap_gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger); 422 bank->context.wake_en = 423 readl_relaxed(bank->base + bank->regs->wkup_en); 424 writel_relaxed(l, reg); 425 } 426 return 0; 427 } 428 429 static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset) 430 { 431 if (bank->regs->pinctrl) { 432 void __iomem *reg = bank->base + bank->regs->pinctrl; 433 434 /* Claim the pin for MPU */ 435 writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg); 436 } 437 438 if (bank->regs->ctrl && !BANK_USED(bank)) { 439 void __iomem *reg = bank->base + bank->regs->ctrl; 440 u32 ctrl; 441 442 ctrl = readl_relaxed(reg); 443 /* Module is enabled, clocks are not gated */ 444 ctrl &= ~GPIO_MOD_CTRL_BIT; 445 writel_relaxed(ctrl, reg); 446 bank->context.ctrl = ctrl; 447 } 448 } 449 450 static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset) 451 { 452 void __iomem *base = bank->base; 453 454 if (bank->regs->wkup_en && 455 !LINE_USED(bank->mod_usage, offset) && 456 !LINE_USED(bank->irq_usage, offset)) { 457 /* Disable wake-up during idle for dynamic tick */ 458 omap_gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0); 459 bank->context.wake_en = 460 readl_relaxed(bank->base + bank->regs->wkup_en); 461 } 462 463 if (bank->regs->ctrl && !BANK_USED(bank)) { 464 void __iomem *reg = bank->base + bank->regs->ctrl; 465 u32 ctrl; 466 467 ctrl = readl_relaxed(reg); 468 /* Module is disabled, clocks are gated */ 469 ctrl |= GPIO_MOD_CTRL_BIT; 470 writel_relaxed(ctrl, reg); 471 bank->context.ctrl = ctrl; 472 } 473 } 474 475 static int 
omap_gpio_is_input(struct gpio_bank *bank, int mask) 476 { 477 void __iomem *reg = bank->base + bank->regs->direction; 478 479 return readl_relaxed(reg) & mask; 480 } 481 482 static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned gpio, 483 unsigned offset) 484 { 485 if (!LINE_USED(bank->mod_usage, offset)) { 486 omap_enable_gpio_module(bank, offset); 487 omap_set_gpio_direction(bank, offset, 1); 488 } 489 bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio)); 490 } 491 492 static int omap_gpio_irq_type(struct irq_data *d, unsigned type) 493 { 494 struct gpio_bank *bank = omap_irq_data_get_bank(d); 495 unsigned gpio = 0; 496 int retval; 497 unsigned long flags; 498 unsigned offset; 499 500 if (!BANK_USED(bank)) 501 pm_runtime_get_sync(bank->dev); 502 503 #ifdef CONFIG_ARCH_OMAP1 504 if (d->irq > IH_MPUIO_BASE) 505 gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE); 506 #endif 507 508 if (!gpio) 509 gpio = omap_irq_to_gpio(bank, d->hwirq); 510 511 if (type & ~IRQ_TYPE_SENSE_MASK) 512 return -EINVAL; 513 514 if (!bank->regs->leveldetect0 && 515 (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH))) 516 return -EINVAL; 517 518 spin_lock_irqsave(&bank->lock, flags); 519 offset = GPIO_INDEX(bank, gpio); 520 retval = omap_set_gpio_triggering(bank, offset, type); 521 omap_gpio_init_irq(bank, gpio, offset); 522 if (!omap_gpio_is_input(bank, BIT(offset))) { 523 spin_unlock_irqrestore(&bank->lock, flags); 524 return -EINVAL; 525 } 526 spin_unlock_irqrestore(&bank->lock, flags); 527 528 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 529 __irq_set_handler_locked(d->irq, handle_level_irq); 530 else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) 531 __irq_set_handler_locked(d->irq, handle_edge_irq); 532 533 return retval; 534 } 535 536 static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) 537 { 538 void __iomem *reg = bank->base; 539 540 reg += bank->regs->irqstatus; 541 writel_relaxed(gpio_mask, reg); 542 543 /* Workaround for clearing DSP GPIO 
interrupts to allow retention */ 544 if (bank->regs->irqstatus2) { 545 reg = bank->base + bank->regs->irqstatus2; 546 writel_relaxed(gpio_mask, reg); 547 } 548 549 /* Flush posted write for the irq status to avoid spurious interrupts */ 550 readl_relaxed(reg); 551 } 552 553 static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank, int gpio) 554 { 555 omap_clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio)); 556 } 557 558 static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank) 559 { 560 void __iomem *reg = bank->base; 561 u32 l; 562 u32 mask = (BIT(bank->width)) - 1; 563 564 reg += bank->regs->irqenable; 565 l = readl_relaxed(reg); 566 if (bank->regs->irqenable_inv) 567 l = ~l; 568 l &= mask; 569 return l; 570 } 571 572 static void omap_enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) 573 { 574 void __iomem *reg = bank->base; 575 u32 l; 576 577 if (bank->regs->set_irqenable) { 578 reg += bank->regs->set_irqenable; 579 l = gpio_mask; 580 bank->context.irqenable1 |= gpio_mask; 581 } else { 582 reg += bank->regs->irqenable; 583 l = readl_relaxed(reg); 584 if (bank->regs->irqenable_inv) 585 l &= ~gpio_mask; 586 else 587 l |= gpio_mask; 588 bank->context.irqenable1 = l; 589 } 590 591 writel_relaxed(l, reg); 592 } 593 594 static void omap_disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) 595 { 596 void __iomem *reg = bank->base; 597 u32 l; 598 599 if (bank->regs->clr_irqenable) { 600 reg += bank->regs->clr_irqenable; 601 l = gpio_mask; 602 bank->context.irqenable1 &= ~gpio_mask; 603 } else { 604 reg += bank->regs->irqenable; 605 l = readl_relaxed(reg); 606 if (bank->regs->irqenable_inv) 607 l |= gpio_mask; 608 else 609 l &= ~gpio_mask; 610 bank->context.irqenable1 = l; 611 } 612 613 writel_relaxed(l, reg); 614 } 615 616 static inline void omap_set_gpio_irqenable(struct gpio_bank *bank, int gpio, 617 int enable) 618 { 619 if (enable) 620 omap_enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio)); 621 else 622 omap_disable_gpio_irqbank(bank, 
GPIO_BIT(bank, gpio)); 623 } 624 625 /* 626 * Note that ENAWAKEUP needs to be enabled in GPIO_SYSCONFIG register. 627 * 1510 does not seem to have a wake-up register. If JTAG is connected 628 * to the target, system will wake up always on GPIO events. While 629 * system is running all registered GPIO interrupts need to have wake-up 630 * enabled. When system is suspended, only selected GPIO interrupts need 631 * to have wake-up enabled. 632 */ 633 static int omap_set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable) 634 { 635 u32 gpio_bit = GPIO_BIT(bank, gpio); 636 unsigned long flags; 637 638 if (bank->non_wakeup_gpios & gpio_bit) { 639 dev_err(bank->dev, 640 "Unable to modify wakeup on non-wakeup GPIO%d\n", gpio); 641 return -EINVAL; 642 } 643 644 spin_lock_irqsave(&bank->lock, flags); 645 if (enable) 646 bank->context.wake_en |= gpio_bit; 647 else 648 bank->context.wake_en &= ~gpio_bit; 649 650 writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en); 651 spin_unlock_irqrestore(&bank->lock, flags); 652 653 return 0; 654 } 655 656 static void omap_reset_gpio(struct gpio_bank *bank, int gpio) 657 { 658 omap_set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1); 659 omap_set_gpio_irqenable(bank, gpio, 0); 660 omap_clear_gpio_irqstatus(bank, gpio); 661 omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE); 662 omap_clear_gpio_debounce(bank, gpio); 663 } 664 665 /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */ 666 static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable) 667 { 668 struct gpio_bank *bank = omap_irq_data_get_bank(d); 669 unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); 670 671 return omap_set_gpio_wakeup(bank, gpio, enable); 672 } 673 674 static int omap_gpio_request(struct gpio_chip *chip, unsigned offset) 675 { 676 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); 677 unsigned long flags; 678 679 /* 680 * If this is the first gpio_request for the 
bank, 681 * enable the bank module. 682 */ 683 if (!BANK_USED(bank)) 684 pm_runtime_get_sync(bank->dev); 685 686 spin_lock_irqsave(&bank->lock, flags); 687 /* Set trigger to none. You need to enable the desired trigger with 688 * request_irq() or set_irq_type(). Only do this if the IRQ line has 689 * not already been requested. 690 */ 691 if (!LINE_USED(bank->irq_usage, offset)) { 692 omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); 693 omap_enable_gpio_module(bank, offset); 694 } 695 bank->mod_usage |= BIT(offset); 696 spin_unlock_irqrestore(&bank->lock, flags); 697 698 return 0; 699 } 700 701 static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) 702 { 703 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); 704 unsigned long flags; 705 706 spin_lock_irqsave(&bank->lock, flags); 707 bank->mod_usage &= ~(BIT(offset)); 708 omap_disable_gpio_module(bank, offset); 709 omap_reset_gpio(bank, bank->chip.base + offset); 710 spin_unlock_irqrestore(&bank->lock, flags); 711 712 /* 713 * If this is the last gpio to be freed in the bank, 714 * disable the bank module. 715 */ 716 if (!BANK_USED(bank)) 717 pm_runtime_put(bank->dev); 718 } 719 720 /* 721 * We need to unmask the GPIO bank interrupt as soon as possible to 722 * avoid missing GPIO interrupts for other lines in the bank. 723 * Then we need to mask-read-clear-unmask the triggered GPIO lines 724 * in the bank to avoid missing nested interrupts for a GPIO line. 725 * If we wait to unmask individual GPIO lines in the bank after the 726 * line's interrupt handler has been run, we may miss some nested 727 * interrupts. 
728 */ 729 static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) 730 { 731 void __iomem *isr_reg = NULL; 732 u32 isr; 733 unsigned int bit; 734 struct gpio_bank *bank; 735 int unmasked = 0; 736 struct irq_chip *irqchip = irq_desc_get_chip(desc); 737 struct gpio_chip *chip = irq_get_handler_data(irq); 738 739 chained_irq_enter(irqchip, desc); 740 741 bank = container_of(chip, struct gpio_bank, chip); 742 isr_reg = bank->base + bank->regs->irqstatus; 743 pm_runtime_get_sync(bank->dev); 744 745 if (WARN_ON(!isr_reg)) 746 goto exit; 747 748 while (1) { 749 u32 isr_saved, level_mask = 0; 750 u32 enabled; 751 752 enabled = omap_get_gpio_irqbank_mask(bank); 753 isr_saved = isr = readl_relaxed(isr_reg) & enabled; 754 755 if (bank->level_mask) 756 level_mask = bank->level_mask & enabled; 757 758 /* clear edge sensitive interrupts before handler(s) are 759 called so that we don't miss any interrupt occurred while 760 executing them */ 761 omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask); 762 omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask); 763 omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask); 764 765 /* if there is only edge sensitive GPIO pin interrupts 766 configured, we could unmask GPIO bank interrupt immediately */ 767 if (!level_mask && !unmasked) { 768 unmasked = 1; 769 chained_irq_exit(irqchip, desc); 770 } 771 772 if (!isr) 773 break; 774 775 while (isr) { 776 bit = __ffs(isr); 777 isr &= ~(BIT(bit)); 778 779 /* 780 * Some chips can't respond to both rising and falling 781 * at the same time. If this irq was requested with 782 * both flags, we need to flip the ICR data for the IRQ 783 * to respond to the IRQ for the opposite direction. 784 * This will be indicated in the bank toggle_mask. 
785 */ 786 if (bank->toggle_mask & (BIT(bit))) 787 omap_toggle_gpio_edge_triggering(bank, bit); 788 789 generic_handle_irq(irq_find_mapping(bank->chip.irqdomain, 790 bit)); 791 } 792 } 793 /* if bank has any level sensitive GPIO pin interrupt 794 configured, we must unmask the bank interrupt only after 795 handler(s) are executed in order to avoid spurious bank 796 interrupt */ 797 exit: 798 if (!unmasked) 799 chained_irq_exit(irqchip, desc); 800 pm_runtime_put(bank->dev); 801 } 802 803 static unsigned int omap_gpio_irq_startup(struct irq_data *d) 804 { 805 struct gpio_bank *bank = omap_irq_data_get_bank(d); 806 unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); 807 unsigned long flags; 808 unsigned offset = GPIO_INDEX(bank, gpio); 809 810 if (!BANK_USED(bank)) 811 pm_runtime_get_sync(bank->dev); 812 813 spin_lock_irqsave(&bank->lock, flags); 814 omap_gpio_init_irq(bank, gpio, offset); 815 spin_unlock_irqrestore(&bank->lock, flags); 816 omap_gpio_unmask_irq(d); 817 818 return 0; 819 } 820 821 static void omap_gpio_irq_shutdown(struct irq_data *d) 822 { 823 struct gpio_bank *bank = omap_irq_data_get_bank(d); 824 unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); 825 unsigned long flags; 826 unsigned offset = GPIO_INDEX(bank, gpio); 827 828 spin_lock_irqsave(&bank->lock, flags); 829 gpiochip_unlock_as_irq(&bank->chip, offset); 830 bank->irq_usage &= ~(BIT(offset)); 831 omap_disable_gpio_module(bank, offset); 832 omap_reset_gpio(bank, gpio); 833 spin_unlock_irqrestore(&bank->lock, flags); 834 835 /* 836 * If this is the last IRQ to be freed in the bank, 837 * disable the bank module. 
838 */ 839 if (!BANK_USED(bank)) 840 pm_runtime_put(bank->dev); 841 } 842 843 static void omap_gpio_ack_irq(struct irq_data *d) 844 { 845 struct gpio_bank *bank = omap_irq_data_get_bank(d); 846 unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); 847 848 omap_clear_gpio_irqstatus(bank, gpio); 849 } 850 851 static void omap_gpio_mask_irq(struct irq_data *d) 852 { 853 struct gpio_bank *bank = omap_irq_data_get_bank(d); 854 unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); 855 unsigned long flags; 856 857 spin_lock_irqsave(&bank->lock, flags); 858 omap_set_gpio_irqenable(bank, gpio, 0); 859 omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE); 860 spin_unlock_irqrestore(&bank->lock, flags); 861 } 862 863 static void omap_gpio_unmask_irq(struct irq_data *d) 864 { 865 struct gpio_bank *bank = omap_irq_data_get_bank(d); 866 unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); 867 unsigned int irq_mask = GPIO_BIT(bank, gpio); 868 u32 trigger = irqd_get_trigger_type(d); 869 unsigned long flags; 870 871 spin_lock_irqsave(&bank->lock, flags); 872 if (trigger) 873 omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger); 874 875 /* For level-triggered GPIOs, the clearing must be done after 876 * the HW source is cleared, thus after the handler has run */ 877 if (bank->level_mask & irq_mask) { 878 omap_set_gpio_irqenable(bank, gpio, 0); 879 omap_clear_gpio_irqstatus(bank, gpio); 880 } 881 882 omap_set_gpio_irqenable(bank, gpio, 1); 883 spin_unlock_irqrestore(&bank->lock, flags); 884 } 885 886 /*---------------------------------------------------------------------*/ 887 888 static int omap_mpuio_suspend_noirq(struct device *dev) 889 { 890 struct platform_device *pdev = to_platform_device(dev); 891 struct gpio_bank *bank = platform_get_drvdata(pdev); 892 void __iomem *mask_reg = bank->base + 893 OMAP_MPUIO_GPIO_MASKIT / bank->stride; 894 unsigned long flags; 895 896 spin_lock_irqsave(&bank->lock, flags); 897 writel_relaxed(0xffff & 
~bank->context.wake_en, mask_reg); 898 spin_unlock_irqrestore(&bank->lock, flags); 899 900 return 0; 901 } 902 903 static int omap_mpuio_resume_noirq(struct device *dev) 904 { 905 struct platform_device *pdev = to_platform_device(dev); 906 struct gpio_bank *bank = platform_get_drvdata(pdev); 907 void __iomem *mask_reg = bank->base + 908 OMAP_MPUIO_GPIO_MASKIT / bank->stride; 909 unsigned long flags; 910 911 spin_lock_irqsave(&bank->lock, flags); 912 writel_relaxed(bank->context.wake_en, mask_reg); 913 spin_unlock_irqrestore(&bank->lock, flags); 914 915 return 0; 916 } 917 918 static const struct dev_pm_ops omap_mpuio_dev_pm_ops = { 919 .suspend_noirq = omap_mpuio_suspend_noirq, 920 .resume_noirq = omap_mpuio_resume_noirq, 921 }; 922 923 /* use platform_driver for this. */ 924 static struct platform_driver omap_mpuio_driver = { 925 .driver = { 926 .name = "mpuio", 927 .pm = &omap_mpuio_dev_pm_ops, 928 }, 929 }; 930 931 static struct platform_device omap_mpuio_device = { 932 .name = "mpuio", 933 .id = -1, 934 .dev = { 935 .driver = &omap_mpuio_driver.driver, 936 } 937 /* could list the /proc/iomem resources */ 938 }; 939 940 static inline void omap_mpuio_init(struct gpio_bank *bank) 941 { 942 platform_set_drvdata(&omap_mpuio_device, bank); 943 944 if (platform_driver_register(&omap_mpuio_driver) == 0) 945 (void) platform_device_register(&omap_mpuio_device); 946 } 947 948 /*---------------------------------------------------------------------*/ 949 950 static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset) 951 { 952 struct gpio_bank *bank; 953 unsigned long flags; 954 void __iomem *reg; 955 int dir; 956 957 bank = container_of(chip, struct gpio_bank, chip); 958 reg = bank->base + bank->regs->direction; 959 spin_lock_irqsave(&bank->lock, flags); 960 dir = !!(readl_relaxed(reg) & BIT(offset)); 961 spin_unlock_irqrestore(&bank->lock, flags); 962 return dir; 963 } 964 965 static int omap_gpio_input(struct gpio_chip *chip, unsigned offset) 966 { 967 
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_direction(bank, offset, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

/*
 * gpio_chip .get: read DATAIN for inputs and DATAOUT for outputs, so a
 * sensible value is returned in both directions.
 */
static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	u32 mask;

	bank = container_of(chip, struct gpio_bank, chip);
	mask = (BIT(offset));

	if (omap_gpio_is_input(bank, mask))
		return omap_get_gpio_datain(bank, offset);
	else
		return omap_get_gpio_dataout(bank, offset);
}

/*
 * gpio_chip .direction_output: program the output value before flipping
 * the direction so the line never drives a stale level.
 */
static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	omap_set_gpio_direction(bank, offset, 0);
	spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

/*
 * gpio_chip .set_debounce: forwarded to omap2_set_gpio_debounce() under
 * the bank lock.
 */
static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
			      unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);

	spin_lock_irqsave(&bank->lock, flags);
	omap2_set_gpio_debounce(bank, offset, debounce);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

/* gpio_chip .set: write the output level via the bank's set_dataout helper. */
static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/

/*
 * Print the IP revision once, for the first bank that has a revision
 * register (regs->revision == USHRT_MAX means "no such register").
 */
static void __init omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = readw_relaxed(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}

/*
 * One-time hardware init for a bank: put the IRQ enable/status registers
 * into their inactive state, clear debounce, save the OE reset value and
 * grab the optional debounce functional clock.
 */
static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		writel_relaxed(l, bank->base + bank->regs->irqenable);
		return;
	}

	/* irqenable_inv flags banks whose enable register is inverted. */
	omap_gpio_rmw(base, bank->regs->irqenable, l,
		      bank->regs->irqenable_inv);
	omap_gpio_rmw(base, bank->regs->irqstatus, l,
		      !bank->regs->irqenable_inv);
	if (bank->regs->debounce_en)
		writel_relaxed(0, base + bank->regs->debounce_en);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
	/* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		writel_relaxed(0, base + bank->regs->ctrl);

	/* Optional debounce clock; absence is reported but not fatal. */
	bank->dbck = clk_get(bank->dev, "dbclk");
	if (IS_ERR(bank->dbck))
		dev_err(bank->dev, "Could not get gpio dbck\n");
}

/*
 * Build a generic irq_chip for the MPUIO bank, whose mask register sits
 * at OMAP_MPUIO_GPIO_MASKIT (scaled by the bank's register stride).
 */
static void
omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
		    unsigned int num)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
				    handle_simple_irq);
	if (!gc) {
		dev_err(bank->dev, "Memory alloc failed for gc\n");
		return;
	}

	ct = gc->chip_types;

	/* NOTE: No ack required, reading IRQ status clears it. */
	ct->chip.irq_mask = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_set_type = omap_gpio_irq_type;

	if (bank->regs->wkup_en)
		ct->chip.irq_set_wake = omap_gpio_wake_enable;

	ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}

/*
 * Register the gpio_chip and its irqchip for one bank.  The static
 * 'gpio' counter hands out consecutive GPIO bases to successive
 * non-MPUIO banks.
 */
static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
{
	int j;
	static int gpio;
	int irq_base = 0;
	int ret;

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.get_direction = omap_gpio_get_direction;
	bank->chip.direction_input = omap_gpio_input;
	bank->chip.get = omap_gpio_get;
	bank->chip.direction_output = omap_gpio_output;
	bank->chip.set_debounce = omap_gpio_debounce;
	bank->chip.set = omap_gpio_set;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.dev = &omap_mpuio_device.dev;
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		bank->chip.label = "gpio";
		bank->chip.base = gpio;
		gpio += bank->width;
	}
	bank->chip.ngpio = bank->width;

	ret = gpiochip_add(&bank->chip);
	if (ret) {
		dev_err(bank->dev, "Could not register gpio chip %d\n", ret);
		return ret;
	}

#ifdef CONFIG_ARCH_OMAP1
	/*
	 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
	 * irq_alloc_descs() since a base IRQ offset will no longer be needed.
1148 */ 1149 irq_base = irq_alloc_descs(-1, 0, bank->width, 0); 1150 if (irq_base < 0) { 1151 dev_err(bank->dev, "Couldn't allocate IRQ numbers\n"); 1152 return -ENODEV; 1153 } 1154 #endif 1155 1156 ret = gpiochip_irqchip_add(&bank->chip, irqc, 1157 irq_base, omap_gpio_irq_handler, 1158 IRQ_TYPE_NONE); 1159 1160 if (ret) { 1161 dev_err(bank->dev, "Couldn't add irqchip to gpiochip %d\n", ret); 1162 gpiochip_remove(&bank->chip); 1163 return -ENODEV; 1164 } 1165 1166 gpiochip_set_chained_irqchip(&bank->chip, irqc, 1167 bank->irq, omap_gpio_irq_handler); 1168 1169 for (j = 0; j < bank->width; j++) { 1170 int irq = irq_find_mapping(bank->chip.irqdomain, j); 1171 if (bank->is_mpuio) { 1172 omap_mpuio_alloc_gc(bank, irq, bank->width); 1173 irq_set_chip_and_handler(irq, NULL, NULL); 1174 set_irq_flags(irq, 0); 1175 } 1176 } 1177 1178 return 0; 1179 } 1180 1181 static const struct of_device_id omap_gpio_match[]; 1182 1183 static int omap_gpio_probe(struct platform_device *pdev) 1184 { 1185 struct device *dev = &pdev->dev; 1186 struct device_node *node = dev->of_node; 1187 const struct of_device_id *match; 1188 const struct omap_gpio_platform_data *pdata; 1189 struct resource *res; 1190 struct gpio_bank *bank; 1191 struct irq_chip *irqc; 1192 int ret; 1193 1194 match = of_match_device(of_match_ptr(omap_gpio_match), dev); 1195 1196 pdata = match ? 
match->data : dev_get_platdata(dev); 1197 if (!pdata) 1198 return -EINVAL; 1199 1200 bank = devm_kzalloc(dev, sizeof(struct gpio_bank), GFP_KERNEL); 1201 if (!bank) { 1202 dev_err(dev, "Memory alloc failed\n"); 1203 return -ENOMEM; 1204 } 1205 1206 irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL); 1207 if (!irqc) 1208 return -ENOMEM; 1209 1210 irqc->irq_startup = omap_gpio_irq_startup, 1211 irqc->irq_shutdown = omap_gpio_irq_shutdown, 1212 irqc->irq_ack = omap_gpio_ack_irq, 1213 irqc->irq_mask = omap_gpio_mask_irq, 1214 irqc->irq_unmask = omap_gpio_unmask_irq, 1215 irqc->irq_set_type = omap_gpio_irq_type, 1216 irqc->irq_set_wake = omap_gpio_wake_enable, 1217 irqc->name = dev_name(&pdev->dev); 1218 1219 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1220 if (unlikely(!res)) { 1221 dev_err(dev, "Invalid IRQ resource\n"); 1222 return -ENODEV; 1223 } 1224 1225 bank->irq = res->start; 1226 bank->dev = dev; 1227 bank->chip.dev = dev; 1228 bank->dbck_flag = pdata->dbck_flag; 1229 bank->stride = pdata->bank_stride; 1230 bank->width = pdata->bank_width; 1231 bank->is_mpuio = pdata->is_mpuio; 1232 bank->non_wakeup_gpios = pdata->non_wakeup_gpios; 1233 bank->regs = pdata->regs; 1234 #ifdef CONFIG_OF_GPIO 1235 bank->chip.of_node = of_node_get(node); 1236 #endif 1237 if (node) { 1238 if (!of_property_read_bool(node, "ti,gpio-always-on")) 1239 bank->loses_context = true; 1240 } else { 1241 bank->loses_context = pdata->loses_context; 1242 1243 if (bank->loses_context) 1244 bank->get_context_loss_count = 1245 pdata->get_context_loss_count; 1246 } 1247 1248 if (bank->regs->set_dataout && bank->regs->clr_dataout) 1249 bank->set_dataout = omap_set_gpio_dataout_reg; 1250 else 1251 bank->set_dataout = omap_set_gpio_dataout_mask; 1252 1253 spin_lock_init(&bank->lock); 1254 1255 /* Static mapping, never released */ 1256 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1257 bank->base = devm_ioremap_resource(dev, res); 1258 if (IS_ERR(bank->base)) { 1259 
irq_domain_remove(bank->chip.irqdomain); 1260 return PTR_ERR(bank->base); 1261 } 1262 1263 platform_set_drvdata(pdev, bank); 1264 1265 pm_runtime_enable(bank->dev); 1266 pm_runtime_irq_safe(bank->dev); 1267 pm_runtime_get_sync(bank->dev); 1268 1269 if (bank->is_mpuio) 1270 omap_mpuio_init(bank); 1271 1272 omap_gpio_mod_init(bank); 1273 1274 ret = omap_gpio_chip_init(bank, irqc); 1275 if (ret) 1276 return ret; 1277 1278 omap_gpio_show_rev(bank); 1279 1280 pm_runtime_put(bank->dev); 1281 1282 list_add_tail(&bank->node, &omap_gpio_list); 1283 1284 return 0; 1285 } 1286 1287 #ifdef CONFIG_ARCH_OMAP2PLUS 1288 1289 #if defined(CONFIG_PM) 1290 static void omap_gpio_restore_context(struct gpio_bank *bank); 1291 1292 static int omap_gpio_runtime_suspend(struct device *dev) 1293 { 1294 struct platform_device *pdev = to_platform_device(dev); 1295 struct gpio_bank *bank = platform_get_drvdata(pdev); 1296 u32 l1 = 0, l2 = 0; 1297 unsigned long flags; 1298 u32 wake_low, wake_hi; 1299 1300 spin_lock_irqsave(&bank->lock, flags); 1301 1302 /* 1303 * Only edges can generate a wakeup event to the PRCM. 1304 * 1305 * Therefore, ensure any wake-up capable GPIOs have 1306 * edge-detection enabled before going idle to ensure a wakeup 1307 * to the PRCM is generated on a GPIO transition. (c.f. 34xx 1308 * NDA TRM 25.5.3.1) 1309 * 1310 * The normal values will be restored upon ->runtime_resume() 1311 * by writing back the values saved in bank->context. 
 */
	wake_low = bank->context.leveldetect0 & bank->context.wake_en;
	if (wake_low)
		writel_relaxed(wake_low | bank->context.fallingdetect,
			       bank->base + bank->regs->fallingdetect);
	wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
	if (wake_hi)
		writel_relaxed(wake_hi | bank->context.risingdetect,
			       bank->base + bank->regs->risingdetect);

	if (!bank->enabled_non_wakeup_gpios)
		goto update_gpio_context_count;

	/* Not entering OFF mode: the erratum workaround is not needed. */
	if (bank->power_mode != OFF_MODE) {
		bank->power_mode = 0;
		goto update_gpio_context_count;
	}
	/*
	 * If going to OFF, remove triggering for all
	 * non-wakeup GPIOs.  Otherwise spurious IRQs will be
	 * generated.  See OMAP2420 Errata item 1.101.
	 */
	bank->saved_datain = readl_relaxed(bank->base +
						bank->regs->datain);
	l1 = bank->context.fallingdetect;
	l2 = bank->context.risingdetect;

	l1 &= ~bank->enabled_non_wakeup_gpios;
	l2 &= ~bank->enabled_non_wakeup_gpios;

	writel_relaxed(l1, bank->base + bank->regs->fallingdetect);
	writel_relaxed(l2, bank->base + bank->regs->risingdetect);

	/* Tells ->runtime_resume() to replay any missed transitions. */
	bank->workaround_enabled = true;

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);

	omap_gpio_dbck_disable(bank);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_init_context(struct gpio_bank *p);

/*
 * Runtime resume: restore edge/level triggering, restore full register
 * context if it was lost, and synthesize IRQs for edges missed on
 * non-wakeup GPIOs while the erratum workaround was active.
 */
static int omap_gpio_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l = 0, gen, gen0, gen1;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&bank->lock, flags);

	/*
	 * On the first resume during the probe, the context has not
	 * been initialised and so initialise it now. Also initialise
	 * the context loss count.
	 */
	if (bank->loses_context && !bank->context_valid) {
		omap_gpio_init_context(bank);

		if (bank->get_context_loss_count)
			bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);
	}

	omap_gpio_dbck_enable(bank);

	/*
	 * In ->runtime_suspend(), level-triggered, wakeup-enabled
	 * GPIOs were set to edge trigger also in order to be able to
	 * generate a PRCM wakeup.  Here we restore the
	 * pre-runtime_suspend() values for edge triggering.
	 */
	writel_relaxed(bank->context.fallingdetect,
		       bank->base + bank->regs->fallingdetect);
	writel_relaxed(bank->context.risingdetect,
		       bank->base + bank->regs->risingdetect);

	if (bank->loses_context) {
		if (!bank->get_context_loss_count) {
			/* No loss counter available: always restore. */
			omap_gpio_restore_context(bank);
		} else {
			c = bank->get_context_loss_count(bank->dev);
			if (c != bank->context_loss_count) {
				omap_gpio_restore_context(bank);
			} else {
				/* Context intact: nothing more to do. */
				spin_unlock_irqrestore(&bank->lock, flags);
				return 0;
			}
		}
	}

	if (!bank->workaround_enabled) {
		spin_unlock_irqrestore(&bank->lock, flags);
		return 0;
	}

	l = readl_relaxed(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state.  If so, generate an IRQ by software.  This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->context.fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->context.risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->context.fallingdetect) &
					 ~(bank->context.risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
		old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);

		/*
		 * Momentarily enable level detection for the changed lines
		 * to latch an interrupt, then restore the old settings.
		 */
		if (!bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | gen, bank->base +
						bank->regs->leveldetect0);
			writel_relaxed(old1 | gen, bank->base +
						bank->regs->leveldetect1);
		}

		if (bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | l, bank->base +
						bank->regs->leveldetect0);
			writel_relaxed(old1 | l, bank->base +
						bank->regs->leveldetect1);
		}
		writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
		writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
	}

	bank->workaround_enabled = false;
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
#endif /* CONFIG_PM */

/* Called from OMAP2+ PM core before idle: let in-use banks runtime suspend. */
void omap2_gpio_prepare_for_idle(int pwr_mode)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!BANK_USED(bank) || !bank->loses_context)
			continue;

		bank->power_mode = pwr_mode;

		pm_runtime_put_sync_suspend(bank->dev);
	}
}

/* Counterpart of omap2_gpio_prepare_for_idle(): resume in-use banks. */
void omap2_gpio_resume_after_idle(void)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!BANK_USED(bank) || !bank->loses_context)
			continue;

		pm_runtime_get_sync(bank->dev);
	}
}

#if defined(CONFIG_PM)
/*
 * Capture the current register state into p->context; done once on the
 * first runtime resume after probe.
 */
static void omap_gpio_init_context(struct gpio_bank *p)
{
	struct omap_gpio_reg_offs *regs = p->regs;
	void __iomem *base = p->base;

	p->context.ctrl = readl_relaxed(base + regs->ctrl);
	p->context.oe = readl_relaxed(base + regs->direction);
	p->context.wake_en = readl_relaxed(base +
regs->wkup_en);
	p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
	p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
	p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
	p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
	p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
	p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);

	/* Banks with set/clr registers mirror dataout in the set register. */
	if (regs->set_dataout && p->regs->clr_dataout)
		p->context.dataout = readl_relaxed(base + regs->set_dataout);
	else
		p->context.dataout = readl_relaxed(base + regs->dataout);

	p->context_valid = true;
}

/*
 * Write the saved register context back after an off-mode context loss.
 * NOTE(review): the write order (wkup_en/ctrl first, dataout before OE,
 * irqenable last) appears deliberate - preserve it when modifying.
 */
static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	writel_relaxed(bank->context.wake_en,
				bank->base + bank->regs->wkup_en);
	writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
	writel_relaxed(bank->context.leveldetect0,
				bank->base + bank->regs->leveldetect0);
	writel_relaxed(bank->context.leveldetect1,
				bank->base + bank->regs->leveldetect1);
	writel_relaxed(bank->context.risingdetect,
				bank->base + bank->regs->risingdetect);
	writel_relaxed(bank->context.fallingdetect,
				bank->base + bank->regs->fallingdetect);
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		writel_relaxed(bank->context.dataout,
				bank->base + bank->regs->set_dataout);
	else
		writel_relaxed(bank->context.dataout,
				bank->base + bank->regs->dataout);
	writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);

	if (bank->dbck_enable_mask) {
		writel_relaxed(bank->context.debounce, bank->base +
					bank->regs->debounce);
		writel_relaxed(bank->context.debounce_en,
				bank->base + bank->regs->debounce_en);
	}

	writel_relaxed(bank->context.irqenable1,
				bank->base + bank->regs->irqenable);
	writel_relaxed(bank->context.irqenable2,
				bank->base + bank->regs->irqenable2);
}
#endif /* CONFIG_PM */
#else
/* No OMAP2+ runtime PM: stub out the callbacks. */
#define omap_gpio_runtime_suspend NULL
#define omap_gpio_runtime_resume NULL
static inline void omap_gpio_init_context(struct gpio_bank *p) {}
#endif

static const struct dev_pm_ops gpio_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
									NULL)
};

#if defined(CONFIG_OF)
/* Register layout shared by OMAP2 and OMAP3 (see the pdata below). */
static struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision =		OMAP24XX_GPIO_REVISION,
	.direction =		OMAP24XX_GPIO_OE,
	.datain =		OMAP24XX_GPIO_DATAIN,
	.dataout =		OMAP24XX_GPIO_DATAOUT,
	.set_dataout =		OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 =		OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable =		OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 =		OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable =	OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable =	OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce =		OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en =		OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl =			OMAP24XX_GPIO_CTRL,
	.wkup_en =		OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 =		OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP24XX_GPIO_FALLINGDETECT,
};

/* OMAP4+: irq enable/status are exposed through set/clear registers. */
static struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision =		OMAP4_GPIO_REVISION,
	.direction =		OMAP4_GPIO_OE,
	.datain =		OMAP4_GPIO_DATAIN,
	.dataout =		OMAP4_GPIO_DATAOUT,
	.set_dataout =		OMAP4_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 =		OMAP4_GPIO_IRQSTATUS1,
	.irqenable =		OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 =		OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable =	OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable =	OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce =		OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en =		OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl =			OMAP4_GPIO_CTRL,
	.wkup_en =		OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 =		OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP4_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP4_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP4_GPIO_FALLINGDETECT,
};

static const struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

static const struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

/* Most specific compatible first so later SoCs match their own pdata. */
static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
#endif

static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.driver		= {
		.name	= "omap_gpio",
		.pm	= &gpio_pm_ops,
		.of_match_table = of_match_ptr(omap_gpio_match),
	},
};

/*
 * gpio driver register needs to be done before
 * machine_init functions access gpio APIs.
 * Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);