/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gpio.h>
#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>

#define OFF_MODE	1
#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF

/* All probed banks are linked here (see omap_gpio_probe/remove). */
static LIST_HEAD(omap_gpio_list);

/*
 * Cached copy of the bank's interrupt/configuration registers; kept in
 * sync with every hardware write so state can be restored after the
 * module loses context (off-mode).
 */
struct gpio_regs {
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};

/* Per-bank driver state; one instance per GPIO bank on the SoC. */
struct gpio_bank {
	struct list_head node;		/* entry in omap_gpio_list */
	void __iomem *base;		/* ioremapped register base */
	int irq;			/* bank's summary interrupt */
	u32 non_wakeup_gpios;		/* lines with no wakeup capability */
	u32 enabled_non_wakeup_gpios;	/* edge lines to re-check on resume */
	struct gpio_regs context;	/* cached register context */
	u32 saved_datain;
	u32 level_mask;			/* lines configured level-triggered */
	u32 toggle_mask;		/* OMAP1 both-edge emulation lines */
	raw_spinlock_t lock;		/* protects registers and context */
	raw_spinlock_t wa_lock;		/* serializes nested IRQ dispatch */
	struct gpio_chip chip;
	struct clk *dbck;		/* optional debounce functional clock */
	u32 mod_usage;			/* bitmask of gpio_request'ed lines */
	u32 irq_usage;			/* bitmask of lines used as IRQs */
	u32 dbck_enable_mask;		/* lines with debounce enabled */
	bool dbck_enabled;		/* dbck currently clk_enable'd */
	struct device *dev;
	bool is_mpuio;			/* MPUIO bank (OMAP1) quirks apply */
	bool dbck_flag;			/* bank supports debounce clocking */
	bool loses_context;
	bool context_valid;
	int stride;			/* MPUIO register stride divisor */
	u32 width;			/* number of GPIO lines in bank */
	int context_loss_count;
	int power_mode;
	bool workaround_enabled;

	/* set/clear a data-out bit; chosen per-SoC in probe */
	void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);

	struct omap_gpio_reg_offs *regs;	/* per-SoC register offsets */
};

#define GPIO_MOD_CTRL_BIT	BIT(0)

#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
#define LINE_USED(line, offset) (line & (BIT(offset)))

static void omap_gpio_unmask_irq(struct irq_data *d);

/* Map an irq_data back to its owning bank via the embedded gpio_chip. */
static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
	return container_of(chip, struct gpio_bank, chip);
}

/*
 * Program @gpio as input (is_input != 0) or output in the direction
 * register and mirror the result into the cached context.
 */
static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
				    int is_input)
{
	void __iomem *reg = bank->base;
	u32 l;

	reg += bank->regs->direction;
	l = readl_relaxed(reg);
	if (is_input)
		l |= BIT(gpio);
	else
		l &= ~(BIT(gpio));
	writel_relaxed(l, reg);
	bank->context.oe = l;
}


/* set data out value using dedicate set/clear register */
static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
				      int enable)
{
	void __iomem *reg = bank->base;
	u32 l = BIT(offset);

	if (enable) {
		reg += bank->regs->set_dataout;
		bank->context.dataout |= l;
	} else {
		reg += bank->regs->clr_dataout;
		bank->context.dataout &= ~l;
	}

	writel_relaxed(l, reg);
}

/* set data out value using mask register */
static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
				       int enable)
{
	void __iomem *reg = bank->base + bank->regs->dataout;
	u32 gpio_bit = BIT(offset);
	u32 l;

	l = readl_relaxed(reg);
	if (enable)
		l |= gpio_bit;
	else
		l &= ~gpio_bit;
	writel_relaxed(l, reg);
	bank->context.dataout = l;
}

/* Read the input level of line @offset; returns 0 or 1. */
static int omap_get_gpio_datain(struct gpio_bank *bank, int offset)
{
	void __iomem *reg = bank->base + bank->regs->datain;

	return (readl_relaxed(reg) & (BIT(offset))) != 0;
}

/* Read back the programmed output level of line @offset; returns 0 or 1. */
static int omap_get_gpio_dataout(struct gpio_bank *bank, int offset)
{
	void __iomem *reg = bank->base + bank->regs->dataout;

	return (readl_relaxed(reg) & (BIT(offset))) != 0;
}

/* Read-modify-write helper: set or clear @mask bits in register @reg. */
static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
{
	int l = readl_relaxed(base + reg);

	if (set)
		l |= mask;
	else
		l &= ~mask;

	writel_relaxed(l, base + reg);
}

/* Turn on the debounce clock and re-enable debounce for the cached mask. */
static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_enable(bank->dbck);
		bank->dbck_enabled = true;

		writel_relaxed(bank->dbck_enable_mask,
			       bank->base + bank->regs->debounce_en);
	}
}

static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		/*
		 * Disable debounce before cutting it's clock. If debounce is
		 * enabled but the clock is not, GPIO module seems to be unable
		 * to detect events and generate interrupts at least on OMAP3.
		 */
		writel_relaxed(0, bank->base + bank->regs->debounce_en);

		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/**
 * omap2_set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps
 *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
 * so we need to convert and round up to the closest unit.
 */
static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
				    unsigned debounce)
{
	void __iomem *reg;
	u32 val;
	u32 l;
	bool enable = !!debounce;

	/* Bank has no debounce support at all: silently ignore. */
	if (!bank->dbck_flag)
		return;

	if (enable) {
		/* Convert us to 31us units, register field is value-1. */
		debounce = DIV_ROUND_UP(debounce, 31) - 1;
		debounce &= OMAP4_GPIO_DEBOUNCINGTIME_MASK;
	}

	l = BIT(offset);

	/* dbck must be running while the debounce registers are written */
	clk_enable(bank->dbck);
	reg = bank->base + bank->regs->debounce;
	writel_relaxed(debounce, reg);

	reg = bank->base + bank->regs->debounce_en;
	val = readl_relaxed(reg);

	if (enable)
		val |= l;
	else
		val &= ~l;
	bank->dbck_enable_mask = val;

	writel_relaxed(val, reg);
	clk_disable(bank->dbck);
	/*
	 * Enable debounce clock per module.
	 * This call is mandatory because in omap_gpio_request() when
	 * *_runtime_get_sync() is called, _gpio_dbck_enable() within
	 * runtime callbck fails to turn on dbck because dbck_enable_mask
	 * used within _gpio_dbck_enable() is still not initialized at
	 * that point. Therefore we have to enable dbck here.
	 */
	omap_gpio_dbck_enable(bank);
	/* Only cache context while at least one line still uses debounce. */
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}
}

/**
 * omap_clear_gpio_debounce - clear debounce settings for a gpio
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 *
 * If a gpio is using debounce, then clear the debounce enable bit and if
 * this is the only gpio in this bank using debounce, then clear the debounce
 * time too. The debounce clock will also be disabled when calling this function
 * if this is the only gpio in the bank using debounce.
 */
static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
{
	u32 gpio_bit = BIT(offset);

	if (!bank->dbck_flag)
		return;

	/* Nothing to do if this line never had debounce enabled. */
	if (!(bank->dbck_enable_mask & gpio_bit))
		return;

	bank->dbck_enable_mask &= ~gpio_bit;
	bank->context.debounce_en &= ~gpio_bit;
	writel_relaxed(bank->context.debounce_en,
		       bank->base + bank->regs->debounce_en);

	/* Last user gone: clear the shared debounce time and gate dbck. */
	if (!bank->dbck_enable_mask) {
		bank->context.debounce = 0;
		writel_relaxed(bank->context.debounce, bank->base +
			       bank->regs->debounce);
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/*
 * Program the level/edge detect registers for @gpio according to
 * @trigger (IRQ_TYPE_* flags) and refresh the cached context, wakeup
 * enable and bank-wide level mask. Used on OMAP2+ style banks.
 */
static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
					 unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = BIT(gpio);

	omap_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_LOW);
	omap_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_HIGH);
	omap_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
		      trigger & IRQ_TYPE_EDGE_RISING);
	omap_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
		      trigger & IRQ_TYPE_EDGE_FALLING);

	/* Re-read the hardware so the cached context matches exactly. */
	bank->context.leveldetect0 =
			readl_relaxed(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
			readl_relaxed(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
			readl_relaxed(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
			readl_relaxed(bank->base + bank->regs->fallingdetect);

	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
		omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
	}

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl) {
		/* On omap24xx proceed only when valid GPIO bit is set */
		if (bank->non_wakeup_gpios) {
			if (!(bank->non_wakeup_gpios & gpio_bit))
				goto exit;
		}

		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes
		 * to avoid irq lost during PER RET/OFF mode
		 * Applies for omap2 non-wakeup gpio and all omap3 gpios
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}

exit:
	bank->level_mask =
		readl_relaxed(bank->base + bank->regs->leveldetect0) |
		readl_relaxed(bank->base + bank->regs->leveldetect1);
}

#ifdef CONFIG_ARCH_OMAP1
/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once. For all other chips, this function is a noop.
 */
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (!bank->regs->irqctrl)
		return;

	reg += bank->regs->irqctrl;

	/* Flip the edge-select bit for this line. */
	l = readl_relaxed(reg);
	if ((l >> gpio) & 1)
		l &= ~(BIT(gpio));
	else
		l |= BIT(gpio);

	writel_relaxed(l, reg);
}
#else
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
#endif

/*
 * Configure the trigger type of @gpio, dispatching on which register
 * scheme the bank provides: leveldetect/wkup_en (OMAP2+), irqctrl
 * (OMAP1 non-MPUIO) or edgectrl1/2 (OMAP1 MPUIO). Returns 0 or -EINVAL
 * for trigger types the hardware cannot express.
 */
static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
				    unsigned trigger)
{
	void __iomem *reg = bank->base;
	void __iomem *base = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		omap_set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = readl_relaxed(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			/* hardware does one edge at a time; remember to flip */
			bank->toggle_mask |= BIT(gpio);
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= BIT(gpio);
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(BIT(gpio));
		else
			return -EINVAL;

		writel_relaxed(l, reg);
	} else if (bank->regs->edgectrl1) {
		/* Two edge-control registers, 8 lines (2 bits each) apiece. */
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = readl_relaxed(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= BIT(gpio << 1);

		/* Enable wake-up during idle for dynamic tick */
		omap_gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
		writel_relaxed(l, reg);
	}
	return 0;
}

/*
 * Claim @offset for the MPU and, on first use in the bank, ungate the
 * module clocks via the ctrl register.
 */
static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

/*
 * Drop wakeup enable for an unused line and, when the bank has no users
 * left, gate the module clocks via the ctrl register.
 */
static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *base = bank->base;

	if (bank->regs->wkup_en &&
	    !LINE_USED(bank->mod_usage, offset) &&
	    !LINE_USED(bank->irq_usage, offset)) {
		/* Disable wake-up during idle for dynamic tick */
		omap_gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

/* Non-zero when line @offset is currently configured as an input. */
static int omap_gpio_is_input(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return readl_relaxed(reg) & BIT(offset);
}

/*
 * Prepare @offset for IRQ use: if it was not gpio_request'ed, enable
 * the module and default the line to input, then mark it in irq_usage.
 */
static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned offset)
{
	if (!LINE_USED(bank->mod_usage, offset)) {
		omap_enable_gpio_module(bank, offset);
		omap_set_gpio_direction(bank, offset, 1);
	}
	bank->irq_usage |= BIT(offset);
}

/*
 * irq_chip .irq_set_type callback: validate @type against the bank's
 * capabilities, program the trigger and pick the level/edge flow handler.
 */
static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	int retval;
	unsigned long flags;
	unsigned offset = d->hwirq;

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	/* Level triggering requires the leveldetect registers. */
	if (!bank->regs->leveldetect0 &&
	    (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	raw_spin_lock_irqsave(&bank->lock, flags);
	retval = omap_set_gpio_triggering(bank, offset, type);
	if (retval) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		goto error;
	}
	omap_gpio_init_irq(bank, offset);
	/* IRQs are only valid on lines configured as inputs. */
	if (!omap_gpio_is_input(bank, offset)) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		retval = -EINVAL;
		goto error;
	}
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		irq_set_handler_locked(d, handle_edge_irq);

	return 0;

error:
	return retval;
}

/* Acknowledge (clear) the status bits in @gpio_mask. */
static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	writel_relaxed(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		writel_relaxed(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	readl_relaxed(reg);
}

static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank,
					     unsigned offset)
{
	omap_clear_gpio_irqbank(bank, BIT(offset));
}

/* Return the mask of currently enabled IRQ lines in this bank. */
static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (BIT(bank->width)) - 1;

	reg += bank->regs->irqenable;
	l = readl_relaxed(reg);
	/* some banks (MPUIO) use an inverted "mask" register */
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}

/* Enable the IRQs in @gpio_mask, updating the cached irqenable1. */
static void omap_enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->set_irqenable) {
		/* dedicated set register: write-1-to-enable, no RMW needed */
		reg += bank->regs->set_irqenable;
		l = gpio_mask;
		bank->context.irqenable1 |= gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = readl_relaxed(reg);
		if (bank->regs->irqenable_inv)
			l &= ~gpio_mask;
		else
			l |= gpio_mask;
		bank->context.irqenable1 = l;
	}

	writel_relaxed(l, reg);
}

/* Disable the IRQs in @gpio_mask, updating the cached irqenable1. */
static void omap_disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->clr_irqenable) {
		/* dedicated clear register: write-1-to-disable */
		reg += bank->regs->clr_irqenable;
		l = gpio_mask;
		bank->context.irqenable1 &= ~gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = readl_relaxed(reg);
		if (bank->regs->irqenable_inv)
			l |= gpio_mask;
		else
			l &= ~gpio_mask;
		bank->context.irqenable1 = l;
	}

	writel_relaxed(l, reg);
}

static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
					   unsigned offset, int enable)
{
	if (enable)
		omap_enable_gpio_irqbank(bank, BIT(offset));
	else
		omap_disable_gpio_irqbank(bank, BIT(offset));
}

/*
 * Note that ENAWAKEUP needs to be enabled in GPIO_SYSCONFIG register.
 * 1510 does not seem to have a wake-up register. If JTAG is connected
 * to the target, system will wake up always on GPIO events.
 While
 * system is running all registered GPIO interrupts need to have wake-up
 * enabled. When system is suspended, only selected GPIO interrupts need
 * to have wake-up enabled.
 */
static int omap_set_gpio_wakeup(struct gpio_bank *bank, unsigned offset,
				int enable)
{
	u32 gpio_bit = BIT(offset);
	unsigned long flags;

	if (bank->non_wakeup_gpios & gpio_bit) {
		dev_err(bank->dev,
			"Unable to modify wakeup on non-wakeup GPIO%d\n",
			offset);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&bank->lock, flags);
	if (enable)
		bank->context.wake_en |= gpio_bit;
	else
		bank->context.wake_en &= ~gpio_bit;

	writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	int ret;

	ret = omap_set_gpio_wakeup(bank, offset, enable);
	/* also propagate wake state to the bank's summary interrupt */
	if (!ret)
		ret = irq_set_irq_wake(bank->irq, enable);

	return ret;
}

/* gpio_chip .request callback: power up the bank and enable the line. */
static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	/*
	 * If this is the first gpio_request for the bank,
	 * enable the bank module.
	 */
	if (!BANK_USED(bank))
		pm_runtime_get_sync(bank->dev);

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_enable_gpio_module(bank, offset);
	bank->mod_usage |= BIT(offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

/* gpio_chip .free callback: quiesce the line and drop the PM reference. */
static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->mod_usage &= ~(BIT(offset));
	if (!LINE_USED(bank->irq_usage, offset)) {
		/* park the line as a plain input with no debounce */
		omap_set_gpio_direction(bank, offset, 1);
		omap_clear_gpio_debounce(bank, offset);
	}
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	/*
	 * If this is the last gpio to be freed in the bank,
	 * disable the bank module.
	 */
	if (!BANK_USED(bank))
		pm_runtime_put(bank->dev);
}

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
{
	void __iomem *isr_reg = NULL;
	u32 isr;
	unsigned int bit;
	struct gpio_bank *bank = gpiobank;
	unsigned long wa_lock_flags;
	unsigned long lock_flags;

	isr_reg = bank->base + bank->regs->irqstatus;
	if (WARN_ON(!isr_reg))
		goto exit;

	pm_runtime_get_sync(bank->dev);

	/* loop until a status read shows no pending, enabled lines */
	while (1) {
		u32 isr_saved, level_mask = 0;
		u32 enabled;

		raw_spin_lock_irqsave(&bank->lock, lock_flags);

		enabled = omap_get_gpio_irqbank_mask(bank);
		isr_saved = isr = readl_relaxed(isr_reg) & enabled;

		if (bank->level_mask)
			level_mask = bank->level_mask & enabled;

		/* clear edge sensitive interrupts before handler(s) are
		called so that we don't miss any interrupt occurred while
		executing them */
		omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
		omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
		omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);

		raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

		if (!isr)
			break;

		while (isr) {
			bit = __ffs(isr);
			isr &= ~(BIT(bit));

			raw_spin_lock_irqsave(&bank->lock, lock_flags);
			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time. If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (BIT(bit)))
				omap_toggle_gpio_edge_triggering(bank, bit);

			raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

			raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);

			generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
							    bit));

			raw_spin_unlock_irqrestore(&bank->wa_lock,
						   wa_lock_flags);
		}
	}
exit:
	pm_runtime_put(bank->dev);
	return IRQ_HANDLED;
}

/*
 * irq_chip .irq_startup callback: make sure the line is a usable input
 * (or already an input), mark it as IRQ-used and unmask it.
 */
static unsigned int omap_gpio_irq_startup(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);

	if (!LINE_USED(bank->mod_usage, offset))
		omap_set_gpio_direction(bank, offset, 1);
	else if (!omap_gpio_is_input(bank, offset))
		goto err;
	omap_enable_gpio_module(bank, offset);
	bank->irq_usage |= BIT(offset);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
	omap_gpio_unmask_irq(d);

	return 0;
err:
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return -EINVAL;
}

/* irq_chip .irq_shutdown callback: fully quiesce the line as an IRQ. */
static void omap_gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->irq_usage &= ~(BIT(offset));
	omap_set_gpio_irqenable(bank, offset, 0);
	omap_clear_gpio_irqstatus(bank, offset);
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	if (!LINE_USED(bank->mod_usage, offset))
		omap_clear_gpio_debounce(bank, offset);
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

/* Keep the bank powered while slow-bus irqchip operations are in flight. */
static void omap_gpio_irq_bus_lock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	if (!BANK_USED(bank))
		pm_runtime_get_sync(bank->dev);
}

static void gpio_irq_bus_sync_unlock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	/*
	 * If this is the last IRQ to be freed in the bank,
	 * disable the bank module.
	 */
	if (!BANK_USED(bank))
		pm_runtime_put(bank->dev);
}

static void omap_gpio_ack_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;

	omap_clear_gpio_irqstatus(bank, offset);
}

static void omap_gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_irqenable(bank, offset, 0);
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	if (trigger)
		omap_set_gpio_triggering(bank, offset, trigger);

	/* For level-triggered GPIOs, the clearing must be done after
	 * the HW source is cleared, thus after the handler has run */
	if (bank->level_mask & BIT(offset)) {
		omap_set_gpio_irqenable(bank, offset, 0);
		omap_clear_gpio_irqstatus(bank, offset);
	}

	omap_set_gpio_irqenable(bank, offset, 1);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/

/*
 * MPUIO suspend: mask every line whose wakeup is not enabled so only
 * wake sources can interrupt while suspended (mask register is inverted).
 */
static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

/* MPUIO resume: restore the pre-suspend mask from the cached context. */
static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver		= {
		.name	= "mpuio",
		.pm	= &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name		= "mpuio",
	.id		= -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

/* Register the auxiliary MPUIO driver/device pair for PM callbacks. */
static inline void omap_mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

/*---------------------------------------------------------------------*/

/* gpio_chip .get_direction callback: 1 = input, 0 = output. */
static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;
	void __iomem *reg;
	int dir;

	bank = container_of(chip, struct gpio_bank, chip);
	reg = bank->base + bank->regs->direction;
	raw_spin_lock_irqsave(&bank->lock, flags);
	dir = !!(readl_relaxed(reg) & BIT(offset));
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return dir;
}

static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_direction(bank, offset, 1);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

/* gpio_chip .get callback: read datain for inputs, dataout for outputs. */
static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;

	bank = container_of(chip, struct gpio_bank, chip);

	if (omap_gpio_is_input(bank, offset))
		return omap_get_gpio_datain(bank, offset);
	else
		return omap_get_gpio_dataout(bank, offset);
}

/* gpio_chip .direction_output callback: set level, then switch to output. */
static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	omap_set_gpio_direction(bank, offset, 0);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

/* gpio_chip .set_debounce callback (debounce time in microseconds). */
static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
			      unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap2_set_gpio_debounce(bank, offset, debounce);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/

/* Print the hardware revision once, for banks that expose a revision reg. */
static void __init omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = readw_relaxed(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}

/*
 * Put the bank hardware into a known state: all IRQs disabled, all
 * status bits cleared, debounce off, module enabled.
 */
static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		/* MPUIO "irqenable" is an inverted mask register */
		writel_relaxed(l, bank->base + bank->regs->irqenable);
		return;
	}

	omap_gpio_rmw(base, bank->regs->irqenable, l,
		      bank->regs->irqenable_inv);
	omap_gpio_rmw(base, bank->regs->irqstatus, l,
		      !bank->regs->irqenable_inv);
	if (bank->regs->debounce_en)
		writel_relaxed(0, base + bank->regs->debounce_en);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
	/* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		writel_relaxed(0, base + bank->regs->ctrl);
}

/*
 * Register the bank with gpiolib: fill in the gpio_chip callbacks, add
 * the chip, attach the irqchip and request the bank's summary IRQ.
 */
static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
{
	static int gpio;	/* running base for consecutive banks */
	int irq_base = 0;
	int ret;

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.get_direction = omap_gpio_get_direction;
	bank->chip.direction_input = omap_gpio_input;
	bank->chip.get = omap_gpio_get;
	bank->chip.direction_output = omap_gpio_output;
	bank->chip.set_debounce = omap_gpio_debounce;
	bank->chip.set = omap_gpio_set;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.dev = &omap_mpuio_device.dev;
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		bank->chip.label = "gpio";
		bank->chip.base = gpio;
	}
	bank->chip.ngpio = bank->width;

	ret = gpiochip_add(&bank->chip);
	if (ret) {
		dev_err(bank->dev, "Could not register gpio chip %d\n", ret);
		return ret;
	}

	if (!bank->is_mpuio)
		gpio += bank->width;

#ifdef CONFIG_ARCH_OMAP1
	/*
	 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
	 * irq_alloc_descs() since a base IRQ offset will no longer be needed.
	 */
	irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
	if (irq_base < 0) {
		dev_err(bank->dev, "Couldn't allocate IRQ numbers\n");
		return -ENODEV;
	}
#endif

	/* MPUIO is a bit different, reading IRQ status clears it */
	if (bank->is_mpuio) {
		irqc->irq_ack = dummy_irq_chip.irq_ack;
		irqc->irq_mask = irq_gc_mask_set_bit;
		irqc->irq_unmask = irq_gc_mask_clr_bit;
		if (!bank->regs->wkup_en)
			irqc->irq_set_wake = NULL;
	}

	ret = gpiochip_irqchip_add(&bank->chip, irqc,
				   irq_base, handle_bad_irq,
				   IRQ_TYPE_NONE);

	if (ret) {
		dev_err(bank->dev, "Couldn't add irqchip to gpiochip %d\n", ret);
		gpiochip_remove(&bank->chip);
		return -ENODEV;
	}

	gpiochip_set_chained_irqchip(&bank->chip, irqc, bank->irq, NULL);

	ret = devm_request_irq(bank->dev, bank->irq, omap_gpio_irq_handler,
			       0, dev_name(bank->dev), bank);
	if (ret)
		gpiochip_remove(&bank->chip);

	return ret;
}

static const struct of_device_id omap_gpio_match[];

/*
 * Probe one GPIO bank: gather platform data (DT or board file), map the
 * registers, initialize the hardware and register with gpiolib.
 */
static int omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	const struct omap_gpio_platform_data *pdata;
	struct resource *res;
	struct gpio_bank *bank;
	struct irq_chip *irqc;
	int ret;

	match = of_match_device(of_match_ptr(omap_gpio_match), dev);

	pdata = match ? match->data : dev_get_platdata(dev);
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(dev, sizeof(struct gpio_bank), GFP_KERNEL);
	if (!bank) {
		dev_err(dev, "Memory alloc failed\n");
		return -ENOMEM;
	}

	irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL);
	if (!irqc)
		return -ENOMEM;

	/*
	 * NOTE(review): the trailing commas below chain these assignments
	 * into a single comma-expression statement; valid C, identical
	 * behavior to separate statements.
	 */
	irqc->irq_startup = omap_gpio_irq_startup,
	irqc->irq_shutdown = omap_gpio_irq_shutdown,
	irqc->irq_ack = omap_gpio_ack_irq,
	irqc->irq_mask = omap_gpio_mask_irq,
	irqc->irq_unmask = omap_gpio_unmask_irq,
	irqc->irq_set_type = omap_gpio_irq_type,
	irqc->irq_set_wake = omap_gpio_wake_enable,
	irqc->irq_bus_lock = omap_gpio_irq_bus_lock,
	irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
	irqc->name = dev_name(&pdev->dev);

	bank->irq = platform_get_irq(pdev, 0);
	if (bank->irq <= 0) {
		if (!bank->irq)
			bank->irq = -ENXIO;
		if (bank->irq != -EPROBE_DEFER)
			dev_err(dev,
				"can't get irq resource ret=%d\n", bank->irq);
		return bank->irq;
	}

	bank->dev = dev;
	bank->chip.dev = dev;
	bank->chip.owner = THIS_MODULE;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->regs = pdata->regs;
#ifdef CONFIG_OF_GPIO
	bank->chip.of_node = of_node_get(node);
#endif
	if (node) {
		if (!of_property_read_bool(node, "ti,gpio-always-on"))
			bank->loses_context = true;
	} else {
		bank->loses_context = pdata->loses_context;

		if (bank->loses_context)
			bank->get_context_loss_count =
				pdata->get_context_loss_count;
	}

	/* prefer the dedicated set/clear registers when available */
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = omap_set_gpio_dataout_reg;
	else
		bank->set_dataout = omap_set_gpio_dataout_mask;

	raw_spin_lock_init(&bank->lock);
	raw_spin_lock_init(&bank->wa_lock);

	/* Static mapping, never released */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bank->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(bank->base)) {
		return PTR_ERR(bank->base);
	}

	if (bank->dbck_flag) {
		bank->dbck = devm_clk_get(bank->dev, "dbclk");
		if (IS_ERR(bank->dbck)) {
			dev_err(bank->dev,
				"Could not get gpio dbck. Disable debounce\n");
			bank->dbck_flag = false;
		} else {
			clk_prepare(bank->dbck);
		}
	}

	platform_set_drvdata(pdev, bank);

	pm_runtime_enable(bank->dev);
	pm_runtime_irq_safe(bank->dev);
	pm_runtime_get_sync(bank->dev);

	if (bank->is_mpuio)
		omap_mpuio_init(bank);

	omap_gpio_mod_init(bank);

	ret = omap_gpio_chip_init(bank, irqc);
	if (ret) {
		pm_runtime_put_sync(bank->dev);
		pm_runtime_disable(bank->dev);
		return ret;
	}

	omap_gpio_show_rev(bank);

	pm_runtime_put(bank->dev);

	list_add_tail(&bank->node, &omap_gpio_list);

	return 0;
}

static int omap_gpio_remove(struct platform_device *pdev)
{
	struct gpio_bank *bank = platform_get_drvdata(pdev);

	list_del(&bank->node);
	gpiochip_remove(&bank->chip);
	pm_runtime_disable(bank->dev);
	if (bank->dbck_flag)
		clk_unprepare(bank->dbck);

	return 0;
}

#ifdef CONFIG_ARCH_OMAP2PLUS

#if defined(CONFIG_PM)
static void omap_gpio_restore_context(struct gpio_bank *bank);

static int omap_gpio_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l1 = 0, l2 = 0;
	unsigned long flags;
	u32 wake_low, wake_hi;

	raw_spin_lock_irqsave(&bank->lock, flags);

	/*
	 * Only edges can generate a wakeup event
to the PRCM. 1307 * 1308 * Therefore, ensure any wake-up capable GPIOs have 1309 * edge-detection enabled before going idle to ensure a wakeup 1310 * to the PRCM is generated on a GPIO transition. (c.f. 34xx 1311 * NDA TRM 25.5.3.1) 1312 * 1313 * The normal values will be restored upon ->runtime_resume() 1314 * by writing back the values saved in bank->context. 1315 */ 1316 wake_low = bank->context.leveldetect0 & bank->context.wake_en; 1317 if (wake_low) 1318 writel_relaxed(wake_low | bank->context.fallingdetect, 1319 bank->base + bank->regs->fallingdetect); 1320 wake_hi = bank->context.leveldetect1 & bank->context.wake_en; 1321 if (wake_hi) 1322 writel_relaxed(wake_hi | bank->context.risingdetect, 1323 bank->base + bank->regs->risingdetect); 1324 1325 if (!bank->enabled_non_wakeup_gpios) 1326 goto update_gpio_context_count; 1327 1328 if (bank->power_mode != OFF_MODE) { 1329 bank->power_mode = 0; 1330 goto update_gpio_context_count; 1331 } 1332 /* 1333 * If going to OFF, remove triggering for all 1334 * non-wakeup GPIOs. Otherwise spurious IRQs will be 1335 * generated. See OMAP2420 Errata item 1.101. 
1336 */ 1337 bank->saved_datain = readl_relaxed(bank->base + 1338 bank->regs->datain); 1339 l1 = bank->context.fallingdetect; 1340 l2 = bank->context.risingdetect; 1341 1342 l1 &= ~bank->enabled_non_wakeup_gpios; 1343 l2 &= ~bank->enabled_non_wakeup_gpios; 1344 1345 writel_relaxed(l1, bank->base + bank->regs->fallingdetect); 1346 writel_relaxed(l2, bank->base + bank->regs->risingdetect); 1347 1348 bank->workaround_enabled = true; 1349 1350 update_gpio_context_count: 1351 if (bank->get_context_loss_count) 1352 bank->context_loss_count = 1353 bank->get_context_loss_count(bank->dev); 1354 1355 omap_gpio_dbck_disable(bank); 1356 raw_spin_unlock_irqrestore(&bank->lock, flags); 1357 1358 return 0; 1359 } 1360 1361 static void omap_gpio_init_context(struct gpio_bank *p); 1362 1363 static int omap_gpio_runtime_resume(struct device *dev) 1364 { 1365 struct platform_device *pdev = to_platform_device(dev); 1366 struct gpio_bank *bank = platform_get_drvdata(pdev); 1367 u32 l = 0, gen, gen0, gen1; 1368 unsigned long flags; 1369 int c; 1370 1371 raw_spin_lock_irqsave(&bank->lock, flags); 1372 1373 /* 1374 * On the first resume during the probe, the context has not 1375 * been initialised and so initialise it now. Also initialise 1376 * the context loss count. 1377 */ 1378 if (bank->loses_context && !bank->context_valid) { 1379 omap_gpio_init_context(bank); 1380 1381 if (bank->get_context_loss_count) 1382 bank->context_loss_count = 1383 bank->get_context_loss_count(bank->dev); 1384 } 1385 1386 omap_gpio_dbck_enable(bank); 1387 1388 /* 1389 * In ->runtime_suspend(), level-triggered, wakeup-enabled 1390 * GPIOs were set to edge trigger also in order to be able to 1391 * generate a PRCM wakeup. Here we restore the 1392 * pre-runtime_suspend() values for edge triggering. 
1393 */ 1394 writel_relaxed(bank->context.fallingdetect, 1395 bank->base + bank->regs->fallingdetect); 1396 writel_relaxed(bank->context.risingdetect, 1397 bank->base + bank->regs->risingdetect); 1398 1399 if (bank->loses_context) { 1400 if (!bank->get_context_loss_count) { 1401 omap_gpio_restore_context(bank); 1402 } else { 1403 c = bank->get_context_loss_count(bank->dev); 1404 if (c != bank->context_loss_count) { 1405 omap_gpio_restore_context(bank); 1406 } else { 1407 raw_spin_unlock_irqrestore(&bank->lock, flags); 1408 return 0; 1409 } 1410 } 1411 } 1412 1413 if (!bank->workaround_enabled) { 1414 raw_spin_unlock_irqrestore(&bank->lock, flags); 1415 return 0; 1416 } 1417 1418 l = readl_relaxed(bank->base + bank->regs->datain); 1419 1420 /* 1421 * Check if any of the non-wakeup interrupt GPIOs have changed 1422 * state. If so, generate an IRQ by software. This is 1423 * horribly racy, but it's the best we can do to work around 1424 * this silicon bug. 1425 */ 1426 l ^= bank->saved_datain; 1427 l &= bank->enabled_non_wakeup_gpios; 1428 1429 /* 1430 * No need to generate IRQs for the rising edge for gpio IRQs 1431 * configured with falling edge only; and vice versa. 1432 */ 1433 gen0 = l & bank->context.fallingdetect; 1434 gen0 &= bank->saved_datain; 1435 1436 gen1 = l & bank->context.risingdetect; 1437 gen1 &= ~(bank->saved_datain); 1438 1439 /* FIXME: Consider GPIO IRQs with level detections properly! 
*/ 1440 gen = l & (~(bank->context.fallingdetect) & 1441 ~(bank->context.risingdetect)); 1442 /* Consider all GPIO IRQs needed to be updated */ 1443 gen |= gen0 | gen1; 1444 1445 if (gen) { 1446 u32 old0, old1; 1447 1448 old0 = readl_relaxed(bank->base + bank->regs->leveldetect0); 1449 old1 = readl_relaxed(bank->base + bank->regs->leveldetect1); 1450 1451 if (!bank->regs->irqstatus_raw0) { 1452 writel_relaxed(old0 | gen, bank->base + 1453 bank->regs->leveldetect0); 1454 writel_relaxed(old1 | gen, bank->base + 1455 bank->regs->leveldetect1); 1456 } 1457 1458 if (bank->regs->irqstatus_raw0) { 1459 writel_relaxed(old0 | l, bank->base + 1460 bank->regs->leveldetect0); 1461 writel_relaxed(old1 | l, bank->base + 1462 bank->regs->leveldetect1); 1463 } 1464 writel_relaxed(old0, bank->base + bank->regs->leveldetect0); 1465 writel_relaxed(old1, bank->base + bank->regs->leveldetect1); 1466 } 1467 1468 bank->workaround_enabled = false; 1469 raw_spin_unlock_irqrestore(&bank->lock, flags); 1470 1471 return 0; 1472 } 1473 #endif /* CONFIG_PM */ 1474 1475 #if IS_BUILTIN(CONFIG_GPIO_OMAP) 1476 void omap2_gpio_prepare_for_idle(int pwr_mode) 1477 { 1478 struct gpio_bank *bank; 1479 1480 list_for_each_entry(bank, &omap_gpio_list, node) { 1481 if (!BANK_USED(bank) || !bank->loses_context) 1482 continue; 1483 1484 bank->power_mode = pwr_mode; 1485 1486 pm_runtime_put_sync_suspend(bank->dev); 1487 } 1488 } 1489 1490 void omap2_gpio_resume_after_idle(void) 1491 { 1492 struct gpio_bank *bank; 1493 1494 list_for_each_entry(bank, &omap_gpio_list, node) { 1495 if (!BANK_USED(bank) || !bank->loses_context) 1496 continue; 1497 1498 pm_runtime_get_sync(bank->dev); 1499 } 1500 } 1501 #endif 1502 1503 #if defined(CONFIG_PM) 1504 static void omap_gpio_init_context(struct gpio_bank *p) 1505 { 1506 struct omap_gpio_reg_offs *regs = p->regs; 1507 void __iomem *base = p->base; 1508 1509 p->context.ctrl = readl_relaxed(base + regs->ctrl); 1510 p->context.oe = readl_relaxed(base + regs->direction); 1511 
p->context.wake_en = readl_relaxed(base + regs->wkup_en); 1512 p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0); 1513 p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1); 1514 p->context.risingdetect = readl_relaxed(base + regs->risingdetect); 1515 p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect); 1516 p->context.irqenable1 = readl_relaxed(base + regs->irqenable); 1517 p->context.irqenable2 = readl_relaxed(base + regs->irqenable2); 1518 1519 if (regs->set_dataout && p->regs->clr_dataout) 1520 p->context.dataout = readl_relaxed(base + regs->set_dataout); 1521 else 1522 p->context.dataout = readl_relaxed(base + regs->dataout); 1523 1524 p->context_valid = true; 1525 } 1526 1527 static void omap_gpio_restore_context(struct gpio_bank *bank) 1528 { 1529 writel_relaxed(bank->context.wake_en, 1530 bank->base + bank->regs->wkup_en); 1531 writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl); 1532 writel_relaxed(bank->context.leveldetect0, 1533 bank->base + bank->regs->leveldetect0); 1534 writel_relaxed(bank->context.leveldetect1, 1535 bank->base + bank->regs->leveldetect1); 1536 writel_relaxed(bank->context.risingdetect, 1537 bank->base + bank->regs->risingdetect); 1538 writel_relaxed(bank->context.fallingdetect, 1539 bank->base + bank->regs->fallingdetect); 1540 if (bank->regs->set_dataout && bank->regs->clr_dataout) 1541 writel_relaxed(bank->context.dataout, 1542 bank->base + bank->regs->set_dataout); 1543 else 1544 writel_relaxed(bank->context.dataout, 1545 bank->base + bank->regs->dataout); 1546 writel_relaxed(bank->context.oe, bank->base + bank->regs->direction); 1547 1548 if (bank->dbck_enable_mask) { 1549 writel_relaxed(bank->context.debounce, bank->base + 1550 bank->regs->debounce); 1551 writel_relaxed(bank->context.debounce_en, 1552 bank->base + bank->regs->debounce_en); 1553 } 1554 1555 writel_relaxed(bank->context.irqenable1, 1556 bank->base + bank->regs->irqenable); 1557 
writel_relaxed(bank->context.irqenable2, 1558 bank->base + bank->regs->irqenable2); 1559 } 1560 #endif /* CONFIG_PM */ 1561 #else 1562 #define omap_gpio_runtime_suspend NULL 1563 #define omap_gpio_runtime_resume NULL 1564 static inline void omap_gpio_init_context(struct gpio_bank *p) {} 1565 #endif 1566 1567 static const struct dev_pm_ops gpio_pm_ops = { 1568 SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume, 1569 NULL) 1570 }; 1571 1572 #if defined(CONFIG_OF) 1573 static struct omap_gpio_reg_offs omap2_gpio_regs = { 1574 .revision = OMAP24XX_GPIO_REVISION, 1575 .direction = OMAP24XX_GPIO_OE, 1576 .datain = OMAP24XX_GPIO_DATAIN, 1577 .dataout = OMAP24XX_GPIO_DATAOUT, 1578 .set_dataout = OMAP24XX_GPIO_SETDATAOUT, 1579 .clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT, 1580 .irqstatus = OMAP24XX_GPIO_IRQSTATUS1, 1581 .irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2, 1582 .irqenable = OMAP24XX_GPIO_IRQENABLE1, 1583 .irqenable2 = OMAP24XX_GPIO_IRQENABLE2, 1584 .set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1, 1585 .clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1, 1586 .debounce = OMAP24XX_GPIO_DEBOUNCE_VAL, 1587 .debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN, 1588 .ctrl = OMAP24XX_GPIO_CTRL, 1589 .wkup_en = OMAP24XX_GPIO_WAKE_EN, 1590 .leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0, 1591 .leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1, 1592 .risingdetect = OMAP24XX_GPIO_RISINGDETECT, 1593 .fallingdetect = OMAP24XX_GPIO_FALLINGDETECT, 1594 }; 1595 1596 static struct omap_gpio_reg_offs omap4_gpio_regs = { 1597 .revision = OMAP4_GPIO_REVISION, 1598 .direction = OMAP4_GPIO_OE, 1599 .datain = OMAP4_GPIO_DATAIN, 1600 .dataout = OMAP4_GPIO_DATAOUT, 1601 .set_dataout = OMAP4_GPIO_SETDATAOUT, 1602 .clr_dataout = OMAP4_GPIO_CLEARDATAOUT, 1603 .irqstatus = OMAP4_GPIO_IRQSTATUS0, 1604 .irqstatus2 = OMAP4_GPIO_IRQSTATUS1, 1605 .irqenable = OMAP4_GPIO_IRQSTATUSSET0, 1606 .irqenable2 = OMAP4_GPIO_IRQSTATUSSET1, 1607 .set_irqenable = OMAP4_GPIO_IRQSTATUSSET0, 1608 .clr_irqenable = 
OMAP4_GPIO_IRQSTATUSCLR0, 1609 .debounce = OMAP4_GPIO_DEBOUNCINGTIME, 1610 .debounce_en = OMAP4_GPIO_DEBOUNCENABLE, 1611 .ctrl = OMAP4_GPIO_CTRL, 1612 .wkup_en = OMAP4_GPIO_IRQWAKEN0, 1613 .leveldetect0 = OMAP4_GPIO_LEVELDETECT0, 1614 .leveldetect1 = OMAP4_GPIO_LEVELDETECT1, 1615 .risingdetect = OMAP4_GPIO_RISINGDETECT, 1616 .fallingdetect = OMAP4_GPIO_FALLINGDETECT, 1617 }; 1618 1619 static const struct omap_gpio_platform_data omap2_pdata = { 1620 .regs = &omap2_gpio_regs, 1621 .bank_width = 32, 1622 .dbck_flag = false, 1623 }; 1624 1625 static const struct omap_gpio_platform_data omap3_pdata = { 1626 .regs = &omap2_gpio_regs, 1627 .bank_width = 32, 1628 .dbck_flag = true, 1629 }; 1630 1631 static const struct omap_gpio_platform_data omap4_pdata = { 1632 .regs = &omap4_gpio_regs, 1633 .bank_width = 32, 1634 .dbck_flag = true, 1635 }; 1636 1637 static const struct of_device_id omap_gpio_match[] = { 1638 { 1639 .compatible = "ti,omap4-gpio", 1640 .data = &omap4_pdata, 1641 }, 1642 { 1643 .compatible = "ti,omap3-gpio", 1644 .data = &omap3_pdata, 1645 }, 1646 { 1647 .compatible = "ti,omap2-gpio", 1648 .data = &omap2_pdata, 1649 }, 1650 { }, 1651 }; 1652 MODULE_DEVICE_TABLE(of, omap_gpio_match); 1653 #endif 1654 1655 static struct platform_driver omap_gpio_driver = { 1656 .probe = omap_gpio_probe, 1657 .remove = omap_gpio_remove, 1658 .driver = { 1659 .name = "omap_gpio", 1660 .pm = &gpio_pm_ops, 1661 .of_match_table = of_match_ptr(omap_gpio_match), 1662 }, 1663 }; 1664 1665 /* 1666 * gpio driver register needs to be done before 1667 * machine_init functions access gpio APIs. 1668 * Hence omap_gpio_drv_reg() is a postcore_initcall. 
1669 */ 1670 static int __init omap_gpio_drv_reg(void) 1671 { 1672 return platform_driver_register(&omap_gpio_driver); 1673 } 1674 postcore_initcall(omap_gpio_drv_reg); 1675 1676 static void __exit omap_gpio_exit(void) 1677 { 1678 platform_driver_unregister(&omap_gpio_driver); 1679 } 1680 module_exit(omap_gpio_exit); 1681 1682 MODULE_DESCRIPTION("omap gpio driver"); 1683 MODULE_ALIAS("platform:gpio-omap"); 1684 MODULE_LICENSE("GPL v2"); 1685