/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpu_pm.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>

#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF

struct gpio_regs {
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};

struct gpio_bank {
	struct list_head node;
	void __iomem *base;
	int irq;
	u32 non_wakeup_gpios;
	u32 enabled_non_wakeup_gpios;
	struct gpio_regs context;
	u32 saved_datain;
	u32 level_mask;
	u32 toggle_mask;
	raw_spinlock_t lock;
	raw_spinlock_t wa_lock;
	struct gpio_chip chip;
	struct clk *dbck;
	struct notifier_block nb;
	unsigned int is_suspended:1;
	u32 mod_usage;
	u32 irq_usage;
	u32 dbck_enable_mask;
	bool dbck_enabled;
	bool is_mpuio;
	bool dbck_flag;
	bool loses_context;
	bool context_valid;
	int stride;
	u32 width;
	int context_loss_count;

	void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
	void (*set_dataout_multiple)(struct gpio_bank *bank,
				     unsigned long *mask, unsigned long *bits);
	int (*get_context_loss_count)(struct device *dev);

	struct omap_gpio_reg_offs *regs;
};

#define GPIO_MOD_CTRL_BIT	BIT(0)

#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
#define LINE_USED(line, offset) (line & (BIT(offset)))

static void omap_gpio_unmask_irq(struct irq_data *d);

static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
	return gpiochip_get_data(chip);
}

static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
				    int is_input)
{
	void __iomem *reg = bank->base;
	u32 l;

	reg += bank->regs->direction;
	l = readl_relaxed(reg);
	if (is_input)
		l |= BIT(gpio);
	else
		l &= ~(BIT(gpio));
	writel_relaxed(l, reg);
	bank->context.oe = l;
}

/* set data out value using dedicated set/clear registers */
static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
				      int enable)
{
	void __iomem *reg = bank->base;
	u32 l = BIT(offset);

	if (enable) {
		reg += bank->regs->set_dataout;
		bank->context.dataout |= l;
	} else {
		reg += bank->regs->clr_dataout;
		bank->context.dataout &= ~l;
	}

	writel_relaxed(l, reg);
}
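
/*
 * Note (editorial): banks with dedicated SETDATAOUT/CLEARDATAOUT registers
 * (selected at probe time) can update a single line without a
 * read-modify-write cycle; banks that only have a DATAOUT register use the
 * masked variants below instead.
 */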

/* set data out value using mask register */
static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
				       int enable)
{
	void __iomem *reg = bank->base + bank->regs->dataout;
	u32 gpio_bit = BIT(offset);
	u32 l;

	l = readl_relaxed(reg);
	if (enable)
		l |= gpio_bit;
	else
		l &= ~gpio_bit;
	writel_relaxed(l, reg);
	bank->context.dataout = l;
}

static int omap_get_gpio_datain(struct gpio_bank *bank, int offset)
{
	void __iomem *reg = bank->base + bank->regs->datain;

	return (readl_relaxed(reg) & (BIT(offset))) != 0;
}

static int omap_get_gpio_dataout(struct gpio_bank *bank, int offset)
{
	void __iomem *reg = bank->base + bank->regs->dataout;

	return (readl_relaxed(reg) & (BIT(offset))) != 0;
}

/* set multiple data out values using dedicated set/clear registers */
static void omap_set_gpio_dataout_reg_multiple(struct gpio_bank *bank,
					       unsigned long *mask,
					       unsigned long *bits)
{
	void __iomem *reg = bank->base;
	u32 l;

	l = *bits & *mask;
	writel_relaxed(l, reg + bank->regs->set_dataout);
	bank->context.dataout |= l;

	l = ~*bits & *mask;
	writel_relaxed(l, reg + bank->regs->clr_dataout);
	bank->context.dataout &= ~l;
}

/* set multiple data out values using mask register */
static void omap_set_gpio_dataout_mask_multiple(struct gpio_bank *bank,
						unsigned long *mask,
						unsigned long *bits)
{
	void __iomem *reg = bank->base + bank->regs->dataout;
	u32 l = (readl_relaxed(reg) & ~*mask) | (*bits & *mask);

	writel_relaxed(l, reg);
	bank->context.dataout = l;
}

static unsigned long omap_get_gpio_datain_multiple(struct gpio_bank *bank,
						   unsigned long *mask)
{
	void __iomem *reg = bank->base + bank->regs->datain;

	return readl_relaxed(reg) & *mask;
}

static unsigned long omap_get_gpio_dataout_multiple(struct gpio_bank *bank,
						    unsigned long *mask)
{
	void __iomem *reg = bank->base + bank->regs->dataout;

	return readl_relaxed(reg) & *mask;
}

static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask,
				 bool set)
{
	int l = readl_relaxed(base + reg);

	if (set)
		l |= mask;
	else
		l &= ~mask;

	writel_relaxed(l, base + reg);
}

static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_enable(bank->dbck);
		bank->dbck_enabled = true;

		writel_relaxed(bank->dbck_enable_mask,
			       bank->base + bank->regs->debounce_en);
	}
}

static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		/*
		 * Disable debounce before cutting its clock. If debounce is
		 * enabled but the clock is not, the GPIO module seems to be
		 * unable to detect events and generate interrupts, at least
		 * on OMAP3.
		 */
		writel_relaxed(0, bank->base + bank->regs->debounce_en);

		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/**
 * omap2_set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps
 *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
 * so we need to convert and round up to the closest unit.
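 *
 * For example, a requested debounce of 3000 us is programmed as
 * DIV_ROUND_UP(3000, 31) - 1 = 96, which the hardware applies as
 * (96 + 1) x 31 = 3007 us (values here are illustrative only).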
 *
 * Return: 0 on success, negative error otherwise.
 */
static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
				   unsigned debounce)
{
	void __iomem *reg;
	u32 val;
	u32 l;
	bool enable = !!debounce;

	if (!bank->dbck_flag)
		return -ENOTSUPP;

	if (enable) {
		debounce = DIV_ROUND_UP(debounce, 31) - 1;
		if ((debounce & OMAP4_GPIO_DEBOUNCINGTIME_MASK) != debounce)
			return -EINVAL;
	}

	l = BIT(offset);

	clk_enable(bank->dbck);
	reg = bank->base + bank->regs->debounce;
	writel_relaxed(debounce, reg);

	reg = bank->base + bank->regs->debounce_en;
	val = readl_relaxed(reg);

	if (enable)
		val |= l;
	else
		val &= ~l;
	bank->dbck_enable_mask = val;

	writel_relaxed(val, reg);
	clk_disable(bank->dbck);
	/*
	 * Enable debounce clock per module.
	 * This call is mandatory because when *_runtime_get_sync() is
	 * called in omap_gpio_request(), _gpio_dbck_enable() within the
	 * runtime callback fails to turn on dbck because dbck_enable_mask
	 * used within _gpio_dbck_enable() is still not initialized at
	 * that point. Therefore we have to enable dbck here.
	 */
	omap_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}

	return 0;
}

/**
 * omap_clear_gpio_debounce - clear debounce settings for a gpio
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 *
 * If a gpio is using debounce, then clear the debounce enable bit and if
 * this is the only gpio in this bank using debounce, then clear the debounce
 * time too. The debounce clock will also be disabled when calling this
 * function if this is the only gpio in the bank using debounce.
 */
static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
{
	u32 gpio_bit = BIT(offset);

	if (!bank->dbck_flag)
		return;

	if (!(bank->dbck_enable_mask & gpio_bit))
		return;

	bank->dbck_enable_mask &= ~gpio_bit;
	bank->context.debounce_en &= ~gpio_bit;
	writel_relaxed(bank->context.debounce_en,
		       bank->base + bank->regs->debounce_en);

	if (!bank->dbck_enable_mask) {
		bank->context.debounce = 0;
		writel_relaxed(bank->context.debounce, bank->base +
			       bank->regs->debounce);
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/*
 * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
 * See the TRM GPIO section on "Wake-Up Generation" for the list of GPIOs
 * in the wakeup domain. If bank->non_wakeup_gpios is not configured, assume
 * none are capable of waking up the system from off mode.
 */
static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
{
	u32 no_wake = bank->non_wakeup_gpios;

	if (no_wake)
		return !!(~no_wake & gpio_mask);

	return false;
}

static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
					 unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = BIT(gpio);

	omap_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_LOW);
	omap_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_HIGH);

	/*
	 * We need edge detection enabled to allow the GPIO block to be
	 * woken from the idle state. Set the appropriate edge detection
	 * in addition to the level detection.
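	 * For example, a line configured for IRQ_TYPE_LEVEL_HIGH also gets
	 * its rising-edge detect bit set below, and IRQ_TYPE_LEVEL_LOW its
	 * falling-edge detect bit.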
	 */
	omap_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
		      trigger & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH));
	omap_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
		      trigger & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW));

	bank->context.leveldetect0 =
			readl_relaxed(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
			readl_relaxed(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
			readl_relaxed(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
			readl_relaxed(bank->base + bank->regs->fallingdetect);

	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
		omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
	}

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes
		 * to avoid irq lost during PER RET/OFF mode
		 * Applies for omap2 non-wakeup gpio and all omap3 gpios
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}

	bank->level_mask =
		readl_relaxed(bank->base + bank->regs->leveldetect0) |
		readl_relaxed(bank->base + bank->regs->leveldetect1);
}

#ifdef CONFIG_ARCH_OMAP1
/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once. For all other chips, this function is a noop.
 */
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (!bank->regs->irqctrl)
		return;

	reg += bank->regs->irqctrl;

	l = readl_relaxed(reg);
	if ((l >> gpio) & 1)
		l &= ~(BIT(gpio));
	else
		l |= BIT(gpio);

	writel_relaxed(l, reg);
}
#else
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
#endif

static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
				    unsigned trigger)
{
	void __iomem *reg = bank->base;
	void __iomem *base = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		omap_set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = readl_relaxed(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= BIT(gpio);
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= BIT(gpio);
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(BIT(gpio));
		else
			return -EINVAL;

		writel_relaxed(l, reg);
	} else if (bank->regs->edgectrl1) {
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = readl_relaxed(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= BIT(gpio << 1);

		/* Enable wake-up during idle for dynamic tick */
		omap_gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
		writel_relaxed(l, reg);
	}
	return 0;
}

static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *base = bank->base;

	if (bank->regs->wkup_en &&
	    !LINE_USED(bank->mod_usage, offset) &&
	    !LINE_USED(bank->irq_usage, offset)) {
		/* Disable wake-up during idle for dynamic tick */
		omap_gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static int omap_gpio_is_input(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return readl_relaxed(reg) & BIT(offset);
}

static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned offset)
{
	if (!LINE_USED(bank->mod_usage, offset)) {
		omap_enable_gpio_module(bank, offset);
		omap_set_gpio_direction(bank, offset, 1);
	}
	bank->irq_usage |= BIT(offset);
}

static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	int retval;
	unsigned long flags;
	unsigned offset = d->hwirq;

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	if (!bank->regs->leveldetect0 &&
	    (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	raw_spin_lock_irqsave(&bank->lock, flags);
	retval = omap_set_gpio_triggering(bank, offset, type);
	if (retval) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		goto error;
	}
	omap_gpio_init_irq(bank, offset);
	if (!omap_gpio_is_input(bank, offset)) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		retval = -EINVAL;
		goto error;
	}
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		/*
		 * Edge IRQs are already cleared/acked in the irq handler and
		 * do not need to be masked; as a result the handle_edge_irq()
		 * logic would be redundant here and may cause loss of
		 * interrupts. So just use handle_simple_irq.
		 */
		irq_set_handler_locked(d, handle_simple_irq);

	return 0;

error:
	return retval;
}

static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	writel_relaxed(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		writel_relaxed(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	readl_relaxed(reg);
}

static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank,
					     unsigned offset)
{
	omap_clear_gpio_irqbank(bank, BIT(offset));
}

static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (BIT(bank->width)) - 1;

	reg += bank->regs->irqenable;
	l = readl_relaxed(reg);
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}

static void omap_enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->set_irqenable) {
		reg += bank->regs->set_irqenable;
		l = gpio_mask;
		bank->context.irqenable1 |= gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = readl_relaxed(reg);
		if (bank->regs->irqenable_inv)
			l &= ~gpio_mask;
		else
			l |= gpio_mask;
		bank->context.irqenable1 = l;
	}

	writel_relaxed(l, reg);
}

static void omap_disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->clr_irqenable) {
		reg += bank->regs->clr_irqenable;
		l = gpio_mask;
		bank->context.irqenable1 &= ~gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = readl_relaxed(reg);
		if (bank->regs->irqenable_inv)
			l |= gpio_mask;
		else
			l &= ~gpio_mask;
		bank->context.irqenable1 = l;
	}

	writel_relaxed(l, reg);
}

static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
					   unsigned offset, int enable)
{
	if (enable)
		omap_enable_gpio_irqbank(bank, BIT(offset));
	else
		omap_disable_gpio_irqbank(bank, BIT(offset));
}

/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);

	return irq_set_irq_wake(bank->irq, enable);
}

static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	pm_runtime_get_sync(chip->parent);

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_enable_gpio_module(bank, offset);
	bank->mod_usage |= BIT(offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->mod_usage &= ~(BIT(offset));
	if (!LINE_USED(bank->irq_usage, offset)) {
		omap_set_gpio_direction(bank, offset, 1);
		omap_clear_gpio_debounce(bank, offset);
	}
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	pm_runtime_put(chip->parent);
}
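
/*
 * Note (editorial): omap_gpio_request() and omap_gpio_free() above bracket a
 * line's lifetime with pm_runtime_get_sync()/pm_runtime_put() on the bank's
 * parent device, so the bank stays powered for as long as at least one of
 * its lines is requested.
 */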

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
{
	void __iomem *isr_reg = NULL;
	u32 enabled, isr, level_mask;
	unsigned int bit;
	struct gpio_bank *bank = gpiobank;
	unsigned long wa_lock_flags;
	unsigned long lock_flags;

	isr_reg = bank->base + bank->regs->irqstatus;
	if (WARN_ON(!isr_reg))
		goto exit;

	if (WARN_ONCE(!pm_runtime_active(bank->chip.parent),
		      "gpio irq%i while runtime suspended?\n", irq))
		return IRQ_NONE;

	while (1) {
		raw_spin_lock_irqsave(&bank->lock, lock_flags);

		enabled = omap_get_gpio_irqbank_mask(bank);
		isr = readl_relaxed(isr_reg) & enabled;

		if (bank->level_mask)
			level_mask = bank->level_mask & enabled;
		else
			level_mask = 0;

		/*
		 * Clear edge sensitive interrupts before the handler(s) are
		 * called so that we don't miss any interrupt that occurred
		 * while executing them.
		 */
		if (isr & ~level_mask)
			omap_clear_gpio_irqbank(bank, isr & ~level_mask);

		raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

		if (!isr)
			break;

		while (isr) {
			bit = __ffs(isr);
			isr &= ~(BIT(bit));

			raw_spin_lock_irqsave(&bank->lock, lock_flags);
			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time. If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (BIT(bit)))
				omap_toggle_gpio_edge_triggering(bank, bit);

			raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

			raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);

			generic_handle_irq(irq_find_mapping(bank->chip.irq.domain,
							    bit));

			raw_spin_unlock_irqrestore(&bank->wa_lock,
						   wa_lock_flags);
		}
	}
exit:
	return IRQ_HANDLED;
}

static unsigned int omap_gpio_irq_startup(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);

	if (!LINE_USED(bank->mod_usage, offset))
		omap_set_gpio_direction(bank, offset, 1);
	else if (!omap_gpio_is_input(bank, offset))
		goto err;
	omap_enable_gpio_module(bank, offset);
	bank->irq_usage |= BIT(offset);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
	omap_gpio_unmask_irq(d);

	return 0;
err:
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return -EINVAL;
}

static void omap_gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->irq_usage &= ~(BIT(offset));
	omap_set_gpio_irqenable(bank, offset, 0);
	omap_clear_gpio_irqstatus(bank, offset);
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	if (!LINE_USED(bank->mod_usage, offset))
		omap_clear_gpio_debounce(bank, offset);
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_irq_bus_lock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	pm_runtime_get_sync(bank->chip.parent);
}

static void gpio_irq_bus_sync_unlock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	pm_runtime_put(bank->chip.parent);
}

static void omap_gpio_ack_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;

	omap_clear_gpio_irqstatus(bank, offset);
}

static void omap_gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_irqenable(bank, offset, 0);
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	if (trigger)
		omap_set_gpio_triggering(bank, offset, trigger);

	omap_set_gpio_irqenable(bank, offset, 1);

	/*
	 * For level-triggered GPIOs, clearing must be done after the source
	 * is cleared, thus after the handler has run. OMAP4 needs this done
	 * after enabling the interrupt to clear the wakeup status.
	 */
	if (bank->level_mask & BIT(offset))
		omap_clear_gpio_irqstatus(bank, offset);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/

static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver = {
		.name = "mpuio",
		.pm = &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name = "mpuio",
	.id = -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

static inline void omap_mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

/*---------------------------------------------------------------------*/

static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;
	void __iomem *reg;
	int dir;

	bank = gpiochip_get_data(chip);
	reg = bank->base + bank->regs->direction;
	raw_spin_lock_irqsave(&bank->lock, flags);
	dir = !!(readl_relaxed(reg) & BIT(offset));
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return dir;
}

static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_direction(bank, offset, 1);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;

	bank = gpiochip_get_data(chip);

	if (omap_gpio_is_input(bank, offset))
		return omap_get_gpio_datain(bank, offset);
	else
		return omap_get_gpio_dataout(bank, offset);
}

static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	omap_set_gpio_direction(bank, offset, 0);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}
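
/*
 * Note (editorial): for get_multiple(), input lines are read from DATAIN and
 * output lines from DATAOUT, so callers see the driven value for outputs as
 * well as the sampled value for inputs.
 */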

static int omap_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
				  unsigned long *bits)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *reg = bank->base + bank->regs->direction;
	unsigned long in = readl_relaxed(reg), l;

	*bits = 0;

	l = in & *mask;
	if (l)
		*bits |= omap_get_gpio_datain_multiple(bank, &l);

	l = ~in & *mask;
	if (l)
		*bits |= omap_get_gpio_dataout_multiple(bank, &l);

	return 0;
}

static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
			      unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;
	int ret;

	bank = gpiochip_get_data(chip);

	raw_spin_lock_irqsave(&bank->lock, flags);
	ret = omap2_set_gpio_debounce(bank, offset, debounce);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (ret)
		dev_info(chip->parent,
			 "Could not set line %u debounce to %u microseconds (%d)",
			 offset, debounce, ret);

	return ret;
}

static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset,
				unsigned long config)
{
	u32 debounce;

	if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
		return -ENOTSUPP;

	debounce = pinconf_to_config_argument(config);
	return omap_gpio_debounce(chip, offset, debounce);
}
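
/*
 * Note (editorial): consumers typically reach omap_gpio_set_config() through
 * the generic gpiolib debounce helper, e.g. (illustrative only):
 *
 *	gpiod_set_debounce(desc, 5000);
 *
 * which gpiolib packs into a PIN_CONFIG_INPUT_DEBOUNCE config before calling
 * this hook.
 */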

static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
				   unsigned long *bits)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout_multiple(bank, mask, bits);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/

static void omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = readw_relaxed(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}

static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		writel_relaxed(l, bank->base + bank->regs->irqenable);
		return;
	}

	omap_gpio_rmw(base, bank->regs->irqenable, l,
		      bank->regs->irqenable_inv);
	omap_gpio_rmw(base, bank->regs->irqstatus, l,
		      !bank->regs->irqenable_inv);
	if (bank->regs->debounce_en)
		writel_relaxed(0, base + bank->regs->debounce_en);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
	/* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		writel_relaxed(0, base + bank->regs->ctrl);
}

static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
{
	struct gpio_irq_chip *irq;
	static int gpio;
	const char *label;
	int irq_base = 0;
	int ret;

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.get_direction = omap_gpio_get_direction;
	bank->chip.direction_input = omap_gpio_input;
	bank->chip.get = omap_gpio_get;
	bank->chip.get_multiple = omap_gpio_get_multiple;
	bank->chip.direction_output = omap_gpio_output;
	bank->chip.set_config = omap_gpio_set_config;
	bank->chip.set = omap_gpio_set;
	bank->chip.set_multiple = omap_gpio_set_multiple;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.parent = &omap_mpuio_device.dev;
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		label = devm_kasprintf(bank->chip.parent, GFP_KERNEL, "gpio-%d-%d",
				       gpio, gpio + bank->width - 1);
		if (!label)
			return -ENOMEM;
		bank->chip.label = label;
		bank->chip.base = gpio;
	}
	bank->chip.ngpio = bank->width;

#ifdef CONFIG_ARCH_OMAP1
	/*
	 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
	 * irq_alloc_descs() since a base IRQ offset will no longer be needed.
	 */
	irq_base = devm_irq_alloc_descs(bank->chip.parent,
					-1, 0, bank->width, 0);
	if (irq_base < 0) {
		dev_err(bank->chip.parent, "Couldn't allocate IRQ numbers\n");
		return -ENODEV;
	}
#endif

	/* MPUIO is a bit different, reading IRQ status clears it */
	if (bank->is_mpuio) {
		irqc->irq_ack = dummy_irq_chip.irq_ack;
		if (!bank->regs->wkup_en)
			irqc->irq_set_wake = NULL;
	}

	irq = &bank->chip.irq;
	irq->chip = irqc;
	irq->handler = handle_bad_irq;
	irq->default_type = IRQ_TYPE_NONE;
	irq->num_parents = 1;
	irq->parents = &bank->irq;
	irq->first = irq_base;

	ret = gpiochip_add_data(&bank->chip, bank);
	if (ret) {
		dev_err(bank->chip.parent,
			"Could not register gpio chip %d\n", ret);
		return ret;
	}

	ret = devm_request_irq(bank->chip.parent, bank->irq,
			       omap_gpio_irq_handler,
			       0, dev_name(bank->chip.parent), bank);
	if (ret)
		gpiochip_remove(&bank->chip);

	if (!bank->is_mpuio)
		gpio += bank->width;

	return ret;
}

static void omap_gpio_init_context(struct gpio_bank *p)
{
	struct omap_gpio_reg_offs *regs = p->regs;
	void __iomem *base = p->base;

	p->context.ctrl = readl_relaxed(base + regs->ctrl);
	p->context.oe = readl_relaxed(base + regs->direction);
	p->context.wake_en = readl_relaxed(base + regs->wkup_en);
	p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
	p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
	p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
	p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
	p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
	p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);

	if (regs->set_dataout && p->regs->clr_dataout)
		p->context.dataout = readl_relaxed(base + regs->set_dataout);
	else
		p->context.dataout = readl_relaxed(base + regs->dataout);

	p->context_valid = true;
}

static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	writel_relaxed(bank->context.wake_en,
		       bank->base + bank->regs->wkup_en);
	writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
	writel_relaxed(bank->context.leveldetect0,
		       bank->base + bank->regs->leveldetect0);
	writel_relaxed(bank->context.leveldetect1,
		       bank->base + bank->regs->leveldetect1);
	writel_relaxed(bank->context.risingdetect,
		       bank->base + bank->regs->risingdetect);
	writel_relaxed(bank->context.fallingdetect,
		       bank->base + bank->regs->fallingdetect);
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		writel_relaxed(bank->context.dataout,
			       bank->base + bank->regs->set_dataout);
	else
		writel_relaxed(bank->context.dataout,
			       bank->base + bank->regs->dataout);
	writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);

	if (bank->dbck_enable_mask) {
		writel_relaxed(bank->context.debounce, bank->base +
			       bank->regs->debounce);
		writel_relaxed(bank->context.debounce_en,
			       bank->base + bank->regs->debounce_en);
	}

	writel_relaxed(bank->context.irqenable1,
		       bank->base + bank->regs->irqenable);
	writel_relaxed(bank->context.irqenable2,
		       bank->base + bank->regs->irqenable2);
}

static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
	struct device *dev = bank->chip.parent;
	void __iomem *base = bank->base;
	u32 nowake;

	bank->saved_datain = readl_relaxed(base + bank->regs->datain);

	if (!bank->enabled_non_wakeup_gpios)
		goto update_gpio_context_count;

	if (!may_lose_context)
		goto update_gpio_context_count;

	/*
	 * If going to OFF, remove triggering for all wkup domain
	 * non-wakeup GPIOs. Otherwise spurious IRQs will be
	 * generated. See OMAP2420 Errata item 1.101.
	 */
	if (!bank->loses_context && bank->enabled_non_wakeup_gpios) {
		nowake = bank->enabled_non_wakeup_gpios;
		omap_gpio_rmw(base, bank->regs->fallingdetect, nowake, ~nowake);
		omap_gpio_rmw(base, bank->regs->risingdetect, nowake, ~nowake);
	}

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
			bank->get_context_loss_count(dev);

	omap_gpio_dbck_disable(bank);
}

static void omap_gpio_unidle(struct gpio_bank *bank)
{
	struct device *dev = bank->chip.parent;
	u32 l = 0, gen, gen0, gen1;
	int c;

	/*
	 * On the first resume during the probe, the context has not
	 * been initialised and so initialise it now. Also initialise
	 * the context loss count.
	 */
	if (bank->loses_context && !bank->context_valid) {
		omap_gpio_init_context(bank);

		if (bank->get_context_loss_count)
			bank->context_loss_count =
				bank->get_context_loss_count(dev);
	}

	omap_gpio_dbck_enable(bank);

	if (bank->loses_context) {
		if (!bank->get_context_loss_count) {
			omap_gpio_restore_context(bank);
		} else {
			c = bank->get_context_loss_count(dev);
			if (c != bank->context_loss_count) {
				omap_gpio_restore_context(bank);
			} else {
				return;
			}
		}
	} else {
		/* Restore changes done for OMAP2420 errata 1.101 */
		writel_relaxed(bank->context.fallingdetect,
			       bank->base + bank->regs->fallingdetect);
		writel_relaxed(bank->context.risingdetect,
			       bank->base + bank->regs->risingdetect);
	}

	l = readl_relaxed(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state. If so, generate an IRQ by software. This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->context.fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->context.risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->context.fallingdetect) &
		   ~(bank->context.risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
		old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);

		if (!bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | gen, bank->base +
				       bank->regs->leveldetect0);
			writel_relaxed(old1 | gen, bank->base +
				       bank->regs->leveldetect1);
		}

		if (bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | l, bank->base +
				       bank->regs->leveldetect0);
			writel_relaxed(old1 | l, bank->base +
				       bank->regs->leveldetect1);
		}
		writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
		writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
	}
}

static int gpio_omap_cpu_notifier(struct notifier_block *nb,
				  unsigned long cmd, void *v)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(nb, struct gpio_bank, nb);

	raw_spin_lock_irqsave(&bank->lock, flags);
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (bank->is_suspended)
			break;
		omap_gpio_idle(bank, true);
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		if (bank->is_suspended)
			break;
		omap_gpio_unidle(bank);
		break;
	}
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return NOTIFY_OK;
}

static struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision = OMAP24XX_GPIO_REVISION,
	.direction = OMAP24XX_GPIO_OE,
	.datain = OMAP24XX_GPIO_DATAIN,
	.dataout = OMAP24XX_GPIO_DATAOUT,
	.set_dataout = OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus = OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable = OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 = OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce = OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl = OMAP24XX_GPIO_CTRL,
	.wkup_en = OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect = OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect = OMAP24XX_GPIO_FALLINGDETECT,
};
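
/*
 * Note (editorial): on OMAP4, interrupt enables are set and cleared through
 * the IRQSTATUSSET0/IRQSTATUSCLR0 registers, which is why both .irqenable
 * and .set_irqenable below point at OMAP4_GPIO_IRQSTATUSSET0.
 */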

static struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision = OMAP4_GPIO_REVISION,
	.direction = OMAP4_GPIO_OE,
	.datain = OMAP4_GPIO_DATAIN,
	.dataout = OMAP4_GPIO_DATAOUT,
	.set_dataout = OMAP4_GPIO_SETDATAOUT,
	.clr_dataout = OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus = OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 = OMAP4_GPIO_IRQSTATUS1,
	.irqenable = OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 = OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable = OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce = OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en = OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl = OMAP4_GPIO_CTRL,
	.wkup_en = OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 = OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 = OMAP4_GPIO_LEVELDETECT1,
	.risingdetect = OMAP4_GPIO_RISINGDETECT,
	.fallingdetect = OMAP4_GPIO_FALLINGDETECT,
};

static const struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

static const struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);

static int omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	const struct omap_gpio_platform_data *pdata;
	struct gpio_bank *bank;
	struct irq_chip *irqc;
	int ret;

	match = of_match_device(of_match_ptr(omap_gpio_match), dev);

	pdata = match ? match->data : dev_get_platdata(dev);
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
	if (!bank)
		return -ENOMEM;

	irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL);
	if (!irqc)
		return -ENOMEM;

	irqc->irq_startup = omap_gpio_irq_startup;
	irqc->irq_shutdown = omap_gpio_irq_shutdown;
	irqc->irq_ack = omap_gpio_ack_irq;
	irqc->irq_mask = omap_gpio_mask_irq;
	irqc->irq_unmask = omap_gpio_unmask_irq;
	irqc->irq_set_type = omap_gpio_irq_type;
	irqc->irq_set_wake = omap_gpio_wake_enable;
	irqc->irq_bus_lock = omap_gpio_irq_bus_lock;
	irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock;
	irqc->name = dev_name(&pdev->dev);
	irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
	irqc->parent_device = dev;

	bank->irq = platform_get_irq(pdev, 0);
	if (bank->irq <= 0) {
		if (!bank->irq)
			bank->irq = -ENXIO;
		if (bank->irq != -EPROBE_DEFER)
			dev_err(dev,
				"can't get irq resource ret=%d\n", bank->irq);
		return bank->irq;
	}

	bank->chip.parent = dev;
	bank->chip.owner = THIS_MODULE;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->regs = pdata->regs;
#ifdef CONFIG_OF_GPIO
	bank->chip.of_node = of_node_get(node);
#endif

	if (node) {
		if (!of_property_read_bool(node, "ti,gpio-always-on"))
			bank->loses_context = true;
	} else {
		bank->loses_context = pdata->loses_context;

		if (bank->loses_context)
			bank->get_context_loss_count =
				pdata->get_context_loss_count;
	}

	if (bank->regs->set_dataout && bank->regs->clr_dataout) {
		bank->set_dataout = omap_set_gpio_dataout_reg;
		bank->set_dataout_multiple = omap_set_gpio_dataout_reg_multiple;
	} else {
		bank->set_dataout = omap_set_gpio_dataout_mask;
		bank->set_dataout_multiple =
				omap_set_gpio_dataout_mask_multiple;
	}

	raw_spin_lock_init(&bank->lock);
	raw_spin_lock_init(&bank->wa_lock);

	/* Static mapping, never released */
	bank->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(bank->base))
		return PTR_ERR(bank->base);

	if (bank->dbck_flag) {
		bank->dbck = devm_clk_get(dev, "dbclk");
		if (IS_ERR(bank->dbck)) {
			dev_err(dev,
				"Could not get gpio dbck. Disable debounce\n");
			bank->dbck_flag = false;
		} else {
			clk_prepare(bank->dbck);
		}
	}

	platform_set_drvdata(pdev, bank);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	if (bank->is_mpuio)
		omap_mpuio_init(bank);

	omap_gpio_mod_init(bank);

	ret = omap_gpio_chip_init(bank, irqc);
	if (ret) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
		if (bank->dbck_flag)
			clk_unprepare(bank->dbck);
		return ret;
	}

	omap_gpio_show_rev(bank);

	bank->nb.notifier_call = gpio_omap_cpu_notifier;
	cpu_pm_register_notifier(&bank->nb);

	pm_runtime_put(dev);

	return 0;
}

static int omap_gpio_remove(struct platform_device *pdev)
{
	struct gpio_bank *bank = platform_get_drvdata(pdev);

	cpu_pm_unregister_notifier(&bank->nb);
	list_del(&bank->node);
	gpiochip_remove(&bank->chip);
	pm_runtime_disable(&pdev->dev);
	if (bank->dbck_flag)
		clk_unprepare(bank->dbck);

	return 0;
}

static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_gpio_idle(bank, true);
	bank->is_suspended = true;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_gpio_unidle(bank);
	bank->is_suspended = false;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops gpio_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
			   NULL)
};

static struct platform_driver omap_gpio_driver = {
	.probe = omap_gpio_probe,
	.remove = omap_gpio_remove,
	.driver = {
		.name = "omap_gpio",
		.pm = &gpio_pm_ops,
		.of_match_table = omap_gpio_match,
	},
};

/*
 * gpio driver register needs to be done before
 * machine_init functions access gpio APIs.
 * Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);

static void __exit omap_gpio_exit(void)
{
	platform_driver_unregister(&omap_gpio_driver);
}
module_exit(omap_gpio_exit);

MODULE_DESCRIPTION("omap gpio driver");
MODULE_ALIAS("platform:gpio-omap");
MODULE_LICENSE("GPL v2");