/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>

#define OFF_MODE	1
#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF

static LIST_HEAD(omap_gpio_list);

struct gpio_regs {
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};

struct gpio_bank {
	struct list_head node;
	void __iomem *base;
	int irq;
	u32 non_wakeup_gpios;
	u32 enabled_non_wakeup_gpios;
	struct gpio_regs context;
	u32 saved_datain;
	u32 level_mask;
	u32 toggle_mask;
	raw_spinlock_t lock;
	raw_spinlock_t wa_lock;
	struct gpio_chip chip;
	struct clk *dbck;
	u32 mod_usage;
	u32 irq_usage;
	u32 dbck_enable_mask;
	bool dbck_enabled;
	bool is_mpuio;
	bool dbck_flag;
	bool loses_context;
	bool context_valid;
	int stride;
	u32 width;
	int context_loss_count;
	int power_mode;
	bool workaround_enabled;

	void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
	void (*set_dataout_multiple)(struct gpio_bank *bank,
				     unsigned long *mask, unsigned long *bits);
	int (*get_context_loss_count)(struct device *dev);

	struct omap_gpio_reg_offs *regs;
};

#define GPIO_MOD_CTRL_BIT	BIT(0)

#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
#define LINE_USED(line, offset) (line & (BIT(offset)))

static void omap_gpio_unmask_irq(struct irq_data *d);

static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
	return gpiochip_get_data(chip);
}

static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
				    int is_input)
{
	void __iomem *reg = bank->base;
	u32 l;

	reg += bank->regs->direction;
	l = readl_relaxed(reg);
	if (is_input)
		l |= BIT(gpio);
	else
		l &= ~(BIT(gpio));
	writel_relaxed(l, reg);
	bank->context.oe = l;
}


/* set data out value using dedicated set/clear register */
static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
				      int enable)
{
	void __iomem *reg = bank->base;
	u32 l = BIT(offset);

	if (enable) {
		reg += bank->regs->set_dataout;
		bank->context.dataout |= l;
	} else {
		reg += bank->regs->clr_dataout;
		bank->context.dataout &= ~l;
	}

	writel_relaxed(l, reg);
}

/* set data out value using mask register */
static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
				       int enable)
{
	void __iomem *reg = bank->base + bank->regs->dataout;
	u32 gpio_bit = BIT(offset);
	u32 l;

	l = readl_relaxed(reg);
	if (enable)
		l |= gpio_bit;
	else
		l &= ~gpio_bit;
	writel_relaxed(l, reg);
	bank->context.dataout = l;
}

static int omap_get_gpio_datain(struct gpio_bank *bank, int offset)
{
	void __iomem *reg = bank->base + bank->regs->datain;

	return (readl_relaxed(reg) & (BIT(offset))) != 0;
}

static int omap_get_gpio_dataout(struct gpio_bank *bank, int offset)
{
	void __iomem *reg = bank->base + bank->regs->dataout;

	return (readl_relaxed(reg) & (BIT(offset))) != 0;
}

/* set multiple data out values using dedicated set/clear register */
static void omap_set_gpio_dataout_reg_multiple(struct gpio_bank *bank,
					       unsigned long *mask,
					       unsigned long *bits)
{
	void __iomem *reg = bank->base;
	u32 l;

	l = *bits & *mask;
	writel_relaxed(l, reg + bank->regs->set_dataout);
	bank->context.dataout |= l;

	l = ~*bits & *mask;
	writel_relaxed(l, reg + bank->regs->clr_dataout);
	bank->context.dataout &= ~l;
}

/* set multiple data out values using mask register */
static void omap_set_gpio_dataout_mask_multiple(struct gpio_bank *bank,
						unsigned long *mask,
						unsigned long *bits)
{
	void __iomem *reg = bank->base + bank->regs->dataout;
	u32 l = (readl_relaxed(reg) & ~*mask) | (*bits & *mask);

	writel_relaxed(l, reg);
	bank->context.dataout = l;
}

static unsigned long omap_get_gpio_datain_multiple(struct gpio_bank *bank,
						   unsigned long *mask)
{
	void __iomem *reg = bank->base + bank->regs->datain;

	return readl_relaxed(reg) & *mask;
}

static unsigned long omap_get_gpio_dataout_multiple(struct gpio_bank *bank,
						    unsigned long *mask)
{
	void __iomem *reg = bank->base + bank->regs->dataout;

	return readl_relaxed(reg) & *mask;
}

static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
{
	int l = readl_relaxed(base + reg);

	if (set)
		l |= mask;
	else
		l &= ~mask;

	writel_relaxed(l, base + reg);
}

static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_enable(bank->dbck);
		bank->dbck_enabled = true;

		writel_relaxed(bank->dbck_enable_mask,
			       bank->base + bank->regs->debounce_en);
	}
}

static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		/*
		 * Disable debounce before cutting its clock. If debounce is
		 * enabled but the clock is not, the GPIO module seems to be
		 * unable to detect events and generate interrupts, at least
		 * on OMAP3.
		 */
		writel_relaxed(0, bank->base + bank->regs->debounce_en);

		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/**
 * omap2_set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps
 *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
 * so we need to convert and round up to the closest unit.
 *
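 * For example, a requested debounce time of 100 us is programmed as
 * DIV_ROUND_UP(100, 31) - 1 = 3, which the hardware applies as
 * (3 + 1) x 31 = 124 us.
 *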
 * Return: 0 on success, negative error otherwise.
 */
static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
				   unsigned debounce)
{
	void __iomem *reg;
	u32 val;
	u32 l;
	bool enable = !!debounce;

	if (!bank->dbck_flag)
		return -ENOTSUPP;

	if (enable) {
		debounce = DIV_ROUND_UP(debounce, 31) - 1;
		if ((debounce & OMAP4_GPIO_DEBOUNCINGTIME_MASK) != debounce)
			return -EINVAL;
	}

	l = BIT(offset);

	clk_enable(bank->dbck);
	reg = bank->base + bank->regs->debounce;
	writel_relaxed(debounce, reg);

	reg = bank->base + bank->regs->debounce_en;
	val = readl_relaxed(reg);

	if (enable)
		val |= l;
	else
		val &= ~l;
	bank->dbck_enable_mask = val;

	writel_relaxed(val, reg);
	clk_disable(bank->dbck);
	/*
	 * Enable debounce clock per module.
	 * This call is mandatory because in omap_gpio_request(), when
	 * *_runtime_get_sync() is called, _gpio_dbck_enable() within the
	 * runtime callback fails to turn on dbck because dbck_enable_mask
	 * used within _gpio_dbck_enable() is still not initialized at
	 * that point. Therefore we have to enable dbck here.
	 */
	omap_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}

	return 0;
}

/**
 * omap_clear_gpio_debounce - clear debounce settings for a gpio
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 *
 * If a gpio is using debounce, then clear the debounce enable bit and if
 * this is the only gpio in this bank using debounce, then clear the debounce
 * time too. The debounce clock will also be disabled when calling this
 * function if this is the only gpio in the bank using debounce.
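 *
 * This is called from omap_gpio_free() and omap_gpio_irq_shutdown() once a
 * line is no longer in use as a GPIO or as an IRQ.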
 */
static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
{
	u32 gpio_bit = BIT(offset);

	if (!bank->dbck_flag)
		return;

	if (!(bank->dbck_enable_mask & gpio_bit))
		return;

	bank->dbck_enable_mask &= ~gpio_bit;
	bank->context.debounce_en &= ~gpio_bit;
	writel_relaxed(bank->context.debounce_en,
		       bank->base + bank->regs->debounce_en);

	if (!bank->dbck_enable_mask) {
		bank->context.debounce = 0;
		writel_relaxed(bank->context.debounce, bank->base +
			       bank->regs->debounce);
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
					 unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = BIT(gpio);

	omap_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_LOW);
	omap_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_HIGH);
	omap_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
		      trigger & IRQ_TYPE_EDGE_RISING);
	omap_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
		      trigger & IRQ_TYPE_EDGE_FALLING);

	bank->context.leveldetect0 =
			readl_relaxed(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
			readl_relaxed(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
			readl_relaxed(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
			readl_relaxed(bank->base + bank->regs->fallingdetect);

	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
		omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
	}

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl) {
		/* On omap24xx proceed only when valid GPIO bit is set */
		if (bank->non_wakeup_gpios) {
			if (!(bank->non_wakeup_gpios & gpio_bit))
				goto exit;
		}

		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes,
		 * to avoid losing the IRQ during PER RET/OFF mode.
		 * Applies to omap2 non-wakeup gpios and all omap3 gpios.
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}

exit:
	bank->level_mask =
		readl_relaxed(bank->base + bank->regs->leveldetect0) |
		readl_relaxed(bank->base + bank->regs->leveldetect1);
}

#ifdef CONFIG_ARCH_OMAP1
/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once. For all other chips, this function is a no-op.
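 *
 * (Here that means OMAP1 banks that provide the irqctrl register; elsewhere
 * the function is an empty stub, and OMAP1 banks without irqctrl return
 * early.)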
 */
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (!bank->regs->irqctrl)
		return;

	reg += bank->regs->irqctrl;

	l = readl_relaxed(reg);
	if ((l >> gpio) & 1)
		l &= ~(BIT(gpio));
	else
		l |= BIT(gpio);

	writel_relaxed(l, reg);
}
#else
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
#endif

static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
				    unsigned trigger)
{
	void __iomem *reg = bank->base;
	void __iomem *base = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		omap_set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = readl_relaxed(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= BIT(gpio);
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= BIT(gpio);
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(BIT(gpio));
		else
			return -EINVAL;

		writel_relaxed(l, reg);
	} else if (bank->regs->edgectrl1) {
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = readl_relaxed(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= BIT(gpio << 1);

		/* Enable wake-up during idle for dynamic tick */
		omap_gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
		writel_relaxed(l, reg);
	}
	return 0;
}

static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *base = bank->base;

	if (bank->regs->wkup_en &&
	    !LINE_USED(bank->mod_usage, offset) &&
	    !LINE_USED(bank->irq_usage, offset)) {
		/* Disable wake-up during idle for dynamic tick */
		omap_gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static int omap_gpio_is_input(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return readl_relaxed(reg) & BIT(offset);
}

static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned offset)
{
	if (!LINE_USED(bank->mod_usage, offset)) {
		omap_enable_gpio_module(bank, offset);
		omap_set_gpio_direction(bank, offset, 1);
	}
	bank->irq_usage |= BIT(offset);
}

static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	int retval;
	unsigned long flags;
	unsigned offset = d->hwirq;

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	if (!bank->regs->leveldetect0 &&
	    (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	raw_spin_lock_irqsave(&bank->lock, flags);
	retval = omap_set_gpio_triggering(bank, offset, type);
	if (retval) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		goto error;
	}
	omap_gpio_init_irq(bank, offset);
	if (!omap_gpio_is_input(bank, offset)) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		retval = -EINVAL;
		goto error;
	}
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		/*
		 * Edge IRQs are already cleared/acked in the interrupt
		 * handler and do not need to be masked, so the
		 * handle_edge_irq() logic is redundant here and may even
		 * cause interrupts to be lost. Just use handle_simple_irq.
		 */
		irq_set_handler_locked(d, handle_simple_irq);

	return 0;

error:
	return retval;
}

static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	writel_relaxed(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		writel_relaxed(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	readl_relaxed(reg);
}

static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank,
					     unsigned offset)
{
	omap_clear_gpio_irqbank(bank, BIT(offset));
}

static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (BIT(bank->width)) - 1;

	reg += bank->regs->irqenable;
	l = readl_relaxed(reg);
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}

static void omap_enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->set_irqenable) {
		reg += bank->regs->set_irqenable;
		l = gpio_mask;
		bank->context.irqenable1 |= gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = readl_relaxed(reg);
		if (bank->regs->irqenable_inv)
			l &= ~gpio_mask;
		else
			l |= gpio_mask;
		bank->context.irqenable1 = l;
	}

	writel_relaxed(l, reg);
}

static void omap_disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->clr_irqenable) {
		reg += bank->regs->clr_irqenable;
		l = gpio_mask;
		bank->context.irqenable1 &= ~gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = readl_relaxed(reg);
		if (bank->regs->irqenable_inv)
			l |= gpio_mask;
		else
			l &= ~gpio_mask;
		bank->context.irqenable1 = l;
	}

	writel_relaxed(l, reg);
}

static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
					   unsigned offset, int enable)
{
	if (enable)
		omap_enable_gpio_irqbank(bank, BIT(offset));
	else
		omap_disable_gpio_irqbank(bank, BIT(offset));
}

/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);

	return irq_set_irq_wake(bank->irq, enable);
}

static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	/*
	 * If this is the first gpio_request for the bank,
	 * enable the bank module.
	 */
	if (!BANK_USED(bank))
		pm_runtime_get_sync(chip->parent);

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_enable_gpio_module(bank, offset);
	bank->mod_usage |= BIT(offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->mod_usage &= ~(BIT(offset));
	if (!LINE_USED(bank->irq_usage, offset)) {
		omap_set_gpio_direction(bank, offset, 1);
		omap_clear_gpio_debounce(bank, offset);
	}
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	/*
	 * If this is the last gpio to be freed in the bank,
	 * disable the bank module.
	 */
	if (!BANK_USED(bank))
		pm_runtime_put(chip->parent);
}

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
{
	void __iomem *isr_reg = NULL;
	u32 enabled, isr, level_mask;
	unsigned int bit;
	struct gpio_bank *bank = gpiobank;
	unsigned long wa_lock_flags;
	unsigned long lock_flags;

	isr_reg = bank->base + bank->regs->irqstatus;
	if (WARN_ON(!isr_reg))
		goto exit;

	pm_runtime_get_sync(bank->chip.parent);

	while (1) {
		raw_spin_lock_irqsave(&bank->lock, lock_flags);

		enabled = omap_get_gpio_irqbank_mask(bank);
		isr = readl_relaxed(isr_reg) & enabled;

		if (bank->level_mask)
			level_mask = bank->level_mask & enabled;
		else
			level_mask = 0;

		/*
		 * Clear edge-sensitive interrupts before the handler(s)
		 * are called so that we don't miss any interrupt that
		 * occurs while they are executing.
		 */
		if (isr & ~level_mask)
			omap_clear_gpio_irqbank(bank, isr & ~level_mask);

		raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

		if (!isr)
			break;

		while (isr) {
			bit = __ffs(isr);
			isr &= ~(BIT(bit));

			raw_spin_lock_irqsave(&bank->lock, lock_flags);
			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time. If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
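			 * For example, a line requested with
			 * IRQ_TYPE_EDGE_BOTH on such a chip is armed for one
			 * edge at a time; flipping the ICR bit after each
			 * interrupt lets the opposite edge be caught next.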
			 */
			if (bank->toggle_mask & (BIT(bit)))
				omap_toggle_gpio_edge_triggering(bank, bit);

			raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

			raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);

			generic_handle_irq(irq_find_mapping(bank->chip.irq.domain,
							    bit));

			raw_spin_unlock_irqrestore(&bank->wa_lock,
						   wa_lock_flags);
		}
	}
exit:
	pm_runtime_put(bank->chip.parent);
	return IRQ_HANDLED;
}

static unsigned int omap_gpio_irq_startup(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);

	if (!LINE_USED(bank->mod_usage, offset))
		omap_set_gpio_direction(bank, offset, 1);
	else if (!omap_gpio_is_input(bank, offset))
		goto err;
	omap_enable_gpio_module(bank, offset);
	bank->irq_usage |= BIT(offset);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
	omap_gpio_unmask_irq(d);

	return 0;
err:
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return -EINVAL;
}

static void omap_gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->irq_usage &= ~(BIT(offset));
	omap_set_gpio_irqenable(bank, offset, 0);
	omap_clear_gpio_irqstatus(bank, offset);
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	if (!LINE_USED(bank->mod_usage, offset))
		omap_clear_gpio_debounce(bank, offset);
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_irq_bus_lock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	if (!BANK_USED(bank))
		pm_runtime_get_sync(bank->chip.parent);
}

static void gpio_irq_bus_sync_unlock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	/*
	 * If this is the last IRQ to be freed in the bank,
	 * disable the bank module.
855 */ 856 if (!BANK_USED(bank)) 857 pm_runtime_put(bank->chip.parent); 858 } 859 860 static void omap_gpio_ack_irq(struct irq_data *d) 861 { 862 struct gpio_bank *bank = omap_irq_data_get_bank(d); 863 unsigned offset = d->hwirq; 864 865 omap_clear_gpio_irqstatus(bank, offset); 866 } 867 868 static void omap_gpio_mask_irq(struct irq_data *d) 869 { 870 struct gpio_bank *bank = omap_irq_data_get_bank(d); 871 unsigned offset = d->hwirq; 872 unsigned long flags; 873 874 raw_spin_lock_irqsave(&bank->lock, flags); 875 omap_set_gpio_irqenable(bank, offset, 0); 876 omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); 877 raw_spin_unlock_irqrestore(&bank->lock, flags); 878 } 879 880 static void omap_gpio_unmask_irq(struct irq_data *d) 881 { 882 struct gpio_bank *bank = omap_irq_data_get_bank(d); 883 unsigned offset = d->hwirq; 884 u32 trigger = irqd_get_trigger_type(d); 885 unsigned long flags; 886 887 raw_spin_lock_irqsave(&bank->lock, flags); 888 if (trigger) 889 omap_set_gpio_triggering(bank, offset, trigger); 890 891 /* For level-triggered GPIOs, the clearing must be done after 892 * the HW source is cleared, thus after the handler has run */ 893 if (bank->level_mask & BIT(offset)) { 894 omap_set_gpio_irqenable(bank, offset, 0); 895 omap_clear_gpio_irqstatus(bank, offset); 896 } 897 898 omap_set_gpio_irqenable(bank, offset, 1); 899 raw_spin_unlock_irqrestore(&bank->lock, flags); 900 } 901 902 /*---------------------------------------------------------------------*/ 903 904 static int omap_mpuio_suspend_noirq(struct device *dev) 905 { 906 struct platform_device *pdev = to_platform_device(dev); 907 struct gpio_bank *bank = platform_get_drvdata(pdev); 908 void __iomem *mask_reg = bank->base + 909 OMAP_MPUIO_GPIO_MASKIT / bank->stride; 910 unsigned long flags; 911 912 raw_spin_lock_irqsave(&bank->lock, flags); 913 writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg); 914 raw_spin_unlock_irqrestore(&bank->lock, flags); 915 916 return 0; 917 } 918 919 static int omap_mpuio_resume_noirq(struct device *dev) 920 { 921 struct platform_device *pdev = to_platform_device(dev); 922 struct gpio_bank *bank = platform_get_drvdata(pdev); 923 void __iomem *mask_reg = bank->base + 924 OMAP_MPUIO_GPIO_MASKIT / bank->stride; 925 unsigned long flags; 926 927 raw_spin_lock_irqsave(&bank->lock, flags); 928 writel_relaxed(bank->context.wake_en, mask_reg); 929 raw_spin_unlock_irqrestore(&bank->lock, flags); 930 931 return 0; 932 } 933 934 static const struct dev_pm_ops omap_mpuio_dev_pm_ops = { 935 .suspend_noirq = omap_mpuio_suspend_noirq, 936 .resume_noirq = omap_mpuio_resume_noirq, 937 }; 938 939 /* use platform_driver for this. 
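 * The "mpuio" platform_device/platform_driver pair below is registered so
 * that the noirq suspend/resume callbacks above run for the MPUIO bank,
 * saving and restoring its interrupt mask register; the device also serves
 * as the parent of the MPUIO gpio_chip.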
*/ 940 static struct platform_driver omap_mpuio_driver = { 941 .driver = { 942 .name = "mpuio", 943 .pm = &omap_mpuio_dev_pm_ops, 944 }, 945 }; 946 947 static struct platform_device omap_mpuio_device = { 948 .name = "mpuio", 949 .id = -1, 950 .dev = { 951 .driver = &omap_mpuio_driver.driver, 952 } 953 /* could list the /proc/iomem resources */ 954 }; 955 956 static inline void omap_mpuio_init(struct gpio_bank *bank) 957 { 958 platform_set_drvdata(&omap_mpuio_device, bank); 959 960 if (platform_driver_register(&omap_mpuio_driver) == 0) 961 (void) platform_device_register(&omap_mpuio_device); 962 } 963 964 /*---------------------------------------------------------------------*/ 965 966 static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset) 967 { 968 struct gpio_bank *bank; 969 unsigned long flags; 970 void __iomem *reg; 971 int dir; 972 973 bank = gpiochip_get_data(chip); 974 reg = bank->base + bank->regs->direction; 975 raw_spin_lock_irqsave(&bank->lock, flags); 976 dir = !!(readl_relaxed(reg) & BIT(offset)); 977 raw_spin_unlock_irqrestore(&bank->lock, flags); 978 return dir; 979 } 980 981 static int omap_gpio_input(struct gpio_chip *chip, unsigned offset) 982 { 983 struct gpio_bank *bank; 984 unsigned long flags; 985 986 bank = gpiochip_get_data(chip); 987 raw_spin_lock_irqsave(&bank->lock, flags); 988 omap_set_gpio_direction(bank, offset, 1); 989 raw_spin_unlock_irqrestore(&bank->lock, flags); 990 return 0; 991 } 992 993 static int omap_gpio_get(struct gpio_chip *chip, unsigned offset) 994 { 995 struct gpio_bank *bank; 996 997 bank = gpiochip_get_data(chip); 998 999 if (omap_gpio_is_input(bank, offset)) 1000 return omap_get_gpio_datain(bank, offset); 1001 else 1002 return omap_get_gpio_dataout(bank, offset); 1003 } 1004 1005 static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value) 1006 { 1007 struct gpio_bank *bank; 1008 unsigned long flags; 1009 1010 bank = gpiochip_get_data(chip); 1011 raw_spin_lock_irqsave(&bank->lock, flags); 1012 bank->set_dataout(bank, offset, value); 1013 omap_set_gpio_direction(bank, offset, 0); 1014 raw_spin_unlock_irqrestore(&bank->lock, flags); 1015 return 0; 1016 } 1017 1018 static int omap_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask, 1019 unsigned long *bits) 1020 { 1021 struct gpio_bank *bank = gpiochip_get_data(chip); 1022 void __iomem *reg = bank->base + bank->regs->direction; 1023 unsigned long in = readl_relaxed(reg), l; 1024 1025 *bits = 0; 1026 1027 l = in & *mask; 1028 if (l) 1029 *bits |= omap_get_gpio_datain_multiple(bank, &l); 1030 1031 l = ~in & *mask; 1032 if (l) 1033 *bits |= omap_get_gpio_dataout_multiple(bank, &l); 1034 1035 return 0; 1036 } 1037 1038 static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset, 1039 unsigned debounce) 1040 { 1041 struct gpio_bank *bank; 1042 unsigned long flags; 1043 int ret; 1044 1045 bank = gpiochip_get_data(chip); 1046 1047 raw_spin_lock_irqsave(&bank->lock, flags); 1048 ret = omap2_set_gpio_debounce(bank, offset, debounce); 1049 raw_spin_unlock_irqrestore(&bank->lock, flags); 1050 1051 if (ret) 1052 dev_info(chip->parent, 1053 "Could not set line %u debounce to %u microseconds (%d)", 1054 offset, debounce, ret); 1055 1056 return ret; 1057 } 1058 1059 static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset, 1060 unsigned long config) 1061 { 1062 u32 debounce; 1063 1064 if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE) 1065 return -ENOTSUPP; 1066 1067 debounce = pinconf_to_config_argument(config); 1068 
return omap_gpio_debounce(chip, offset, debounce); 1069 } 1070 1071 static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 1072 { 1073 struct gpio_bank *bank; 1074 unsigned long flags; 1075 1076 bank = gpiochip_get_data(chip); 1077 raw_spin_lock_irqsave(&bank->lock, flags); 1078 bank->set_dataout(bank, offset, value); 1079 raw_spin_unlock_irqrestore(&bank->lock, flags); 1080 } 1081 1082 static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask, 1083 unsigned long *bits) 1084 { 1085 struct gpio_bank *bank = gpiochip_get_data(chip); 1086 unsigned long flags; 1087 1088 raw_spin_lock_irqsave(&bank->lock, flags); 1089 bank->set_dataout_multiple(bank, mask, bits); 1090 raw_spin_unlock_irqrestore(&bank->lock, flags); 1091 } 1092 1093 /*---------------------------------------------------------------------*/ 1094 1095 static void omap_gpio_show_rev(struct gpio_bank *bank) 1096 { 1097 static bool called; 1098 u32 rev; 1099 1100 if (called || bank->regs->revision == USHRT_MAX) 1101 return; 1102 1103 rev = readw_relaxed(bank->base + bank->regs->revision); 1104 pr_info("OMAP GPIO hardware version %d.%d\n", 1105 (rev >> 4) & 0x0f, rev & 0x0f); 1106 1107 called = true; 1108 } 1109 1110 static void omap_gpio_mod_init(struct gpio_bank *bank) 1111 { 1112 void __iomem *base = bank->base; 1113 u32 l = 0xffffffff; 1114 1115 if (bank->width == 16) 1116 l = 0xffff; 1117 1118 if (bank->is_mpuio) { 1119 writel_relaxed(l, bank->base + bank->regs->irqenable); 1120 return; 1121 } 1122 1123 omap_gpio_rmw(base, bank->regs->irqenable, l, 1124 bank->regs->irqenable_inv); 1125 omap_gpio_rmw(base, bank->regs->irqstatus, l, 1126 !bank->regs->irqenable_inv); 1127 if (bank->regs->debounce_en) 1128 writel_relaxed(0, base + bank->regs->debounce_en); 1129 1130 /* Save OE default value (0xffffffff) in the context */ 1131 bank->context.oe = readl_relaxed(bank->base + bank->regs->direction); 1132 /* Initialize interface clk ungated, module enabled */ 1133 if (bank->regs->ctrl) 1134 writel_relaxed(0, base + bank->regs->ctrl); 1135 } 1136 1137 static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) 1138 { 1139 struct gpio_irq_chip *irq; 1140 static int gpio; 1141 const char *label; 1142 int irq_base = 0; 1143 int ret; 1144 1145 /* 1146 * REVISIT eventually switch from OMAP-specific gpio structs 1147 * over to the generic ones 1148 */ 1149 bank->chip.request = omap_gpio_request; 1150 bank->chip.free = omap_gpio_free; 1151 bank->chip.get_direction = omap_gpio_get_direction; 1152 bank->chip.direction_input = omap_gpio_input; 1153 bank->chip.get = omap_gpio_get; 1154 bank->chip.get_multiple = omap_gpio_get_multiple; 1155 bank->chip.direction_output = omap_gpio_output; 1156 bank->chip.set_config = omap_gpio_set_config; 1157 bank->chip.set = omap_gpio_set; 1158 bank->chip.set_multiple = omap_gpio_set_multiple; 1159 if (bank->is_mpuio) { 1160 bank->chip.label = "mpuio"; 1161 if (bank->regs->wkup_en) 1162 bank->chip.parent = &omap_mpuio_device.dev; 1163 bank->chip.base = OMAP_MPUIO(0); 1164 } else { 1165 label = devm_kasprintf(bank->chip.parent, GFP_KERNEL, "gpio-%d-%d", 1166 gpio, gpio + bank->width - 1); 1167 if (!label) 1168 return -ENOMEM; 1169 bank->chip.label = label; 1170 bank->chip.base = gpio; 1171 } 1172 bank->chip.ngpio = bank->width; 1173 1174 #ifdef CONFIG_ARCH_OMAP1 1175 /* 1176 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop 1177 * irq_alloc_descs() since a base IRQ offset will no longer be needed. 
1178 */ 1179 irq_base = devm_irq_alloc_descs(bank->chip.parent, 1180 -1, 0, bank->width, 0); 1181 if (irq_base < 0) { 1182 dev_err(bank->chip.parent, "Couldn't allocate IRQ numbers\n"); 1183 return -ENODEV; 1184 } 1185 #endif 1186 1187 /* MPUIO is a bit different, reading IRQ status clears it */ 1188 if (bank->is_mpuio) { 1189 irqc->irq_ack = dummy_irq_chip.irq_ack; 1190 if (!bank->regs->wkup_en) 1191 irqc->irq_set_wake = NULL; 1192 } 1193 1194 irq = &bank->chip.irq; 1195 irq->chip = irqc; 1196 irq->handler = handle_bad_irq; 1197 irq->default_type = IRQ_TYPE_NONE; 1198 irq->num_parents = 1; 1199 irq->parents = &bank->irq; 1200 irq->first = irq_base; 1201 1202 ret = gpiochip_add_data(&bank->chip, bank); 1203 if (ret) { 1204 dev_err(bank->chip.parent, 1205 "Could not register gpio chip %d\n", ret); 1206 return ret; 1207 } 1208 1209 ret = devm_request_irq(bank->chip.parent, bank->irq, 1210 omap_gpio_irq_handler, 1211 0, dev_name(bank->chip.parent), bank); 1212 if (ret) 1213 gpiochip_remove(&bank->chip); 1214 1215 if (!bank->is_mpuio) 1216 gpio += bank->width; 1217 1218 return ret; 1219 } 1220 1221 static const struct of_device_id omap_gpio_match[]; 1222 1223 static int omap_gpio_probe(struct platform_device *pdev) 1224 { 1225 struct device *dev = &pdev->dev; 1226 struct device_node *node = dev->of_node; 1227 const struct of_device_id *match; 1228 const struct omap_gpio_platform_data *pdata; 1229 struct resource *res; 1230 struct gpio_bank *bank; 1231 struct irq_chip *irqc; 1232 int ret; 1233 1234 match = of_match_device(of_match_ptr(omap_gpio_match), dev); 1235 1236 pdata = match ? match->data : dev_get_platdata(dev); 1237 if (!pdata) 1238 return -EINVAL; 1239 1240 bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL); 1241 if (!bank) 1242 return -ENOMEM; 1243 1244 irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL); 1245 if (!irqc) 1246 return -ENOMEM; 1247 1248 irqc->irq_startup = omap_gpio_irq_startup, 1249 irqc->irq_shutdown = omap_gpio_irq_shutdown, 1250 irqc->irq_ack = omap_gpio_ack_irq, 1251 irqc->irq_mask = omap_gpio_mask_irq, 1252 irqc->irq_unmask = omap_gpio_unmask_irq, 1253 irqc->irq_set_type = omap_gpio_irq_type, 1254 irqc->irq_set_wake = omap_gpio_wake_enable, 1255 irqc->irq_bus_lock = omap_gpio_irq_bus_lock, 1256 irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock, 1257 irqc->name = dev_name(&pdev->dev); 1258 irqc->flags = IRQCHIP_MASK_ON_SUSPEND; 1259 1260 bank->irq = platform_get_irq(pdev, 0); 1261 if (bank->irq <= 0) { 1262 if (!bank->irq) 1263 bank->irq = -ENXIO; 1264 if (bank->irq != -EPROBE_DEFER) 1265 dev_err(dev, 1266 "can't get irq resource ret=%d\n", bank->irq); 1267 return bank->irq; 1268 } 1269 1270 bank->chip.parent = dev; 1271 bank->chip.owner = THIS_MODULE; 1272 bank->dbck_flag = pdata->dbck_flag; 1273 bank->stride = pdata->bank_stride; 1274 bank->width = pdata->bank_width; 1275 bank->is_mpuio = pdata->is_mpuio; 1276 bank->non_wakeup_gpios = pdata->non_wakeup_gpios; 1277 bank->regs = pdata->regs; 1278 #ifdef CONFIG_OF_GPIO 1279 bank->chip.of_node = of_node_get(node); 1280 #endif 1281 if (node) { 1282 if (!of_property_read_bool(node, "ti,gpio-always-on")) 1283 bank->loses_context = true; 1284 } else { 1285 bank->loses_context = pdata->loses_context; 1286 1287 if (bank->loses_context) 1288 bank->get_context_loss_count = 1289 pdata->get_context_loss_count; 1290 } 1291 1292 if (bank->regs->set_dataout && bank->regs->clr_dataout) { 1293 bank->set_dataout = omap_set_gpio_dataout_reg; 1294 bank->set_dataout_multiple = omap_set_gpio_dataout_reg_multiple; 1295 } else { 1296 
bank->set_dataout = omap_set_gpio_dataout_mask; 1297 bank->set_dataout_multiple = 1298 omap_set_gpio_dataout_mask_multiple; 1299 } 1300 1301 raw_spin_lock_init(&bank->lock); 1302 raw_spin_lock_init(&bank->wa_lock); 1303 1304 /* Static mapping, never released */ 1305 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1306 bank->base = devm_ioremap_resource(dev, res); 1307 if (IS_ERR(bank->base)) { 1308 return PTR_ERR(bank->base); 1309 } 1310 1311 if (bank->dbck_flag) { 1312 bank->dbck = devm_clk_get(dev, "dbclk"); 1313 if (IS_ERR(bank->dbck)) { 1314 dev_err(dev, 1315 "Could not get gpio dbck. Disable debounce\n"); 1316 bank->dbck_flag = false; 1317 } else { 1318 clk_prepare(bank->dbck); 1319 } 1320 } 1321 1322 platform_set_drvdata(pdev, bank); 1323 1324 pm_runtime_enable(dev); 1325 pm_runtime_irq_safe(dev); 1326 pm_runtime_get_sync(dev); 1327 1328 if (bank->is_mpuio) 1329 omap_mpuio_init(bank); 1330 1331 omap_gpio_mod_init(bank); 1332 1333 ret = omap_gpio_chip_init(bank, irqc); 1334 if (ret) { 1335 pm_runtime_put_sync(dev); 1336 pm_runtime_disable(dev); 1337 if (bank->dbck_flag) 1338 clk_unprepare(bank->dbck); 1339 return ret; 1340 } 1341 1342 omap_gpio_show_rev(bank); 1343 1344 pm_runtime_put(dev); 1345 1346 list_add_tail(&bank->node, &omap_gpio_list); 1347 1348 return 0; 1349 } 1350 1351 static int omap_gpio_remove(struct platform_device *pdev) 1352 { 1353 struct gpio_bank *bank = platform_get_drvdata(pdev); 1354 1355 list_del(&bank->node); 1356 gpiochip_remove(&bank->chip); 1357 pm_runtime_disable(&pdev->dev); 1358 if (bank->dbck_flag) 1359 clk_unprepare(bank->dbck); 1360 1361 return 0; 1362 } 1363 1364 #ifdef CONFIG_ARCH_OMAP2PLUS 1365 1366 #if defined(CONFIG_PM) 1367 static void omap_gpio_restore_context(struct gpio_bank *bank); 1368 1369 static int omap_gpio_runtime_suspend(struct device *dev) 1370 { 1371 struct platform_device *pdev = to_platform_device(dev); 1372 struct gpio_bank *bank = platform_get_drvdata(pdev); 1373 u32 l1 = 0, l2 = 0; 1374 unsigned long flags; 1375 u32 wake_low, wake_hi; 1376 1377 raw_spin_lock_irqsave(&bank->lock, flags); 1378 1379 /* 1380 * Only edges can generate a wakeup event to the PRCM. 1381 * 1382 * Therefore, ensure any wake-up capable GPIOs have 1383 * edge-detection enabled before going idle to ensure a wakeup 1384 * to the PRCM is generated on a GPIO transition. (c.f. 34xx 1385 * NDA TRM 25.5.3.1) 1386 * 1387 * The normal values will be restored upon ->runtime_resume() 1388 * by writing back the values saved in bank->context. 1389 */ 1390 wake_low = bank->context.leveldetect0 & bank->context.wake_en; 1391 if (wake_low) 1392 writel_relaxed(wake_low | bank->context.fallingdetect, 1393 bank->base + bank->regs->fallingdetect); 1394 wake_hi = bank->context.leveldetect1 & bank->context.wake_en; 1395 if (wake_hi) 1396 writel_relaxed(wake_hi | bank->context.risingdetect, 1397 bank->base + bank->regs->risingdetect); 1398 1399 if (!bank->enabled_non_wakeup_gpios) 1400 goto update_gpio_context_count; 1401 1402 if (bank->power_mode != OFF_MODE) { 1403 bank->power_mode = 0; 1404 goto update_gpio_context_count; 1405 } 1406 /* 1407 * If going to OFF, remove triggering for all 1408 * non-wakeup GPIOs. Otherwise spurious IRQs will be 1409 * generated. See OMAP2420 Errata item 1.101. 
1410 */ 1411 bank->saved_datain = readl_relaxed(bank->base + 1412 bank->regs->datain); 1413 l1 = bank->context.fallingdetect; 1414 l2 = bank->context.risingdetect; 1415 1416 l1 &= ~bank->enabled_non_wakeup_gpios; 1417 l2 &= ~bank->enabled_non_wakeup_gpios; 1418 1419 writel_relaxed(l1, bank->base + bank->regs->fallingdetect); 1420 writel_relaxed(l2, bank->base + bank->regs->risingdetect); 1421 1422 bank->workaround_enabled = true; 1423 1424 update_gpio_context_count: 1425 if (bank->get_context_loss_count) 1426 bank->context_loss_count = 1427 bank->get_context_loss_count(dev); 1428 1429 omap_gpio_dbck_disable(bank); 1430 raw_spin_unlock_irqrestore(&bank->lock, flags); 1431 1432 return 0; 1433 } 1434 1435 static void omap_gpio_init_context(struct gpio_bank *p); 1436 1437 static int omap_gpio_runtime_resume(struct device *dev) 1438 { 1439 struct platform_device *pdev = to_platform_device(dev); 1440 struct gpio_bank *bank = platform_get_drvdata(pdev); 1441 u32 l = 0, gen, gen0, gen1; 1442 unsigned long flags; 1443 int c; 1444 1445 raw_spin_lock_irqsave(&bank->lock, flags); 1446 1447 /* 1448 * On the first resume during the probe, the context has not 1449 * been initialised and so initialise it now. Also initialise 1450 * the context loss count. 1451 */ 1452 if (bank->loses_context && !bank->context_valid) { 1453 omap_gpio_init_context(bank); 1454 1455 if (bank->get_context_loss_count) 1456 bank->context_loss_count = 1457 bank->get_context_loss_count(dev); 1458 } 1459 1460 omap_gpio_dbck_enable(bank); 1461 1462 /* 1463 * In ->runtime_suspend(), level-triggered, wakeup-enabled 1464 * GPIOs were set to edge trigger also in order to be able to 1465 * generate a PRCM wakeup. Here we restore the 1466 * pre-runtime_suspend() values for edge triggering. 1467 */ 1468 writel_relaxed(bank->context.fallingdetect, 1469 bank->base + bank->regs->fallingdetect); 1470 writel_relaxed(bank->context.risingdetect, 1471 bank->base + bank->regs->risingdetect); 1472 1473 if (bank->loses_context) { 1474 if (!bank->get_context_loss_count) { 1475 omap_gpio_restore_context(bank); 1476 } else { 1477 c = bank->get_context_loss_count(dev); 1478 if (c != bank->context_loss_count) { 1479 omap_gpio_restore_context(bank); 1480 } else { 1481 raw_spin_unlock_irqrestore(&bank->lock, flags); 1482 return 0; 1483 } 1484 } 1485 } 1486 1487 if (!bank->workaround_enabled) { 1488 raw_spin_unlock_irqrestore(&bank->lock, flags); 1489 return 0; 1490 } 1491 1492 l = readl_relaxed(bank->base + bank->regs->datain); 1493 1494 /* 1495 * Check if any of the non-wakeup interrupt GPIOs have changed 1496 * state. If so, generate an IRQ by software. This is 1497 * horribly racy, but it's the best we can do to work around 1498 * this silicon bug. 1499 */ 1500 l ^= bank->saved_datain; 1501 l &= bank->enabled_non_wakeup_gpios; 1502 1503 /* 1504 * No need to generate IRQs for the rising edge for gpio IRQs 1505 * configured with falling edge only; and vice versa. 1506 */ 1507 gen0 = l & bank->context.fallingdetect; 1508 gen0 &= bank->saved_datain; 1509 1510 gen1 = l & bank->context.risingdetect; 1511 gen1 &= ~(bank->saved_datain); 1512 1513 /* FIXME: Consider GPIO IRQs with level detections properly! 
*/ 1514 gen = l & (~(bank->context.fallingdetect) & 1515 ~(bank->context.risingdetect)); 1516 /* Consider all GPIO IRQs needed to be updated */ 1517 gen |= gen0 | gen1; 1518 1519 if (gen) { 1520 u32 old0, old1; 1521 1522 old0 = readl_relaxed(bank->base + bank->regs->leveldetect0); 1523 old1 = readl_relaxed(bank->base + bank->regs->leveldetect1); 1524 1525 if (!bank->regs->irqstatus_raw0) { 1526 writel_relaxed(old0 | gen, bank->base + 1527 bank->regs->leveldetect0); 1528 writel_relaxed(old1 | gen, bank->base + 1529 bank->regs->leveldetect1); 1530 } 1531 1532 if (bank->regs->irqstatus_raw0) { 1533 writel_relaxed(old0 | l, bank->base + 1534 bank->regs->leveldetect0); 1535 writel_relaxed(old1 | l, bank->base + 1536 bank->regs->leveldetect1); 1537 } 1538 writel_relaxed(old0, bank->base + bank->regs->leveldetect0); 1539 writel_relaxed(old1, bank->base + bank->regs->leveldetect1); 1540 } 1541 1542 bank->workaround_enabled = false; 1543 raw_spin_unlock_irqrestore(&bank->lock, flags); 1544 1545 return 0; 1546 } 1547 #endif /* CONFIG_PM */ 1548 1549 #if IS_BUILTIN(CONFIG_GPIO_OMAP) 1550 void omap2_gpio_prepare_for_idle(int pwr_mode) 1551 { 1552 struct gpio_bank *bank; 1553 1554 list_for_each_entry(bank, &omap_gpio_list, node) { 1555 if (!BANK_USED(bank) || !bank->loses_context) 1556 continue; 1557 1558 bank->power_mode = pwr_mode; 1559 1560 pm_runtime_put_sync_suspend(bank->chip.parent); 1561 } 1562 } 1563 1564 void omap2_gpio_resume_after_idle(void) 1565 { 1566 struct gpio_bank *bank; 1567 1568 list_for_each_entry(bank, &omap_gpio_list, node) { 1569 if (!BANK_USED(bank) || !bank->loses_context) 1570 continue; 1571 1572 pm_runtime_get_sync(bank->chip.parent); 1573 } 1574 } 1575 #endif 1576 1577 #if defined(CONFIG_PM) 1578 static void omap_gpio_init_context(struct gpio_bank *p) 1579 { 1580 struct omap_gpio_reg_offs *regs = p->regs; 1581 void __iomem *base = p->base; 1582 1583 p->context.ctrl = readl_relaxed(base + regs->ctrl); 1584 p->context.oe = readl_relaxed(base + regs->direction); 1585 p->context.wake_en = readl_relaxed(base + regs->wkup_en); 1586 p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0); 1587 p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1); 1588 p->context.risingdetect = readl_relaxed(base + regs->risingdetect); 1589 p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect); 1590 p->context.irqenable1 = readl_relaxed(base + regs->irqenable); 1591 p->context.irqenable2 = readl_relaxed(base + regs->irqenable2); 1592 1593 if (regs->set_dataout && p->regs->clr_dataout) 1594 p->context.dataout = readl_relaxed(base + regs->set_dataout); 1595 else 1596 p->context.dataout = readl_relaxed(base + regs->dataout); 1597 1598 p->context_valid = true; 1599 } 1600 1601 static void omap_gpio_restore_context(struct gpio_bank *bank) 1602 { 1603 writel_relaxed(bank->context.wake_en, 1604 bank->base + bank->regs->wkup_en); 1605 writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl); 1606 writel_relaxed(bank->context.leveldetect0, 1607 bank->base + bank->regs->leveldetect0); 1608 writel_relaxed(bank->context.leveldetect1, 1609 bank->base + bank->regs->leveldetect1); 1610 writel_relaxed(bank->context.risingdetect, 1611 bank->base + bank->regs->risingdetect); 1612 writel_relaxed(bank->context.fallingdetect, 1613 bank->base + bank->regs->fallingdetect); 1614 if (bank->regs->set_dataout && bank->regs->clr_dataout) 1615 writel_relaxed(bank->context.dataout, 1616 bank->base + bank->regs->set_dataout); 1617 else 1618 writel_relaxed(bank->context.dataout, 
1619 bank->base + bank->regs->dataout); 1620 writel_relaxed(bank->context.oe, bank->base + bank->regs->direction); 1621 1622 if (bank->dbck_enable_mask) { 1623 writel_relaxed(bank->context.debounce, bank->base + 1624 bank->regs->debounce); 1625 writel_relaxed(bank->context.debounce_en, 1626 bank->base + bank->regs->debounce_en); 1627 } 1628 1629 writel_relaxed(bank->context.irqenable1, 1630 bank->base + bank->regs->irqenable); 1631 writel_relaxed(bank->context.irqenable2, 1632 bank->base + bank->regs->irqenable2); 1633 } 1634 #endif /* CONFIG_PM */ 1635 #else 1636 #define omap_gpio_runtime_suspend NULL 1637 #define omap_gpio_runtime_resume NULL 1638 static inline void omap_gpio_init_context(struct gpio_bank *p) {} 1639 #endif 1640 1641 static const struct dev_pm_ops gpio_pm_ops = { 1642 SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume, 1643 NULL) 1644 }; 1645 1646 #if defined(CONFIG_OF) 1647 static struct omap_gpio_reg_offs omap2_gpio_regs = { 1648 .revision = OMAP24XX_GPIO_REVISION, 1649 .direction = OMAP24XX_GPIO_OE, 1650 .datain = OMAP24XX_GPIO_DATAIN, 1651 .dataout = OMAP24XX_GPIO_DATAOUT, 1652 .set_dataout = OMAP24XX_GPIO_SETDATAOUT, 1653 .clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT, 1654 .irqstatus = OMAP24XX_GPIO_IRQSTATUS1, 1655 .irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2, 1656 .irqenable = OMAP24XX_GPIO_IRQENABLE1, 1657 .irqenable2 = OMAP24XX_GPIO_IRQENABLE2, 1658 .set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1, 1659 .clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1, 1660 .debounce = OMAP24XX_GPIO_DEBOUNCE_VAL, 1661 .debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN, 1662 .ctrl = OMAP24XX_GPIO_CTRL, 1663 .wkup_en = OMAP24XX_GPIO_WAKE_EN, 1664 .leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0, 1665 .leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1, 1666 .risingdetect = OMAP24XX_GPIO_RISINGDETECT, 1667 .fallingdetect = OMAP24XX_GPIO_FALLINGDETECT, 1668 }; 1669 1670 static struct omap_gpio_reg_offs omap4_gpio_regs = { 1671 .revision = OMAP4_GPIO_REVISION, 1672 .direction = OMAP4_GPIO_OE, 1673 .datain = OMAP4_GPIO_DATAIN, 1674 .dataout = OMAP4_GPIO_DATAOUT, 1675 .set_dataout = OMAP4_GPIO_SETDATAOUT, 1676 .clr_dataout = OMAP4_GPIO_CLEARDATAOUT, 1677 .irqstatus = OMAP4_GPIO_IRQSTATUS0, 1678 .irqstatus2 = OMAP4_GPIO_IRQSTATUS1, 1679 .irqenable = OMAP4_GPIO_IRQSTATUSSET0, 1680 .irqenable2 = OMAP4_GPIO_IRQSTATUSSET1, 1681 .set_irqenable = OMAP4_GPIO_IRQSTATUSSET0, 1682 .clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0, 1683 .debounce = OMAP4_GPIO_DEBOUNCINGTIME, 1684 .debounce_en = OMAP4_GPIO_DEBOUNCENABLE, 1685 .ctrl = OMAP4_GPIO_CTRL, 1686 .wkup_en = OMAP4_GPIO_IRQWAKEN0, 1687 .leveldetect0 = OMAP4_GPIO_LEVELDETECT0, 1688 .leveldetect1 = OMAP4_GPIO_LEVELDETECT1, 1689 .risingdetect = OMAP4_GPIO_RISINGDETECT, 1690 .fallingdetect = OMAP4_GPIO_FALLINGDETECT, 1691 }; 1692 1693 static const struct omap_gpio_platform_data omap2_pdata = { 1694 .regs = &omap2_gpio_regs, 1695 .bank_width = 32, 1696 .dbck_flag = false, 1697 }; 1698 1699 static const struct omap_gpio_platform_data omap3_pdata = { 1700 .regs = &omap2_gpio_regs, 1701 .bank_width = 32, 1702 .dbck_flag = true, 1703 }; 1704 1705 static const struct omap_gpio_platform_data omap4_pdata = { 1706 .regs = &omap4_gpio_regs, 1707 .bank_width = 32, 1708 .dbck_flag = true, 1709 }; 1710 1711 static const struct of_device_id omap_gpio_match[] = { 1712 { 1713 .compatible = "ti,omap4-gpio", 1714 .data = &omap4_pdata, 1715 }, 1716 { 1717 .compatible = "ti,omap3-gpio", 1718 .data = &omap3_pdata, 1719 }, 1720 { 1721 .compatible = "ti,omap2-gpio", 1722 .data = &omap2_pdata, 
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
#endif

static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.remove		= omap_gpio_remove,
	.driver		= {
		.name	= "omap_gpio",
		.pm	= &gpio_pm_ops,
		.of_match_table = of_match_ptr(omap_gpio_match),
	},
};

/*
 * gpio driver register needs to be done before
 * machine_init functions access gpio APIs.
 * Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);

static void __exit omap_gpio_exit(void)
{
	platform_driver_unregister(&omap_gpio_driver);
}
module_exit(omap_gpio_exit);

MODULE_DESCRIPTION("omap gpio driver");
MODULE_ALIAS("platform:gpio-omap");
MODULE_LICENSE("GPL v2");
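
/*
 * For reference, a device tree node matched by the "ti,omap4-gpio" entry
 * above typically looks like the sketch below; the unit address, register
 * range, interrupt and hwmod name are illustrative values only, not taken
 * from this driver:
 *
 *	gpio1: gpio@4a310000 {
 *		compatible = "ti,omap4-gpio";
 *		reg = <0x4a310000 0x200>;
 *		interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
 *		ti,hwmods = "gpio1";
 *		ti,gpio-always-on;
 *		gpio-controller;
 *		#gpio-cells = <2>;
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *	};
 */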