xref: /openbmc/linux/drivers/gpio/gpio-omap.c (revision 275876e2)
1 /*
2  * Support functions for OMAP GPIO
3  *
4  * Copyright (C) 2003-2005 Nokia Corporation
5  * Written by Juha Yrjölä <juha.yrjola@nokia.com>
6  *
7  * Copyright (C) 2009 Texas Instruments
8  * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  */
14 
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/interrupt.h>
18 #include <linux/syscore_ops.h>
19 #include <linux/err.h>
20 #include <linux/clk.h>
21 #include <linux/io.h>
22 #include <linux/device.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/pm.h>
25 #include <linux/of.h>
26 #include <linux/of_device.h>
27 #include <linux/gpio.h>
28 #include <linux/bitops.h>
29 #include <linux/platform_data/gpio-omap.h>
30 
31 #define OFF_MODE	1
32 
33 static LIST_HEAD(omap_gpio_list);
34 
35 struct gpio_regs {
36 	u32 irqenable1;
37 	u32 irqenable2;
38 	u32 wake_en;
39 	u32 ctrl;
40 	u32 oe;
41 	u32 leveldetect0;
42 	u32 leveldetect1;
43 	u32 risingdetect;
44 	u32 fallingdetect;
45 	u32 dataout;
46 	u32 debounce;
47 	u32 debounce_en;
48 };
49 
50 struct gpio_bank {
51 	struct list_head node;
52 	void __iomem *base;
53 	u16 irq;
54 	u32 non_wakeup_gpios;
55 	u32 enabled_non_wakeup_gpios;
56 	struct gpio_regs context;
57 	u32 saved_datain;
58 	u32 level_mask;
59 	u32 toggle_mask;
60 	spinlock_t lock;
61 	struct gpio_chip chip;
62 	struct clk *dbck;
63 	u32 mod_usage;
64 	u32 irq_usage;
65 	u32 dbck_enable_mask;
66 	bool dbck_enabled;
67 	struct device *dev;
68 	bool is_mpuio;
69 	bool dbck_flag;
70 	bool loses_context;
71 	bool context_valid;
72 	int stride;
73 	u32 width;
74 	int context_loss_count;
75 	int power_mode;
76 	bool workaround_enabled;
77 
78 	void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
79 	int (*get_context_loss_count)(struct device *dev);
80 
81 	struct omap_gpio_reg_offs *regs;
82 };
83 
84 #define GPIO_INDEX(bank, gpio) (gpio % bank->width)
85 #define GPIO_BIT(bank, gpio) (BIT(GPIO_INDEX(bank, gpio)))
86 #define GPIO_MOD_CTRL_BIT	BIT(0)
87 
88 #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
89 #define LINE_USED(line, offset) (line & (BIT(offset)))
90 
91 static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
92 {
93 	return bank->chip.base + gpio_irq;
94 }
95 
96 static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
97 {
98 	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
99 	return container_of(chip, struct gpio_bank, chip);
100 }
101 
102 static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
103 				    int is_input)
104 {
105 	void __iomem *reg = bank->base;
106 	u32 l;
107 
108 	reg += bank->regs->direction;
109 	l = readl_relaxed(reg);
110 	if (is_input)
111 		l |= BIT(gpio);
112 	else
113 		l &= ~(BIT(gpio));
114 	writel_relaxed(l, reg);
115 	bank->context.oe = l;
116 }
117 
118 
119 /* set data out value using dedicate set/clear register */
120 static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, int gpio,
121 				      int enable)
122 {
123 	void __iomem *reg = bank->base;
124 	u32 l = GPIO_BIT(bank, gpio);
125 
126 	if (enable) {
127 		reg += bank->regs->set_dataout;
128 		bank->context.dataout |= l;
129 	} else {
130 		reg += bank->regs->clr_dataout;
131 		bank->context.dataout &= ~l;
132 	}
133 
134 	writel_relaxed(l, reg);
135 }
136 
137 /* set data out value using mask register */
138 static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, int gpio,
139 				       int enable)
140 {
141 	void __iomem *reg = bank->base + bank->regs->dataout;
142 	u32 gpio_bit = GPIO_BIT(bank, gpio);
143 	u32 l;
144 
145 	l = readl_relaxed(reg);
146 	if (enable)
147 		l |= gpio_bit;
148 	else
149 		l &= ~gpio_bit;
150 	writel_relaxed(l, reg);
151 	bank->context.dataout = l;
152 }
153 
154 static int omap_get_gpio_datain(struct gpio_bank *bank, int offset)
155 {
156 	void __iomem *reg = bank->base + bank->regs->datain;
157 
158 	return (readl_relaxed(reg) & (BIT(offset))) != 0;
159 }
160 
161 static int omap_get_gpio_dataout(struct gpio_bank *bank, int offset)
162 {
163 	void __iomem *reg = bank->base + bank->regs->dataout;
164 
165 	return (readl_relaxed(reg) & (BIT(offset))) != 0;
166 }
167 
168 static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
169 {
170 	int l = readl_relaxed(base + reg);
171 
172 	if (set)
173 		l |= mask;
174 	else
175 		l &= ~mask;
176 
177 	writel_relaxed(l, base + reg);
178 }
179 
180 static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
181 {
182 	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
183 		clk_prepare_enable(bank->dbck);
184 		bank->dbck_enabled = true;
185 
186 		writel_relaxed(bank->dbck_enable_mask,
187 			     bank->base + bank->regs->debounce_en);
188 	}
189 }
190 
191 static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
192 {
193 	if (bank->dbck_enable_mask && bank->dbck_enabled) {
194 		/*
195 		 * Disable debounce before cutting it's clock. If debounce is
196 		 * enabled but the clock is not, GPIO module seems to be unable
197 		 * to detect events and generate interrupts at least on OMAP3.
198 		 */
199 		writel_relaxed(0, bank->base + bank->regs->debounce_en);
200 
201 		clk_disable_unprepare(bank->dbck);
202 		bank->dbck_enabled = false;
203 	}
204 }
205 
/**
 * omap2_set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @gpio: the gpio number on this @gpio
 * @debounce: debounce time to use, in microseconds
 *
 * OMAP's debounce time is in 31us steps so we need
 * to convert and round up to the closest unit.
 */
static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
				    unsigned debounce)
{
	void __iomem		*reg;
	u32			val;
	u32			l;

	/* Nothing to do on banks without a debounce clock. */
	if (!bank->dbck_flag)
		return;

	/* Convert us to 31us units, clamped to the 8-bit register range. */
	if (debounce < 32)
		debounce = 0x01;
	else if (debounce > 7936)
		debounce = 0xff;
	else
		debounce = (debounce / 0x1f) - 1;

	l = GPIO_BIT(bank, gpio);

	/* The debounce registers are only writable while dbck is running. */
	clk_prepare_enable(bank->dbck);
	reg = bank->base + bank->regs->debounce;
	writel_relaxed(debounce, reg);

	reg = bank->base + bank->regs->debounce_en;
	val = readl_relaxed(reg);

	if (debounce)
		val |= l;
	else
		val &= ~l;
	bank->dbck_enable_mask = val;

	writel_relaxed(val, reg);
	clk_disable_unprepare(bank->dbck);
	/*
	 * Enable debounce clock per module.
	 * This call is mandatory because in omap_gpio_request() when
	 * *_runtime_get_sync() is called,  _gpio_dbck_enable() within
	 * runtime callbck fails to turn on dbck because dbck_enable_mask
	 * used within _gpio_dbck_enable() is still not initialized at
	 * that point. Therefore we have to enable dbck here.
	 */
	omap_gpio_dbck_enable(bank);
	/* Cache values for context restore only while debounce is in use. */
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}
}
263 
264 /**
265  * omap_clear_gpio_debounce - clear debounce settings for a gpio
266  * @bank: the gpio bank we're acting upon
267  * @gpio: the gpio number on this @gpio
268  *
269  * If a gpio is using debounce, then clear the debounce enable bit and if
270  * this is the only gpio in this bank using debounce, then clear the debounce
271  * time too. The debounce clock will also be disabled when calling this function
272  * if this is the only gpio in the bank using debounce.
273  */
274 static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio)
275 {
276 	u32 gpio_bit = GPIO_BIT(bank, gpio);
277 
278 	if (!bank->dbck_flag)
279 		return;
280 
281 	if (!(bank->dbck_enable_mask & gpio_bit))
282 		return;
283 
284 	bank->dbck_enable_mask &= ~gpio_bit;
285 	bank->context.debounce_en &= ~gpio_bit;
286         writel_relaxed(bank->context.debounce_en,
287 		     bank->base + bank->regs->debounce_en);
288 
289 	if (!bank->dbck_enable_mask) {
290 		bank->context.debounce = 0;
291 		writel_relaxed(bank->context.debounce, bank->base +
292 			     bank->regs->debounce);
293 		clk_disable_unprepare(bank->dbck);
294 		bank->dbck_enabled = false;
295 	}
296 }
297 
/*
 * Program the level/edge detect registers for one line and mirror the
 * resulting register values into the saved context.
 */
static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
						unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = BIT(gpio);

	/* One detect register per trigger type; set the bit iff requested. */
	omap_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_LOW);
	omap_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_HIGH);
	omap_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
		      trigger & IRQ_TYPE_EDGE_RISING);
	omap_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
		      trigger & IRQ_TYPE_EDGE_FALLING);

	/* Re-read the registers so the context cache matches the hardware. */
	bank->context.leveldetect0 =
			readl_relaxed(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
			readl_relaxed(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
			readl_relaxed(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
			readl_relaxed(bank->base + bank->regs->fallingdetect);

	/* Wakeup is only configurable on wakeup-capable lines. */
	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
		omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
	}

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl) {
		/* On omap24xx proceed only when valid GPIO bit is set */
		if (bank->non_wakeup_gpios) {
			if (!(bank->non_wakeup_gpios & gpio_bit))
				goto exit;
		}

		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes
		 * to avoid irq lost during PER RET/OFF mode
		 * Applies for omap2 non-wakeup gpio and all omap3 gpios
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}

exit:
	/* level_mask feeds the irq handler's level/edge split. */
	bank->level_mask =
		readl_relaxed(bank->base + bank->regs->leveldetect0) |
		readl_relaxed(bank->base + bank->regs->leveldetect1);
}
353 
#ifdef CONFIG_ARCH_OMAP1
/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once.  For all other chips, this function is a noop.
 */
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base;

	if (!bank->regs->irqctrl)
		return;

	reg += bank->regs->irqctrl;

	/* Flip the edge-select bit for this line. */
	writel_relaxed(readl_relaxed(reg) ^ BIT(gpio), reg);
}
#else
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
#endif
380 
/*
 * Route a trigger-type request to whichever trigger scheme this bank
 * supports: per-type detect registers, a single irqctrl register, or
 * the edgectrl register pair.  Returns 0 on success or -EINVAL when
 * the irqctrl scheme is asked for a trigger it cannot express.
 */
static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
				    unsigned trigger)
{
	void __iomem *reg = bank->base;
	void __iomem *base = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		omap_set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = readl_relaxed(reg);
		/* Both edges: flag the line so the handler can flip it. */
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= BIT(gpio);
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= BIT(gpio);
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(BIT(gpio));
		else
			return -EINVAL;

		writel_relaxed(l, reg);
	} else if (bank->regs->edgectrl1) {
		/* Two edge registers, 2 bits per line, 8 lines in each. */
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = readl_relaxed(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= BIT(gpio << 1);

		/* Enable wake-up during idle for dynamic tick */
		omap_gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
		writel_relaxed(l, reg);
	}
	return 0;
}
426 
/*
 * Power up the bank for a line: claim the pin for the MPU where a
 * pinctrl register exists and, on first use of the bank, ungate the
 * module clocks via CTRL.  Called with bank->lock held.
 */
static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

/*
 * Undo omap_enable_gpio_module(): drop the line's idle wakeup once it
 * is unused by both gpiolib and irq paths, and gate the module clocks
 * when the whole bank becomes unused.  Called with bank->lock held.
 */
static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *base = bank->base;

	if (bank->regs->wkup_en &&
	    !LINE_USED(bank->mod_usage, offset) &&
	    !LINE_USED(bank->irq_usage, offset)) {
		/* Disable wake-up during idle for dynamic tick */
		omap_gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}
472 
473 static int omap_gpio_is_input(struct gpio_bank *bank, int mask)
474 {
475 	void __iomem *reg = bank->base + bank->regs->direction;
476 
477 	return readl_relaxed(reg) & mask;
478 }
479 
480 static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
481 {
482 	struct gpio_bank *bank = omap_irq_data_get_bank(d);
483 	unsigned gpio = 0;
484 	int retval;
485 	unsigned long flags;
486 	unsigned offset;
487 
488 	if (!BANK_USED(bank))
489 		pm_runtime_get_sync(bank->dev);
490 
491 #ifdef CONFIG_ARCH_OMAP1
492 	if (d->irq > IH_MPUIO_BASE)
493 		gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
494 #endif
495 
496 	if (!gpio)
497 		gpio = omap_irq_to_gpio(bank, d->hwirq);
498 
499 	if (type & ~IRQ_TYPE_SENSE_MASK)
500 		return -EINVAL;
501 
502 	if (!bank->regs->leveldetect0 &&
503 		(type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
504 		return -EINVAL;
505 
506 	spin_lock_irqsave(&bank->lock, flags);
507 	offset = GPIO_INDEX(bank, gpio);
508 	retval = omap_set_gpio_triggering(bank, offset, type);
509 	if (!LINE_USED(bank->mod_usage, offset)) {
510 		omap_enable_gpio_module(bank, offset);
511 		omap_set_gpio_direction(bank, offset, 1);
512 	} else if (!omap_gpio_is_input(bank, BIT(offset))) {
513 		spin_unlock_irqrestore(&bank->lock, flags);
514 		return -EINVAL;
515 	}
516 
517 	bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
518 	spin_unlock_irqrestore(&bank->lock, flags);
519 
520 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
521 		__irq_set_handler_locked(d->irq, handle_level_irq);
522 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
523 		__irq_set_handler_locked(d->irq, handle_edge_irq);
524 
525 	return retval;
526 }
527 
528 static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
529 {
530 	void __iomem *reg = bank->base;
531 
532 	reg += bank->regs->irqstatus;
533 	writel_relaxed(gpio_mask, reg);
534 
535 	/* Workaround for clearing DSP GPIO interrupts to allow retention */
536 	if (bank->regs->irqstatus2) {
537 		reg = bank->base + bank->regs->irqstatus2;
538 		writel_relaxed(gpio_mask, reg);
539 	}
540 
541 	/* Flush posted write for the irq status to avoid spurious interrupts */
542 	readl_relaxed(reg);
543 }
544 
545 static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
546 {
547 	omap_clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
548 }
549 
550 static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
551 {
552 	void __iomem *reg = bank->base;
553 	u32 l;
554 	u32 mask = (BIT(bank->width)) - 1;
555 
556 	reg += bank->regs->irqenable;
557 	l = readl_relaxed(reg);
558 	if (bank->regs->irqenable_inv)
559 		l = ~l;
560 	l &= mask;
561 	return l;
562 }
563 
564 static void omap_enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
565 {
566 	void __iomem *reg = bank->base;
567 	u32 l;
568 
569 	if (bank->regs->set_irqenable) {
570 		reg += bank->regs->set_irqenable;
571 		l = gpio_mask;
572 		bank->context.irqenable1 |= gpio_mask;
573 	} else {
574 		reg += bank->regs->irqenable;
575 		l = readl_relaxed(reg);
576 		if (bank->regs->irqenable_inv)
577 			l &= ~gpio_mask;
578 		else
579 			l |= gpio_mask;
580 		bank->context.irqenable1 = l;
581 	}
582 
583 	writel_relaxed(l, reg);
584 }
585 
586 static void omap_disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
587 {
588 	void __iomem *reg = bank->base;
589 	u32 l;
590 
591 	if (bank->regs->clr_irqenable) {
592 		reg += bank->regs->clr_irqenable;
593 		l = gpio_mask;
594 		bank->context.irqenable1 &= ~gpio_mask;
595 	} else {
596 		reg += bank->regs->irqenable;
597 		l = readl_relaxed(reg);
598 		if (bank->regs->irqenable_inv)
599 			l |= gpio_mask;
600 		else
601 			l &= ~gpio_mask;
602 		bank->context.irqenable1 = l;
603 	}
604 
605 	writel_relaxed(l, reg);
606 }
607 
608 static inline void omap_set_gpio_irqenable(struct gpio_bank *bank, int gpio,
609 					   int enable)
610 {
611 	if (enable)
612 		omap_enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
613 	else
614 		omap_disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
615 }
616 
617 /*
618  * Note that ENAWAKEUP needs to be enabled in GPIO_SYSCONFIG register.
619  * 1510 does not seem to have a wake-up register. If JTAG is connected
620  * to the target, system will wake up always on GPIO events. While
621  * system is running all registered GPIO interrupts need to have wake-up
622  * enabled. When system is suspended, only selected GPIO interrupts need
623  * to have wake-up enabled.
624  */
625 static int omap_set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
626 {
627 	u32 gpio_bit = GPIO_BIT(bank, gpio);
628 	unsigned long flags;
629 
630 	if (bank->non_wakeup_gpios & gpio_bit) {
631 		dev_err(bank->dev,
632 			"Unable to modify wakeup on non-wakeup GPIO%d\n", gpio);
633 		return -EINVAL;
634 	}
635 
636 	spin_lock_irqsave(&bank->lock, flags);
637 	if (enable)
638 		bank->context.wake_en |= gpio_bit;
639 	else
640 		bank->context.wake_en &= ~gpio_bit;
641 
642 	writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
643 	spin_unlock_irqrestore(&bank->lock, flags);
644 
645 	return 0;
646 }
647 
648 static void omap_reset_gpio(struct gpio_bank *bank, int gpio)
649 {
650 	omap_set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
651 	omap_set_gpio_irqenable(bank, gpio, 0);
652 	omap_clear_gpio_irqstatus(bank, gpio);
653 	omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
654 	omap_clear_gpio_debounce(bank, gpio);
655 }
656 
657 /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
658 static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
659 {
660 	struct gpio_bank *bank = omap_irq_data_get_bank(d);
661 	unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
662 
663 	return omap_set_gpio_wakeup(bank, gpio, enable);
664 }
665 
/* gpiochip request hook: power the bank and park the line's trigger. */
static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	/*
	 * If this is the first gpio_request for the bank,
	 * enable the bank module.
	 */
	if (!BANK_USED(bank))
		pm_runtime_get_sync(bank->dev);

	spin_lock_irqsave(&bank->lock, flags);
	/* Set trigger to none. You need to enable the desired trigger with
	 * request_irq() or set_irq_type(). Only do this if the IRQ line has
	 * not already been requested.
	 */
	if (!LINE_USED(bank->irq_usage, offset)) {
		omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
		omap_enable_gpio_module(bank, offset);
	}
	bank->mod_usage |= BIT(offset);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
692 
/* gpiochip free hook: reset the line and power the bank down if unused. */
static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	bank->mod_usage &= ~(BIT(offset));
	omap_disable_gpio_module(bank, offset);
	/* omap_reset_gpio() expects a global GPIO number, not an offset. */
	omap_reset_gpio(bank, bank->chip.base + offset);
	spin_unlock_irqrestore(&bank->lock, flags);

	/*
	 * If this is the last gpio to be freed in the bank,
	 * disable the bank module.
	 */
	if (!BANK_USED(bank))
		pm_runtime_put(bank->dev);
}
711 
/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *isr_reg = NULL;
	u32 isr;
	unsigned int bit;
	struct gpio_bank *bank;
	int unmasked = 0;	/* set once the chained irq has been re-enabled */
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	struct gpio_chip *chip = irq_get_handler_data(irq);

	chained_irq_enter(irqchip, desc);

	bank = container_of(chip, struct gpio_bank, chip);
	isr_reg = bank->base + bank->regs->irqstatus;
	/* Keep the bank powered while its status register is polled. */
	pm_runtime_get_sync(bank->dev);

	if (WARN_ON(!isr_reg))
		goto exit;

	/* Re-read status until no enabled line is pending. */
	while (1) {
		u32 isr_saved, level_mask = 0;
		u32 enabled;

		enabled = omap_get_gpio_irqbank_mask(bank);
		isr_saved = isr = readl_relaxed(isr_reg) & enabled;

		if (bank->level_mask)
			level_mask = bank->level_mask & enabled;

		/* clear edge sensitive interrupts before handler(s) are
		called so that we don't miss any interrupt occurred while
		executing them */
		omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
		omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
		omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);

		/* if there is only edge sensitive GPIO pin interrupts
		configured, we could unmask GPIO bank interrupt immediately */
		if (!level_mask && !unmasked) {
			unmasked = 1;
			chained_irq_exit(irqchip, desc);
		}

		if (!isr)
			break;

		/* Dispatch each pending line, lowest bit first. */
		while (isr) {
			bit = __ffs(isr);
			isr &= ~(BIT(bit));

			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time.  If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (BIT(bit)))
				omap_toggle_gpio_edge_triggering(bank, bit);

			generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
							    bit));
		}
	}
	/* if bank has any level sensitive GPIO pin interrupt
	configured, we must unmask the bank interrupt only after
	handler(s) are executed in order to avoid spurious bank
	interrupt */
exit:
	if (!unmasked)
		chained_irq_exit(irqchip, desc);
	pm_runtime_put(bank->dev);
}
794 
/*
 * irq_shutdown: release the line's irq usage, reset it to a safe input
 * state and power the bank down once it is completely unused.
 */
static void omap_gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
	unsigned long flags;
	unsigned offset = GPIO_INDEX(bank, gpio);

	spin_lock_irqsave(&bank->lock, flags);
	gpio_unlock_as_irq(&bank->chip, offset);
	bank->irq_usage &= ~(BIT(offset));
	omap_disable_gpio_module(bank, offset);
	omap_reset_gpio(bank, gpio);
	spin_unlock_irqrestore(&bank->lock, flags);

	/*
	 * If this is the last IRQ to be freed in the bank,
	 * disable the bank module.
	 */
	if (!BANK_USED(bank))
		pm_runtime_put(bank->dev);
}
816 
817 static void omap_gpio_ack_irq(struct irq_data *d)
818 {
819 	struct gpio_bank *bank = omap_irq_data_get_bank(d);
820 	unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
821 
822 	omap_clear_gpio_irqstatus(bank, gpio);
823 }
824 
825 static void omap_gpio_mask_irq(struct irq_data *d)
826 {
827 	struct gpio_bank *bank = omap_irq_data_get_bank(d);
828 	unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
829 	unsigned long flags;
830 
831 	spin_lock_irqsave(&bank->lock, flags);
832 	omap_set_gpio_irqenable(bank, gpio, 0);
833 	omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
834 	spin_unlock_irqrestore(&bank->lock, flags);
835 }
836 
/*
 * irq_unmask: re-apply the trigger (parked to NONE by the mask hook)
 * and re-enable the line's interrupt.
 */
static void omap_gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
	unsigned int irq_mask = GPIO_BIT(bank, gpio);
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	if (trigger)
		omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);

	/* For level-triggered GPIOs, the clearing must be done after
	 * the HW source is cleared, thus after the handler has run */
	if (bank->level_mask & irq_mask) {
		omap_set_gpio_irqenable(bank, gpio, 0);
		omap_clear_gpio_irqstatus(bank, gpio);
	}

	omap_set_gpio_irqenable(bank, gpio, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
}
859 
/* irq_chip implementation backing every GPIO line of a bank. */
static struct irq_chip gpio_irq_chip = {
	.name		= "GPIO",
	.irq_shutdown	= omap_gpio_irq_shutdown,
	.irq_ack	= omap_gpio_ack_irq,
	.irq_mask	= omap_gpio_mask_irq,
	.irq_unmask	= omap_gpio_unmask_irq,
	.irq_set_type	= omap_gpio_irq_type,
	.irq_set_wake	= omap_gpio_wake_enable,
};
869 
870 /*---------------------------------------------------------------------*/
871 
872 static int omap_mpuio_suspend_noirq(struct device *dev)
873 {
874 	struct platform_device *pdev = to_platform_device(dev);
875 	struct gpio_bank	*bank = platform_get_drvdata(pdev);
876 	void __iomem		*mask_reg = bank->base +
877 					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
878 	unsigned long		flags;
879 
880 	spin_lock_irqsave(&bank->lock, flags);
881 	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
882 	spin_unlock_irqrestore(&bank->lock, flags);
883 
884 	return 0;
885 }
886 
887 static int omap_mpuio_resume_noirq(struct device *dev)
888 {
889 	struct platform_device *pdev = to_platform_device(dev);
890 	struct gpio_bank	*bank = platform_get_drvdata(pdev);
891 	void __iomem		*mask_reg = bank->base +
892 					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
893 	unsigned long		flags;
894 
895 	spin_lock_irqsave(&bank->lock, flags);
896 	writel_relaxed(bank->context.wake_en, mask_reg);
897 	spin_unlock_irqrestore(&bank->lock, flags);
898 
899 	return 0;
900 }
901 
/* noirq PM hooks so the MPUIO mask is changed with interrupts quiesced. */
static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver		= {
		.name	= "mpuio",
		.pm	= &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name		= "mpuio",
	.id		= -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

/* Register the mpuio driver/device pair for the MPUIO bank. */
static inline void omap_mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}
931 
932 /*---------------------------------------------------------------------*/
933 
934 static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
935 {
936 	struct gpio_bank *bank;
937 	unsigned long flags;
938 	void __iomem *reg;
939 	int dir;
940 
941 	bank = container_of(chip, struct gpio_bank, chip);
942 	reg = bank->base + bank->regs->direction;
943 	spin_lock_irqsave(&bank->lock, flags);
944 	dir = !!(readl_relaxed(reg) & BIT(offset));
945 	spin_unlock_irqrestore(&bank->lock, flags);
946 	return dir;
947 }
948 
949 static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
950 {
951 	struct gpio_bank *bank;
952 	unsigned long flags;
953 
954 	bank = container_of(chip, struct gpio_bank, chip);
955 	spin_lock_irqsave(&bank->lock, flags);
956 	omap_set_gpio_direction(bank, offset, 1);
957 	spin_unlock_irqrestore(&bank->lock, flags);
958 	return 0;
959 }
960 
961 static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
962 {
963 	struct gpio_bank *bank;
964 	u32 mask;
965 
966 	bank = container_of(chip, struct gpio_bank, chip);
967 	mask = (BIT(offset));
968 
969 	if (omap_gpio_is_input(bank, mask))
970 		return omap_get_gpio_datain(bank, offset);
971 	else
972 		return omap_get_gpio_dataout(bank, offset);
973 }
974 
975 static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
976 {
977 	struct gpio_bank *bank;
978 	unsigned long flags;
979 
980 	bank = container_of(chip, struct gpio_bank, chip);
981 	spin_lock_irqsave(&bank->lock, flags);
982 	bank->set_dataout(bank, offset, value);
983 	omap_set_gpio_direction(bank, offset, 0);
984 	spin_unlock_irqrestore(&bank->lock, flags);
985 	return 0;
986 }
987 
988 static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
989 			      unsigned debounce)
990 {
991 	struct gpio_bank *bank;
992 	unsigned long flags;
993 
994 	bank = container_of(chip, struct gpio_bank, chip);
995 
996 	spin_lock_irqsave(&bank->lock, flags);
997 	omap2_set_gpio_debounce(bank, offset, debounce);
998 	spin_unlock_irqrestore(&bank->lock, flags);
999 
1000 	return 0;
1001 }
1002 
1003 static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
1004 {
1005 	struct gpio_bank *bank;
1006 	unsigned long flags;
1007 
1008 	bank = container_of(chip, struct gpio_bank, chip);
1009 	spin_lock_irqsave(&bank->lock, flags);
1010 	bank->set_dataout(bank, offset, value);
1011 	spin_unlock_irqrestore(&bank->lock, flags);
1012 }
1013 
1014 /*---------------------------------------------------------------------*/
1015 
/* Print the GPIO IP revision; skipped on banks without a revision reg. */
static void __init omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;	/* report only once, for the first bank */
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = readw_relaxed(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}
1030 
/* One-time hardware init for a bank: mask/ack everything, set defaults. */
static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;	/* all-lines mask, narrowed for 16-bit banks */

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		/* MPUIO only needs its irqenable register written here. */
		writel_relaxed(l, bank->base + bank->regs->irqenable);
		return;
	}

	/* Disable all interrupts and clear any latched status. */
	omap_gpio_rmw(base, bank->regs->irqenable, l,
		      bank->regs->irqenable_inv);
	omap_gpio_rmw(base, bank->regs->irqstatus, l,
		      !bank->regs->irqenable_inv);
	if (bank->regs->debounce_en)
		writel_relaxed(0, base + bank->regs->debounce_en);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
	 /* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		writel_relaxed(0, base + bank->regs->ctrl);

	/* The debounce clock is optional; init continues without it. */
	bank->dbck = clk_get(bank->dev, "dbclk");
	if (IS_ERR(bank->dbck))
		dev_err(bank->dev, "Could not get gpio dbck\n");
}
1061 
/*
 * Set up a generic irq chip for an MPUIO bank, covering @num interrupts
 * starting at @irq_start.  Masking is done through the generic mask-cache
 * helpers against the register at OMAP_MPUIO_GPIO_INT.  On allocation
 * failure the function logs and returns without installing anything.
 */
static void
omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
		    unsigned int num)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
				    handle_simple_irq);
	if (!gc) {
		dev_err(bank->dev, "Memory alloc failed for gc\n");
		return;
	}

	ct = gc->chip_types;

	/* NOTE: No ack required, reading IRQ status clears it. */
	ct->chip.irq_mask = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_set_type = omap_gpio_irq_type;

	/* Wake support only on banks that expose a wakeup-enable register. */
	if (bank->regs->wkup_en)
		ct->chip.irq_set_wake = omap_gpio_wake_enable;

	/* Register offset scaled by the bank's register stride. */
	ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
1090 
1091 static int omap_gpio_chip_init(struct gpio_bank *bank)
1092 {
1093 	int j;
1094 	static int gpio;
1095 	int irq_base = 0;
1096 	int ret;
1097 
1098 	/*
1099 	 * REVISIT eventually switch from OMAP-specific gpio structs
1100 	 * over to the generic ones
1101 	 */
1102 	bank->chip.request = omap_gpio_request;
1103 	bank->chip.free = omap_gpio_free;
1104 	bank->chip.get_direction = omap_gpio_get_direction;
1105 	bank->chip.direction_input = omap_gpio_input;
1106 	bank->chip.get = omap_gpio_get;
1107 	bank->chip.direction_output = omap_gpio_output;
1108 	bank->chip.set_debounce = omap_gpio_debounce;
1109 	bank->chip.set = omap_gpio_set;
1110 	if (bank->is_mpuio) {
1111 		bank->chip.label = "mpuio";
1112 		if (bank->regs->wkup_en)
1113 			bank->chip.dev = &omap_mpuio_device.dev;
1114 		bank->chip.base = OMAP_MPUIO(0);
1115 	} else {
1116 		bank->chip.label = "gpio";
1117 		bank->chip.base = gpio;
1118 		gpio += bank->width;
1119 	}
1120 	bank->chip.ngpio = bank->width;
1121 
1122 	ret = gpiochip_add(&bank->chip);
1123 	if (ret) {
1124 		dev_err(bank->dev, "Could not register gpio chip %d\n", ret);
1125 		return ret;
1126 	}
1127 
1128 #ifdef CONFIG_ARCH_OMAP1
1129 	/*
1130 	 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
1131 	 * irq_alloc_descs() since a base IRQ offset will no longer be needed.
1132 	 */
1133 	irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
1134 	if (irq_base < 0) {
1135 		dev_err(bank->dev, "Couldn't allocate IRQ numbers\n");
1136 		return -ENODEV;
1137 	}
1138 #endif
1139 
1140 	ret = gpiochip_irqchip_add(&bank->chip, &gpio_irq_chip,
1141 				   irq_base, omap_gpio_irq_handler,
1142 				   IRQ_TYPE_NONE);
1143 
1144 	if (ret) {
1145 		dev_err(bank->dev, "Couldn't add irqchip to gpiochip %d\n", ret);
1146 		ret = gpiochip_remove(&bank->chip);
1147 		return -ENODEV;
1148 	}
1149 
1150 	gpiochip_set_chained_irqchip(&bank->chip, &gpio_irq_chip,
1151 				     bank->irq, omap_gpio_irq_handler);
1152 
1153 	for (j = 0; j < bank->width; j++) {
1154 		int irq = irq_find_mapping(bank->chip.irqdomain, j);
1155 		if (bank->is_mpuio) {
1156 			omap_mpuio_alloc_gc(bank, irq, bank->width);
1157 			irq_set_chip_and_handler(irq, NULL, NULL);
1158 			set_irq_flags(irq, 0);
1159 		}
1160 	}
1161 
1162 	return 0;
1163 }
1164 
1165 static const struct of_device_id omap_gpio_match[];
1166 
1167 static int omap_gpio_probe(struct platform_device *pdev)
1168 {
1169 	struct device *dev = &pdev->dev;
1170 	struct device_node *node = dev->of_node;
1171 	const struct of_device_id *match;
1172 	const struct omap_gpio_platform_data *pdata;
1173 	struct resource *res;
1174 	struct gpio_bank *bank;
1175 	int ret;
1176 
1177 	match = of_match_device(of_match_ptr(omap_gpio_match), dev);
1178 
1179 	pdata = match ? match->data : dev_get_platdata(dev);
1180 	if (!pdata)
1181 		return -EINVAL;
1182 
1183 	bank = devm_kzalloc(dev, sizeof(struct gpio_bank), GFP_KERNEL);
1184 	if (!bank) {
1185 		dev_err(dev, "Memory alloc failed\n");
1186 		return -ENOMEM;
1187 	}
1188 
1189 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1190 	if (unlikely(!res)) {
1191 		dev_err(dev, "Invalid IRQ resource\n");
1192 		return -ENODEV;
1193 	}
1194 
1195 	bank->irq = res->start;
1196 	bank->dev = dev;
1197 	bank->chip.dev = dev;
1198 	bank->dbck_flag = pdata->dbck_flag;
1199 	bank->stride = pdata->bank_stride;
1200 	bank->width = pdata->bank_width;
1201 	bank->is_mpuio = pdata->is_mpuio;
1202 	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
1203 	bank->regs = pdata->regs;
1204 #ifdef CONFIG_OF_GPIO
1205 	bank->chip.of_node = of_node_get(node);
1206 #endif
1207 	if (node) {
1208 		if (!of_property_read_bool(node, "ti,gpio-always-on"))
1209 			bank->loses_context = true;
1210 	} else {
1211 		bank->loses_context = pdata->loses_context;
1212 
1213 		if (bank->loses_context)
1214 			bank->get_context_loss_count =
1215 				pdata->get_context_loss_count;
1216 	}
1217 
1218 	if (bank->regs->set_dataout && bank->regs->clr_dataout)
1219 		bank->set_dataout = omap_set_gpio_dataout_reg;
1220 	else
1221 		bank->set_dataout = omap_set_gpio_dataout_mask;
1222 
1223 	spin_lock_init(&bank->lock);
1224 
1225 	/* Static mapping, never released */
1226 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1227 	bank->base = devm_ioremap_resource(dev, res);
1228 	if (IS_ERR(bank->base)) {
1229 		irq_domain_remove(bank->chip.irqdomain);
1230 		return PTR_ERR(bank->base);
1231 	}
1232 
1233 	platform_set_drvdata(pdev, bank);
1234 
1235 	pm_runtime_enable(bank->dev);
1236 	pm_runtime_irq_safe(bank->dev);
1237 	pm_runtime_get_sync(bank->dev);
1238 
1239 	if (bank->is_mpuio)
1240 		omap_mpuio_init(bank);
1241 
1242 	omap_gpio_mod_init(bank);
1243 
1244 	ret = omap_gpio_chip_init(bank);
1245 	if (ret)
1246 		return ret;
1247 
1248 	omap_gpio_show_rev(bank);
1249 
1250 	pm_runtime_put(bank->dev);
1251 
1252 	list_add_tail(&bank->node, &omap_gpio_list);
1253 
1254 	return 0;
1255 }
1256 
1257 #ifdef CONFIG_ARCH_OMAP2PLUS
1258 
1259 #if defined(CONFIG_PM_RUNTIME)
1260 static void omap_gpio_restore_context(struct gpio_bank *bank);
1261 
/*
 * Runtime-suspend one bank: promote level-triggered wakeup GPIOs to edge
 * detection so they can still wake the PRCM, apply the OMAP2420 erratum
 * 1.101 workaround when heading to OFF mode, snapshot the context-loss
 * counter and gate the debounce clock.  Always returns 0.
 */
static int omap_gpio_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l1 = 0, l2 = 0;
	unsigned long flags;
	u32 wake_low, wake_hi;

	spin_lock_irqsave(&bank->lock, flags);

	/*
	 * Only edges can generate a wakeup event to the PRCM.
	 *
	 * Therefore, ensure any wake-up capable GPIOs have
	 * edge-detection enabled before going idle to ensure a wakeup
	 * to the PRCM is generated on a GPIO transition. (c.f. 34xx
	 * NDA TRM 25.5.3.1)
	 *
	 * The normal values will be restored upon ->runtime_resume()
	 * by writing back the values saved in bank->context.
	 */
	wake_low = bank->context.leveldetect0 & bank->context.wake_en;
	if (wake_low)
		writel_relaxed(wake_low | bank->context.fallingdetect,
			     bank->base + bank->regs->fallingdetect);
	wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
	if (wake_hi)
		writel_relaxed(wake_hi | bank->context.risingdetect,
			     bank->base + bank->regs->risingdetect);

	if (!bank->enabled_non_wakeup_gpios)
		goto update_gpio_context_count;

	/* The erratum workaround below is only needed for OFF mode. */
	if (bank->power_mode != OFF_MODE) {
		bank->power_mode = 0;
		goto update_gpio_context_count;
	}
	/*
	 * If going to OFF, remove triggering for all
	 * non-wakeup GPIOs.  Otherwise spurious IRQs will be
	 * generated.  See OMAP2420 Errata item 1.101.
	 */
	/* saved_datain is compared against on ->runtime_resume() to
	 * synthesize IRQs for edges missed while triggering was removed.
	 */
	bank->saved_datain = readl_relaxed(bank->base +
						bank->regs->datain);
	l1 = bank->context.fallingdetect;
	l2 = bank->context.risingdetect;

	l1 &= ~bank->enabled_non_wakeup_gpios;
	l2 &= ~bank->enabled_non_wakeup_gpios;

	writel_relaxed(l1, bank->base + bank->regs->fallingdetect);
	writel_relaxed(l2, bank->base + bank->regs->risingdetect);

	bank->workaround_enabled = true;

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);

	omap_gpio_dbck_disable(bank);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
1327 
1328 static void omap_gpio_init_context(struct gpio_bank *p);
1329 
/*
 * Runtime-resume one bank: initialise the register context on first
 * resume, restore edge-detect settings changed by ->runtime_suspend(),
 * restore full context if it was lost, and synthesize IRQs for edges
 * on non-wakeup GPIOs that were missed while triggering was removed
 * (OMAP2420 erratum 1.101 counterpart).  Always returns 0.
 */
static int omap_gpio_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l = 0, gen, gen0, gen1;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&bank->lock, flags);

	/*
	 * On the first resume during the probe, the context has not
	 * been initialised and so initialise it now. Also initialise
	 * the context loss count.
	 */
	if (bank->loses_context && !bank->context_valid) {
		omap_gpio_init_context(bank);

		if (bank->get_context_loss_count)
			bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);
	}

	omap_gpio_dbck_enable(bank);

	/*
	 * In ->runtime_suspend(), level-triggered, wakeup-enabled
	 * GPIOs were set to edge trigger also in order to be able to
	 * generate a PRCM wakeup.  Here we restore the
	 * pre-runtime_suspend() values for edge triggering.
	 */
	writel_relaxed(bank->context.fallingdetect,
		     bank->base + bank->regs->fallingdetect);
	writel_relaxed(bank->context.risingdetect,
		     bank->base + bank->regs->risingdetect);

	if (bank->loses_context) {
		if (!bank->get_context_loss_count) {
			/* No way to tell if context was lost; always restore. */
			omap_gpio_restore_context(bank);
		} else {
			c = bank->get_context_loss_count(bank->dev);
			if (c != bank->context_loss_count) {
				omap_gpio_restore_context(bank);
			} else {
				/* Context retained; nothing more to do. */
				spin_unlock_irqrestore(&bank->lock, flags);
				return 0;
			}
		}
	}

	if (!bank->workaround_enabled) {
		spin_unlock_irqrestore(&bank->lock, flags);
		return 0;
	}

	l = readl_relaxed(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state.  If so, generate an IRQ by software.  This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->context.fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->context.risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->context.fallingdetect) &
					 ~(bank->context.risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		/*
		 * Pulse the level-detect registers to latch an IRQ for the
		 * missed edges, then restore the original settings.
		 */
		old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
		old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);

		if (!bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | gen, bank->base +
						bank->regs->leveldetect0);
			writel_relaxed(old1 | gen, bank->base +
						bank->regs->leveldetect1);
		}

		if (bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | l, bank->base +
						bank->regs->leveldetect0);
			writel_relaxed(old1 | l, bank->base +
						bank->regs->leveldetect1);
		}
		writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
		writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
	}

	bank->workaround_enabled = false;
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
1440 #endif /* CONFIG_PM_RUNTIME */
1441 
1442 void omap2_gpio_prepare_for_idle(int pwr_mode)
1443 {
1444 	struct gpio_bank *bank;
1445 
1446 	list_for_each_entry(bank, &omap_gpio_list, node) {
1447 		if (!BANK_USED(bank) || !bank->loses_context)
1448 			continue;
1449 
1450 		bank->power_mode = pwr_mode;
1451 
1452 		pm_runtime_put_sync_suspend(bank->dev);
1453 	}
1454 }
1455 
1456 void omap2_gpio_resume_after_idle(void)
1457 {
1458 	struct gpio_bank *bank;
1459 
1460 	list_for_each_entry(bank, &omap_gpio_list, node) {
1461 		if (!BANK_USED(bank) || !bank->loses_context)
1462 			continue;
1463 
1464 		pm_runtime_get_sync(bank->dev);
1465 	}
1466 }
1467 
1468 #if defined(CONFIG_PM_RUNTIME)
1469 static void omap_gpio_init_context(struct gpio_bank *p)
1470 {
1471 	struct omap_gpio_reg_offs *regs = p->regs;
1472 	void __iomem *base = p->base;
1473 
1474 	p->context.ctrl		= readl_relaxed(base + regs->ctrl);
1475 	p->context.oe		= readl_relaxed(base + regs->direction);
1476 	p->context.wake_en	= readl_relaxed(base + regs->wkup_en);
1477 	p->context.leveldetect0	= readl_relaxed(base + regs->leveldetect0);
1478 	p->context.leveldetect1	= readl_relaxed(base + regs->leveldetect1);
1479 	p->context.risingdetect	= readl_relaxed(base + regs->risingdetect);
1480 	p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
1481 	p->context.irqenable1	= readl_relaxed(base + regs->irqenable);
1482 	p->context.irqenable2	= readl_relaxed(base + regs->irqenable2);
1483 
1484 	if (regs->set_dataout && p->regs->clr_dataout)
1485 		p->context.dataout = readl_relaxed(base + regs->set_dataout);
1486 	else
1487 		p->context.dataout = readl_relaxed(base + regs->dataout);
1488 
1489 	p->context_valid = true;
1490 }
1491 
/*
 * Write the saved register context back to the hardware after context
 * loss.  The write order is deliberate: configuration (wakeup, ctrl,
 * detect, dataout, direction, debounce) is restored first and the
 * interrupt enables last.
 */
static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	writel_relaxed(bank->context.wake_en,
				bank->base + bank->regs->wkup_en);
	writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
	writel_relaxed(bank->context.leveldetect0,
				bank->base + bank->regs->leveldetect0);
	writel_relaxed(bank->context.leveldetect1,
				bank->base + bank->regs->leveldetect1);
	writel_relaxed(bank->context.risingdetect,
				bank->base + bank->regs->risingdetect);
	writel_relaxed(bank->context.fallingdetect,
				bank->base + bank->regs->fallingdetect);
	/* Banks with set/clr dataout registers restore dataout via 'set'. */
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		writel_relaxed(bank->context.dataout,
				bank->base + bank->regs->set_dataout);
	else
		writel_relaxed(bank->context.dataout,
				bank->base + bank->regs->dataout);
	writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);

	/* Only rewrite debounce settings if any GPIO had debounce enabled. */
	if (bank->dbck_enable_mask) {
		writel_relaxed(bank->context.debounce, bank->base +
					bank->regs->debounce);
		writel_relaxed(bank->context.debounce_en,
					bank->base + bank->regs->debounce_en);
	}

	writel_relaxed(bank->context.irqenable1,
				bank->base + bank->regs->irqenable);
	writel_relaxed(bank->context.irqenable2,
				bank->base + bank->regs->irqenable2);
}
1525 #endif /* CONFIG_PM_RUNTIME */
1526 #else
1527 #define omap_gpio_runtime_suspend NULL
1528 #define omap_gpio_runtime_resume NULL
1529 static inline void omap_gpio_init_context(struct gpio_bank *p) {}
1530 #endif
1531 
/* Runtime PM callbacks only; no system sleep handlers are provided. */
static const struct dev_pm_ops gpio_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
									NULL)
};
1536 
1537 #if defined(CONFIG_OF)
/* Register offsets for OMAP2/3-style GPIO banks (shared by omap2/omap3 pdata). */
static struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision =		OMAP24XX_GPIO_REVISION,
	.direction =		OMAP24XX_GPIO_OE,
	.datain =		OMAP24XX_GPIO_DATAIN,
	.dataout =		OMAP24XX_GPIO_DATAOUT,
	.set_dataout =		OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 =		OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable =		OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 =		OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable =	OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable =	OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce =		OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en =		OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl =			OMAP24XX_GPIO_CTRL,
	.wkup_en =		OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 =		OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP24XX_GPIO_FALLINGDETECT,
};
1560 
/*
 * Register offsets for OMAP4-style GPIO banks.  Note that .irqenable and
 * .set_irqenable both map to the IRQSTATUSSET registers here.
 */
static struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision =		OMAP4_GPIO_REVISION,
	.direction =		OMAP4_GPIO_OE,
	.datain =		OMAP4_GPIO_DATAIN,
	.dataout =		OMAP4_GPIO_DATAOUT,
	.set_dataout =		OMAP4_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 =		OMAP4_GPIO_IRQSTATUS1,
	.irqenable =		OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 =		OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable =	OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable =	OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce =		OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en =		OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl =			OMAP4_GPIO_CTRL,
	.wkup_en =		OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 =		OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP4_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP4_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP4_GPIO_FALLINGDETECT,
};
1583 
/* Per-SoC platform data: OMAP2 has no debounce clock (dbck_flag = false). */
static const struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

/* OMAP3: same register layout as OMAP2, but with a debounce clock. */
static const struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

/* OMAP4: new register layout, debounce clock present. */
static const struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};
1601 
/* DT match table; .data supplies the per-SoC platform data used in probe. */
static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
1618 #endif
1619 
/* Platform driver; no .remove — banks stay registered for the system's life. */
static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.driver		= {
		.name	= "omap_gpio",
		.pm	= &gpio_pm_ops,
		.of_match_table = of_match_ptr(omap_gpio_match),
	},
};
1628 
1629 /*
1630  * gpio driver register needs to be done before
1631  * machine_init functions access gpio APIs.
1632  * Hence omap_gpio_drv_reg() is a postcore_initcall.
1633  */
static int __init omap_gpio_drv_reg(void)
{
	/* Registered at postcore so machine_init code can already use GPIOs. */
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);
1639