/*
 *  linux/arch/arm/plat-pxa/gpio.c
 *
 *  Generic PXA GPIO handling
 *
 *  Author:	Nicolas Pitre
 *  Created:	Jun 15, 2001
 *  Copyright:	MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/gpio-pxa.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>

/*
 * We handle the GPIOs by banks, where each bank covers up to 32 GPIOs
 * with one set of registers. The register offsets are organized below:
 *
 *           GPLR    GPDR    GPSR    GPCR    GRER    GFER    GEDR
 * BANK 0 - 0x0000  0x000C  0x0018  0x0024  0x0030  0x003C  0x0048
 * BANK 1 - 0x0004  0x0010  0x001C  0x0028  0x0034  0x0040  0x004C
 * BANK 2 - 0x0008  0x0014  0x0020  0x002C  0x0038  0x0044  0x0050
 *
 * BANK 3 - 0x0100  0x010C  0x0118  0x0124  0x0130  0x013C  0x0148
 * BANK 4 - 0x0104  0x0110  0x011C  0x0128  0x0134  0x0140  0x014C
 * BANK 5 - 0x0108  0x0114  0x0120  0x012C  0x0138  0x0144  0x0150
 *
 * BANK 6 - 0x0200  0x020C  0x0218  0x0224  0x0230  0x023C  0x0248
 *
 * NOTE:
 *   BANK 3 is only available on PXA27x and later processors.
 *   BANKs 4 and 5 are only available on PXA935 and PXA1928.
 *   BANK 6 is only available on PXA1928.
 */

#define GPLR_OFFSET	0x00
#define GPDR_OFFSET	0x0C
#define GPSR_OFFSET	0x18
#define GPCR_OFFSET	0x24
#define GRER_OFFSET	0x30
#define GFER_OFFSET	0x3C
#define GEDR_OFFSET	0x48
#define GAFR_OFFSET	0x54
#define ED_MASK_OFFSET	0x9C	/* GPIO edge detection for AP side */

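/*
 * For example, BANK_OFF(0) = 0x0000, BANK_OFF(2) = 0x0008,
 * BANK_OFF(3) = 0x0100 and BANK_OFF(6) = 0x0200, matching the
 * register map in the comment above.
 */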
#define BANK_OFF(n)	((((n) / 3) << 8) + (((n) % 3) << 2))

int pxa_last_gpio;
static int irq_base;

struct pxa_gpio_bank {
	void __iomem	*regbase;
	unsigned long	irq_mask;
	unsigned long	irq_edge_rise;
	unsigned long	irq_edge_fall;

#ifdef CONFIG_PM
	unsigned long	saved_gplr;
	unsigned long	saved_gpdr;
	unsigned long	saved_grer;
	unsigned long	saved_gfer;
#endif
};

struct pxa_gpio_chip {
	struct device *dev;
	struct gpio_chip chip;
	struct pxa_gpio_bank *banks;
	struct irq_domain *irqdomain;

	int irq0;
	int irq1;
	int (*set_wake)(unsigned int gpio, unsigned int on);
};

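/*
 * MMP_GPIO (0x10) doubles as a family flag: types that have this bit
 * set are MMP-style controllers, the rest are classic PXA - see
 * gpio_is_pxa_type() and gpio_is_mmp_type() below.
 */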
enum pxa_gpio_type {
	PXA25X_GPIO = 0,
	PXA26X_GPIO,
	PXA27X_GPIO,
	PXA3XX_GPIO,
	PXA93X_GPIO,
	MMP_GPIO = 0x10,
	MMP2_GPIO,
	PXA1928_GPIO,
};

struct pxa_gpio_id {
	enum pxa_gpio_type	type;
	int			gpio_nums;
};

static DEFINE_SPINLOCK(gpio_lock);
static struct pxa_gpio_chip *pxa_gpio_chip;
static enum pxa_gpio_type gpio_type;

static struct pxa_gpio_id pxa25x_id = {
	.type		= PXA25X_GPIO,
	.gpio_nums	= 85,
};

static struct pxa_gpio_id pxa26x_id = {
	.type		= PXA26X_GPIO,
	.gpio_nums	= 90,
};

static struct pxa_gpio_id pxa27x_id = {
	.type		= PXA27X_GPIO,
	.gpio_nums	= 121,
};

static struct pxa_gpio_id pxa3xx_id = {
	.type		= PXA3XX_GPIO,
	.gpio_nums	= 128,
};

static struct pxa_gpio_id pxa93x_id = {
	.type		= PXA93X_GPIO,
	.gpio_nums	= 192,
};

static struct pxa_gpio_id mmp_id = {
	.type		= MMP_GPIO,
	.gpio_nums	= 128,
};

static struct pxa_gpio_id mmp2_id = {
	.type		= MMP2_GPIO,
	.gpio_nums	= 192,
};

static struct pxa_gpio_id pxa1928_id = {
	.type		= PXA1928_GPIO,
	.gpio_nums	= 224,
};

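/*
 * Iterate over all banks covering GPIOs 0..pxa_last_gpio: 'i' is the
 * first GPIO number of the bank (in steps of 32) and 'b' points at the
 * corresponding pxa_gpio_bank.
 */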
#define for_each_gpio_bank(i, b, pc)					\
	for (i = 0, b = pc->banks; i <= pxa_last_gpio; i += 32, b++)

static inline struct pxa_gpio_chip *chip_to_pxachip(struct gpio_chip *c)
{
	struct pxa_gpio_chip *pxa_chip = gpiochip_get_data(c);

	return pxa_chip;
}

static inline void __iomem *gpio_bank_base(struct gpio_chip *c, int gpio)
{
	struct pxa_gpio_chip *p = gpiochip_get_data(c);
	struct pxa_gpio_bank *bank = p->banks + (gpio / 32);

	return bank->regbase;
}

static inline struct pxa_gpio_bank *gpio_to_pxabank(struct gpio_chip *c,
						    unsigned gpio)
{
	return chip_to_pxachip(c)->banks + gpio / 32;
}

static inline int gpio_is_pxa_type(int type)
{
	return (type & MMP_GPIO) == 0;
}

static inline int gpio_is_mmp_type(int type)
{
	return (type & MMP_GPIO) != 0;
}

/* GPIO86/87/88/89 on PXA26x have their direction bits in GPDR2 inverted,
 * and their Alternate Function value in GAFRx is '1' (not '0') when used
 * as a GPIO.
 */
static inline int __gpio_is_inverted(int gpio)
{
	if ((gpio_type == PXA26X_GPIO) && (gpio > 85))
		return 1;
	return 0;
}

/*
 * On PXA25x and PXA27x, GAFRx and GPDRx together decide the alternate
 * function of a GPIO, and GPDRx cannot be altered once configured. It
 * is attributed as "occupied" here (I know this terminology isn't
 * accurate, you are welcome to propose a better one :-)
 */
static inline int __gpio_is_occupied(struct pxa_gpio_chip *pchip, unsigned gpio)
{
	void __iomem *base;
	unsigned long gafr = 0, gpdr = 0;
	int ret, af = 0, dir = 0;

	base = gpio_bank_base(&pchip->chip, gpio);
	gpdr = readl_relaxed(base + GPDR_OFFSET);

	switch (gpio_type) {
	case PXA25X_GPIO:
	case PXA26X_GPIO:
	case PXA27X_GPIO:
		gafr = readl_relaxed(base + GAFR_OFFSET);
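		/*
		 * GAFR encodes 2 alternate-function bits per GPIO, so one
		 * 32-bit register covers 16 GPIOs - hence the
		 * (gpio & 0xf) * 2 shift below.
		 */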
		af = (gafr >> ((gpio & 0xf) * 2)) & 0x3;
		dir = gpdr & GPIO_bit(gpio);

		if (__gpio_is_inverted(gpio))
			ret = (af != 1) || (dir == 0);
		else
			ret = (af != 0) || (dir != 0);
		break;
	default:
		ret = gpdr & GPIO_bit(gpio);
		break;
	}
	return ret;
}

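/*
 * Translate a mapped Linux IRQ number back to its GPIO number, using
 * the mapping of hwirq 0 as the base.
 */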
int pxa_irq_to_gpio(int irq)
{
	struct pxa_gpio_chip *pchip = pxa_gpio_chip;
	int irq_gpio0;

	irq_gpio0 = irq_find_mapping(pchip->irqdomain, 0);
	if (irq_gpio0 > 0)
		return irq - irq_gpio0;

	return irq_gpio0;
}

static bool pxa_gpio_has_pinctrl(void)
{
	switch (gpio_type) {
	case PXA3XX_GPIO:
		return false;

	default:
		return true;
	}
}

static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct pxa_gpio_chip *pchip = chip_to_pxachip(chip);

	return irq_find_mapping(pchip->irqdomain, offset);
}

static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	void __iomem *base = gpio_bank_base(chip, offset);
	uint32_t value, mask = GPIO_bit(offset);
	unsigned long flags;
	int ret;

	if (pxa_gpio_has_pinctrl()) {
		ret = pinctrl_gpio_direction_input(chip->base + offset);
		if (!ret)
			return 0;
	}

	spin_lock_irqsave(&gpio_lock, flags);

	value = readl_relaxed(base + GPDR_OFFSET);
	if (__gpio_is_inverted(chip->base + offset))
		value |= mask;
	else
		value &= ~mask;
	writel_relaxed(value, base + GPDR_OFFSET);

	spin_unlock_irqrestore(&gpio_lock, flags);
	return 0;
}

static int pxa_gpio_direction_output(struct gpio_chip *chip,
				     unsigned offset, int value)
{
	void __iomem *base = gpio_bank_base(chip, offset);
	uint32_t tmp, mask = GPIO_bit(offset);
	unsigned long flags;
	int ret;

	writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));

	if (pxa_gpio_has_pinctrl()) {
		ret = pinctrl_gpio_direction_output(chip->base + offset);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&gpio_lock, flags);

	tmp = readl_relaxed(base + GPDR_OFFSET);
	if (__gpio_is_inverted(chip->base + offset))
		tmp &= ~mask;
	else
		tmp |= mask;
	writel_relaxed(tmp, base + GPDR_OFFSET);

	spin_unlock_irqrestore(&gpio_lock, flags);
	return 0;
}

static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	void __iomem *base = gpio_bank_base(chip, offset);
	u32 gplr = readl_relaxed(base + GPLR_OFFSET);

	return !!(gplr & GPIO_bit(offset));
}

static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	void __iomem *base = gpio_bank_base(chip, offset);

	writel_relaxed(GPIO_bit(offset),
		       base + (value ? GPSR_OFFSET : GPCR_OFFSET));
}

#ifdef CONFIG_OF_GPIO
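/*
 * Standard two-cell GPIO specifier: cell 0 is the GPIO number and
 * cell 1 holds the flags.
 */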
static int pxa_gpio_of_xlate(struct gpio_chip *gc,
			     const struct of_phandle_args *gpiospec,
			     u32 *flags)
{
	if (gpiospec->args[0] > pxa_last_gpio)
		return -EINVAL;

	if (flags)
		*flags = gpiospec->args[1];

	return gpiospec->args[0];
}
#endif

static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
			      struct device_node *np, void __iomem *regbase)
{
	int i, gpio, nbanks = DIV_ROUND_UP(ngpio, 32);
	struct pxa_gpio_bank *bank;

	pchip->banks = devm_kcalloc(pchip->dev, nbanks, sizeof(*pchip->banks),
				    GFP_KERNEL);
	if (!pchip->banks)
		return -ENOMEM;

	pchip->chip.label = "gpio-pxa";
	pchip->chip.direction_input  = pxa_gpio_direction_input;
	pchip->chip.direction_output = pxa_gpio_direction_output;
	pchip->chip.get = pxa_gpio_get;
	pchip->chip.set = pxa_gpio_set;
	pchip->chip.to_irq = pxa_gpio_to_irq;
	pchip->chip.ngpio = ngpio;

	if (pxa_gpio_has_pinctrl()) {
		pchip->chip.request = gpiochip_generic_request;
		pchip->chip.free = gpiochip_generic_free;
	}

#ifdef CONFIG_OF_GPIO
	pchip->chip.of_node = np;
	pchip->chip.of_xlate = pxa_gpio_of_xlate;
	pchip->chip.of_gpio_n_cells = 2;
#endif

	for (i = 0, gpio = 0; i < nbanks; i++, gpio += 32) {
		bank = pchip->banks + i;
		bank->regbase = regbase + BANK_OFF(i);
	}

	return gpiochip_add_data(&pchip->chip, pchip);
}

/*
 * Update the GRERx and GFERx edge-detect bits, but only for those GPIOs
 * whose bits are set in c->irq_mask.
 */
static inline void update_edge_detect(struct pxa_gpio_bank *c)
{
	uint32_t grer, gfer;

	grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~c->irq_mask;
	gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~c->irq_mask;
	grer |= c->irq_edge_rise & c->irq_mask;
	gfer |= c->irq_edge_fall & c->irq_mask;
	writel_relaxed(grer, c->regbase + GRER_OFFSET);
	writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}

static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
{
	struct pxa_gpio_chip *pchip = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irqd_to_hwirq(d);
	struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio);
	unsigned long gpdr, mask = GPIO_bit(gpio);

	if (type == IRQ_TYPE_PROBE) {
		/* Don't mess with enabled GPIOs using preconfigured edges or
		 * GPIOs set to alternate function or to output during probe
		 */
		if ((c->irq_edge_rise | c->irq_edge_fall) & GPIO_bit(gpio))
			return 0;

		if (__gpio_is_occupied(pchip, gpio))
			return 0;

		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);

	if (__gpio_is_inverted(gpio))
		writel_relaxed(gpdr | mask,  c->regbase + GPDR_OFFSET);
	else
		writel_relaxed(gpdr & ~mask, c->regbase + GPDR_OFFSET);

	if (type & IRQ_TYPE_EDGE_RISING)
		c->irq_edge_rise |= mask;
	else
		c->irq_edge_rise &= ~mask;

	if (type & IRQ_TYPE_EDGE_FALLING)
		c->irq_edge_fall |= mask;
	else
		c->irq_edge_fall &= ~mask;

	update_edge_detect(c);

	pr_debug("%s: IRQ%d (GPIO%d) - edge%s%s\n", __func__, d->irq, gpio,
		((type & IRQ_TYPE_EDGE_RISING)  ? " rising"  : ""),
		((type & IRQ_TYPE_EDGE_FALLING) ? " falling" : ""));
	return 0;
}

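/*
 * Handler for the muxed GPIO interrupt: for every bank, read GEDR,
 * acknowledge the pending unmasked bits by writing them back, and
 * dispatch each one through the irq domain.  Loop until no further
 * bits turn up.
 */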
static irqreturn_t pxa_gpio_demux_handler(int in_irq, void *d)
{
	int loop, gpio, n, handled = 0;
	unsigned long gedr;
	struct pxa_gpio_chip *pchip = d;
	struct pxa_gpio_bank *c;

	do {
		loop = 0;
		for_each_gpio_bank(gpio, c, pchip) {
			gedr = readl_relaxed(c->regbase + GEDR_OFFSET);
			gedr = gedr & c->irq_mask;
			writel_relaxed(gedr, c->regbase + GEDR_OFFSET);

			for_each_set_bit(n, &gedr, BITS_PER_LONG) {
				loop = 1;

				generic_handle_irq(
					irq_find_mapping(pchip->irqdomain,
							 gpio + n));
			}
		}
		handled += loop;
	} while (loop);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

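/*
 * On PXA, GPIO0 and GPIO1 have their own dedicated interrupt lines;
 * all other GPIOs arrive through the muxed handler above.
 */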
static irqreturn_t pxa_gpio_direct_handler(int in_irq, void *d)
{
	struct pxa_gpio_chip *pchip = d;

	if (in_irq == pchip->irq0) {
		generic_handle_irq(irq_find_mapping(pchip->irqdomain, 0));
	} else if (in_irq == pchip->irq1) {
		generic_handle_irq(irq_find_mapping(pchip->irqdomain, 1));
	} else {
		pr_err("%s() unknown irq %d\n", __func__, in_irq);
		return IRQ_NONE;
	}
	return IRQ_HANDLED;
}

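/* Acknowledge an edge by clearing the GPIO's status bit in GEDR. */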
static void pxa_ack_muxed_gpio(struct irq_data *d)
{
	struct pxa_gpio_chip *pchip = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irqd_to_hwirq(d);
	void __iomem *base = gpio_bank_base(&pchip->chip, gpio);

	writel_relaxed(GPIO_bit(gpio), base + GEDR_OFFSET);
}

static void pxa_mask_muxed_gpio(struct irq_data *d)
{
	struct pxa_gpio_chip *pchip = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irqd_to_hwirq(d);
	struct pxa_gpio_bank *b = gpio_to_pxabank(&pchip->chip, gpio);
	void __iomem *base = gpio_bank_base(&pchip->chip, gpio);
	uint32_t grer, gfer;

	b->irq_mask &= ~GPIO_bit(gpio);

	grer = readl_relaxed(base + GRER_OFFSET) & ~GPIO_bit(gpio);
	gfer = readl_relaxed(base + GFER_OFFSET) & ~GPIO_bit(gpio);
	writel_relaxed(grer, base + GRER_OFFSET);
	writel_relaxed(gfer, base + GFER_OFFSET);
}

static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on)
{
	struct pxa_gpio_chip *pchip = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irqd_to_hwirq(d);

	if (pchip->set_wake)
		return pchip->set_wake(gpio, on);
	else
		return 0;
}

static void pxa_unmask_muxed_gpio(struct irq_data *d)
{
	struct pxa_gpio_chip *pchip = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irqd_to_hwirq(d);
	struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio);

	c->irq_mask |= GPIO_bit(gpio);
	update_edge_detect(c);
}

static struct irq_chip pxa_muxed_gpio_chip = {
	.name		= "GPIO",
	.irq_ack	= pxa_ack_muxed_gpio,
	.irq_mask	= pxa_mask_muxed_gpio,
	.irq_unmask	= pxa_unmask_muxed_gpio,
	.irq_set_type	= pxa_gpio_irq_type,
	.irq_set_wake	= pxa_gpio_set_wake,
};

static int pxa_gpio_nums(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);
	struct pxa_gpio_id *pxa_id = (struct pxa_gpio_id *)id->driver_data;
	int count = 0;

	switch (pxa_id->type) {
	case PXA25X_GPIO:
	case PXA26X_GPIO:
	case PXA27X_GPIO:
	case PXA3XX_GPIO:
	case PXA93X_GPIO:
	case MMP_GPIO:
	case MMP2_GPIO:
	case PXA1928_GPIO:
		gpio_type = pxa_id->type;
		count = pxa_id->gpio_nums - 1;
		break;
	default:
		count = -EINVAL;
		break;
	}
	return count;
}

static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
				 handle_edge_irq);
	irq_set_chip_data(irq, d->host_data);
	irq_set_noprobe(irq);
	return 0;
}

const struct irq_domain_ops pxa_irq_domain_ops = {
	.map	= pxa_irq_domain_map,
	.xlate	= irq_domain_xlate_twocell,
};

#ifdef CONFIG_OF
static const struct of_device_id pxa_gpio_dt_ids[] = {
	{ .compatible = "intel,pxa25x-gpio",	.data = &pxa25x_id, },
	{ .compatible = "intel,pxa26x-gpio",	.data = &pxa26x_id, },
	{ .compatible = "intel,pxa27x-gpio",	.data = &pxa27x_id, },
	{ .compatible = "intel,pxa3xx-gpio",	.data = &pxa3xx_id, },
	{ .compatible = "marvell,pxa93x-gpio",	.data = &pxa93x_id, },
	{ .compatible = "marvell,mmp-gpio",	.data = &mmp_id, },
	{ .compatible = "marvell,mmp2-gpio",	.data = &mmp2_id, },
	{ .compatible = "marvell,pxa1928-gpio",	.data = &pxa1928_id, },
	{}
};

static int pxa_gpio_probe_dt(struct platform_device *pdev,
			     struct pxa_gpio_chip *pchip)
{
	int nr_gpios;
	const struct pxa_gpio_id *gpio_id;

	gpio_id = of_device_get_match_data(&pdev->dev);
	gpio_type = gpio_id->type;

	nr_gpios = gpio_id->gpio_nums;
	pxa_last_gpio = nr_gpios - 1;

	irq_base = devm_irq_alloc_descs(&pdev->dev, -1, 0, nr_gpios, 0);
	if (irq_base < 0) {
		dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
		return irq_base;
	}
	return irq_base;
}
#else
#define pxa_gpio_probe_dt(pdev, pchip)		(-1)
#endif

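/*
 * Probe sequence: determine the GPIO count and IRQ base (from board
 * platform data or from the DT match data), build a legacy irq domain
 * over the allocated descriptors, map and enable the register block,
 * register the gpio_chip, clear all edge detects, and finally request
 * the gpio0/gpio1 and muxed interrupt lines.
 */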
static int pxa_gpio_probe(struct platform_device *pdev)
{
	struct pxa_gpio_chip *pchip;
	struct pxa_gpio_bank *c;
	struct resource *res;
	struct clk *clk;
	struct pxa_gpio_platform_data *info;
	void __iomem *gpio_reg_base;
	int gpio, ret;
	int irq0 = 0, irq1 = 0, irq_mux;

	pchip = devm_kzalloc(&pdev->dev, sizeof(*pchip), GFP_KERNEL);
	if (!pchip)
		return -ENOMEM;
	pchip->dev = &pdev->dev;

	info = dev_get_platdata(&pdev->dev);
	if (info) {
		irq_base = info->irq_base;
		if (irq_base <= 0)
			return -EINVAL;
		pxa_last_gpio = pxa_gpio_nums(pdev);
		pchip->set_wake = info->gpio_set_wake;
	} else {
		irq_base = pxa_gpio_probe_dt(pdev, pchip);
		if (irq_base < 0)
			return -EINVAL;
	}

	if (!pxa_last_gpio)
		return -EINVAL;

	pchip->irqdomain = irq_domain_add_legacy(pdev->dev.of_node,
						 pxa_last_gpio + 1, irq_base,
						 0, &pxa_irq_domain_ops, pchip);
	if (!pchip->irqdomain)
		return -ENOMEM;

	irq0 = platform_get_irq_byname(pdev, "gpio0");
	irq1 = platform_get_irq_byname(pdev, "gpio1");
	irq_mux = platform_get_irq_byname(pdev, "gpio_mux");
	if ((irq0 > 0 && irq1 <= 0) || (irq0 <= 0 && irq1 > 0)
		|| (irq_mux <= 0))
		return -EINVAL;

	pchip->irq0 = irq0;
	pchip->irq1 = irq1;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;
	gpio_reg_base = devm_ioremap(&pdev->dev, res->start,
				     resource_size(res));
	if (!gpio_reg_base)
		return -EINVAL;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "failed to get GPIO clock: %ld\n",
			PTR_ERR(clk));
		return PTR_ERR(clk);
	}
	ret = clk_prepare_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* Initialize GPIO chips */
	ret = pxa_init_gpio_chip(pchip, pxa_last_gpio + 1, pdev->dev.of_node,
				 gpio_reg_base);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* clear all GPIO edge detects */
	for_each_gpio_bank(gpio, c, pchip) {
		writel_relaxed(0, c->regbase + GFER_OFFSET);
		writel_relaxed(0, c->regbase + GRER_OFFSET);
		writel_relaxed(~0, c->regbase + GEDR_OFFSET);
		/* unmask GPIO edge detect for AP side */
		if (gpio_is_mmp_type(gpio_type))
			writel_relaxed(~0, c->regbase + ED_MASK_OFFSET);
	}

	if (irq0 > 0) {
		ret = devm_request_irq(&pdev->dev,
				       irq0, pxa_gpio_direct_handler, 0,
				       "gpio-0", pchip);
		if (ret)
			dev_err(&pdev->dev, "request of gpio0 irq failed: %d\n",
				ret);
	}
	if (irq1 > 0) {
		ret = devm_request_irq(&pdev->dev,
				       irq1, pxa_gpio_direct_handler, 0,
				       "gpio-1", pchip);
		if (ret)
			dev_err(&pdev->dev, "request of gpio1 irq failed: %d\n",
				ret);
	}
	ret = devm_request_irq(&pdev->dev,
			       irq_mux, pxa_gpio_demux_handler, 0,
			       "gpio-mux", pchip);
	if (ret)
		dev_err(&pdev->dev, "request of gpio-mux irq failed: %d\n",
			ret);

	pxa_gpio_chip = pchip;

	return 0;
}

static const struct platform_device_id gpio_id_table[] = {
	{ "pxa25x-gpio",	(unsigned long)&pxa25x_id },
	{ "pxa26x-gpio",	(unsigned long)&pxa26x_id },
	{ "pxa27x-gpio",	(unsigned long)&pxa27x_id },
	{ "pxa3xx-gpio",	(unsigned long)&pxa3xx_id },
	{ "pxa93x-gpio",	(unsigned long)&pxa93x_id },
	{ "mmp-gpio",		(unsigned long)&mmp_id },
	{ "mmp2-gpio",		(unsigned long)&mmp2_id },
	{ "pxa1928-gpio",	(unsigned long)&pxa1928_id },
	{ },
};

static struct platform_driver pxa_gpio_driver = {
	.probe		= pxa_gpio_probe,
	.driver		= {
		.name	= "pxa-gpio",
		.of_match_table = of_match_ptr(pxa_gpio_dt_ids),
	},
	.id_table	= gpio_id_table,
};

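/*
 * The driver is registered from two initcall levels: early (postcore)
 * on legacy board-file platforms, and at device_initcall time when
 * booting with a populated device tree.
 */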
static int __init pxa_gpio_legacy_init(void)
{
	if (of_have_populated_dt())
		return 0;

	return platform_driver_register(&pxa_gpio_driver);
}
postcore_initcall(pxa_gpio_legacy_init);

static int __init pxa_gpio_dt_init(void)
{
	if (of_have_populated_dt())
		return platform_driver_register(&pxa_gpio_driver);

	return 0;
}
device_initcall(pxa_gpio_dt_init);

#ifdef CONFIG_PM
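/*
 * Syscore suspend/resume: save each bank's level, direction and
 * edge-detect configuration on suspend, and restore the level through
 * GPSR/GPCR before re-applying the direction register on resume.
 */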
static int pxa_gpio_suspend(void)
{
	struct pxa_gpio_chip *pchip = pxa_gpio_chip;
	struct pxa_gpio_bank *c;
	int gpio;

	for_each_gpio_bank(gpio, c, pchip) {
		c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
		c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
		c->saved_grer = readl_relaxed(c->regbase + GRER_OFFSET);
		c->saved_gfer = readl_relaxed(c->regbase + GFER_OFFSET);

		/* Clear GPIO transition detect bits */
		writel_relaxed(0xffffffff, c->regbase + GEDR_OFFSET);
	}
	return 0;
}

static void pxa_gpio_resume(void)
{
	struct pxa_gpio_chip *pchip = pxa_gpio_chip;
	struct pxa_gpio_bank *c;
	int gpio;

	for_each_gpio_bank(gpio, c, pchip) {
		/* restore level with set/clear */
		writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET);
		writel_relaxed(~c->saved_gplr, c->regbase + GPCR_OFFSET);

		writel_relaxed(c->saved_grer, c->regbase + GRER_OFFSET);
		writel_relaxed(c->saved_gfer, c->regbase + GFER_OFFSET);
		writel_relaxed(c->saved_gpdr, c->regbase + GPDR_OFFSET);
	}
}
#else
#define pxa_gpio_suspend	NULL
#define pxa_gpio_resume		NULL
#endif

struct syscore_ops pxa_gpio_syscore_ops = {
	.suspend	= pxa_gpio_suspend,
	.resume		= pxa_gpio_resume,
};

static int __init pxa_gpio_sysinit(void)
{
	register_syscore_ops(&pxa_gpio_syscore_ops);
	return 0;
}
postcore_initcall(pxa_gpio_sysinit);