/*
 *  linux/arch/arm/plat-pxa/gpio.c
 *
 *  Generic PXA GPIO handling
 *
 *  Author:	Nicolas Pitre
 *  Created:	Jun 15, 2001
 *  Copyright:	MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/gpio-pxa.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>

/*
 * We handle the GPIOs by banks; each bank covers up to 32 GPIOs with
 * one set of registers. The register offsets are organized below:
 *
 *           GPLR    GPDR    GPSR    GPCR    GRER    GFER    GEDR
 * BANK 0 - 0x0000  0x000C  0x0018  0x0024  0x0030  0x003C  0x0048
 * BANK 1 - 0x0004  0x0010  0x001C  0x0028  0x0034  0x0040  0x004C
 * BANK 2 - 0x0008  0x0014  0x0020  0x002C  0x0038  0x0044  0x0050
 *
 * BANK 3 - 0x0100  0x010C  0x0118  0x0124  0x0130  0x013C  0x0148
 * BANK 4 - 0x0104  0x0110  0x011C  0x0128  0x0134  0x0140  0x014C
 * BANK 5 - 0x0108  0x0114  0x0120  0x012C  0x0138  0x0144  0x0150
 *
 * BANK 6 - 0x0200  0x020C  0x0218  0x0224  0x0230  0x023C  0x0248
 *
 * NOTE:
 *   BANK 3 is only available on PXA27x and later processors.
 *   BANKs 4 and 5 are only available on PXA935 and PXA1928.
 *   BANK 6 is only available on PXA1928.
 */

#define GPLR_OFFSET	0x00
#define GPDR_OFFSET	0x0C
#define GPSR_OFFSET	0x18
#define GPCR_OFFSET	0x24
#define GRER_OFFSET	0x30
#define GFER_OFFSET	0x3C
#define GEDR_OFFSET	0x48
#define GAFR_OFFSET	0x54
#define ED_MASK_OFFSET	0x9C	/* GPIO edge detection for AP side */

#define BANK_OFF(n)	((((n) / 3) << 8) + (((n) % 3) << 2))
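
/*
 * Worked example of the macro above, matching the offset table: for
 * bank 4, BANK_OFF(4) = ((4 / 3) << 8) + ((4 % 3) << 2) = 0x100 + 0x04
 * = 0x104, which is the GPLR offset of bank 4.
 */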

int pxa_last_gpio;
static int irq_base;

struct pxa_gpio_bank {
	void __iomem	*regbase;
	unsigned long	irq_mask;
	unsigned long	irq_edge_rise;
	unsigned long	irq_edge_fall;

#ifdef CONFIG_PM
	unsigned long	saved_gplr;
	unsigned long	saved_gpdr;
	unsigned long	saved_grer;
	unsigned long	saved_gfer;
#endif
};

struct pxa_gpio_chip {
	struct device *dev;
	struct gpio_chip chip;
	struct pxa_gpio_bank *banks;
	struct irq_domain *irqdomain;

	int irq0;
	int irq1;
	int (*set_wake)(unsigned int gpio, unsigned int on);
};

enum pxa_gpio_type {
	PXA25X_GPIO = 0,
	PXA26X_GPIO,
	PXA27X_GPIO,
	PXA3XX_GPIO,
	PXA93X_GPIO,
	MMP_GPIO = 0x10,
	MMP2_GPIO,
	PXA1928_GPIO,
};

struct pxa_gpio_id {
	enum pxa_gpio_type	type;
	int			gpio_nums;
};

static DEFINE_SPINLOCK(gpio_lock);
static struct pxa_gpio_chip *pxa_gpio_chip;
static enum pxa_gpio_type gpio_type;

static struct pxa_gpio_id pxa25x_id = {
	.type		= PXA25X_GPIO,
	.gpio_nums	= 85,
};

static struct pxa_gpio_id pxa26x_id = {
	.type		= PXA26X_GPIO,
	.gpio_nums	= 90,
};

static struct pxa_gpio_id pxa27x_id = {
	.type		= PXA27X_GPIO,
	.gpio_nums	= 121,
};

static struct pxa_gpio_id pxa3xx_id = {
	.type		= PXA3XX_GPIO,
	.gpio_nums	= 128,
};

static struct pxa_gpio_id pxa93x_id = {
	.type		= PXA93X_GPIO,
	.gpio_nums	= 192,
};

static struct pxa_gpio_id mmp_id = {
	.type		= MMP_GPIO,
	.gpio_nums	= 128,
};

static struct pxa_gpio_id mmp2_id = {
	.type		= MMP2_GPIO,
	.gpio_nums	= 192,
};

static struct pxa_gpio_id pxa1928_id = {
	.type		= PXA1928_GPIO,
	.gpio_nums	= 224,
};

#define for_each_gpio_bank(i, b, pc)					\
	for (i = 0, b = pc->banks; i <= pxa_last_gpio; i += 32, b++)

static inline struct pxa_gpio_chip *chip_to_pxachip(struct gpio_chip *c)
{
	struct pxa_gpio_chip *pxa_chip = gpiochip_get_data(c);

	return pxa_chip;
}

static inline void __iomem *gpio_bank_base(struct gpio_chip *c, int gpio)
{
	struct pxa_gpio_chip *p = gpiochip_get_data(c);
	struct pxa_gpio_bank *bank = p->banks + (gpio / 32);

	return bank->regbase;
}

static inline struct pxa_gpio_bank *gpio_to_pxabank(struct gpio_chip *c,
						    unsigned gpio)
{
	return chip_to_pxachip(c)->banks + gpio / 32;
}

static inline int gpio_is_pxa_type(int type)
{
	return (type & MMP_GPIO) == 0;
}

static inline int gpio_is_mmp_type(int type)
{
	return (type & MMP_GPIO) != 0;
}

/*
 * GPIO86/87/88/89 on PXA26x have their direction bits in PXA_GPDR(2)
 * inverted, as well as their Alternate Function value being '1' for
 * GPIO in GAFRx.
 */
static inline int __gpio_is_inverted(int gpio)
{
	if ((gpio_type == PXA26X_GPIO) && (gpio > 85))
		return 1;
	return 0;
}

/*
 * On PXA25x and PXA27x, GAFRx and GPDRx together decide the alternate
 * function of a GPIO, and GPDRx cannot be altered once configured. It
 * is attributed as "occupied" here (I know this terminology isn't
 * accurate, you are welcome to propose a better one :-)
 */
static inline int __gpio_is_occupied(struct pxa_gpio_chip *pchip, unsigned gpio)
{
	void __iomem *base;
	unsigned long gafr = 0, gpdr = 0;
	int ret, af = 0, dir = 0;

	base = gpio_bank_base(&pchip->chip, gpio);
	gpdr = readl_relaxed(base + GPDR_OFFSET);

	switch (gpio_type) {
	case PXA25X_GPIO:
	case PXA26X_GPIO:
	case PXA27X_GPIO:
		gafr = readl_relaxed(base + GAFR_OFFSET);
		af = (gafr >> ((gpio & 0xf) * 2)) & 0x3;
		dir = gpdr & GPIO_bit(gpio);

		if (__gpio_is_inverted(gpio))
			ret = (af != 1) || (dir == 0);
		else
			ret = (af != 0) || (dir != 0);
		break;
	default:
		ret = gpdr & GPIO_bit(gpio);
		break;
	}
	return ret;
}

int pxa_irq_to_gpio(int irq)
{
	struct pxa_gpio_chip *pchip = pxa_gpio_chip;
	int irq_gpio0;

	irq_gpio0 = irq_find_mapping(pchip->irqdomain, 0);
	if (irq_gpio0 > 0)
		return irq - irq_gpio0;

	return irq_gpio0;
}

static bool pxa_gpio_has_pinctrl(void)
{
	switch (gpio_type) {
	case PXA3XX_GPIO:
	case MMP2_GPIO:
		return false;

	default:
		return true;
	}
}

static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct pxa_gpio_chip *pchip = chip_to_pxachip(chip);

	return irq_find_mapping(pchip->irqdomain, offset);
}

static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	void __iomem *base = gpio_bank_base(chip, offset);
	uint32_t value, mask = GPIO_bit(offset);
	unsigned long flags;
	int ret;

	if (pxa_gpio_has_pinctrl()) {
		ret = pinctrl_gpio_direction_input(chip->base + offset);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&gpio_lock, flags);

	value = readl_relaxed(base + GPDR_OFFSET);
	if (__gpio_is_inverted(chip->base + offset))
		value |= mask;
	else
		value &= ~mask;
	writel_relaxed(value, base + GPDR_OFFSET);

	spin_unlock_irqrestore(&gpio_lock, flags);
	return 0;
}

static int pxa_gpio_direction_output(struct gpio_chip *chip,
				     unsigned offset, int value)
{
	void __iomem *base = gpio_bank_base(chip, offset);
	uint32_t tmp, mask = GPIO_bit(offset);
	unsigned long flags;
	int ret;

	writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));

	if (pxa_gpio_has_pinctrl()) {
		ret = pinctrl_gpio_direction_output(chip->base + offset);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&gpio_lock, flags);

	tmp = readl_relaxed(base + GPDR_OFFSET);
	if (__gpio_is_inverted(chip->base + offset))
		tmp &= ~mask;
	else
		tmp |= mask;
	writel_relaxed(tmp, base + GPDR_OFFSET);

	spin_unlock_irqrestore(&gpio_lock, flags);
	return 0;
}

static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	void __iomem *base = gpio_bank_base(chip, offset);
	u32 gplr = readl_relaxed(base + GPLR_OFFSET);

	return !!(gplr & GPIO_bit(offset));
}

static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	void __iomem *base = gpio_bank_base(chip, offset);

	writel_relaxed(GPIO_bit(offset),
		       base + (value ? GPSR_OFFSET : GPCR_OFFSET));
}

#ifdef CONFIG_OF_GPIO
static int pxa_gpio_of_xlate(struct gpio_chip *gc,
			     const struct of_phandle_args *gpiospec,
			     u32 *flags)
{
	if (gpiospec->args[0] > pxa_last_gpio)
		return -EINVAL;

	if (flags)
		*flags = gpiospec->args[1];

	return gpiospec->args[0];
}
#endif
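
/*
 * Illustrative consumer node for the two-cell specifier handled by
 * pxa_gpio_of_xlate() above (cell 0 = GPIO number, cell 1 = flags);
 * the node name and GPIO number below are examples only:
 *
 *	some-device {
 *		cd-gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
 *	};
 */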

static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
			      struct device_node *np, void __iomem *regbase)
{
	int i, gpio, nbanks = DIV_ROUND_UP(ngpio, 32);
	struct pxa_gpio_bank *bank;

	pchip->banks = devm_kcalloc(pchip->dev, nbanks, sizeof(*pchip->banks),
				    GFP_KERNEL);
	if (!pchip->banks)
		return -ENOMEM;

	pchip->chip.label = "gpio-pxa";
	pchip->chip.direction_input  = pxa_gpio_direction_input;
	pchip->chip.direction_output = pxa_gpio_direction_output;
	pchip->chip.get = pxa_gpio_get;
	pchip->chip.set = pxa_gpio_set;
	pchip->chip.to_irq = pxa_gpio_to_irq;
	pchip->chip.ngpio = ngpio;

	if (pxa_gpio_has_pinctrl()) {
		pchip->chip.request = gpiochip_generic_request;
		pchip->chip.free = gpiochip_generic_free;
	}

#ifdef CONFIG_OF_GPIO
	pchip->chip.of_node = np;
	pchip->chip.of_xlate = pxa_gpio_of_xlate;
	pchip->chip.of_gpio_n_cells = 2;
#endif

	for (i = 0, gpio = 0; i < nbanks; i++, gpio += 32) {
		bank = pchip->banks + i;
		bank->regbase = regbase + BANK_OFF(i);
	}

	return gpiochip_add_data(&pchip->chip, pchip);
}

/* Update only those GRERx and GFERx edge detection register bits if those
 * bits are set in c->irq_mask
 */
static inline void update_edge_detect(struct pxa_gpio_bank *c)
{
	uint32_t grer, gfer;

	grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~c->irq_mask;
	gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~c->irq_mask;
	grer |= c->irq_edge_rise & c->irq_mask;
	gfer |= c->irq_edge_fall & c->irq_mask;
	writel_relaxed(grer, c->regbase + GRER_OFFSET);
	writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}

static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
{
	struct pxa_gpio_chip *pchip = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irqd_to_hwirq(d);
	struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio);
	unsigned long gpdr, mask = GPIO_bit(gpio);

	if (type == IRQ_TYPE_PROBE) {
		/* Don't mess with enabled GPIOs using preconfigured edges or
		 * GPIOs set to alternate function or to output during probe
		 */
		if ((c->irq_edge_rise | c->irq_edge_fall) & GPIO_bit(gpio))
			return 0;

		if (__gpio_is_occupied(pchip, gpio))
			return 0;

		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);

	if (__gpio_is_inverted(gpio))
		writel_relaxed(gpdr | mask,  c->regbase + GPDR_OFFSET);
	else
		writel_relaxed(gpdr & ~mask, c->regbase + GPDR_OFFSET);

	if (type & IRQ_TYPE_EDGE_RISING)
		c->irq_edge_rise |= mask;
	else
		c->irq_edge_rise &= ~mask;

	if (type & IRQ_TYPE_EDGE_FALLING)
		c->irq_edge_fall |= mask;
	else
		c->irq_edge_fall &= ~mask;

	update_edge_detect(c);

	pr_debug("%s: IRQ%d (GPIO%d) - edge%s%s\n", __func__, d->irq, gpio,
		((type & IRQ_TYPE_EDGE_RISING)  ? " rising"  : ""),
		((type & IRQ_TYPE_EDGE_FALLING) ? " falling" : ""));
	return 0;
}

static irqreturn_t pxa_gpio_demux_handler(int in_irq, void *d)
{
	int loop, gpio, n, handled = 0;
	unsigned long gedr;
	struct pxa_gpio_chip *pchip = d;
	struct pxa_gpio_bank *c;

	do {
		loop = 0;
		for_each_gpio_bank(gpio, c, pchip) {
			gedr = readl_relaxed(c->regbase + GEDR_OFFSET);
			gedr = gedr & c->irq_mask;
			writel_relaxed(gedr, c->regbase + GEDR_OFFSET);

			for_each_set_bit(n, &gedr, BITS_PER_LONG) {
				loop = 1;

				generic_handle_irq(
					irq_find_mapping(pchip->irqdomain,
							 gpio + n));
			}
		}
		handled += loop;
	} while (loop);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

static irqreturn_t pxa_gpio_direct_handler(int in_irq, void *d)
{
	struct pxa_gpio_chip *pchip = d;

	if (in_irq == pchip->irq0) {
		generic_handle_irq(irq_find_mapping(pchip->irqdomain, 0));
	} else if (in_irq == pchip->irq1) {
		generic_handle_irq(irq_find_mapping(pchip->irqdomain, 1));
	} else {
		pr_err("%s() unknown irq %d\n", __func__, in_irq);
		return IRQ_NONE;
	}
	return IRQ_HANDLED;
}

static void pxa_ack_muxed_gpio(struct irq_data *d)
{
	struct pxa_gpio_chip *pchip = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irqd_to_hwirq(d);
	void __iomem *base = gpio_bank_base(&pchip->chip, gpio);

	writel_relaxed(GPIO_bit(gpio), base + GEDR_OFFSET);
}

static void pxa_mask_muxed_gpio(struct irq_data *d)
{
	struct pxa_gpio_chip *pchip = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irqd_to_hwirq(d);
	struct pxa_gpio_bank *b = gpio_to_pxabank(&pchip->chip, gpio);
	void __iomem *base = gpio_bank_base(&pchip->chip, gpio);
	uint32_t grer, gfer;

	b->irq_mask &= ~GPIO_bit(gpio);

	grer = readl_relaxed(base + GRER_OFFSET) & ~GPIO_bit(gpio);
	gfer = readl_relaxed(base + GFER_OFFSET) & ~GPIO_bit(gpio);
	writel_relaxed(grer, base + GRER_OFFSET);
	writel_relaxed(gfer, base + GFER_OFFSET);
}

static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on)
{
	struct pxa_gpio_chip *pchip = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irqd_to_hwirq(d);

	if (pchip->set_wake)
		return pchip->set_wake(gpio, on);
	else
		return 0;
}

static void pxa_unmask_muxed_gpio(struct irq_data *d)
{
	struct pxa_gpio_chip *pchip = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irqd_to_hwirq(d);
	struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio);

	c->irq_mask |= GPIO_bit(gpio);
	update_edge_detect(c);
}

static struct irq_chip pxa_muxed_gpio_chip = {
	.name		= "GPIO",
	.irq_ack	= pxa_ack_muxed_gpio,
	.irq_mask	= pxa_mask_muxed_gpio,
	.irq_unmask	= pxa_unmask_muxed_gpio,
	.irq_set_type	= pxa_gpio_irq_type,
	.irq_set_wake	= pxa_gpio_set_wake,
};

static int pxa_gpio_nums(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);
	struct pxa_gpio_id *pxa_id = (struct pxa_gpio_id *)id->driver_data;
	int count = 0;

	switch (pxa_id->type) {
	case PXA25X_GPIO:
	case PXA26X_GPIO:
	case PXA27X_GPIO:
	case PXA3XX_GPIO:
	case PXA93X_GPIO:
	case MMP_GPIO:
	case MMP2_GPIO:
	case PXA1928_GPIO:
		gpio_type = pxa_id->type;
		count = pxa_id->gpio_nums - 1;
		break;
	default:
		count = -EINVAL;
		break;
	}
	return count;
}

static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
				 handle_edge_irq);
	irq_set_chip_data(irq, d->host_data);
	irq_set_noprobe(irq);
	return 0;
}

static const struct irq_domain_ops pxa_irq_domain_ops = {
	.map	= pxa_irq_domain_map,
	.xlate	= irq_domain_xlate_twocell,
};

#ifdef CONFIG_OF
static const struct of_device_id pxa_gpio_dt_ids[] = {
	{ .compatible = "intel,pxa25x-gpio",	.data = &pxa25x_id, },
	{ .compatible = "intel,pxa26x-gpio",	.data = &pxa26x_id, },
	{ .compatible = "intel,pxa27x-gpio",	.data = &pxa27x_id, },
	{ .compatible = "intel,pxa3xx-gpio",	.data = &pxa3xx_id, },
	{ .compatible = "marvell,pxa93x-gpio",	.data = &pxa93x_id, },
	{ .compatible = "marvell,mmp-gpio",	.data = &mmp_id, },
	{ .compatible = "marvell,mmp2-gpio",	.data = &mmp2_id, },
	{ .compatible = "marvell,pxa1928-gpio",	.data = &pxa1928_id, },
	{}
};
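
/*
 * Illustrative controller node consumed by the DT probe path below; the
 * unit address, register size and interrupt numbers are examples only,
 * while the "gpio0"/"gpio1"/"gpio_mux" names match what pxa_gpio_probe()
 * requests via platform_get_irq_byname():
 *
 *	gpio: gpio@40e00000 {
 *		compatible = "intel,pxa27x-gpio";
 *		reg = <0x40e00000 0x10000>;
 *		gpio-controller;
 *		#gpio-cells = <2>;
 *		interrupts = <8 9 10>;
 *		interrupt-names = "gpio0", "gpio1", "gpio_mux";
 *	};
 */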

static int pxa_gpio_probe_dt(struct platform_device *pdev,
			     struct pxa_gpio_chip *pchip)
{
	int nr_gpios;
	const struct pxa_gpio_id *gpio_id;

	gpio_id = of_device_get_match_data(&pdev->dev);
	gpio_type = gpio_id->type;

	nr_gpios = gpio_id->gpio_nums;
	pxa_last_gpio = nr_gpios - 1;

	irq_base = devm_irq_alloc_descs(&pdev->dev, -1, 0, nr_gpios, 0);
	if (irq_base < 0) {
		dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
		return irq_base;
	}
	return irq_base;
}
#else
#define pxa_gpio_probe_dt(pdev, pchip)		(-1)
#endif

static int pxa_gpio_probe(struct platform_device *pdev)
{
	struct pxa_gpio_chip *pchip;
	struct pxa_gpio_bank *c;
	struct clk *clk;
	struct pxa_gpio_platform_data *info;
	void __iomem *gpio_reg_base;
	int gpio, ret;
	int irq0 = 0, irq1 = 0, irq_mux;

	pchip = devm_kzalloc(&pdev->dev, sizeof(*pchip), GFP_KERNEL);
	if (!pchip)
		return -ENOMEM;
	pchip->dev = &pdev->dev;

	info = dev_get_platdata(&pdev->dev);
	if (info) {
		irq_base = info->irq_base;
		if (irq_base <= 0)
			return -EINVAL;
		pxa_last_gpio = pxa_gpio_nums(pdev);
		pchip->set_wake = info->gpio_set_wake;
	} else {
		irq_base = pxa_gpio_probe_dt(pdev, pchip);
		if (irq_base < 0)
			return -EINVAL;
	}

	if (!pxa_last_gpio)
		return -EINVAL;

	pchip->irqdomain = irq_domain_add_legacy(pdev->dev.of_node,
						 pxa_last_gpio + 1, irq_base,
						 0, &pxa_irq_domain_ops, pchip);
	if (!pchip->irqdomain)
		return -ENOMEM;

	irq0 = platform_get_irq_byname(pdev, "gpio0");
	irq1 = platform_get_irq_byname(pdev, "gpio1");
	irq_mux = platform_get_irq_byname(pdev, "gpio_mux");
	if ((irq0 > 0 && irq1 <= 0) || (irq0 <= 0 && irq1 > 0)
		|| (irq_mux <= 0))
		return -EINVAL;

	pchip->irq0 = irq0;
	pchip->irq1 = irq1;

	gpio_reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(gpio_reg_base))
		return PTR_ERR(gpio_reg_base);

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "Error %ld getting gpio clock\n",
			PTR_ERR(clk));
		return PTR_ERR(clk);
	}
	ret = clk_prepare_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* Initialize GPIO chips */
	ret = pxa_init_gpio_chip(pchip, pxa_last_gpio + 1, pdev->dev.of_node,
				 gpio_reg_base);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* clear all GPIO edge detects */
	for_each_gpio_bank(gpio, c, pchip) {
		writel_relaxed(0, c->regbase + GFER_OFFSET);
		writel_relaxed(0, c->regbase + GRER_OFFSET);
		writel_relaxed(~0, c->regbase + GEDR_OFFSET);
		/* unmask GPIO edge detect for AP side */
		if (gpio_is_mmp_type(gpio_type))
			writel_relaxed(~0, c->regbase + ED_MASK_OFFSET);
	}

	if (irq0 > 0) {
		ret = devm_request_irq(&pdev->dev,
				       irq0, pxa_gpio_direct_handler, 0,
				       "gpio-0", pchip);
		if (ret)
			dev_err(&pdev->dev, "request of gpio0 irq failed: %d\n",
				ret);
	}
	if (irq1 > 0) {
		ret = devm_request_irq(&pdev->dev,
				       irq1, pxa_gpio_direct_handler, 0,
				       "gpio-1", pchip);
		if (ret)
			dev_err(&pdev->dev, "request of gpio1 irq failed: %d\n",
				ret);
	}
	ret = devm_request_irq(&pdev->dev,
			       irq_mux, pxa_gpio_demux_handler, 0,
			       "gpio-mux", pchip);
	if (ret)
		dev_err(&pdev->dev, "request of gpio-mux irq failed: %d\n",
			ret);

	pxa_gpio_chip = pchip;

	return 0;
}

static const struct platform_device_id gpio_id_table[] = {
	{ "pxa25x-gpio",	(unsigned long)&pxa25x_id },
	{ "pxa26x-gpio",	(unsigned long)&pxa26x_id },
	{ "pxa27x-gpio",	(unsigned long)&pxa27x_id },
	{ "pxa3xx-gpio",	(unsigned long)&pxa3xx_id },
	{ "pxa93x-gpio",	(unsigned long)&pxa93x_id },
	{ "mmp-gpio",		(unsigned long)&mmp_id },
	{ "mmp2-gpio",		(unsigned long)&mmp2_id },
	{ "pxa1928-gpio",	(unsigned long)&pxa1928_id },
	{ },
};
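
/*
 * Legacy (non-DT) registration sketch, assuming a board file and only
 * the platform-data fields consumed in pxa_gpio_probe() above
 * (irq_base, gpio_set_wake); the device name must match the id table
 * above and the callback and irq_base values are illustrative:
 *
 *	static struct pxa_gpio_platform_data board_gpio_pdata = {
 *		.irq_base	= PXA_GPIO_TO_IRQ(0),
 *		.gpio_set_wake	= board_gpio_set_wake,
 *	};
 *
 *	platform_device_register_data(NULL, "pxa27x-gpio", -1,
 *				      &board_gpio_pdata,
 *				      sizeof(board_gpio_pdata));
 */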

static struct platform_driver pxa_gpio_driver = {
	.probe		= pxa_gpio_probe,
	.driver		= {
		.name	= "pxa-gpio",
		.of_match_table = of_match_ptr(pxa_gpio_dt_ids),
	},
	.id_table	= gpio_id_table,
};

static int __init pxa_gpio_legacy_init(void)
{
	if (of_have_populated_dt())
		return 0;

	return platform_driver_register(&pxa_gpio_driver);
}
postcore_initcall(pxa_gpio_legacy_init);

static int __init pxa_gpio_dt_init(void)
{
	if (of_have_populated_dt())
		return platform_driver_register(&pxa_gpio_driver);

	return 0;
}
device_initcall(pxa_gpio_dt_init);

#ifdef CONFIG_PM
static int pxa_gpio_suspend(void)
{
	struct pxa_gpio_chip *pchip = pxa_gpio_chip;
	struct pxa_gpio_bank *c;
	int gpio;

	if (!pchip)
		return 0;

	for_each_gpio_bank(gpio, c, pchip) {
		c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
		c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
		c->saved_grer = readl_relaxed(c->regbase + GRER_OFFSET);
		c->saved_gfer = readl_relaxed(c->regbase + GFER_OFFSET);

		/* Clear GPIO transition detect bits */
		writel_relaxed(0xffffffff, c->regbase + GEDR_OFFSET);
	}
	return 0;
}

static void pxa_gpio_resume(void)
{
	struct pxa_gpio_chip *pchip = pxa_gpio_chip;
	struct pxa_gpio_bank *c;
	int gpio;

	if (!pchip)
		return;

	for_each_gpio_bank(gpio, c, pchip) {
		/* restore level with set/clear */
		writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET);
		writel_relaxed(~c->saved_gplr, c->regbase + GPCR_OFFSET);

		writel_relaxed(c->saved_grer, c->regbase + GRER_OFFSET);
		writel_relaxed(c->saved_gfer, c->regbase + GFER_OFFSET);
		writel_relaxed(c->saved_gpdr, c->regbase + GPDR_OFFSET);
	}
}
#else
#define pxa_gpio_suspend	NULL
#define pxa_gpio_resume		NULL
#endif

static struct syscore_ops pxa_gpio_syscore_ops = {
	.suspend	= pxa_gpio_suspend,
	.resume		= pxa_gpio_resume,
};

static int __init pxa_gpio_sysinit(void)
{
	register_syscore_ops(&pxa_gpio_syscore_ops);
	return 0;
}
postcore_initcall(pxa_gpio_sysinit);
826