xref: /openbmc/linux/drivers/irqchip/irq-gic.c (revision 5104d265)
1 /*
2  *  linux/arch/arm/common/gic.c
3  *
4  *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * Interrupt architecture for the GIC:
11  *
12  * o There is one Interrupt Distributor, which receives interrupts
13  *   from system devices and sends them to the Interrupt Controllers.
14  *
15  * o There is one CPU Interface per CPU, which delivers interrupts sent
16  *   by the Distributor, and interrupts generated locally, to the
17  *   associated CPU. The base address of the CPU interface is usually
18  *   aliased so that the same address points to different chips depending
19  *   on the CPU it is accessed from.
20  *
21  * Note that IRQs 0-31 are special - they are local to each CPU.
22  * As such, the enable set/clear, pending set/clear and active bit
23  * registers are banked per-cpu for these sources.
24  */
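/*
 * For example: the first word of the enable/pending/active register
 * groups covers hwirqs 0-31 and is banked per CPU, so setting bit 29 in
 * that word of GIC_DIST_ENABLE_SET from CPU1 enables PPI 29 for CPU1
 * only; the other CPUs' copies are unaffected.
 */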
25 #include <linux/init.h>
26 #include <linux/kernel.h>
27 #include <linux/err.h>
28 #include <linux/module.h>
29 #include <linux/list.h>
30 #include <linux/smp.h>
31 #include <linux/cpu.h>
32 #include <linux/cpu_pm.h>
33 #include <linux/cpumask.h>
34 #include <linux/io.h>
35 #include <linux/of.h>
36 #include <linux/of_address.h>
37 #include <linux/of_irq.h>
38 #include <linux/irqdomain.h>
39 #include <linux/interrupt.h>
40 #include <linux/percpu.h>
41 #include <linux/slab.h>
42 #include <linux/irqchip/chained_irq.h>
43 #include <linux/irqchip/arm-gic.h>
44 
45 #include <asm/irq.h>
46 #include <asm/exception.h>
47 #include <asm/smp_plat.h>
48 
49 #include "irqchip.h"
50 
51 union gic_base {
52 	void __iomem *common_base;
53 	void __percpu __iomem **percpu_base;
54 };
55 
56 struct gic_chip_data {
57 	union gic_base dist_base;
58 	union gic_base cpu_base;
59 #ifdef CONFIG_CPU_PM
60 	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
61 	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
62 	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
63 	u32 __percpu *saved_ppi_enable;
64 	u32 __percpu *saved_ppi_conf;
65 #endif
66 	struct irq_domain *domain;
67 	unsigned int gic_irqs;
68 #ifdef CONFIG_GIC_NON_BANKED
69 	void __iomem *(*get_base)(union gic_base *);
70 #endif
71 };
72 
73 static DEFINE_RAW_SPINLOCK(irq_controller_lock);
74 
75 /*
76  * The GIC mapping of CPU interfaces does not necessarily match
77  * the logical CPU numbering.  Let's use a mapping as returned
78  * by the GIC itself.
79  */
80 #define NR_GIC_CPU_IF 8
81 static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
82 
83 /*
84  * Optional arch-specific GIC IRQ extension callbacks.
85  * They all default to NULL.
86  */
87 struct irq_chip gic_arch_extn = {
88 	.irq_eoi	= NULL,
89 	.irq_mask	= NULL,
90 	.irq_unmask	= NULL,
91 	.irq_retrigger	= NULL,
92 	.irq_set_type	= NULL,
93 	.irq_set_wake	= NULL,
94 };
95 
96 #ifndef MAX_GIC_NR
97 #define MAX_GIC_NR	1
98 #endif
99 
100 static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
101 
102 #ifdef CONFIG_GIC_NON_BANKED
103 static void __iomem *gic_get_percpu_base(union gic_base *base)
104 {
105 	return *__this_cpu_ptr(base->percpu_base);
106 }
107 
108 static void __iomem *gic_get_common_base(union gic_base *base)
109 {
110 	return base->common_base;
111 }
112 
113 static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
114 {
115 	return data->get_base(&data->dist_base);
116 }
117 
118 static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
119 {
120 	return data->get_base(&data->cpu_base);
121 }
122 
123 static inline void gic_set_base_accessor(struct gic_chip_data *data,
124 					 void __iomem *(*f)(union gic_base *))
125 {
126 	data->get_base = f;
127 }
128 #else
129 #define gic_data_dist_base(d)	((d)->dist_base.common_base)
130 #define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
131 #define gic_set_base_accessor(d, f)
132 #endif
133 
134 static inline void __iomem *gic_dist_base(struct irq_data *d)
135 {
136 	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
137 	return gic_data_dist_base(gic_data);
138 }
139 
140 static inline void __iomem *gic_cpu_base(struct irq_data *d)
141 {
142 	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
143 	return gic_data_cpu_base(gic_data);
144 }
145 
146 static inline unsigned int gic_irq(struct irq_data *d)
147 {
148 	return d->hwirq;
149 }
150 
151 /*
152  * Routines to acknowledge, disable and enable interrupts
153  */
154 static void gic_mask_irq(struct irq_data *d)
155 {
156 	u32 mask = 1 << (gic_irq(d) % 32);
157 
158 	raw_spin_lock(&irq_controller_lock);
159 	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
160 	if (gic_arch_extn.irq_mask)
161 		gic_arch_extn.irq_mask(d);
162 	raw_spin_unlock(&irq_controller_lock);
163 }
164 
165 static void gic_unmask_irq(struct irq_data *d)
166 {
167 	u32 mask = 1 << (gic_irq(d) % 32);
168 
169 	raw_spin_lock(&irq_controller_lock);
170 	if (gic_arch_extn.irq_unmask)
171 		gic_arch_extn.irq_unmask(d);
172 	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
173 	raw_spin_unlock(&irq_controller_lock);
174 }
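/*
 * Worked example of the enable register arithmetic above: the Distributor
 * packs one enable bit per interrupt, 32 to a register.  For hwirq 45,
 * mask = 1 << (45 % 32) = 1 << 13 and the word offset is (45 / 32) * 4 = 4,
 * so unmasking writes bit 13 to GIC_DIST_ENABLE_SET + 0x4 and masking
 * writes the same bit to GIC_DIST_ENABLE_CLEAR + 0x4.
 */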
175 
176 static void gic_eoi_irq(struct irq_data *d)
177 {
178 	if (gic_arch_extn.irq_eoi) {
179 		raw_spin_lock(&irq_controller_lock);
180 		gic_arch_extn.irq_eoi(d);
181 		raw_spin_unlock(&irq_controller_lock);
182 	}
183 
184 	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
185 }
186 
187 static int gic_set_type(struct irq_data *d, unsigned int type)
188 {
189 	void __iomem *base = gic_dist_base(d);
190 	unsigned int gicirq = gic_irq(d);
191 	u32 enablemask = 1 << (gicirq % 32);
192 	u32 enableoff = (gicirq / 32) * 4;
193 	u32 confmask = 0x2 << ((gicirq % 16) * 2);
194 	u32 confoff = (gicirq / 16) * 4;
195 	bool enabled = false;
196 	u32 val;
197 
198 	/* Interrupt configuration for SGIs can't be changed */
199 	if (gicirq < 16)
200 		return -EINVAL;
201 
202 	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
203 		return -EINVAL;
204 
205 	raw_spin_lock(&irq_controller_lock);
206 
207 	if (gic_arch_extn.irq_set_type)
208 		gic_arch_extn.irq_set_type(d, type);
209 
210 	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
211 	if (type == IRQ_TYPE_LEVEL_HIGH)
212 		val &= ~confmask;
213 	else if (type == IRQ_TYPE_EDGE_RISING)
214 		val |= confmask;
215 
216 	/*
217 	 * As recommended by the spec, disable the interrupt before changing
218 	 * the configuration
219 	 */
220 	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
221 		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
222 		enabled = true;
223 	}
224 
225 	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
226 
227 	if (enabled)
228 		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
229 
230 	raw_spin_unlock(&irq_controller_lock);
231 
232 	return 0;
233 }
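/*
 * Worked example of the config register arithmetic above: GIC_DIST_CONFIG
 * packs a 2-bit field per interrupt, 16 to a register.  For hwirq 45,
 * confoff = (45 / 16) * 4 = 8 and confmask = 0x2 << ((45 % 16) * 2) =
 * 0x2 << 26, i.e. bit 27 of that word.  Setting the bit selects
 * rising-edge triggering, clearing it selects high-level triggering.
 */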
234 
235 static int gic_retrigger(struct irq_data *d)
236 {
237 	if (gic_arch_extn.irq_retrigger)
238 		return gic_arch_extn.irq_retrigger(d);
239 
240 	/* the genirq layer expects 0 if we can't retrigger in hardware */
241 	return 0;
242 }
243 
244 #ifdef CONFIG_SMP
245 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
246 			    bool force)
247 {
248 	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
249 	unsigned int shift = (gic_irq(d) % 4) * 8;
250 	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
251 	u32 val, mask, bit;
252 
253 	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
254 		return -EINVAL;
255 
256 	mask = 0xff << shift;
257 	bit = gic_cpu_map[cpu] << shift;
258 
259 	raw_spin_lock(&irq_controller_lock);
260 	val = readl_relaxed(reg) & ~mask;
261 	writel_relaxed(val | bit, reg);
262 	raw_spin_unlock(&irq_controller_lock);
263 
264 	return IRQ_SET_MASK_OK;
265 }
266 #endif
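/*
 * Worked example of the target register layout used by gic_set_affinity():
 * GIC_DIST_TARGET holds one byte per interrupt, four to a register.  For
 * hwirq 45 the register is GIC_DIST_TARGET + (45 & ~3) = +44 and the field
 * sits at shift (45 % 4) * 8 = 8, so byte 1 of that word is rewritten with
 * gic_cpu_map[cpu], the physical CPU interface mask of the chosen CPU.
 */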
267 
268 #ifdef CONFIG_PM
269 static int gic_set_wake(struct irq_data *d, unsigned int on)
270 {
271 	int ret = -ENXIO;
272 
273 	if (gic_arch_extn.irq_set_wake)
274 		ret = gic_arch_extn.irq_set_wake(d, on);
275 
276 	return ret;
277 }
278 
279 #else
280 #define gic_set_wake	NULL
281 #endif
282 
283 static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
284 {
285 	u32 irqstat, irqnr;
286 	struct gic_chip_data *gic = &gic_data[0];
287 	void __iomem *cpu_base = gic_data_cpu_base(gic);
288 
289 	do {
290 		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
291 		irqnr = irqstat & ~0x1c00;
292 
293 		if (likely(irqnr > 15 && irqnr < 1021)) {
294 			irqnr = irq_find_mapping(gic->domain, irqnr);
295 			handle_IRQ(irqnr, regs);
296 			continue;
297 		}
298 		if (irqnr < 16) {
299 			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
300 #ifdef CONFIG_SMP
301 			handle_IPI(irqnr, regs);
302 #endif
303 			continue;
304 		}
305 		break;
306 	} while (1);
307 }
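/*
 * Decoding example for the acknowledge loop above: GIC_CPU_INTACK returns
 * the interrupt ID in bits [9:0] and, for SGIs, the source CPU in bits
 * [12:10]; "irqstat & ~0x1c00" strips the source CPU field.  IDs 16-1020
 * are looked up in the irq domain and handled, IDs below 16 are SGIs and
 * are EOIed with the full irqstat (source CPU included) before handle_IPI(),
 * and IDs 1021-1023 (1023 means nothing is pending) terminate the loop.
 */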
308 
309 static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
310 {
311 	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
312 	struct irq_chip *chip = irq_get_chip(irq);
313 	unsigned int cascade_irq, gic_irq;
314 	unsigned long status;
315 
316 	chained_irq_enter(chip, desc);
317 
318 	raw_spin_lock(&irq_controller_lock);
319 	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
320 	raw_spin_unlock(&irq_controller_lock);
321 
322 	gic_irq = (status & 0x3ff);
323 	if (gic_irq == 1023)
324 		goto out;
325 
326 	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
327 	if (unlikely(gic_irq < 32 || gic_irq > 1020))
328 		handle_bad_irq(cascade_irq, desc);
329 	else
330 		generic_handle_irq(cascade_irq);
331 
332  out:
333 	chained_irq_exit(chip, desc);
334 }
335 
336 static struct irq_chip gic_chip = {
337 	.name			= "GIC",
338 	.irq_mask		= gic_mask_irq,
339 	.irq_unmask		= gic_unmask_irq,
340 	.irq_eoi		= gic_eoi_irq,
341 	.irq_set_type		= gic_set_type,
342 	.irq_retrigger		= gic_retrigger,
343 #ifdef CONFIG_SMP
344 	.irq_set_affinity	= gic_set_affinity,
345 #endif
346 	.irq_set_wake		= gic_set_wake,
347 };
348 
349 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
350 {
351 	if (gic_nr >= MAX_GIC_NR)
352 		BUG();
353 	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
354 		BUG();
355 	irq_set_chained_handler(irq, gic_handle_cascade_irq);
356 }
357 
358 static u8 gic_get_cpumask(struct gic_chip_data *gic)
359 {
360 	void __iomem *base = gic_data_dist_base(gic);
361 	u32 mask, i;
362 
363 	for (i = mask = 0; i < 32; i += 4) {
364 		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
365 		mask |= mask >> 16;
366 		mask |= mask >> 8;
367 		if (mask)
368 			break;
369 	}
370 
371 	if (!mask)
372 		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
373 
374 	return mask;
375 }
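/*
 * Example: the GIC_DIST_TARGET registers for hwirqs 0-31 are read-only and
 * return, in each implemented byte lane, the interface mask of the CPU
 * performing the read.  A raw value of 0x02020202 (or 0x00000002 when only
 * some lanes are implemented) folds to 0x02 after the two OR-shifts above,
 * i.e. this CPU is GIC CPU interface 1.
 */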
376 
377 static void __init gic_dist_init(struct gic_chip_data *gic)
378 {
379 	unsigned int i;
380 	u32 cpumask;
381 	unsigned int gic_irqs = gic->gic_irqs;
382 	void __iomem *base = gic_data_dist_base(gic);
383 
384 	writel_relaxed(0, base + GIC_DIST_CTRL);
385 
386 	/*
387 	 * Set all global interrupts to be level triggered, active high.
388 	 */
389 	for (i = 32; i < gic_irqs; i += 16)
390 		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
391 
392 	/*
393 	 * Set all global interrupts to this CPU only.
394 	 */
395 	cpumask = gic_get_cpumask(gic);
396 	cpumask |= cpumask << 8;
397 	cpumask |= cpumask << 16;
398 	for (i = 32; i < gic_irqs; i += 4)
399 		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
400 
401 	/*
402 	 * Set priority on all global interrupts.
403 	 */
404 	for (i = 32; i < gic_irqs; i += 4)
405 		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
406 
407 	/*
408 	 * Disable all interrupts.  Leave the PPIs and SGIs alone
409 	 * as these enables are banked registers.
410 	 */
411 	for (i = 32; i < gic_irqs; i += 32)
412 		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
413 
414 	writel_relaxed(1, base + GIC_DIST_CTRL);
415 }
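/*
 * Register strides used above, as a worked example with gic_irqs = 160:
 * GIC_DIST_CONFIG packs 16 interrupts per word (offsets 0x8-0x24 written),
 * GIC_DIST_TARGET and GIC_DIST_PRI pack 4 per word (offsets 0x20-0x9c),
 * and GIC_DIST_ENABLE_CLEAR packs 32 per word (offsets 0x4-0x10).  The
 * words covering hwirqs 0-31 (the banked SGIs and PPIs) are left alone.
 * The pattern 0xa0a0a0a0 gives every SPI priority 0xa0; lower values mean
 * higher priority.
 */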
416 
417 static void gic_cpu_init(struct gic_chip_data *gic)
418 {
419 	void __iomem *dist_base = gic_data_dist_base(gic);
420 	void __iomem *base = gic_data_cpu_base(gic);
421 	unsigned int cpu_mask, cpu = smp_processor_id();
422 	int i;
423 
424 	/*
425 	 * Get what the GIC says our CPU mask is.
426 	 */
427 	BUG_ON(cpu >= NR_GIC_CPU_IF);
428 	cpu_mask = gic_get_cpumask(gic);
429 	gic_cpu_map[cpu] = cpu_mask;
430 
431 	/*
432 	 * Clear our mask from the other map entries in case they're
433 	 * still undefined.
434 	 */
435 	for (i = 0; i < NR_GIC_CPU_IF; i++)
436 		if (i != cpu)
437 			gic_cpu_map[i] &= ~cpu_mask;
438 
439 	/*
440 	 * Deal with the banked PPI and SGI interrupts - disable all
441 	 * PPI interrupts, ensure all SGI interrupts are enabled.
442 	 */
443 	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
444 	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
445 
446 	/*
447 	 * Set priority on PPI and SGI interrupts
448 	 */
449 	for (i = 0; i < 32; i += 4)
450 		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
451 
452 	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
453 	writel_relaxed(1, base + GIC_CPU_CTRL);
454 }
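/*
 * Note on the two CPU interface writes above: GIC_CPU_PRIMASK = 0xf0 lets
 * through only interrupts whose priority value is lower than 0xf0, so the
 * 0xa0 default programmed above passes the filter; writing 1 to
 * GIC_CPU_CTRL then enables interrupt signalling to this CPU.
 */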
455 
456 #ifdef CONFIG_CPU_PM
457 /*
458  * Saves the GIC distributor registers during suspend or idle.  Must be called
459  * with interrupts disabled but before powering down the GIC.  After calling
460  * this function, no interrupts will be delivered by the GIC, and another
461  * platform-specific wakeup source must be enabled.
462  */
463 static void gic_dist_save(unsigned int gic_nr)
464 {
465 	unsigned int gic_irqs;
466 	void __iomem *dist_base;
467 	int i;
468 
469 	if (gic_nr >= MAX_GIC_NR)
470 		BUG();
471 
472 	gic_irqs = gic_data[gic_nr].gic_irqs;
473 	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
474 
475 	if (!dist_base)
476 		return;
477 
478 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
479 		gic_data[gic_nr].saved_spi_conf[i] =
480 			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
481 
482 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
483 		gic_data[gic_nr].saved_spi_target[i] =
484 			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
485 
486 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
487 		gic_data[gic_nr].saved_spi_enable[i] =
488 			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
489 }
490 
491 /*
492  * Restores the GIC distributor registers during resume or when coming out of
493  * idle.  Must be called before enabling interrupts.  If a level interrupt
494  * that occurred while the GIC was suspended is still present, it will be
495  * handled normally, but any edge interrupts that occurred will not be seen by
496  * the GIC and need to be handled by the platform-specific wakeup source.
497  */
498 static void gic_dist_restore(unsigned int gic_nr)
499 {
500 	unsigned int gic_irqs;
501 	unsigned int i;
502 	void __iomem *dist_base;
503 
504 	if (gic_nr >= MAX_GIC_NR)
505 		BUG();
506 
507 	gic_irqs = gic_data[gic_nr].gic_irqs;
508 	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
509 
510 	if (!dist_base)
511 		return;
512 
513 	writel_relaxed(0, dist_base + GIC_DIST_CTRL);
514 
515 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
516 		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
517 			dist_base + GIC_DIST_CONFIG + i * 4);
518 
519 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
520 		writel_relaxed(0xa0a0a0a0,
521 			dist_base + GIC_DIST_PRI + i * 4);
522 
523 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
524 		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
525 			dist_base + GIC_DIST_TARGET + i * 4);
526 
527 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
528 		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
529 			dist_base + GIC_DIST_ENABLE_SET + i * 4);
530 
531 	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
532 }
533 
534 static void gic_cpu_save(unsigned int gic_nr)
535 {
536 	int i;
537 	u32 *ptr;
538 	void __iomem *dist_base;
539 	void __iomem *cpu_base;
540 
541 	if (gic_nr >= MAX_GIC_NR)
542 		BUG();
543 
544 	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
545 	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
546 
547 	if (!dist_base || !cpu_base)
548 		return;
549 
550 	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
551 	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
552 		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
553 
554 	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
555 	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
556 		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
557 
558 }
559 
560 static void gic_cpu_restore(unsigned int gic_nr)
561 {
562 	int i;
563 	u32 *ptr;
564 	void __iomem *dist_base;
565 	void __iomem *cpu_base;
566 
567 	if (gic_nr >= MAX_GIC_NR)
568 		BUG();
569 
570 	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
571 	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
572 
573 	if (!dist_base || !cpu_base)
574 		return;
575 
576 	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
577 	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
578 		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
579 
580 	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
581 	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
582 		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
583 
584 	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
585 		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
586 
587 	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
588 	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
589 }
590 
591 static int gic_notifier(struct notifier_block *self, unsigned long cmd,	void *v)
592 {
593 	int i;
594 
595 	for (i = 0; i < MAX_GIC_NR; i++) {
596 #ifdef CONFIG_GIC_NON_BANKED
597 		/* Skip over unused GICs */
598 		if (!gic_data[i].get_base)
599 			continue;
600 #endif
601 		switch (cmd) {
602 		case CPU_PM_ENTER:
603 			gic_cpu_save(i);
604 			break;
605 		case CPU_PM_ENTER_FAILED:
606 		case CPU_PM_EXIT:
607 			gic_cpu_restore(i);
608 			break;
609 		case CPU_CLUSTER_PM_ENTER:
610 			gic_dist_save(i);
611 			break;
612 		case CPU_CLUSTER_PM_ENTER_FAILED:
613 		case CPU_CLUSTER_PM_EXIT:
614 			gic_dist_restore(i);
615 			break;
616 		}
617 	}
618 
619 	return NOTIFY_OK;
620 }
621 
622 static struct notifier_block gic_notifier_block = {
623 	.notifier_call = gic_notifier,
624 };
625 
626 static void __init gic_pm_init(struct gic_chip_data *gic)
627 {
628 	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
629 		sizeof(u32));
630 	BUG_ON(!gic->saved_ppi_enable);
631 
632 	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
633 		sizeof(u32));
634 	BUG_ON(!gic->saved_ppi_conf);
635 
636 	if (gic == &gic_data[0])
637 		cpu_pm_register_notifier(&gic_notifier_block);
638 }
639 #else
640 static void __init gic_pm_init(struct gic_chip_data *gic)
641 {
642 }
643 #endif
644 
645 #ifdef CONFIG_SMP
646 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
647 {
648 	int cpu;
649 	unsigned long map = 0;
650 
651 	/* Convert our logical CPU mask into a physical one. */
652 	for_each_cpu(cpu, mask)
653 		map |= gic_cpu_map[cpu];
654 
655 	/*
656 	 * Ensure that stores to Normal memory are visible to the
657 	 * other CPUs before issuing the IPI.
658 	 */
659 	dsb();
660 
661 	/* this always happens on GIC0 */
662 	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
663 }
664 #endif
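/*
 * Encoding example for GIC_DIST_SOFTINT as used above: bits [23:16] hold
 * the CPU target list and bits [3:0] the SGI number.  With map = 0x06
 * (GIC CPU interfaces 1 and 2) and irq = 1, the value written is
 * 0x00060001, which raises SGI 1 on those two CPUs.
 */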
665 
666 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
667 				irq_hw_number_t hw)
668 {
669 	if (hw < 32) {
670 		irq_set_percpu_devid(irq);
671 		irq_set_chip_and_handler(irq, &gic_chip,
672 					 handle_percpu_devid_irq);
673 		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
674 	} else {
675 		irq_set_chip_and_handler(irq, &gic_chip,
676 					 handle_fasteoi_irq);
677 		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
678 	}
679 	irq_set_chip_data(irq, d->host_data);
680 	return 0;
681 }
682 
683 static int gic_irq_domain_xlate(struct irq_domain *d,
684 				struct device_node *controller,
685 				const u32 *intspec, unsigned int intsize,
686 				unsigned long *out_hwirq, unsigned int *out_type)
687 {
688 	if (d->of_node != controller)
689 		return -EINVAL;
690 	if (intsize < 3)
691 		return -EINVAL;
692 
693 	/* Get the interrupt number and add 16 to skip over SGIs */
694 	*out_hwirq = intspec[1] + 16;
695 
696 	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
697 	if (!intspec[0])
698 		*out_hwirq += 16;
699 
700 	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
701 	return 0;
702 }
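/*
 * Example of the three-cell devicetree specifier decoded above: cell 0 is
 * 0 for an SPI and 1 for a PPI, cell 1 is the interrupt number within that
 * space, and cell 2 carries the trigger flags.  So <0 13 4> (SPI 13,
 * IRQ_TYPE_LEVEL_HIGH) maps to hwirq 13 + 16 + 16 = 45, while <1 13 4>
 * (PPI 13) maps to hwirq 13 + 16 = 29.
 */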
703 
704 #ifdef CONFIG_SMP
705 static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
706 			      void *hcpu)
707 {
708 	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
709 		gic_cpu_init(&gic_data[0]);
710 	return NOTIFY_OK;
711 }
712 
713 /*
714  * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
715  * priority because the GIC needs to be up before the ARM generic timers.
716  */
717 static struct notifier_block gic_cpu_notifier = {
718 	.notifier_call = gic_secondary_init,
719 	.priority = 100,
720 };
721 #endif
722 
723 const struct irq_domain_ops gic_irq_domain_ops = {
724 	.map = gic_irq_domain_map,
725 	.xlate = gic_irq_domain_xlate,
726 };
727 
728 void __init gic_init_bases(unsigned int gic_nr, int irq_start,
729 			   void __iomem *dist_base, void __iomem *cpu_base,
730 			   u32 percpu_offset, struct device_node *node)
731 {
732 	irq_hw_number_t hwirq_base;
733 	struct gic_chip_data *gic;
734 	int gic_irqs, irq_base, i;
735 
736 	BUG_ON(gic_nr >= MAX_GIC_NR);
737 
738 	gic = &gic_data[gic_nr];
739 #ifdef CONFIG_GIC_NON_BANKED
740 	if (percpu_offset) { /* Franken-GIC without banked registers... */
741 		unsigned int cpu;
742 
743 		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
744 		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
745 		if (WARN_ON(!gic->dist_base.percpu_base ||
746 			    !gic->cpu_base.percpu_base)) {
747 			free_percpu(gic->dist_base.percpu_base);
748 			free_percpu(gic->cpu_base.percpu_base);
749 			return;
750 		}
751 
752 		for_each_possible_cpu(cpu) {
753 			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
754 			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
755 			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
756 		}
757 
758 		gic_set_base_accessor(gic, gic_get_percpu_base);
759 	} else
760 #endif
761 	{			/* Normal, sane GIC... */
762 		WARN(percpu_offset,
763 		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
764 		     percpu_offset);
765 		gic->dist_base.common_base = dist_base;
766 		gic->cpu_base.common_base = cpu_base;
767 		gic_set_base_accessor(gic, gic_get_common_base);
768 	}
769 
770 	/*
771 	 * Initialize the CPU interface map to all CPUs.
772 	 * It will be refined as each CPU probes its ID.
773 	 */
774 	for (i = 0; i < NR_GIC_CPU_IF; i++)
775 		gic_cpu_map[i] = 0xff;
776 
777 	/*
778 	 * For primary GICs, skip over SGIs.
779 	 * For secondary GICs, skip over PPIs, too.
780 	 */
781 	if (gic_nr == 0 && (irq_start & 31) > 0) {
782 		hwirq_base = 16;
783 		if (irq_start != -1)
784 			irq_start = (irq_start & ~31) + 16;
785 	} else {
786 		hwirq_base = 32;
787 	}
788 
789 	/*
790 	 * Find out how many interrupts are supported.
791 	 * The GIC only supports up to 1020 interrupt sources.
792 	 */
793 	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
794 	gic_irqs = (gic_irqs + 1) * 32;
795 	if (gic_irqs > 1020)
796 		gic_irqs = 1020;
797 	gic->gic_irqs = gic_irqs;
798 
799 	gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
800 	irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
801 	if (IS_ERR_VALUE(irq_base)) {
802 		WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
803 		     irq_start);
804 		irq_base = irq_start;
805 	}
806 	gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
807 				    hwirq_base, &gic_irq_domain_ops, gic);
808 	if (WARN_ON(!gic->domain))
809 		return;
810 
811 #ifdef CONFIG_SMP
812 	set_smp_cross_call(gic_raise_softirq);
813 	register_cpu_notifier(&gic_cpu_notifier);
814 #endif
815 
816 	set_handle_irq(gic_handle_irq);
817 
818 	gic_chip.flags |= gic_arch_extn.flags;
819 	gic_dist_init(gic);
820 	gic_cpu_init(gic);
821 	gic_pm_init(gic);
822 }
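/*
 * Worked example of the GIC_DIST_CTR calculation in gic_init_bases():
 * bits [4:0] hold ITLinesNumber and the GIC implements
 * 32 * (ITLinesNumber + 1) interrupt IDs, capped at 1020.  A value of 4
 * gives 160 IDs; for a primary GIC (hwirq_base = 16) that leaves 144
 * descriptors to allocate and map into the legacy irq domain.
 */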
823 
824 #ifdef CONFIG_OF
825 static int gic_cnt __initdata;
826 
827 int __init gic_of_init(struct device_node *node, struct device_node *parent)
828 {
829 	void __iomem *cpu_base;
830 	void __iomem *dist_base;
831 	u32 percpu_offset;
832 	int irq;
833 
834 	if (WARN_ON(!node))
835 		return -ENODEV;
836 
837 	dist_base = of_iomap(node, 0);
838 	WARN(!dist_base, "unable to map gic dist registers\n");
839 
840 	cpu_base = of_iomap(node, 1);
841 	WARN(!cpu_base, "unable to map gic cpu registers\n");
842 
843 	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
844 		percpu_offset = 0;
845 
846 	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
847 
848 	if (parent) {
849 		irq = irq_of_parse_and_map(node, 0);
850 		gic_cascade_irq(gic_cnt, irq);
851 	}
852 	gic_cnt++;
853 	return 0;
854 }
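/*
 * A minimal sketch of a matching devicetree node (addresses are
 * placeholders; reg holds the Distributor range first, then the CPU
 * interface range, and "cpu-offset" is only needed for non-banked GICs):
 *
 *	intc: interrupt-controller@2c001000 {
 *		compatible = "arm,cortex-a9-gic";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		reg = <0x2c001000 0x1000>,
 *		      <0x2c000100 0x100>;
 *	};
 */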
855 IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
856 IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
857 IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
858 IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
859 
860 #endif
861