xref: /openbmc/linux/drivers/irqchip/irq-gic-v3.c (revision ec62a746)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  */
6 
7 #define pr_fmt(fmt)	"GICv3: " fmt
8 
9 #include <linux/acpi.h>
10 #include <linux/cpu.h>
11 #include <linux/cpu_pm.h>
12 #include <linux/delay.h>
13 #include <linux/interrupt.h>
14 #include <linux/irqdomain.h>
15 #include <linux/kstrtox.h>
16 #include <linux/of.h>
17 #include <linux/of_address.h>
18 #include <linux/of_irq.h>
19 #include <linux/percpu.h>
20 #include <linux/refcount.h>
21 #include <linux/slab.h>
22 
23 #include <linux/irqchip.h>
24 #include <linux/irqchip/arm-gic-common.h>
25 #include <linux/irqchip/arm-gic-v3.h>
26 #include <linux/irqchip/irq-partition-percpu.h>
27 #include <linux/bitfield.h>
28 #include <linux/bits.h>
29 #include <linux/arm-smccc.h>
30 
31 #include <asm/cputype.h>
32 #include <asm/exception.h>
33 #include <asm/smp_plat.h>
34 #include <asm/virt.h>
35 
36 #include "irq-gic-common.h"
37 
38 #define GICD_INT_NMI_PRI	(GICD_INT_DEF_PRI & ~0x80)
39 
40 #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)
41 #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539	(1ULL << 1)
42 #define FLAGS_WORKAROUND_MTK_GICR_SAVE		(1ULL << 2)
43 #define FLAGS_WORKAROUND_ASR_ERRATUM_8601001	(1ULL << 3)
44 
45 #define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)
46 
47 struct redist_region {
48 	void __iomem		*redist_base;
49 	phys_addr_t		phys_base;
50 	bool			single_redist;
51 };
52 
53 struct gic_chip_data {
54 	struct fwnode_handle	*fwnode;
55 	phys_addr_t		dist_phys_base;
56 	void __iomem		*dist_base;
57 	struct redist_region	*redist_regions;
58 	struct rdists		rdists;
59 	struct irq_domain	*domain;
60 	u64			redist_stride;
61 	u32			nr_redist_regions;
62 	u64			flags;
63 	bool			has_rss;
64 	unsigned int		ppi_nr;
65 	struct partition_desc	**ppi_descs;
66 };
67 
68 #define T241_CHIPS_MAX		4
69 static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly;
70 static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum);
71 
72 static struct gic_chip_data gic_data __read_mostly;
73 static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
74 
75 #define GIC_ID_NR	(1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
76 #define GIC_LINE_NR	min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
77 #define GIC_ESPI_NR	GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
78 
79 /*
80  * The behaviours of RPR and PMR registers differ depending on the value of
81  * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
82  * distributor and redistributors depends on whether security is enabled in the
83  * GIC.
84  *
85  * When security is enabled, non-secure priority values from the (re)distributor
86  * are presented to the GIC CPUIF as follows:
87  *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
88  *
89  * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
90  * EL1 are subject to a similar operation thus matching the priorities presented
91  * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
92  * these values are unchanged by the GIC.
93  *
94  * see GICv3/GICv4 Architecture Specification (IHI0069D):
95  * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
96  *   priorities.
97  * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
98  *   interrupt.
99  */
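/*
 * Worked example (assuming the usual GICD_INT_DEF_PRI of 0xa0): with
 * security enabled, a priority of 0xa0 programmed into GIC_(R)DIST_PRI
 * is presented to the CPU interface as (0xa0 >> 1) | 0x80 = 0xd0, i.e.
 * squashed into the upper (non-secure) half of the priority range.
 */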
100 static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
101 
102 DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
103 EXPORT_SYMBOL(gic_nonsecure_priorities);
104 
105 /*
106  * When the Non-secure world has access to group 0 interrupts (as a
107  * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
108  * return the Distributor's view of the interrupt priority.
109  *
110  * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
111  * written by software is moved to the Non-secure range by the Distributor.
112  *
113  * If both are true (which is when gic_nonsecure_priorities gets enabled),
114  * we need to shift down the priority programmed by software to match it
115  * against the value returned by ICC_RPR_EL1.
116  */
117 #define GICD_INT_RPR_PRI(priority)					\
118 	({								\
119 		u32 __priority = (priority);				\
120 		if (static_branch_unlikely(&gic_nonsecure_priorities))	\
121 			__priority = 0x80 | (__priority >> 1);		\
122 									\
123 		__priority;						\
124 	})
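/*
 * Illustration (assuming GICD_INT_DEF_PRI is 0xa0, hence GICD_INT_NMI_PRI
 * is 0x20): with gic_nonsecure_priorities enabled, GICD_INT_RPR_PRI(0x20)
 * evaluates to 0x80 | (0x20 >> 1) = 0x90, which is what ICC_RPR_EL1
 * reports while a pseudo-NMI is in progress.
 */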
125 
126 /* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
127 static refcount_t *ppi_nmi_refs;
128 
129 static struct gic_kvm_info gic_v3_kvm_info __initdata;
130 static DEFINE_PER_CPU(bool, has_rss);
131 
132 #define MPIDR_RS(mpidr)			(((mpidr) & 0xF0UL) >> 4)
133 #define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
134 #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
135 #define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)
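/*
 * Each redistributor exposes two consecutive 64K frames: RD_base
 * (control and LPI registers) followed by SGI_base (SGI/PPI registers),
 * which is why SGI_base is computed as RD_base + SZ_64K above.
 */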
136 
137 /* Our default, arbitrary priority value. Linux only uses one anyway. */
138 #define DEFAULT_PMR_VALUE	0xf0
139 
140 enum gic_intid_range {
141 	SGI_RANGE,
142 	PPI_RANGE,
143 	SPI_RANGE,
144 	EPPI_RANGE,
145 	ESPI_RANGE,
146 	LPI_RANGE,
147 	__INVALID_RANGE__
148 };
149 
150 static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
151 {
152 	switch (hwirq) {
153 	case 0 ... 15:
154 		return SGI_RANGE;
155 	case 16 ... 31:
156 		return PPI_RANGE;
157 	case 32 ... 1019:
158 		return SPI_RANGE;
159 	case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
160 		return EPPI_RANGE;
161 	case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
162 		return ESPI_RANGE;
163 	case 8192 ... GENMASK(23, 0):
164 		return LPI_RANGE;
165 	default:
166 		return __INVALID_RANGE__;
167 	}
168 }
169 
170 static enum gic_intid_range get_intid_range(struct irq_data *d)
171 {
172 	return __get_intid_range(d->hwirq);
173 }
174 
175 static inline unsigned int gic_irq(struct irq_data *d)
176 {
177 	return d->hwirq;
178 }
179 
180 static inline bool gic_irq_in_rdist(struct irq_data *d)
181 {
182 	switch (get_intid_range(d)) {
183 	case SGI_RANGE:
184 	case PPI_RANGE:
185 	case EPPI_RANGE:
186 		return true;
187 	default:
188 		return false;
189 	}
190 }
191 
192 static inline void __iomem *gic_dist_base_alias(struct irq_data *d)
193 {
194 	if (static_branch_unlikely(&gic_nvidia_t241_erratum)) {
195 		irq_hw_number_t hwirq = irqd_to_hwirq(d);
196 		u32 chip;
197 
198 		/*
199 		 * For the erratum T241-FABRIC-4, read accesses to GICD_In{E}
200 		 * registers are directed to the chip that owns the SPI. The
201 		 * alias region can also be used for writes to the
202 		 * GICD_In{E} except GICD_ICENABLERn. Each chip has support
203 		 * for 320 {E}SPIs. Mappings for all 4 chips:
204 		 *    Chip0 = 32-351
205 		 *    Chip1 = 352-671
206 		 *    Chip2 = 672-991
207 		 *    Chip3 = 4096-4415
208 		 */
209 		switch (__get_intid_range(hwirq)) {
210 		case SPI_RANGE:
211 			chip = (hwirq - 32) / 320;
212 			break;
213 		case ESPI_RANGE:
214 			chip = 3;
215 			break;
216 		default:
217 			unreachable();
218 		}
219 		return t241_dist_base_alias[chip];
220 	}
221 
222 	return gic_data.dist_base;
223 }
224 
225 static inline void __iomem *gic_dist_base(struct irq_data *d)
226 {
227 	switch (get_intid_range(d)) {
228 	case SGI_RANGE:
229 	case PPI_RANGE:
230 	case EPPI_RANGE:
231 		/* SGI+PPI -> SGI_base for this CPU */
232 		return gic_data_rdist_sgi_base();
233 
234 	case SPI_RANGE:
235 	case ESPI_RANGE:
236 		/* SPI -> dist_base */
237 		return gic_data.dist_base;
238 
239 	default:
240 		return NULL;
241 	}
242 }
243 
244 static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
245 {
246 	u32 count = 1000000;	/* 1s! */
247 
248 	while (readl_relaxed(base + GICD_CTLR) & bit) {
249 		count--;
250 		if (!count) {
251 			pr_err_ratelimited("RWP timeout, gone fishing\n");
252 			return;
253 		}
254 		cpu_relax();
255 		udelay(1);
256 	}
257 }
258 
259 /* Wait for completion of a distributor change */
260 static void gic_dist_wait_for_rwp(void)
261 {
262 	gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
263 }
264 
265 /* Wait for completion of a redistributor change */
266 static void gic_redist_wait_for_rwp(void)
267 {
268 	gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
269 }
270 
271 #ifdef CONFIG_ARM64
272 
273 static u64 __maybe_unused gic_read_iar(void)
274 {
275 	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
276 		return gic_read_iar_cavium_thunderx();
277 	else
278 		return gic_read_iar_common();
279 }
280 #endif
281 
282 static void gic_enable_redist(bool enable)
283 {
284 	void __iomem *rbase;
285 	u32 count = 1000000;	/* 1s! */
286 	u32 val;
287 
288 	if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
289 		return;
290 
291 	rbase = gic_data_rdist_rd_base();
292 
293 	val = readl_relaxed(rbase + GICR_WAKER);
294 	if (enable)
295 		/* Wake up this CPU redistributor */
296 		val &= ~GICR_WAKER_ProcessorSleep;
297 	else
298 		val |= GICR_WAKER_ProcessorSleep;
299 	writel_relaxed(val, rbase + GICR_WAKER);
300 
301 	if (!enable) {		/* Check that GICR_WAKER is writeable */
302 		val = readl_relaxed(rbase + GICR_WAKER);
303 		if (!(val & GICR_WAKER_ProcessorSleep))
304 			return;	/* No PM support in this redistributor */
305 	}
306 
307 	while (--count) {
308 		val = readl_relaxed(rbase + GICR_WAKER);
309 		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
310 			break;
311 		cpu_relax();
312 		udelay(1);
313 	}
314 	if (!count)
315 		pr_err_ratelimited("redistributor failed to %s...\n",
316 				   enable ? "wakeup" : "sleep");
317 }
318 
319 /*
320  * Routines to disable, enable, EOI and route interrupts
321  */
322 static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
323 {
324 	switch (get_intid_range(d)) {
325 	case SGI_RANGE:
326 	case PPI_RANGE:
327 	case SPI_RANGE:
328 		*index = d->hwirq;
329 		return offset;
330 	case EPPI_RANGE:
331 		/*
332 		 * Contrary to the ESPI range, the EPPI range is contiguous
333 		 * to the PPI range in the registers, so let's adjust the
334 		 * displacement accordingly. Consistency is overrated.
335 		 */
336 		*index = d->hwirq - EPPI_BASE_INTID + 32;
337 		return offset;
338 	case ESPI_RANGE:
339 		*index = d->hwirq - ESPI_BASE_INTID;
340 		switch (offset) {
341 		case GICD_ISENABLER:
342 			return GICD_ISENABLERnE;
343 		case GICD_ICENABLER:
344 			return GICD_ICENABLERnE;
345 		case GICD_ISPENDR:
346 			return GICD_ISPENDRnE;
347 		case GICD_ICPENDR:
348 			return GICD_ICPENDRnE;
349 		case GICD_ISACTIVER:
350 			return GICD_ISACTIVERnE;
351 		case GICD_ICACTIVER:
352 			return GICD_ICACTIVERnE;
353 		case GICD_IPRIORITYR:
354 			return GICD_IPRIORITYRnE;
355 		case GICD_ICFGR:
356 			return GICD_ICFGRnE;
357 		case GICD_IROUTER:
358 			return GICD_IROUTERnE;
359 		default:
360 			break;
361 		}
362 		break;
363 	default:
364 		break;
365 	}
366 
367 	WARN_ON(1);
368 	*index = d->hwirq;
369 	return offset;
370 }
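/*
 * For example: an EPPI at hwirq EPPI_BASE_INTID + 2 keeps the standard
 * register offset but gets index 34 (right after the 32 legacy SGI/PPI
 * slots), whereas an ESPI at hwirq ESPI_BASE_INTID + 5 is redirected to
 * the corresponding GICD_*nE register with index 5.
 */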
371 
372 static int gic_peek_irq(struct irq_data *d, u32 offset)
373 {
374 	void __iomem *base;
375 	u32 index, mask;
376 
377 	offset = convert_offset_index(d, offset, &index);
378 	mask = 1 << (index % 32);
379 
380 	if (gic_irq_in_rdist(d))
381 		base = gic_data_rdist_sgi_base();
382 	else
383 		base = gic_dist_base_alias(d);
384 
385 	return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
386 }
387 
388 static void gic_poke_irq(struct irq_data *d, u32 offset)
389 {
390 	void __iomem *base;
391 	u32 index, mask;
392 
393 	offset = convert_offset_index(d, offset, &index);
394 	mask = 1 << (index % 32);
395 
396 	if (gic_irq_in_rdist(d))
397 		base = gic_data_rdist_sgi_base();
398 	else
399 		base = gic_data.dist_base;
400 
401 	writel_relaxed(mask, base + offset + (index / 32) * 4);
402 }
403 
404 static void gic_mask_irq(struct irq_data *d)
405 {
406 	gic_poke_irq(d, GICD_ICENABLER);
407 	if (gic_irq_in_rdist(d))
408 		gic_redist_wait_for_rwp();
409 	else
410 		gic_dist_wait_for_rwp();
411 }
412 
413 static void gic_eoimode1_mask_irq(struct irq_data *d)
414 {
415 	gic_mask_irq(d);
416 	/*
417 	 * When masking a forwarded interrupt, make sure it is
418 	 * deactivated as well.
419 	 *
420 	 * This ensures that an interrupt that is getting
421 	 * disabled/masked will not get "stuck", because there is
422 	 * no one to deactivate it (guest is being terminated).
423 	 */
424 	if (irqd_is_forwarded_to_vcpu(d))
425 		gic_poke_irq(d, GICD_ICACTIVER);
426 }
427 
428 static void gic_unmask_irq(struct irq_data *d)
429 {
430 	gic_poke_irq(d, GICD_ISENABLER);
431 }
432 
433 static inline bool gic_supports_nmi(void)
434 {
435 	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
436 	       static_branch_likely(&supports_pseudo_nmis);
437 }
438 
439 static int gic_irq_set_irqchip_state(struct irq_data *d,
440 				     enum irqchip_irq_state which, bool val)
441 {
442 	u32 reg;
443 
444 	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
445 		return -EINVAL;
446 
447 	switch (which) {
448 	case IRQCHIP_STATE_PENDING:
449 		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
450 		break;
451 
452 	case IRQCHIP_STATE_ACTIVE:
453 		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
454 		break;
455 
456 	case IRQCHIP_STATE_MASKED:
457 		if (val) {
458 			gic_mask_irq(d);
459 			return 0;
460 		}
461 		reg = GICD_ISENABLER;
462 		break;
463 
464 	default:
465 		return -EINVAL;
466 	}
467 
468 	gic_poke_irq(d, reg);
469 	return 0;
470 }
471 
472 static int gic_irq_get_irqchip_state(struct irq_data *d,
473 				     enum irqchip_irq_state which, bool *val)
474 {
475 	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
476 		return -EINVAL;
477 
478 	switch (which) {
479 	case IRQCHIP_STATE_PENDING:
480 		*val = gic_peek_irq(d, GICD_ISPENDR);
481 		break;
482 
483 	case IRQCHIP_STATE_ACTIVE:
484 		*val = gic_peek_irq(d, GICD_ISACTIVER);
485 		break;
486 
487 	case IRQCHIP_STATE_MASKED:
488 		*val = !gic_peek_irq(d, GICD_ISENABLER);
489 		break;
490 
491 	default:
492 		return -EINVAL;
493 	}
494 
495 	return 0;
496 }
497 
498 static void gic_irq_set_prio(struct irq_data *d, u8 prio)
499 {
500 	void __iomem *base = gic_dist_base(d);
501 	u32 offset, index;
502 
503 	offset = convert_offset_index(d, GICD_IPRIORITYR, &index);
504 
505 	writeb_relaxed(prio, base + offset + index);
506 }
507 
508 static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
509 {
510 	switch (__get_intid_range(hwirq)) {
511 	case PPI_RANGE:
512 		return hwirq - 16;
513 	case EPPI_RANGE:
514 		return hwirq - EPPI_BASE_INTID + 16;
515 	default:
516 		unreachable();
517 	}
518 }
519 
520 static u32 gic_get_ppi_index(struct irq_data *d)
521 {
522 	return __gic_get_ppi_index(d->hwirq);
523 }
524 
525 static int gic_irq_nmi_setup(struct irq_data *d)
526 {
527 	struct irq_desc *desc = irq_to_desc(d->irq);
528 
529 	if (!gic_supports_nmi())
530 		return -EINVAL;
531 
532 	if (gic_peek_irq(d, GICD_ISENABLER)) {
533 		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
534 		return -EINVAL;
535 	}
536 
537 	/*
538 	 * A secondary irq_chip should be in charge of LPI requests,
539 	 * so it should not be possible to get here.
540 	 */
541 	if (WARN_ON(gic_irq(d) >= 8192))
542 		return -EINVAL;
543 
544 	/* desc lock should already be held */
545 	if (gic_irq_in_rdist(d)) {
546 		u32 idx = gic_get_ppi_index(d);
547 
548 		/* Setting up PPI as NMI, only switch handler for first NMI */
549 		if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
550 			refcount_set(&ppi_nmi_refs[idx], 1);
551 			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
552 		}
553 	} else {
554 		desc->handle_irq = handle_fasteoi_nmi;
555 	}
556 
557 	gic_irq_set_prio(d, GICD_INT_NMI_PRI);
558 
559 	return 0;
560 }
561 
562 static void gic_irq_nmi_teardown(struct irq_data *d)
563 {
564 	struct irq_desc *desc = irq_to_desc(d->irq);
565 
566 	if (WARN_ON(!gic_supports_nmi()))
567 		return;
568 
569 	if (gic_peek_irq(d, GICD_ISENABLER)) {
570 		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
571 		return;
572 	}
573 
574 	/*
575 	 * A secondary irq_chip should be in charge of LPI requests,
576 	 * so it should not be possible to get here.
577 	 */
578 	if (WARN_ON(gic_irq(d) >= 8192))
579 		return;
580 
581 	/* desc lock should already be held */
582 	if (gic_irq_in_rdist(d)) {
583 		u32 idx = gic_get_ppi_index(d);
584 
585 		/* Tearing down NMI, only switch handler for last NMI */
586 		if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
587 			desc->handle_irq = handle_percpu_devid_irq;
588 	} else {
589 		desc->handle_irq = handle_fasteoi_irq;
590 	}
591 
592 	gic_irq_set_prio(d, GICD_INT_DEF_PRI);
593 }
594 
595 static void gic_eoi_irq(struct irq_data *d)
596 {
597 	write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
598 	isb();
599 }
600 
601 static void gic_eoimode1_eoi_irq(struct irq_data *d)
602 {
603 	/*
604 	 * No need to deactivate an LPI, or an interrupt that
605 	 * is getting forwarded to a vcpu.
606 	 */
607 	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
608 		return;
609 	gic_write_dir(gic_irq(d));
610 }
611 
612 static int gic_set_type(struct irq_data *d, unsigned int type)
613 {
614 	enum gic_intid_range range;
615 	unsigned int irq = gic_irq(d);
616 	void __iomem *base;
617 	u32 offset, index;
618 	int ret;
619 
620 	range = get_intid_range(d);
621 
622 	/* Interrupt configuration for SGIs can't be changed */
623 	if (range == SGI_RANGE)
624 		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
625 
626 	/* SPIs have restrictions on the supported types */
627 	if ((range == SPI_RANGE || range == ESPI_RANGE) &&
628 	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
629 		return -EINVAL;
630 
631 	if (gic_irq_in_rdist(d))
632 		base = gic_data_rdist_sgi_base();
633 	else
634 		base = gic_dist_base_alias(d);
635 
636 	offset = convert_offset_index(d, GICD_ICFGR, &index);
637 
638 	ret = gic_configure_irq(index, type, base + offset, NULL);
639 	if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
640 		/* Misconfigured PPIs are usually not fatal */
641 		pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
642 		ret = 0;
643 	}
644 
645 	return ret;
646 }
647 
648 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
649 {
650 	if (get_intid_range(d) == SGI_RANGE)
651 		return -EINVAL;
652 
653 	if (vcpu)
654 		irqd_set_forwarded_to_vcpu(d);
655 	else
656 		irqd_clr_forwarded_to_vcpu(d);
657 	return 0;
658 }
659 
660 static u64 gic_cpu_to_affinity(int cpu)
661 {
662 	u64 mpidr = cpu_logical_map(cpu);
663 	u64 aff;
664 
665 	/* ASR8601 needs to have its affinities shifted down... */
666 	if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001))
667 		mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1)	|
668 			 (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8));
669 
670 	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
671 	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
672 	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
673 	       MPIDR_AFFINITY_LEVEL(mpidr, 0));
674 
675 	return aff;
676 }
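/*
 * For instance, a CPU whose MPIDR has Aff3=0, Aff2=1, Aff1=2, Aff0=3
 * yields 0x00010203 here, matching the Aff3..Aff0 layout expected by
 * the GICD_IROUTERn registers (Aff3 in bits [39:32]).
 */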
677 
678 static void gic_deactivate_unhandled(u32 irqnr)
679 {
680 	if (static_branch_likely(&supports_deactivate_key)) {
681 		if (irqnr < 8192)
682 			gic_write_dir(irqnr);
683 	} else {
684 		write_gicreg(irqnr, ICC_EOIR1_EL1);
685 		isb();
686 	}
687 }
688 
689 /*
690  * Follow a read of the IAR with any HW maintenance that needs to happen prior
691  * to invoking the relevant IRQ handler. We must do two things:
692  *
693  * (1) Ensure instruction ordering between a read of IAR and subsequent
694  *     instructions in the IRQ handler using an ISB.
695  *
696  *     It is possible for the IAR to report an IRQ which was signalled *after*
697  *     the CPU took an IRQ exception as multiple interrupts can race to be
698  *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
699  *     later interrupts could be prioritized by the GIC.
700  *
701  *     For devices which are tightly coupled to the CPU, such as PMUs, a
702  *     context synchronization event is necessary to ensure that system
703  *     register state is not stale, as these may have been indirectly written
704  *     *after* exception entry.
705  *
706  * (2) Deactivate the interrupt when EOI mode 1 is in use.
707  */
708 static inline void gic_complete_ack(u32 irqnr)
709 {
710 	if (static_branch_likely(&supports_deactivate_key))
711 		write_gicreg(irqnr, ICC_EOIR1_EL1);
712 
713 	isb();
714 }
715 
716 static bool gic_rpr_is_nmi_prio(void)
717 {
718 	if (!gic_supports_nmi())
719 		return false;
720 
721 	return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
722 }
723 
724 static bool gic_irqnr_is_special(u32 irqnr)
725 {
726 	return irqnr >= 1020 && irqnr <= 1023;
727 }
728 
729 static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
730 {
731 	if (gic_irqnr_is_special(irqnr))
732 		return;
733 
734 	gic_complete_ack(irqnr);
735 
736 	if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
737 		WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
738 		gic_deactivate_unhandled(irqnr);
739 	}
740 }
741 
742 static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
743 {
744 	if (gic_irqnr_is_special(irqnr))
745 		return;
746 
747 	gic_complete_ack(irqnr);
748 
749 	if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
750 		WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
751 		gic_deactivate_unhandled(irqnr);
752 	}
753 }
754 
755 /*
756  * An exception has been taken from a context with IRQs enabled, and this could
757  * be an IRQ or an NMI.
758  *
759  * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
760  * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
761  * after handling any NMI but before handling any IRQ.
762  *
763  * The entry code has performed IRQ entry, and if an NMI is detected we must
764  * perform NMI entry/exit around invoking the handler.
765  */
766 static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
767 {
768 	bool is_nmi;
769 	u32 irqnr;
770 
771 	irqnr = gic_read_iar();
772 
773 	is_nmi = gic_rpr_is_nmi_prio();
774 
775 	if (is_nmi) {
776 		nmi_enter();
777 		__gic_handle_nmi(irqnr, regs);
778 		nmi_exit();
779 	}
780 
781 	if (gic_prio_masking_enabled()) {
782 		gic_pmr_mask_irqs();
783 		gic_arch_enable_irqs();
784 	}
785 
786 	if (!is_nmi)
787 		__gic_handle_irq(irqnr, regs);
788 }
789 
790 /*
791  * An exception has been taken from a context with IRQs disabled, which can only
792  * be an NMI.
793  *
794  * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
795  * DAIF.IF (and ICC_PMR_EL1) unchanged.
796  *
797  * The entry code has performed NMI entry.
798  */
799 static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
800 {
801 	u64 pmr;
802 	u32 irqnr;
803 
804 	/*
805 	 * We were in a context with IRQs disabled. However, the
806 	 * entry code has set PMR to a value that allows any
807 	 * interrupt to be acknowledged, and not just NMIs. This can
808 	 * lead to surprising effects if the NMI has been retired in
809 	 * the meantime and an IRQ is pending. The IRQ
810 	 * would then be taken in NMI context, something that nobody
811 	 * wants to debug twice.
812 	 *
813 	 * Until we sort this, drop PMR again to a level that will
814 	 * actually only allow NMIs before reading IAR, and then
815 	 * restore it to what it was.
816 	 */
817 	pmr = gic_read_pmr();
818 	gic_pmr_mask_irqs();
819 	isb();
820 	irqnr = gic_read_iar();
821 	gic_write_pmr(pmr);
822 
823 	__gic_handle_nmi(irqnr, regs);
824 }
825 
826 static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
827 {
828 	if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
829 		__gic_handle_irq_from_irqsoff(regs);
830 	else
831 		__gic_handle_irq_from_irqson(regs);
832 }
833 
834 static u32 gic_get_pribits(void)
835 {
836 	u32 pribits;
837 
838 	pribits = gic_read_ctlr();
839 	pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
840 	pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
841 	pribits++;
842 
843 	return pribits;
844 }
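/*
 * The PRIbits field encodes "number of priority bits minus one", hence
 * the final increment: a raw field value of 4, for example, means five
 * priority bits (32 distinct levels) are implemented.
 */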
845 
846 static bool gic_has_group0(void)
847 {
848 	u32 val;
849 	u32 old_pmr;
850 
851 	old_pmr = gic_read_pmr();
852 
853 	/*
854 	 * Let's find out if Group0 is under control of EL3 or not by
855 	 * setting the highest possible, non-zero priority in PMR.
856 	 *
857 	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
858 	 * order for the CPU interface to set bit 7, and keep the
859 	 * actual priority in the non-secure range. In the process, it
860 	 * loses the least significant bit and the actual priority
861 	 * becomes 0x80. Reading it back returns 0, indicating that
862 	 * we don't have access to Group0.
863 	 */
864 	gic_write_pmr(BIT(8 - gic_get_pribits()));
865 	val = gic_read_pmr();
866 
867 	gic_write_pmr(old_pmr);
868 
869 	return val != 0;
870 }
871 
872 static void __init gic_dist_init(void)
873 {
874 	unsigned int i;
875 	u64 affinity;
876 	void __iomem *base = gic_data.dist_base;
877 	u32 val;
878 
879 	/* Disable the distributor */
880 	writel_relaxed(0, base + GICD_CTLR);
881 	gic_dist_wait_for_rwp();
882 
883 	/*
884 	 * Configure SPIs as non-secure Group-1. This will only matter
885 	 * if the GIC only has a single security state. This will not
886 	 * do the right thing if the kernel is running in secure mode,
887 	 * but that's not the intended use case anyway.
888 	 */
889 	for (i = 32; i < GIC_LINE_NR; i += 32)
890 		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
891 
892 	/* Extended SPI range, not handled by the GICv2/GICv3 common code */
893 	for (i = 0; i < GIC_ESPI_NR; i += 32) {
894 		writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
895 		writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
896 	}
897 
898 	for (i = 0; i < GIC_ESPI_NR; i += 32)
899 		writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);
900 
901 	for (i = 0; i < GIC_ESPI_NR; i += 16)
902 		writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
903 
904 	for (i = 0; i < GIC_ESPI_NR; i += 4)
905 		writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);
906 
907 	/* Now do the common stuff */
908 	gic_dist_config(base, GIC_LINE_NR, NULL);
909 
910 	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
911 	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
912 		pr_info("Enabling SGIs without active state\n");
913 		val |= GICD_CTLR_nASSGIreq;
914 	}
915 
916 	/* Enable distributor with ARE, Group1, and wait for it to drain */
917 	writel_relaxed(val, base + GICD_CTLR);
918 	gic_dist_wait_for_rwp();
919 
920 	/*
921 	 * Set all global interrupts to the boot CPU only. ARE must be
922 	 * enabled.
923 	 */
924 	affinity = gic_cpu_to_affinity(smp_processor_id());
925 	for (i = 32; i < GIC_LINE_NR; i++)
926 		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
927 
928 	for (i = 0; i < GIC_ESPI_NR; i++)
929 		gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
930 }
931 
932 static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
933 {
934 	int ret = -ENODEV;
935 	int i;
936 
937 	for (i = 0; i < gic_data.nr_redist_regions; i++) {
938 		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
939 		u64 typer;
940 		u32 reg;
941 
942 		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
943 		if (reg != GIC_PIDR2_ARCH_GICv3 &&
944 		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
945 			pr_warn("No redistributor present @%p\n", ptr);
946 			break;
947 		}
948 
949 		do {
950 			typer = gic_read_typer(ptr + GICR_TYPER);
951 			ret = fn(gic_data.redist_regions + i, ptr);
952 			if (!ret)
953 				return 0;
954 
955 			if (gic_data.redist_regions[i].single_redist)
956 				break;
957 
958 			if (gic_data.redist_stride) {
959 				ptr += gic_data.redist_stride;
960 			} else {
961 				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
962 				if (typer & GICR_TYPER_VLPIS)
963 					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
964 			}
965 		} while (!(typer & GICR_TYPER_LAST));
966 	}
967 
968 	return ret ? -ENODEV : 0;
969 }
970 
971 static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
972 {
973 	unsigned long mpidr;
974 	u64 typer;
975 	u32 aff;
976 
977 	/*
978 	 * Convert affinity to a 32bit value that can be matched to
979 	 * GICR_TYPER bits [63:32].
980 	 */
981 	mpidr = gic_cpu_to_affinity(smp_processor_id());
982 
983 	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
984 	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
985 	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
986 	       MPIDR_AFFINITY_LEVEL(mpidr, 0));
987 
988 	typer = gic_read_typer(ptr + GICR_TYPER);
989 	if ((typer >> 32) == aff) {
990 		u64 offset = ptr - region->redist_base;
991 		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
992 		gic_data_rdist_rd_base() = ptr;
993 		gic_data_rdist()->phys_base = region->phys_base + offset;
994 
995 		pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
996 			smp_processor_id(), mpidr,
997 			(int)(region - gic_data.redist_regions),
998 			&gic_data_rdist()->phys_base);
999 		return 0;
1000 	}
1001 
1002 	/* Try next one */
1003 	return 1;
1004 }
1005 
1006 static int gic_populate_rdist(void)
1007 {
1008 	if (gic_iterate_rdists(__gic_populate_rdist) == 0)
1009 		return 0;
1010 
1011 	/* We couldn't even deal with ourselves... */
1012 	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
1013 	     smp_processor_id(),
1014 	     (unsigned long)cpu_logical_map(smp_processor_id()));
1015 	return -ENODEV;
1016 }
1017 
1018 static int __gic_update_rdist_properties(struct redist_region *region,
1019 					 void __iomem *ptr)
1020 {
1021 	u64 typer = gic_read_typer(ptr + GICR_TYPER);
1022 	u32 ctlr = readl_relaxed(ptr + GICR_CTLR);
1023 
1024 	/* Boot-time cleanup */
1025 	if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
1026 		u64 val;
1027 
1028 		/* Deactivate any present vPE */
1029 		val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
1030 		if (val & GICR_VPENDBASER_Valid)
1031 			gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
1032 					      ptr + SZ_128K + GICR_VPENDBASER);
1033 
1034 		/* Mark the VPE table as invalid */
1035 		val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
1036 		val &= ~GICR_VPROPBASER_4_1_VALID;
1037 		gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
1038 	}
1039 
1040 	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
1041 
1042 	/*
1043 	 * TYPER.RVPEID implies some form of DirectLPI, no matter what the
1044 	 * doc says... :-/ And CTLR.IR implies another subset of DirectLPI
1045 	 * that the ITS driver can make use of for LPIs (and not VLPIs).
1046 	 *
1047 	 * These are 3 different ways to express the same thing, depending
1048 	 * on the revision of the architecture and its relaxations over
1049 	 * time. Just group them under the 'direct_lpi' banner.
1050 	 */
1051 	gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
1052 	gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
1053 					   !!(ctlr & GICR_CTLR_IR) |
1054 					   gic_data.rdists.has_rvpeid);
1055 	gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
1056 
1057 	/* Detect non-sensical configurations */
1058 	if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
1059 		gic_data.rdists.has_direct_lpi = false;
1060 		gic_data.rdists.has_vlpis = false;
1061 		gic_data.rdists.has_rvpeid = false;
1062 	}
1063 
1064 	gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
1065 
1066 	return 1;
1067 }
1068 
1069 static void gic_update_rdist_properties(void)
1070 {
1071 	gic_data.ppi_nr = UINT_MAX;
1072 	gic_iterate_rdists(__gic_update_rdist_properties);
1073 	if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
1074 		gic_data.ppi_nr = 0;
1075 	pr_info("GICv3 features: %d PPIs%s%s\n",
1076 		gic_data.ppi_nr,
1077 		gic_data.has_rss ? ", RSS" : "",
1078 		gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");
1079 
1080 	if (gic_data.rdists.has_vlpis)
1081 		pr_info("GICv4 features: %s%s%s\n",
1082 			gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
1083 			gic_data.rdists.has_rvpeid ? "RVPEID " : "",
1084 			gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
1085 }
1086 
1087 /* Check whether it's single security state view */
1088 static inline bool gic_dist_security_disabled(void)
1089 {
1090 	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
1091 }
1092 
1093 static void gic_cpu_sys_reg_init(void)
1094 {
1095 	int i, cpu = smp_processor_id();
1096 	u64 mpidr = gic_cpu_to_affinity(cpu);
1097 	u64 need_rss = MPIDR_RS(mpidr);
1098 	bool group0;
1099 	u32 pribits;
1100 
1101 	/*
1102 	 * Need to check that the SRE bit has actually been set. If
1103 	 * not, it means that SRE is disabled at EL2. We're going to
1104 	 * die painfully, and there is nothing we can do about it.
1105 	 *
1106 	 * Kindly inform the luser.
1107 	 */
1108 	if (!gic_enable_sre())
1109 		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
1110 
1111 	pribits = gic_get_pribits();
1112 
1113 	group0 = gic_has_group0();
1114 
1115 	/* Set priority mask register */
1116 	if (!gic_prio_masking_enabled()) {
1117 		write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
1118 	} else if (gic_supports_nmi()) {
1119 		/*
1120 		 * Mismatched configuration with the boot CPU; the system is likely
1121 		 * to die as interrupt masking will not work properly on all
1122 		 * CPUs
1123 		 *
1124 		 * The boot CPU calls this function before enabling NMI support,
1125 		 * and as a result we'll never see this warning in the boot path
1126 		 * for that CPU.
1127 		 */
1128 		if (static_branch_unlikely(&gic_nonsecure_priorities))
1129 			WARN_ON(!group0 || gic_dist_security_disabled());
1130 		else
1131 			WARN_ON(group0 && !gic_dist_security_disabled());
1132 	}
1133 
1134 	/*
1135 	 * Some firmwares hand over to the kernel with the BPR changed from
1136 	 * its reset value (and with a value large enough to prevent
1137 	 * any pre-emptive interrupts from working at all). Writing a zero
1138 	 * to BPR restores its reset value.
1139 	 */
1140 	gic_write_bpr1(0);
1141 
1142 	if (static_branch_likely(&supports_deactivate_key)) {
1143 		/* EOI drops priority only (mode 1) */
1144 		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
1145 	} else {
1146 		/* EOI deactivates interrupt too (mode 0) */
1147 		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
1148 	}
1149 
1150 	/* Always whack Group0 before Group1 */
1151 	if (group0) {
1152 		switch (pribits) {
1153 		case 8:
1154 		case 7:
1155 			write_gicreg(0, ICC_AP0R3_EL1);
1156 			write_gicreg(0, ICC_AP0R2_EL1);
1157 			fallthrough;
1158 		case 6:
1159 			write_gicreg(0, ICC_AP0R1_EL1);
1160 			fallthrough;
1161 		case 5:
1162 		case 4:
1163 			write_gicreg(0, ICC_AP0R0_EL1);
1164 		}
1165 
1166 		isb();
1167 	}
1168 
1169 	switch (pribits) {
1170 	case 8:
1171 	case 7:
1172 		write_gicreg(0, ICC_AP1R3_EL1);
1173 		write_gicreg(0, ICC_AP1R2_EL1);
1174 		fallthrough;
1175 	case 6:
1176 		write_gicreg(0, ICC_AP1R1_EL1);
1177 		fallthrough;
1178 	case 5:
1179 	case 4:
1180 		write_gicreg(0, ICC_AP1R0_EL1);
1181 	}
1182 
1183 	isb();
1184 
1185 	/* ... and let's hit the road... */
1186 	gic_write_grpen1(1);
1187 
1188 	/* Keep the RSS capability status in per_cpu variable */
1189 	per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
1190 
1191 	/* Check that all CPUs are capable of sending SGIs to other CPUs */
1192 	for_each_online_cpu(i) {
1193 		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
1194 
1195 		need_rss |= MPIDR_RS(gic_cpu_to_affinity(i));
1196 		if (need_rss && (!have_rss))
1197 			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
1198 				cpu, (unsigned long)mpidr,
1199 				i, (unsigned long)gic_cpu_to_affinity(i));
1200 	}
1201 
1202 	/*
1203 	 * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
1204 	 * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
1205 	 * UNPREDICTABLE choice of:
1206 	 *   - The write is ignored.
1207 	 *   - The RS field is treated as 0.
1208 	 */
1209 	if (need_rss && (!gic_data.has_rss))
1210 		pr_crit_once("RSS is required but GICD doesn't support it\n");
1211 }
1212 
1213 static bool gicv3_nolpi;
1214 
1215 static int __init gicv3_nolpi_cfg(char *buf)
1216 {
1217 	return kstrtobool(buf, &gicv3_nolpi);
1218 }
1219 early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
1220 
1221 static int gic_dist_supports_lpis(void)
1222 {
1223 	return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
1224 		!!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
1225 		!gicv3_nolpi);
1226 }
1227 
1228 static void gic_cpu_init(void)
1229 {
1230 	void __iomem *rbase;
1231 	int i;
1232 
1233 	/* Register ourselves with the rest of the world */
1234 	if (gic_populate_rdist())
1235 		return;
1236 
1237 	gic_enable_redist(true);
1238 
1239 	WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
1240 	     !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
1241 	     "Distributor has extended ranges, but CPU%d doesn't\n",
1242 	     smp_processor_id());
1243 
1244 	rbase = gic_data_rdist_sgi_base();
1245 
1246 	/* Configure SGIs/PPIs as non-secure Group-1 */
1247 	for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
1248 		writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
1249 
1250 	gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);
1251 
1252 	/* initialise system registers */
1253 	gic_cpu_sys_reg_init();
1254 }
1255 
1256 #ifdef CONFIG_SMP
1257 
1258 #define MPIDR_TO_SGI_RS(mpidr)	(MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
1259 #define MPIDR_TO_SGI_CLUSTER_ID(mpidr)	((mpidr) & ~0xFUL)
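/*
 * Without Range Selector (RSS) support an SGI target list can only
 * address Aff0 values 0-15; MPIDR_TO_SGI_RS() supplies the RS field so
 * that CPUs with Aff0 >= 16 can be targeted when RSS is available.
 */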
1260 
1261 static int gic_starting_cpu(unsigned int cpu)
1262 {
1263 	gic_cpu_init();
1264 
1265 	if (gic_dist_supports_lpis())
1266 		its_cpu_init();
1267 
1268 	return 0;
1269 }
1270 
1271 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
1272 				   unsigned long cluster_id)
1273 {
1274 	int next_cpu, cpu = *base_cpu;
1275 	unsigned long mpidr;
1276 	u16 tlist = 0;
1277 
1278 	mpidr = gic_cpu_to_affinity(cpu);
1279 
1280 	while (cpu < nr_cpu_ids) {
1281 		tlist |= 1 << (mpidr & 0xf);
1282 
1283 		next_cpu = cpumask_next(cpu, mask);
1284 		if (next_cpu >= nr_cpu_ids)
1285 			goto out;
1286 		cpu = next_cpu;
1287 
1288 		mpidr = gic_cpu_to_affinity(cpu);
1289 
1290 		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
1291 			cpu--;
1292 			goto out;
1293 		}
1294 	}
1295 out:
1296 	*base_cpu = cpu;
1297 	return tlist;
1298 }
1299 
1300 #define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
1301 	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
1302 		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)
1303 
1304 static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
1305 {
1306 	u64 val;
1307 
1308 	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
1309 	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
1310 	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
1311 	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
1312 	       MPIDR_TO_SGI_RS(cluster_id)		|
1313 	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
1314 
1315 	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
1316 	gic_write_sgi1r(val);
1317 }
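/*
 * Small example (hypothetical values): sending SGI 5 to the CPUs with
 * Aff0 = 0 and 1 of cluster 0.0.0 gives tlist = 0x3 and
 * val = (5 << ICC_SGI1R_SGI_ID_SHIFT) | 0x3, i.e. the SGI number in
 * bits [27:24] and the target list in bits [15:0].
 */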
1318 
1319 static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
1320 {
1321 	int cpu;
1322 
1323 	if (WARN_ON(d->hwirq >= 16))
1324 		return;
1325 
1326 	/*
1327 	 * Ensure that stores to Normal memory are visible to the
1328 	 * other CPUs before issuing the IPI.
1329 	 */
1330 	dsb(ishst);
1331 
1332 	for_each_cpu(cpu, mask) {
1333 		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
1334 		u16 tlist;
1335 
1336 		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
1337 		gic_send_sgi(cluster_id, tlist, d->hwirq);
1338 	}
1339 
1340 	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
1341 	isb();
1342 }
1343 
1344 static void __init gic_smp_init(void)
1345 {
1346 	struct irq_fwspec sgi_fwspec = {
1347 		.fwnode		= gic_data.fwnode,
1348 		.param_count	= 1,
1349 	};
1350 	int base_sgi;
1351 
1352 	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
1353 				  "irqchip/arm/gicv3:starting",
1354 				  gic_starting_cpu, NULL);
1355 
1356 	/* Register all 8 non-secure SGIs */
1357 	base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec);
1358 	if (WARN_ON(base_sgi <= 0))
1359 		return;
1360 
1361 	set_smp_ipi_range(base_sgi, 8);
1362 }
1363 
1364 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1365 			    bool force)
1366 {
1367 	unsigned int cpu;
1368 	u32 offset, index;
1369 	void __iomem *reg;
1370 	int enabled;
1371 	u64 val;
1372 
1373 	if (force)
1374 		cpu = cpumask_first(mask_val);
1375 	else
1376 		cpu = cpumask_any_and(mask_val, cpu_online_mask);
1377 
1378 	if (cpu >= nr_cpu_ids)
1379 		return -EINVAL;
1380 
1381 	if (gic_irq_in_rdist(d))
1382 		return -EINVAL;
1383 
1384 	/* If interrupt was enabled, disable it first */
1385 	enabled = gic_peek_irq(d, GICD_ISENABLER);
1386 	if (enabled)
1387 		gic_mask_irq(d);
1388 
1389 	offset = convert_offset_index(d, GICD_IROUTER, &index);
1390 	reg = gic_dist_base(d) + offset + (index * 8);
1391 	val = gic_cpu_to_affinity(cpu);
1392 
1393 	gic_write_irouter(val, reg);
1394 
1395 	/*
1396 	 * If the interrupt was enabled, enable it again. Otherwise,
1397 	 * just wait for the distributor to have digested our changes.
1398 	 */
1399 	if (enabled)
1400 		gic_unmask_irq(d);
1401 
1402 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
1403 
1404 	return IRQ_SET_MASK_OK_DONE;
1405 }
1406 #else
1407 #define gic_set_affinity	NULL
1408 #define gic_ipi_send_mask	NULL
1409 #define gic_smp_init()		do { } while(0)
1410 #endif
1411 
1412 static int gic_retrigger(struct irq_data *data)
1413 {
1414 	return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
1415 }
1416 
1417 #ifdef CONFIG_CPU_PM
1418 static int gic_cpu_pm_notifier(struct notifier_block *self,
1419 			       unsigned long cmd, void *v)
1420 {
1421 	if (cmd == CPU_PM_EXIT) {
1422 		if (gic_dist_security_disabled())
1423 			gic_enable_redist(true);
1424 		gic_cpu_sys_reg_init();
1425 	} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
1426 		gic_write_grpen1(0);
1427 		gic_enable_redist(false);
1428 	}
1429 	return NOTIFY_OK;
1430 }
1431 
1432 static struct notifier_block gic_cpu_pm_notifier_block = {
1433 	.notifier_call = gic_cpu_pm_notifier,
1434 };
1435 
1436 static void gic_cpu_pm_init(void)
1437 {
1438 	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
1439 }
1440 
1441 #else
1442 static inline void gic_cpu_pm_init(void) { }
1443 #endif /* CONFIG_CPU_PM */
1444 
1445 static struct irq_chip gic_chip = {
1446 	.name			= "GICv3",
1447 	.irq_mask		= gic_mask_irq,
1448 	.irq_unmask		= gic_unmask_irq,
1449 	.irq_eoi		= gic_eoi_irq,
1450 	.irq_set_type		= gic_set_type,
1451 	.irq_set_affinity	= gic_set_affinity,
1452 	.irq_retrigger          = gic_retrigger,
1453 	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
1454 	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
1455 	.irq_nmi_setup		= gic_irq_nmi_setup,
1456 	.irq_nmi_teardown	= gic_irq_nmi_teardown,
1457 	.ipi_send_mask		= gic_ipi_send_mask,
1458 	.flags			= IRQCHIP_SET_TYPE_MASKED |
1459 				  IRQCHIP_SKIP_SET_WAKE |
1460 				  IRQCHIP_MASK_ON_SUSPEND,
1461 };
1462 
1463 static struct irq_chip gic_eoimode1_chip = {
1464 	.name			= "GICv3",
1465 	.irq_mask		= gic_eoimode1_mask_irq,
1466 	.irq_unmask		= gic_unmask_irq,
1467 	.irq_eoi		= gic_eoimode1_eoi_irq,
1468 	.irq_set_type		= gic_set_type,
1469 	.irq_set_affinity	= gic_set_affinity,
1470 	.irq_retrigger          = gic_retrigger,
1471 	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
1472 	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
1473 	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
1474 	.irq_nmi_setup		= gic_irq_nmi_setup,
1475 	.irq_nmi_teardown	= gic_irq_nmi_teardown,
1476 	.ipi_send_mask		= gic_ipi_send_mask,
1477 	.flags			= IRQCHIP_SET_TYPE_MASKED |
1478 				  IRQCHIP_SKIP_SET_WAKE |
1479 				  IRQCHIP_MASK_ON_SUSPEND,
1480 };
1481 
1482 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
1483 			      irq_hw_number_t hw)
1484 {
1485 	struct irq_chip *chip = &gic_chip;
1486 	struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
1487 
1488 	if (static_branch_likely(&supports_deactivate_key))
1489 		chip = &gic_eoimode1_chip;
1490 
1491 	switch (__get_intid_range(hw)) {
1492 	case SGI_RANGE:
1493 	case PPI_RANGE:
1494 	case EPPI_RANGE:
1495 		irq_set_percpu_devid(irq);
1496 		irq_domain_set_info(d, irq, hw, chip, d->host_data,
1497 				    handle_percpu_devid_irq, NULL, NULL);
1498 		break;
1499 
1500 	case SPI_RANGE:
1501 	case ESPI_RANGE:
1502 		irq_domain_set_info(d, irq, hw, chip, d->host_data,
1503 				    handle_fasteoi_irq, NULL, NULL);
1504 		irq_set_probe(irq);
1505 		irqd_set_single_target(irqd);
1506 		break;
1507 
1508 	case LPI_RANGE:
1509 		if (!gic_dist_supports_lpis())
1510 			return -EPERM;
1511 		irq_domain_set_info(d, irq, hw, chip, d->host_data,
1512 				    handle_fasteoi_irq, NULL, NULL);
1513 		break;
1514 
1515 	default:
1516 		return -EPERM;
1517 	}
1518 
1519 	/* Prevents SW retriggers which mess up the ACK/EOI ordering */
1520 	irqd_set_handle_enforce_irqctx(irqd);
1521 	return 0;
1522 }
1523 
1524 static int gic_irq_domain_translate(struct irq_domain *d,
1525 				    struct irq_fwspec *fwspec,
1526 				    unsigned long *hwirq,
1527 				    unsigned int *type)
1528 {
1529 	if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
1530 		*hwirq = fwspec->param[0];
1531 		*type = IRQ_TYPE_EDGE_RISING;
1532 		return 0;
1533 	}
1534 
1535 	if (is_of_node(fwspec->fwnode)) {
1536 		if (fwspec->param_count < 3)
1537 			return -EINVAL;
1538 
1539 		switch (fwspec->param[0]) {
1540 		case 0:			/* SPI */
1541 			*hwirq = fwspec->param[1] + 32;
1542 			break;
1543 		case 1:			/* PPI */
1544 			*hwirq = fwspec->param[1] + 16;
1545 			break;
1546 		case 2:			/* ESPI */
1547 			*hwirq = fwspec->param[1] + ESPI_BASE_INTID;
1548 			break;
1549 		case 3:			/* EPPI */
1550 			*hwirq = fwspec->param[1] + EPPI_BASE_INTID;
1551 			break;
1552 		case GIC_IRQ_TYPE_LPI:	/* LPI */
1553 			*hwirq = fwspec->param[1];
1554 			break;
1555 		case GIC_IRQ_TYPE_PARTITION:
1556 			*hwirq = fwspec->param[1];
1557 			if (fwspec->param[1] >= 16)
1558 				*hwirq += EPPI_BASE_INTID - 16;
1559 			else
1560 				*hwirq += 16;
1561 			break;
1562 		default:
1563 			return -EINVAL;
1564 		}
1565 
1566 		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1567 
1568 		/*
1569 		 * Make it clear that broken DTs are... broken.
1570 		 * Partitioned PPIs are an unfortunate exception.
1571 		 */
1572 		WARN_ON(*type == IRQ_TYPE_NONE &&
1573 			fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
1574 		return 0;
1575 	}
1576 
1577 	if (is_fwnode_irqchip(fwspec->fwnode)) {
1578 		if (fwspec->param_count != 2)
1579 			return -EINVAL;
1580 
1581 		if (fwspec->param[0] < 16) {
1582 			pr_err(FW_BUG "Illegal GSI%d translation request\n",
1583 			       fwspec->param[0]);
1584 			return -EINVAL;
1585 		}
1586 
1587 		*hwirq = fwspec->param[0];
1588 		*type = fwspec->param[1];
1589 
1590 		WARN_ON(*type == IRQ_TYPE_NONE);
1591 		return 0;
1592 	}
1593 
1594 	return -EINVAL;
1595 }
1596 
1597 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1598 				unsigned int nr_irqs, void *arg)
1599 {
1600 	int i, ret;
1601 	irq_hw_number_t hwirq;
1602 	unsigned int type = IRQ_TYPE_NONE;
1603 	struct irq_fwspec *fwspec = arg;
1604 
1605 	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
1606 	if (ret)
1607 		return ret;
1608 
1609 	for (i = 0; i < nr_irqs; i++) {
1610 		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
1611 		if (ret)
1612 			return ret;
1613 	}
1614 
1615 	return 0;
1616 }
1617 
1618 static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1619 				unsigned int nr_irqs)
1620 {
1621 	int i;
1622 
1623 	for (i = 0; i < nr_irqs; i++) {
1624 		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
1625 		irq_set_handler(virq + i, NULL);
1626 		irq_domain_reset_irq_data(d);
1627 	}
1628 }
1629 
1630 static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
1631 				      irq_hw_number_t hwirq)
1632 {
1633 	enum gic_intid_range range;
1634 
1635 	if (!gic_data.ppi_descs)
1636 		return false;
1637 
1638 	if (!is_of_node(fwspec->fwnode))
1639 		return false;
1640 
1641 	if (fwspec->param_count < 4 || !fwspec->param[3])
1642 		return false;
1643 
1644 	range = __get_intid_range(hwirq);
1645 	if (range != PPI_RANGE && range != EPPI_RANGE)
1646 		return false;
1647 
1648 	return true;
1649 }
1650 
1651 static int gic_irq_domain_select(struct irq_domain *d,
1652 				 struct irq_fwspec *fwspec,
1653 				 enum irq_domain_bus_token bus_token)
1654 {
1655 	unsigned int type, ret, ppi_idx;
1656 	irq_hw_number_t hwirq;
1657 
1658 	/* Not for us */
1659 	if (fwspec->fwnode != d->fwnode)
1660 		return 0;
1661 
1662 	/* If this is not DT, then we have a single domain */
1663 	if (!is_of_node(fwspec->fwnode))
1664 		return 1;
1665 
1666 	ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
1667 	if (WARN_ON_ONCE(ret))
1668 		return 0;
1669 
1670 	if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
1671 		return d == gic_data.domain;
1672 
1673 	/*
1674 	 * If this is a PPI and we have a 4th (non-null) parameter,
1675 	 * then we need to match the partition domain.
1676 	 */
1677 	ppi_idx = __gic_get_ppi_index(hwirq);
1678 	return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
1679 }
1680 
1681 static const struct irq_domain_ops gic_irq_domain_ops = {
1682 	.translate = gic_irq_domain_translate,
1683 	.alloc = gic_irq_domain_alloc,
1684 	.free = gic_irq_domain_free,
1685 	.select = gic_irq_domain_select,
1686 };
1687 
1688 static int partition_domain_translate(struct irq_domain *d,
1689 				      struct irq_fwspec *fwspec,
1690 				      unsigned long *hwirq,
1691 				      unsigned int *type)
1692 {
1693 	unsigned long ppi_intid;
1694 	struct device_node *np;
1695 	unsigned int ppi_idx;
1696 	int ret;
1697 
1698 	if (!gic_data.ppi_descs)
1699 		return -ENOMEM;
1700 
1701 	np = of_find_node_by_phandle(fwspec->param[3]);
1702 	if (WARN_ON(!np))
1703 		return -EINVAL;
1704 
1705 	ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
1706 	if (WARN_ON_ONCE(ret))
1707 		return 0;
1708 
1709 	ppi_idx = __gic_get_ppi_index(ppi_intid);
1710 	ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
1711 				     of_node_to_fwnode(np));
1712 	if (ret < 0)
1713 		return ret;
1714 
1715 	*hwirq = ret;
1716 	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1717 
1718 	return 0;
1719 }
1720 
1721 static const struct irq_domain_ops partition_domain_ops = {
1722 	.translate = partition_domain_translate,
1723 	.select = gic_irq_domain_select,
1724 };
1725 
1726 static bool gic_enable_quirk_msm8996(void *data)
1727 {
1728 	struct gic_chip_data *d = data;
1729 
1730 	d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
1731 
1732 	return true;
1733 }
1734 
1735 static bool gic_enable_quirk_mtk_gicr(void *data)
1736 {
1737 	struct gic_chip_data *d = data;
1738 
1739 	d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE;
1740 
1741 	return true;
1742 }
1743 
1744 static bool gic_enable_quirk_cavium_38539(void *data)
1745 {
1746 	struct gic_chip_data *d = data;
1747 
1748 	d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
1749 
1750 	return true;
1751 }
1752 
1753 static bool gic_enable_quirk_hip06_07(void *data)
1754 {
1755 	struct gic_chip_data *d = data;
1756 
1757 	/*
1758 	 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
1759 	 * not being an actual ARM implementation). The saving grace is
1760 	 * that GIC-600 doesn't have ESPI, so nothing to do in that case.
1761 	 * HIP07 doesn't even have a proper IIDR, and still pretends to
1762 	 * have ESPI. In both cases, put them right.
1763 	 */
1764 	if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
1765 		/* Zero both ESPI and the RES0 field next to it... */
1766 		d->rdists.gicd_typer &= ~GENMASK(9, 8);
1767 		return true;
1768 	}
1769 
1770 	return false;
1771 }
1772 
1773 #define T241_CHIPN_MASK		GENMASK_ULL(45, 44)
1774 #define T241_CHIP_GICDA_OFFSET	0x1580000
1775 #define SMCCC_SOC_ID_T241	0x036b0241
1776 
1777 static bool gic_enable_quirk_nvidia_t241(void *data)
1778 {
1779 	s32 soc_id = arm_smccc_get_soc_id_version();
1780 	unsigned long chip_bmask = 0;
1781 	phys_addr_t phys;
1782 	u32 i;
1783 
1784 	/* Check JEP106 code for NVIDIA T241 chip (036b:0241) */
1785 	if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241))
1786 		return false;
1787 
1788 	/* Find the chips based on GICR regions PHYS addr */
1789 	for (i = 0; i < gic_data.nr_redist_regions; i++) {
1790 		chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK,
1791 				  (u64)gic_data.redist_regions[i].phys_base));
1792 	}
1793 
1794 	if (hweight32(chip_bmask) < 3)
1795 		return false;
1796 
1797 	/* Setup GICD alias regions */
1798 	for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) {
1799 		if (chip_bmask & BIT(i)) {
1800 			phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET;
1801 			phys |= FIELD_PREP(T241_CHIPN_MASK, i);
1802 			t241_dist_base_alias[i] = ioremap(phys, SZ_64K);
1803 			WARN_ON_ONCE(!t241_dist_base_alias[i]);
1804 		}
1805 	}
1806 	static_branch_enable(&gic_nvidia_t241_erratum);
1807 	return true;
1808 }
1809 
1810 static bool gic_enable_quirk_asr8601(void *data)
1811 {
1812 	struct gic_chip_data *d = data;
1813 
1814 	d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001;
1815 
1816 	return true;
1817 }
1818 
1819 static const struct gic_quirk gic_quirks[] = {
1820 	{
1821 		.desc	= "GICv3: Qualcomm MSM8996 broken firmware",
1822 		.compatible = "qcom,msm8996-gic-v3",
1823 		.init	= gic_enable_quirk_msm8996,
1824 	},
1825 	{
1826 		.desc	= "GICv3: ASR erratum 8601001",
1827 		.compatible = "asr,asr8601-gic-v3",
1828 		.init	= gic_enable_quirk_asr8601,
1829 	},
1830 	{
1831 		.desc	= "GICv3: Mediatek Chromebook GICR save problem",
1832 		.property = "mediatek,broken-save-restore-fw",
1833 		.init	= gic_enable_quirk_mtk_gicr,
1834 	},
1835 	{
1836 		.desc	= "GICv3: HIP06 erratum 161010803",
1837 		.iidr	= 0x0204043b,
1838 		.mask	= 0xffffffff,
1839 		.init	= gic_enable_quirk_hip06_07,
1840 	},
1841 	{
1842 		.desc	= "GICv3: HIP07 erratum 161010803",
1843 		.iidr	= 0x00000000,
1844 		.mask	= 0xffffffff,
1845 		.init	= gic_enable_quirk_hip06_07,
1846 	},
1847 	{
1848 		/*
1849 		 * Reserved register accesses generate a Synchronous
1850 		 * External Abort. This erratum applies to:
1851 		 * - ThunderX: CN88xx
1852 		 * - OCTEON TX: CN83xx, CN81xx
1853 		 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
1854 		 */
1855 		.desc	= "GICv3: Cavium erratum 38539",
1856 		.iidr	= 0xa000034c,
1857 		.mask	= 0xe8f00fff,
1858 		.init	= gic_enable_quirk_cavium_38539,
1859 	},
1860 	{
1861 		.desc	= "GICv3: NVIDIA erratum T241-FABRIC-4",
1862 		.iidr	= 0x0402043b,
1863 		.mask	= 0xffffffff,
1864 		.init	= gic_enable_quirk_nvidia_t241,
1865 	},
1866 	{
1867 	}
1868 };
1869 
1870 static void gic_enable_nmi_support(void)
1871 {
1872 	int i;
1873 
1874 	if (!gic_prio_masking_enabled())
1875 		return;
1876 
1877 	if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) {
1878 		pr_warn("Skipping NMI enable due to firmware issues\n");
1879 		return;
1880 	}
1881 
1882 	ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
1883 	if (!ppi_nmi_refs)
1884 		return;
1885 
1886 	for (i = 0; i < gic_data.ppi_nr; i++)
1887 		refcount_set(&ppi_nmi_refs[i], 0);
1888 
1889 	pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
1890 		gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");
1891 
1892 	/*
1893 	 * How priority values are used by the GIC depends on two things:
1894 	 * the security state of the GIC (controlled by the GICD_CTLR.DS bit)
1895 	 * and if Group 0 interrupts can be delivered to Linux in the non-secure
1896 	 * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
1897 	 * ICC_PMR_EL1 register and the priority that software assigns to
1898 	 * interrupts:
1899 	 *
1900 	 * GICD_CTLR.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
1901 	 * -----------------------------------------------------------
1902 	 *      1       |      -      |  unchanged  |    unchanged
1903 	 * -----------------------------------------------------------
1904 	 *      0       |      1      |  non-secure |    non-secure
1905 	 * -----------------------------------------------------------
1906 	 *      0       |      0      |  unchanged  |    non-secure
1907 	 *
1908 	 * where non-secure means that the value is right-shifted by one and the
1909 	 * MSB bit set, to make it fit in the non-secure priority range.
1910 	 *
1911 	 * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
1912 	 * are both either modified or unchanged, we can use the same set of
1913 	 * priorities.
1914 	 *
1915 	 * In the last case, where only the interrupt priorities are modified to
1916 	 * be in the non-secure range, we use a different PMR value to mask IRQs
1917 	 * and the rest of the values that we use remain unchanged.
1918 	 */
1919 	if (gic_has_group0() && !gic_dist_security_disabled())
1920 		static_branch_enable(&gic_nonsecure_priorities);
1921 
1922 	static_branch_enable(&supports_pseudo_nmis);
1923 
1924 	if (static_branch_likely(&supports_deactivate_key))
1925 		gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1926 	else
1927 		gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1928 }
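
/*
 * Illustrative sketch (not part of the driver): the "non-secure" view used
 * in the table above right-shifts a priority by one and sets its MSB. The
 * helper below is an example only; its name is made up.
 */
#if 0	/* example only, never built */
static u8 gic_example_nonsecure_view(u8 prio)
{
	/* e.g. 0xa0 is presented as 0xd0, 0x00 as 0x80 */
	return (prio >> 1) | 0x80;
}
#endif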
1929 
1930 static int __init gic_init_bases(phys_addr_t dist_phys_base,
1931 				 void __iomem *dist_base,
1932 				 struct redist_region *rdist_regs,
1933 				 u32 nr_redist_regions,
1934 				 u64 redist_stride,
1935 				 struct fwnode_handle *handle)
1936 {
1937 	u32 typer;
1938 	int err;
1939 
1940 	if (!is_hyp_mode_available())
1941 		static_branch_disable(&supports_deactivate_key);
1942 
1943 	if (static_branch_likely(&supports_deactivate_key))
1944 		pr_info("GIC: Using split EOI/Deactivate mode\n");
1945 
1946 	gic_data.fwnode = handle;
1947 	gic_data.dist_phys_base = dist_phys_base;
1948 	gic_data.dist_base = dist_base;
1949 	gic_data.redist_regions = rdist_regs;
1950 	gic_data.nr_redist_regions = nr_redist_regions;
1951 	gic_data.redist_stride = redist_stride;
1952 
1953 	/*
1954 	 * Find out how many interrupts are supported.
1955 	 */
1956 	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
1957 	gic_data.rdists.gicd_typer = typer;
1958 
1959 	gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
1960 			  gic_quirks, &gic_data);
1961 
1962 	pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
1963 	pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
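	/*
	 * Illustrative note: GIC_LINE_NR comes from GICD_TYPER.ITLinesNumber as
	 * 32 * (ITLinesNumber + 1), capped at 1020. With ITLinesNumber == 6,
	 * for example, this prints "192 SPIs implemented" (224 - 32).
	 */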
1964 
1965 	/*
1966 	 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
1967 	 * architecture spec (which says that reserved registers are RES0).
1968 	 */
1969 	if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
1970 		gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
1971 
1972 	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
1973 						 &gic_data);
1974 	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
1975 	if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) {
1976 		/* GICv4.x features are only enabled when the T241-FABRIC-4 erratum is absent */
1977 		gic_data.rdists.has_rvpeid = true;
1978 		gic_data.rdists.has_vlpis = true;
1979 		gic_data.rdists.has_direct_lpi = true;
1980 		gic_data.rdists.has_vpend_valid_dirty = true;
1981 	}
1982 
1983 	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
1984 		err = -ENOMEM;
1985 		goto out_free;
1986 	}
1987 
1988 	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
1989 
1990 	gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
1991 
1992 	if (typer & GICD_TYPER_MBIS) {
1993 		err = mbi_init(handle, gic_data.domain);
1994 		if (err)
1995 			pr_err("Failed to initialize MBIs\n");
1996 	}
1997 
1998 	set_handle_irq(gic_handle_irq);
1999 
2000 	gic_update_rdist_properties();
2001 
2002 	gic_dist_init();
2003 	gic_cpu_init();
2004 	gic_smp_init();
2005 	gic_cpu_pm_init();
2006 
2007 	if (gic_dist_supports_lpis()) {
2008 		its_init(handle, &gic_data.rdists, gic_data.domain);
2009 		its_cpu_init();
2010 		its_lpi_memreserve_init();
2011 	} else {
2012 		if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
2013 			gicv2m_init(handle, gic_data.domain);
2014 	}
2015 
2016 	gic_enable_nmi_support();
2017 
2018 	return 0;
2019 
2020 out_free:
2021 	if (gic_data.domain)
2022 		irq_domain_remove(gic_data.domain);
2023 	free_percpu(gic_data.rdists.rdist);
2024 	return err;
2025 }
2026 
2027 static int __init gic_validate_dist_version(void __iomem *dist_base)
2028 {
2029 	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
2030 
2031 	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
2032 		return -ENODEV;
2033 
2034 	return 0;
2035 }
2036 
2037 /* Create all possible partitions at boot time */
2038 static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
2039 {
2040 	struct device_node *parts_node, *child_part;
2041 	int part_idx = 0, i;
2042 	int nr_parts;
2043 	struct partition_affinity *parts;
2044 
2045 	parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
2046 	if (!parts_node)
2047 		return;
2048 
2049 	gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
2050 	if (!gic_data.ppi_descs)
2051 		goto out_put_node;
2052 
2053 	nr_parts = of_get_child_count(parts_node);
2054 
2055 	if (!nr_parts)
2056 		goto out_put_node;
2057 
2058 	parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
2059 	if (WARN_ON(!parts))
2060 		goto out_put_node;
2061 
2062 	for_each_child_of_node(parts_node, child_part) {
2063 		struct partition_affinity *part;
2064 		int n;
2065 
2066 		part = &parts[part_idx];
2067 
2068 		part->partition_id = of_node_to_fwnode(child_part);
2069 
2070 		pr_info("GIC: PPI partition %pOFn[%d] { ",
2071 			child_part, part_idx);
2072 
2073 		n = of_property_count_elems_of_size(child_part, "affinity",
2074 						    sizeof(u32));
2075 		WARN_ON(n <= 0);
2076 
2077 		for (i = 0; i < n; i++) {
2078 			int err, cpu;
2079 			u32 cpu_phandle;
2080 			struct device_node *cpu_node;
2081 
2082 			err = of_property_read_u32_index(child_part, "affinity",
2083 							 i, &cpu_phandle);
2084 			if (WARN_ON(err))
2085 				continue;
2086 
2087 			cpu_node = of_find_node_by_phandle(cpu_phandle);
2088 			if (WARN_ON(!cpu_node))
2089 				continue;
2090 
2091 			cpu = of_cpu_node_to_id(cpu_node);
2092 			if (WARN_ON(cpu < 0)) {
2093 				of_node_put(cpu_node);
2094 				continue;
2095 			}
2096 
2097 			pr_cont("%pOF[%d] ", cpu_node, cpu);
2098 
2099 			cpumask_set_cpu(cpu, &part->mask);
2100 			of_node_put(cpu_node);
2101 		}
2102 
2103 		pr_cont("}\n");
2104 		part_idx++;
2105 	}
2106 
2107 	for (i = 0; i < gic_data.ppi_nr; i++) {
2108 		unsigned int irq;
2109 		struct partition_desc *desc;
2110 		struct irq_fwspec ppi_fwspec = {
2111 			.fwnode		= gic_data.fwnode,
2112 			.param_count	= 3,
2113 			.param		= {
2114 				[0]	= GIC_IRQ_TYPE_PARTITION,
2115 				[1]	= i,
2116 				[2]	= IRQ_TYPE_NONE,
2117 			},
2118 		};
2119 
2120 		irq = irq_create_fwspec_mapping(&ppi_fwspec);
2121 		if (WARN_ON(!irq))
2122 			continue;
2123 		desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
2124 					     irq, &partition_domain_ops);
2125 		if (WARN_ON(!desc))
2126 			continue;
2127 
2128 		gic_data.ppi_descs[i] = desc;
2129 	}
2130 
2131 out_put_node:
2132 	of_node_put(parts_node);
2133 }
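
/*
 * Illustrative DT fragment for the "ppi-partitions" layout parsed above
 * (a sketch only; node and CPU labels are made up):
 *
 *	ppi-partitions {
 *		part0: interrupt-partition-0 {
 *			affinity = <&cpu0 &cpu2>;
 *		};
 *		part1: interrupt-partition-1 {
 *			affinity = <&cpu1 &cpu3>;
 *		};
 *	};
 */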
2134 
2135 static void __init gic_of_setup_kvm_info(struct device_node *node)
2136 {
2137 	int ret;
2138 	struct resource r;
2139 	u32 gicv_idx;
2140 
2141 	gic_v3_kvm_info.type = GIC_V3;
2142 
2143 	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
2144 	if (!gic_v3_kvm_info.maint_irq)
2145 		return;
2146 
2147 	if (of_property_read_u32(node, "#redistributor-regions",
2148 				 &gicv_idx))
2149 		gicv_idx = 1;
2150 
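	/*
	 * Illustrative note: with the default single redistributor region the
	 * "reg" entries are GICD, GICR, GICC, GICH, GICV, so GICV ends up at
	 * index 1 + 3 = 4.
	 */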
2151 	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
2152 	ret = of_address_to_resource(node, gicv_idx, &r);
2153 	if (!ret)
2154 		gic_v3_kvm_info.vcpu = r;
2155 
2156 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
2157 	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
2158 	vgic_set_kvm_info(&gic_v3_kvm_info);
2159 }
2160 
2161 static void gic_request_region(resource_size_t base, resource_size_t size,
2162 			       const char *name)
2163 {
2164 	if (!request_mem_region(base, size, name))
2165 		pr_warn_once(FW_BUG "%s region %pa has overlapping address\n",
2166 			     name, &base);
2167 }
2168 
2169 static void __iomem *gic_of_iomap(struct device_node *node, int idx,
2170 				  const char *name, struct resource *res)
2171 {
2172 	void __iomem *base;
2173 	int ret;
2174 
2175 	ret = of_address_to_resource(node, idx, res);
2176 	if (ret)
2177 		return IOMEM_ERR_PTR(ret);
2178 
2179 	gic_request_region(res->start, resource_size(res), name);
2180 	base = of_iomap(node, idx);
2181 
2182 	return base ?: IOMEM_ERR_PTR(-ENOMEM);
2183 }
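
/*
 * Note for callers: gic_of_iomap() never returns NULL; failures come back as
 * ERR_PTR() values, hence the IS_ERR() checks in gic_of_init() below.
 */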
2184 
2185 static int __init gic_of_init(struct device_node *node, struct device_node *parent)
2186 {
2187 	phys_addr_t dist_phys_base;
2188 	void __iomem *dist_base;
2189 	struct redist_region *rdist_regs;
2190 	struct resource res;
2191 	u64 redist_stride;
2192 	u32 nr_redist_regions;
2193 	int err, i;
2194 
2195 	dist_base = gic_of_iomap(node, 0, "GICD", &res);
2196 	if (IS_ERR(dist_base)) {
2197 		pr_err("%pOF: unable to map gic dist registers\n", node);
2198 		return PTR_ERR(dist_base);
2199 	}
2200 
2201 	dist_phys_base = res.start;
2202 
2203 	err = gic_validate_dist_version(dist_base);
2204 	if (err) {
2205 		pr_err("%pOF: no distributor detected, giving up\n", node);
2206 		goto out_unmap_dist;
2207 	}
2208 
2209 	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
2210 		nr_redist_regions = 1;
2211 
2212 	rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
2213 			     GFP_KERNEL);
2214 	if (!rdist_regs) {
2215 		err = -ENOMEM;
2216 		goto out_unmap_dist;
2217 	}
2218 
2219 	for (i = 0; i < nr_redist_regions; i++) {
2220 		rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
2221 		if (IS_ERR(rdist_regs[i].redist_base)) {
2222 			pr_err("%pOF: couldn't map region %d\n", node, i);
2223 			err = -ENODEV;
2224 			goto out_unmap_rdist;
2225 		}
2226 		rdist_regs[i].phys_base = res.start;
2227 	}
2228 
2229 	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
2230 		redist_stride = 0;
2231 
2232 	gic_enable_of_quirks(node, gic_quirks, &gic_data);
2233 
2234 	err = gic_init_bases(dist_phys_base, dist_base, rdist_regs,
2235 			     nr_redist_regions, redist_stride, &node->fwnode);
2236 	if (err)
2237 		goto out_unmap_rdist;
2238 
2239 	gic_populate_ppi_partitions(node);
2240 
2241 	if (static_branch_likely(&supports_deactivate_key))
2242 		gic_of_setup_kvm_info(node);
2243 	return 0;
2244 
2245 out_unmap_rdist:
2246 	for (i = 0; i < nr_redist_regions; i++)
2247 		if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
2248 			iounmap(rdist_regs[i].redist_base);
2249 	kfree(rdist_regs);
2250 out_unmap_dist:
2251 	iounmap(dist_base);
2252 	return err;
2253 }
2254 
2255 IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
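
/*
 * Illustrative DT fragment for the properties parsed in gic_of_init() above
 * (a sketch only; addresses, sizes and the stride value are made up):
 *
 *	gic: interrupt-controller@2f000000 {
 *		compatible = "arm,gic-v3";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		#redistributor-regions = <2>;
 *		redistributor-stride = <0x0 0x20000>;
 *		reg = <0x2f000000 0x10000>,	// GICD
 *		      <0x2f100000 0x100000>,	// GICR region 0
 *		      <0x2f200000 0x100000>;	// GICR region 1
 *	};
 */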
2256 
2257 #ifdef CONFIG_ACPI
2258 static struct
2259 {
2260 	void __iomem *dist_base;
2261 	struct redist_region *redist_regs;
2262 	u32 nr_redist_regions;
2263 	bool single_redist;
2264 	int enabled_rdists;
2265 	u32 maint_irq;
2266 	int maint_irq_mode;
2267 	phys_addr_t vcpu_base;
2268 } acpi_data __initdata;
2269 
2270 static void __init
2271 gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
2272 {
2273 	static int count;
2274 
2275 	acpi_data.redist_regs[count].phys_base = phys_base;
2276 	acpi_data.redist_regs[count].redist_base = redist_base;
2277 	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
2278 	count++;
2279 }
2280 
2281 static int __init
2282 gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
2283 			   const unsigned long end)
2284 {
2285 	struct acpi_madt_generic_redistributor *redist =
2286 			(struct acpi_madt_generic_redistributor *)header;
2287 	void __iomem *redist_base;
2288 
2289 	redist_base = ioremap(redist->base_address, redist->length);
2290 	if (!redist_base) {
2291 		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
2292 		return -ENOMEM;
2293 	}
2294 	gic_request_region(redist->base_address, redist->length, "GICR");
2295 
2296 	gic_acpi_register_redist(redist->base_address, redist_base);
2297 	return 0;
2298 }
2299 
2300 static int __init
2301 gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
2302 			 const unsigned long end)
2303 {
2304 	struct acpi_madt_generic_interrupt *gicc =
2305 				(struct acpi_madt_generic_interrupt *)header;
2306 	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
2307 	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
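	/*
	 * A GICv4 redistributor has four 64K frames (RD, SGI, VLPI plus a
	 * reserved frame) whereas a GICv3 one has two, hence the size above.
	 */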
2308 	void __iomem *redist_base;
2309 
2310 	/* A GICC entry without ACPI_MADT_ENABLED is not usable, so skip it */
2311 	if (!(gicc->flags & ACPI_MADT_ENABLED))
2312 		return 0;
2313 
2314 	redist_base = ioremap(gicc->gicr_base_address, size);
2315 	if (!redist_base)
2316 		return -ENOMEM;
2317 	gic_request_region(gicc->gicr_base_address, size, "GICR");
2318 
2319 	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
2320 	return 0;
2321 }
2322 
2323 static int __init gic_acpi_collect_gicr_base(void)
2324 {
2325 	acpi_tbl_entry_handler redist_parser;
2326 	enum acpi_madt_type type;
2327 
2328 	if (acpi_data.single_redist) {
2329 		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
2330 		redist_parser = gic_acpi_parse_madt_gicc;
2331 	} else {
2332 		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
2333 		redist_parser = gic_acpi_parse_madt_redist;
2334 	}
2335 
2336 	/* Collect redistributor base addresses in GICR entries */
2337 	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
2338 		return 0;
2339 
2340 	pr_info("No valid GICR entries exist\n");
2341 	return -ENODEV;
2342 }
2343 
2344 static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
2345 				  const unsigned long end)
2346 {
2347 	/* Subtable presence means that redist exists, that's it */
2348 	return 0;
2349 }
2350 
2351 static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
2352 				      const unsigned long end)
2353 {
2354 	struct acpi_madt_generic_interrupt *gicc =
2355 				(struct acpi_madt_generic_interrupt *)header;
2356 
2357 	/*
2358 	 * If the GICC is enabled and has a valid GICR base address, the GICR
2359 	 * base is provided via the GICC.
2360 	 */
2361 	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
2362 		acpi_data.enabled_rdists++;
2363 		return 0;
2364 	}
2365 
2366 	/*
2367 	 * Firmware may legitimately pass a disabled GICC entry; don't treat it
2368 	 * as an error, just skip the entry instead of failing the probe.
2369 	 */
2370 	if (!(gicc->flags & ACPI_MADT_ENABLED))
2371 		return 0;
2372 
2373 	return -ENODEV;
2374 }
2375 
2376 static int __init gic_acpi_count_gicr_regions(void)
2377 {
2378 	int count;
2379 
2380 	/*
2381 	 * Count how many redistributor regions we have. Mixing redistributor
2382 	 * descriptions is not allowed: GICR and GICC subtables have to be
2383 	 * mutually exclusive.
2384 	 */
2385 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
2386 				      gic_acpi_match_gicr, 0);
2387 	if (count > 0) {
2388 		acpi_data.single_redist = false;
2389 		return count;
2390 	}
2391 
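	/*
	 * In the GICC case only entries that are enabled and advertise a GICR
	 * base are usable (counted in gic_acpi_match_gicc()), hence the switch
	 * to enabled_rdists below.
	 */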
2392 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2393 				      gic_acpi_match_gicc, 0);
2394 	if (count > 0) {
2395 		acpi_data.single_redist = true;
2396 		count = acpi_data.enabled_rdists;
2397 	}
2398 
2399 	return count;
2400 }
2401 
2402 static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
2403 					   struct acpi_probe_entry *ape)
2404 {
2405 	struct acpi_madt_generic_distributor *dist;
2406 	int count;
2407 
2408 	dist = (struct acpi_madt_generic_distributor *)header;
2409 	if (dist->version != ape->driver_data)
2410 		return false;
2411 
2412 	/* We need to count the GICR regions anyway, the sooner the better */
2413 	count = gic_acpi_count_gicr_regions();
2414 	if (count <= 0)
2415 		return false;
2416 
2417 	acpi_data.nr_redist_regions = count;
2418 	return true;
2419 }
2420 
2421 static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
2422 						const unsigned long end)
2423 {
2424 	struct acpi_madt_generic_interrupt *gicc =
2425 		(struct acpi_madt_generic_interrupt *)header;
2426 	int maint_irq_mode;
2427 	static bool first_madt = true;
2428 
2429 	/* Skip unusable CPUs */
2430 	if (!(gicc->flags & ACPI_MADT_ENABLED))
2431 		return 0;
2432 
2433 	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
2434 		ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
2435 
2436 	if (first_madt) {
2437 		first_madt = false;
2438 
2439 		acpi_data.maint_irq = gicc->vgic_interrupt;
2440 		acpi_data.maint_irq_mode = maint_irq_mode;
2441 		acpi_data.vcpu_base = gicc->gicv_base_address;
2442 
2443 		return 0;
2444 	}
2445 
2446 	/*
2447 	 * The maintenance interrupt and GICV should be the same for every CPU
2448 	 */
2449 	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
2450 	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
2451 	    (acpi_data.vcpu_base != gicc->gicv_base_address))
2452 		return -EINVAL;
2453 
2454 	return 0;
2455 }
2456 
2457 static bool __init gic_acpi_collect_virt_info(void)
2458 {
2459 	int count;
2460 
2461 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2462 				      gic_acpi_parse_virt_madt_gicc, 0);
2463 
2464 	return (count > 0);
2465 }
2466 
2467 #define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
2468 #define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
2469 #define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)
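/*
 * GICV is the GICv2-compatible virtual CPU interface, which is why the
 * GICv2-sized VCPU mapping is used for the KVM info below.
 */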
2470 
2471 static void __init gic_acpi_setup_kvm_info(void)
2472 {
2473 	int irq;
2474 
2475 	if (!gic_acpi_collect_virt_info()) {
2476 		pr_warn("Unable to get hardware information used for virtualization\n");
2477 		return;
2478 	}
2479 
2480 	gic_v3_kvm_info.type = GIC_V3;
2481 
2482 	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
2483 				acpi_data.maint_irq_mode,
2484 				ACPI_ACTIVE_HIGH);
2485 	if (irq <= 0)
2486 		return;
2487 
2488 	gic_v3_kvm_info.maint_irq = irq;
2489 
2490 	if (acpi_data.vcpu_base) {
2491 		struct resource *vcpu = &gic_v3_kvm_info.vcpu;
2492 
2493 		vcpu->flags = IORESOURCE_MEM;
2494 		vcpu->start = acpi_data.vcpu_base;
2495 		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
2496 	}
2497 
2498 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
2499 	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
2500 	vgic_set_kvm_info(&gic_v3_kvm_info);
2501 }
2502 
2503 static struct fwnode_handle *gsi_domain_handle;
2504 
2505 static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
2506 {
2507 	return gsi_domain_handle;
2508 }
2509 
2510 static int __init
2511 gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
2512 {
2513 	struct acpi_madt_generic_distributor *dist;
2514 	size_t size;
2515 	int i, err;
2516 
2517 	/* Get distributor base address */
2518 	dist = (struct acpi_madt_generic_distributor *)header;
2519 	acpi_data.dist_base = ioremap(dist->base_address,
2520 				      ACPI_GICV3_DIST_MEM_SIZE);
2521 	if (!acpi_data.dist_base) {
2522 		pr_err("Unable to map GICD registers\n");
2523 		return -ENOMEM;
2524 	}
2525 	gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");
2526 
2527 	err = gic_validate_dist_version(acpi_data.dist_base);
2528 	if (err) {
2529 		pr_err("No distributor detected at @%p, giving up\n",
2530 		       acpi_data.dist_base);
2531 		goto out_dist_unmap;
2532 	}
2533 
2534 	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
2535 	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
2536 	if (!acpi_data.redist_regs) {
2537 		err = -ENOMEM;
2538 		goto out_dist_unmap;
2539 	}
2540 
2541 	err = gic_acpi_collect_gicr_base();
2542 	if (err)
2543 		goto out_redist_unmap;
2544 
2545 	gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
2546 	if (!gsi_domain_handle) {
2547 		err = -ENOMEM;
2548 		goto out_redist_unmap;
2549 	}
2550 
2551 	err = gic_init_bases(dist->base_address, acpi_data.dist_base,
2552 			     acpi_data.redist_regs, acpi_data.nr_redist_regions,
2553 			     0, gsi_domain_handle);
2554 	if (err)
2555 		goto out_fwhandle_free;
2556 
2557 	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);
2558 
2559 	if (static_branch_likely(&supports_deactivate_key))
2560 		gic_acpi_setup_kvm_info();
2561 
2562 	return 0;
2563 
2564 out_fwhandle_free:
2565 	irq_domain_free_fwnode(gsi_domain_handle);
2566 out_redist_unmap:
2567 	for (i = 0; i < acpi_data.nr_redist_regions; i++)
2568 		if (acpi_data.redist_regs[i].redist_base)
2569 			iounmap(acpi_data.redist_regs[i].redist_base);
2570 	kfree(acpi_data.redist_regs);
2571 out_dist_unmap:
2572 	iounmap(acpi_data.dist_base);
2573 	return err;
2574 }
2575 IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2576 		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
2577 		     gic_acpi_init);
2578 IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2579 		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
2580 		     gic_acpi_init);
2581 IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2582 		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
2583 		     gic_acpi_init);
2584 #endif
2585