xref: /openbmc/linux/arch/powerpc/sysdev/xive/common.c (revision 17df41fe)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright 2016,2017 IBM Corporation.
4  */
5 
6 #define pr_fmt(fmt) "xive: " fmt
7 
8 #include <linux/types.h>
9 #include <linux/threads.h>
10 #include <linux/kernel.h>
11 #include <linux/irq.h>
12 #include <linux/debugfs.h>
13 #include <linux/smp.h>
14 #include <linux/interrupt.h>
15 #include <linux/seq_file.h>
16 #include <linux/init.h>
17 #include <linux/cpu.h>
18 #include <linux/of.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/msi.h>
22 #include <linux/vmalloc.h>
23 
24 #include <asm/debugfs.h>
25 #include <asm/prom.h>
26 #include <asm/io.h>
27 #include <asm/smp.h>
28 #include <asm/machdep.h>
29 #include <asm/irq.h>
30 #include <asm/errno.h>
31 #include <asm/xive.h>
32 #include <asm/xive-regs.h>
33 #include <asm/xmon.h>
34 
35 #include "xive-internal.h"
36 
37 #undef DEBUG_FLUSH
38 #undef DEBUG_ALL
39 
40 #ifdef DEBUG_ALL
41 #define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
42 					 smp_processor_id(), ## __VA_ARGS__)
43 #else
44 #define DBG_VERBOSE(fmt...)	do { } while (0)
45 #endif
46 
47 bool __xive_enabled;
48 EXPORT_SYMBOL_GPL(__xive_enabled);
49 bool xive_cmdline_disabled;
50 
51 /* We use only one priority for now */
52 static u8 xive_irq_priority;
53 
54 /* TIMA exported to KVM */
55 void __iomem *xive_tima;
56 EXPORT_SYMBOL_GPL(xive_tima);
57 u32 xive_tima_offset;
58 
59 /* Backend ops */
60 static const struct xive_ops *xive_ops;
61 
62 /* Our global interrupt domain */
63 static struct irq_domain *xive_irq_domain;
64 
65 #ifdef CONFIG_SMP
66 /* The IPIs use the same logical irq number when on the same chip */
67 static struct xive_ipi_desc {
68 	unsigned int irq;
69 	char name[16];
70 } *xive_ipis;
71 
72 /*
73  * Use early_cpu_to_node() for hot-plugged CPUs
74  */
75 static unsigned int xive_ipi_cpu_to_irq(unsigned int cpu)
76 {
77 	return xive_ipis[early_cpu_to_node(cpu)].irq;
78 }
79 #endif
80 
81 /* Xive state for each CPU */
82 static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);
83 
84 /* An invalid CPU target */
85 #define XIVE_INVALID_TARGET	(-1)
86 
87 /*
88  * Read the next entry in a queue and return its content if it's
89  * valid, or 0 if there is no new entry.
90  *
91  * The queue pointer is moved forward unless "just_peek" is set
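 *
 * The top bit (31) of each entry is a generation/valid bit: an entry
 * is considered new only when that bit differs from the consumer's
 * current "toggle". When the index wraps back to 0, the toggle is
 * flipped so that entries left over from the previous pass read as
 * already consumed.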
92  */
93 static u32 xive_read_eq(struct xive_q *q, bool just_peek)
94 {
95 	u32 cur;
96 
97 	if (!q->qpage)
98 		return 0;
99 	cur = be32_to_cpup(q->qpage + q->idx);
100 
101 	/* Check valid bit (31) vs current toggle polarity */
102 	if ((cur >> 31) == q->toggle)
103 		return 0;
104 
105 	/* If consuming from the queue ... */
106 	if (!just_peek) {
107 		/* Next entry */
108 		q->idx = (q->idx + 1) & q->msk;
109 
110 		/* Wrap around: flip valid toggle */
111 		if (q->idx == 0)
112 			q->toggle ^= 1;
113 	}
114 	/* Mask out the valid bit (31) */
115 	return cur & 0x7fffffff;
116 }
117 
118 /*
119  * Scans all the queues that may have interrupts in them
120  * (based on "pending_prio") in priority order until an
121  * interrupt is found or all the queues are empty.
122  *
123  * Then updates the CPPR (Current Processor Priority
124  * Register) based on the most favored interrupt found
125  * (0xff if none) and returns what was found (0 if none).
126  *
127  * If just_peek is set, return the most favored pending
128  * interrupt if any but don't update the queue pointers.
129  *
130  * Note: This function can operate generically on any number
131  * of queues (up to 8). The current implementation of the XIVE
132  * driver only uses a single queue however.
133  *
134  * Note2: This will also "flush" the "pending_count" of a queue
135  * into the "count" when that queue is observed to be empty.
136  * This is used to keep track of the number of interrupts
137  * targeting a queue. When an interrupt is moved away from
138  * a queue, we only decrement that queue count once the queue
139  * has been observed empty to avoid races.
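 *
 * As an illustration: if priorities 4 and 6 are pending, pending_prio
 * is 0x50, ffs() - 1 selects priority 4 first, and the CPPR ends up
 * set to the priority of the interrupt that is returned (or 0xff when
 * all the queues turn out to be empty).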
140  */
141 static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
142 {
143 	u32 irq = 0;
144 	u8 prio = 0;
145 
146 	/* Find highest pending priority */
147 	while (xc->pending_prio != 0) {
148 		struct xive_q *q;
149 
150 		prio = ffs(xc->pending_prio) - 1;
151 		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);
152 
153 		/* Try to fetch */
154 		irq = xive_read_eq(&xc->queue[prio], just_peek);
155 
156 		/* Found something ? That's it */
157 		if (irq) {
158 			if (just_peek || irq_to_desc(irq))
159 				break;
160 			/*
161 			 * We should never get here; if we do then we must
162 			 * have failed to synchronize the interrupt properly
163 			 * when shutting it down.
164 			 */
165 			pr_crit("got interrupt %d without descriptor, dropping\n",
166 				irq);
167 			WARN_ON(1);
168 			continue;
169 		}
170 
171 		/* Clear pending bits */
172 		xc->pending_prio &= ~(1 << prio);
173 
174 		/*
175 		 * Check if the queue count needs adjusting due to
176 		 * interrupts being moved away. See description of
177 		 * xive_dec_target_count()
178 		 */
179 		q = &xc->queue[prio];
180 		if (atomic_read(&q->pending_count)) {
181 			int p = atomic_xchg(&q->pending_count, 0);
182 			if (p) {
183 				WARN_ON(p > atomic_read(&q->count));
184 				atomic_sub(p, &q->count);
185 			}
186 		}
187 	}
188 
189 	/* If nothing was found, set CPPR to 0xff */
190 	if (irq == 0)
191 		prio = 0xff;
192 
193 	/* Update HW CPPR to match if necessary */
194 	if (prio != xc->cppr) {
195 		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
196 		xc->cppr = prio;
197 		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
198 	}
199 
200 	return irq;
201 }
202 
203 /*
204  * This is used to perform the magic loads from an ESB
205  * described in xive-regs.h
206  */
207 static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
208 {
209 	u64 val;
210 
211 	if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
212 		offset |= XIVE_ESB_LD_ST_MO;
213 
214 	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
215 		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
216 	else
217 		val = in_be64(xd->eoi_mmio + offset);
218 
219 	return (u8)val;
220 }
221 
222 static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
223 {
224 	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
225 		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
226 	else
227 		out_be64(xd->eoi_mmio + offset, data);
228 }
229 
230 #ifdef CONFIG_XMON
231 static notrace void xive_dump_eq(const char *name, struct xive_q *q)
232 {
233 	u32 i0, i1, idx;
234 
235 	if (!q->qpage)
236 		return;
237 	idx = q->idx;
238 	i0 = be32_to_cpup(q->qpage + idx);
239 	idx = (idx + 1) & q->msk;
240 	i1 = be32_to_cpup(q->qpage + idx);
241 	xmon_printf("%s idx=%d T=%d %08x %08x ...", name,
242 		     q->idx, q->toggle, i0, i1);
243 }
244 
245 notrace void xmon_xive_do_dump(int cpu)
246 {
247 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
248 
249 	xmon_printf("CPU %d:", cpu);
250 	if (xc) {
251 		xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);
252 
253 #ifdef CONFIG_SMP
254 		{
255 			u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
256 
257 			xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
258 				    val & XIVE_ESB_VAL_P ? 'P' : '-',
259 				    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
260 		}
261 #endif
262 		xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
263 	}
264 	xmon_printf("\n");
265 }
266 
267 static struct irq_data *xive_get_irq_data(u32 hw_irq)
268 {
269 	unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);
270 
271 	return irq ? irq_get_irq_data(irq) : NULL;
272 }
273 
274 int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
275 {
276 	int rc;
277 	u32 target;
278 	u8 prio;
279 	u32 lirq;
280 
281 	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
282 	if (rc) {
283 		xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
284 		return rc;
285 	}
286 
287 	xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
288 		    hw_irq, target, prio, lirq);
289 
290 	if (!d)
291 		d = xive_get_irq_data(hw_irq);
292 
293 	if (d) {
294 		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
295 		u64 val = xive_esb_read(xd, XIVE_ESB_GET);
296 
297 		xmon_printf("flags=%c%c%c PQ=%c%c",
298 			    xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
299 			    xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
300 			    xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
301 			    val & XIVE_ESB_VAL_P ? 'P' : '-',
302 			    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
303 	}
304 
305 	xmon_printf("\n");
306 	return 0;
307 }
308 
309 void xmon_xive_get_irq_all(void)
310 {
311 	unsigned int i;
312 	struct irq_desc *desc;
313 
314 	for_each_irq_desc(i, desc) {
315 		struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i);
316 
317 		if (d)
318 			xmon_xive_get_irq_config(irqd_to_hwirq(d), d);
319 	}
320 }
321 
322 #endif /* CONFIG_XMON */
323 
324 static unsigned int xive_get_irq(void)
325 {
326 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
327 	u32 irq;
328 
329 	/*
330 	 * This can be called either as a result of a HW interrupt or
331 	 * as a "replay" because EOI decided there was still something
332 	 * in one of the queues.
333 	 *
334 	 * First we perform an ACK cycle in order to update our mask
335 	 * of pending priorities. This will also have the effect of
336 	 * updating the CPPR to the most favored pending interrupts.
337 	 *
338 	 * In the future, if we have a way to differentiate a first
339 	 * entry (on HW interrupt) from a replay triggered by EOI,
340 	 * we could skip this on replays unless the soft-mask state tells us
341 	 * that a new HW interrupt occurred.
342 	 */
343 	xive_ops->update_pending(xc);
344 
345 	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);
346 
347 	/* Scan our queue(s) for interrupts */
348 	irq = xive_scan_interrupts(xc, false);
349 
350 	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
351 	    irq, xc->pending_prio);
352 
353 	/* Return pending interrupt if any */
354 	if (irq == XIVE_BAD_IRQ)
355 		return 0;
356 	return irq;
357 }
358 
359 /*
360  * After EOI'ing an interrupt, we need to re-check the queue
361  * to see if another interrupt is pending since multiple
362  * interrupts can coalesce into a single notification to the
363  * CPU.
364  *
365  * If we find that there is indeed more in there, we call
366  * force_external_irq_replay() to make Linux synthesize an
367  * external interrupt on the next call to local_irq_restore().
368  */
369 static void xive_do_queue_eoi(struct xive_cpu *xc)
370 {
371 	if (xive_scan_interrupts(xc, true) != 0) {
372 		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
373 		force_external_irq_replay();
374 	}
375 }
376 
377 /*
378  * EOI an interrupt at the source. There are several methods
379  * to do this depending on the HW version and source type
380  */
381 static void xive_do_source_eoi(struct xive_irq_data *xd)
382 {
383 	u8 eoi_val;
384 
385 	xd->stale_p = false;
386 
387 	/* If the XIVE supports the new "store EOI" facility, use it */
388 	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) {
389 		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
390 		return;
391 	}
392 
393 	/*
394 	 * For LSIs, we use the "EOI cycle" special load rather than
395 	 * PQ bits, as they are automatically re-triggered in HW when
396 	 * still pending.
397 	 */
398 	if (xd->flags & XIVE_IRQ_FLAG_LSI) {
399 		xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
400 		return;
401 	}
402 
403 	/*
404 	 * Otherwise, we use the special MMIO that does a clear of
405 	 * both P and Q and returns the old Q. This allows us to then
406 	 * do a re-trigger if Q was set rather than synthesizing an
407 	 * interrupt in software
408 	 */
409 	eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
410 	DBG_VERBOSE("eoi_val=%x\n", eoi_val);
411 
412 	/* Re-trigger if needed */
413 	if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
414 		out_be64(xd->trig_mmio, 0);
415 }
416 
417 /* irq_chip eoi callback, called with irq descriptor lock held */
418 static void xive_irq_eoi(struct irq_data *d)
419 {
420 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
421 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
422 
423 	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
424 		    d->irq, irqd_to_hwirq(d), xc->pending_prio);
425 
426 	/*
427 	 * EOI the source if it hasn't been disabled and hasn't
428 	 * been passed-through to a KVM guest
429 	 */
430 	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
431 	    !(xd->flags & XIVE_IRQ_FLAG_NO_EOI))
432 		xive_do_source_eoi(xd);
433 	else
434 		xd->stale_p = true;
435 
436 	/*
437 	 * Clear saved_p to indicate that it's no longer occupying
438 	 * a queue slot on the target queue
439 	 */
440 	xd->saved_p = false;
441 
442 	/* Check for more work in the queue */
443 	xive_do_queue_eoi(xc);
444 }
445 
446 /*
447  * Helper used to mask and unmask an interrupt source.
448  */
449 static void xive_do_source_set_mask(struct xive_irq_data *xd,
450 				    bool mask)
451 {
452 	u64 val;
453 
454 	/*
455 	 * If the interrupt had P set, it may be in a queue.
456 	 *
457 	 * We need to make sure we don't re-enable it until it
458 	 * has been fetched from that queue and EOId. We keep
459 	 * a copy of that P state and use it to restore the
460 	 * ESB accordingly on unmask.
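	 *
	 * (Reminder: the ESB PQ states are 00 = reset/enabled, 01 = off,
	 * 10 = pending and 11 = queued, matching the esb_names table
	 * used by the debugfs code below.)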
461 	 */
462 	if (mask) {
463 		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
464 		if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
465 			xd->saved_p = true;
466 		xd->stale_p = false;
467 	} else if (xd->saved_p) {
468 		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
469 		xd->saved_p = false;
470 	} else {
471 		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
472 		xd->stale_p = false;
473 	}
474 }
475 
476 /*
477  * Try to choose "cpu" as a new interrupt target. Increments
478  * the queue accounting for that target if it's not already
479  * full.
480  */
481 static bool xive_try_pick_target(int cpu)
482 {
483 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
484 	struct xive_q *q = &xc->queue[xive_irq_priority];
485 	int max;
486 
487 	/*
488 	 * Calculate max number of interrupts in that queue.
489 	 *
490 	 * We leave a gap of 1 just in case...
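	 *
	 * Illustrative example: a 64k queue page of 4-byte entries gives
	 * 16384 slots, so msk is 16383 and max works out to 16383 here.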
491 	 */
492 	max = (q->msk + 1) - 1;
493 	return !!atomic_add_unless(&q->count, 1, max);
494 }
495 
496 /*
497  * Un-account an interrupt for a target CPU. We don't directly
498  * decrement q->count since the interrupt might still be present
499  * in the queue.
500  *
501  * Instead increment a separate counter "pending_count" which
502  * will be subtracted from "count" later when that CPU observes
503  * the queue to be empty.
504  */
505 static void xive_dec_target_count(int cpu)
506 {
507 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
508 	struct xive_q *q = &xc->queue[xive_irq_priority];
509 
510 	if (WARN_ON(cpu < 0 || !xc)) {
511 		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
512 		return;
513 	}
514 
515 	/*
516 	 * We increment the "pending count" which will be used
517 	 * to decrement the target queue count whenever it's next
518 	 * processed and found empty. This ensures that we don't
519 	 * decrement while we still have the interrupt there
520 	 * occupying a slot.
521 	 */
522 	atomic_inc(&q->pending_count);
523 }
524 
525 /* Find a tentative CPU target in a CPU mask */
526 static int xive_find_target_in_mask(const struct cpumask *mask,
527 				    unsigned int fuzz)
528 {
529 	int cpu, first, num, i;
530 
531 	/* Pick a starting point CPU in the mask based on fuzz */
532 	num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
533 	first = fuzz % num;
534 
535 	/* Locate it */
536 	cpu = cpumask_first(mask);
537 	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
538 		cpu = cpumask_next(cpu, mask);
539 
540 	/* Sanity check */
541 	if (WARN_ON(cpu >= nr_cpu_ids))
542 		cpu = cpumask_first(cpu_online_mask);
543 
544 	/* Remember first one to handle wrap-around */
545 	first = cpu;
546 
547 	/*
548 	 * Now go through the entire mask until we find a valid
549 	 * target.
550 	 */
551 	do {
552 		/*
553 		 * We re-check online as the fallback case passes us
554 		 * an untested affinity mask
555 		 */
556 		if (cpu_online(cpu) && xive_try_pick_target(cpu))
557 			return cpu;
558 		cpu = cpumask_next(cpu, mask);
559 		/* Wrap around */
560 		if (cpu >= nr_cpu_ids)
561 			cpu = cpumask_first(mask);
562 	} while (cpu != first);
563 
564 	return -1;
565 }
566 
567 /*
568  * Pick a target CPU for an interrupt. This is done at
569  * startup or if the affinity is changed in a way that
570  * invalidates the current target.
571  */
572 static int xive_pick_irq_target(struct irq_data *d,
573 				const struct cpumask *affinity)
574 {
575 	static unsigned int fuzz;
576 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
577 	cpumask_var_t mask;
578 	int cpu = -1;
579 
580 	/*
581 	 * If we have chip IDs, first we try to build a mask of
582 	 * CPUs on the source chip and find a target in there
583 	 */
584 	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
585 		zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
586 		/* Build a mask of matching chip IDs */
587 		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
588 			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
589 			if (xc->chip_id == xd->src_chip)
590 				cpumask_set_cpu(cpu, mask);
591 		}
592 		/* Try to find a target */
593 		if (cpumask_empty(mask))
594 			cpu = -1;
595 		else
596 			cpu = xive_find_target_in_mask(mask, fuzz++);
597 		free_cpumask_var(mask);
598 		if (cpu >= 0)
599 			return cpu;
600 		fuzz--;
601 	}
602 
603 	/* No chip IDs, fallback to using the affinity mask */
604 	return xive_find_target_in_mask(affinity, fuzz++);
605 }
606 
607 static unsigned int xive_irq_startup(struct irq_data *d)
608 {
609 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
610 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
611 	int target, rc;
612 
613 	xd->saved_p = false;
614 	xd->stale_p = false;
615 	pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
616 		 d->irq, hw_irq, d);
617 
618 	/* Pick a target */
619 	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
620 	if (target == XIVE_INVALID_TARGET) {
621 		/* Try again breaking affinity */
622 		target = xive_pick_irq_target(d, cpu_online_mask);
623 		if (target == XIVE_INVALID_TARGET)
624 			return -ENXIO;
625 		pr_warn("irq %d started with broken affinity\n", d->irq);
626 	}
627 
628 	/* Sanity check */
629 	if (WARN_ON(target == XIVE_INVALID_TARGET ||
630 		    target >= nr_cpu_ids))
631 		target = smp_processor_id();
632 
633 	xd->target = target;
634 
635 	/*
636 	 * Configure the logical number to be the Linux IRQ number
637 	 * and set the target queue
638 	 */
639 	rc = xive_ops->configure_irq(hw_irq,
640 				     get_hard_smp_processor_id(target),
641 				     xive_irq_priority, d->irq);
642 	if (rc)
643 		return rc;
644 
645 	/* Unmask the ESB */
646 	xive_do_source_set_mask(xd, false);
647 
648 	return 0;
649 }
650 
651 /* called with irq descriptor lock held */
652 static void xive_irq_shutdown(struct irq_data *d)
653 {
654 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
655 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
656 
657 	pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
658 		 d->irq, hw_irq, d);
659 
660 	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
661 		return;
662 
663 	/* Mask the interrupt at the source */
664 	xive_do_source_set_mask(xd, true);
665 
666 	/*
667 	 * Mask the interrupt in HW in the IVT/EAS and set the number
668 	 * to be the "bad" IRQ number
669 	 */
670 	xive_ops->configure_irq(hw_irq,
671 				get_hard_smp_processor_id(xd->target),
672 				0xff, XIVE_BAD_IRQ);
673 
674 	xive_dec_target_count(xd->target);
675 	xd->target = XIVE_INVALID_TARGET;
676 }
677 
678 static void xive_irq_unmask(struct irq_data *d)
679 {
680 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
681 
682 	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);
683 
684 	xive_do_source_set_mask(xd, false);
685 }
686 
687 static void xive_irq_mask(struct irq_data *d)
688 {
689 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
690 
691 	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);
692 
693 	xive_do_source_set_mask(xd, true);
694 }
695 
696 static int xive_irq_set_affinity(struct irq_data *d,
697 				 const struct cpumask *cpumask,
698 				 bool force)
699 {
700 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
701 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
702 	u32 target, old_target;
703 	int rc = 0;
704 
705 	pr_debug("%s: irq %d/%x\n", __func__, d->irq, hw_irq);
706 
707 	/* Is this valid ? */
708 	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
709 		return -EINVAL;
710 
711 	/*
712 	 * If the existing target is already in the new mask and is
713 	 * online, then do nothing.
714 	 */
715 	if (xd->target != XIVE_INVALID_TARGET &&
716 	    cpu_online(xd->target) &&
717 	    cpumask_test_cpu(xd->target, cpumask))
718 		return IRQ_SET_MASK_OK;
719 
720 	/* Pick a new target */
721 	target = xive_pick_irq_target(d, cpumask);
722 
723 	/* No target found */
724 	if (target == XIVE_INVALID_TARGET)
725 		return -ENXIO;
726 
727 	/* Sanity check */
728 	if (WARN_ON(target >= nr_cpu_ids))
729 		target = smp_processor_id();
730 
731 	old_target = xd->target;
732 
733 	/*
734 	 * Only configure the irq if it's not currently passed-through to
735 	 * a KVM guest
736 	 */
737 	if (!irqd_is_forwarded_to_vcpu(d))
738 		rc = xive_ops->configure_irq(hw_irq,
739 					     get_hard_smp_processor_id(target),
740 					     xive_irq_priority, d->irq);
741 	if (rc < 0) {
742 		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
743 		return rc;
744 	}
745 
746 	pr_debug("  target: 0x%x\n", target);
747 	xd->target = target;
748 
749 	/* Give up previous target */
750 	if (old_target != XIVE_INVALID_TARGET)
751 	    xive_dec_target_count(old_target);
752 
753 	return IRQ_SET_MASK_OK;
754 }
755 
756 static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
757 {
758 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
759 
760 	/*
761 	 * We only support these. This has really no effect other than setting
762 	 * the corresponding descriptor bits, mind you, but those will in turn
763 	 * affect the resend function when re-enabling an edge interrupt.
764 	 *
765 	 * We set the default to edge as explained in map().
766 	 */
767 	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
768 		flow_type = IRQ_TYPE_EDGE_RISING;
769 
770 	if (flow_type != IRQ_TYPE_EDGE_RISING &&
771 	    flow_type != IRQ_TYPE_LEVEL_LOW)
772 		return -EINVAL;
773 
774 	irqd_set_trigger_type(d, flow_type);
775 
776 	/*
777 	 * Double check it matches what the FW thinks
778 	 *
779 	 * NOTE: We don't know yet if the PAPR interface will provide
780 	 * the LSI vs MSI information apart from the device-tree so
781 	 * this check might have to move into an optional backend call
782 	 * that is specific to the native backend
783 	 */
784 	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
785 	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
786 		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
787 			d->irq, (u32)irqd_to_hwirq(d),
788 			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
789 			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
790 	}
791 
792 	return IRQ_SET_MASK_OK_NOCOPY;
793 }
794 
795 static int xive_irq_retrigger(struct irq_data *d)
796 {
797 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
798 
799 	/* This should be only for MSIs */
800 	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
801 		return 0;
802 
803 	/*
804 	 * To perform a retrigger, we first set the PQ bits to
805 	 * 11, then perform an EOI.
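	 *
	 * In the non-StoreEOI case, the EOI below reads the ESB with
	 * PQ=00, sees Q set and writes to the trigger page, which makes
	 * the HW re-send the interrupt.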
806 	 */
807 	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
808 	xive_do_source_eoi(xd);
809 
810 	return 1;
811 }
812 
813 /*
814  * Caller holds the irq descriptor lock, so this won't be called
815  * concurrently with xive_get_irqchip_state on the same interrupt.
816  */
817 static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
818 {
819 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
820 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
821 	int rc;
822 	u8 pq;
823 
824 	/*
825 	 * This is called by KVM with state non-NULL for enabling
826 	 * pass-through or NULL for disabling it
827 	 */
828 	if (state) {
829 		irqd_set_forwarded_to_vcpu(d);
830 
831 		/* Set it to PQ=10 state to prevent further sends */
832 		pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
833 		if (!xd->stale_p) {
834 			xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
835 			xd->stale_p = !xd->saved_p;
836 		}
837 
838 		/* No target ? nothing to do */
839 		if (xd->target == XIVE_INVALID_TARGET) {
840 			/*
841 			 * An untargetted interrupt should have been
842 			 * also masked at the source
843 			 */
844 			WARN_ON(xd->saved_p);
845 
846 			return 0;
847 		}
848 
849 		/*
850 		 * If P was set, adjust state to PQ=11 to indicate
851 		 * that a resend is needed for the interrupt to reach
852 		 * the guest. Also remember the value of P.
853 		 *
854 		 * This also tells us that it's in flight to a host queue
855 		 * or has already been fetched but hasn't been EOIed yet
856 		 * by the host. Thus it's potentially using up a host
857 		 * queue slot. This is important to know because as long
858 		 * as this is the case, we must not hard-unmask it when
859 		 * "returning" that interrupt to the host.
860 		 *
861 		 * This saved_p is cleared by the host EOI, when we know
862 		 * for sure the queue slot is no longer in use.
863 		 */
864 		if (xd->saved_p) {
865 			xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
866 
867 			/*
868 			 * Sync the XIVE source HW to ensure the interrupt
869 			 * has gone through the EAS before we change its
870 			 * target to the guest. That should guarantee us
871 			 * that we *will* eventually get an EOI for it on
872 			 * the host. Otherwise there would be a small window
873 			 * for P to be seen here but the interrupt going
874 			 * to the guest queue.
875 			 */
876 			if (xive_ops->sync_source)
877 				xive_ops->sync_source(hw_irq);
878 		}
879 	} else {
880 		irqd_clr_forwarded_to_vcpu(d);
881 
882 		/* No host target ? hard mask and return */
883 		if (xd->target == XIVE_INVALID_TARGET) {
884 			xive_do_source_set_mask(xd, true);
885 			return 0;
886 		}
887 
888 		/*
889 		 * Sync the XIVE source HW to ensure the interrupt
890 		 * has gone through the EAS before we change its
891 		 * target to the host.
892 		 */
893 		if (xive_ops->sync_source)
894 			xive_ops->sync_source(hw_irq);
895 
896 		/*
897 		 * By convention we are called with the interrupt in
898 		 * a PQ=10 or PQ=11 state, ie, it won't fire and will
899 		 * have latched in Q whether there's a pending HW
900 		 * interrupt or not.
901 		 *
902 		 * First reconfigure the target.
903 		 */
904 		rc = xive_ops->configure_irq(hw_irq,
905 					     get_hard_smp_processor_id(xd->target),
906 					     xive_irq_priority, d->irq);
907 		if (rc)
908 			return rc;
909 
910 		/*
911 		 * Then if saved_p is not set, effectively re-enable the
912 		 * interrupt with an EOI. If it is set, we know there is
913 		 * still a message in a host queue somewhere that will be
914 		 * EOId eventually.
915 		 *
916 		 * Note: We don't check irqd_irq_disabled(). Effectively,
917 		 * we *will* let the irq get through even if masked if the
918 		 * HW is still firing it in order to deal with the whole
919 		 * saved_p business properly. If the interrupt triggers
920 		 * while masked, the generic code will re-mask it anyway.
921 		 */
922 		if (!xd->saved_p)
923 			xive_do_source_eoi(xd);
924 
925 	}
926 	return 0;
927 }
928 
929 /* Called with irq descriptor lock held. */
930 static int xive_get_irqchip_state(struct irq_data *data,
931 				  enum irqchip_irq_state which, bool *state)
932 {
933 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
934 	u8 pq;
935 
936 	switch (which) {
937 	case IRQCHIP_STATE_ACTIVE:
938 		pq = xive_esb_read(xd, XIVE_ESB_GET);
939 
940 		/*
941 		 * The esb value being all 1's means we couldn't get
942 		 * the PQ state of the interrupt through mmio. It may
943 		 * happen, for example when querying a PHB interrupt
944 		 * while the PHB is in an error state. We consider the
945 		 * interrupt to be inactive in that case.
946 		 */
947 		*state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
948 			(xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
949 		return 0;
950 	default:
951 		return -EINVAL;
952 	}
953 }
954 
955 static struct irq_chip xive_irq_chip = {
956 	.name = "XIVE-IRQ",
957 	.irq_startup = xive_irq_startup,
958 	.irq_shutdown = xive_irq_shutdown,
959 	.irq_eoi = xive_irq_eoi,
960 	.irq_mask = xive_irq_mask,
961 	.irq_unmask = xive_irq_unmask,
962 	.irq_set_affinity = xive_irq_set_affinity,
963 	.irq_set_type = xive_irq_set_type,
964 	.irq_retrigger = xive_irq_retrigger,
965 	.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
966 	.irq_get_irqchip_state = xive_get_irqchip_state,
967 };
968 
969 bool is_xive_irq(struct irq_chip *chip)
970 {
971 	return chip == &xive_irq_chip;
972 }
973 EXPORT_SYMBOL_GPL(is_xive_irq);
974 
975 void xive_cleanup_irq_data(struct xive_irq_data *xd)
976 {
977 	pr_debug("%s for HW %x\n", __func__, xd->hw_irq);
978 
979 	if (xd->eoi_mmio) {
980 		iounmap(xd->eoi_mmio);
981 		if (xd->eoi_mmio == xd->trig_mmio)
982 			xd->trig_mmio = NULL;
983 		xd->eoi_mmio = NULL;
984 	}
985 	if (xd->trig_mmio) {
986 		iounmap(xd->trig_mmio);
987 		xd->trig_mmio = NULL;
988 	}
989 }
990 EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
991 
992 static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
993 {
994 	struct xive_irq_data *xd;
995 	int rc;
996 
997 	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
998 	if (!xd)
999 		return -ENOMEM;
1000 	rc = xive_ops->populate_irq_data(hw, xd);
1001 	if (rc) {
1002 		kfree(xd);
1003 		return rc;
1004 	}
1005 	xd->target = XIVE_INVALID_TARGET;
1006 	irq_set_handler_data(virq, xd);
1007 
1008 	/*
1009 	 * Turn OFF by default the interrupt being mapped. A side
1010 	 * effect of this access is that it maps the ESB page of the
1011 	 * interrupt into the Linux address space. This prevents page
1012 	 * fault issues in the crash handler which masks all
1013 	 * interrupts.
1014 	 */
1015 	xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
1016 
1017 	return 0;
1018 }
1019 
1020 void xive_irq_free_data(unsigned int virq)
1021 {
1022 	struct xive_irq_data *xd = irq_get_handler_data(virq);
1023 
1024 	if (!xd)
1025 		return;
1026 	irq_set_handler_data(virq, NULL);
1027 	xive_cleanup_irq_data(xd);
1028 	kfree(xd);
1029 }
1030 EXPORT_SYMBOL_GPL(xive_irq_free_data);
1031 
1032 #ifdef CONFIG_SMP
1033 
1034 static void xive_cause_ipi(int cpu)
1035 {
1036 	struct xive_cpu *xc;
1037 	struct xive_irq_data *xd;
1038 
1039 	xc = per_cpu(xive_cpu, cpu);
1040 
1041 	DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
1042 		    smp_processor_id(), cpu, xc->hw_ipi);
1043 
1044 	xd = &xc->ipi_data;
1045 	if (WARN_ON(!xd->trig_mmio))
1046 		return;
1047 	out_be64(xd->trig_mmio, 0);
1048 }
1049 
1050 static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
1051 {
1052 	return smp_ipi_demux();
1053 }
1054 
1055 static void xive_ipi_eoi(struct irq_data *d)
1056 {
1057 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1058 
1059 	/* Handle possible race with unplug and drop stale IPIs */
1060 	if (!xc)
1061 		return;
1062 
1063 	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
1064 		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
1065 
1066 	xive_do_source_eoi(&xc->ipi_data);
1067 	xive_do_queue_eoi(xc);
1068 }
1069 
1070 static void xive_ipi_do_nothing(struct irq_data *d)
1071 {
1072 	/*
1073 	 * Nothing to do, we never mask/unmask IPIs, but the callback
1074 	 * has to exist for the struct irq_chip.
1075 	 */
1076 }
1077 
1078 static struct irq_chip xive_ipi_chip = {
1079 	.name = "XIVE-IPI",
1080 	.irq_eoi = xive_ipi_eoi,
1081 	.irq_mask = xive_ipi_do_nothing,
1082 	.irq_unmask = xive_ipi_do_nothing,
1083 };
1084 
1085 /*
1086  * IPIs are marked per-cpu. We use separate HW interrupts under the
1087  * hood, but all CPUs of a node share the same Linux interrupt.
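 *
 * xive_ipi_cpu_to_irq() maps a CPU to the Linux interrupt of its node,
 * and xive_ipi_irq_domain_alloc() below simply uses the node number as
 * the hwirq.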
1088  */
1089 struct xive_ipi_alloc_info {
1090 	irq_hw_number_t hwirq;
1091 };
1092 
1093 static int xive_ipi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1094 				     unsigned int nr_irqs, void *arg)
1095 {
1096 	struct xive_ipi_alloc_info *info = arg;
1097 	int i;
1098 
1099 	for (i = 0; i < nr_irqs; i++) {
1100 		irq_domain_set_info(domain, virq + i, info->hwirq + i, &xive_ipi_chip,
1101 				    domain->host_data, handle_percpu_irq,
1102 				    NULL, NULL);
1103 	}
1104 	return 0;
1105 }
1106 
1107 static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
1108 	.alloc  = xive_ipi_irq_domain_alloc,
1109 };
1110 
1111 static int __init xive_request_ipi(void)
1112 {
1113 	struct fwnode_handle *fwnode;
1114 	struct irq_domain *ipi_domain;
1115 	unsigned int node;
1116 	int ret = -ENOMEM;
1117 
1118 	fwnode = irq_domain_alloc_named_fwnode("XIVE-IPI");
1119 	if (!fwnode)
1120 		goto out;
1121 
1122 	ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids,
1123 					      &xive_ipi_irq_domain_ops, NULL);
1124 	if (!ipi_domain)
1125 		goto out_free_fwnode;
1126 
1127 	xive_ipis = kcalloc(nr_node_ids, sizeof(*xive_ipis), GFP_KERNEL | __GFP_NOFAIL);
1128 	if (!xive_ipis)
1129 		goto out_free_domain;
1130 
1131 	for_each_node(node) {
1132 		struct xive_ipi_desc *xid = &xive_ipis[node];
1133 		struct xive_ipi_alloc_info info = { node };
1134 
1135 		/* Skip nodes without CPUs */
1136 		if (cpumask_empty(cpumask_of_node(node)))
1137 			continue;
1138 
1139 		/*
1140 		 * Map one IPI interrupt per node for all cpus of that node.
1141 		 * Since the HW interrupt number doesn't have any meaning,
1142 		 * simply use the node number.
1143 		 */
1144 		ret = irq_domain_alloc_irqs(ipi_domain, 1, node, &info);
1145 		if (ret < 0)
1146 			goto out_free_xive_ipis;
1147 		xid->irq = ret;
1148 
1149 		snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
1150 
1151 		ret = request_irq(xid->irq, xive_muxed_ipi_action,
1152 				  IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD,
1153 				  xid->name, NULL);
1154 
1155 		WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
1156 	}
1157 
1158 	return ret;
1159 
1160 out_free_xive_ipis:
1161 	kfree(xive_ipis);
1162 out_free_domain:
1163 	irq_domain_remove(ipi_domain);
1164 out_free_fwnode:
1165 	irq_domain_free_fwnode(fwnode);
1166 out:
1167 	return ret;
1168 }
1169 
1170 static int xive_setup_cpu_ipi(unsigned int cpu)
1171 {
1172 	unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
1173 	struct xive_cpu *xc;
1174 	int rc;
1175 
1176 	pr_debug("Setting up IPI for CPU %d\n", cpu);
1177 
1178 	xc = per_cpu(xive_cpu, cpu);
1179 
1180 	/* Check if we are already setup */
1181 	if (xc->hw_ipi != XIVE_BAD_IRQ)
1182 		return 0;
1183 
1184 	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
1185 	if (xive_ops->get_ipi(cpu, xc))
1186 		return -EIO;
1187 
1188 	/*
1189 	 * Populate the IRQ data in the xive_cpu structure and
1190 	 * configure the HW / enable the IPIs.
1191 	 */
1192 	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
1193 	if (rc) {
1194 		pr_err("Failed to populate IPI data on CPU %d\n", cpu);
1195 		return -EIO;
1196 	}
1197 	rc = xive_ops->configure_irq(xc->hw_ipi,
1198 				     get_hard_smp_processor_id(cpu),
1199 				     xive_irq_priority, xive_ipi_irq);
1200 	if (rc) {
1201 		pr_err("Failed to map IPI CPU %d\n", cpu);
1202 		return -EIO;
1203 	}
1204 	pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
1205 	    xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);
1206 
1207 	/* Unmask it */
1208 	xive_do_source_set_mask(&xc->ipi_data, false);
1209 
1210 	return 0;
1211 }
1212 
1213 static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
1214 {
1215 	unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
1216 
1217 	/* Disable the IPI and free the IRQ data */
1218 
1219 	/* Already cleaned up ? */
1220 	if (xc->hw_ipi == XIVE_BAD_IRQ)
1221 		return;
1222 
1223 	/* Mask the IPI */
1224 	xive_do_source_set_mask(&xc->ipi_data, true);
1225 
1226 	/*
1227 	 * Note: We don't call xive_cleanup_irq_data() to free
1228 	 * the mappings as this is called from an IPI on kexec
1229 	 * which is not a safe environment to call iounmap()
1230 	 */
1231 
1232 	/* Deconfigure/mask in the backend */
1233 	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
1234 				0xff, xive_ipi_irq);
1235 
1236 	/* Free the IPIs in the backend */
1237 	xive_ops->put_ipi(cpu, xc);
1238 }
1239 
1240 void __init xive_smp_probe(void)
1241 {
1242 	smp_ops->cause_ipi = xive_cause_ipi;
1243 
1244 	/* Register the IPI */
1245 	xive_request_ipi();
1246 
1247 	/* Allocate and setup IPI for the boot CPU */
1248 	xive_setup_cpu_ipi(smp_processor_id());
1249 }
1250 
1251 #endif /* CONFIG_SMP */
1252 
1253 static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
1254 			       irq_hw_number_t hw)
1255 {
1256 	int rc;
1257 
1258 	/*
1259 	 * Mark interrupts as edge sensitive by default so that resend
1260 	 * actually works. Will fix that up below if needed.
1261 	 */
1262 	irq_clear_status_flags(virq, IRQ_LEVEL);
1263 
1264 	rc = xive_irq_alloc_data(virq, hw);
1265 	if (rc)
1266 		return rc;
1267 
1268 	irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);
1269 
1270 	return 0;
1271 }
1272 
1273 static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
1274 {
1275 	xive_irq_free_data(virq);
1276 }
1277 
1278 static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
1279 				 const u32 *intspec, unsigned int intsize,
1280 				 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
1281 
1282 {
1283 	*out_hwirq = intspec[0];
1284 
1285 	/*
1286 	 * If intsize is at least 2, we look for the type in the second cell,
1287 	 * where we assume the LSB indicates a level interrupt.
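	 *
	 * For example (values purely illustrative), a device tree property
	 * "interrupts = <0x1234 1>" would translate to hwirq 0x1234 with
	 * IRQ_TYPE_LEVEL_LOW.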
1288 	 */
1289 	if (intsize > 1) {
1290 		if (intspec[1] & 1)
1291 			*out_flags = IRQ_TYPE_LEVEL_LOW;
1292 		else
1293 			*out_flags = IRQ_TYPE_EDGE_RISING;
1294 	} else
1295 		*out_flags = IRQ_TYPE_LEVEL_LOW;
1296 
1297 	return 0;
1298 }
1299 
1300 static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
1301 				 enum irq_domain_bus_token bus_token)
1302 {
1303 	return xive_ops->match(node);
1304 }
1305 
1306 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
1307 static const char * const esb_names[] = { "RESET", "OFF", "PENDING", "QUEUED" };
1308 
1309 static const struct {
1310 	u64  mask;
1311 	char *name;
1312 } xive_irq_flags[] = {
1313 	{ XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" },
1314 	{ XIVE_IRQ_FLAG_LSI,       "LSI"       },
1315 	{ XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" },
1316 	{ XIVE_IRQ_FLAG_NO_EOI,    "NO_EOI"    },
1317 };
1318 
1319 static void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d,
1320 				       struct irq_data *irqd, int ind)
1321 {
1322 	struct xive_irq_data *xd;
1323 	u64 val;
1324 	int i;
1325 
1326 	/* No IRQ domain level information. To be done */
1327 	if (!irqd)
1328 		return;
1329 
1330 	if (!is_xive_irq(irq_data_get_irq_chip(irqd)))
1331 		return;
1332 
1333 	seq_printf(m, "%*sXIVE:\n", ind, "");
1334 	ind++;
1335 
1336 	xd = irq_data_get_irq_handler_data(irqd);
1337 	if (!xd) {
1338 		seq_printf(m, "%*snot assigned\n", ind, "");
1339 		return;
1340 	}
1341 
1342 	val = xive_esb_read(xd, XIVE_ESB_GET);
1343 	seq_printf(m, "%*sESB:      %s\n", ind, "", esb_names[val & 0x3]);
1344 	seq_printf(m, "%*sPstate:   %s %s\n", ind, "", xd->stale_p ? "stale" : "",
1345 		   xd->saved_p ? "saved" : "");
1346 	seq_printf(m, "%*sTarget:   %d\n", ind, "", xd->target);
1347 	seq_printf(m, "%*sChip:     %d\n", ind, "", xd->src_chip);
1348 	seq_printf(m, "%*sTrigger:  0x%016llx\n", ind, "", xd->trig_page);
1349 	seq_printf(m, "%*sEOI:      0x%016llx\n", ind, "", xd->eoi_page);
1350 	seq_printf(m, "%*sFlags:    0x%llx\n", ind, "", xd->flags);
1351 	for (i = 0; i < ARRAY_SIZE(xive_irq_flags); i++) {
1352 		if (xd->flags & xive_irq_flags[i].mask)
1353 			seq_printf(m, "%*s%s\n", ind + 12, "", xive_irq_flags[i].name);
1354 	}
1355 }
1356 #endif
1357 
1358 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
1359 static int xive_irq_domain_translate(struct irq_domain *d,
1360 				     struct irq_fwspec *fwspec,
1361 				     unsigned long *hwirq,
1362 				     unsigned int *type)
1363 {
1364 	return xive_irq_domain_xlate(d, to_of_node(fwspec->fwnode),
1365 				     fwspec->param, fwspec->param_count,
1366 				     hwirq, type);
1367 }
1368 
1369 static int xive_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1370 				 unsigned int nr_irqs, void *arg)
1371 {
1372 	struct irq_fwspec *fwspec = arg;
1373 	irq_hw_number_t hwirq;
1374 	unsigned int type = IRQ_TYPE_NONE;
1375 	int i, rc;
1376 
1377 	rc = xive_irq_domain_translate(domain, fwspec, &hwirq, &type);
1378 	if (rc)
1379 		return rc;
1380 
1381 	pr_debug("%s %d/%lx #%d\n", __func__, virq, hwirq, nr_irqs);
1382 
1383 	for (i = 0; i < nr_irqs; i++) {
1384 		/* TODO: call xive_irq_domain_map() */
1385 
1386 		/*
1387 		 * Mark interrupts as edge sensitive by default so that resend
1388 		 * actually works. Will fix that up below if needed.
1389 		 */
1390 		irq_clear_status_flags(virq, IRQ_LEVEL);
1391 
1392 		/* allocates and sets handler data */
1393 		rc = xive_irq_alloc_data(virq + i, hwirq + i);
1394 		if (rc)
1395 			return rc;
1396 
1397 		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
1398 					      &xive_irq_chip, domain->host_data);
1399 		irq_set_handler(virq + i, handle_fasteoi_irq);
1400 	}
1401 
1402 	return 0;
1403 }
1404 
1405 static void xive_irq_domain_free(struct irq_domain *domain,
1406 				 unsigned int virq, unsigned int nr_irqs)
1407 {
1408 	int i;
1409 
1410 	pr_debug("%s %d #%d\n", __func__, virq, nr_irqs);
1411 
1412 	for (i = 0; i < nr_irqs; i++)
1413 		xive_irq_free_data(virq + i);
1414 }
1415 #endif
1416 
1417 static const struct irq_domain_ops xive_irq_domain_ops = {
1418 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
1419 	.alloc	= xive_irq_domain_alloc,
1420 	.free	= xive_irq_domain_free,
1421 	.translate = xive_irq_domain_translate,
1422 #endif
1423 	.match = xive_irq_domain_match,
1424 	.map = xive_irq_domain_map,
1425 	.unmap = xive_irq_domain_unmap,
1426 	.xlate = xive_irq_domain_xlate,
1427 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
1428 	.debug_show = xive_irq_domain_debug_show,
1429 #endif
1430 };
1431 
1432 static void __init xive_init_host(struct device_node *np)
1433 {
1434 	xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ,
1435 					       &xive_irq_domain_ops, NULL);
1436 	if (WARN_ON(xive_irq_domain == NULL))
1437 		return;
1438 	irq_set_default_host(xive_irq_domain);
1439 }
1440 
1441 static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
1442 {
1443 	if (xc->queue[xive_irq_priority].qpage)
1444 		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
1445 }
1446 
1447 static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
1448 {
1449 	int rc = 0;
1450 
1451 	/* We set up one queue for now, with a 64k page */
1452 	if (!xc->queue[xive_irq_priority].qpage)
1453 		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);
1454 
1455 	return rc;
1456 }
1457 
1458 static int xive_prepare_cpu(unsigned int cpu)
1459 {
1460 	struct xive_cpu *xc;
1461 
1462 	xc = per_cpu(xive_cpu, cpu);
1463 	if (!xc) {
1464 		xc = kzalloc_node(sizeof(struct xive_cpu),
1465 				  GFP_KERNEL, cpu_to_node(cpu));
1466 		if (!xc)
1467 			return -ENOMEM;
1468 		xc->hw_ipi = XIVE_BAD_IRQ;
1469 		xc->chip_id = XIVE_INVALID_CHIP_ID;
1470 		if (xive_ops->prepare_cpu)
1471 			xive_ops->prepare_cpu(cpu, xc);
1472 
1473 		per_cpu(xive_cpu, cpu) = xc;
1474 	}
1475 
1476 	/* Set up EQs if not already done */
1477 	return xive_setup_cpu_queues(cpu, xc);
1478 }
1479 
1480 static void xive_setup_cpu(void)
1481 {
1482 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1483 
1484 	/* The backend might have additional things to do */
1485 	if (xive_ops->setup_cpu)
1486 		xive_ops->setup_cpu(smp_processor_id(), xc);
1487 
1488 	/* Set CPPR to 0xff to enable flow of interrupts */
1489 	xc->cppr = 0xff;
1490 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
1491 }
1492 
1493 #ifdef CONFIG_SMP
1494 void xive_smp_setup_cpu(void)
1495 {
1496 	pr_devel("SMP setup CPU %d\n", smp_processor_id());
1497 
1498 	/* This will have already been done on the boot CPU */
1499 	if (smp_processor_id() != boot_cpuid)
1500 		xive_setup_cpu();
1501 
1502 }
1503 
1504 int xive_smp_prepare_cpu(unsigned int cpu)
1505 {
1506 	int rc;
1507 
1508 	/* Allocate per-CPU data and queues */
1509 	rc = xive_prepare_cpu(cpu);
1510 	if (rc)
1511 		return rc;
1512 
1513 	/* Allocate and setup IPI for the new CPU */
1514 	return xive_setup_cpu_ipi(cpu);
1515 }
1516 
1517 #ifdef CONFIG_HOTPLUG_CPU
1518 static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
1519 {
1520 	u32 irq;
1521 
1522 	/* We assume local irqs are disabled */
1523 	WARN_ON(!irqs_disabled());
1524 
1525 	/* Check what's already in the CPU queue */
1526 	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
1527 		/*
1528 		 * We need to re-route that interrupt to its new destination.
1529 		 * First get and lock the descriptor
1530 		 */
1531 		struct irq_desc *desc = irq_to_desc(irq);
1532 		struct irq_data *d = irq_desc_get_irq_data(desc);
1533 		struct xive_irq_data *xd;
1534 
1535 		/*
1536 		 * Ignore anything that isn't a XIVE irq, and ignore
1537 		 * IPIs, which can simply be dropped.
1538 		 */
1539 		if (d->domain != xive_irq_domain)
1540 			continue;
1541 
1542 		/*
1543 		 * The IRQ should have already been re-routed; it's just a
1544 		 * stale entry in the old queue, so re-trigger it in order to
1545 		 * make it reach its new destination.
1546 		 */
1547 #ifdef DEBUG_FLUSH
1548 		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
1549 			cpu, irq);
1550 #endif
1551 		raw_spin_lock(&desc->lock);
1552 		xd = irq_desc_get_handler_data(desc);
1553 
1554 		/*
1555 		 * Clear saved_p to indicate that it's no longer pending
1556 		 */
1557 		xd->saved_p = false;
1558 
1559 		/*
1560 		 * For LSIs, we EOI, this will cause a resend if it's
1561 		 * still asserted. Otherwise do an MSI retrigger.
1562 		 */
1563 		if (xd->flags & XIVE_IRQ_FLAG_LSI)
1564 			xive_do_source_eoi(xd);
1565 		else
1566 			xive_irq_retrigger(d);
1567 
1568 		raw_spin_unlock(&desc->lock);
1569 	}
1570 }
1571 
1572 void xive_smp_disable_cpu(void)
1573 {
1574 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1575 	unsigned int cpu = smp_processor_id();
1576 
1577 	/* Migrate interrupts away from the CPU */
1578 	irq_migrate_all_off_this_cpu();
1579 
1580 	/* Set CPPR to 0 to disable flow of interrupts */
1581 	xc->cppr = 0;
1582 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
1583 
1584 	/* Flush everything still in the queue */
1585 	xive_flush_cpu_queue(cpu, xc);
1586 
1587 	/* Re-enable CPPR  */
1588 	xc->cppr = 0xff;
1589 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
1590 }
1591 
1592 void xive_flush_interrupt(void)
1593 {
1594 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1595 	unsigned int cpu = smp_processor_id();
1596 
1597 	/* Called if an interrupt occurs while the CPU is hot unplugged */
1598 	xive_flush_cpu_queue(cpu, xc);
1599 }
1600 
1601 #endif /* CONFIG_HOTPLUG_CPU */
1602 
1603 #endif /* CONFIG_SMP */
1604 
1605 void xive_teardown_cpu(void)
1606 {
1607 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1608 	unsigned int cpu = smp_processor_id();
1609 
1610 	/* Set CPPR to 0 to disable flow of interrupts */
1611 	xc->cppr = 0;
1612 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
1613 
1614 	if (xive_ops->teardown_cpu)
1615 		xive_ops->teardown_cpu(cpu, xc);
1616 
1617 #ifdef CONFIG_SMP
1618 	/* Get rid of IPI */
1619 	xive_cleanup_cpu_ipi(cpu, xc);
1620 #endif
1621 
1622 	/* Disable and free the queues */
1623 	xive_cleanup_cpu_queues(cpu, xc);
1624 }
1625 
1626 void xive_shutdown(void)
1627 {
1628 	xive_ops->shutdown();
1629 }
1630 
1631 bool __init xive_core_init(struct device_node *np, const struct xive_ops *ops,
1632 			   void __iomem *area, u32 offset, u8 max_prio)
1633 {
1634 	xive_tima = area;
1635 	xive_tima_offset = offset;
1636 	xive_ops = ops;
1637 	xive_irq_priority = max_prio;
1638 
1639 	ppc_md.get_irq = xive_get_irq;
1640 	__xive_enabled = true;
1641 
1642 	pr_devel("Initializing host..\n");
1643 	xive_init_host(np);
1644 
1645 	pr_devel("Initializing boot CPU..\n");
1646 
1647 	/* Allocate per-CPU data and queues */
1648 	xive_prepare_cpu(smp_processor_id());
1649 
1650 	/* Get ready for interrupts */
1651 	xive_setup_cpu();
1652 
1653 	pr_info("Interrupt handling initialized with %s backend\n",
1654 		xive_ops->name);
1655 	pr_info("Using priority %d for all interrupts\n", max_prio);
1656 
1657 	return true;
1658 }
1659 
1660 __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
1661 {
1662 	unsigned int alloc_order;
1663 	struct page *pages;
1664 	__be32 *qpage;
1665 
1666 	alloc_order = xive_alloc_order(queue_shift);
1667 	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
1668 	if (!pages)
1669 		return ERR_PTR(-ENOMEM);
1670 	qpage = (__be32 *)page_address(pages);
1671 	memset(qpage, 0, 1 << queue_shift);
1672 
1673 	return qpage;
1674 }
1675 
1676 static int __init xive_off(char *arg)
1677 {
1678 	xive_cmdline_disabled = true;
1679 	return 0;
1680 }
1681 __setup("xive=off", xive_off);
1682 
1683 static void xive_debug_show_cpu(struct seq_file *m, int cpu)
1684 {
1685 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
1686 
1687 	seq_printf(m, "CPU %d:", cpu);
1688 	if (xc) {
1689 		seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);
1690 
1691 #ifdef CONFIG_SMP
1692 		{
1693 			u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
1694 
1695 			seq_printf(m, "IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
1696 				   val & XIVE_ESB_VAL_P ? 'P' : '-',
1697 				   val & XIVE_ESB_VAL_Q ? 'Q' : '-');
1698 		}
1699 #endif
1700 		{
1701 			struct xive_q *q = &xc->queue[xive_irq_priority];
1702 			u32 i0, i1, idx;
1703 
1704 			if (q->qpage) {
1705 				idx = q->idx;
1706 				i0 = be32_to_cpup(q->qpage + idx);
1707 				idx = (idx + 1) & q->msk;
1708 				i1 = be32_to_cpup(q->qpage + idx);
1709 				seq_printf(m, "EQ idx=%d T=%d %08x %08x ...",
1710 					   q->idx, q->toggle, i0, i1);
1711 			}
1712 		}
1713 	}
1714 	seq_puts(m, "\n");
1715 }
1716 
1717 static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d)
1718 {
1719 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
1720 	int rc;
1721 	u32 target;
1722 	u8 prio;
1723 	u32 lirq;
1724 	struct xive_irq_data *xd;
1725 	u64 val;
1726 
1727 	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
1728 	if (rc) {
1729 		seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
1730 		return;
1731 	}
1732 
1733 	seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
1734 		   hw_irq, target, prio, lirq);
1735 
1736 	xd = irq_data_get_irq_handler_data(d);
1737 	val = xive_esb_read(xd, XIVE_ESB_GET);
1738 	seq_printf(m, "flags=%c%c%c PQ=%c%c",
1739 		   xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
1740 		   xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
1741 		   xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
1742 		   val & XIVE_ESB_VAL_P ? 'P' : '-',
1743 		   val & XIVE_ESB_VAL_Q ? 'Q' : '-');
1744 	seq_puts(m, "\n");
1745 }
1746 
1747 static int xive_core_debug_show(struct seq_file *m, void *private)
1748 {
1749 	unsigned int i;
1750 	struct irq_desc *desc;
1751 	int cpu;
1752 
1753 	if (xive_ops->debug_show)
1754 		xive_ops->debug_show(m, private);
1755 
1756 	for_each_possible_cpu(cpu)
1757 		xive_debug_show_cpu(m, cpu);
1758 
1759 	for_each_irq_desc(i, desc) {
1760 		struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i);
1761 
1762 		if (d)
1763 			xive_debug_show_irq(m, d);
1764 	}
1765 	return 0;
1766 }
1767 DEFINE_SHOW_ATTRIBUTE(xive_core_debug);
1768 
1769 int xive_core_debug_init(void)
1770 {
1771 	if (xive_enabled())
1772 		debugfs_create_file("xive", 0400, powerpc_debugfs_root,
1773 				    NULL, &xive_core_debug_fops);
1774 	return 0;
1775 }
1776