xref: /openbmc/linux/arch/powerpc/sysdev/xive/common.c (revision 675aaf05)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright 2016,2017 IBM Corporation.
4  */
5 
6 #define pr_fmt(fmt) "xive: " fmt
7 
8 #include <linux/types.h>
9 #include <linux/threads.h>
10 #include <linux/kernel.h>
11 #include <linux/irq.h>
12 #include <linux/debugfs.h>
13 #include <linux/smp.h>
14 #include <linux/interrupt.h>
15 #include <linux/seq_file.h>
16 #include <linux/init.h>
17 #include <linux/cpu.h>
18 #include <linux/of.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/msi.h>
22 
23 #include <asm/prom.h>
24 #include <asm/io.h>
25 #include <asm/smp.h>
26 #include <asm/machdep.h>
27 #include <asm/irq.h>
28 #include <asm/errno.h>
29 #include <asm/xive.h>
30 #include <asm/xive-regs.h>
31 #include <asm/xmon.h>
32 
33 #include "xive-internal.h"
34 
35 #undef DEBUG_FLUSH
36 #undef DEBUG_ALL
37 
38 #ifdef DEBUG_ALL
39 #define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
40 					 smp_processor_id(), ## __VA_ARGS__)
41 #else
42 #define DBG_VERBOSE(fmt...)	do { } while(0)
43 #endif
44 
45 bool __xive_enabled;
46 EXPORT_SYMBOL_GPL(__xive_enabled);
47 bool xive_cmdline_disabled;
48 
49 /* We use only one priority for now */
50 static u8 xive_irq_priority;
51 
52 /* TIMA exported to KVM */
53 void __iomem *xive_tima;
54 EXPORT_SYMBOL_GPL(xive_tima);
55 u32 xive_tima_offset;
56 
57 /* Backend ops */
58 static const struct xive_ops *xive_ops;
59 
60 /* Our global interrupt domain */
61 static struct irq_domain *xive_irq_domain;
62 
63 #ifdef CONFIG_SMP
64 /* The IPIs all use the same logical irq number */
65 static u32 xive_ipi_irq;
66 #endif
67 
68 /* Xive state for each CPU */
69 static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);
70 
71 /*
72  * A "disabled" interrupt should never fire; to catch problems
73  * we set its logical number to this
74  */
75 #define XIVE_BAD_IRQ		0x7fffffff
76 #define XIVE_MAX_IRQ		(XIVE_BAD_IRQ - 1)
77 
78 /* An invalid CPU target */
79 #define XIVE_INVALID_TARGET	(-1)
80 
81 /*
82  * Read the next entry in a queue and return its content if it's valid
83  * or 0 if there is no new entry.
84  *
85  * The queue pointer is moved forward unless "just_peek" is set
86  */
87 static u32 xive_read_eq(struct xive_q *q, bool just_peek)
88 {
89 	u32 cur;
90 
91 	if (!q->qpage)
92 		return 0;
93 	cur = be32_to_cpup(q->qpage + q->idx);
94 
95 	/* Check valid bit (31) vs current toggle polarity */
96 	if ((cur >> 31) == q->toggle)
97 		return 0;
98 
99 	/* If consuming from the queue ... */
100 	if (!just_peek) {
101 		/* Next entry */
102 		q->idx = (q->idx + 1) & q->msk;
103 
104 		/* Wrap around: flip valid toggle */
105 		if (q->idx == 0)
106 			q->toggle ^= 1;
107 	}
108 	/* Mask out the valid bit (31) */
109 	return cur & 0x7fffffff;
110 }
111 
112 /*
113  * Scans all the queues that may have interrupts in them
114  * (based on "pending_prio") in priority order until an
115  * interrupt is found or all the queues are empty.
116  *
117  * Then updates the CPPR (Current Processor Priority
118  * Register) based on the most favored interrupt found
119  * (0xff if none) and returns what was found (0 if none).
120  *
121  * If just_peek is set, return the most favored pending
122  * interrupt if any but don't update the queue pointers.
123  *
124  * Note: This function can operate generically on any number
125  * of queues (up to 8). The current implementation of the XIVE
126  * driver only uses a single queue however.
127  *
128  * Note2: This will also "flush" the "pending_count" of a queue
129  * into the "count" when that queue is observed to be empty.
130  * This is used to keep track of the number of interrupts
131  * targeting a queue. When an interrupt is moved away from
132  * a queue, we only decrement that queue count once the queue
133  * has been observed empty to avoid races.
134  */
135 static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
136 {
137 	u32 irq = 0;
138 	u8 prio;
139 
140 	/* Find highest pending priority */
141 	while (xc->pending_prio != 0) {
142 		struct xive_q *q;
143 
144 		prio = ffs(xc->pending_prio) - 1;
145 		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);
146 
147 		/* Try to fetch */
148 		irq = xive_read_eq(&xc->queue[prio], just_peek);
149 
150 		/* Found something? That's it */
151 		if (irq)
152 			break;
153 
154 		/* Clear pending bits */
155 		xc->pending_prio &= ~(1 << prio);
156 
157 		/*
158 		 * Check if the queue count needs adjusting due to
159 		 * interrupts being moved away. See description of
160 		 * xive_dec_target_count()
161 		 */
162 		q = &xc->queue[prio];
163 		if (atomic_read(&q->pending_count)) {
164 			int p = atomic_xchg(&q->pending_count, 0);
165 			if (p) {
166 				WARN_ON(p > atomic_read(&q->count));
167 				atomic_sub(p, &q->count);
168 			}
169 		}
170 	}
171 
172 	/* If nothing was found, set CPPR to 0xff */
173 	if (irq == 0)
174 		prio = 0xff;
175 
176 	/* Update HW CPPR to match if necessary */
177 	if (prio != xc->cppr) {
178 		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
179 		xc->cppr = prio;
180 		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
181 	}
182 
183 	return irq;
184 }
185 
186 /*
187  * This is used to perform the magic loads from an ESB
188  * described in xive.h
189  */
190 static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
191 {
192 	u64 val;
193 
194 	/* Handle HW errata */
195 	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
196 		offset |= offset << 4;
197 
198 	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
199 		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
200 	else
201 		val = in_be64(xd->eoi_mmio + offset);
202 
203 	return (u8)val;
204 }
205 
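/* Companion to xive_esb_read(): perform a "magic" store to an ESB, e.g. for the Store EOI sequence */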
206 static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
207 {
208 	/* Handle HW errata */
209 	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
210 		offset |= offset << 4;
211 
212 	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
213 		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
214 	else
215 		out_be64(xd->eoi_mmio + offset, data);
216 }
217 
218 #ifdef CONFIG_XMON
219 static notrace void xive_dump_eq(const char *name, struct xive_q *q)
220 {
221 	u32 i0, i1, idx;
222 
223 	if (!q->qpage)
224 		return;
225 	idx = q->idx;
226 	i0 = be32_to_cpup(q->qpage + idx);
227 	idx = (idx + 1) & q->msk;
228 	i1 = be32_to_cpup(q->qpage + idx);
229 	xmon_printf("  %s Q T=%d %08x %08x ...\n", name,
230 		    q->toggle, i0, i1);
231 }
232 
233 notrace void xmon_xive_do_dump(int cpu)
234 {
235 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
236 
237 	xmon_printf("XIVE state for CPU %d:\n", cpu);
238 	xmon_printf("  pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr);
239 	xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]);
240 #ifdef CONFIG_SMP
241 	{
242 		u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
243 		xmon_printf("  IPI state: %x:%c%c\n", xc->hw_ipi,
244 			val & XIVE_ESB_VAL_P ? 'P' : 'p',
245 			val & XIVE_ESB_VAL_Q ? 'Q' : 'q');
246 	}
247 #endif
248 }
249 #endif /* CONFIG_XMON */
250 
251 static unsigned int xive_get_irq(void)
252 {
253 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
254 	u32 irq;
255 
256 	/*
257 	 * This can be called either as a result of a HW interrupt or
258 	 * as a "replay" because EOI decided there was still something
259 	 * in one of the queues.
260 	 *
261 	 * First we perform an ACK cycle in order to update our mask
262 	 * of pending priorities. This will also have the effect of
263 	 * updating the CPPR to the most favored pending priority.
264 	 *
265 	 * In the future, if we have a way to differentiate a first
266 	 * entry (on HW interrupt) from a replay triggered by EOI,
267 	 * we could skip this on replays unless soft-masking tells us
268 	 * that a new HW interrupt occurred.
269 	 */
270 	xive_ops->update_pending(xc);
271 
272 	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);
273 
274 	/* Scan our queue(s) for interrupts */
275 	irq = xive_scan_interrupts(xc, false);
276 
277 	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
278 	    irq, xc->pending_prio);
279 
280 	/* Return pending interrupt if any */
281 	if (irq == XIVE_BAD_IRQ)
282 		return 0;
283 	return irq;
284 }
285 
286 /*
287  * After EOI'ing an interrupt, we need to re-check the queue
288  * to see if another interrupt is pending since multiple
289  * interrupts can coalesce into a single notification to the
290  * CPU.
291  *
292  * If we find that there is indeed more in there, we call
293  * force_external_irq_replay() to make Linux synthesize an
294  * external interrupt on the next call to local_irq_restore().
295  */
296 static void xive_do_queue_eoi(struct xive_cpu *xc)
297 {
298 	if (xive_scan_interrupts(xc, true) != 0) {
299 		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
300 		force_external_irq_replay();
301 	}
302 }
303 
304 /*
305  * EOI an interrupt at the source. There are several methods
306  * to do this depending on the HW version and source type
307  */
308 static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
309 {
310 	/* If the XIVE supports the new "store EOI" facility, use it */
311 	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
312 		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
313 	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
314 		/*
315 		 * The FW told us to call it. This happens for some
316 		 * interrupt sources that need additional HW whacking
317 		 * beyond the ESB manipulation. For example, LPC interrupts
318 		 * on P9 DD1.0 needed a latch to be cleared in the LPC bridge
319 		 * itself. The Firmware will take care of it.
320 		 */
321 		if (WARN_ON_ONCE(!xive_ops->eoi))
322 			return;
323 		xive_ops->eoi(hw_irq);
324 	} else {
325 		u8 eoi_val;
326 
327 		/*
328 		 * Otherwise for EOI, we use the special MMIO that does
329 		 * a clear of both P and Q and returns the old Q,
330 		 * except for LSIs where we use the "EOI cycle" special
331 		 * load.
332 		 *
333 		 * This allows us to then do a re-trigger if Q was set
334 		 * rather than synthesizing an interrupt in software
335 		 *
336 		 * For LSIs the HW EOI cycle is used rather than PQ bits,
337 		 * as they are automatically re-triggered in HW when still
338 		 * pending.
339 		 */
340 		if (xd->flags & XIVE_IRQ_FLAG_LSI)
341 			xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
342 		else {
343 			eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
344 			DBG_VERBOSE("eoi_val=%x\n", eoi_val);
345 
346 			/* Re-trigger if needed */
347 			if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
348 				out_be64(xd->trig_mmio, 0);
349 		}
350 	}
351 }
352 
353 /* irq_chip eoi callback */
354 static void xive_irq_eoi(struct irq_data *d)
355 {
356 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
357 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
358 
359 	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
360 		    d->irq, irqd_to_hwirq(d), xc->pending_prio);
361 
362 	/*
363 	 * EOI the source if it hasn't been disabled and hasn't
364 	 * been passed-through to a KVM guest
365 	 */
366 	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
367 	    !(xd->flags & XIVE_IRQ_NO_EOI))
368 		xive_do_source_eoi(irqd_to_hwirq(d), xd);
369 
370 	/*
371 	 * Clear saved_p to indicate that it's no longer occupying
372 	 * a queue slot on the target queue
373 	 */
374 	xd->saved_p = false;
375 
376 	/* Check for more work in the queue */
377 	xive_do_queue_eoi(xc);
378 }
379 
380 /*
381  * Helper used to mask and unmask an interrupt source. This
382  * is only called for normal interrupts that do not require
383  * masking/unmasking via firmware.
384  */
385 static void xive_do_source_set_mask(struct xive_irq_data *xd,
386 				    bool mask)
387 {
388 	u64 val;
389 
390 	/*
391 	 * If the interrupt had P set, it may be in a queue.
392 	 *
393 	 * We need to make sure we don't re-enable it until it
394 	 * has been fetched from that queue and EOId. We keep
395 	 * a copy of that P state and use it to restore the
396 	 * ESB accordingly on unmask.
397 	 */
398 	if (mask) {
399 		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
400 		xd->saved_p = !!(val & XIVE_ESB_VAL_P);
401 	} else if (xd->saved_p)
402 		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
403 	else
404 		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
405 }
406 
407 /*
408  * Try to choose "cpu" as a new interrupt target. Increments
409  * the queue accounting for that target if it's not already
410  * full.
411  */
412 static bool xive_try_pick_target(int cpu)
413 {
414 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
415 	struct xive_q *q = &xc->queue[xive_irq_priority];
416 	int max;
417 
418 	/*
419 	 * Calculate max number of interrupts in that queue.
420 	 *
421 	 * We leave a gap of 1 just in case...
422 	 */
423 	max = (q->msk + 1) - 1;
424 	return !!atomic_add_unless(&q->count, 1, max);
425 }
426 
427 /*
428  * Un-account an interrupt for a target CPU. We don't directly
429  * decrement q->count since the interrupt might still be present
430  * in the queue.
431  *
432  * Instead increment a separate counter "pending_count" which
433  * will be subtracted from "count" later when that CPU observes
434  * the queue to be empty.
435  */
436 static void xive_dec_target_count(int cpu)
437 {
438 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
439 	struct xive_q *q = &xc->queue[xive_irq_priority];
440 
441 	if (WARN_ON(cpu < 0 || !xc)) {
442 		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
443 		return;
444 	}
445 
446 	/*
447 	 * We increment the "pending count" which will be used
448 	 * to decrement the target queue count whenever it's next
449 	 * processed and found empty. This ensures that we don't
450 	 * decrement while we still have the interrupt there
451 	 * occupying a slot.
452 	 */
453 	atomic_inc(&q->pending_count);
454 }
455 
456 /* Find a tentative CPU target in a CPU mask */
457 static int xive_find_target_in_mask(const struct cpumask *mask,
458 				    unsigned int fuzz)
459 {
460 	int cpu, first, num, i;
461 
462 	/* Pick a starting point CPU in the mask based on fuzz */
463 	num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
464 	first = fuzz % num;
465 
466 	/* Locate it */
467 	cpu = cpumask_first(mask);
468 	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
469 		cpu = cpumask_next(cpu, mask);
470 
471 	/* Sanity check */
472 	if (WARN_ON(cpu >= nr_cpu_ids))
473 		cpu = cpumask_first(cpu_online_mask);
474 
475 	/* Remember first one to handle wrap-around */
476 	first = cpu;
477 
478 	/*
479 	 * Now go through the entire mask until we find a valid
480 	 * target.
481 	 */
482 	for (;;) {
483 		/*
484 		 * We re-check online as the fallback case passes us
485 		 * an untested affinity mask
486 		 */
487 		if (cpu_online(cpu) && xive_try_pick_target(cpu))
488 			return cpu;
489 		cpu = cpumask_next(cpu, mask);
490 		if (cpu == first)
491 			break;
492 		/* Wrap around */
493 		if (cpu >= nr_cpu_ids)
494 			cpu = cpumask_first(mask);
495 	}
496 	return -1;
497 }
498 
499 /*
500  * Pick a target CPU for an interrupt. This is done at
501  * startup or if the affinity is changed in a way that
502  * invalidates the current target.
503  */
504 static int xive_pick_irq_target(struct irq_data *d,
505 				const struct cpumask *affinity)
506 {
507 	static unsigned int fuzz;
508 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
509 	cpumask_var_t mask;
510 	int cpu = -1;
511 
512 	/*
513 	 * If we have chip IDs, first we try to build a mask of
514 	 * CPUs on the same chip as the source and find a target in there
515 	 */
516 	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
517 		zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
518 		/* Build a mask of matching chip IDs */
519 		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
520 			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
521 			if (xc->chip_id == xd->src_chip)
522 				cpumask_set_cpu(cpu, mask);
523 		}
524 		/* Try to find a target */
525 		if (cpumask_empty(mask))
526 			cpu = -1;
527 		else
528 			cpu = xive_find_target_in_mask(mask, fuzz++);
529 		free_cpumask_var(mask);
530 		if (cpu >= 0)
531 			return cpu;
532 		fuzz--;
533 	}
534 
535 	/* No chip IDs, fallback to using the affinity mask */
536 	return xive_find_target_in_mask(affinity, fuzz++);
537 }
538 
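/* irq_chip startup callback: pick a target CPU, route the interrupt to it and unmask the source ESB */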
539 static unsigned int xive_irq_startup(struct irq_data *d)
540 {
541 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
542 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
543 	int target, rc;
544 
545 	pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
546 		 d->irq, hw_irq, d);
547 
548 #ifdef CONFIG_PCI_MSI
549 	/*
550 	 * The generic MSI code returns with the interrupt disabled on the
551 	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
552 	 * at that level, so we do it here by hand.
553 	 */
554 	if (irq_data_get_msi_desc(d))
555 		pci_msi_unmask_irq(d);
556 #endif
557 
558 	/* Pick a target */
559 	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
560 	if (target == XIVE_INVALID_TARGET) {
561 		/* Try again breaking affinity */
562 		target = xive_pick_irq_target(d, cpu_online_mask);
563 		if (target == XIVE_INVALID_TARGET)
564 			return -ENXIO;
565 		pr_warn("irq %d started with broken affinity\n", d->irq);
566 	}
567 
568 	/* Sanity check */
569 	if (WARN_ON(target == XIVE_INVALID_TARGET ||
570 		    target >= nr_cpu_ids))
571 		target = smp_processor_id();
572 
573 	xd->target = target;
574 
575 	/*
576 	 * Configure the logical number to be the Linux IRQ number
577 	 * and set the target queue
578 	 */
579 	rc = xive_ops->configure_irq(hw_irq,
580 				     get_hard_smp_processor_id(target),
581 				     xive_irq_priority, d->irq);
582 	if (rc)
583 		return rc;
584 
585 	/* Unmask the ESB */
586 	xive_do_source_set_mask(xd, false);
587 
588 	return 0;
589 }
590 
591 static void xive_irq_shutdown(struct irq_data *d)
592 {
593 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
594 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
595 
596 	pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
597 		 d->irq, hw_irq, d);
598 
599 	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
600 		return;
601 
602 	/* Mask the interrupt at the source */
603 	xive_do_source_set_mask(xd, true);
604 
605 	/*
606 	 * The above may have set saved_p. We clear it, otherwise it
607 	 * will prevent re-enabling later on. It is ok to forget the
608 	 * fact that the interrupt might be in a queue because we are
609 	 * accounting that already in xive_dec_target_count() and will
610 	 * be re-routing it to a new queue with proper accounting when
611 	 * it's started up again
612 	 */
613 	xd->saved_p = false;
614 
615 	/*
616 	 * Mask the interrupt in HW in the IVT/EAS and set the number
617 	 * to be the "bad" IRQ number
618 	 */
619 	xive_ops->configure_irq(hw_irq,
620 				get_hard_smp_processor_id(xd->target),
621 				0xff, XIVE_BAD_IRQ);
622 
623 	xive_dec_target_count(xd->target);
624 	xd->target = XIVE_INVALID_TARGET;
625 }
626 
627 static void xive_irq_unmask(struct irq_data *d)
628 {
629 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
630 
631 	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);
632 
633 	/*
634 	 * This is a workaround for PCI LSI problems on P9; for
635 	 * these, we call FW to set the mask. The problems might
636 	 * be fixed by P9 DD2.0; if that is the case, firmware
637 	 * will no longer set that flag.
638 	 */
639 	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
640 		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
641 		xive_ops->configure_irq(hw_irq,
642 					get_hard_smp_processor_id(xd->target),
643 					xive_irq_priority, d->irq);
644 		return;
645 	}
646 
647 	xive_do_source_set_mask(xd, false);
648 }
649 
650 static void xive_irq_mask(struct irq_data *d)
651 {
652 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
653 
654 	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);
655 
656 	/*
657 	 * This is a workaround for PCI LSI problems on P9; for
658 	 * these, we call OPAL to set the mask. The problems might
659 	 * be fixed by P9 DD2.0; if that is the case, firmware
660 	 * will no longer set that flag.
661 	 */
662 	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
663 		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
664 		xive_ops->configure_irq(hw_irq,
665 					get_hard_smp_processor_id(xd->target),
666 					0xff, d->irq);
667 		return;
668 	}
669 
670 	xive_do_source_set_mask(xd, true);
671 }
672 
673 static int xive_irq_set_affinity(struct irq_data *d,
674 				 const struct cpumask *cpumask,
675 				 bool force)
676 {
677 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
678 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
679 	u32 target, old_target;
680 	int rc = 0;
681 
682 	pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);
683 
684 	/* Is this valid ? */
685 	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
686 		return -EINVAL;
687 
688 	/* Don't do anything if the interrupt isn't started */
689 	if (!irqd_is_started(d))
690 		return IRQ_SET_MASK_OK;
691 
692 	/*
693 	 * If the existing target is already in the new mask and is
694 	 * online, then do nothing.
695 	 */
696 	if (xd->target != XIVE_INVALID_TARGET &&
697 	    cpu_online(xd->target) &&
698 	    cpumask_test_cpu(xd->target, cpumask))
699 		return IRQ_SET_MASK_OK;
700 
701 	/* Pick a new target */
702 	target = xive_pick_irq_target(d, cpumask);
703 
704 	/* No target found */
705 	if (target == XIVE_INVALID_TARGET)
706 		return -ENXIO;
707 
708 	/* Sanity check */
709 	if (WARN_ON(target >= nr_cpu_ids))
710 		target = smp_processor_id();
711 
712 	old_target = xd->target;
713 
714 	/*
715 	 * Only configure the irq if it's not currently passed-through to
716 	 * a KVM guest
717 	 */
718 	if (!irqd_is_forwarded_to_vcpu(d))
719 		rc = xive_ops->configure_irq(hw_irq,
720 					     get_hard_smp_processor_id(target),
721 					     xive_irq_priority, d->irq);
722 	if (rc < 0) {
723 		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
724 		return rc;
725 	}
726 
727 	pr_devel("  target: 0x%x\n", target);
728 	xd->target = target;
729 
730 	/* Give up previous target */
731 	if (old_target != XIVE_INVALID_TARGET)
732 	    xive_dec_target_count(old_target);
733 
734 	return IRQ_SET_MASK_OK;
735 }
736 
737 static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
738 {
739 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
740 
741 	/*
742 	 * We only support these. This has really no effect other than setting
743 	 * the corresponding descriptor bits, but those will in turn
744 	 * affect the resend function when re-enabling an edge interrupt.
745 	 *
746 	 * We set the default to edge as explained in map().
747 	 */
748 	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
749 		flow_type = IRQ_TYPE_EDGE_RISING;
750 
751 	if (flow_type != IRQ_TYPE_EDGE_RISING &&
752 	    flow_type != IRQ_TYPE_LEVEL_LOW)
753 		return -EINVAL;
754 
755 	irqd_set_trigger_type(d, flow_type);
756 
757 	/*
758 	 * Double check it matches what the FW thinks
759 	 *
760 	 * NOTE: We don't know yet if the PAPR interface will provide
761 	 * the LSI vs MSI information apart from the device-tree so
762 	 * this check might have to move into an optional backend call
763 	 * that is specific to the native backend
764 	 */
765 	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
766 	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
767 		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
768 			d->irq, (u32)irqd_to_hwirq(d),
769 			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
770 			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
771 	}
772 
773 	return IRQ_SET_MASK_OK_NOCOPY;
774 }
775 
776 static int xive_irq_retrigger(struct irq_data *d)
777 {
778 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
779 
780 	/* This should be only for MSIs */
781 	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
782 		return 0;
783 
784 	/*
785 	 * To perform a retrigger, we first set the PQ bits to
786 	 * 11, then perform an EOI.
787 	 */
788 	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
789 
790 	/*
791 	 * Note: We pass "0" to the hw_irq argument in order to
792 	 * avoid calling into the backend EOI code which we don't
793 	 * want to do in the case of a re-trigger. Backends typically
794 	 * only do EOI for LSIs anyway.
795 	 */
796 	xive_do_source_eoi(0, xd);
797 
798 	return 1;
799 }
800 
801 static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
802 {
803 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
804 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
805 	int rc;
806 	u8 pq;
807 
808 	/*
809 	 * We only support this on interrupts that do not require
810 	 * firmware calls for masking and unmasking
811 	 */
812 	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
813 		return -EIO;
814 
815 	/*
816 	 * This is called by KVM with state non-NULL for enabling
817 	 * pass-through or NULL for disabling it
818 	 */
819 	if (state) {
820 		irqd_set_forwarded_to_vcpu(d);
821 
822 		/* Set it to PQ=10 state to prevent further sends */
823 		pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
824 
825 		/* No target? Nothing to do */
826 		if (xd->target == XIVE_INVALID_TARGET) {
827 			/*
828 			 * An untargeted interrupt should also have been
829 			 * masked at the source
830 			 */
831 			WARN_ON(pq & 2);
832 
833 			return 0;
834 		}
835 
836 		/*
837 		 * If P was set, adjust state to PQ=11 to indicate
838 		 * that a resend is needed for the interrupt to reach
839 		 * the guest. Also remember the value of P.
840 		 *
841 		 * This also tells us that it's in flight to a host queue
842 		 * or has already been fetched but hasn't been EOIed yet
843 		 * by the host. Thus it's potentially using up a host
844 		 * queue slot. This is important to know because as long
845 		 * as this is the case, we must not hard-unmask it when
846 		 * "returning" that interrupt to the host.
847 		 *
848 		 * This saved_p is cleared by the host EOI, when we know
849 		 * for sure the queue slot is no longer in use.
850 		 */
851 		if (pq & 2) {
852 			pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
853 			xd->saved_p = true;
854 
855 			/*
856 			 * Sync the XIVE source HW to ensure the interrupt
857 			 * has gone through the EAS before we change its
858 			 * target to the guest. That should guarantee us
859 			 * that we *will* eventually get an EOI for it on
860 			 * the host. Otherwise there would be a small window
861 			 * where P is seen here but the interrupt still goes
862 			 * to the guest queue.
863 			 */
864 			if (xive_ops->sync_source)
865 				xive_ops->sync_source(hw_irq);
866 		} else
867 			xd->saved_p = false;
868 	} else {
869 		irqd_clr_forwarded_to_vcpu(d);
870 
871 		/* No host target? Hard mask and return */
872 		if (xd->target == XIVE_INVALID_TARGET) {
873 			xive_do_source_set_mask(xd, true);
874 			return 0;
875 		}
876 
877 		/*
878 		 * Sync the XIVE source HW to ensure the interrupt
879 		 * has gone through the EAS before we change its
880 		 * target to the host.
881 		 */
882 		if (xive_ops->sync_source)
883 			xive_ops->sync_source(hw_irq);
884 
885 		/*
886 		 * By convention we are called with the interrupt in
887 		 * a PQ=10 or PQ=11 state, ie, it won't fire and will
888 		 * have latched in Q whether there's a pending HW
889 		 * interrupt or not.
890 		 *
891 		 * First reconfigure the target.
892 		 */
893 		rc = xive_ops->configure_irq(hw_irq,
894 					     get_hard_smp_processor_id(xd->target),
895 					     xive_irq_priority, d->irq);
896 		if (rc)
897 			return rc;
898 
899 		/*
900 		 * Then if saved_p is not set, effectively re-enable the
901 		 * interrupt with an EOI. If it is set, we know there is
902 		 * still a message in a host queue somewhere that will be
903 		 * EOId eventually.
904 		 *
905 		 * Note: We don't check irqd_irq_disabled(). Effectively,
906 		 * we *will* let the irq get through even if masked if the
907 		 * HW is still firing it in order to deal with the whole
908 		 * saved_p business properly. If the interrupt triggers
909 		 * while masked, the generic code will re-mask it anyway.
910 		 */
911 		if (!xd->saved_p)
912 			xive_do_source_eoi(hw_irq, xd);
913 
914 	}
915 	return 0;
916 }
917 
918 static struct irq_chip xive_irq_chip = {
919 	.name = "XIVE-IRQ",
920 	.irq_startup = xive_irq_startup,
921 	.irq_shutdown = xive_irq_shutdown,
922 	.irq_eoi = xive_irq_eoi,
923 	.irq_mask = xive_irq_mask,
924 	.irq_unmask = xive_irq_unmask,
925 	.irq_set_affinity = xive_irq_set_affinity,
926 	.irq_set_type = xive_irq_set_type,
927 	.irq_retrigger = xive_irq_retrigger,
928 	.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
929 };
930 
931 bool is_xive_irq(struct irq_chip *chip)
932 {
933 	return chip == &xive_irq_chip;
934 }
935 EXPORT_SYMBOL_GPL(is_xive_irq);
936 
937 void xive_cleanup_irq_data(struct xive_irq_data *xd)
938 {
939 	if (xd->eoi_mmio) {
940 		iounmap(xd->eoi_mmio);
941 		if (xd->eoi_mmio == xd->trig_mmio)
942 			xd->trig_mmio = NULL;
943 		xd->eoi_mmio = NULL;
944 	}
945 	if (xd->trig_mmio) {
946 		iounmap(xd->trig_mmio);
947 		xd->trig_mmio = NULL;
948 	}
949 }
950 EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
951 
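/* Allocate and populate the XIVE per-interrupt data for a Linux interrupt */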
952 static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
953 {
954 	struct xive_irq_data *xd;
955 	int rc;
956 
957 	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
958 	if (!xd)
959 		return -ENOMEM;
960 	rc = xive_ops->populate_irq_data(hw, xd);
961 	if (rc) {
962 		kfree(xd);
963 		return rc;
964 	}
965 	xd->target = XIVE_INVALID_TARGET;
966 	irq_set_handler_data(virq, xd);
967 
968 	return 0;
969 }
970 
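/* Detach, clean up and free the XIVE per-interrupt data of a Linux interrupt */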
971 static void xive_irq_free_data(unsigned int virq)
972 {
973 	struct xive_irq_data *xd = irq_get_handler_data(virq);
974 
975 	if (!xd)
976 		return;
977 	irq_set_handler_data(virq, NULL);
978 	xive_cleanup_irq_data(xd);
979 	kfree(xd);
980 }
981 
982 #ifdef CONFIG_SMP
983 
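/* Send an IPI to "cpu" by writing to the trigger page of its IPI source */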
984 static void xive_cause_ipi(int cpu)
985 {
986 	struct xive_cpu *xc;
987 	struct xive_irq_data *xd;
988 
989 	xc = per_cpu(xive_cpu, cpu);
990 
991 	DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
992 		    smp_processor_id(), cpu, xc->hw_ipi);
993 
994 	xd = &xc->ipi_data;
995 	if (WARN_ON(!xd->trig_mmio))
996 		return;
997 	out_be64(xd->trig_mmio, 0);
998 }
999 
1000 static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
1001 {
1002 	return smp_ipi_demux();
1003 }
1004 
1005 static void xive_ipi_eoi(struct irq_data *d)
1006 {
1007 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1008 
1009 	/* Handle possible race with unplug and drop stale IPIs */
1010 	if (!xc)
1011 		return;
1012 
1013 	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
1014 		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
1015 
1016 	xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
1017 	xive_do_queue_eoi(xc);
1018 }
1019 
1020 static void xive_ipi_do_nothing(struct irq_data *d)
1021 {
1022 	/*
1023 	 * Nothing to do, we never mask/unmask IPIs, but the callback
1024 	 * has to exist for the struct irq_chip.
1025 	 */
1026 }
1027 
1028 static struct irq_chip xive_ipi_chip = {
1029 	.name = "XIVE-IPI",
1030 	.irq_eoi = xive_ipi_eoi,
1031 	.irq_mask = xive_ipi_do_nothing,
1032 	.irq_unmask = xive_ipi_do_nothing,
1033 };
1034 
1035 static void __init xive_request_ipi(void)
1036 {
1037 	unsigned int virq;
1038 
1039 	/*
1040 	 * If initialization failed, move on; we might manage to
1041 	 * reach the point where we display our errors before
1042 	 * the system falls apart.
1043 	 */
1044 	if (!xive_irq_domain)
1045 		return;
1046 
1047 	/* Initialize it */
1048 	virq = irq_create_mapping(xive_irq_domain, 0);
1049 	xive_ipi_irq = virq;
1050 
1051 	WARN_ON(request_irq(virq, xive_muxed_ipi_action,
1052 			    IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
1053 }
1054 
1055 static int xive_setup_cpu_ipi(unsigned int cpu)
1056 {
1057 	struct xive_cpu *xc;
1058 	int rc;
1059 
1060 	pr_debug("Setting up IPI for CPU %d\n", cpu);
1061 
1062 	xc = per_cpu(xive_cpu, cpu);
1063 
1064 	/* Check if we are already set up */
1065 	if (xc->hw_ipi != 0)
1066 		return 0;
1067 
1068 	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
1069 	if (xive_ops->get_ipi(cpu, xc))
1070 		return -EIO;
1071 
1072 	/*
1073 	 * Populate the IRQ data in the xive_cpu structure and
1074 	 * configure the HW / enable the IPIs.
1075 	 */
1076 	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
1077 	if (rc) {
1078 		pr_err("Failed to populate IPI data on CPU %d\n", cpu);
1079 		return -EIO;
1080 	}
1081 	rc = xive_ops->configure_irq(xc->hw_ipi,
1082 				     get_hard_smp_processor_id(cpu),
1083 				     xive_irq_priority, xive_ipi_irq);
1084 	if (rc) {
1085 		pr_err("Failed to map IPI CPU %d\n", cpu);
1086 		return -EIO;
1087 	}
1088 	pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
1089 	    xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);
1090 
1091 	/* Unmask it */
1092 	xive_do_source_set_mask(&xc->ipi_data, false);
1093 
1094 	return 0;
1095 }
1096 
1097 static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
1098 {
1099 	/* Disable the IPI and free the IRQ data */
1100 
1101 	/* Already cleaned up? */
1102 	if (xc->hw_ipi == 0)
1103 		return;
1104 
1105 	/* Mask the IPI */
1106 	xive_do_source_set_mask(&xc->ipi_data, true);
1107 
1108 	/*
1109 	 * Note: We don't call xive_cleanup_irq_data() to free
1110 	 * the mappings as this is called from an IPI on kexec
1111 	 * which is not a safe environment to call iounmap()
1112 	 */
1113 
1114 	/* Deconfigure/mask in the backend */
1115 	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
1116 				0xff, xive_ipi_irq);
1117 
1118 	/* Free the IPIs in the backend */
1119 	xive_ops->put_ipi(cpu, xc);
1120 }
1121 
1122 void __init xive_smp_probe(void)
1123 {
1124 	smp_ops->cause_ipi = xive_cause_ipi;
1125 
1126 	/* Register the IPI */
1127 	xive_request_ipi();
1128 
1129 	/* Allocate and setup IPI for the boot CPU */
1130 	xive_setup_cpu_ipi(smp_processor_id());
1131 }
1132 
1133 #endif /* CONFIG_SMP */
1134 
1135 static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
1136 			       irq_hw_number_t hw)
1137 {
1138 	int rc;
1139 
1140 	/*
1141 	 * Mark interrupts as edge sensitive by default so that resend
1142 	 * actually works. Will fix that up below if needed.
1143 	 */
1144 	irq_clear_status_flags(virq, IRQ_LEVEL);
1145 
1146 #ifdef CONFIG_SMP
1147 	/* IPIs are special and come up with HW number 0 */
1148 	if (hw == 0) {
1149 		/*
1150 		 * IPIs are marked per-cpu. We use separate HW interrupts under
1151 		 * the hood but associated with the same "linux" interrupt
1152 		 */
1153 		irq_set_chip_and_handler(virq, &xive_ipi_chip,
1154 					 handle_percpu_irq);
1155 		return 0;
1156 	}
1157 #endif
1158 
1159 	rc = xive_irq_alloc_data(virq, hw);
1160 	if (rc)
1161 		return rc;
1162 
1163 	irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);
1164 
1165 	return 0;
1166 }
1167 
1168 static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
1169 {
1170 	struct irq_data *data = irq_get_irq_data(virq);
1171 	unsigned int hw_irq;
1172 
1173 	/* XXX Assign BAD number */
1174 	if (!data)
1175 		return;
1176 	hw_irq = (unsigned int)irqd_to_hwirq(data);
1177 	if (hw_irq)
1178 		xive_irq_free_data(virq);
1179 }
1180 
1181 static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
1182 				 const u32 *intspec, unsigned int intsize,
1183 				 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
1184 
1185 {
1186 	*out_hwirq = intspec[0];
1187 
1188 	/*
1189 	 * If intsize is at least 2, we look for the type in the second cell;
1190 	 * we assume the LSB indicates a level interrupt.
1191 	 */
1192 	if (intsize > 1) {
1193 		if (intspec[1] & 1)
1194 			*out_flags = IRQ_TYPE_LEVEL_LOW;
1195 		else
1196 			*out_flags = IRQ_TYPE_EDGE_RISING;
1197 	} else
1198 		*out_flags = IRQ_TYPE_LEVEL_LOW;
1199 
1200 	return 0;
1201 }
1202 
1203 static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
1204 				 enum irq_domain_bus_token bus_token)
1205 {
1206 	return xive_ops->match(node);
1207 }
1208 
1209 static const struct irq_domain_ops xive_irq_domain_ops = {
1210 	.match = xive_irq_domain_match,
1211 	.map = xive_irq_domain_map,
1212 	.unmap = xive_irq_domain_unmap,
1213 	.xlate = xive_irq_domain_xlate,
1214 };
1215 
1216 static void __init xive_init_host(void)
1217 {
1218 	xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
1219 					       &xive_irq_domain_ops, NULL);
1220 	if (WARN_ON(xive_irq_domain == NULL))
1221 		return;
1222 	irq_set_default_host(xive_irq_domain);
1223 }
1224 
1225 static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
1226 {
1227 	if (xc->queue[xive_irq_priority].qpage)
1228 		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
1229 }
1230 
1231 static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
1232 {
1233 	int rc = 0;
1234 
1235 	/* We set up one queue for now, using a 64k page */
1236 	if (!xc->queue[xive_irq_priority].qpage)
1237 		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);
1238 
1239 	return rc;
1240 }
1241 
1242 static int xive_prepare_cpu(unsigned int cpu)
1243 {
1244 	struct xive_cpu *xc;
1245 
1246 	xc = per_cpu(xive_cpu, cpu);
1247 	if (!xc) {
1248 		struct device_node *np;
1249 
1250 		xc = kzalloc_node(sizeof(struct xive_cpu),
1251 				  GFP_KERNEL, cpu_to_node(cpu));
1252 		if (!xc)
1253 			return -ENOMEM;
1254 		np = of_get_cpu_node(cpu, NULL);
1255 		if (np)
1256 			xc->chip_id = of_get_ibm_chip_id(np);
1257 		of_node_put(np);
1258 
1259 		per_cpu(xive_cpu, cpu) = xc;
1260 	}
1261 
1262 	/* Setup EQs if not already */
1263 	return xive_setup_cpu_queues(cpu, xc);
1264 }
1265 
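/* Per-CPU setup: let the backend do its part, then set CPPR to 0xff to open the flow of interrupts */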
1266 static void xive_setup_cpu(void)
1267 {
1268 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1269 
1270 	/* The backend might have additional things to do */
1271 	if (xive_ops->setup_cpu)
1272 		xive_ops->setup_cpu(smp_processor_id(), xc);
1273 
1274 	/* Set CPPR to 0xff to enable flow of interrupts */
1275 	xc->cppr = 0xff;
1276 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
1277 }
1278 
1279 #ifdef CONFIG_SMP
1280 void xive_smp_setup_cpu(void)
1281 {
1282 	pr_devel("SMP setup CPU %d\n", smp_processor_id());
1283 
1284 	/* This will have already been done on the boot CPU */
1285 	if (smp_processor_id() != boot_cpuid)
1286 		xive_setup_cpu();
1287 
1288 }
1289 
1290 int xive_smp_prepare_cpu(unsigned int cpu)
1291 {
1292 	int rc;
1293 
1294 	/* Allocate per-CPU data and queues */
1295 	rc = xive_prepare_cpu(cpu);
1296 	if (rc)
1297 		return rc;
1298 
1299 	/* Allocate and setup IPI for the new CPU */
1300 	return xive_setup_cpu_ipi(cpu);
1301 }
1302 
1303 #ifdef CONFIG_HOTPLUG_CPU
1304 static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
1305 {
1306 	u32 irq;
1307 
1308 	/* We assume local irqs are disabled */
1309 	WARN_ON(!irqs_disabled());
1310 
1311 	/* Check what's already in the CPU queue */
1312 	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
1313 		/*
1314 		 * We need to re-route that interrupt to its new destination.
1315 		 * First get and lock the descriptor
1316 		 */
1317 		struct irq_desc *desc = irq_to_desc(irq);
1318 		struct irq_data *d = irq_desc_get_irq_data(desc);
1319 		struct xive_irq_data *xd;
1320 		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
1321 
1322 		/*
1323 		 * Ignore anything that isn't a XIVE irq, and ignore
1324 		 * IPIs, which can simply be dropped.
1325 		 */
1326 		if (d->domain != xive_irq_domain || hw_irq == 0)
1327 			continue;
1328 
1329 		/*
1330 		 * The IRQ should have already been re-routed, it's just a
1331 		 * stale entry in the old queue, so re-trigger it in order to make
1332 		 * it reach its new destination.
1333 		 */
1334 #ifdef DEBUG_FLUSH
1335 		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
1336 			cpu, irq);
1337 #endif
1338 		raw_spin_lock(&desc->lock);
1339 		xd = irq_desc_get_handler_data(desc);
1340 
1341 		/*
1342 		 * For LSIs we EOI; this will cause a resend if it's
1343 		 * still asserted. Otherwise do an MSI retrigger.
1344 		 */
1345 		if (xd->flags & XIVE_IRQ_FLAG_LSI)
1346 			xive_do_source_eoi(irqd_to_hwirq(d), xd);
1347 		else
1348 			xive_irq_retrigger(d);
1349 
1350 		raw_spin_unlock(&desc->lock);
1351 	}
1352 }
1353 
1354 void xive_smp_disable_cpu(void)
1355 {
1356 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1357 	unsigned int cpu = smp_processor_id();
1358 
1359 	/* Migrate interrupts away from the CPU */
1360 	irq_migrate_all_off_this_cpu();
1361 
1362 	/* Set CPPR to 0 to disable flow of interrupts */
1363 	xc->cppr = 0;
1364 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
1365 
1366 	/* Flush everything still in the queue */
1367 	xive_flush_cpu_queue(cpu, xc);
1368 
1369 	/* Re-enable CPPR  */
1370 	xc->cppr = 0xff;
1371 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
1372 }
1373 
1374 void xive_flush_interrupt(void)
1375 {
1376 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1377 	unsigned int cpu = smp_processor_id();
1378 
1379 	/* Called if an interrupt occurs while the CPU is hot unplugged */
1380 	xive_flush_cpu_queue(cpu, xc);
1381 }
1382 
1383 #endif /* CONFIG_HOTPLUG_CPU */
1384 
1385 #endif /* CONFIG_SMP */
1386 
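/* Tear down the XIVE state of the current CPU: stop the interrupt flow, release the IPI and free the queues */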
1387 void xive_teardown_cpu(void)
1388 {
1389 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1390 	unsigned int cpu = smp_processor_id();
1391 
1392 	/* Set CPPR to 0 to disable flow of interrupts */
1393 	xc->cppr = 0;
1394 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
1395 
1396 	if (xive_ops->teardown_cpu)
1397 		xive_ops->teardown_cpu(cpu, xc);
1398 
1399 #ifdef CONFIG_SMP
1400 	/* Get rid of IPI */
1401 	xive_cleanup_cpu_ipi(cpu, xc);
1402 #endif
1403 
1404 	/* Disable and free the queues */
1405 	xive_cleanup_cpu_queues(cpu, xc);
1406 }
1407 
1408 void xive_shutdown(void)
1409 {
1410 	xive_ops->shutdown();
1411 }
1412 
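/* Common core initialization, called by a platform backend with its ops, TIMA mapping, TIMA offset and the priority to use */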
1413 bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
1414 			   u8 max_prio)
1415 {
1416 	xive_tima = area;
1417 	xive_tima_offset = offset;
1418 	xive_ops = ops;
1419 	xive_irq_priority = max_prio;
1420 
1421 	ppc_md.get_irq = xive_get_irq;
1422 	__xive_enabled = true;
1423 
1424 	pr_devel("Initializing host..\n");
1425 	xive_init_host();
1426 
1427 	pr_devel("Initializing boot CPU..\n");
1428 
1429 	/* Allocate per-CPU data and queues */
1430 	xive_prepare_cpu(smp_processor_id());
1431 
1432 	/* Get ready for interrupts */
1433 	xive_setup_cpu();
1434 
1435 	pr_info("Interrupt handling initialized with %s backend\n",
1436 		xive_ops->name);
1437 	pr_info("Using priority %d for all interrupts\n", max_prio);
1438 
1439 	return true;
1440 }
1441 
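/* Allocate a zeroed, node-local buffer to back an event queue of size 1 << queue_shift */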
1442 __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
1443 {
1444 	unsigned int alloc_order;
1445 	struct page *pages;
1446 	__be32 *qpage;
1447 
1448 	alloc_order = xive_alloc_order(queue_shift);
1449 	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
1450 	if (!pages)
1451 		return ERR_PTR(-ENOMEM);
1452 	qpage = (__be32 *)page_address(pages);
1453 	memset(qpage, 0, 1 << queue_shift);
1454 
1455 	return qpage;
1456 }
1457 
1458 static int __init xive_off(char *arg)
1459 {
1460 	xive_cmdline_disabled = true;
1461 	return 0;
1462 }
1463 __setup("xive=off", xive_off);
1464