1 /*
2  * Copyright 2016 IBM Corporation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version
7  * 2 of the License, or (at your option) any later version.
8  */
9 #include <linux/types.h>
10 #include <linux/kernel.h>
11 #include <linux/irq.h>
12 #include <linux/smp.h>
13 #include <linux/interrupt.h>
14 #include <linux/cpu.h>
15 #include <linux/of.h>
16 
17 #include <asm/smp.h>
18 #include <asm/irq.h>
19 #include <asm/errno.h>
20 #include <asm/xics.h>
21 #include <asm/io.h>
22 #include <asm/opal.h>
23 
/*
 * Prepare the current CPU for going offline: clear any IPI that may
 * still be pending by writing the "no IPI" priority into its MFRR.
 */
static void icp_opal_teardown_cpu(void)
{
	/* 0xff is the least-favored priority, i.e. "no IPI pending" */
	opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
}
31 
static void icp_opal_flush_ipi(void)
{
	/*
	 * We took the IPI irq and will never return to the interrupted
	 * context, so we must EOI the IPI ourselves while leaving our
	 * priority at 0 (most favored).
	 *
	 * The EOI word encodes (CPPR << 24) | vector; here CPPR is 0.
	 *
	 * Should we check all the other interrupts too?
	 * Should we be flagging idle loop instead?
	 * Or creating some task to be scheduled?
	 */
	opal_int_eoi((0x00 << 24) | XICS_IPI);
}
44 
/*
 * Fetch the next pending interrupt from OPAL and translate it to a
 * Linux irq number. Returns 0 when there is nothing to handle (OPAL
 * error, spurious vector, or no Linux mapping for the source).
 */
static unsigned int icp_opal_get_irq(void)
{
	unsigned int xirr;
	unsigned int vec;
	unsigned int irq;
	int64_t rc;

	rc = opal_int_get_xirr(&xirr, false);
	if (rc < 0)
		return 0;
	/* OPAL hands back the XIRR big-endian; convert before decoding */
	xirr = be32_to_cpu(xirr);
	/* Low 24 bits are the interrupt vector; top byte is the CPPR */
	vec = xirr & 0x00ffffff;
	if (vec == XICS_IRQ_SPURIOUS)
		return 0;

	irq = irq_find_mapping(xics_host, vec);
	if (likely(irq)) {
		/* Record the CPPR so the matching EOI can restore it */
		xics_push_cppr(vec);
		return irq;
	}

	/* We don't have a Linux mapping, so mask the unknown vector. */
	xics_mask_unknown_vec(vec);

	/* We might learn about it later, so EOI it */
	opal_int_eoi(xirr);

	return 0;
}
74 
/*
 * Set this CPU's interrupt priority (CPPR): record the new base value
 * locally, push it to the hardware via OPAL, then iosync() to ensure
 * the update is visible before we return.
 */
static void icp_opal_set_cpu_priority(unsigned char cppr)
{
	xics_set_base_cppr(cppr);
	opal_int_set_cppr(cppr);
	iosync();
}
81 
/*
 * End-of-interrupt for an OPAL-managed source: restore the CPPR that
 * was saved when the interrupt was fetched and signal EOI in a single
 * OPAL call.
 */
static void icp_opal_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int64_t rc;

	/* Order any prior MMIO accesses before the EOI reaches OPAL */
	iosync();
	/* EOI word is (restored CPPR << 24) | vector */
	rc = opal_int_eoi((xics_pop_cppr() << 24) | hw_irq);

	/*
	 * EOI tells us whether there are more interrupts to fetch.
	 *
	 * Some HW implementations might not be able to send us another
	 * external interrupt in that case, so we force a replay.
	 */
	if (rc > 0)
		force_external_irq_replay();
}
99 
100 #ifdef CONFIG_SMP
101 
102 static void icp_opal_cause_ipi(int cpu, unsigned long data)
103 {
104 	int hw_cpu = get_hard_smp_processor_id(cpu);
105 
106 	opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
107 }
108 
109 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
110 {
111 	int hw_cpu = hard_smp_processor_id();
112 
113 	opal_int_set_mfrr(hw_cpu, 0xff);
114 
115 	return smp_ipi_demux();
116 }
117 
118 #endif /* CONFIG_SMP */
119 
/* ICP backend operations implemented via OPAL firmware calls. */
static const struct icp_ops icp_opal_ops = {
	.get_irq	= icp_opal_get_irq,
	.eoi		= icp_opal_eoi,
	.set_priority	= icp_opal_set_cpu_priority,
	.teardown_cpu	= icp_opal_teardown_cpu,
	.flush_ipi	= icp_opal_flush_ipi,
#ifdef CONFIG_SMP
	.ipi_action	= icp_opal_ipi_action,
	.cause_ipi	= icp_opal_cause_ipi,
#endif
};
131 
132 int icp_opal_init(void)
133 {
134 	struct device_node *np;
135 
136 	np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
137 	if (!np)
138 		return -ENODEV;
139 
140 	icp_ops = &icp_opal_ops;
141 
142 	printk("XICS: Using OPAL ICP fallbacks\n");
143 
144 	return 0;
145 }
146 
147