xref: /openbmc/linux/arch/powerpc/sysdev/ehv_pic.c (revision c265735f)
/*
 *  Driver for ePAPR Embedded Hypervisor PIC
 *
 *  Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 *  Author: Ashish Kalra <ashish.kalra@freescale.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/ehv_pic.h>
#include <asm/fsl_hcalls.h>

static struct ehv_pic *global_ehv_pic;
static DEFINE_SPINLOCK(ehv_pic_lock);

static u32 hwirq_intspec[NR_EHV_PIC_INTS];
static u32 __iomem *mpic_percpu_base_vaddr;

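/*
 * IRQ_TYPE_MPIC_DIRECT is a flag carried in the second cell of an
 * interrupt specifier: it marks sources that should be EOI'd directly
 * at the per-CPU MPIC registers instead of through a hypercall.
 * MPIC_EOI is the byte offset of the EOI register within that per-CPU
 * register block.
 */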
#define IRQ_TYPE_MPIC_DIRECT 4
#define MPIC_EOI  0x00B0

/*
 * Linux descriptor level callbacks
 */

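/*
 * Masking and unmasking are done entirely through the hypervisor: the
 * ePAPR EV_INT_SET_MASK hypercall (via ev_int_set_mask()) takes the
 * hardware interrupt number and a mask flag of 1 (masked) or 0 (unmasked).
 */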
static void ehv_pic_unmask_irq(struct irq_data *d)
{
	unsigned int src = virq_to_hw(d->irq);

	ev_int_set_mask(src, 0);
}

static void ehv_pic_mask_irq(struct irq_data *d)
{
	unsigned int src = virq_to_hw(d->irq);

	ev_int_set_mask(src, 1);
}

static void ehv_pic_end_irq(struct irq_data *d)
{
	unsigned int src = virq_to_hw(d->irq);

	ev_int_eoi(src);
}

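/*
 * EOI an interrupt directly at the guest-visible per-CPU MPIC registers
 * (mapped from the "fsl,hv-mpic-per-cpu" node) rather than through the
 * EV_INT_EOI hypercall; writing 0 signals end-of-interrupt for the
 * highest-priority in-service interrupt.
 */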
static void ehv_pic_direct_end_irq(struct irq_data *d)
{
	out_be32(mpic_percpu_base_vaddr + MPIC_EOI / 4, 0);
}

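/*
 * The hypervisor interface routes an interrupt to a single CPU only, so
 * the requested mask is collapsed to one CPU with irq_choose_cpu() and
 * programmed through the get/set config hypercalls under ehv_pic_lock.
 */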
static int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest,
			 bool force)
{
	unsigned int src = virq_to_hw(d->irq);
	unsigned int config, prio, cpu_dest;
	int cpuid = irq_choose_cpu(dest);
	unsigned long flags;

	spin_lock_irqsave(&ehv_pic_lock, flags);
	ev_int_get_config(src, &config, &prio, &cpu_dest);
	ev_int_set_config(src, config, prio, cpuid);
	spin_unlock_irqrestore(&ehv_pic_lock, flags);

	return IRQ_SET_MASK_OK;
}

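/*
 * Translate a Linux IRQ_TYPE_* trigger type into the sense/polarity bits
 * expected by the hypervisor's interrupt-config interface.  Unknown types
 * fall back to active-low level, the same default used by
 * ehv_pic_set_irq_type().
 */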
static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
{
	/* Now convert sense value */

	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_RISING:
		return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
		       EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);

	case IRQ_TYPE_EDGE_FALLING:
	case IRQ_TYPE_EDGE_BOTH:
		return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
		       EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);

	case IRQ_TYPE_LEVEL_HIGH:
		return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
		       EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);

	case IRQ_TYPE_LEVEL_LOW:
	default:
		return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
		       EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);
	}
}

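/*
 * Rewrite the sense/polarity bits of an interrupt's configuration while
 * preserving its CPU destination.  The priority is currently forced to a
 * static value (8) for all sources; see the TODO below.
 */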
static int ehv_pic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	unsigned int src = virq_to_hw(d->irq);
	unsigned int vecpri, vold, vnew, prio, cpu_dest;
	unsigned long flags;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	irqd_set_trigger_type(d, flow_type);

	vecpri = ehv_pic_type_to_vecpri(flow_type);

	spin_lock_irqsave(&ehv_pic_lock, flags);
	ev_int_get_config(src, &vold, &prio, &cpu_dest);
	vnew = vold & ~(EHV_PIC_INFO(VECPRI_POLARITY_MASK) |
			EHV_PIC_INFO(VECPRI_SENSE_MASK));
	vnew |= vecpri;

	/*
	 * TODO: Add a platform-specific interface call to set individual
	 * interrupt priorities.  The platform currently uses a
	 * static/default priority for all interrupts.
	 */

	prio = 8;

	ev_int_set_config(src, vecpri, prio, cpu_dest);

	spin_unlock_irqrestore(&ehv_pic_lock, flags);
	return IRQ_SET_MASK_OK_NOCOPY;
}

static struct irq_chip ehv_pic_irq_chip = {
	.irq_mask	= ehv_pic_mask_irq,
	.irq_unmask	= ehv_pic_unmask_irq,
	.irq_eoi	= ehv_pic_end_irq,
	.irq_set_type	= ehv_pic_set_irq_type,
};

static struct irq_chip ehv_pic_direct_eoi_irq_chip = {
	.irq_mask	= ehv_pic_mask_irq,
	.irq_unmask	= ehv_pic_unmask_irq,
	.irq_eoi	= ehv_pic_direct_end_irq,
	.irq_set_type	= ehv_pic_set_irq_type,
};

/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int ehv_pic_get_irq(void)
{
	int irq;

	BUG_ON(global_ehv_pic == NULL);

	if (global_ehv_pic->coreint_flag)
		irq = mfspr(SPRN_EPR); /* if core int mode */
	else
		ev_int_iack(0, &irq); /* legacy mode */

	if (irq == 0xFFFF)    /* 0xFFFF --> no irq is pending */
		return 0;

	/*
	 * This also sets up revmap[] in the slow path the first time;
	 * subsequent calls always take the fast path by indexing revmap.
	 */
	return irq_linear_revmap(global_ehv_pic->irqhost, irq);
}

static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node,
			      enum irq_domain_bus_token bus_token)
{
	/* Exact match, unless ehv_pic node is NULL */
	struct device_node *of_node = irq_domain_get_of_node(h);
	return of_node == NULL || of_node == node;
}

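/*
 * Bind a virq to its hardware source: pick the direct-EOI irq_chip when
 * the per-CPU MPIC registers are mapped and the source was flagged with
 * IRQ_TYPE_MPIC_DIRECT in its interrupt specifier, otherwise use the
 * hypercall-based chip.
 */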
static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	struct ehv_pic *ehv_pic = h->host_data;
	struct irq_chip *chip;

	/* Default chip */
	chip = &ehv_pic->hc_irq;

	if (mpic_percpu_base_vaddr)
		if (hwirq_intspec[hw] & IRQ_TYPE_MPIC_DIRECT)
			chip = &ehv_pic_direct_eoi_irq_chip;

	irq_set_chip_data(virq, chip);
	/*
	 * Use handle_fasteoi_irq as the irq handler: it only calls the eoi
	 * callback, which suits the MPIC controller, since the MPIC sets
	 * ISR/IPR automatically and clears the highest-priority active
	 * interrupt in ISR/IPR when we issue a specific EOI.
	 */
	irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

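/*
 * Decode an interrupt specifier: cell 0 is the hardware interrupt number;
 * the optional cell 1 carries the firmware sense/level encoding plus the
 * MPIC-direct flag, and is cached in hwirq_intspec[] for use at map time.
 * For example, a specifier of <9 3> maps hwirq 9 as IRQ_TYPE_LEVEL_HIGH,
 * while <9 7> additionally selects the direct MPIC EOI path.
 */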
static int ehv_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
			   const u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)

{
	/*
	 * Interrupt specifiers coming from the guest device tree can carry
	 * one of four possible sense/level encodings, which must be
	 * translated from the firmware encoding to the Linux type.
	 */

	static unsigned char map_of_senses_to_linux_irqtype[4] = {
		IRQ_TYPE_EDGE_FALLING,
		IRQ_TYPE_EDGE_RISING,
		IRQ_TYPE_LEVEL_LOW,
		IRQ_TYPE_LEVEL_HIGH,
	};

	*out_hwirq = intspec[0];
	if (intsize > 1) {
		hwirq_intspec[intspec[0]] = intspec[1];
		*out_flags = map_of_senses_to_linux_irqtype[intspec[1] &
							~IRQ_TYPE_MPIC_DIRECT];
	} else {
		*out_flags = IRQ_TYPE_NONE;
	}

	return 0;
}

static const struct irq_domain_ops ehv_pic_host_ops = {
	.match = ehv_pic_host_match,
	.map = ehv_pic_host_map,
	.xlate = ehv_pic_host_xlate,
};

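/*
 * Locate the "epapr,hv-pic" node, register a linear irq_domain for it and
 * make it the default host.  If a "fsl,hv-mpic-per-cpu" node is present,
 * map it so that MPIC-direct sources can EOI at the hardware directly.
 * The "has-external-proxy" property selects coreint (EPR) delivery.
 */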
void __init ehv_pic_init(void)
{
	struct device_node *np, *np2;
	struct ehv_pic *ehv_pic;

	np = of_find_compatible_node(NULL, NULL, "epapr,hv-pic");
	if (!np) {
		pr_err("ehv_pic_init: could not find epapr,hv-pic node\n");
		return;
	}

	ehv_pic = kzalloc(sizeof(struct ehv_pic), GFP_KERNEL);
	if (!ehv_pic) {
		of_node_put(np);
		return;
	}

	ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS,
						 &ehv_pic_host_ops, ehv_pic);
	if (!ehv_pic->irqhost) {
		of_node_put(np);
		kfree(ehv_pic);
		return;
	}

	np2 = of_find_compatible_node(NULL, NULL, "fsl,hv-mpic-per-cpu");
	if (np2) {
		mpic_percpu_base_vaddr = of_iomap(np2, 0);
		if (!mpic_percpu_base_vaddr)
			pr_err("ehv_pic_init: of_iomap failed\n");

		of_node_put(np2);
	}

	ehv_pic->hc_irq = ehv_pic_irq_chip;
	ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity;
	ehv_pic->coreint_flag = of_property_read_bool(np, "has-external-proxy");

	global_ehv_pic = ehv_pic;
	irq_set_default_host(global_ehv_pic->irqhost);
}