xref: /openbmc/linux/arch/powerpc/sysdev/mpic.c (revision 6714465e)
1 /*
2  *  arch/powerpc/kernel/mpic.c
3  *
4  *  Driver for interrupt controllers following the OpenPIC standard, the
 *  common implementation being IBM's MPIC. This driver also can deal
6  *  with various broken implementations of this HW.
7  *
8  *  Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
9  *
10  *  This file is subject to the terms and conditions of the GNU General Public
11  *  License.  See the file COPYING in the main directory of this archive
12  *  for more details.
13  */
14 
15 #undef DEBUG
16 #undef DEBUG_IPI
17 #undef DEBUG_IRQ
18 #undef DEBUG_LOW
19 
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/irq.h>
24 #include <linux/smp.h>
25 #include <linux/interrupt.h>
26 #include <linux/bootmem.h>
27 #include <linux/spinlock.h>
28 #include <linux/pci.h>
29 
30 #include <asm/ptrace.h>
31 #include <asm/signal.h>
32 #include <asm/io.h>
33 #include <asm/pgtable.h>
34 #include <asm/irq.h>
35 #include <asm/machdep.h>
36 #include <asm/mpic.h>
37 #include <asm/smp.h>
38 
39 #ifdef DEBUG
40 #define DBG(fmt...) printk(fmt)
41 #else
42 #define DBG(fmt...)
43 #endif
44 
45 static struct mpic *mpics;
46 static struct mpic *mpic_primary;
47 static DEFINE_SPINLOCK(mpic_lock);
48 
49 #ifdef CONFIG_PPC32	/* XXX for now */
50 #ifdef CONFIG_IRQ_ALL_CPUS
51 #define distribute_irqs	(1)
52 #else
53 #define distribute_irqs	(0)
54 #endif
55 #endif
56 
57 /*
58  * Register accessor functions
59  */
60 
61 
62 static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base,
63 			    unsigned int reg)
64 {
65 	if (be)
66 		return in_be32(base + (reg >> 2));
67 	else
68 		return in_le32(base + (reg >> 2));
69 }
70 
71 static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base,
72 			      unsigned int reg, u32 value)
73 {
74 	if (be)
75 		out_be32(base + (reg >> 2), value);
76 	else
77 		out_le32(base + (reg >> 2), value);
78 }
79 
80 static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
81 {
82 	unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0;
83 	unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
84 
85 	if (mpic->flags & MPIC_BROKEN_IPI)
86 		be = !be;
87 	return _mpic_read(be, mpic->gregs, offset);
88 }
89 
90 static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
91 {
92 	unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
93 
94 	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value);
95 }
96 
97 static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
98 {
99 	unsigned int cpu = 0;
100 
101 	if (mpic->flags & MPIC_PRIMARY)
102 		cpu = hard_smp_processor_id();
103 
104 	return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg);
105 }
106 
107 static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
108 {
109 	unsigned int cpu = 0;
110 
111 	if (mpic->flags & MPIC_PRIMARY)
112 		cpu = hard_smp_processor_id();
113 
114 	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value);
115 }
116 
117 static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
118 {
119 	unsigned int	isu = src_no >> mpic->isu_shift;
120 	unsigned int	idx = src_no & mpic->isu_mask;
121 
122 	return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
123 			  reg + (idx * MPIC_IRQ_STRIDE));
124 }
125 
126 static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
127 				   unsigned int reg, u32 value)
128 {
129 	unsigned int	isu = src_no >> mpic->isu_shift;
130 	unsigned int	idx = src_no & mpic->isu_mask;
131 
132 	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
133 		    reg + (idx * MPIC_IRQ_STRIDE), value);
134 }
135 
136 #define mpic_read(b,r)		_mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
137 #define mpic_write(b,r,v)	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v))
138 #define mpic_ipi_read(i)	_mpic_ipi_read(mpic,(i))
139 #define mpic_ipi_write(i,v)	_mpic_ipi_write(mpic,(i),(v))
140 #define mpic_cpu_read(i)	_mpic_cpu_read(mpic,(i))
141 #define mpic_cpu_write(i,v)	_mpic_cpu_write(mpic,(i),(v))
142 #define mpic_irq_read(s,r)	_mpic_irq_read(mpic,(s),(r))
143 #define mpic_irq_write(s,r,v)	_mpic_irq_write(mpic,(s),(r),(v))
144 
145 
146 /*
147  * Low level utility functions
148  */
149 
150 
151 
/* Check if we have one of those nice broken MPICs with a flipped endian on
 * reads from IPI registers
 */
static void __init mpic_test_broken_ipi(struct mpic *mpic)
{
	u32 r;

	/* Write a fully-masked vector/priority value, then read it back */
	mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK);
	r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0);

	/* If the readback matches the byte-swapped pattern, IPI reads
	 * come back with reversed endianness on this controller.
	 */
	if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
		printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
		mpic->flags |= MPIC_BROKEN_IPI;
	}
}
167 
168 #ifdef CONFIG_MPIC_BROKEN_U3
169 
170 /* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
171  * to force the edge setting on the MPIC and do the ack workaround.
172  */
173 static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
174 {
175 	if (source >= 128 || !mpic->fixups)
176 		return 0;
177 	return mpic->fixups[source].base != NULL;
178 }
179 
180 
/* Issue the HT-side end-of-interrupt for a fixed-up source. */
static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];

	if (fixup->applebase) {
		/* Apple bridges: write a single bit into a word-indexed
		 * EOI area instead of using the index/data ports.
		 */
		unsigned int soff = (fixup->index >> 3) & ~3;
		unsigned int mask = 1U << (fixup->index & 0x1f);
		writel(mask, fixup->applebase + soff);
	} else {
		/* base+2 appears to be an index port and base+4 a data
		 * port (see mpic_scan_ht_pic); select the source's EOI
		 * slot and write back the value saved at scan time.
		 */
		spin_lock(&mpic->fixup_lock);
		writeb(0x11 + 2 * fixup->index, fixup->base + 2);
		writel(fixup->data, fixup->base + 4);
		spin_unlock(&mpic->fixup_lock);
	}
}
196 
/* Enable the HT side of a fixed-up interrupt and program its trigger
 * mode from the linux irq flags. No-op if the source has no fixup.
 */
static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
				      unsigned int irqflags)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];
	unsigned long flags;
	u32 tmp;

	if (fixup->base == NULL)
		return;

	DBG("startup_ht_interrupt(%u, %u) index: %d\n",
	    source, irqflags, fixup->index);
	spin_lock_irqsave(&mpic->fixup_lock, flags);
	/* Enable and configure */
	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
	tmp = readl(fixup->base + 4);
	tmp &= ~(0x23U);	/* clear mask bit (bit 0) and trigger bits */
	if (irqflags & IRQ_LEVEL)
		tmp |= 0x22;	/* level-triggered setting */
	writel(tmp, fixup->base + 4);
	spin_unlock_irqrestore(&mpic->fixup_lock, flags);
}
219 
/* Disable the HT side of a fixed-up interrupt by setting its mask bit.
 * No-op if the source has no fixup.
 */
static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source,
				       unsigned int irqflags)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];
	unsigned long flags;
	u32 tmp;

	if (fixup->base == NULL)
		return;

	DBG("shutdown_ht_interrupt(%u, %u)\n", source, irqflags);

	/* Disable */
	spin_lock_irqsave(&mpic->fixup_lock, flags);
	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
	tmp = readl(fixup->base + 4);
	tmp |= 1;	/* bit 0 masks the interrupt */
	writel(tmp, fixup->base + 4);
	spin_unlock_irqrestore(&mpic->fixup_lock, flags);
}
240 
/* Walk one device's PCI capability chain looking for the HT interrupt
 * configuration capability; if present, record a fixup entry (index,
 * config base, saved EOI data) for each interrupt it exposes and mask
 * them until startup.
 */
static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
				    unsigned int devfn, u32 vdid)
{
	int i, irq, n;
	u8 __iomem *base;
	u32 tmp;
	u8 pos;

	/* Scan the capability list for an HT IRQ-configuration capability
	 * (subtype byte at offset 3 must be 0x80).
	 */
	for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
	     pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
		u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
		if (id == PCI_CAP_ID_HT_IRQCONF) {
			id = readb(devbase + pos + 3);
			if (id == 0x80)
				break;
		}
	}
	if (pos == 0)
		return;

	base = devbase + pos;
	/* Select index 0x01; bits 23:16 of the data port give the highest
	 * interrupt index (count - 1).
	 */
	writeb(0x01, base + 2);
	n = (readl(base + 4) >> 16) & 0xff;

	printk(KERN_INFO "mpic:   - HT:%02x.%x [0x%02x] vendor %04x device %04x"
	       " has %d irqs\n",
	       devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1);

	for (i = 0; i <= n; i++) {
		/* Index 0x10 + 2*i selects interrupt i's definition word */
		writeb(0x10 + 2 * i, base + 2);
		tmp = readl(base + 4);
		irq = (tmp >> 16) & 0xff;
		DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp);
		/* mask it, will be unmasked later */
		tmp |= 0x1;
		writel(tmp, base + 4);
		mpic->fixups[irq].index = i;
		mpic->fixups[irq].base = base;
		/* Apple HT PIC has a non-standard way of doing EOIs */
		if ((vdid & 0xffff) == 0x106b)
			mpic->fixups[irq].applebase = devbase + 0x60;
		else
			mpic->fixups[irq].applebase = NULL;
		/* Save the EOI-time data value (with the top bit forced) */
		writeb(0x11 + 2 * i, base + 2);
		mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
	}
}
288 
289 
/* Allocate the fixup table and scan every device on the primary bus's
 * config space for HT interrupt capabilities (U3/U4 workaround).
 */
static void __init mpic_scan_ht_pics(struct mpic *mpic)
{
	unsigned int devfn;
	u8 __iomem *cfgspace;

	printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");

	/* Allocate fixups array */
	mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
	BUG_ON(mpic->fixups == NULL);
	memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup));

	/* Init spinlock */
	spin_lock_init(&mpic->fixup_lock);

	/* Map U3 config space. We assume all IO-APICs are on the primary bus
	 * so we only need to map 64kB.
	 */
	cfgspace = ioremap(0xf2000000, 0x10000);
	BUG_ON(cfgspace == NULL);

	/* Now we scan all slots. We do a very quick scan, we read the header
	 * type, vendor ID and device ID only, that's plenty enough
	 */
	for (devfn = 0; devfn < 0x100; devfn++) {
		u8 __iomem *devbase = cfgspace + (devfn << 8);
		u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
		u32 l = readl(devbase + PCI_VENDOR_ID);
		u16 s;

		DBG("devfn %x, l: %x\n", devfn, l);

		/* If no device, skip */
		if (l == 0xffffffff || l == 0x00000000 ||
		    l == 0x0000ffff || l == 0xffff0000)
			goto next;
		/* Check if it supports capability lists */
		s = readw(devbase + PCI_STATUS);
		if (!(s & PCI_STATUS_CAP_LIST))
			goto next;

		mpic_scan_ht_pic(mpic, devbase, devfn, l);

	next:
		/* next device, if function 0 of a single-function device */
		if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0)
			devfn += 7;
	}
}
339 
340 #endif /* CONFIG_MPIC_BROKEN_U3 */
341 
342 
343 /* Find an mpic associated with a given linux interrupt */
344 static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi)
345 {
346 	struct mpic *mpic = mpics;
347 
348 	while(mpic) {
349 		/* search IPIs first since they may override the main interrupts */
350 		if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) {
351 			if (is_ipi)
352 				*is_ipi = 1;
353 			return mpic;
354 		}
355 		if (irq >= mpic->irq_offset &&
356 		    irq < (mpic->irq_offset + mpic->irq_count)) {
357 			if (is_ipi)
358 				*is_ipi = 0;
359 			return mpic;
360 		}
361 		mpic = mpic -> next;
362 	}
363 	return NULL;
364 }
365 
366 /* Convert a cpu mask from logical to physical cpu numbers. */
367 static inline u32 mpic_physmask(u32 cpumask)
368 {
369 	int i;
370 	u32 mask = 0;
371 
372 	for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1)
373 		mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
374 	return mask;
375 }
376 
377 #ifdef CONFIG_SMP
/* Get the mpic structure from the IPI number */
static inline struct mpic * mpic_from_ipi(unsigned int ipi)
{
	/* the irq's chip pointer points at the hc_ipi member embedded
	 * inside the owning struct mpic
	 */
	return container_of(irq_desc[ipi].chip, struct mpic, hc_ipi);
}
383 #endif
384 
/* Get the mpic structure from the irq number */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
	/* the irq's chip pointer points at the hc_irq member embedded
	 * inside the owning struct mpic
	 */
	return container_of(irq_desc[irq].chip, struct mpic, hc_irq);
}
390 
/* Send an EOI */
static inline void mpic_eoi(struct mpic *mpic)
{
	mpic_cpu_write(MPIC_CPU_EOI, 0);
	/* read back WHOAMI to push the EOI write out to the controller */
	(void)mpic_cpu_read(MPIC_CPU_WHOAMI);
}
397 
398 #ifdef CONFIG_SMP
/* IPI interrupt handler: translate the linux irq back into an IPI
 * number (0..3) and hand it to the SMP message dispatcher.
 */
static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
	struct mpic *mpic = dev_id;

	smp_message_recv(irq - mpic->ipi_offset, regs);
	return IRQ_HANDLED;
}
406 #endif /* CONFIG_SMP */
407 
408 /*
409  * Linux descriptor level callbacks
410  */
411 
412 
413 static void mpic_enable_irq(unsigned int irq)
414 {
415 	unsigned int loops = 100000;
416 	struct mpic *mpic = mpic_from_irq(irq);
417 	unsigned int src = irq - mpic->irq_offset;
418 
419 	DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);
420 
421 	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
422 		       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) &
423 		       ~MPIC_VECPRI_MASK);
424 
425 	/* make sure mask gets to controller before we return to user */
426 	do {
427 		if (!loops--) {
428 			printk(KERN_ERR "mpic_enable_irq timeout\n");
429 			break;
430 		}
431 	} while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
432 
433 #ifdef CONFIG_MPIC_BROKEN_U3
434 	if (mpic->flags & MPIC_BROKEN_U3) {
435 		unsigned int src = irq - mpic->irq_offset;
436 		if (mpic_is_ht_interrupt(mpic, src) &&
437 		    (irq_desc[irq].status & IRQ_LEVEL))
438 			mpic_ht_end_irq(mpic, src);
439 	}
440 #endif /* CONFIG_MPIC_BROKEN_U3 */
441 }
442 
/* irq_chip startup hook: unmask the source; on broken U3 also enable
 * the HyperTransport side.  Always reports "no pending" (returns 0).
 */
static unsigned int mpic_startup_irq(unsigned int irq)
{
#ifdef CONFIG_MPIC_BROKEN_U3
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = irq - mpic->irq_offset;
#endif /* CONFIG_MPIC_BROKEN_U3 */

	mpic_enable_irq(irq);

#ifdef CONFIG_MPIC_BROKEN_U3
	if (mpic_is_ht_interrupt(mpic, src))
		mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status);
#endif /* CONFIG_MPIC_BROKEN_U3 */

	return 0;
}
459 
460 static void mpic_disable_irq(unsigned int irq)
461 {
462 	unsigned int loops = 100000;
463 	struct mpic *mpic = mpic_from_irq(irq);
464 	unsigned int src = irq - mpic->irq_offset;
465 
466 	DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
467 
468 	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
469 		       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) |
470 		       MPIC_VECPRI_MASK);
471 
472 	/* make sure mask gets to controller before we return to user */
473 	do {
474 		if (!loops--) {
475 			printk(KERN_ERR "mpic_enable_irq timeout\n");
476 			break;
477 		}
478 	} while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
479 }
480 
/* irq_chip shutdown hook: on broken U3 disable the HyperTransport side
 * first, then mask the MPIC source.
 */
static void mpic_shutdown_irq(unsigned int irq)
{
#ifdef CONFIG_MPIC_BROKEN_U3
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = irq - mpic->irq_offset;

	if (mpic_is_ht_interrupt(mpic, src))
		mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status);

#endif /* CONFIG_MPIC_BROKEN_U3 */

	mpic_disable_irq(irq);
}
494 
/* irq_chip end hook: issue the HT-side EOI when needed, then the MPIC EOI. */
static void mpic_end_irq(unsigned int irq)
{
	struct mpic *mpic = mpic_from_irq(irq);

#ifdef DEBUG_IRQ
	DBG("%s: end_irq: %d\n", mpic->name, irq);
#endif
	/* We always EOI on end_irq() even for edge interrupts since that
	 * should only lower the priority, the MPIC should have properly
	 * latched another edge interrupt coming in anyway
	 */

#ifdef CONFIG_MPIC_BROKEN_U3
	if (mpic->flags & MPIC_BROKEN_U3) {
		unsigned int src = irq - mpic->irq_offset;
		/* level HT interrupts need the bridge-side EOI as well */
		if (mpic_is_ht_interrupt(mpic, src) &&
		    (irq_desc[irq].status & IRQ_LEVEL))
			mpic_ht_end_irq(mpic, src);
	}
#endif /* CONFIG_MPIC_BROKEN_U3 */

	mpic_eoi(mpic);
}
518 
519 #ifdef CONFIG_SMP
520 
521 static void mpic_enable_ipi(unsigned int irq)
522 {
523 	struct mpic *mpic = mpic_from_ipi(irq);
524 	unsigned int src = irq - mpic->ipi_offset;
525 
526 	DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src);
527 	mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
528 }
529 
/* Intentionally empty irq_chip disable hook for IPIs. */
static void mpic_disable_ipi(unsigned int irq)
{
	/* NEVER disable an IPI... that's just plain wrong! */
}
534 
/* irq_chip end hook for IPIs: just EOI the controller. */
static void mpic_end_ipi(unsigned int irq)
{
	struct mpic *mpic = mpic_from_ipi(irq);

	/*
	 * IPIs are marked IRQ_PER_CPU. This has the side effect of
	 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
	 * applying to them. We EOI them late to avoid re-entering.
	 * We mark IPI's with IRQF_DISABLED as they must run with
	 * irqs disabled.
	 */
	mpic_eoi(mpic);
}
548 
549 #endif /* CONFIG_SMP */
550 
/* irq_chip affinity hook: restrict the requested mask to online CPUs
 * and program the source's destination register (physical numbering).
 */
static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
{
	struct mpic *mpic = mpic_from_irq(irq);

	cpumask_t tmp;

	cpus_and(tmp, cpumask, cpu_online_map);

	/* destination register wants a physical-cpu bitmask */
	mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION,
		       mpic_physmask(cpus_addr(tmp)[0]));
}
562 
563 
564 /*
565  * Exported functions
566  */
567 
568 
/* Allocate, map and register a new MPIC instance.
 *
 * @phys_addr:    physical base address of the register block
 * @flags:        MPIC_* behaviour flags (primary, big-endian, reset, quirks)
 * @isu_size:     sources per external ISU; 0 = use the internal ISU
 * @irq_offset:   first linux irq number served by this mpic
 * @irq_count:    number of linux irqs (0 = hardware source count, fixed
 *                up later in mpic_init())
 * @ipi_offset:   first linux irq number used for the 4 IPIs
 * @senses:       optional per-source sense/polarity table (or NULL)
 * @senses_count: number of entries in @senses
 * @name:         controller name for log messages
 *
 * Returns the new controller, or NULL if the bootmem allocation fails.
 * The controller is linked onto the global 'mpics' list and becomes
 * 'mpic_primary' when MPIC_PRIMARY is set.
 */
struct mpic * __init mpic_alloc(unsigned long phys_addr,
				unsigned int flags,
				unsigned int isu_size,
				unsigned int irq_offset,
				unsigned int irq_count,
				unsigned int ipi_offset,
				unsigned char *senses,
				unsigned int senses_count,
				const char *name)
{
	struct mpic	*mpic;
	u32		reg;
	const char	*vers;
	int		i;

	mpic = alloc_bootmem(sizeof(struct mpic));
	if (mpic == NULL)
		return NULL;


	memset(mpic, 0, sizeof(struct mpic));
	mpic->name = name;

	/* wire up the linux irq_chip callbacks */
	mpic->hc_irq.typename = name;
	mpic->hc_irq.startup = mpic_startup_irq;
	mpic->hc_irq.shutdown = mpic_shutdown_irq;
	mpic->hc_irq.enable = mpic_enable_irq;
	mpic->hc_irq.disable = mpic_disable_irq;
	mpic->hc_irq.end = mpic_end_irq;
	if (flags & MPIC_PRIMARY)
		mpic->hc_irq.set_affinity = mpic_set_affinity;
#ifdef CONFIG_SMP
	mpic->hc_ipi.typename = name;
	mpic->hc_ipi.enable = mpic_enable_ipi;
	mpic->hc_ipi.disable = mpic_disable_ipi;
	mpic->hc_ipi.end = mpic_end_ipi;
#endif /* CONFIG_SMP */

	mpic->flags = flags;
	mpic->isu_size = isu_size;
	mpic->irq_offset = irq_offset;
	mpic->irq_count = irq_count;
	mpic->ipi_offset = ipi_offset;
	mpic->num_sources = 0; /* so far */
	mpic->senses = senses;
	mpic->senses_count = senses_count;

	/* Map the global registers */
	mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
	mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2);
	BUG_ON(mpic->gregs == NULL);

	/* Reset */
	if (flags & MPIC_WANTS_RESET) {
		mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
			   mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
			   | MPIC_GREG_GCONF_RESET);
		/* spin until the controller clears the self-resetting bit */
		while( mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
		       & MPIC_GREG_GCONF_RESET)
			mb();
	}

	/* Read feature register, calculate num CPUs and, for non-ISU
	 * MPICs, num sources as well. On ISU MPICs, sources are counted
	 * as ISUs are added
	 */
	reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0);
	mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK)
			  >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
	if (isu_size == 0)
		mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK)
				     >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;

	/* Map the per-CPU registers */
	for (i = 0; i < mpic->num_cpus; i++) {
		mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE +
					   i * MPIC_CPU_STRIDE, 0x1000);
		BUG_ON(mpic->cpuregs[i] == NULL);
	}

	/* Initialize main ISU if none provided */
	if (mpic->isu_size == 0) {
		mpic->isu_size = mpic->num_sources;
		mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE,
					MPIC_IRQ_STRIDE * mpic->isu_size);
		BUG_ON(mpic->isus[0] == NULL);
	}
	/* shift/mask used to split a source number into ISU bank + slot */
	mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
	mpic->isu_mask = (1 << mpic->isu_shift) - 1;

	/* Display version */
	switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) {
	case 1:
		vers = "1.0";
		break;
	case 2:
		vers = "1.2";
		break;
	case 3:
		vers = "1.3";
		break;
	default:
		vers = "<unknown>";
		break;
	}
	printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
	       name, vers, phys_addr, mpic->num_cpus);
	printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size,
	       mpic->isu_shift, mpic->isu_mask);

	/* link onto the global list */
	mpic->next = mpics;
	mpics = mpic;

	if (flags & MPIC_PRIMARY)
		mpic_primary = mpic;

	return mpic;
}
687 
688 void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
689 			    unsigned long phys_addr)
690 {
691 	unsigned int isu_first = isu_num * mpic->isu_size;
692 
693 	BUG_ON(isu_num >= MPIC_MAX_ISU);
694 
695 	mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size);
696 	if ((isu_first + mpic->isu_size) > mpic->num_sources)
697 		mpic->num_sources = isu_first + mpic->isu_size;
698 }
699 
/* Install (or replace) a cascade handler for the given linux irq.
 * The handler pointer is cleared first and re-set last, with write
 * barriers, so a racing mpic_get_one_irq() never sees the handler
 * paired with stale cascade_vec/cascade_data.
 */
void __init mpic_setup_cascade(unsigned int irq, mpic_cascade_t handler,
			       void *data)
{
	struct mpic *mpic = mpic_find(irq, NULL);
	unsigned long flags;

	/* Synchronization here is a bit dodgy, so don't try to replace cascade
	 * interrupts on the fly too often ... but normally it's set up at boot.
	 */
	spin_lock_irqsave(&mpic_lock, flags);
	if (mpic->cascade)
		mpic_disable_irq(mpic->cascade_vec + mpic->irq_offset);
	mpic->cascade = NULL;
	wmb();
	mpic->cascade_vec = irq - mpic->irq_offset;
	mpic->cascade_data = data;
	wmb();
	mpic->cascade = handler;
	mpic_enable_irq(irq);
	spin_unlock_irqrestore(&mpic_lock, flags);
}
721 
/* Bring the controller to a known state: raise the local task priority,
 * mask all timers, program the IPI and external source vectors, wire up
 * the linux irq descriptors, then open the local task priority back up.
 */
void __init mpic_init(struct mpic *mpic)
{
	int i;

	BUG_ON(mpic->num_sources == 0);

	printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);

	/* Initialize timers: just disable them all */
	for (i = 0; i < 4; i++) {
		mpic_write(mpic->tmregs,
			   i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0);
		mpic_write(mpic->tmregs,
			   i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI,
			   MPIC_VECPRI_MASK |
			   (MPIC_VEC_TIMER_0 + i));
	}

	/* Initialize IPIs to our reserved vectors and mark them disabled for now */
	mpic_test_broken_ipi(mpic);
	for (i = 0; i < 4; i++) {
		mpic_ipi_write(i,
			       MPIC_VECPRI_MASK |
			       (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
			       (MPIC_VEC_IPI_0 + i));
#ifdef CONFIG_SMP
		/* only the primary mpic owns linux descriptors for IPIs */
		if (!(mpic->flags & MPIC_PRIMARY))
			continue;
		irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU;
		irq_desc[mpic->ipi_offset+i].chip = &mpic->hc_ipi;
#endif /* CONFIG_SMP */
	}

	/* Initialize interrupt sources */
	if (mpic->irq_count == 0)
		mpic->irq_count = mpic->num_sources;

#ifdef CONFIG_MPIC_BROKEN_U3
	/* Do the HT PIC fixups on U3 broken mpic */
	DBG("MPIC flags: %x\n", mpic->flags);
	if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY))
		mpic_scan_ht_pics(mpic);
#endif /* CONFIG_MPIC_BROKEN_U3 */

	for (i = 0; i < mpic->num_sources; i++) {
		/* start with vector = source number, and masked */
		u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
		int level = 0;

		/* if it's an IPI, we skip it.
		 * NOTE(review): the '+ i' terms on both sides cancel, so
		 * this test is independent of i — verify the intended
		 * irq/IPI-range overlap check.
		 */
		if ((mpic->irq_offset + i) >= (mpic->ipi_offset + i) &&
		    (mpic->irq_offset + i) <  (mpic->ipi_offset + i + 4))
			continue;

		/* do senses munging */
		if (mpic->senses && i < mpic->senses_count) {
			if (mpic->senses[i] & IRQ_SENSE_LEVEL)
				vecpri |= MPIC_VECPRI_SENSE_LEVEL;
			if (mpic->senses[i] & IRQ_POLARITY_POSITIVE)
				vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
		} else
			vecpri |= MPIC_VECPRI_SENSE_LEVEL;

		/* remember if it was a level interrupt */
		level = (vecpri & MPIC_VECPRI_SENSE_LEVEL);

		/* deal with broken U3 */
		if (mpic->flags & MPIC_BROKEN_U3) {
#ifdef CONFIG_MPIC_BROKEN_U3
			/* HT interrupts get forced edge + positive polarity */
			if (mpic_is_ht_interrupt(mpic, i)) {
				vecpri &= ~(MPIC_VECPRI_SENSE_MASK |
					    MPIC_VECPRI_POLARITY_MASK);
				vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
			}
#else
			printk(KERN_ERR "mpic: BROKEN_U3 set, but CONFIG doesn't match\n");
#endif
		}

		DBG("setup source %d, vecpri: %08x, level: %d\n", i, vecpri,
		    (level != 0));

		/* init hw */
		mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
		mpic_irq_write(i, MPIC_IRQ_DESTINATION,
			       1 << hard_smp_processor_id());

		/* init linux descriptors */
		if (i < mpic->irq_count) {
			irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0;
			irq_desc[mpic->irq_offset+i].chip = &mpic->hc_irq;
		}
	}

	/* Init spurious vector */
	mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS);

	/* Disable 8259 passthrough */
	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
		   mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
		   | MPIC_GREG_GCONF_8259_PTHROU_DIS);

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
}
830 
831 void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
832 {
833 	u32 v;
834 
835 	v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
836 	v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK;
837 	v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio);
838 	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
839 }
840 
841 void __init mpic_set_serial_int(struct mpic *mpic, int enable)
842 {
843 	u32 v;
844 
845 	v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
846 	if (enable)
847 		v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
848 	else
849 		v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
850 	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
851 }
852 
853 void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
854 {
855 	int is_ipi;
856 	struct mpic *mpic = mpic_find(irq, &is_ipi);
857 	unsigned long flags;
858 	u32 reg;
859 
860 	spin_lock_irqsave(&mpic_lock, flags);
861 	if (is_ipi) {
862 		reg = mpic_ipi_read(irq - mpic->ipi_offset) &
863 			~MPIC_VECPRI_PRIORITY_MASK;
864 		mpic_ipi_write(irq - mpic->ipi_offset,
865 			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
866 	} else {
867 		reg = mpic_irq_read(irq - mpic->irq_offset,MPIC_IRQ_VECTOR_PRI)
868 			& ~MPIC_VECPRI_PRIORITY_MASK;
869 		mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI,
870 			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
871 	}
872 	spin_unlock_irqrestore(&mpic_lock, flags);
873 }
874 
875 unsigned int mpic_irq_get_priority(unsigned int irq)
876 {
877 	int is_ipi;
878 	struct mpic *mpic = mpic_find(irq, &is_ipi);
879 	unsigned long flags;
880 	u32 reg;
881 
882 	spin_lock_irqsave(&mpic_lock, flags);
883 	if (is_ipi)
884 		reg = mpic_ipi_read(irq - mpic->ipi_offset);
885 	else
886 		reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI);
887 	spin_unlock_irqrestore(&mpic_lock, flags);
888 	return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT;
889 }
890 
/* Hook the calling CPU into the primary MPIC: optionally add it to every
 * source's destination mask, then drop its task priority to 0 so it can
 * receive interrupts.  No-op on non-SMP kernels.
 */
void mpic_setup_this_cpu(void)
{
#ifdef CONFIG_SMP
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());

	spin_lock_irqsave(&mpic_lock, flags);

 	/* let the mpic know we want intrs. default affinity is 0xffffffff
	 * until changed via /proc. That's how it's done on x86. If we want
	 * it differently, then we should make sure we also change the default
	 * values of irq_desc[].affinity in irq.c.
 	 */
	if (distribute_irqs) {
	 	for (i = 0; i < mpic->num_sources ; i++)
			mpic_irq_write(i, MPIC_IRQ_DESTINATION,
				mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk);
	}

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);

	spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
}
922 
/* Read the calling CPU's current task priority from the primary MPIC. */
int mpic_cpu_get_priority(void)
{
	struct mpic *mpic = mpic_primary;

	return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
}
929 
/* Set the calling CPU's task priority on the primary MPIC, clamped to
 * the register's valid field width.
 */
void mpic_cpu_set_priority(int prio)
{
	struct mpic *mpic = mpic_primary;

	prio &= MPIC_CPU_TASKPRI_MASK;
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
}
937 
938 /*
939  * XXX: someone who knows mpic should check this.
940  * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
941  * or can we reset the mpic in the new kernel?
942  */
/* Detach the calling CPU from the primary MPIC: remove it from every
 * source's destination mask and raise its task priority to max so no
 * further interrupts are delivered (used e.g. on CPU offline/kexec;
 * the 'secondary' argument is currently unused).
 */
void mpic_teardown_this_cpu(int secondary)
{
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
	spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we don't want intrs.  */
	for (i = 0; i < mpic->num_sources ; i++)
		mpic_irq_write(i, MPIC_IRQ_DESTINATION,
			mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);

	spin_unlock_irqrestore(&mpic_lock, flags);
}
965 
966 
/* Dispatch IPI 'ipi_no' to the logical CPUs in 'cpu_mask'; offline CPUs
 * are filtered out and the mask is converted to physical numbering.
 */
void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

#ifdef DEBUG_IPI
	DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
#endif

	/* each IPI has its own dispatch register, 0x10 bytes apart */
	mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10,
		       mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
}
980 
/* Acknowledge and return the next pending linux irq number for this
 * mpic, or -1 for a spurious interrupt.  Cascade vectors are delegated
 * to the registered cascade handler; IPI vectors are remapped into the
 * mpic's IPI irq range.
 */
int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
{
	u32 irq;

	/* reading INTACK acks the interrupt and yields its vector */
	irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
#ifdef DEBUG_LOW
	DBG("%s: get_one_irq(): %d\n", mpic->name, irq);
#endif
	if (mpic->cascade && irq == mpic->cascade_vec) {
#ifdef DEBUG_LOW
		DBG("%s: cascading ...\n", mpic->name);
#endif
		irq = mpic->cascade(regs, mpic->cascade_data);
		mpic_eoi(mpic);
		return irq;
	}
	if (unlikely(irq == MPIC_VEC_SPURRIOUS))
		return -1;
	if (irq < MPIC_VEC_IPI_0) {
		/* ordinary source: translate hardware vector to linux irq */
#ifdef DEBUG_IRQ
		DBG("%s: irq %d\n", mpic->name, irq + mpic->irq_offset);
#endif
		return irq + mpic->irq_offset;
	}
#ifdef DEBUG_IPI
       	DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0);
#endif
	return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset;
}
1010 
/* Platform entry point: fetch the next pending irq from the primary MPIC. */
int mpic_get_irq(struct pt_regs *regs)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

	return mpic_get_one_irq(mpic, regs);
}
1019 
1020 
1021 #ifdef CONFIG_SMP
1022 void mpic_request_ipis(void)
1023 {
1024 	struct mpic *mpic = mpic_primary;
1025 
1026 	BUG_ON(mpic == NULL);
1027 
1028 	printk("requesting IPIs ... \n");
1029 
1030 	/*
1031 	 * IPIs are marked IRQF_DISABLED as they must run with irqs
1032 	 * disabled
1033 	 */
1034 	request_irq(mpic->ipi_offset+0, mpic_ipi_action, IRQF_DISABLED,
1035 		    "IPI0 (call function)", mpic);
1036 	request_irq(mpic->ipi_offset+1, mpic_ipi_action, IRQF_DISABLED,
1037 		   "IPI1 (reschedule)", mpic);
1038 	request_irq(mpic->ipi_offset+2, mpic_ipi_action, IRQF_DISABLED,
1039 		   "IPI2 (unused)", mpic);
1040 	request_irq(mpic->ipi_offset+3, mpic_ipi_action, IRQF_DISABLED,
1041 		   "IPI3 (debugger break)", mpic);
1042 
1043 	printk("IPIs requested... \n");
1044 }
1045 
1046 void smp_mpic_message_pass(int target, int msg)
1047 {
1048 	/* make sure we're sending something that translates to an IPI */
1049 	if ((unsigned int)msg > 3) {
1050 		printk("SMP %d: smp_message_pass: unknown msg %d\n",
1051 		       smp_processor_id(), msg);
1052 		return;
1053 	}
1054 	switch (target) {
1055 	case MSG_ALL:
1056 		mpic_send_ipi(msg, 0xffffffff);
1057 		break;
1058 	case MSG_ALL_BUT_SELF:
1059 		mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
1060 		break;
1061 	default:
1062 		mpic_send_ipi(msg, 1 << target);
1063 		break;
1064 	}
1065 }
1066 #endif /* CONFIG_SMP */
1067