xref: /openbmc/linux/arch/powerpc/sysdev/mpic.c (revision 72b13819)
1 /*
2  *  arch/powerpc/kernel/mpic.c
3  *
4  *  Driver for interrupt controllers following the OpenPIC standard, the
 *  common implementation being IBM's MPIC. This driver also can deal
6  *  with various broken implementations of this HW.
7  *
8  *  Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
9  *
10  *  This file is subject to the terms and conditions of the GNU General Public
11  *  License.  See the file COPYING in the main directory of this archive
12  *  for more details.
13  */
14 
15 #undef DEBUG
16 #undef DEBUG_IPI
17 #undef DEBUG_IRQ
18 #undef DEBUG_LOW
19 
20 #include <linux/config.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/init.h>
24 #include <linux/irq.h>
25 #include <linux/smp.h>
26 #include <linux/interrupt.h>
27 #include <linux/bootmem.h>
28 #include <linux/spinlock.h>
29 #include <linux/pci.h>
30 
31 #include <asm/ptrace.h>
32 #include <asm/signal.h>
33 #include <asm/io.h>
34 #include <asm/pgtable.h>
35 #include <asm/irq.h>
36 #include <asm/machdep.h>
37 #include <asm/mpic.h>
38 #include <asm/smp.h>
39 
40 #ifdef DEBUG
41 #define DBG(fmt...) printk(fmt)
42 #else
43 #define DBG(fmt...)
44 #endif
45 
46 static struct mpic *mpics;
47 static struct mpic *mpic_primary;
48 static DEFINE_SPINLOCK(mpic_lock);
49 
50 #ifdef CONFIG_PPC32	/* XXX for now */
51 #ifdef CONFIG_IRQ_ALL_CPUS
52 #define distribute_irqs	(1)
53 #else
54 #define distribute_irqs	(0)
55 #endif
56 #endif
57 
58 /*
59  * Register accessor functions
60  */
61 
62 
63 static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base,
64 			    unsigned int reg)
65 {
66 	if (be)
67 		return in_be32(base + (reg >> 2));
68 	else
69 		return in_le32(base + (reg >> 2));
70 }
71 
72 static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base,
73 			      unsigned int reg, u32 value)
74 {
75 	if (be)
76 		out_be32(base + (reg >> 2), value);
77 	else
78 		out_le32(base + (reg >> 2), value);
79 }
80 
81 static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
82 {
83 	unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0;
84 	unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
85 
86 	if (mpic->flags & MPIC_BROKEN_IPI)
87 		be = !be;
88 	return _mpic_read(be, mpic->gregs, offset);
89 }
90 
91 static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
92 {
93 	unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
94 
95 	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value);
96 }
97 
98 static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
99 {
100 	unsigned int cpu = 0;
101 
102 	if (mpic->flags & MPIC_PRIMARY)
103 		cpu = hard_smp_processor_id();
104 
105 	return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg);
106 }
107 
108 static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
109 {
110 	unsigned int cpu = 0;
111 
112 	if (mpic->flags & MPIC_PRIMARY)
113 		cpu = hard_smp_processor_id();
114 
115 	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value);
116 }
117 
118 static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
119 {
120 	unsigned int	isu = src_no >> mpic->isu_shift;
121 	unsigned int	idx = src_no & mpic->isu_mask;
122 
123 	return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
124 			  reg + (idx * MPIC_IRQ_STRIDE));
125 }
126 
127 static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
128 				   unsigned int reg, u32 value)
129 {
130 	unsigned int	isu = src_no >> mpic->isu_shift;
131 	unsigned int	idx = src_no & mpic->isu_mask;
132 
133 	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
134 		    reg + (idx * MPIC_IRQ_STRIDE), value);
135 }
136 
137 #define mpic_read(b,r)		_mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
138 #define mpic_write(b,r,v)	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v))
139 #define mpic_ipi_read(i)	_mpic_ipi_read(mpic,(i))
140 #define mpic_ipi_write(i,v)	_mpic_ipi_write(mpic,(i),(v))
141 #define mpic_cpu_read(i)	_mpic_cpu_read(mpic,(i))
142 #define mpic_cpu_write(i,v)	_mpic_cpu_write(mpic,(i),(v))
143 #define mpic_irq_read(s,r)	_mpic_irq_read(mpic,(s),(r))
144 #define mpic_irq_write(s,r,v)	_mpic_irq_write(mpic,(s),(r),(v))
145 
146 
147 /*
148  * Low level utility functions
149  */
150 
151 
152 
153 /* Check if we have one of those nice broken MPICs with a flipped endian on
154  * reads from IPI registers
155  */
static void __init mpic_test_broken_ipi(struct mpic *mpic)
{
	u32 r;

	/* Write a known pattern to IPI 0's vector/priority register and
	 * read it back; on broken parts the readback comes out byte-swapped.
	 */
	mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK);
	r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0);

	if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
		printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
		mpic->flags |= MPIC_BROKEN_IPI;
	}
}
168 
169 #ifdef CONFIG_MPIC_BROKEN_U3
170 
171 /* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
172  * to force the edge setting on the MPIC and do the ack workaround.
173  */
174 static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
175 {
176 	if (source >= 128 || !mpic->fixups)
177 		return 0;
178 	return mpic->fixups[source].base != NULL;
179 }
180 
181 
/* Perform the end-of-interrupt sequence for an HT interrupt on broken U3.
 * Apple parts have a dedicated per-bit EOI area; others are handled by
 * rewriting the saved interrupt-definition data word.
 */
static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];

	if (fixup->applebase) {
		/* One bit per index, one 32-bit word per 32 indices */
		unsigned int soff = (fixup->index >> 3) & ~3;
		unsigned int mask = 1U << (fixup->index & 0x1f);
		writel(mask, fixup->applebase + soff);
	} else {
		/* Select this index's dataport, then replay the value
		 * captured at scan time (lock guards the select+write pair)
		 */
		spin_lock(&mpic->fixup_lock);
		writeb(0x11 + 2 * fixup->index, fixup->base + 2);
		writel(fixup->data, fixup->base + 4);
		spin_unlock(&mpic->fixup_lock);
	}
}
197 
/* Enable an HT interrupt in the device's HT IRQ capability block and
 * program its low control bits to match the Linux level/edge setting.
 */
static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
				      unsigned int irqflags)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];
	unsigned long flags;
	u32 tmp;

	if (fixup->base == NULL)
		return;

	DBG("startup_ht_interrupt(%u, %u) index: %d\n",
	    source, irqflags, fixup->index);
	spin_lock_irqsave(&mpic->fixup_lock, flags);
	/* Enable and configure: select this index's definition register,
	 * clear the 0x23 control bits, then set 0x22 for level interrupts
	 */
	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
	tmp = readl(fixup->base + 4);
	tmp &= ~(0x23U);
	if (irqflags & IRQ_LEVEL)
		tmp |= 0x22;
	writel(tmp, fixup->base + 4);
	spin_unlock_irqrestore(&mpic->fixup_lock, flags);
}
220 
/* Disable an HT interrupt by setting the mask bit (bit 0) in the
 * device's HT IRQ capability block.
 */
static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source,
				       unsigned int irqflags)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];
	unsigned long flags;
	u32 tmp;

	if (fixup->base == NULL)
		return;

	DBG("shutdown_ht_interrupt(%u, %u)\n", source, irqflags);

	/* Disable */
	spin_lock_irqsave(&mpic->fixup_lock, flags);
	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
	tmp = readl(fixup->base + 4);
	tmp |= 1;
	writel(tmp, fixup->base + 4);
	spin_unlock_irqrestore(&mpic->fixup_lock, flags);
}
241 
/* Scan one device's capability list for an HT interrupt-configuration
 * capability and record a fixup entry for every interrupt it sources.
 */
static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
				    unsigned int devfn, u32 vdid)
{
	int i, irq, n;
	u8 __iomem *base;
	u32 tmp;
	u8 pos;

	/* Walk the PCI capability list, looking for an HT IRQCONF
	 * capability with subtype 0x80
	 */
	for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
	     pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
		u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
		if (id == PCI_CAP_ID_HT_IRQCONF) {
			id = readb(devbase + pos + 3);
			if (id == 0x80)
				break;
		}
	}
	if (pos == 0)
		return;

	/* Select index 1 and read back the interrupt count (last index) */
	base = devbase + pos;
	writeb(0x01, base + 2);
	n = (readl(base + 4) >> 16) & 0xff;

	printk(KERN_INFO "mpic:   - HT:%02x.%x [0x%02x] vendor %04x device %04x"
	       " has %d irqs\n",
	       devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1);

	for (i = 0; i <= n; i++) {
		/* Select this interrupt's definition register and find out
		 * which MPIC source it is wired to
		 */
		writeb(0x10 + 2 * i, base + 2);
		tmp = readl(base + 4);
		irq = (tmp >> 16) & 0xff;
		DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp);
		/* mask it, will be unmasked later */
		tmp |= 0x1;
		writel(tmp, base + 4);
		mpic->fixups[irq].index = i;
		mpic->fixups[irq].base = base;
		/* Apple HT PIC has a non-standard way of doing EOIs */
		if ((vdid & 0xffff) == 0x106b)
			mpic->fixups[irq].applebase = devbase + 0x60;
		else
			mpic->fixups[irq].applebase = NULL;
		/* Capture the dataport value used later for the EOI replay */
		writeb(0x11 + 2 * i, base + 2);
		mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
	}
}
289 
290 
/* Allocate the fixup table, map the U3 HT config space, and scan every
 * devfn on the primary bus for HT PICs that need the broken-U3 workaround.
 */
static void __init mpic_scan_ht_pics(struct mpic *mpic)
{
	unsigned int devfn;
	u8 __iomem *cfgspace;

	printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");

	/* Allocate fixups array (one entry per possible HT source, 0..127) */
	mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
	BUG_ON(mpic->fixups == NULL);
	memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup));

	/* Init spinlock */
	spin_lock_init(&mpic->fixup_lock);

	/* Map U3 config space. We assume all IO-APICs are on the primary bus
	 * so we only need to map 64kB.
	 */
	cfgspace = ioremap(0xf2000000, 0x10000);
	BUG_ON(cfgspace == NULL);

	/* Now we scan all slots. We do a very quick scan, we read the header
	 * type, vendor ID and device ID only, that's plenty enough
	 */
	for (devfn = 0; devfn < 0x100; devfn++) {
		u8 __iomem *devbase = cfgspace + (devfn << 8);
		u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
		u32 l = readl(devbase + PCI_VENDOR_ID);
		u16 s;

		DBG("devfn %x, l: %x\n", devfn, l);

		/* If no device, skip */
		if (l == 0xffffffff || l == 0x00000000 ||
		    l == 0x0000ffff || l == 0xffff0000)
			goto next;
		/* Check if it supports capability lists */
		s = readw(devbase + PCI_STATUS);
		if (!(s & PCI_STATUS_CAP_LIST))
			goto next;

		mpic_scan_ht_pic(mpic, devbase, devfn, l);

	next:
		/* next device, if function 0 of a single-function device */
		if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0)
			devfn += 7;
	}
}
340 
341 #endif /* CONFIG_MPIC_BROKEN_U3 */
342 
343 
344 /* Find an mpic associated with a given linux interrupt */
345 static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi)
346 {
347 	struct mpic *mpic = mpics;
348 
349 	while(mpic) {
350 		/* search IPIs first since they may override the main interrupts */
351 		if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) {
352 			if (is_ipi)
353 				*is_ipi = 1;
354 			return mpic;
355 		}
356 		if (irq >= mpic->irq_offset &&
357 		    irq < (mpic->irq_offset + mpic->irq_count)) {
358 			if (is_ipi)
359 				*is_ipi = 0;
360 			return mpic;
361 		}
362 		mpic = mpic -> next;
363 	}
364 	return NULL;
365 }
366 
367 /* Convert a cpu mask from logical to physical cpu numbers. */
368 static inline u32 mpic_physmask(u32 cpumask)
369 {
370 	int i;
371 	u32 mask = 0;
372 
373 	for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1)
374 		mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
375 	return mask;
376 }
377 
378 #ifdef CONFIG_SMP
379 /* Get the mpic structure from the IPI number */
static inline struct mpic * mpic_from_ipi(unsigned int ipi)
{
	/* The irq_desc handler points at the hc_ipi member embedded in
	 * struct mpic, so container_of recovers the owning mpic
	 */
	return container_of(irq_desc[ipi].handler, struct mpic, hc_ipi);
}
384 #endif
385 
386 /* Get the mpic structure from the irq number */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
	/* The irq_desc handler points at the hc_irq member embedded in
	 * struct mpic, so container_of recovers the owning mpic
	 */
	return container_of(irq_desc[irq].handler, struct mpic, hc_irq);
}
391 
392 /* Send an EOI */
static inline void mpic_eoi(struct mpic *mpic)
{
	mpic_cpu_write(MPIC_CPU_EOI, 0);
	/* Read back WHOAMI to make sure the EOI write has reached the
	 * controller before we continue
	 */
	(void)mpic_cpu_read(MPIC_CPU_WHOAMI);
}
398 
399 #ifdef CONFIG_SMP
/* IPI interrupt handler: translate the linux irq back into an IPI number
 * (0..3) and dispatch it to the generic SMP message receiver.
 */
static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
	struct mpic *mpic = dev_id;

	smp_message_recv(irq - mpic->ipi_offset, regs);
	return IRQ_HANDLED;
}
407 #endif /* CONFIG_SMP */
408 
409 /*
410  * Linux descriptor level callbacks
411  */
412 
413 
414 static void mpic_enable_irq(unsigned int irq)
415 {
416 	unsigned int loops = 100000;
417 	struct mpic *mpic = mpic_from_irq(irq);
418 	unsigned int src = irq - mpic->irq_offset;
419 
420 	DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);
421 
422 	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
423 		       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) &
424 		       ~MPIC_VECPRI_MASK);
425 
426 	/* make sure mask gets to controller before we return to user */
427 	do {
428 		if (!loops--) {
429 			printk(KERN_ERR "mpic_enable_irq timeout\n");
430 			break;
431 		}
432 	} while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
433 
434 #ifdef CONFIG_MPIC_BROKEN_U3
435 	if (mpic->flags & MPIC_BROKEN_U3) {
436 		unsigned int src = irq - mpic->irq_offset;
437 		if (mpic_is_ht_interrupt(mpic, src) &&
438 		    (irq_desc[irq].status & IRQ_LEVEL))
439 			mpic_ht_end_irq(mpic, src);
440 	}
441 #endif /* CONFIG_MPIC_BROKEN_U3 */
442 }
443 
/* irq_chip startup hook: unmask the source and, on broken U3, also enable
 * the corresponding HyperTransport interrupt.  Always reports 0 pending.
 */
static unsigned int mpic_startup_irq(unsigned int irq)
{
#ifdef CONFIG_MPIC_BROKEN_U3
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = irq - mpic->irq_offset;
#endif /* CONFIG_MPIC_BROKEN_U3 */

	mpic_enable_irq(irq);

#ifdef CONFIG_MPIC_BROKEN_U3
	if (mpic_is_ht_interrupt(mpic, src))
		mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status);
#endif /* CONFIG_MPIC_BROKEN_U3 */

	return 0;
}
460 
461 static void mpic_disable_irq(unsigned int irq)
462 {
463 	unsigned int loops = 100000;
464 	struct mpic *mpic = mpic_from_irq(irq);
465 	unsigned int src = irq - mpic->irq_offset;
466 
467 	DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
468 
469 	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
470 		       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) |
471 		       MPIC_VECPRI_MASK);
472 
473 	/* make sure mask gets to controller before we return to user */
474 	do {
475 		if (!loops--) {
476 			printk(KERN_ERR "mpic_enable_irq timeout\n");
477 			break;
478 		}
479 	} while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
480 }
481 
/* irq_chip shutdown hook: on broken U3 first disable the HyperTransport
 * side of the interrupt, then mask the MPIC source.
 */
static void mpic_shutdown_irq(unsigned int irq)
{
#ifdef CONFIG_MPIC_BROKEN_U3
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = irq - mpic->irq_offset;

	if (mpic_is_ht_interrupt(mpic, src))
		mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status);

#endif /* CONFIG_MPIC_BROKEN_U3 */

	mpic_disable_irq(irq);
}
495 
/* irq_chip end hook: run the broken-U3 HT EOI workaround if needed, then
 * EOI the MPIC.
 */
static void mpic_end_irq(unsigned int irq)
{
	struct mpic *mpic = mpic_from_irq(irq);

#ifdef DEBUG_IRQ
	DBG("%s: end_irq: %d\n", mpic->name, irq);
#endif
	/* We always EOI on end_irq() even for edge interrupts since that
	 * should only lower the priority, the MPIC should have properly
	 * latched another edge interrupt coming in anyway
	 */

#ifdef CONFIG_MPIC_BROKEN_U3
	if (mpic->flags & MPIC_BROKEN_U3) {
		unsigned int src = irq - mpic->irq_offset;
		if (mpic_is_ht_interrupt(mpic, src) &&
		    (irq_desc[irq].status & IRQ_LEVEL))
			mpic_ht_end_irq(mpic, src);
	}
#endif /* CONFIG_MPIC_BROKEN_U3 */

	mpic_eoi(mpic);
}
519 
520 #ifdef CONFIG_SMP
521 
/* Unmask one of the four IPI vectors. */
static void mpic_enable_ipi(unsigned int irq)
{
	struct mpic *mpic = mpic_from_ipi(irq);
	unsigned int src = irq - mpic->ipi_offset;

	DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src);
	mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
}
530 
/* Intentionally a no-op. */
static void mpic_disable_ipi(unsigned int irq)
{
	/* NEVER disable an IPI... that's just plain wrong! */
}
535 
/* irq_chip end hook for IPIs: just EOI the controller. */
static void mpic_end_ipi(unsigned int irq)
{
	struct mpic *mpic = mpic_from_ipi(irq);

	/*
	 * IPIs are marked IRQ_PER_CPU. This has the side effect of
	 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
	 * applying to them. We EOI them late to avoid re-entering.
	 * We mark IPI's with SA_INTERRUPT as they must run with
	 * irqs disabled.
	 */
	mpic_eoi(mpic);
}
549 
550 #endif /* CONFIG_SMP */
551 
/* Route an interrupt source to the requested CPUs (restricted to the
 * online map, converted from logical to physical cpu numbering).
 */
static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
{
	struct mpic *mpic = mpic_from_irq(irq);

	cpumask_t tmp;

	/* Never route to offline CPUs */
	cpus_and(tmp, cpumask, cpu_online_map);

	mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION,
		       mpic_physmask(cpus_addr(tmp)[0]));
}
563 
564 
565 /*
566  * Exported functions
567  */
568 
569 
/* Allocate and set up a struct mpic for the controller at phys_addr:
 * installs the irq_chip callbacks, maps the global/per-CPU/source
 * register blocks, optionally resets the chip, and links it into the
 * global mpics list (becoming mpic_primary if MPIC_PRIMARY is set).
 * Returns NULL on allocation failure.  Sources remain uninitialized
 * until mpic_init() is called.
 */
struct mpic * __init mpic_alloc(unsigned long phys_addr,
				unsigned int flags,
				unsigned int isu_size,
				unsigned int irq_offset,
				unsigned int irq_count,
				unsigned int ipi_offset,
				unsigned char *senses,
				unsigned int senses_count,
				const char *name)
{
	struct mpic	*mpic;
	u32		reg;
	const char	*vers;
	int		i;

	mpic = alloc_bootmem(sizeof(struct mpic));
	if (mpic == NULL)
		return NULL;


	memset(mpic, 0, sizeof(struct mpic));
	mpic->name = name;

	/* Hook up the linux irq_chip callbacks */
	mpic->hc_irq.typename = name;
	mpic->hc_irq.startup = mpic_startup_irq;
	mpic->hc_irq.shutdown = mpic_shutdown_irq;
	mpic->hc_irq.enable = mpic_enable_irq;
	mpic->hc_irq.disable = mpic_disable_irq;
	mpic->hc_irq.end = mpic_end_irq;
	/* Only the primary controller handles affinity */
	if (flags & MPIC_PRIMARY)
		mpic->hc_irq.set_affinity = mpic_set_affinity;
#ifdef CONFIG_SMP
	mpic->hc_ipi.typename = name;
	mpic->hc_ipi.enable = mpic_enable_ipi;
	mpic->hc_ipi.disable = mpic_disable_ipi;
	mpic->hc_ipi.end = mpic_end_ipi;
#endif /* CONFIG_SMP */

	mpic->flags = flags;
	mpic->isu_size = isu_size;
	mpic->irq_offset = irq_offset;
	mpic->irq_count = irq_count;
	mpic->ipi_offset = ipi_offset;
	mpic->num_sources = 0; /* so far */
	mpic->senses = senses;
	mpic->senses_count = senses_count;

	/* Map the global registers */
	mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
	mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2);
	BUG_ON(mpic->gregs == NULL);

	/* Reset, then poll until the chip clears the reset bit */
	if (flags & MPIC_WANTS_RESET) {
		mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
			   mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
			   | MPIC_GREG_GCONF_RESET);
		while( mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
		       & MPIC_GREG_GCONF_RESET)
			mb();
	}

	/* Read feature register, calculate num CPUs and, for non-ISU
	 * MPICs, num sources as well. On ISU MPICs, sources are counted
	 * as ISUs are added
	 */
	reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0);
	mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK)
			  >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
	if (isu_size == 0)
		mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK)
				     >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;

	/* Map the per-CPU registers */
	for (i = 0; i < mpic->num_cpus; i++) {
		mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE +
					   i * MPIC_CPU_STRIDE, 0x1000);
		BUG_ON(mpic->cpuregs[i] == NULL);
	}

	/* Initialize main ISU if none provided */
	if (mpic->isu_size == 0) {
		mpic->isu_size = mpic->num_sources;
		mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE,
					MPIC_IRQ_STRIDE * mpic->isu_size);
		BUG_ON(mpic->isus[0] == NULL);
	}
	/* shift/mask split a source number into (isu index, offset in isu) */
	mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
	mpic->isu_mask = (1 << mpic->isu_shift) - 1;

	/* Display version */
	switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) {
	case 1:
		vers = "1.0";
		break;
	case 2:
		vers = "1.2";
		break;
	case 3:
		vers = "1.3";
		break;
	default:
		vers = "<unknown>";
		break;
	}
	printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
	       name, vers, phys_addr, mpic->num_cpus);
	printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size,
	       mpic->isu_shift, mpic->isu_mask);

	/* Link into the global list */
	mpic->next = mpics;
	mpics = mpic;

	if (flags & MPIC_PRIMARY)
		mpic_primary = mpic;

	return mpic;
}
688 
/* Map an external Interrupt Source Unit into slot isu_num and grow the
 * controller's source count to cover it.
 */
void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
			    unsigned long phys_addr)
{
	unsigned int isu_first = isu_num * mpic->isu_size;

	BUG_ON(isu_num >= MPIC_MAX_ISU);

	mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size);
	/* Sources are counted as ISUs are added */
	if ((isu_first + mpic->isu_size) > mpic->num_sources)
		mpic->num_sources = isu_first + mpic->isu_size;
}
700 
/* Install (or replace) the cascade handler hanging off the given MPIC
 * interrupt; the old cascade vector, if any, is disabled first.
 */
void __init mpic_setup_cascade(unsigned int irq, mpic_cascade_t handler,
			       void *data)
{
	struct mpic *mpic = mpic_find(irq, NULL);
	unsigned long flags;

	/* Synchronization here is a bit dodgy, so don't try to replace cascade
	 * interrupts on the fly too often ... but normally it's set up at boot.
	 */
	spin_lock_irqsave(&mpic_lock, flags);
	if (mpic->cascade)
		mpic_disable_irq(mpic->cascade_vec + mpic->irq_offset);
	/* Clear the handler before updating vec/data so a concurrent
	 * mpic_get_one_irq() never sees a half-updated triple
	 */
	mpic->cascade = NULL;
	wmb();
	mpic->cascade_vec = irq - mpic->irq_offset;
	mpic->cascade_data = data;
	wmb();
	mpic->cascade = handler;
	mpic_enable_irq(irq);
	spin_unlock_irqrestore(&mpic_lock, flags);
}
722 
723 void __init mpic_init(struct mpic *mpic)
724 {
725 	int i;
726 
727 	BUG_ON(mpic->num_sources == 0);
728 
729 	printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);
730 
731 	/* Set current processor priority to max */
732 	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);
733 
734 	/* Initialize timers: just disable them all */
735 	for (i = 0; i < 4; i++) {
736 		mpic_write(mpic->tmregs,
737 			   i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0);
738 		mpic_write(mpic->tmregs,
739 			   i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI,
740 			   MPIC_VECPRI_MASK |
741 			   (MPIC_VEC_TIMER_0 + i));
742 	}
743 
744 	/* Initialize IPIs to our reserved vectors and mark them disabled for now */
745 	mpic_test_broken_ipi(mpic);
746 	for (i = 0; i < 4; i++) {
747 		mpic_ipi_write(i,
748 			       MPIC_VECPRI_MASK |
749 			       (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
750 			       (MPIC_VEC_IPI_0 + i));
751 #ifdef CONFIG_SMP
752 		if (!(mpic->flags & MPIC_PRIMARY))
753 			continue;
754 		irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU;
755 		irq_desc[mpic->ipi_offset+i].handler = &mpic->hc_ipi;
756 #endif /* CONFIG_SMP */
757 	}
758 
759 	/* Initialize interrupt sources */
760 	if (mpic->irq_count == 0)
761 		mpic->irq_count = mpic->num_sources;
762 
763 #ifdef CONFIG_MPIC_BROKEN_U3
764 	/* Do the HT PIC fixups on U3 broken mpic */
765 	DBG("MPIC flags: %x\n", mpic->flags);
766 	if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY))
767 		mpic_scan_ht_pics(mpic);
768 #endif /* CONFIG_MPIC_BROKEN_U3 */
769 
770 	for (i = 0; i < mpic->num_sources; i++) {
771 		/* start with vector = source number, and masked */
772 		u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
773 		int level = 0;
774 
775 		/* if it's an IPI, we skip it */
776 		if ((mpic->irq_offset + i) >= (mpic->ipi_offset + i) &&
777 		    (mpic->irq_offset + i) <  (mpic->ipi_offset + i + 4))
778 			continue;
779 
780 		/* do senses munging */
781 		if (mpic->senses && i < mpic->senses_count) {
782 			if (mpic->senses[i] & IRQ_SENSE_LEVEL)
783 				vecpri |= MPIC_VECPRI_SENSE_LEVEL;
784 			if (mpic->senses[i] & IRQ_POLARITY_POSITIVE)
785 				vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
786 		} else
787 			vecpri |= MPIC_VECPRI_SENSE_LEVEL;
788 
789 		/* remember if it was a level interrupts */
790 		level = (vecpri & MPIC_VECPRI_SENSE_LEVEL);
791 
792 		/* deal with broken U3 */
793 		if (mpic->flags & MPIC_BROKEN_U3) {
794 #ifdef CONFIG_MPIC_BROKEN_U3
795 			if (mpic_is_ht_interrupt(mpic, i)) {
796 				vecpri &= ~(MPIC_VECPRI_SENSE_MASK |
797 					    MPIC_VECPRI_POLARITY_MASK);
798 				vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
799 			}
800 #else
801 			printk(KERN_ERR "mpic: BROKEN_U3 set, but CONFIG doesn't match\n");
802 #endif
803 		}
804 
805 		DBG("setup source %d, vecpri: %08x, level: %d\n", i, vecpri,
806 		    (level != 0));
807 
808 		/* init hw */
809 		mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
810 		mpic_irq_write(i, MPIC_IRQ_DESTINATION,
811 			       1 << hard_smp_processor_id());
812 
813 		/* init linux descriptors */
814 		if (i < mpic->irq_count) {
815 			irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0;
816 			irq_desc[mpic->irq_offset+i].handler = &mpic->hc_irq;
817 		}
818 	}
819 
820 	/* Init spurrious vector */
821 	mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS);
822 
823 	/* Disable 8259 passthrough */
824 	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
825 		   mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
826 		   | MPIC_GREG_GCONF_8259_PTHROU_DIS);
827 
828 	/* Set current processor priority to 0 */
829 	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
830 }
831 
832 
833 
/* Change the MPIC priority of a linux irq (works for both IPIs and
 * normal sources; preserves the rest of the vector/priority register).
 */
void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
{
	int is_ipi;
	struct mpic *mpic = mpic_find(irq, &is_ipi);
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&mpic_lock, flags);
	if (is_ipi) {
		reg = mpic_ipi_read(irq - mpic->ipi_offset) &
			~MPIC_VECPRI_PRIORITY_MASK;
		mpic_ipi_write(irq - mpic->ipi_offset,
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	} else {
		reg = mpic_irq_read(irq - mpic->irq_offset,MPIC_IRQ_VECTOR_PRI)
			& ~MPIC_VECPRI_PRIORITY_MASK;
		mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI,
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	}
	spin_unlock_irqrestore(&mpic_lock, flags);
}
855 
/* Read back the MPIC priority of a linux irq (IPI or normal source). */
unsigned int mpic_irq_get_priority(unsigned int irq)
{
	int is_ipi;
	struct mpic *mpic = mpic_find(irq, &is_ipi);
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&mpic_lock, flags);
	if (is_ipi)
		reg = mpic_ipi_read(irq - mpic->ipi_offset);
	else
		reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI);
	spin_unlock_irqrestore(&mpic_lock, flags);
	/* Extract just the priority field */
	return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT;
}
871 
/* Per-CPU bringup: add this CPU to every source's destination mask (when
 * IRQ distribution is enabled) and drop its task priority to 0 so it can
 * receive interrupts.  No-op on non-SMP builds.
 */
void mpic_setup_this_cpu(void)
{
#ifdef CONFIG_SMP
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());

	spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we want intrs. default affinity is 0xffffffff
	 * until changed via /proc. That's how it's done on x86. If we want
	 * it differently, then we should make sure we also change the default
	 * values of irq_affinity in irq.c.
	 */
	if (distribute_irqs) {
	 	for (i = 0; i < mpic->num_sources ; i++)
			mpic_irq_write(i, MPIC_IRQ_DESTINATION,
				mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk);
	}

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);

	spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
}
903 
/* Read the current CPU's task priority from the primary MPIC. */
int mpic_cpu_get_priority(void)
{
	struct mpic *mpic = mpic_primary;

	return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
}
910 
911 void mpic_cpu_set_priority(int prio)
912 {
913 	struct mpic *mpic = mpic_primary;
914 
915 	prio &= MPIC_CPU_TASKPRI_MASK;
916 	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
917 }
918 
919 /*
920  * XXX: someone who knows mpic should check this.
921  * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
922  * or can we reset the mpic in the new kernel?
923  */
/* Per-CPU teardown (e.g. for kexec): remove this CPU from every source's
 * destination mask and raise its task priority to max so it receives no
 * further interrupts.
 */
void mpic_teardown_this_cpu(int secondary)
{
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
	spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we don't want intrs.  */
	for (i = 0; i < mpic->num_sources ; i++)
		mpic_irq_write(i, MPIC_IRQ_DESTINATION,
			mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);

	spin_unlock_irqrestore(&mpic_lock, flags);
}
946 
947 
/* Send IPI ipi_no to the CPUs in cpu_mask (logical numbering; converted
 * to physical and restricted to online CPUs before dispatch).
 */
void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

#ifdef DEBUG_IPI
	DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
#endif

	mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10,
		       mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
}
961 
/* Acknowledge and return the next pending linux irq number from this
 * MPIC, dispatching to the cascade handler when the cascade vector fires.
 * Returns -1 on a spurious interrupt.
 */
int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
{
	u32 irq;

	/* INTACK read both acks the interrupt and yields its vector */
	irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
#ifdef DEBUG_LOW
	DBG("%s: get_one_irq(): %d\n", mpic->name, irq);
#endif
	if (mpic->cascade && irq == mpic->cascade_vec) {
#ifdef DEBUG_LOW
		DBG("%s: cascading ...\n", mpic->name);
#endif
		irq = mpic->cascade(regs, mpic->cascade_data);
		mpic_eoi(mpic);
		return irq;
	}
	if (unlikely(irq == MPIC_VEC_SPURRIOUS))
		return -1;
	if (irq < MPIC_VEC_IPI_0) {
		/* Normal source: translate to the linux irq number */
#ifdef DEBUG_IRQ
		DBG("%s: irq %d\n", mpic->name, irq + mpic->irq_offset);
#endif
		return irq + mpic->irq_offset;
	}
	/* Otherwise it's one of the four IPI vectors */
#ifdef DEBUG_IPI
       	DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0);
#endif
	return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset;
}
991 
/* Fetch the next pending interrupt from the primary MPIC. */
int mpic_get_irq(struct pt_regs *regs)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

	return mpic_get_one_irq(mpic, regs);
}
1000 
1001 
1002 #ifdef CONFIG_SMP
1003 void mpic_request_ipis(void)
1004 {
1005 	struct mpic *mpic = mpic_primary;
1006 
1007 	BUG_ON(mpic == NULL);
1008 
1009 	printk("requesting IPIs ... \n");
1010 
1011 	/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
1012 	request_irq(mpic->ipi_offset+0, mpic_ipi_action, SA_INTERRUPT,
1013 		    "IPI0 (call function)", mpic);
1014 	request_irq(mpic->ipi_offset+1, mpic_ipi_action, SA_INTERRUPT,
1015 		   "IPI1 (reschedule)", mpic);
1016 	request_irq(mpic->ipi_offset+2, mpic_ipi_action, SA_INTERRUPT,
1017 		   "IPI2 (unused)", mpic);
1018 	request_irq(mpic->ipi_offset+3, mpic_ipi_action, SA_INTERRUPT,
1019 		   "IPI3 (debugger break)", mpic);
1020 
1021 	printk("IPIs requested... \n");
1022 }
1023 
1024 void smp_mpic_message_pass(int target, int msg)
1025 {
1026 	/* make sure we're sending something that translates to an IPI */
1027 	if ((unsigned int)msg > 3) {
1028 		printk("SMP %d: smp_message_pass: unknown msg %d\n",
1029 		       smp_processor_id(), msg);
1030 		return;
1031 	}
1032 	switch (target) {
1033 	case MSG_ALL:
1034 		mpic_send_ipi(msg, 0xffffffff);
1035 		break;
1036 	case MSG_ALL_BUT_SELF:
1037 		mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
1038 		break;
1039 	default:
1040 		mpic_send_ipi(msg, 1 << target);
1041 		break;
1042 	}
1043 }
1044 #endif /* CONFIG_SMP */
1045