/*
 *  Support for the interrupt controllers found on Power Macintosh,
 *  currently Apple's "Grand Central" interrupt controller in all
 *  its incarnations. OpenPIC support used on newer machines is
 *  in a separate file.
 *
 *  Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
 *  Copyright (C) 2005 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *                     IBM, Corp.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/module.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/time.h>
#include <asm/pmac_feature.h>
#include <asm/mpic.h>
#include <asm/xmon.h>

#include "pmac.h"

#ifdef CONFIG_PPC32
struct pmac_irq_hw {
	unsigned int	event;
	unsigned int	enable;
	unsigned int	ack;
	unsigned int	level;
};

/* Workaround flags for 32bit powermac machines */
unsigned int of_irq_workarounds;
struct device_node *of_irq_dflt_pic;

/* Default addresses */
static volatile struct pmac_irq_hw __iomem *pmac_irq_hw[4];

#define GC_LEVEL_MASK		0x3ff00000
#define OHARE_LEVEL_MASK	0x1ff00000
#define HEATHROW_LEVEL_MASK	0x1ff00000

static int max_irqs;
static int max_real_irqs;
static u32 level_mask[4];

static DEFINE_RAW_SPINLOCK(pmac_pic_lock);

#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
static int pmac_irq_cascade = -1;
static struct irq_host *pmac_pic_host;

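/*
 * Mark an interrupt as "lost" and force an imminent decrementer exception
 * so it gets replayed once interrupts are re-enabled. Interrupts living on
 * the cascaded (Gatwick) controller are recorded and the cascade input
 * itself is retriggered instead. Caller must hold pmac_pic_lock.
 */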
static void __pmac_retrigger(unsigned int irq_nr)
{
	if (irq_nr >= max_real_irqs && pmac_irq_cascade > 0) {
		__set_bit(irq_nr, ppc_lost_interrupts);
		irq_nr = pmac_irq_cascade;
		mb();
	}
	if (!__test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
		atomic_inc(&ppc_n_lost_interrupts);
		set_dec(1);
	}
}

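/*
 * Mask and acknowledge an interrupt: clear it from the cached enable mask,
 * drop any pending "lost" record, then write the new enable mask and the
 * ack bit, spinning until the write is visible on the controller.
 */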
static void pmac_mask_and_ack_irq(struct irq_data *d)
{
	unsigned int src = irqd_to_hwirq(d);
	unsigned long bit = 1UL << (src & 0x1f);
	int i = src >> 5;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	__clear_bit(src, ppc_cached_irq_mask);
	if (__test_and_clear_bit(src, ppc_lost_interrupts))
		atomic_dec(&ppc_n_lost_interrupts);
	out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
	out_le32(&pmac_irq_hw[i]->ack, bit);
	do {
		/* make sure ack gets to controller before we enable
		   interrupts */
		mb();
	} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
		!= (ppc_cached_irq_mask[i] & bit));
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

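/*
 * Acknowledge an interrupt without masking it; also clears any pending
 * "lost" record for that source.
 */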
static void pmac_ack_irq(struct irq_data *d)
{
	unsigned int src = irqd_to_hwirq(d);
	unsigned long bit = 1UL << (src & 0x1f);
	int i = src >> 5;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	if (__test_and_clear_bit(src, ppc_lost_interrupts))
		atomic_dec(&ppc_n_lost_interrupts);
	out_le32(&pmac_irq_hw[i]->ack, bit);
	(void)in_le32(&pmac_irq_hw[i]->ack);
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

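/*
 * Push the cached enable mask for one source out to the controller and
 * wait for it to stick. If a level interrupt is still asserted when it is
 * re-enabled, retrigger it by hand (see the comment below on why the
 * hardware won't do it for us). Caller must hold pmac_pic_lock.
 */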
static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
{
	unsigned long bit = 1UL << (irq_nr & 0x1f);
	int i = irq_nr >> 5;

	if ((unsigned)irq_nr >= max_irqs)
		return;

	/* enable unmasked interrupts */
	out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);

	do {
		/* make sure mask gets to controller before we
		   return to user */
		mb();
	} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
		!= (ppc_cached_irq_mask[i] & bit));

	/*
	 * Unfortunately, setting the bit in the enable register
	 * when the device interrupt is already on *doesn't* set
	 * the bit in the flag register or request another interrupt.
	 */
	if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
		__pmac_retrigger(irq_nr);
}

/* When an irq gets requested for the first client, if it's an
 * edge interrupt, we clear any previous one on the controller
 */
static unsigned int pmac_startup_irq(struct irq_data *d)
{
	unsigned long flags;
	unsigned int src = irqd_to_hwirq(d);
	unsigned long bit = 1UL << (src & 0x1f);
	int i = src >> 5;

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	if (!irqd_is_level_type(d))
		out_le32(&pmac_irq_hw[i]->ack, bit);
	__set_bit(src, ppc_cached_irq_mask);
	__pmac_set_irq_mask(src, 0);
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);

	return 0;
}

static void pmac_mask_irq(struct irq_data *d)
{
	unsigned long flags;
	unsigned int src = irqd_to_hwirq(d);

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	__clear_bit(src, ppc_cached_irq_mask);
	__pmac_set_irq_mask(src, 1);
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

static void pmac_unmask_irq(struct irq_data *d)
{
	unsigned long flags;
	unsigned int src = irqd_to_hwirq(d);

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	__set_bit(src, ppc_cached_irq_mask);
	__pmac_set_irq_mask(src, 0);
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

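/* Software retrigger: mark the interrupt lost so it gets replayed shortly */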
static int pmac_retrigger(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	__pmac_retrigger(irqd_to_hwirq(d));
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
	return 1;
}

static struct irq_chip pmac_pic = {
	.name		= "PMAC-PIC",
	.irq_startup	= pmac_startup_irq,
	.irq_mask	= pmac_mask_irq,
	.irq_ack	= pmac_ack_irq,
	.irq_mask_ack	= pmac_mask_and_ack_irq,
	.irq_unmask	= pmac_unmask_irq,
	.irq_retrigger	= pmac_retrigger,
};

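/*
 * Cascade handler for the secondary (Gatwick) controller: scan its event,
 * lost and level bits and dispatch every pending interrupt found there.
 */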
static irqreturn_t gatwick_action(int cpl, void *dev_id)
{
	unsigned long flags;
	int irq, bits;
	int rc = IRQ_NONE;

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
		int i = irq >> 5;
		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
		/* We must read level interrupts from the level register */
		bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
		bits &= ppc_cached_irq_mask[i];
		if (bits == 0)
			continue;
		irq += __ilog2(bits);
		raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
		generic_handle_irq(irq);
		raw_spin_lock_irqsave(&pmac_pic_lock, flags);
		rc = IRQ_HANDLED;
	}
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
	return rc;
}

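/*
 * ppc_md.get_irq for the old-style PIC: find the highest-numbered pending
 * source on the primary controller and return its virtual irq number.
 */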
static unsigned int pmac_pic_get_irq(void)
{
	int irq;
	unsigned long bits = 0;
	unsigned long flags;

#ifdef CONFIG_PPC_PMAC32_PSURGE
	/* IPI's are a hack on the powersurge -- Cort */
	if (smp_processor_id() != 0) {
		return psurge_secondary_virq;
	}
#endif /* CONFIG_PPC_PMAC32_PSURGE */
	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
		int i = irq >> 5;
		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
		/* We must read level interrupts from the level register */
		bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
		bits &= ppc_cached_irq_mask[i];
		if (bits == 0)
			continue;
		irq += __ilog2(bits);
		break;
	}
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
	if (unlikely(irq < 0))
		return NO_IRQ;
	return irq_linear_revmap(pmac_pic_host, irq);
}

#ifdef CONFIG_XMON
static struct irqaction xmon_action = {
	.handler	= xmon_irq,
	.flags		= 0,
	.name		= "NMI - XMON"
};
#endif

static struct irqaction gatwick_cascade_action = {
	.handler	= gatwick_action,
	.flags		= IRQF_DISABLED,
	.name		= "cascade",
};

static int pmac_pic_host_match(struct irq_host *h, struct device_node *node)
{
	/* We match all, we don't always have a node anyway */
	return 1;
}

static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
			     irq_hw_number_t hw)
{
	int level;

	if (hw >= max_irqs)
		return -EINVAL;

	/* Mark level interrupts, set delayed disable for edge ones and set
	 * handlers
	 */
	level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f)));
	if (level)
		irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &pmac_pic,
				 level ? handle_level_irq : handle_edge_irq);
	return 0;
}

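/* Old-world interrupt specifiers are a single cell with no type flags */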
static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct,
			       const u32 *intspec, unsigned int intsize,
			       irq_hw_number_t *out_hwirq,
			       unsigned int *out_flags)
{
	*out_flags = IRQ_TYPE_NONE;
	*out_hwirq = *intspec;
	return 0;
}

static struct irq_host_ops pmac_pic_host_ops = {
	.match = pmac_pic_host_match,
	.map = pmac_pic_host_map,
	.xlate = pmac_pic_host_xlate,
};

static void __init pmac_pic_probe_oldstyle(void)
{
	int i;
	struct device_node *master = NULL;
	struct device_node *slave = NULL;
	u8 __iomem *addr;
	struct resource r;

	/* Set our get_irq function */
	ppc_md.get_irq = pmac_pic_get_irq;

	/*
	 * Find the interrupt controller type & node
	 */

	if ((master = of_find_node_by_name(NULL, "gc")) != NULL) {
		max_irqs = max_real_irqs = 32;
		level_mask[0] = GC_LEVEL_MASK;
	} else if ((master = of_find_node_by_name(NULL, "ohare")) != NULL) {
		max_irqs = max_real_irqs = 32;
		level_mask[0] = OHARE_LEVEL_MASK;

		/* We might have a second cascaded ohare */
		slave = of_find_node_by_name(NULL, "pci106b,7");
		if (slave) {
			max_irqs = 64;
			level_mask[1] = OHARE_LEVEL_MASK;
		}
	} else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) {
		max_irqs = max_real_irqs = 64;
		level_mask[0] = HEATHROW_LEVEL_MASK;
		level_mask[1] = 0;

		/* We might have a second cascaded heathrow */
		slave = of_find_node_by_name(master, "mac-io");

		/* Check ordering of master & slave */
		if (of_device_is_compatible(master, "gatwick")) {
			struct device_node *tmp;
			BUG_ON(slave == NULL);
			tmp = master;
			master = slave;
			slave = tmp;
		}

		/* We found a slave */
		if (slave) {
			max_irqs = 128;
			level_mask[2] = HEATHROW_LEVEL_MASK;
			level_mask[3] = 0;
		}
	}
	BUG_ON(master == NULL);

	/*
	 * Allocate an irq host
	 */
	pmac_pic_host = irq_alloc_host(master, IRQ_HOST_MAP_LINEAR, max_irqs,
				       &pmac_pic_host_ops,
				       max_irqs);
	BUG_ON(pmac_pic_host == NULL);
	irq_set_default_host(pmac_pic_host);

	/* Get addresses of first controller if we have a node for it */
	BUG_ON(of_address_to_resource(master, 0, &r));

	/* Map interrupts of primary controller */
	addr = (u8 __iomem *) ioremap(r.start, 0x40);
	i = 0;
	pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
		(addr + 0x20);
	if (max_real_irqs > 32)
		pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
			(addr + 0x10);

	printk(KERN_INFO "irq: Found primary Apple PIC %s for %d irqs\n",
	       master->full_name, max_real_irqs);
	of_node_put(master);

	/* Map interrupts of cascaded controller */
	if (slave && !of_address_to_resource(slave, 0, &r)) {
		addr = (u8 __iomem *)ioremap(r.start, 0x40);
		pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
			(addr + 0x20);
		if (max_irqs > 64)
			pmac_irq_hw[i++] =
				(volatile struct pmac_irq_hw __iomem *)
				(addr + 0x10);
		pmac_irq_cascade = irq_of_parse_and_map(slave, 0);

		printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs"
		       " cascade: %d\n", slave->full_name,
		       max_irqs - max_real_irqs, pmac_irq_cascade);
	}
	of_node_put(slave);

	/* Disable all interrupts in all controllers */
	for (i = 0; i * 32 < max_irqs; ++i)
		out_le32(&pmac_irq_hw[i]->enable, 0);

	/* Hookup cascade irq */
	if (slave && pmac_irq_cascade != NO_IRQ)
		setup_irq(pmac_irq_cascade, &gatwick_cascade_action);

	printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
#ifdef CONFIG_XMON
	setup_irq(irq_create_mapping(NULL, 20), &xmon_action);
#endif
}

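/*
 * Translate an "AAPL,interrupts" entry (old-world machines have no
 * interrupt-controller nodes) into a bare one-cell interrupt specifier
 * with no controller attached.
 */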
int of_irq_map_oldworld(struct device_node *device, int index,
			struct of_irq *out_irq)
{
	const u32 *ints = NULL;
	int intlen;

	/*
	 * Old machines just have a list of interrupt numbers
	 * and no interrupt-controller nodes. We also have dodgy
	 * cases where the AAPL,interrupts property is completely
	 * missing behind pci-pci bridges and we have to get it
	 * from the parent (the bridge itself, as Apple just wired
	 * everything together on these)
	 */
	while (device) {
		ints = of_get_property(device, "AAPL,interrupts", &intlen);
		if (ints != NULL)
			break;
		device = device->parent;
		if (device && strcmp(device->type, "pci") != 0)
			break;
	}
	if (ints == NULL)
		return -EINVAL;
	intlen /= sizeof(u32);

	if (index >= intlen)
		return -EINVAL;

	out_irq->controller = NULL;
	out_irq->specifier[0] = ints[index];
	out_irq->size = 1;

	return 0;
}
#endif /* CONFIG_PPC32 */

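/* Chained handler for a slave MPIC cascaded off the primary one */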
static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct mpic *mpic = irq_desc_get_handler_data(desc);
	unsigned int cascade_irq = mpic_get_one_irq(mpic);

	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq);

	chip->irq_eoi(&desc->irq_data);
}

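/* Hook the programmer-switch button up to xmon as an NMI, when configured */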
static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
{
#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
	struct device_node *pswitch;
	int nmi_irq;

	pswitch = of_find_node_by_name(NULL, "programmer-switch");
	if (pswitch) {
		nmi_irq = irq_of_parse_and_map(pswitch, 0);
		if (nmi_irq != NO_IRQ) {
			mpic_irq_set_priority(nmi_irq, 9);
			setup_irq(nmi_irq, &xmon_action);
		}
		of_node_put(pswitch);
	}
#endif	/* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */
}

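/*
 * Map, feature-enable and initialize one MPIC from its device node;
 * "master" selects the primary-controller flags.
 */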
static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
						int master)
{
	const char *name = master ? " MPIC 1   " : " MPIC 2   ";
	struct resource r;
	struct mpic *mpic;
	unsigned int flags = master ? MPIC_PRIMARY : 0;
	int rc;

	rc = of_address_to_resource(np, 0, &r);
	if (rc)
		return NULL;

	pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);

	flags |= MPIC_WANTS_RESET;
	if (of_get_property(np, "big-endian", NULL))
		flags |= MPIC_BIG_ENDIAN;

	/* Primary Big Endian means HT interrupts. This is quite dodgy
	 * but works until I find a better way
	 */
	if (master && (flags & MPIC_BIG_ENDIAN))
		flags |= MPIC_U3_HT_IRQS;

	mpic = mpic_alloc(np, r.start, flags, 0, 0, name);
	if (mpic == NULL)
		return NULL;

	mpic_init(mpic);

	return mpic;
}

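/*
 * Look for Core99-style "open-pic" nodes and set up the primary MPIC plus
 * an optional cascaded slave. Returns -ENODEV when none is found so the
 * caller can fall back to the old-style PIC.
 */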
static int __init pmac_pic_probe_mpic(void)
{
	struct mpic *mpic1, *mpic2;
	struct device_node *np, *master = NULL, *slave = NULL;
	unsigned int cascade;

	/* We can have up to 2 MPICs cascaded */
	for (np = NULL; (np = of_find_node_by_type(np, "open-pic"))
		     != NULL;) {
		if (master == NULL &&
		    of_get_property(np, "interrupts", NULL) == NULL)
			master = of_node_get(np);
		else if (slave == NULL)
			slave = of_node_get(np);
		if (master && slave)
			break;
	}

	/* Check for bogus setups */
	if (master == NULL && slave != NULL) {
		master = slave;
		slave = NULL;
	}

	/* Not found, default to good old pmac pic */
	if (master == NULL)
		return -ENODEV;

	/* Set master handler */
	ppc_md.get_irq = mpic_get_irq;

	/* Setup master */
	mpic1 = pmac_setup_one_mpic(master, 1);
	BUG_ON(mpic1 == NULL);

	/* Install NMI if any */
	pmac_pic_setup_mpic_nmi(mpic1);

	of_node_put(master);

	/* No slave, let's go out */
	if (slave == NULL)
		return 0;

	/* Get/Map slave interrupt */
	cascade = irq_of_parse_and_map(slave, 0);
	if (cascade == NO_IRQ) {
		printk(KERN_ERR "Failed to map cascade IRQ\n");
		of_node_put(slave);
		return 0;
	}

	mpic2 = pmac_setup_one_mpic(slave, 0);
	if (mpic2 == NULL) {
		printk(KERN_ERR "Failed to setup slave MPIC\n");
		of_node_put(slave);
		return 0;
	}
	irq_set_handler_data(cascade, mpic2);
	irq_set_chained_handler(cascade, pmac_u3_cascade);

	of_node_put(slave);
	return 0;
}

void __init pmac_pic_init(void)
{
	/* We configure the OF parsing based on our oldworld vs. newworld
	 * platform type and whether we were booted by BootX.
	 */
#ifdef CONFIG_PPC32
	if (!pmac_newworld)
		of_irq_workarounds |= OF_IMAP_OLDWORLD_MAC;
	if (of_get_property(of_chosen, "linux,bootx", NULL) != NULL)
		of_irq_workarounds |= OF_IMAP_NO_PHANDLE;

	/* If we don't have phandles on a newworld, then try to locate a
	 * default interrupt controller (happens when booting with BootX).
	 * We take the first match here, which hopefully only ever happens
	 * on machines with a single controller.
	 */
	if (pmac_newworld && (of_irq_workarounds & OF_IMAP_NO_PHANDLE)) {
		struct device_node *np;

		for_each_node_with_property(np, "interrupt-controller") {
			/* Skip /chosen/interrupt-controller */
			if (strcmp(np->name, "chosen") == 0)
				continue;
			/* It seems like at least one person wants
			 * to use BootX on a machine with an AppleKiwi
			 * controller which happens to pretend to be an
			 * interrupt controller too. */
			if (strcmp(np->name, "AppleKiwi") == 0)
				continue;
			/* I think we found one! */
			of_irq_dflt_pic = np;
			break;
		}
	}
#endif /* CONFIG_PPC32 */

	/* We first try to detect Apple's new Core99 chipset, since mac-io
	 * is quite different on those machines and contains an IBM MPIC2.
	 */
	if (pmac_pic_probe_mpic() == 0)
		return;

#ifdef CONFIG_PPC32
	pmac_pic_probe_oldstyle();
#endif
}

#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
/*
 * These procedures are used in implementing sleep on the powerbooks.
 * pmacpic_suspend() saves the states of all interrupt enables
 * and disables all interrupts except for the nominated one.
 * pmacpic_resume() restores the states of all interrupt enables.
 */
unsigned long sleep_save_mask[2];

/* This used to be passed by the PMU driver but that link got
 * broken with the new driver model. We use this tweak for now...
 * We really want to do things differently though...
 */
static int pmacpic_find_viaint(void)
{
	int viaint = -1;

#ifdef CONFIG_ADB_PMU
	struct device_node *np;

	if (pmu_get_model() != PMU_OHARE_BASED)
		goto not_found;
	np = of_find_node_by_name(NULL, "via-pmu");
	if (np == NULL)
		goto not_found;
	viaint = irq_of_parse_and_map(np, 0);

not_found:
#endif /* CONFIG_ADB_PMU */
	return viaint;
}

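/*
 * syscore suspend: save the enable masks and mask every source except the
 * PMU VIA interrupt, if any, so the PMU can still interrupt us during the
 * sleep sequence.
 */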
static int pmacpic_suspend(void)
{
	int viaint = pmacpic_find_viaint();

	sleep_save_mask[0] = ppc_cached_irq_mask[0];
	sleep_save_mask[1] = ppc_cached_irq_mask[1];
	ppc_cached_irq_mask[0] = 0;
	ppc_cached_irq_mask[1] = 0;
	if (viaint > 0)
		set_bit(viaint, ppc_cached_irq_mask);
	out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]);
	if (max_real_irqs > 32)
		out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]);
	(void)in_le32(&pmac_irq_hw[0]->event);
	/* make sure mask gets to controller before we return to caller */
	mb();
	(void)in_le32(&pmac_irq_hw[0]->enable);

	return 0;
}

static void pmacpic_resume(void)
{
	int i;

	out_le32(&pmac_irq_hw[0]->enable, 0);
	if (max_real_irqs > 32)
		out_le32(&pmac_irq_hw[1]->enable, 0);
	mb();
	for (i = 0; i < max_real_irqs; ++i)
		if (test_bit(i, sleep_save_mask))
			pmac_unmask_irq(irq_get_irq_data(i));
}

static struct syscore_ops pmacpic_syscore_ops = {
	.suspend	= pmacpic_suspend,
	.resume		= pmacpic_resume,
};

static int __init init_pmacpic_syscore(void)
{
	if (pmac_irq_hw[0])
		register_syscore_ops(&pmacpic_syscore_ops);
	return 0;
}

machine_subsys_initcall(powermac, init_pmacpic_syscore);

#endif /* CONFIG_PM && CONFIG_PPC32 */