/*
 *  Support for the interrupt controllers found on Power Macintosh,
 *  currently Apple's "Grand Central" interrupt controller in all
 *  its incarnations.  OpenPIC support used on newer machines is
 *  in a separate file.
 *
 *  Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
 *  Copyright (C) 2005 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *                     IBM, Corp.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/adb.h>
#include <linux/pmu.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/time.h>
#include <asm/pmac_feature.h>
#include <asm/mpic.h>
#include <asm/xmon.h>

#include "pmac.h"

#ifdef CONFIG_PPC32
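/*
 * Layout of one 32-interrupt bank of controller registers, as used by
 * the code below: all registers are accessed as 32-bit little-endian
 * values, each bank covers 32 sources (hence the ">> 5" indexing), and
 * up to four banks can be mapped (see pmac_irq_hw[]).
 */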
struct pmac_irq_hw {
	unsigned int	event;
	unsigned int	enable;
	unsigned int	ack;
	unsigned int	level;
};

/* Workaround flags for 32-bit PowerMac machines */
unsigned int of_irq_workarounds;
struct device_node *of_irq_dflt_pic;

/* Default addresses */
static volatile struct pmac_irq_hw __iomem *pmac_irq_hw[4];

static int max_irqs;
static int max_real_irqs;

static DEFINE_RAW_SPINLOCK(pmac_pic_lock);

/* The max irq number this driver deals with is 128; see max_irqs */
static DECLARE_BITMAP(ppc_lost_interrupts, 128);
static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
static int pmac_irq_cascade = -1;
static struct irq_domain *pmac_pic_host;

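/*
 * Record a "lost" interrupt and force a decrementer exception so it gets
 * replayed. Sources behind the cascade are remembered individually and
 * the cascade input itself is retriggered. Caller holds pmac_pic_lock.
 */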
static void __pmac_retrigger(unsigned int irq_nr)
{
	if (irq_nr >= max_real_irqs && pmac_irq_cascade > 0) {
		__set_bit(irq_nr, ppc_lost_interrupts);
		irq_nr = pmac_irq_cascade;
		mb();
	}
	if (!__test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
		atomic_inc(&ppc_n_lost_interrupts);
		set_dec(1);
	}
}

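/*
 * Mask and ack in one go: clear the source from the cached enable mask,
 * drop any "lost" record for it, then write the new mask and the ack bit,
 * spinning until the controller has seen the update.
 */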
static void pmac_mask_and_ack_irq(struct irq_data *d)
{
	unsigned int src = irqd_to_hwirq(d);
	unsigned long bit = 1UL << (src & 0x1f);
	int i = src >> 5;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	__clear_bit(src, ppc_cached_irq_mask);
	if (__test_and_clear_bit(src, ppc_lost_interrupts))
		atomic_dec(&ppc_n_lost_interrupts);
	out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
	out_le32(&pmac_irq_hw[i]->ack, bit);
	do {
		/* make sure ack gets to controller before we enable
		   interrupts */
		mb();
	} while ((in_le32(&pmac_irq_hw[i]->enable) & bit)
		 != (ppc_cached_irq_mask[i] & bit));
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

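/* Ack only: drop any "lost" record, write the ack bit and flush it out */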
static void pmac_ack_irq(struct irq_data *d)
{
	unsigned int src = irqd_to_hwirq(d);
	unsigned long bit = 1UL << (src & 0x1f);
	int i = src >> 5;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	if (__test_and_clear_bit(src, ppc_lost_interrupts))
		atomic_dec(&ppc_n_lost_interrupts);
	out_le32(&pmac_irq_hw[i]->ack, bit);
	(void)in_le32(&pmac_irq_hw[i]->ack);
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

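/*
 * Push the cached enable mask for the bank containing irq_nr out to the
 * controller and wait for it to stick. If the line is already asserted
 * while being unmasked, retrigger it, since setting the enable bit in
 * that state does not latch a new event (see comment below). Caller
 * holds pmac_pic_lock; the "nokicklost" argument is currently unused.
 */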
static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
{
	unsigned long bit = 1UL << (irq_nr & 0x1f);
	int i = irq_nr >> 5;

	if ((unsigned)irq_nr >= max_irqs)
		return;

	/* enable unmasked interrupts */
	out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);

	do {
		/* make sure mask gets to controller before we
		   return to the caller */
		mb();
	} while ((in_le32(&pmac_irq_hw[i]->enable) & bit)
		 != (ppc_cached_irq_mask[i] & bit));

	/*
	 * Unfortunately, setting the bit in the enable register
	 * when the device interrupt is already on *doesn't* set
	 * the bit in the flag register or request another interrupt.
	 */
	if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
		__pmac_retrigger(irq_nr);
}

/* When an irq gets requested for the first client, if it's an
 * edge interrupt, we clear any previous one on the controller
 */
static unsigned int pmac_startup_irq(struct irq_data *d)
{
	unsigned long flags;
	unsigned int src = irqd_to_hwirq(d);
	unsigned long bit = 1UL << (src & 0x1f);
	int i = src >> 5;

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	if (!irqd_is_level_type(d))
		out_le32(&pmac_irq_hw[i]->ack, bit);
	__set_bit(src, ppc_cached_irq_mask);
	__pmac_set_irq_mask(src, 0);
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);

	return 0;
}

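/* Mask: clear the source in the cached mask and push it to the hardware */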
static void pmac_mask_irq(struct irq_data *d)
{
	unsigned long flags;
	unsigned int src = irqd_to_hwirq(d);

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	__clear_bit(src, ppc_cached_irq_mask);
	__pmac_set_irq_mask(src, 1);
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

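/* Unmask: set the source in the cached mask and push it to the hardware */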
static void pmac_unmask_irq(struct irq_data *d)
{
	unsigned long flags;
	unsigned int src = irqd_to_hwirq(d);

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	__set_bit(src, ppc_cached_irq_mask);
	__pmac_set_irq_mask(src, 0);
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

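/* irq_chip retrigger hook: replay the source via the lost-interrupt path */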
static int pmac_retrigger(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	__pmac_retrigger(irqd_to_hwirq(d));
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
	return 1;
}

static struct irq_chip pmac_pic = {
	.name		= "PMAC-PIC",
	.irq_startup	= pmac_startup_irq,
	.irq_mask	= pmac_mask_irq,
	.irq_ack	= pmac_ack_irq,
	.irq_mask_ack	= pmac_mask_and_ack_irq,
	.irq_unmask	= pmac_unmask_irq,
	.irq_retrigger	= pmac_retrigger,
};

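/*
 * Cascade handler for the secondary (Gatwick) controller: for each of its
 * banks, handle the highest pending, unmasked source found there.
 */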
static irqreturn_t gatwick_action(int cpl, void *dev_id)
{
	unsigned long flags;
	int irq, bits;
	int rc = IRQ_NONE;

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
		int i = irq >> 5;
		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
		bits |= in_le32(&pmac_irq_hw[i]->level);
		bits &= ppc_cached_irq_mask[i];
		if (bits == 0)
			continue;
		irq += __ilog2(bits);
		raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
		generic_handle_irq(irq);
		raw_spin_lock_irqsave(&pmac_pic_lock, flags);
		rc = IRQ_HANDLED;
	}
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
	return rc;
}

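/*
 * Primary get_irq hook: return the virq of the highest pending, unmasked
 * source on the first-level controller (or the IPI virq on PowerSurge
 * secondary CPUs).
 */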
static unsigned int pmac_pic_get_irq(void)
{
	int irq;
	unsigned long bits = 0;
	unsigned long flags;

#ifdef CONFIG_PPC_PMAC32_PSURGE
	/* IPIs are a hack on the powersurge -- Cort */
	if (smp_processor_id() != 0)
		return psurge_secondary_virq;
#endif /* CONFIG_PPC_PMAC32_PSURGE */
	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
		int i = irq >> 5;
		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
		bits |= in_le32(&pmac_irq_hw[i]->level);
		bits &= ppc_cached_irq_mask[i];
		if (bits == 0)
			continue;
		irq += __ilog2(bits);
		break;
	}
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
	if (unlikely(irq < 0))
		return 0;
	return irq_linear_revmap(pmac_pic_host, irq);
}

#ifdef CONFIG_XMON
static struct irqaction xmon_action = {
	.handler	= xmon_irq,
	.flags		= IRQF_NO_THREAD,
	.name		= "NMI - XMON"
};
#endif

static struct irqaction gatwick_cascade_action = {
	.handler	= gatwick_action,
	.flags		= IRQF_NO_THREAD,
	.name		= "cascade",
};

static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node,
			       enum irq_domain_bus_token bus_token)
{
	/* We match everything; we don't always have a node anyway */
	return 1;
}

static int pmac_pic_host_map(struct irq_domain *h, unsigned int virq,
			     irq_hw_number_t hw)
{
	if (hw >= max_irqs)
		return -EINVAL;

	/* All interrupts are treated as level by this PIC; mark them as
	 * such and install the level handler
	 */
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &pmac_pic, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops pmac_pic_host_ops = {
	.match = pmac_pic_host_match,
	.map = pmac_pic_host_map,
	.xlate = irq_domain_xlate_onecell,
};

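/*
 * Probe and set up the old-style (Grand Central / OHare / Heathrow)
 * controllers, including an optional cascaded slave, and register the
 * linear irq domain covering them.
 */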
static void __init pmac_pic_probe_oldstyle(void)
{
	int i;
	struct device_node *master = NULL;
	struct device_node *slave = NULL;
	u8 __iomem *addr;
	struct resource r;

	/* Set our get_irq function */
	ppc_md.get_irq = pmac_pic_get_irq;

	/*
	 * Find the interrupt controller type & node
	 */

	if ((master = of_find_node_by_name(NULL, "gc")) != NULL) {
		max_irqs = max_real_irqs = 32;
	} else if ((master = of_find_node_by_name(NULL, "ohare")) != NULL) {
		max_irqs = max_real_irqs = 32;
		/* We might have a second cascaded ohare */
		slave = of_find_node_by_name(NULL, "pci106b,7");
		if (slave)
			max_irqs = 64;
	} else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) {
		max_irqs = max_real_irqs = 64;

		/* We might have a second cascaded heathrow */

		/* Compensate for of_node_put() in of_find_node_by_name() */
		of_node_get(master);
		slave = of_find_node_by_name(master, "mac-io");

		/* Check ordering of master & slave */
		if (of_device_is_compatible(master, "gatwick")) {
			struct device_node *tmp;
			BUG_ON(slave == NULL);
			tmp = master;
			master = slave;
			slave = tmp;
		}

		/* We found a slave */
		if (slave)
			max_irqs = 128;
	}
	BUG_ON(master == NULL);

	/*
	 * Allocate an irq host
	 */
	pmac_pic_host = irq_domain_add_linear(master, max_irqs,
					      &pmac_pic_host_ops, NULL);
	BUG_ON(pmac_pic_host == NULL);
	irq_set_default_host(pmac_pic_host);

	/* Get addresses of first controller if we have a node for it */
	BUG_ON(of_address_to_resource(master, 0, &r));

	/* Map interrupts of primary controller */
	addr = (u8 __iomem *) ioremap(r.start, 0x40);
	i = 0;
	pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
		(addr + 0x20);
	if (max_real_irqs > 32)
		pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
			(addr + 0x10);

	printk(KERN_INFO "irq: Found primary Apple PIC %s for %d irqs\n",
	       master->full_name, max_real_irqs);

	of_node_put(master);

	/* Map interrupts of cascaded controller */
	if (slave && !of_address_to_resource(slave, 0, &r)) {
		addr = (u8 __iomem *)ioremap(r.start, 0x40);
		pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
			(addr + 0x20);
		if (max_irqs > 64)
			pmac_irq_hw[i++] =
				(volatile struct pmac_irq_hw __iomem *)
				(addr + 0x10);
		pmac_irq_cascade = irq_of_parse_and_map(slave, 0);

		printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs"
		       " cascade: %d\n", slave->full_name,
		       max_irqs - max_real_irqs, pmac_irq_cascade);
	}
	of_node_put(slave);

	/* Disable all interrupts in all controllers */
	for (i = 0; i * 32 < max_irqs; ++i)
		out_le32(&pmac_irq_hw[i]->enable, 0);

	/* Hookup cascade irq */
	if (slave && pmac_irq_cascade)
		setup_irq(pmac_irq_cascade, &gatwick_cascade_action);

	printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
#ifdef CONFIG_XMON
	setup_irq(irq_create_mapping(NULL, 20), &xmon_action);
#endif
}

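/*
 * Interrupt parsing for oldworld machines: walk up from the device
 * looking for an "AAPL,interrupts" property and return the requested
 * entry as a one-cell interrupt specifier.
 */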
int of_irq_parse_oldworld(struct device_node *device, int index,
			struct of_phandle_args *out_irq)
{
	const u32 *ints = NULL;
	int intlen;

	/*
	 * Old machines just have a list of interrupt numbers
	 * and no interrupt-controller nodes. We also have dodgy
	 * cases where the AAPL,interrupts property is completely
	 * missing behind PCI-PCI bridges and we have to get it
	 * from the parent (the bridge itself, as Apple just wired
	 * everything together on these)
	 */
	while (device) {
		ints = of_get_property(device, "AAPL,interrupts", &intlen);
		if (ints != NULL)
			break;
		device = device->parent;
		if (device && strcmp(device->type, "pci") != 0)
			break;
	}
	if (ints == NULL)
		return -EINVAL;
	intlen /= sizeof(u32);

	if (index >= intlen)
		return -EINVAL;

	out_irq->np = NULL;
	out_irq->args[0] = ints[index];
	out_irq->args_count = 1;

	return 0;
}
#endif /* CONFIG_PPC32 */

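/* Wire the programmer-switch button to XMON as an NMI (CONFIG_XMON only) */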
static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
{
#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
	struct device_node *pswitch;
	int nmi_irq;

	pswitch = of_find_node_by_name(NULL, "programmer-switch");
	if (pswitch) {
		nmi_irq = irq_of_parse_and_map(pswitch, 0);
		if (nmi_irq) {
			mpic_irq_set_priority(nmi_irq, 9);
			setup_irq(nmi_irq, &xmon_action);
		}
		of_node_put(pswitch);
	}
#endif	/* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */
}

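/*
 * Bring up one MPIC: enable it via the platform feature call, honour the
 * "big-endian" property, and flag HT interrupts on a big-endian master.
 */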
static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
						int master)
{
	const char *name = master ? " MPIC 1   " : " MPIC 2   ";
	struct mpic *mpic;
	unsigned int flags = master ? 0 : MPIC_SECONDARY;

	pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);

	if (of_get_property(np, "big-endian", NULL))
		flags |= MPIC_BIG_ENDIAN;

	/* Primary Big Endian means HT interrupts. This is quite dodgy
	 * but works until I find a better way
	 */
	if (master && (flags & MPIC_BIG_ENDIAN))
		flags |= MPIC_U3_HT_IRQS;

	mpic = mpic_alloc(np, 0, flags, 0, 0, name);
	if (mpic == NULL)
		return NULL;

	mpic_init(mpic);

	return mpic;
}

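/*
 * Look for up to two "open-pic" nodes (master plus an optional cascaded
 * slave) and set them up. Returns -ENODEV when none is found so the
 * caller can fall back to the old-style PIC.
 */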
static int __init pmac_pic_probe_mpic(void)
{
	struct mpic *mpic1, *mpic2;
	struct device_node *np, *master = NULL, *slave = NULL;

	/* We can have up to 2 MPICs cascaded */
	for (np = NULL; (np = of_find_node_by_type(np, "open-pic"))
		     != NULL;) {
		if (master == NULL &&
		    of_get_property(np, "interrupts", NULL) == NULL)
			master = of_node_get(np);
		else if (slave == NULL)
			slave = of_node_get(np);
		if (master && slave)
			break;
	}

	/* Check for bogus setups */
	if (master == NULL && slave != NULL) {
		master = slave;
		slave = NULL;
	}

	/* Not found, default to good old pmac pic */
	if (master == NULL)
		return -ENODEV;

	/* Set master handler */
	ppc_md.get_irq = mpic_get_irq;

	/* Setup master */
	mpic1 = pmac_setup_one_mpic(master, 1);
	BUG_ON(mpic1 == NULL);

	/* Install NMI if any */
	pmac_pic_setup_mpic_nmi(mpic1);

	of_node_put(master);

	/* Set up a cascaded controller, if present */
	if (slave) {
		mpic2 = pmac_setup_one_mpic(slave, 0);
		if (mpic2 == NULL)
			printk(KERN_ERR "Failed to setup slave MPIC\n");
		of_node_put(slave);
	}

	return 0;
}

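/* Probe order: try the MPIC(s) of Core99 machines first, then fall back
 * to the old-style PIC on 32-bit.
 */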
void __init pmac_pic_init(void)
{
	/* We configure the OF parsing based on our oldworld vs. newworld
	 * platform type and whether we were booted by BootX.
	 */
#ifdef CONFIG_PPC32
	if (!pmac_newworld)
		of_irq_workarounds |= OF_IMAP_OLDWORLD_MAC;
	if (of_get_property(of_chosen, "linux,bootx", NULL) != NULL)
		of_irq_workarounds |= OF_IMAP_NO_PHANDLE;

	/* If we don't have phandles on a newworld, then try to locate a
	 * default interrupt controller (happens when booting with BootX).
	 * We take the first match here; hopefully that only ever happens
	 * on machines with one controller.
	 */
	if (pmac_newworld && (of_irq_workarounds & OF_IMAP_NO_PHANDLE)) {
		struct device_node *np;

		for_each_node_with_property(np, "interrupt-controller") {
			/* Skip /chosen/interrupt-controller */
			if (strcmp(np->name, "chosen") == 0)
				continue;
			/* It seems like at least one person wants
			 * to use BootX on a machine with an AppleKiwi
			 * controller which happens to pretend to be an
			 * interrupt controller too. */
			if (strcmp(np->name, "AppleKiwi") == 0)
				continue;
			/* I think we found one! */
			of_irq_dflt_pic = np;
			break;
		}
	}
#endif /* CONFIG_PPC32 */

	/* We first try to detect Apple's new Core99 chipset, since mac-io
	 * is quite different on those machines and contains an IBM MPIC2.
	 */
	if (pmac_pic_probe_mpic() == 0)
		return;

#ifdef CONFIG_PPC32
	pmac_pic_probe_oldstyle();
#endif
}

#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
/*
 * These procedures are used in implementing sleep on the powerbooks.
 * pmacpic_suspend() saves the states of all interrupt enables
 * and disables all interrupts except for the nominated one.
 * pmacpic_resume() restores the states of all interrupt enables.
 */
unsigned long sleep_save_mask[2];

/* This used to be passed by the PMU driver but that link got
 * broken with the new driver model. We use this tweak for now...
 * We really want to do things differently though...
 */
static int pmacpic_find_viaint(void)
{
	int viaint = -1;

#ifdef CONFIG_ADB_PMU
	struct device_node *np;

	if (pmu_get_model() != PMU_OHARE_BASED)
		goto not_found;
	np = of_find_node_by_name(NULL, "via-pmu");
	if (np == NULL)
		goto not_found;
	viaint = irq_of_parse_and_map(np, 0);
	of_node_put(np);

not_found:
#endif /* CONFIG_ADB_PMU */
	return viaint;
}

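/*
 * Syscore suspend: save the enable masks and mask everything except, on
 * OHare-based PMU machines, the via-pmu interrupt.
 */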
static int pmacpic_suspend(void)
{
	int viaint = pmacpic_find_viaint();

	sleep_save_mask[0] = ppc_cached_irq_mask[0];
	sleep_save_mask[1] = ppc_cached_irq_mask[1];
	ppc_cached_irq_mask[0] = 0;
	ppc_cached_irq_mask[1] = 0;
	if (viaint > 0)
		set_bit(viaint, ppc_cached_irq_mask);
	out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]);
	if (max_real_irqs > 32)
		out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]);
	(void)in_le32(&pmac_irq_hw[0]->event);
	/* make sure mask gets to controller before we return to caller */
	mb();
	(void)in_le32(&pmac_irq_hw[0]->enable);

	return 0;
}

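/*
 * Syscore resume: start with everything masked, then unmask whatever was
 * enabled when we suspended.
 */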
static void pmacpic_resume(void)
{
	int i;

	out_le32(&pmac_irq_hw[0]->enable, 0);
	if (max_real_irqs > 32)
		out_le32(&pmac_irq_hw[1]->enable, 0);
	mb();
	for (i = 0; i < max_real_irqs; ++i)
		if (test_bit(i, sleep_save_mask))
			pmac_unmask_irq(irq_get_irq_data(i));
}

static struct syscore_ops pmacpic_syscore_ops = {
	.suspend	= pmacpic_suspend,
	.resume		= pmacpic_resume,
};

static int __init init_pmacpic_syscore(void)
{
	if (pmac_irq_hw[0])
		register_syscore_ops(&pmacpic_syscore_ops);
	return 0;
}

machine_subsys_initcall(powermac, init_pmacpic_syscore);

#endif /* CONFIG_PM && CONFIG_PPC32 */