/*
 * Support for the interrupt controllers found on Power Macintosh,
 * currently Apple's "Grand Central" interrupt controller in all
 * its incarnations. OpenPIC support used on newer machines is
 * in a separate file.
 *
 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
 * Copyright (C) 2005 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *                    IBM, Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sysdev.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/module.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/time.h>
#include <asm/pmac_feature.h>
#include <asm/mpic.h>

#include "pmac.h"

/*
 * XXX this should be in xmon.h, but putting it there means xmon.h
 * has to include <linux/interrupt.h> (to get irqreturn_t), which
 * causes all sorts of problems.  -- paulus
 */
extern irqreturn_t xmon_irq(int, void *);

#ifdef CONFIG_PPC32
struct pmac_irq_hw {
	unsigned int	event;
	unsigned int	enable;
	unsigned int	ack;
	unsigned int	level;
};

/* Default addresses */
static volatile struct pmac_irq_hw __iomem *pmac_irq_hw[4];

#define GC_LEVEL_MASK		0x3ff00000
#define OHARE_LEVEL_MASK	0x1ff00000
#define HEATHROW_LEVEL_MASK	0x1ff00000

static int max_irqs;
static int max_real_irqs;
static u32 level_mask[4];

static DEFINE_SPINLOCK(pmac_pic_lock);

#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
static int pmac_irq_cascade = -1;
static struct irq_host *pmac_pic_host;

static void __pmac_retrigger(unsigned int irq_nr)
{
	if (irq_nr >= max_real_irqs && pmac_irq_cascade > 0) {
		__set_bit(irq_nr, ppc_lost_interrupts);
		irq_nr = pmac_irq_cascade;
		mb();
	}
	if (!__test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
		atomic_inc(&ppc_n_lost_interrupts);
		set_dec(1);
	}
}

static void pmac_mask_and_ack_irq(unsigned int virq)
{
	unsigned int src = irq_map[virq].hwirq;
	unsigned long bit = 1UL << (src & 0x1f);
	int i = src >> 5;
	unsigned long flags;

	spin_lock_irqsave(&pmac_pic_lock, flags);
	__clear_bit(src, ppc_cached_irq_mask);
	if (__test_and_clear_bit(src, ppc_lost_interrupts))
		atomic_dec(&ppc_n_lost_interrupts);
	out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
	out_le32(&pmac_irq_hw[i]->ack, bit);
	do {
		/* make sure ack gets to controller before we enable
		   interrupts */
		mb();
	} while ((in_le32(&pmac_irq_hw[i]->enable) & bit)
		 != (ppc_cached_irq_mask[i] & bit));
	spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

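/*
 * Ack without masking, used for edge interrupts: clear any "lost"
 * interrupt state recorded for this source, write the ack bit and
 * read it back so the write reaches the controller before we return.
 */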
static void pmac_ack_irq(unsigned int virq)
{
	unsigned int src = irq_map[virq].hwirq;
	unsigned long bit = 1UL << (src & 0x1f);
	int i = src >> 5;
	unsigned long flags;

	spin_lock_irqsave(&pmac_pic_lock, flags);
	if (__test_and_clear_bit(src, ppc_lost_interrupts))
		atomic_dec(&ppc_n_lost_interrupts);
	out_le32(&pmac_irq_hw[i]->ack, bit);
	(void)in_le32(&pmac_irq_hw[i]->ack);
	spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
{
	unsigned long bit = 1UL << (irq_nr & 0x1f);
	int i = irq_nr >> 5;

	if ((unsigned)irq_nr >= max_irqs)
		return;

	/* enable unmasked interrupts */
	out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);

	do {
		/* make sure mask gets to controller before we
		   return to user */
		mb();
	} while ((in_le32(&pmac_irq_hw[i]->enable) & bit)
		 != (ppc_cached_irq_mask[i] & bit));

	/*
	 * Unfortunately, setting the bit in the enable register
	 * when the device interrupt is already on *doesn't* set
	 * the bit in the flag register or request another interrupt.
	 */
	if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
		__pmac_retrigger(irq_nr);
}

/* When an irq gets requested for the first client, if it's an
 * edge interrupt, we clear any previous one on the controller
 */
static unsigned int pmac_startup_irq(unsigned int virq)
{
	unsigned long flags;
	unsigned int src = irq_map[virq].hwirq;
	unsigned long bit = 1UL << (src & 0x1f);
	int i = src >> 5;

	spin_lock_irqsave(&pmac_pic_lock, flags);
	if ((irq_desc[virq].status & IRQ_LEVEL) == 0)
		out_le32(&pmac_irq_hw[i]->ack, bit);
	__set_bit(src, ppc_cached_irq_mask);
	__pmac_set_irq_mask(src, 0);
	spin_unlock_irqrestore(&pmac_pic_lock, flags);

	return 0;
}

static void pmac_mask_irq(unsigned int virq)
{
	unsigned long flags;
	unsigned int src = irq_map[virq].hwirq;

	spin_lock_irqsave(&pmac_pic_lock, flags);
	__clear_bit(src, ppc_cached_irq_mask);
	__pmac_set_irq_mask(src, 1);
	spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

static void pmac_unmask_irq(unsigned int virq)
{
	unsigned long flags;
	unsigned int src = irq_map[virq].hwirq;

	spin_lock_irqsave(&pmac_pic_lock, flags);
	__set_bit(src, ppc_cached_irq_mask);
	__pmac_set_irq_mask(src, 0);
	spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

static int pmac_retrigger(unsigned int virq)
{
	unsigned long flags;

	spin_lock_irqsave(&pmac_pic_lock, flags);
	__pmac_retrigger(irq_map[virq].hwirq);
	spin_unlock_irqrestore(&pmac_pic_lock, flags);
	return 1;
}

static struct irq_chip pmac_pic = {
	.typename	= " PMAC-PIC ",
	.startup	= pmac_startup_irq,
	.mask		= pmac_mask_irq,
	.ack		= pmac_ack_irq,
	.mask_ack	= pmac_mask_and_ack_irq,
	.unmask		= pmac_unmask_irq,
	.retrigger	= pmac_retrigger,
};

static irqreturn_t gatwick_action(int cpl, void *dev_id)
{
	unsigned long flags;
	int irq, bits;
	int rc = IRQ_NONE;

	spin_lock_irqsave(&pmac_pic_lock, flags);
	for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
		int i = irq >> 5;
		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
		/* We must read level interrupts from the level register */
		bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
		bits &= ppc_cached_irq_mask[i];
		if (bits == 0)
			continue;
		irq += __ilog2(bits);
		spin_unlock_irqrestore(&pmac_pic_lock, flags);
		__do_IRQ(irq);
		spin_lock_irqsave(&pmac_pic_lock, flags);
		rc = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&pmac_pic_lock, flags);
	return rc;
}

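/*
 * Primary interrupt fetch routine (installed as ppc_md.get_irq for the
 * old-style PICs): scan the banks of the primary controller for a
 * pending, unmasked source -- including level sources read from the
 * level register and software-retriggered "lost" interrupts -- and
 * map the hardware number back to a linux virq.
 */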
static unsigned int pmac_pic_get_irq(void)
{
	int irq;
	unsigned long bits = 0;
	unsigned long flags;

#ifdef CONFIG_SMP
	void psurge_smp_message_recv(void);

	/* IPI's are a hack on the powersurge -- Cort */
	if (smp_processor_id() != 0) {
		psurge_smp_message_recv();
		return NO_IRQ_IGNORE;	/* ignore, already handled */
	}
#endif /* CONFIG_SMP */
	spin_lock_irqsave(&pmac_pic_lock, flags);
	for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
		int i = irq >> 5;
		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
		/* We must read level interrupts from the level register */
		bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
		bits &= ppc_cached_irq_mask[i];
		if (bits == 0)
			continue;
		irq += __ilog2(bits);
		break;
	}
	spin_unlock_irqrestore(&pmac_pic_lock, flags);
	if (unlikely(irq < 0))
		return NO_IRQ;
	return irq_linear_revmap(pmac_pic_host, irq);
}

#ifdef CONFIG_XMON
static struct irqaction xmon_action = {
	.handler	= xmon_irq,
	.flags		= 0,
	.mask		= CPU_MASK_NONE,
	.name		= "NMI - XMON"
};
#endif

static struct irqaction gatwick_cascade_action = {
	.handler	= gatwick_action,
	.flags		= IRQF_DISABLED,
	.mask		= CPU_MASK_NONE,
	.name		= "cascade",
};

static int pmac_pic_host_match(struct irq_host *h, struct device_node *node)
{
	/* We match all, we don't always have a node anyway */
	return 1;
}

static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
			     irq_hw_number_t hw)
{
	struct irq_desc *desc = get_irq_desc(virq);
	int level;

	if (hw >= max_irqs)
		return -EINVAL;

	/* Mark level interrupts, set delayed disable for edge ones and set
	 * handlers
	 */
	level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f)));
	if (level)
		desc->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &pmac_pic, level ?
				 handle_level_irq : handle_edge_irq);
	return 0;
}

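/*
 * Old-style device-tree interrupt specifiers are a single cell with no
 * sense/flag information, so just pass the number straight through.
 */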
static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct,
			       u32 *intspec, unsigned int intsize,
			       irq_hw_number_t *out_hwirq,
			       unsigned int *out_flags)
{
	*out_flags = IRQ_TYPE_NONE;
	*out_hwirq = *intspec;
	return 0;
}

static struct irq_host_ops pmac_pic_host_ops = {
	.match = pmac_pic_host_match,
	.map = pmac_pic_host_map,
	.xlate = pmac_pic_host_xlate,
};

static void __init pmac_pic_probe_oldstyle(void)
{
	int i;
	struct device_node *master = NULL;
	struct device_node *slave = NULL;
	u8 __iomem *addr;
	struct resource r;

	/* Set our get_irq function */
	ppc_md.get_irq = pmac_pic_get_irq;

	/*
	 * Find the interrupt controller type & node
	 */
	if ((master = of_find_node_by_name(NULL, "gc")) != NULL) {
		max_irqs = max_real_irqs = 32;
		level_mask[0] = GC_LEVEL_MASK;
	} else if ((master = of_find_node_by_name(NULL, "ohare")) != NULL) {
		max_irqs = max_real_irqs = 32;
		level_mask[0] = OHARE_LEVEL_MASK;

		/* We might have a second cascaded ohare */
		slave = of_find_node_by_name(NULL, "pci106b,7");
		if (slave) {
			max_irqs = 64;
			level_mask[1] = OHARE_LEVEL_MASK;
		}
	} else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) {
		max_irqs = max_real_irqs = 64;
		level_mask[0] = HEATHROW_LEVEL_MASK;
		level_mask[1] = 0;

		/* We might have a second cascaded heathrow */
		slave = of_find_node_by_name(master, "mac-io");

		/* Check ordering of master & slave */
		if (of_device_is_compatible(master, "gatwick")) {
			struct device_node *tmp;
			BUG_ON(slave == NULL);
			tmp = master;
			master = slave;
			slave = tmp;
		}

		/* We found a slave */
		if (slave) {
			max_irqs = 128;
			level_mask[2] = HEATHROW_LEVEL_MASK;
			level_mask[3] = 0;
		}
	}
	BUG_ON(master == NULL);

	/*
	 * Allocate an irq host
	 */
	pmac_pic_host = irq_alloc_host(master, IRQ_HOST_MAP_LINEAR, max_irqs,
				       &pmac_pic_host_ops,
				       max_irqs);
	BUG_ON(pmac_pic_host == NULL);
	irq_set_default_host(pmac_pic_host);

	/* Get addresses of first controller if we have a node for it */
	BUG_ON(of_address_to_resource(master, 0, &r));

	/* Map interrupts of primary controller */
	addr = (u8 __iomem *) ioremap(r.start, 0x40);
	i = 0;
	pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
		(addr + 0x20);
	if (max_real_irqs > 32)
		pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
			(addr + 0x10);

	printk(KERN_INFO "irq: Found primary Apple PIC %s for %d irqs\n",
	       master->full_name, max_real_irqs);
	of_node_put(master);

	/* Map interrupts of cascaded controller */
	if (slave && !of_address_to_resource(slave, 0, &r)) {
		addr = (u8 __iomem *)ioremap(r.start, 0x40);
		pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
			(addr + 0x20);
		if (max_irqs > 64)
			pmac_irq_hw[i++] =
				(volatile struct pmac_irq_hw __iomem *)
					(addr + 0x10);
		pmac_irq_cascade = irq_of_parse_and_map(slave, 0);

		printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs"
		       " cascade: %d\n", slave->full_name,
		       max_irqs - max_real_irqs, pmac_irq_cascade);
	}
	of_node_put(slave);

	/* Disable all interrupts in all controllers */
	for (i = 0; i * 32 < max_irqs; ++i)
		out_le32(&pmac_irq_hw[i]->enable, 0);

	/* Hookup cascade irq */
	if (slave && pmac_irq_cascade != NO_IRQ)
		setup_irq(pmac_irq_cascade, &gatwick_cascade_action);

	printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
#ifdef CONFIG_XMON
	setup_irq(irq_create_mapping(NULL, 20), &xmon_action);
#endif
}
#endif /* CONFIG_PPC32 */

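/*
 * Chained handler for a cascaded (slave) MPIC: fetch one interrupt from
 * the slave and dispatch it, then EOI the cascade interrupt on the
 * parent controller.
 */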
static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct mpic *mpic = desc->handler_data;
	unsigned int cascade_irq = mpic_get_one_irq(mpic);

	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq);
	desc->chip->eoi(irq);
}

static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
{
#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
	struct device_node *pswitch;
	int nmi_irq;

	pswitch = of_find_node_by_name(NULL, "programmer-switch");
	if (pswitch) {
		nmi_irq = irq_of_parse_and_map(pswitch, 0);
		if (nmi_irq != NO_IRQ) {
			mpic_irq_set_priority(nmi_irq, 9);
			setup_irq(nmi_irq, &xmon_action);
		}
		of_node_put(pswitch);
	}
#endif	/* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */
}

static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
						int master)
{
	const char *name = master ? " MPIC 1 " : " MPIC 2 ";
	struct resource r;
	struct mpic *mpic;
	unsigned int flags = master ? MPIC_PRIMARY : 0;
	int rc;

	rc = of_address_to_resource(np, 0, &r);
	if (rc)
		return NULL;

	pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);

	flags |= MPIC_WANTS_RESET;
	if (of_get_property(np, "big-endian", NULL))
		flags |= MPIC_BIG_ENDIAN;

	/* Primary Big Endian means HT interrupts. This is quite dodgy
	 * but works until I find a better way
	 */
	if (master && (flags & MPIC_BIG_ENDIAN))
		flags |= MPIC_U3_HT_IRQS;

	mpic = mpic_alloc(np, r.start, flags, 0, 0, name);
	if (mpic == NULL)
		return NULL;

	mpic_init(mpic);

	return mpic;
}

static int __init pmac_pic_probe_mpic(void)
{
	struct mpic *mpic1, *mpic2;
	struct device_node *np, *master = NULL, *slave = NULL;
	unsigned int cascade;

	/* We can have up to 2 MPICs cascaded */
	for (np = NULL; (np = of_find_node_by_type(np, "open-pic"))
		     != NULL;) {
		if (master == NULL &&
		    of_get_property(np, "interrupts", NULL) == NULL)
			master = of_node_get(np);
		else if (slave == NULL)
			slave = of_node_get(np);
		if (master && slave)
			break;
	}

	/* Check for bogus setups */
	if (master == NULL && slave != NULL) {
		master = slave;
		slave = NULL;
	}

	/* Not found, default to good old pmac pic */
	if (master == NULL)
		return -ENODEV;

	/* Set master handler */
	ppc_md.get_irq = mpic_get_irq;

	/* Setup master */
	mpic1 = pmac_setup_one_mpic(master, 1);
	BUG_ON(mpic1 == NULL);

	/* Install NMI if any */
	pmac_pic_setup_mpic_nmi(mpic1);

	of_node_put(master);

	/* No slave, let's go out */
	if (slave == NULL)
		return 0;

	/* Get/Map slave interrupt */
	cascade = irq_of_parse_and_map(slave, 0);
	if (cascade == NO_IRQ) {
		printk(KERN_ERR "Failed to map cascade IRQ\n");
		return 0;
	}

	mpic2 = pmac_setup_one_mpic(slave, 0);
	if (mpic2 == NULL) {
		printk(KERN_ERR "Failed to setup slave MPIC\n");
		of_node_put(slave);
		return 0;
	}
	set_irq_data(cascade, mpic2);
	set_irq_chained_handler(cascade, pmac_u3_cascade);

	of_node_put(slave);
	return 0;
}

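/*
 * Top-level PIC setup: try the MPIC(s) found on Core99 and later
 * machines first, then fall back to the old-style Grand Central /
 * OHare / Heathrow PIC on 32-bit machines.
 */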
void __init pmac_pic_init(void)
{
	unsigned int flags = 0;

	/* We configure the OF parsing based on our oldworld vs. newworld
	 * platform type and whether we were booted by BootX.
	 */
#ifdef CONFIG_PPC32
	if (!pmac_newworld)
		flags |= OF_IMAP_OLDWORLD_MAC;
	if (of_get_property(of_chosen, "linux,bootx", NULL) != NULL)
		flags |= OF_IMAP_NO_PHANDLE;
#endif /* CONFIG_PPC32 */

	of_irq_map_init(flags);

	/* We first try to detect Apple's new Core99 chipset, since mac-io
	 * is quite different on those machines and contains an IBM MPIC2.
	 */
	if (pmac_pic_probe_mpic() == 0)
		return;

#ifdef CONFIG_PPC32
	pmac_pic_probe_oldstyle();
#endif
}

#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
/*
 * These procedures are used in implementing sleep on the powerbooks.
 * sleep_save_intrs() saves the states of all interrupt enables
 * and disables all interrupts except for the nominated one.
 * sleep_restore_intrs() restores the states of all interrupt enables.
 */
unsigned long sleep_save_mask[2];

/* This used to be passed by the PMU driver but that link got
 * broken with the new driver model. We use this tweak for now...
 * We really want to do things differently though...
 */
static int pmacpic_find_viaint(void)
{
	int viaint = -1;

#ifdef CONFIG_ADB_PMU
	struct device_node *np;

	if (pmu_get_model() != PMU_OHARE_BASED)
		goto not_found;
	np = of_find_node_by_name(NULL, "via-pmu");
	if (np == NULL)
		goto not_found;
	viaint = irq_of_parse_and_map(np, 0);

not_found:
#endif /* CONFIG_ADB_PMU */
	return viaint;
}

static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state)
{
	int viaint = pmacpic_find_viaint();

	sleep_save_mask[0] = ppc_cached_irq_mask[0];
	sleep_save_mask[1] = ppc_cached_irq_mask[1];
	ppc_cached_irq_mask[0] = 0;
	ppc_cached_irq_mask[1] = 0;
	if (viaint > 0)
		set_bit(viaint, ppc_cached_irq_mask);
	out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]);
	if (max_real_irqs > 32)
		out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]);
	(void)in_le32(&pmac_irq_hw[0]->event);
	/* make sure mask gets to controller before we return to caller */
	mb();
	(void)in_le32(&pmac_irq_hw[0]->enable);

	return 0;
}

static int pmacpic_resume(struct sys_device *sysdev)
{
	int i;

	out_le32(&pmac_irq_hw[0]->enable, 0);
	if (max_real_irqs > 32)
		out_le32(&pmac_irq_hw[1]->enable, 0);
	mb();
	for (i = 0; i < max_real_irqs; ++i)
		if (test_bit(i, sleep_save_mask))
			pmac_unmask_irq(i);

	return 0;
}

#endif /* CONFIG_PM && CONFIG_PPC32 */

static struct sysdev_class pmacpic_sysclass = {
	.name = "pmac_pic",
};

static struct sys_device device_pmacpic = {
	.id	= 0,
	.cls	= &pmacpic_sysclass,
};

static struct sysdev_driver driver_pmacpic = {
#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
	.suspend	= &pmacpic_suspend,
	.resume		= &pmacpic_resume,
#endif /* CONFIG_PM && CONFIG_PPC32 */
};

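/*
 * Register the PIC as a sysdev so the suspend/resume hooks above are
 * used during sleep; this only matters for the old-style PICs, hence
 * the max_irqs check below.
 */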
static int __init init_pmacpic_sysfs(void)
{
#ifdef CONFIG_PPC32
	if (max_irqs == 0)
		return -ENODEV;
#endif
	printk(KERN_DEBUG "Registering pmac pic with sysfs...\n");
	sysdev_class_register(&pmacpic_sysclass);
	sysdev_register(&device_pmacpic);
	sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic);
	return 0;
}

machine_subsys_initcall(powermac, init_pmacpic_sysfs);