1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Device driver for the PMU in Apple PowerBooks and PowerMacs. 4 * 5 * The VIA (versatile interface adapter) interfaces to the PMU, 6 * a 6805 microprocessor core whose primary function is to control 7 * battery charging and system power on the PowerBook 3400 and 2400. 8 * The PMU also controls the ADB (Apple Desktop Bus) which connects 9 * to the keyboard and mouse, as well as the non-volatile RAM 10 * and the RTC (real time clock) chip. 11 * 12 * Copyright (C) 1998 Paul Mackerras and Fabio Riccardi. 13 * Copyright (C) 2001-2002 Benjamin Herrenschmidt 14 * Copyright (C) 2006-2007 Johannes Berg 15 * 16 * THIS DRIVER IS BECOMING A TOTAL MESS ! 17 * - Cleanup atomically disabling reply to PMU events after 18 * a sleep or a freq. switch 19 * 20 */ 21 #include <stdarg.h> 22 #include <linux/mutex.h> 23 #include <linux/types.h> 24 #include <linux/errno.h> 25 #include <linux/kernel.h> 26 #include <linux/delay.h> 27 #include <linux/sched/signal.h> 28 #include <linux/miscdevice.h> 29 #include <linux/blkdev.h> 30 #include <linux/pci.h> 31 #include <linux/slab.h> 32 #include <linux/poll.h> 33 #include <linux/adb.h> 34 #include <linux/pmu.h> 35 #include <linux/cuda.h> 36 #include <linux/module.h> 37 #include <linux/spinlock.h> 38 #include <linux/pm.h> 39 #include <linux/proc_fs.h> 40 #include <linux/seq_file.h> 41 #include <linux/init.h> 42 #include <linux/interrupt.h> 43 #include <linux/device.h> 44 #include <linux/syscore_ops.h> 45 #include <linux/freezer.h> 46 #include <linux/syscalls.h> 47 #include <linux/suspend.h> 48 #include <linux/cpu.h> 49 #include <linux/compat.h> 50 #include <linux/of_address.h> 51 #include <linux/of_irq.h> 52 #include <linux/uaccess.h> 53 #include <asm/machdep.h> 54 #include <asm/io.h> 55 #include <asm/pgtable.h> 56 #include <asm/sections.h> 57 #include <asm/irq.h> 58 #ifdef CONFIG_PPC_PMAC 59 #include <asm/pmac_feature.h> 60 #include <asm/pmac_pfunc.h> 61 #include <asm/pmac_low_i2c.h> 62 #include <asm/prom.h> 63 #include <asm/mmu_context.h> 64 #include <asm/cputable.h> 65 #include <asm/time.h> 66 #include <asm/backlight.h> 67 #else 68 #include <asm/macintosh.h> 69 #include <asm/macints.h> 70 #include <asm/mac_via.h> 71 #endif 72 73 #include "via-pmu-event.h" 74 75 /* Some compile options */ 76 #undef DEBUG_SLEEP 77 78 /* How many iterations between battery polls */ 79 #define BATTERY_POLLING_COUNT 2 80 81 static DEFINE_MUTEX(pmu_info_proc_mutex); 82 83 /* VIA registers - spaced 0x200 bytes apart */ 84 #define RS 0x200 /* skip between registers */ 85 #define B 0 /* B-side data */ 86 #define A RS /* A-side data */ 87 #define DIRB (2*RS) /* B-side direction (1=output) */ 88 #define DIRA (3*RS) /* A-side direction (1=output) */ 89 #define T1CL (4*RS) /* Timer 1 ctr/latch (low 8 bits) */ 90 #define T1CH (5*RS) /* Timer 1 counter (high 8 bits) */ 91 #define T1LL (6*RS) /* Timer 1 latch (low 8 bits) */ 92 #define T1LH (7*RS) /* Timer 1 latch (high 8 bits) */ 93 #define T2CL (8*RS) /* Timer 2 ctr/latch (low 8 bits) */ 94 #define T2CH (9*RS) /* Timer 2 counter (high 8 bits) */ 95 #define SR (10*RS) /* Shift register */ 96 #define ACR (11*RS) /* Auxiliary control register */ 97 #define PCR (12*RS) /* Peripheral control register */ 98 #define IFR (13*RS) /* Interrupt flag register */ 99 #define IER (14*RS) /* Interrupt enable register */ 100 #define ANH (15*RS) /* A-side data, no handshake */ 101 102 /* Bits in B data register: both active low */ 103 #ifdef CONFIG_PPC_PMAC 104 #define TACK 0x08 /* Transfer acknowledge (input) */ 105 
#define TREQ 0x10 /* Transfer request (output) */ 106 #else 107 #define TACK 0x02 108 #define TREQ 0x04 109 #endif 110 111 /* Bits in ACR */ 112 #define SR_CTRL 0x1c /* Shift register control bits */ 113 #define SR_EXT 0x0c /* Shift on external clock */ 114 #define SR_OUT 0x10 /* Shift out if 1 */ 115 116 /* Bits in IFR and IER */ 117 #define IER_SET 0x80 /* set bits in IER */ 118 #define IER_CLR 0 /* clear bits in IER */ 119 #define SR_INT 0x04 /* Shift register full/empty */ 120 #define CB2_INT 0x08 121 #define CB1_INT 0x10 /* transition on CB1 input */ 122 123 static volatile enum pmu_state { 124 uninitialized = 0, 125 idle, 126 sending, 127 intack, 128 reading, 129 reading_intr, 130 locked, 131 } pmu_state; 132 133 static volatile enum int_data_state { 134 int_data_empty, 135 int_data_fill, 136 int_data_ready, 137 int_data_flush 138 } int_data_state[2] = { int_data_empty, int_data_empty }; 139 140 static struct adb_request *current_req; 141 static struct adb_request *last_req; 142 static struct adb_request *req_awaiting_reply; 143 static unsigned char interrupt_data[2][32]; 144 static int interrupt_data_len[2]; 145 static int int_data_last; 146 static unsigned char *reply_ptr; 147 static int data_index; 148 static int data_len; 149 static volatile int adb_int_pending; 150 static volatile int disable_poll; 151 static int pmu_kind = PMU_UNKNOWN; 152 static int pmu_fully_inited; 153 static int pmu_has_adb; 154 #ifdef CONFIG_PPC_PMAC 155 static volatile unsigned char __iomem *via1; 156 static volatile unsigned char __iomem *via2; 157 static struct device_node *vias; 158 static struct device_node *gpio_node; 159 #endif 160 static unsigned char __iomem *gpio_reg; 161 static int gpio_irq = 0; 162 static int gpio_irq_enabled = -1; 163 static volatile int pmu_suspended; 164 static spinlock_t pmu_lock; 165 static u8 pmu_intr_mask; 166 static int pmu_version; 167 static int drop_interrupts; 168 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 169 static int option_lid_wakeup = 1; 170 #endif /* CONFIG_SUSPEND && CONFIG_PPC32 */ 171 static unsigned long async_req_locks; 172 173 #define NUM_IRQ_STATS 13 174 static unsigned int pmu_irq_stats[NUM_IRQ_STATS]; 175 176 static struct proc_dir_entry *proc_pmu_root; 177 static struct proc_dir_entry *proc_pmu_info; 178 static struct proc_dir_entry *proc_pmu_irqstats; 179 static struct proc_dir_entry *proc_pmu_options; 180 static int option_server_mode; 181 182 int pmu_battery_count; 183 int pmu_cur_battery; 184 unsigned int pmu_power_flags = PMU_PWR_AC_PRESENT; 185 struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES]; 186 static int query_batt_timer = BATTERY_POLLING_COUNT; 187 static struct adb_request batt_req; 188 static struct proc_dir_entry *proc_pmu_batt[PMU_MAX_BATTERIES]; 189 190 int __fake_sleep; 191 int asleep; 192 193 #ifdef CONFIG_ADB 194 static int adb_dev_map; 195 static int pmu_adb_flags; 196 197 static int pmu_probe(void); 198 static int pmu_init(void); 199 static int pmu_send_request(struct adb_request *req, int sync); 200 static int pmu_adb_autopoll(int devs); 201 static int pmu_adb_reset_bus(void); 202 #endif /* CONFIG_ADB */ 203 204 static int init_pmu(void); 205 static void pmu_start(void); 206 static irqreturn_t via_pmu_interrupt(int irq, void *arg); 207 static irqreturn_t gpio1_interrupt(int irq, void *arg); 208 static int pmu_info_proc_show(struct seq_file *m, void *v); 209 static int pmu_irqstats_proc_show(struct seq_file *m, void *v); 210 static int pmu_battery_proc_show(struct seq_file *m, void *v); 211 static void 
pmu_pass_intr(unsigned char *data, int len); 212 static const struct proc_ops pmu_options_proc_ops; 213 214 #ifdef CONFIG_ADB 215 const struct adb_driver via_pmu_driver = { 216 .name = "PMU", 217 .probe = pmu_probe, 218 .init = pmu_init, 219 .send_request = pmu_send_request, 220 .autopoll = pmu_adb_autopoll, 221 .poll = pmu_poll_adb, 222 .reset_bus = pmu_adb_reset_bus, 223 }; 224 #endif /* CONFIG_ADB */ 225 226 extern void low_sleep_handler(void); 227 extern void enable_kernel_altivec(void); 228 extern void enable_kernel_fp(void); 229 230 #ifdef DEBUG_SLEEP 231 int pmu_polled_request(struct adb_request *req); 232 void pmu_blink(int n); 233 #endif 234 235 /* 236 * This table indicates for each PMU opcode: 237 * - the number of data bytes to be sent with the command, or -1 238 * if a length byte should be sent, 239 * - the number of response bytes which the PMU will return, or 240 * -1 if it will send a length byte. 241 */ 242 static const s8 pmu_data_len[256][2] = { 243 /* 0 1 2 3 4 5 6 7 */ 244 /*00*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 245 /*08*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 246 /*10*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 247 /*18*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0, 0}, 248 /*20*/ {-1, 0},{ 0, 0},{ 2, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0}, 249 /*28*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0,-1}, 250 /*30*/ { 4, 0},{20, 0},{-1, 0},{ 3, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 251 /*38*/ { 0, 4},{ 0,20},{ 2,-1},{ 2, 1},{ 3,-1},{-1,-1},{-1,-1},{ 4, 0}, 252 /*40*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 253 /*48*/ { 0, 1},{ 0, 1},{-1,-1},{ 1, 0},{ 1, 0},{-1,-1},{-1,-1},{-1,-1}, 254 /*50*/ { 1, 0},{ 0, 0},{ 2, 0},{ 2, 0},{-1, 0},{ 1, 0},{ 3, 0},{ 1, 0}, 255 /*58*/ { 0, 1},{ 1, 0},{ 0, 2},{ 0, 2},{ 0,-1},{-1,-1},{-1,-1},{-1,-1}, 256 /*60*/ { 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 257 /*68*/ { 0, 3},{ 0, 3},{ 0, 2},{ 0, 8},{ 0,-1},{ 0,-1},{-1,-1},{-1,-1}, 258 /*70*/ { 1, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 259 /*78*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{ 5, 1},{ 4, 1},{ 4, 1}, 260 /*80*/ { 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 261 /*88*/ { 0, 5},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 262 /*90*/ { 1, 0},{ 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 263 /*98*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 264 /*a0*/ { 2, 0},{ 2, 0},{ 2, 0},{ 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0}, 265 /*a8*/ { 1, 1},{ 1, 0},{ 3, 0},{ 2, 0},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 266 /*b0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 267 /*b8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 268 /*c0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 269 /*c8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 270 /*d0*/ { 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 271 /*d8*/ { 1, 1},{ 1, 1},{-1,-1},{-1,-1},{ 0, 1},{ 0,-1},{-1,-1},{-1,-1}, 272 /*e0*/ {-1, 0},{ 4, 0},{ 0, 1},{-1, 0},{-1, 0},{ 4, 0},{-1, 0},{-1, 0}, 273 /*e8*/ { 3,-1},{-1,-1},{ 0, 1},{-1,-1},{ 0,-1},{-1,-1},{-1,-1},{ 0, 0}, 274 /*f0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 275 /*f8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 276 }; 277 278 static char *pbook_type[] = { 279 "Unknown PowerBook", 280 "PowerBook 2400/3400/3500(G3)", 281 
"PowerBook G3 Series", 282 "1999 PowerBook G3", 283 "Core99" 284 }; 285 286 int __init find_via_pmu(void) 287 { 288 #ifdef CONFIG_PPC_PMAC 289 u64 taddr; 290 const u32 *reg; 291 292 if (pmu_state != uninitialized) 293 return 1; 294 vias = of_find_node_by_name(NULL, "via-pmu"); 295 if (vias == NULL) 296 return 0; 297 298 reg = of_get_property(vias, "reg", NULL); 299 if (reg == NULL) { 300 printk(KERN_ERR "via-pmu: No \"reg\" property !\n"); 301 goto fail; 302 } 303 taddr = of_translate_address(vias, reg); 304 if (taddr == OF_BAD_ADDR) { 305 printk(KERN_ERR "via-pmu: Can't translate address !\n"); 306 goto fail; 307 } 308 309 spin_lock_init(&pmu_lock); 310 311 pmu_has_adb = 1; 312 313 pmu_intr_mask = PMU_INT_PCEJECT | 314 PMU_INT_SNDBRT | 315 PMU_INT_ADB | 316 PMU_INT_TICK; 317 318 if (of_node_name_eq(vias->parent, "ohare") || 319 of_device_is_compatible(vias->parent, "ohare")) 320 pmu_kind = PMU_OHARE_BASED; 321 else if (of_device_is_compatible(vias->parent, "paddington")) 322 pmu_kind = PMU_PADDINGTON_BASED; 323 else if (of_device_is_compatible(vias->parent, "heathrow")) 324 pmu_kind = PMU_HEATHROW_BASED; 325 else if (of_device_is_compatible(vias->parent, "Keylargo") 326 || of_device_is_compatible(vias->parent, "K2-Keylargo")) { 327 struct device_node *gpiop; 328 struct device_node *adbp; 329 u64 gaddr = OF_BAD_ADDR; 330 331 pmu_kind = PMU_KEYLARGO_BASED; 332 adbp = of_find_node_by_type(NULL, "adb"); 333 pmu_has_adb = (adbp != NULL); 334 of_node_put(adbp); 335 pmu_intr_mask = PMU_INT_PCEJECT | 336 PMU_INT_SNDBRT | 337 PMU_INT_ADB | 338 PMU_INT_TICK | 339 PMU_INT_ENVIRONMENT; 340 341 gpiop = of_find_node_by_name(NULL, "gpio"); 342 if (gpiop) { 343 reg = of_get_property(gpiop, "reg", NULL); 344 if (reg) 345 gaddr = of_translate_address(gpiop, reg); 346 if (gaddr != OF_BAD_ADDR) 347 gpio_reg = ioremap(gaddr, 0x10); 348 of_node_put(gpiop); 349 } 350 if (gpio_reg == NULL) { 351 printk(KERN_ERR "via-pmu: Can't find GPIO reg !\n"); 352 goto fail; 353 } 354 } else 355 pmu_kind = PMU_UNKNOWN; 356 357 via1 = via2 = ioremap(taddr, 0x2000); 358 if (via1 == NULL) { 359 printk(KERN_ERR "via-pmu: Can't map address !\n"); 360 goto fail_via_remap; 361 } 362 363 out_8(&via1[IER], IER_CLR | 0x7f); /* disable all intrs */ 364 out_8(&via1[IFR], 0x7f); /* clear IFR */ 365 366 pmu_state = idle; 367 368 if (!init_pmu()) 369 goto fail_init; 370 371 sys_ctrler = SYS_CTRLER_PMU; 372 373 return 1; 374 375 fail_init: 376 iounmap(via1); 377 via1 = via2 = NULL; 378 fail_via_remap: 379 iounmap(gpio_reg); 380 gpio_reg = NULL; 381 fail: 382 of_node_put(vias); 383 vias = NULL; 384 pmu_state = uninitialized; 385 return 0; 386 #else 387 if (macintosh_config->adb_type != MAC_ADB_PB2) 388 return 0; 389 390 pmu_kind = PMU_UNKNOWN; 391 392 spin_lock_init(&pmu_lock); 393 394 pmu_has_adb = 1; 395 396 pmu_intr_mask = PMU_INT_PCEJECT | 397 PMU_INT_SNDBRT | 398 PMU_INT_ADB | 399 PMU_INT_TICK; 400 401 pmu_state = idle; 402 403 if (!init_pmu()) { 404 pmu_state = uninitialized; 405 return 0; 406 } 407 408 return 1; 409 #endif /* !CONFIG_PPC_PMAC */ 410 } 411 412 #ifdef CONFIG_ADB 413 static int pmu_probe(void) 414 { 415 return pmu_state == uninitialized ? -ENODEV : 0; 416 } 417 418 static int pmu_init(void) 419 { 420 return pmu_state == uninitialized ? -ENODEV : 0; 421 } 422 #endif /* CONFIG_ADB */ 423 424 /* 425 * We can't wait until pmu_init gets called, that happens too late. 
 * It happens after IDE and SCSI initialization, which can take a few
 * seconds, and by that time the PMU could have given up on us and
 * turned us off.
 * Thus this is called with arch_initcall rather than device_initcall.
 */
static int __init via_pmu_start(void)
{
	unsigned int __maybe_unused irq;

	if (pmu_state == uninitialized)
		return -ENODEV;

	batt_req.complete = 1;

#ifdef CONFIG_PPC_PMAC
	irq = irq_of_parse_and_map(vias, 0);
	if (!irq) {
		printk(KERN_ERR "via-pmu: can't map interrupt\n");
		return -ENODEV;
	}
	/* We set IRQF_NO_SUSPEND because we don't want the interrupt
	 * to be disabled between the two passes of driver suspend; we
	 * control our own disabling for that one.
	 */
	if (request_irq(irq, via_pmu_interrupt, IRQF_NO_SUSPEND,
			"VIA-PMU", (void *)0)) {
		printk(KERN_ERR "via-pmu: can't request irq %d\n", irq);
		return -ENODEV;
	}

	if (pmu_kind == PMU_KEYLARGO_BASED) {
		gpio_node = of_find_node_by_name(NULL, "extint-gpio1");
		if (gpio_node == NULL)
			gpio_node = of_find_node_by_name(NULL,
							 "pmu-interrupt");
		if (gpio_node)
			gpio_irq = irq_of_parse_and_map(gpio_node, 0);

		if (gpio_irq) {
			if (request_irq(gpio_irq, gpio1_interrupt,
					IRQF_NO_SUSPEND, "GPIO1 ADB",
					(void *)0))
				printk(KERN_ERR "pmu: can't get irq %d"
				       " (GPIO1)\n", gpio_irq);
			else
				gpio_irq_enabled = 1;
		}
	}

	/* Enable interrupts */
	out_8(&via1[IER], IER_SET | SR_INT | CB1_INT);
#else
	if (request_irq(IRQ_MAC_ADB_SR, via_pmu_interrupt, IRQF_NO_SUSPEND,
			"VIA-PMU-SR", NULL)) {
		pr_err("%s: couldn't get SR irq\n", __func__);
		return -ENODEV;
	}
	if (request_irq(IRQ_MAC_ADB_CL, via_pmu_interrupt, IRQF_NO_SUSPEND,
			"VIA-PMU-CL", NULL)) {
		pr_err("%s: couldn't get CL irq\n", __func__);
		free_irq(IRQ_MAC_ADB_SR, NULL);
		return -ENODEV;
	}
#endif /* !CONFIG_PPC_PMAC */

	pmu_fully_inited = 1;

	/* Make sure the PMU settles down before continuing. This is _very_
	 * important since the IDE probe may shut interrupts down for quite
	 * a bit of time. If a PMU communication is pending while this
	 * happens, the PMU may time out. Note that on Core99 machines, the
	 * PMU keeps sending us environment messages; we should find a way
	 * to either fix IDE or make it call pmu_suspend() before masking
	 * interrupts. This can also happen while scrolling with some fbdevs.
	 */
	do {
		pmu_poll();
	} while (pmu_state != idle);

	return 0;
}

arch_initcall(via_pmu_start);

/*
 * This has to be done after pci_init, which is a subsys_initcall.
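 * (That is why via_pmu_dev_init() below is registered with
 * device_initcall(), which runs after all subsys_initcalls.)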
512 */ 513 static int __init via_pmu_dev_init(void) 514 { 515 if (pmu_state == uninitialized) 516 return -ENODEV; 517 518 #ifdef CONFIG_PMAC_BACKLIGHT 519 /* Initialize backlight */ 520 pmu_backlight_init(); 521 #endif 522 523 #ifdef CONFIG_PPC32 524 if (of_machine_is_compatible("AAPL,3400/2400") || 525 of_machine_is_compatible("AAPL,3500")) { 526 int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO, 527 NULL, PMAC_MB_INFO_MODEL, 0); 528 pmu_battery_count = 1; 529 if (mb == PMAC_TYPE_COMET) 530 pmu_batteries[0].flags |= PMU_BATT_TYPE_COMET; 531 else 532 pmu_batteries[0].flags |= PMU_BATT_TYPE_HOOPER; 533 } else if (of_machine_is_compatible("AAPL,PowerBook1998") || 534 of_machine_is_compatible("PowerBook1,1")) { 535 pmu_battery_count = 2; 536 pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART; 537 pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART; 538 } else { 539 struct device_node* prim = 540 of_find_node_by_name(NULL, "power-mgt"); 541 const u32 *prim_info = NULL; 542 if (prim) 543 prim_info = of_get_property(prim, "prim-info", NULL); 544 if (prim_info) { 545 /* Other stuffs here yet unknown */ 546 pmu_battery_count = (prim_info[6] >> 16) & 0xff; 547 pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART; 548 if (pmu_battery_count > 1) 549 pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART; 550 } 551 of_node_put(prim); 552 } 553 #endif /* CONFIG_PPC32 */ 554 555 /* Create /proc/pmu */ 556 proc_pmu_root = proc_mkdir("pmu", NULL); 557 if (proc_pmu_root) { 558 long i; 559 560 for (i=0; i<pmu_battery_count; i++) { 561 char title[16]; 562 sprintf(title, "battery_%ld", i); 563 proc_pmu_batt[i] = proc_create_single_data(title, 0, 564 proc_pmu_root, pmu_battery_proc_show, 565 (void *)i); 566 } 567 568 proc_pmu_info = proc_create_single("info", 0, proc_pmu_root, 569 pmu_info_proc_show); 570 proc_pmu_irqstats = proc_create_single("interrupts", 0, 571 proc_pmu_root, pmu_irqstats_proc_show); 572 proc_pmu_options = proc_create("options", 0600, proc_pmu_root, 573 &pmu_options_proc_ops); 574 } 575 return 0; 576 } 577 578 device_initcall(via_pmu_dev_init); 579 580 static int 581 init_pmu(void) 582 { 583 int timeout; 584 struct adb_request req; 585 586 /* Negate TREQ. Set TACK to input and TREQ to output. */ 587 out_8(&via2[B], in_8(&via2[B]) | TREQ); 588 out_8(&via2[DIRB], (in_8(&via2[DIRB]) | TREQ) & ~TACK); 589 590 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); 591 timeout = 100000; 592 while (!req.complete) { 593 if (--timeout < 0) { 594 printk(KERN_ERR "init_pmu: no response from PMU\n"); 595 return 0; 596 } 597 udelay(10); 598 pmu_poll(); 599 } 600 601 /* ack all pending interrupts */ 602 timeout = 100000; 603 interrupt_data[0][0] = 1; 604 while (interrupt_data[0][0] || pmu_state != idle) { 605 if (--timeout < 0) { 606 printk(KERN_ERR "init_pmu: timed out acking intrs\n"); 607 return 0; 608 } 609 if (pmu_state == idle) 610 adb_int_pending = 1; 611 via_pmu_interrupt(0, NULL); 612 udelay(10); 613 } 614 615 /* Tell PMU we are ready. 
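	 * (Only done on KeyLargo-based machines; the PMU_SYSTEM_READY
	 * command below carries a single argument byte, 2.)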
*/ 616 if (pmu_kind == PMU_KEYLARGO_BASED) { 617 pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2); 618 while (!req.complete) 619 pmu_poll(); 620 } 621 622 /* Read PMU version */ 623 pmu_request(&req, NULL, 1, PMU_GET_VERSION); 624 pmu_wait_complete(&req); 625 if (req.reply_len > 0) 626 pmu_version = req.reply[0]; 627 628 /* Read server mode setting */ 629 if (pmu_kind == PMU_KEYLARGO_BASED) { 630 pmu_request(&req, NULL, 2, PMU_POWER_EVENTS, 631 PMU_PWR_GET_POWERUP_EVENTS); 632 pmu_wait_complete(&req); 633 if (req.reply_len == 2) { 634 if (req.reply[1] & PMU_PWR_WAKEUP_AC_INSERT) 635 option_server_mode = 1; 636 printk(KERN_INFO "via-pmu: Server Mode is %s\n", 637 option_server_mode ? "enabled" : "disabled"); 638 } 639 } 640 641 printk(KERN_INFO "PMU driver v%d initialized for %s, firmware: %02x\n", 642 PMU_DRIVER_VERSION, pbook_type[pmu_kind], pmu_version); 643 644 return 1; 645 } 646 647 int 648 pmu_get_model(void) 649 { 650 return pmu_kind; 651 } 652 653 static void pmu_set_server_mode(int server_mode) 654 { 655 struct adb_request req; 656 657 if (pmu_kind != PMU_KEYLARGO_BASED) 658 return; 659 660 option_server_mode = server_mode; 661 pmu_request(&req, NULL, 2, PMU_POWER_EVENTS, PMU_PWR_GET_POWERUP_EVENTS); 662 pmu_wait_complete(&req); 663 if (req.reply_len < 2) 664 return; 665 if (server_mode) 666 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, 667 PMU_PWR_SET_POWERUP_EVENTS, 668 req.reply[0], PMU_PWR_WAKEUP_AC_INSERT); 669 else 670 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, 671 PMU_PWR_CLR_POWERUP_EVENTS, 672 req.reply[0], PMU_PWR_WAKEUP_AC_INSERT); 673 pmu_wait_complete(&req); 674 } 675 676 /* This new version of the code for 2400/3400/3500 powerbooks 677 * is inspired from the implementation in gkrellm-pmu 678 */ 679 static void 680 done_battery_state_ohare(struct adb_request* req) 681 { 682 #ifdef CONFIG_PPC_PMAC 683 /* format: 684 * [0] : flags 685 * 0x01 : AC indicator 686 * 0x02 : charging 687 * 0x04 : battery exist 688 * 0x08 : 689 * 0x10 : 690 * 0x20 : full charged 691 * 0x40 : pcharge reset 692 * 0x80 : battery exist 693 * 694 * [1][2] : battery voltage 695 * [3] : CPU temperature 696 * [4] : battery temperature 697 * [5] : current 698 * [6][7] : pcharge 699 * --tkoba 700 */ 701 unsigned int bat_flags = PMU_BATT_TYPE_HOOPER; 702 long pcharge, charge, vb, vmax, lmax; 703 long vmax_charging, vmax_charged; 704 long amperage, voltage, time, max; 705 int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO, 706 NULL, PMAC_MB_INFO_MODEL, 0); 707 708 if (req->reply[0] & 0x01) 709 pmu_power_flags |= PMU_PWR_AC_PRESENT; 710 else 711 pmu_power_flags &= ~PMU_PWR_AC_PRESENT; 712 713 if (mb == PMAC_TYPE_COMET) { 714 vmax_charged = 189; 715 vmax_charging = 213; 716 lmax = 6500; 717 } else { 718 vmax_charged = 330; 719 vmax_charging = 330; 720 lmax = 6500; 721 } 722 vmax = vmax_charged; 723 724 /* If battery installed */ 725 if (req->reply[0] & 0x04) { 726 bat_flags |= PMU_BATT_PRESENT; 727 if (req->reply[0] & 0x02) 728 bat_flags |= PMU_BATT_CHARGING; 729 vb = (req->reply[1] << 8) | req->reply[2]; 730 voltage = (vb * 265 + 72665) / 10; 731 amperage = req->reply[5]; 732 if ((req->reply[0] & 0x01) == 0) { 733 if (amperage > 200) 734 vb += ((amperage - 200) * 15)/100; 735 } else if (req->reply[0] & 0x02) { 736 vb = (vb * 97) / 100; 737 vmax = vmax_charging; 738 } 739 charge = (100 * vb) / vmax; 740 if (req->reply[0] & 0x40) { 741 pcharge = (req->reply[6] << 8) + req->reply[7]; 742 if (pcharge > lmax) 743 pcharge = lmax; 744 pcharge *= 100; 745 pcharge = 100 - pcharge / lmax; 746 if (pcharge < charge) 747 
charge = pcharge; 748 } 749 if (amperage > 0) 750 time = (charge * 16440) / amperage; 751 else 752 time = 0; 753 max = 100; 754 amperage = -amperage; 755 } else 756 charge = max = amperage = voltage = time = 0; 757 758 pmu_batteries[pmu_cur_battery].flags = bat_flags; 759 pmu_batteries[pmu_cur_battery].charge = charge; 760 pmu_batteries[pmu_cur_battery].max_charge = max; 761 pmu_batteries[pmu_cur_battery].amperage = amperage; 762 pmu_batteries[pmu_cur_battery].voltage = voltage; 763 pmu_batteries[pmu_cur_battery].time_remaining = time; 764 #endif /* CONFIG_PPC_PMAC */ 765 766 clear_bit(0, &async_req_locks); 767 } 768 769 static void 770 done_battery_state_smart(struct adb_request* req) 771 { 772 /* format: 773 * [0] : format of this structure (known: 3,4,5) 774 * [1] : flags 775 * 776 * format 3 & 4: 777 * 778 * [2] : charge 779 * [3] : max charge 780 * [4] : current 781 * [5] : voltage 782 * 783 * format 5: 784 * 785 * [2][3] : charge 786 * [4][5] : max charge 787 * [6][7] : current 788 * [8][9] : voltage 789 */ 790 791 unsigned int bat_flags = PMU_BATT_TYPE_SMART; 792 int amperage; 793 unsigned int capa, max, voltage; 794 795 if (req->reply[1] & 0x01) 796 pmu_power_flags |= PMU_PWR_AC_PRESENT; 797 else 798 pmu_power_flags &= ~PMU_PWR_AC_PRESENT; 799 800 801 capa = max = amperage = voltage = 0; 802 803 if (req->reply[1] & 0x04) { 804 bat_flags |= PMU_BATT_PRESENT; 805 switch(req->reply[0]) { 806 case 3: 807 case 4: capa = req->reply[2]; 808 max = req->reply[3]; 809 amperage = *((signed char *)&req->reply[4]); 810 voltage = req->reply[5]; 811 break; 812 case 5: capa = (req->reply[2] << 8) | req->reply[3]; 813 max = (req->reply[4] << 8) | req->reply[5]; 814 amperage = *((signed short *)&req->reply[6]); 815 voltage = (req->reply[8] << 8) | req->reply[9]; 816 break; 817 default: 818 pr_warn("pmu.c: unrecognized battery info, " 819 "len: %d, %4ph\n", req->reply_len, 820 req->reply); 821 break; 822 } 823 } 824 825 if ((req->reply[1] & 0x01) && (amperage > 0)) 826 bat_flags |= PMU_BATT_CHARGING; 827 828 pmu_batteries[pmu_cur_battery].flags = bat_flags; 829 pmu_batteries[pmu_cur_battery].charge = capa; 830 pmu_batteries[pmu_cur_battery].max_charge = max; 831 pmu_batteries[pmu_cur_battery].amperage = amperage; 832 pmu_batteries[pmu_cur_battery].voltage = voltage; 833 if (amperage) { 834 if ((req->reply[1] & 0x01) && (amperage > 0)) 835 pmu_batteries[pmu_cur_battery].time_remaining 836 = ((max-capa) * 3600) / amperage; 837 else 838 pmu_batteries[pmu_cur_battery].time_remaining 839 = (capa * 3600) / (-amperage); 840 } else 841 pmu_batteries[pmu_cur_battery].time_remaining = 0; 842 843 pmu_cur_battery = (pmu_cur_battery + 1) % pmu_battery_count; 844 845 clear_bit(0, &async_req_locks); 846 } 847 848 static void 849 query_battery_state(void) 850 { 851 if (test_and_set_bit(0, &async_req_locks)) 852 return; 853 if (pmu_kind == PMU_OHARE_BASED) 854 pmu_request(&batt_req, done_battery_state_ohare, 855 1, PMU_BATTERY_STATE); 856 else 857 pmu_request(&batt_req, done_battery_state_smart, 858 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1); 859 } 860 861 static int pmu_info_proc_show(struct seq_file *m, void *v) 862 { 863 seq_printf(m, "PMU driver version : %d\n", PMU_DRIVER_VERSION); 864 seq_printf(m, "PMU firmware version : %02x\n", pmu_version); 865 seq_printf(m, "AC Power : %d\n", 866 ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0) || pmu_battery_count == 0); 867 seq_printf(m, "Battery count : %d\n", pmu_battery_count); 868 869 return 0; 870 } 871 872 static int pmu_irqstats_proc_show(struct seq_file *m, void 
*v) 873 { 874 int i; 875 static const char *irq_names[NUM_IRQ_STATS] = { 876 "Unknown interrupt (type 0)", 877 "Unknown interrupt (type 1)", 878 "PC-Card eject button", 879 "Sound/Brightness button", 880 "ADB message", 881 "Battery state change", 882 "Environment interrupt", 883 "Tick timer", 884 "Ghost interrupt (zero len)", 885 "Empty interrupt (empty mask)", 886 "Max irqs in a row", 887 "Total CB1 triggered events", 888 "Total GPIO1 triggered events", 889 }; 890 891 for (i = 0; i < NUM_IRQ_STATS; i++) { 892 seq_printf(m, " %2u: %10u (%s)\n", 893 i, pmu_irq_stats[i], irq_names[i]); 894 } 895 return 0; 896 } 897 898 static int pmu_battery_proc_show(struct seq_file *m, void *v) 899 { 900 long batnum = (long)m->private; 901 902 seq_putc(m, '\n'); 903 seq_printf(m, "flags : %08x\n", pmu_batteries[batnum].flags); 904 seq_printf(m, "charge : %d\n", pmu_batteries[batnum].charge); 905 seq_printf(m, "max_charge : %d\n", pmu_batteries[batnum].max_charge); 906 seq_printf(m, "current : %d\n", pmu_batteries[batnum].amperage); 907 seq_printf(m, "voltage : %d\n", pmu_batteries[batnum].voltage); 908 seq_printf(m, "time rem. : %d\n", pmu_batteries[batnum].time_remaining); 909 return 0; 910 } 911 912 static int pmu_options_proc_show(struct seq_file *m, void *v) 913 { 914 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 915 if (pmu_kind == PMU_KEYLARGO_BASED && 916 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0) 917 seq_printf(m, "lid_wakeup=%d\n", option_lid_wakeup); 918 #endif 919 if (pmu_kind == PMU_KEYLARGO_BASED) 920 seq_printf(m, "server_mode=%d\n", option_server_mode); 921 922 return 0; 923 } 924 925 static int pmu_options_proc_open(struct inode *inode, struct file *file) 926 { 927 return single_open(file, pmu_options_proc_show, NULL); 928 } 929 930 static ssize_t pmu_options_proc_write(struct file *file, 931 const char __user *buffer, size_t count, loff_t *pos) 932 { 933 char tmp[33]; 934 char *label, *val; 935 size_t fcount = count; 936 937 if (!count) 938 return -EINVAL; 939 if (count > 32) 940 count = 32; 941 if (copy_from_user(tmp, buffer, count)) 942 return -EFAULT; 943 tmp[count] = 0; 944 945 label = tmp; 946 while(*label == ' ') 947 label++; 948 val = label; 949 while(*val && (*val != '=')) { 950 if (*val == ' ') 951 *val = 0; 952 val++; 953 } 954 if ((*val) == 0) 955 return -EINVAL; 956 *(val++) = 0; 957 while(*val == ' ') 958 val++; 959 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 960 if (pmu_kind == PMU_KEYLARGO_BASED && 961 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0) 962 if (!strcmp(label, "lid_wakeup")) 963 option_lid_wakeup = ((*val) == '1'); 964 #endif 965 if (pmu_kind == PMU_KEYLARGO_BASED && !strcmp(label, "server_mode")) { 966 int new_value; 967 new_value = ((*val) == '1'); 968 if (new_value != option_server_mode) 969 pmu_set_server_mode(new_value); 970 } 971 return fcount; 972 } 973 974 static const struct proc_ops pmu_options_proc_ops = { 975 .proc_open = pmu_options_proc_open, 976 .proc_read = seq_read, 977 .proc_lseek = seq_lseek, 978 .proc_release = single_release, 979 .proc_write = pmu_options_proc_write, 980 }; 981 982 #ifdef CONFIG_ADB 983 /* Send an ADB command */ 984 static int pmu_send_request(struct adb_request *req, int sync) 985 { 986 int i, ret; 987 988 if (pmu_state == uninitialized || !pmu_fully_inited) { 989 req->complete = 1; 990 return -ENXIO; 991 } 992 993 ret = -EINVAL; 994 995 switch (req->data[0]) { 996 case PMU_PACKET: 997 for (i = 0; i < req->nbytes - 1; ++i) 998 req->data[i] = req->data[i+1]; 999 --req->nbytes; 1000 if 
(pmu_data_len[req->data[0]][1] != 0) { 1001 req->reply[0] = ADB_RET_OK; 1002 req->reply_len = 1; 1003 } else 1004 req->reply_len = 0; 1005 ret = pmu_queue_request(req); 1006 break; 1007 case CUDA_PACKET: 1008 switch (req->data[1]) { 1009 case CUDA_GET_TIME: 1010 if (req->nbytes != 2) 1011 break; 1012 req->data[0] = PMU_READ_RTC; 1013 req->nbytes = 1; 1014 req->reply_len = 3; 1015 req->reply[0] = CUDA_PACKET; 1016 req->reply[1] = 0; 1017 req->reply[2] = CUDA_GET_TIME; 1018 ret = pmu_queue_request(req); 1019 break; 1020 case CUDA_SET_TIME: 1021 if (req->nbytes != 6) 1022 break; 1023 req->data[0] = PMU_SET_RTC; 1024 req->nbytes = 5; 1025 for (i = 1; i <= 4; ++i) 1026 req->data[i] = req->data[i+1]; 1027 req->reply_len = 3; 1028 req->reply[0] = CUDA_PACKET; 1029 req->reply[1] = 0; 1030 req->reply[2] = CUDA_SET_TIME; 1031 ret = pmu_queue_request(req); 1032 break; 1033 } 1034 break; 1035 case ADB_PACKET: 1036 if (!pmu_has_adb) 1037 return -ENXIO; 1038 for (i = req->nbytes - 1; i > 1; --i) 1039 req->data[i+2] = req->data[i]; 1040 req->data[3] = req->nbytes - 2; 1041 req->data[2] = pmu_adb_flags; 1042 /*req->data[1] = req->data[1];*/ 1043 req->data[0] = PMU_ADB_CMD; 1044 req->nbytes += 2; 1045 req->reply_expected = 1; 1046 req->reply_len = 0; 1047 ret = pmu_queue_request(req); 1048 break; 1049 } 1050 if (ret) { 1051 req->complete = 1; 1052 return ret; 1053 } 1054 1055 if (sync) 1056 while (!req->complete) 1057 pmu_poll(); 1058 1059 return 0; 1060 } 1061 1062 /* Enable/disable autopolling */ 1063 static int __pmu_adb_autopoll(int devs) 1064 { 1065 struct adb_request req; 1066 1067 if (devs) { 1068 pmu_request(&req, NULL, 5, PMU_ADB_CMD, 0, 0x86, 1069 adb_dev_map >> 8, adb_dev_map); 1070 pmu_adb_flags = 2; 1071 } else { 1072 pmu_request(&req, NULL, 1, PMU_ADB_POLL_OFF); 1073 pmu_adb_flags = 0; 1074 } 1075 while (!req.complete) 1076 pmu_poll(); 1077 return 0; 1078 } 1079 1080 static int pmu_adb_autopoll(int devs) 1081 { 1082 if (pmu_state == uninitialized || !pmu_fully_inited || !pmu_has_adb) 1083 return -ENXIO; 1084 1085 adb_dev_map = devs; 1086 return __pmu_adb_autopoll(devs); 1087 } 1088 1089 /* Reset the ADB bus */ 1090 static int pmu_adb_reset_bus(void) 1091 { 1092 struct adb_request req; 1093 int save_autopoll = adb_dev_map; 1094 1095 if (pmu_state == uninitialized || !pmu_fully_inited || !pmu_has_adb) 1096 return -ENXIO; 1097 1098 /* anyone got a better idea?? */ 1099 __pmu_adb_autopoll(0); 1100 1101 req.nbytes = 4; 1102 req.done = NULL; 1103 req.data[0] = PMU_ADB_CMD; 1104 req.data[1] = ADB_BUSRESET; 1105 req.data[2] = 0; 1106 req.data[3] = 0; 1107 req.data[4] = 0; 1108 req.reply_len = 0; 1109 req.reply_expected = 1; 1110 if (pmu_queue_request(&req) != 0) { 1111 printk(KERN_ERR "pmu_adb_reset_bus: pmu_queue_request failed\n"); 1112 return -EIO; 1113 } 1114 pmu_wait_complete(&req); 1115 1116 if (save_autopoll != 0) 1117 __pmu_adb_autopoll(save_autopoll); 1118 1119 return 0; 1120 } 1121 #endif /* CONFIG_ADB */ 1122 1123 /* Construct and send a pmu request */ 1124 int 1125 pmu_request(struct adb_request *req, void (*done)(struct adb_request *), 1126 int nbytes, ...) 
{
	va_list list;
	int i;

	if (pmu_state == uninitialized)
		return -ENXIO;

	if (nbytes < 0 || nbytes > 32) {
		printk(KERN_ERR "pmu_request: bad nbytes (%d)\n", nbytes);
		req->complete = 1;
		return -EINVAL;
	}
	req->nbytes = nbytes;
	req->done = done;
	va_start(list, nbytes);
	for (i = 0; i < nbytes; ++i)
		req->data[i] = va_arg(list, int);
	va_end(list);
	req->reply_len = 0;
	req->reply_expected = 0;
	return pmu_queue_request(req);
}

int
pmu_queue_request(struct adb_request *req)
{
	unsigned long flags;
	int nsend;

	if (pmu_state == uninitialized) {
		req->complete = 1;
		return -ENXIO;
	}
	if (req->nbytes <= 0) {
		req->complete = 1;
		return 0;
	}
	nsend = pmu_data_len[req->data[0]][0];
	if (nsend >= 0 && req->nbytes != nsend + 1) {
		req->complete = 1;
		return -EINVAL;
	}

	req->next = NULL;
	req->sent = 0;
	req->complete = 0;

	spin_lock_irqsave(&pmu_lock, flags);
	if (current_req) {
		last_req->next = req;
		last_req = req;
	} else {
		current_req = req;
		last_req = req;
		if (pmu_state == idle)
			pmu_start();
	}
	spin_unlock_irqrestore(&pmu_lock, flags);

	return 0;
}

static inline void
wait_for_ack(void)
{
	/* Slightly increased the delay; I had one occurrence of the message
	 * below being reported.
	 */
	int timeout = 4000;
	while ((in_8(&via2[B]) & TACK) == 0) {
		if (--timeout < 0) {
			printk(KERN_ERR "PMU not responding (!ack)\n");
			return;
		}
		udelay(10);
	}
}

/* The new PMU seems to be very sensitive to these timings, so we make sure
 * PCI is flushed immediately */
static inline void
send_byte(int x)
{
	out_8(&via1[ACR], in_8(&via1[ACR]) | SR_OUT | SR_EXT);
	out_8(&via1[SR], x);
	out_8(&via2[B], in_8(&via2[B]) & ~TREQ);	/* assert TREQ */
	(void)in_8(&via2[B]);
}

static inline void
recv_byte(void)
{
	out_8(&via1[ACR], (in_8(&via1[ACR]) & ~SR_OUT) | SR_EXT);
	in_8(&via1[SR]);	/* resets SR */
	out_8(&via2[B], in_8(&via2[B]) & ~TREQ);
	(void)in_8(&via2[B]);
}

static inline void
pmu_done(struct adb_request *req)
{
	void (*done)(struct adb_request *) = req->done;
	mb();
	req->complete = 1;
	/* Here, we assume that if the request has a done callback, the
	 * struct adb_request will survive until req->complete has been
	 * set to 1.
	 */
	if (done)
		(*done)(req);
}

static void
pmu_start(void)
{
	struct adb_request *req;

	/* assert pmu_state == idle */
	/* get the packet to send */
	req = current_req;
	if (!req || pmu_state != idle
	    || (/*req->reply_expected && */req_awaiting_reply))
		return;

	pmu_state = sending;
	data_index = 1;
	data_len = pmu_data_len[req->data[0]][0];

	/* Sounds safer to make sure ACK is high before writing.
	 * This helped kill a problem with ADB and some iBooks
	 */
	wait_for_ack();
	/* set the shift register to shift out and send a byte */
	send_byte(req->data[0]);
}

void
pmu_poll(void)
{
	if (pmu_state == uninitialized)
		return;
	if (disable_poll)
		return;
	via_pmu_interrupt(0, NULL);
}

void
pmu_poll_adb(void)
{
	if (pmu_state == uninitialized)
		return;
	if (disable_poll)
		return;
	/* Kicks ADB read when PMU is suspended */
	adb_int_pending = 1;
	do {
		via_pmu_interrupt(0, NULL);
	} while (pmu_suspended && (adb_int_pending || pmu_state != idle
		|| req_awaiting_reply));
}

void
pmu_wait_complete(struct adb_request *req)
{
	if (pmu_state == uninitialized)
		return;
	while ((pmu_state != idle && pmu_state != locked) || !req->complete)
		via_pmu_interrupt(0, NULL);
}

/* This function loops until the PMU is idle and prevents it from
 * answering ADB interrupts. pmu_request() can still be called.
 * This is done to avoid spurious shutdowns when we know we'll have
 * interrupts switched off for a long time.
 */
void
pmu_suspend(void)
{
	unsigned long flags;

	if (pmu_state == uninitialized)
		return;

	spin_lock_irqsave(&pmu_lock, flags);
	pmu_suspended++;
	if (pmu_suspended > 1) {
		spin_unlock_irqrestore(&pmu_lock, flags);
		return;
	}

	do {
		spin_unlock_irqrestore(&pmu_lock, flags);
		if (req_awaiting_reply)
			adb_int_pending = 1;
		via_pmu_interrupt(0, NULL);
		spin_lock_irqsave(&pmu_lock, flags);
		if (!adb_int_pending && pmu_state == idle && !req_awaiting_reply) {
			if (gpio_irq >= 0)
				disable_irq_nosync(gpio_irq);
			out_8(&via1[IER], CB1_INT | IER_CLR);
			spin_unlock_irqrestore(&pmu_lock, flags);
			break;
		}
	} while (1);
}

void
pmu_resume(void)
{
	unsigned long flags;

	if (pmu_state == uninitialized || pmu_suspended < 1)
		return;

	spin_lock_irqsave(&pmu_lock, flags);
	pmu_suspended--;
	if (pmu_suspended > 0) {
		spin_unlock_irqrestore(&pmu_lock, flags);
		return;
	}
	adb_int_pending = 1;
	if (gpio_irq >= 0)
		enable_irq(gpio_irq);
	out_8(&via1[IER], CB1_INT | IER_SET);
	spin_unlock_irqrestore(&pmu_lock, flags);
	pmu_poll();
}

/* Interrupt data could be the result data from an ADB cmd */
static void
pmu_handle_data(unsigned char *data, int len)
{
	unsigned char ints;
	int idx;
	int i = 0;

	asleep = 0;
	if (drop_interrupts || len < 1) {
		adb_int_pending = 0;
		pmu_irq_stats[8]++;
		return;
	}

	/* Get PMU interrupt mask */
	ints = data[0];

	/* Record zero interrupts for stats */
	if (ints == 0)
		pmu_irq_stats[9]++;

	/* Hack to deal with ADB autopoll flag */
	if (ints & PMU_INT_ADB)
		ints &= ~(PMU_INT_ADB_AUTO | PMU_INT_AUTO_SRQ_POLL);

next:
	if (ints == 0) {
		if (i > pmu_irq_stats[10])
			pmu_irq_stats[10] = i;
		return;
	}
	i++;

	idx = ffs(ints) - 1;
	ints &= ~BIT(idx);

	pmu_irq_stats[idx]++;

	/* Note: for some reason, we get an interrupt with len=1,
	 * data[0]==0 after each normal ADB interrupt, at least
	 * on the Pismo. Still investigating...
--BenH 1396 */ 1397 switch (BIT(idx)) { 1398 case PMU_INT_ADB: 1399 if ((data[0] & PMU_INT_ADB_AUTO) == 0) { 1400 struct adb_request *req = req_awaiting_reply; 1401 if (!req) { 1402 printk(KERN_ERR "PMU: extra ADB reply\n"); 1403 return; 1404 } 1405 req_awaiting_reply = NULL; 1406 if (len <= 2) 1407 req->reply_len = 0; 1408 else { 1409 memcpy(req->reply, data + 1, len - 1); 1410 req->reply_len = len - 1; 1411 } 1412 pmu_done(req); 1413 } else { 1414 #ifdef CONFIG_XMON 1415 if (len == 4 && data[1] == 0x2c) { 1416 extern int xmon_wants_key, xmon_adb_keycode; 1417 if (xmon_wants_key) { 1418 xmon_adb_keycode = data[2]; 1419 return; 1420 } 1421 } 1422 #endif /* CONFIG_XMON */ 1423 #ifdef CONFIG_ADB 1424 /* 1425 * XXX On the [23]400 the PMU gives us an up 1426 * event for keycodes 0x74 or 0x75 when the PC 1427 * card eject buttons are released, so we 1428 * ignore those events. 1429 */ 1430 if (!(pmu_kind == PMU_OHARE_BASED && len == 4 1431 && data[1] == 0x2c && data[3] == 0xff 1432 && (data[2] & ~1) == 0xf4)) 1433 adb_input(data+1, len-1, 1); 1434 #endif /* CONFIG_ADB */ 1435 } 1436 break; 1437 1438 /* Sound/brightness button pressed */ 1439 case PMU_INT_SNDBRT: 1440 #ifdef CONFIG_PMAC_BACKLIGHT 1441 if (len == 3) 1442 pmac_backlight_set_legacy_brightness_pmu(data[1] >> 4); 1443 #endif 1444 break; 1445 1446 /* Tick interrupt */ 1447 case PMU_INT_TICK: 1448 /* Environment or tick interrupt, query batteries */ 1449 if (pmu_battery_count) { 1450 if ((--query_batt_timer) == 0) { 1451 query_battery_state(); 1452 query_batt_timer = BATTERY_POLLING_COUNT; 1453 } 1454 } 1455 break; 1456 1457 case PMU_INT_ENVIRONMENT: 1458 if (pmu_battery_count) 1459 query_battery_state(); 1460 pmu_pass_intr(data, len); 1461 /* len == 6 is probably a bad check. But how do I 1462 * know what PMU versions send what events here? 
*/ 1463 if (len == 6) { 1464 via_pmu_event(PMU_EVT_POWER, !!(data[1]&8)); 1465 via_pmu_event(PMU_EVT_LID, data[1]&1); 1466 } 1467 break; 1468 1469 default: 1470 pmu_pass_intr(data, len); 1471 } 1472 goto next; 1473 } 1474 1475 static struct adb_request* 1476 pmu_sr_intr(void) 1477 { 1478 struct adb_request *req; 1479 int bite = 0; 1480 1481 if (in_8(&via2[B]) & TREQ) { 1482 printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via2[B])); 1483 return NULL; 1484 } 1485 /* The ack may not yet be low when we get the interrupt */ 1486 while ((in_8(&via2[B]) & TACK) != 0) 1487 ; 1488 1489 /* if reading grab the byte, and reset the interrupt */ 1490 if (pmu_state == reading || pmu_state == reading_intr) 1491 bite = in_8(&via1[SR]); 1492 1493 /* reset TREQ and wait for TACK to go high */ 1494 out_8(&via2[B], in_8(&via2[B]) | TREQ); 1495 wait_for_ack(); 1496 1497 switch (pmu_state) { 1498 case sending: 1499 req = current_req; 1500 if (data_len < 0) { 1501 data_len = req->nbytes - 1; 1502 send_byte(data_len); 1503 break; 1504 } 1505 if (data_index <= data_len) { 1506 send_byte(req->data[data_index++]); 1507 break; 1508 } 1509 req->sent = 1; 1510 data_len = pmu_data_len[req->data[0]][1]; 1511 if (data_len == 0) { 1512 pmu_state = idle; 1513 current_req = req->next; 1514 if (req->reply_expected) 1515 req_awaiting_reply = req; 1516 else 1517 return req; 1518 } else { 1519 pmu_state = reading; 1520 data_index = 0; 1521 reply_ptr = req->reply + req->reply_len; 1522 recv_byte(); 1523 } 1524 break; 1525 1526 case intack: 1527 data_index = 0; 1528 data_len = -1; 1529 pmu_state = reading_intr; 1530 reply_ptr = interrupt_data[int_data_last]; 1531 recv_byte(); 1532 if (gpio_irq >= 0 && !gpio_irq_enabled) { 1533 enable_irq(gpio_irq); 1534 gpio_irq_enabled = 1; 1535 } 1536 break; 1537 1538 case reading: 1539 case reading_intr: 1540 if (data_len == -1) { 1541 data_len = bite; 1542 if (bite > 32) 1543 printk(KERN_ERR "PMU: bad reply len %d\n", bite); 1544 } else if (data_index < 32) { 1545 reply_ptr[data_index++] = bite; 1546 } 1547 if (data_index < data_len) { 1548 recv_byte(); 1549 break; 1550 } 1551 1552 if (pmu_state == reading_intr) { 1553 pmu_state = idle; 1554 int_data_state[int_data_last] = int_data_ready; 1555 interrupt_data_len[int_data_last] = data_len; 1556 } else { 1557 req = current_req; 1558 /* 1559 * For PMU sleep and freq change requests, we lock the 1560 * PMU until it's explicitly unlocked. This avoids any 1561 * spurrious event polling getting in 1562 */ 1563 current_req = req->next; 1564 req->reply_len += data_index; 1565 if (req->data[0] == PMU_SLEEP || req->data[0] == PMU_CPU_SPEED) 1566 pmu_state = locked; 1567 else 1568 pmu_state = idle; 1569 return req; 1570 } 1571 break; 1572 1573 default: 1574 printk(KERN_ERR "via_pmu_interrupt: unknown state %d?\n", 1575 pmu_state); 1576 } 1577 return NULL; 1578 } 1579 1580 static irqreturn_t 1581 via_pmu_interrupt(int irq, void *arg) 1582 { 1583 unsigned long flags; 1584 int intr; 1585 int nloop = 0; 1586 int int_data = -1; 1587 struct adb_request *req = NULL; 1588 int handled = 0; 1589 1590 /* This is a bit brutal, we can probably do better */ 1591 spin_lock_irqsave(&pmu_lock, flags); 1592 ++disable_poll; 1593 1594 for (;;) { 1595 /* On 68k Macs, VIA interrupts are dispatched individually. 1596 * Unless we are polling, the relevant IRQ flag has already 1597 * been cleared. 
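		 * When polling (irq == 0), and always on CONFIG_PPC_PMAC,
		 * the code below reads and acks SR_INT/CB1_INT from the IFR
		 * itself; on m68k the irq number passed in selects which of
		 * the two events is being handled.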
1598 */ 1599 intr = 0; 1600 if (IS_ENABLED(CONFIG_PPC_PMAC) || !irq) { 1601 intr = in_8(&via1[IFR]) & (SR_INT | CB1_INT); 1602 out_8(&via1[IFR], intr); 1603 } 1604 #ifndef CONFIG_PPC_PMAC 1605 switch (irq) { 1606 case IRQ_MAC_ADB_CL: 1607 intr = CB1_INT; 1608 break; 1609 case IRQ_MAC_ADB_SR: 1610 intr = SR_INT; 1611 break; 1612 } 1613 #endif 1614 if (intr == 0) 1615 break; 1616 handled = 1; 1617 if (++nloop > 1000) { 1618 printk(KERN_DEBUG "PMU: stuck in intr loop, " 1619 "intr=%x, ier=%x pmu_state=%d\n", 1620 intr, in_8(&via1[IER]), pmu_state); 1621 break; 1622 } 1623 if (intr & CB1_INT) { 1624 adb_int_pending = 1; 1625 pmu_irq_stats[11]++; 1626 } 1627 if (intr & SR_INT) { 1628 req = pmu_sr_intr(); 1629 if (req) 1630 break; 1631 } 1632 #ifndef CONFIG_PPC_PMAC 1633 break; 1634 #endif 1635 } 1636 1637 recheck: 1638 if (pmu_state == idle) { 1639 if (adb_int_pending) { 1640 if (int_data_state[0] == int_data_empty) 1641 int_data_last = 0; 1642 else if (int_data_state[1] == int_data_empty) 1643 int_data_last = 1; 1644 else 1645 goto no_free_slot; 1646 pmu_state = intack; 1647 int_data_state[int_data_last] = int_data_fill; 1648 /* Sounds safer to make sure ACK is high before writing. 1649 * This helped kill a problem with ADB and some iBooks 1650 */ 1651 wait_for_ack(); 1652 send_byte(PMU_INT_ACK); 1653 adb_int_pending = 0; 1654 } else if (current_req) 1655 pmu_start(); 1656 } 1657 no_free_slot: 1658 /* Mark the oldest buffer for flushing */ 1659 if (int_data_state[!int_data_last] == int_data_ready) { 1660 int_data_state[!int_data_last] = int_data_flush; 1661 int_data = !int_data_last; 1662 } else if (int_data_state[int_data_last] == int_data_ready) { 1663 int_data_state[int_data_last] = int_data_flush; 1664 int_data = int_data_last; 1665 } 1666 --disable_poll; 1667 spin_unlock_irqrestore(&pmu_lock, flags); 1668 1669 /* Deal with completed PMU requests outside of the lock */ 1670 if (req) { 1671 pmu_done(req); 1672 req = NULL; 1673 } 1674 1675 /* Deal with interrupt datas outside of the lock */ 1676 if (int_data >= 0) { 1677 pmu_handle_data(interrupt_data[int_data], interrupt_data_len[int_data]); 1678 spin_lock_irqsave(&pmu_lock, flags); 1679 ++disable_poll; 1680 int_data_state[int_data] = int_data_empty; 1681 int_data = -1; 1682 goto recheck; 1683 } 1684 1685 return IRQ_RETVAL(handled); 1686 } 1687 1688 void 1689 pmu_unlock(void) 1690 { 1691 unsigned long flags; 1692 1693 spin_lock_irqsave(&pmu_lock, flags); 1694 if (pmu_state == locked) 1695 pmu_state = idle; 1696 adb_int_pending = 1; 1697 spin_unlock_irqrestore(&pmu_lock, flags); 1698 } 1699 1700 1701 static __maybe_unused irqreturn_t 1702 gpio1_interrupt(int irq, void *arg) 1703 { 1704 unsigned long flags; 1705 1706 if ((in_8(gpio_reg + 0x9) & 0x02) == 0) { 1707 spin_lock_irqsave(&pmu_lock, flags); 1708 if (gpio_irq_enabled > 0) { 1709 disable_irq_nosync(gpio_irq); 1710 gpio_irq_enabled = 0; 1711 } 1712 pmu_irq_stats[12]++; 1713 adb_int_pending = 1; 1714 spin_unlock_irqrestore(&pmu_lock, flags); 1715 via_pmu_interrupt(0, NULL); 1716 return IRQ_HANDLED; 1717 } 1718 return IRQ_NONE; 1719 } 1720 1721 void 1722 pmu_enable_irled(int on) 1723 { 1724 struct adb_request req; 1725 1726 if (pmu_state == uninitialized) 1727 return ; 1728 if (pmu_kind == PMU_KEYLARGO_BASED) 1729 return ; 1730 1731 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, PMU_POW_IRLED | 1732 (on ? 
PMU_POW_ON : PMU_POW_OFF)); 1733 pmu_wait_complete(&req); 1734 } 1735 1736 /* Offset between Unix time (1970-based) and Mac time (1904-based) */ 1737 #define RTC_OFFSET 2082844800 1738 1739 time64_t pmu_get_time(void) 1740 { 1741 struct adb_request req; 1742 u32 now; 1743 1744 if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) 1745 return 0; 1746 pmu_wait_complete(&req); 1747 if (req.reply_len != 4) 1748 pr_err("%s: got %d byte reply\n", __func__, req.reply_len); 1749 now = (req.reply[0] << 24) + (req.reply[1] << 16) + 1750 (req.reply[2] << 8) + req.reply[3]; 1751 return (time64_t)now - RTC_OFFSET; 1752 } 1753 1754 int pmu_set_rtc_time(struct rtc_time *tm) 1755 { 1756 u32 now; 1757 struct adb_request req; 1758 1759 now = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET); 1760 if (pmu_request(&req, NULL, 5, PMU_SET_RTC, 1761 now >> 24, now >> 16, now >> 8, now) < 0) 1762 return -ENXIO; 1763 pmu_wait_complete(&req); 1764 if (req.reply_len != 0) 1765 pr_err("%s: got %d byte reply\n", __func__, req.reply_len); 1766 return 0; 1767 } 1768 1769 void 1770 pmu_restart(void) 1771 { 1772 struct adb_request req; 1773 1774 if (pmu_state == uninitialized) 1775 return; 1776 1777 local_irq_disable(); 1778 1779 drop_interrupts = 1; 1780 1781 if (pmu_kind != PMU_KEYLARGO_BASED) { 1782 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB | 1783 PMU_INT_TICK ); 1784 while(!req.complete) 1785 pmu_poll(); 1786 } 1787 1788 pmu_request(&req, NULL, 1, PMU_RESET); 1789 pmu_wait_complete(&req); 1790 for (;;) 1791 ; 1792 } 1793 1794 void 1795 pmu_shutdown(void) 1796 { 1797 struct adb_request req; 1798 1799 if (pmu_state == uninitialized) 1800 return; 1801 1802 local_irq_disable(); 1803 1804 drop_interrupts = 1; 1805 1806 if (pmu_kind != PMU_KEYLARGO_BASED) { 1807 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB | 1808 PMU_INT_TICK ); 1809 pmu_wait_complete(&req); 1810 } else { 1811 /* Disable server mode on shutdown or we'll just 1812 * wake up again 1813 */ 1814 pmu_set_server_mode(0); 1815 } 1816 1817 pmu_request(&req, NULL, 5, PMU_SHUTDOWN, 1818 'M', 'A', 'T', 'T'); 1819 pmu_wait_complete(&req); 1820 for (;;) 1821 ; 1822 } 1823 1824 int 1825 pmu_present(void) 1826 { 1827 return pmu_state != uninitialized; 1828 } 1829 1830 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 1831 /* 1832 * Put the powerbook to sleep. 
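 * The grackle and Core99 handlers below follow roughly the same pattern:
 * quiesce the PMU (mask its interrupts and/or program the wakeup events
 * where supported), save the VIA state, send the PMU_SLEEP 'M','A','T','T'
 * command, shut down the platform ASICs, run the low-level sleep code (or
 * just mdelay() when __fake_sleep is set), then restore the chipset, VIA
 * and caches and re-enable the PMU on wake. The 3400 variant instead
 * programs the memory controller for self-refresh and puts the CPU to
 * sleep with HID0[SLEEP] and MSR[POW].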
 */

static u32 save_via[8];

static void
save_via_state(void)
{
	save_via[0] = in_8(&via1[ANH]);
	save_via[1] = in_8(&via1[DIRA]);
	save_via[2] = in_8(&via1[B]);
	save_via[3] = in_8(&via1[DIRB]);
	save_via[4] = in_8(&via1[PCR]);
	save_via[5] = in_8(&via1[ACR]);
	save_via[6] = in_8(&via1[T1CL]);
	save_via[7] = in_8(&via1[T1CH]);
}
static void
restore_via_state(void)
{
	out_8(&via1[ANH], save_via[0]);
	out_8(&via1[DIRA], save_via[1]);
	out_8(&via1[B], save_via[2]);
	out_8(&via1[DIRB], save_via[3]);
	out_8(&via1[PCR], save_via[4]);
	out_8(&via1[ACR], save_via[5]);
	out_8(&via1[T1CL], save_via[6]);
	out_8(&via1[T1CH], save_via[7]);
	out_8(&via1[IER], IER_CLR | 0x7f);	/* disable all intrs */
	out_8(&via1[IFR], 0x7f);		/* clear IFR */
	out_8(&via1[IER], IER_SET | SR_INT | CB1_INT);
}

#define GRACKLE_PM	(1<<7)
#define GRACKLE_DOZE	(1<<5)
#define GRACKLE_NAP	(1<<4)
#define GRACKLE_SLEEP	(1<<3)

static int powerbook_sleep_grackle(void)
{
	unsigned long save_l2cr;
	unsigned short pmcr1;
	struct adb_request req;
	struct pci_dev *grackle;

	grackle = pci_get_domain_bus_and_slot(0, 0, 0);
	if (!grackle)
		return -ENODEV;

	/* Turn off various things. Darwin does some retry tests here... */
	pmu_request(&req, NULL, 2, PMU_POWER_CTRL0, PMU_POW0_OFF|PMU_POW0_HARD_DRIVE);
	pmu_wait_complete(&req);
	pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
		    PMU_POW_OFF|PMU_POW_BACKLIGHT|PMU_POW_IRLED|PMU_POW_MEDIABAY);
	pmu_wait_complete(&req);

	/* For 750, save backside cache setting and disable it */
	save_l2cr = _get_L2CR();	/* (returns -1 if not available) */

	if (!__fake_sleep) {
		/* Ask the PMU to put us to sleep */
		pmu_request(&req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T');
		pmu_wait_complete(&req);
	}

	/* The VIA is apparently not restored correctly across sleep, so
	 * save its state now */
	save_via_state();
	/* We shut down some HW */
	pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 1);

	pci_read_config_word(grackle, 0x70, &pmcr1);
	/* Apparently, MacOS uses NAP mode for Grackle ???
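	 * The sequence below clears DOZE and SLEEP and sets PM and NAP in
	 * the Grackle power-management word at config offset 0x70; all four
	 * bits are cleared again once we are awake.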
*/ 1904 pmcr1 &= ~(GRACKLE_DOZE|GRACKLE_SLEEP); 1905 pmcr1 |= GRACKLE_PM|GRACKLE_NAP; 1906 pci_write_config_word(grackle, 0x70, pmcr1); 1907 1908 /* Call low-level ASM sleep handler */ 1909 if (__fake_sleep) 1910 mdelay(5000); 1911 else 1912 low_sleep_handler(); 1913 1914 /* We're awake again, stop grackle PM */ 1915 pci_read_config_word(grackle, 0x70, &pmcr1); 1916 pmcr1 &= ~(GRACKLE_PM|GRACKLE_DOZE|GRACKLE_SLEEP|GRACKLE_NAP); 1917 pci_write_config_word(grackle, 0x70, pmcr1); 1918 1919 pci_dev_put(grackle); 1920 1921 /* Make sure the PMU is idle */ 1922 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,0); 1923 restore_via_state(); 1924 1925 /* Restore L2 cache */ 1926 if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0) 1927 _set_L2CR(save_l2cr); 1928 1929 /* Restore userland MMU context */ 1930 switch_mmu_context(NULL, current->active_mm, NULL); 1931 1932 /* Power things up */ 1933 pmu_unlock(); 1934 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); 1935 pmu_wait_complete(&req); 1936 pmu_request(&req, NULL, 2, PMU_POWER_CTRL0, 1937 PMU_POW0_ON|PMU_POW0_HARD_DRIVE); 1938 pmu_wait_complete(&req); 1939 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, 1940 PMU_POW_ON|PMU_POW_BACKLIGHT|PMU_POW_CHARGER|PMU_POW_IRLED|PMU_POW_MEDIABAY); 1941 pmu_wait_complete(&req); 1942 1943 return 0; 1944 } 1945 1946 static int 1947 powerbook_sleep_Core99(void) 1948 { 1949 unsigned long save_l2cr; 1950 unsigned long save_l3cr; 1951 struct adb_request req; 1952 1953 if (pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) < 0) { 1954 printk(KERN_ERR "Sleep mode not supported on this machine\n"); 1955 return -ENOSYS; 1956 } 1957 1958 if (num_online_cpus() > 1 || cpu_is_offline(0)) 1959 return -EAGAIN; 1960 1961 /* Stop environment and ADB interrupts */ 1962 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, 0); 1963 pmu_wait_complete(&req); 1964 1965 /* Tell PMU what events will wake us up */ 1966 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_CLR_WAKEUP_EVENTS, 1967 0xff, 0xff); 1968 pmu_wait_complete(&req); 1969 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_SET_WAKEUP_EVENTS, 1970 0, PMU_PWR_WAKEUP_KEY | 1971 (option_lid_wakeup ? PMU_PWR_WAKEUP_LID_OPEN : 0)); 1972 pmu_wait_complete(&req); 1973 1974 /* Save the state of the L2 and L3 caches */ 1975 save_l3cr = _get_L3CR(); /* (returns -1 if not available) */ 1976 save_l2cr = _get_L2CR(); /* (returns -1 if not available) */ 1977 1978 if (!__fake_sleep) { 1979 /* Ask the PMU to put us to sleep */ 1980 pmu_request(&req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T'); 1981 pmu_wait_complete(&req); 1982 } 1983 1984 /* The VIA is supposed not to be restored correctly*/ 1985 save_via_state(); 1986 1987 /* Shut down various ASICs. There's a chance that we can no longer 1988 * talk to the PMU after this, so I moved it to _after_ sending the 1989 * sleep command to it. Still need to be checked. 
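	 * (The matching PMAC_FTR_SLEEP_STATE call with argument 0 on the
	 * wake path below powers the Apple core ASICs back up.)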
1990 */ 1991 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 1); 1992 1993 /* Call low-level ASM sleep handler */ 1994 if (__fake_sleep) 1995 mdelay(5000); 1996 else 1997 low_sleep_handler(); 1998 1999 /* Restore Apple core ASICs state */ 2000 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 0); 2001 2002 /* Restore VIA */ 2003 restore_via_state(); 2004 2005 /* tweak LPJ before cpufreq is there */ 2006 loops_per_jiffy *= 2; 2007 2008 /* Restore video */ 2009 pmac_call_early_video_resume(); 2010 2011 /* Restore L2 cache */ 2012 if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0) 2013 _set_L2CR(save_l2cr); 2014 /* Restore L3 cache */ 2015 if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0) 2016 _set_L3CR(save_l3cr); 2017 2018 /* Restore userland MMU context */ 2019 switch_mmu_context(NULL, current->active_mm, NULL); 2020 2021 /* Tell PMU we are ready */ 2022 pmu_unlock(); 2023 pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2); 2024 pmu_wait_complete(&req); 2025 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); 2026 pmu_wait_complete(&req); 2027 2028 /* Restore LPJ, cpufreq will adjust the cpu frequency */ 2029 loops_per_jiffy /= 2; 2030 2031 return 0; 2032 } 2033 2034 #define PB3400_MEM_CTRL 0xf8000000 2035 #define PB3400_MEM_CTRL_SLEEP 0x70 2036 2037 static void __iomem *pb3400_mem_ctrl; 2038 2039 static void powerbook_sleep_init_3400(void) 2040 { 2041 /* map in the memory controller registers */ 2042 pb3400_mem_ctrl = ioremap(PB3400_MEM_CTRL, 0x100); 2043 if (pb3400_mem_ctrl == NULL) 2044 printk(KERN_WARNING "ioremap failed: sleep won't be possible"); 2045 } 2046 2047 static int powerbook_sleep_3400(void) 2048 { 2049 int i, x; 2050 unsigned int hid0; 2051 unsigned long msr; 2052 struct adb_request sleep_req; 2053 unsigned int __iomem *mem_ctrl_sleep; 2054 2055 if (pb3400_mem_ctrl == NULL) 2056 return -ENOMEM; 2057 mem_ctrl_sleep = pb3400_mem_ctrl + PB3400_MEM_CTRL_SLEEP; 2058 2059 /* Set the memory controller to keep the memory refreshed 2060 while we're asleep */ 2061 for (i = 0x403f; i >= 0x4000; --i) { 2062 out_be32(mem_ctrl_sleep, i); 2063 do { 2064 x = (in_be32(mem_ctrl_sleep) >> 16) & 0x3ff; 2065 } while (x == 0); 2066 if (x >= 0x100) 2067 break; 2068 } 2069 2070 /* Ask the PMU to put us to sleep */ 2071 pmu_request(&sleep_req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T'); 2072 pmu_wait_complete(&sleep_req); 2073 pmu_unlock(); 2074 2075 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 1); 2076 2077 asleep = 1; 2078 2079 /* Put the CPU into sleep mode */ 2080 hid0 = mfspr(SPRN_HID0); 2081 hid0 = (hid0 & ~(HID0_NAP | HID0_DOZE)) | HID0_SLEEP; 2082 mtspr(SPRN_HID0, hid0); 2083 local_irq_enable(); 2084 msr = mfmsr() | MSR_POW; 2085 while (asleep) { 2086 mb(); 2087 mtmsr(msr); 2088 isync(); 2089 } 2090 local_irq_disable(); 2091 2092 /* OK, we're awake again, start restoring things */ 2093 out_be32(mem_ctrl_sleep, 0x3f); 2094 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 0); 2095 2096 return 0; 2097 } 2098 2099 #endif /* CONFIG_SUSPEND && CONFIG_PPC32 */ 2100 2101 /* 2102 * Support for /dev/pmu device 2103 */ 2104 #define RB_SIZE 0x10 2105 struct pmu_private { 2106 struct list_head list; 2107 int rb_get; 2108 int rb_put; 2109 struct rb_entry { 2110 unsigned short len; 2111 unsigned char data[16]; 2112 } rb_buf[RB_SIZE]; 2113 wait_queue_head_t wait; 2114 spinlock_t lock; 2115 #if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT) 2116 int backlight_locker; 2117 #endif 2118 }; 2119 2120 static LIST_HEAD(all_pmu_pvt); 2121 static 
static void
pmu_pass_intr(unsigned char *data, int len)
{
	struct pmu_private *pp;
	struct list_head *list;
	int i;
	unsigned long flags;

	if (len > sizeof(pp->rb_buf[0].data))
		len = sizeof(pp->rb_buf[0].data);
	spin_lock_irqsave(&all_pvt_lock, flags);
	for (list = &all_pmu_pvt; (list = list->next) != &all_pmu_pvt; ) {
		pp = list_entry(list, struct pmu_private, list);
		spin_lock(&pp->lock);
		i = pp->rb_put + 1;
		if (i >= RB_SIZE)
			i = 0;
		if (i != pp->rb_get) {
			struct rb_entry *rp = &pp->rb_buf[pp->rb_put];
			rp->len = len;
			memcpy(rp->data, data, len);
			pp->rb_put = i;
			wake_up_interruptible(&pp->wait);
		}
		spin_unlock(&pp->lock);
	}
	spin_unlock_irqrestore(&all_pvt_lock, flags);
}

static int
pmu_open(struct inode *inode, struct file *file)
{
	struct pmu_private *pp;
	unsigned long flags;

	pp = kmalloc(sizeof(struct pmu_private), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->rb_get = pp->rb_put = 0;
	spin_lock_init(&pp->lock);
	init_waitqueue_head(&pp->wait);
	mutex_lock(&pmu_info_proc_mutex);
	spin_lock_irqsave(&all_pvt_lock, flags);
#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
	pp->backlight_locker = 0;
#endif
	list_add(&pp->list, &all_pmu_pvt);
	spin_unlock_irqrestore(&all_pvt_lock, flags);
	file->private_data = pp;
	mutex_unlock(&pmu_info_proc_mutex);
	return 0;
}

static ssize_t
pmu_read(struct file *file, char __user *buf,
	 size_t count, loff_t *ppos)
{
	struct pmu_private *pp = file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int ret = 0;

	if (count < 1 || !pp)
		return -EINVAL;
	if (!access_ok(buf, count))
		return -EFAULT;

	spin_lock_irqsave(&pp->lock, flags);
	add_wait_queue(&pp->wait, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	for (;;) {
		ret = -EAGAIN;
		if (pp->rb_get != pp->rb_put) {
			int i = pp->rb_get;
			struct rb_entry *rp = &pp->rb_buf[i];
			ret = rp->len;
			spin_unlock_irqrestore(&pp->lock, flags);
			if (ret > count)
				ret = count;
			if (ret > 0 && copy_to_user(buf, rp->data, ret))
				ret = -EFAULT;
			if (++i >= RB_SIZE)
				i = 0;
			spin_lock_irqsave(&pp->lock, flags);
			pp->rb_get = i;
		}
		if (ret >= 0)
			break;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		spin_unlock_irqrestore(&pp->lock, flags);
		schedule();
		spin_lock_irqsave(&pp->lock, flags);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&pp->wait, &wait);
	spin_unlock_irqrestore(&pp->lock, flags);

	return ret;
}

static ssize_t
pmu_write(struct file *file, const char __user *buf,
	  size_t count, loff_t *ppos)
{
	return 0;
}

static __poll_t
pmu_fpoll(struct file *filp, poll_table *wait)
{
	struct pmu_private *pp = filp->private_data;
	__poll_t mask = 0;
	unsigned long flags;

	if (!pp)
		return 0;
	poll_wait(filp, &pp->wait, wait);
	spin_lock_irqsave(&pp->lock, flags);
	if (pp->rb_get != pp->rb_put)
		mask |= EPOLLIN;
	spin_unlock_irqrestore(&pp->lock, flags);
	return mask;
}
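
/*
 * A minimal user-space sketch of how the read/poll interface above is
 * meant to be used (this assumes the conventional /dev/pmu node for the
 * misc device registered further down; handle_event() is a placeholder
 * for whatever the client does with the raw event bytes):
 *
 *	int fd = open("/dev/pmu", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	unsigned char ev[16];
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		ssize_t n = read(fd, ev, sizeof(ev));
 *		if (n > 0)
 *			handle_event(ev, n);	// one queued PMU event per read
 *	}
 */
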
static int
pmu_release(struct inode *inode, struct file *file)
{
	struct pmu_private *pp = file->private_data;
	unsigned long flags;

	if (pp) {
		file->private_data = NULL;
		spin_lock_irqsave(&all_pvt_lock, flags);
		list_del(&pp->list);
		spin_unlock_irqrestore(&all_pvt_lock, flags);

#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
		if (pp->backlight_locker)
			pmac_backlight_enable();
#endif

		kfree(pp);
	}
	return 0;
}

#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
static void pmac_suspend_disable_irqs(void)
{
	/* Call platform functions marked "on sleep" */
	pmac_pfunc_i2c_suspend();
	pmac_pfunc_base_suspend();
}

static int powerbook_sleep(suspend_state_t state)
{
	int error = 0;

	/* Wait for completion of async requests */
	while (!batt_req.complete)
		pmu_poll();

	/* Give up the lazy FPU & vec so we don't have to back them
	 * up from the low-level code
	 */
	enable_kernel_fp();

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
#endif /* CONFIG_ALTIVEC */

	switch (pmu_kind) {
	case PMU_OHARE_BASED:
		error = powerbook_sleep_3400();
		break;
	case PMU_HEATHROW_BASED:
	case PMU_PADDINGTON_BASED:
		error = powerbook_sleep_grackle();
		break;
	case PMU_KEYLARGO_BASED:
		error = powerbook_sleep_Core99();
		break;
	default:
		return -ENOSYS;
	}

	if (error)
		return error;

	mdelay(100);

	return 0;
}

static void pmac_suspend_enable_irqs(void)
{
	/* Force a poll of ADB interrupts */
	adb_int_pending = 1;
	via_pmu_interrupt(0, NULL);

	mdelay(10);

	/* Call platform functions marked "on wake" */
	pmac_pfunc_base_resume();
	pmac_pfunc_i2c_resume();
}

static int pmu_sleep_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM
		&& (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
}

static const struct platform_suspend_ops pmu_pm_ops = {
	.enter = powerbook_sleep,
	.valid = pmu_sleep_valid,
};

static int register_pmu_pm_ops(void)
{
	if (pmu_kind == PMU_OHARE_BASED)
		powerbook_sleep_init_3400();
	ppc_md.suspend_disable_irqs = pmac_suspend_disable_irqs;
	ppc_md.suspend_enable_irqs = pmac_suspend_enable_irqs;
	suspend_set_ops(&pmu_pm_ops);

	return 0;
}

device_initcall(register_pmu_pm_ops);
#endif
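
/*
 * The ioctl interface below is just as simple to drive from user space.
 * A sketch, again assuming a /dev/pmu file descriptor (the PMU_IOC_*
 * numbers are declared in the linux/pmu.h uapi header):
 *
 *	__u32 model, can_sleep;
 *
 *	ioctl(fd, PMU_IOC_GET_MODEL, &model);	  // returns pmu_kind (PMU_*_BASED)
 *	ioctl(fd, PMU_IOC_CAN_SLEEP, &can_sleep); // 1 if the platform can sleep
 *	if (can_sleep)
 *		ioctl(fd, PMU_IOC_SLEEP, 0);	  // requires CAP_SYS_ADMIN
 */
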
static int pmu_ioctl(struct file *filp,
		     u_int cmd, u_long arg)
{
	__u32 __user *argp = (__u32 __user *)arg;
	int error = -EINVAL;

	switch (cmd) {
#ifdef CONFIG_PPC_PMAC
	case PMU_IOC_SLEEP:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		return pm_suspend(PM_SUSPEND_MEM);
	case PMU_IOC_CAN_SLEEP:
		if (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) < 0)
			return put_user(0, argp);
		else
			return put_user(1, argp);
#endif

#ifdef CONFIG_PMAC_BACKLIGHT_LEGACY
	/* Compatibility ioctls for backlight */
	case PMU_IOC_GET_BACKLIGHT:
	{
		int brightness;

		brightness = pmac_backlight_get_legacy_brightness();
		if (brightness < 0)
			return brightness;
		else
			return put_user(brightness, argp);
	}
	case PMU_IOC_SET_BACKLIGHT:
	{
		int brightness;

		error = get_user(brightness, argp);
		if (error)
			return error;

		return pmac_backlight_set_legacy_brightness(brightness);
	}
#ifdef CONFIG_INPUT_ADBHID
	case PMU_IOC_GRAB_BACKLIGHT: {
		struct pmu_private *pp = filp->private_data;

		if (pp->backlight_locker)
			return 0;

		pp->backlight_locker = 1;
		pmac_backlight_disable();

		return 0;
	}
#endif /* CONFIG_INPUT_ADBHID */
#endif /* CONFIG_PMAC_BACKLIGHT_LEGACY */

	case PMU_IOC_GET_MODEL:
		return put_user(pmu_kind, argp);
	case PMU_IOC_HAS_ADB:
		return put_user(pmu_has_adb, argp);
	}
	return error;
}

static long pmu_unlocked_ioctl(struct file *filp,
			       u_int cmd, u_long arg)
{
	int ret;

	mutex_lock(&pmu_info_proc_mutex);
	ret = pmu_ioctl(filp, cmd, arg);
	mutex_unlock(&pmu_info_proc_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT
#define PMU_IOC_GET_BACKLIGHT32	_IOR('B', 1, compat_size_t)
#define PMU_IOC_SET_BACKLIGHT32	_IOW('B', 2, compat_size_t)
#define PMU_IOC_GET_MODEL32	_IOR('B', 3, compat_size_t)
#define PMU_IOC_HAS_ADB32	_IOR('B', 4, compat_size_t)
#define PMU_IOC_CAN_SLEEP32	_IOR('B', 5, compat_size_t)
#define PMU_IOC_GRAB_BACKLIGHT32 _IOR('B', 6, compat_size_t)

static long compat_pmu_ioctl(struct file *filp, u_int cmd, u_long arg)
{
	switch (cmd) {
	case PMU_IOC_SLEEP:
		break;
	case PMU_IOC_GET_BACKLIGHT32:
		cmd = PMU_IOC_GET_BACKLIGHT;
		break;
	case PMU_IOC_SET_BACKLIGHT32:
		cmd = PMU_IOC_SET_BACKLIGHT;
		break;
	case PMU_IOC_GET_MODEL32:
		cmd = PMU_IOC_GET_MODEL;
		break;
	case PMU_IOC_HAS_ADB32:
		cmd = PMU_IOC_HAS_ADB;
		break;
	case PMU_IOC_CAN_SLEEP32:
		cmd = PMU_IOC_CAN_SLEEP;
		break;
	case PMU_IOC_GRAB_BACKLIGHT32:
		cmd = PMU_IOC_GRAB_BACKLIGHT;
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return pmu_unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations pmu_device_fops = {
	.read = pmu_read,
	.write = pmu_write,
	.poll = pmu_fpoll,
	.unlocked_ioctl = pmu_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_pmu_ioctl,
#endif
	.open = pmu_open,
	.release = pmu_release,
	.llseek = noop_llseek,
};

static struct miscdevice pmu_device = {
	PMU_MINOR, "pmu", &pmu_device_fops
};

static int pmu_device_init(void)
{
	if (pmu_state == uninitialized)
		return 0;
	if (misc_register(&pmu_device) < 0)
		printk(KERN_ERR "via-pmu: cannot register misc device.\n");
	return 0;
}
device_initcall(pmu_device_init);
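
/*
 * The DEBUG_SLEEP helpers below bypass the interrupt-driven state machine
 * entirely: they bit-bang the VIA shift register and handshake lines with
 * local interrupts disabled, which is what makes pmu_blink() usable while
 * debugging the sleep/wake path, when normal PMU interrupts may not be
 * available.
 */
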
#ifdef DEBUG_SLEEP
static inline void
polled_handshake(void)
{
	via2[B] &= ~TREQ; eieio();
	while ((via2[B] & TACK) != 0)
		;
	via2[B] |= TREQ; eieio();
	while ((via2[B] & TACK) == 0)
		;
}

static inline void
polled_send_byte(int x)
{
	via1[ACR] |= SR_OUT | SR_EXT; eieio();
	via1[SR] = x; eieio();
	polled_handshake();
}

static inline int
polled_recv_byte(void)
{
	int x;

	via1[ACR] = (via1[ACR] & ~SR_OUT) | SR_EXT; eieio();
	x = via1[SR]; eieio();
	polled_handshake();
	x = via1[SR]; eieio();
	return x;
}

int
pmu_polled_request(struct adb_request *req)
{
	unsigned long flags;
	int i, l, c;

	req->complete = 1;
	c = req->data[0];
	l = pmu_data_len[c][0];
	if (l >= 0 && req->nbytes != l + 1)
		return -EINVAL;

	local_irq_save(flags);
	while (pmu_state != idle)
		pmu_poll();

	while ((via2[B] & TACK) == 0)
		;
	polled_send_byte(c);
	if (l < 0) {
		l = req->nbytes - 1;
		polled_send_byte(l);
	}
	for (i = 1; i <= l; ++i)
		polled_send_byte(req->data[i]);

	l = pmu_data_len[c][1];
	if (l < 0)
		l = polled_recv_byte();
	for (i = 0; i < l; ++i)
		req->reply[i + req->reply_len] = polled_recv_byte();

	if (req->done)
		(*req->done)(req);

	local_irq_restore(flags);
	return 0;
}

/* N.B. This doesn't work on the 3400 */
void pmu_blink(int n)
{
	struct adb_request req;

	memset(&req, 0, sizeof(req));

	for (; n > 0; --n) {
		req.nbytes = 4;
		req.done = NULL;
		req.data[0] = 0xee;
		req.data[1] = 4;
		req.data[2] = 0;
		req.data[3] = 1;
		req.reply[0] = ADB_RET_OK;
		req.reply_len = 1;
		req.reply_expected = 0;
		pmu_polled_request(&req);
		mdelay(50);
		req.nbytes = 4;
		req.done = NULL;
		req.data[0] = 0xee;
		req.data[1] = 4;
		req.data[2] = 0;
		req.data[3] = 0;
		req.reply[0] = ADB_RET_OK;
		req.reply_len = 1;
		req.reply_expected = 0;
		pmu_polled_request(&req);
		mdelay(50);
	}
	mdelay(50);
}
#endif /* DEBUG_SLEEP */

#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
int pmu_sys_suspended;

static int pmu_syscore_suspend(void)
{
	/* Suspend PMU event interrupts */
	pmu_suspend();
	pmu_sys_suspended = 1;

#ifdef CONFIG_PMAC_BACKLIGHT
	/* Tell backlight code not to muck around with the chip anymore */
	pmu_backlight_set_sleep(1);
#endif

	return 0;
}

static void pmu_syscore_resume(void)
{
	struct adb_request req;

	if (!pmu_sys_suspended)
		return;

	/* Tell PMU we are ready */
	pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
	pmu_wait_complete(&req);

#ifdef CONFIG_PMAC_BACKLIGHT
	/* Tell backlight code it can use the chip again */
	pmu_backlight_set_sleep(0);
#endif
	/* Resume PMU event interrupts */
	pmu_resume();
	pmu_sys_suspended = 0;
}

static struct syscore_ops pmu_syscore_ops = {
	.suspend = pmu_syscore_suspend,
	.resume = pmu_syscore_resume,
};

static int pmu_syscore_register(void)
{
	register_syscore_ops(&pmu_syscore_ops);

	return 0;
}
subsys_initcall(pmu_syscore_register);
#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */

EXPORT_SYMBOL(pmu_request);
EXPORT_SYMBOL(pmu_queue_request);
EXPORT_SYMBOL(pmu_poll);
EXPORT_SYMBOL(pmu_poll_adb);
EXPORT_SYMBOL(pmu_wait_complete);
EXPORT_SYMBOL(pmu_suspend);
EXPORT_SYMBOL(pmu_resume);
EXPORT_SYMBOL(pmu_unlock);
#if defined(CONFIG_PPC32)
EXPORT_SYMBOL(pmu_enable_irled);
EXPORT_SYMBOL(pmu_battery_count);
EXPORT_SYMBOL(pmu_batteries);
EXPORT_SYMBOL(pmu_power_flags);
#endif /* CONFIG_PPC32 */
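
/*
 * For completeness, the exported request API above is driven from other
 * kernel code the same way it is used throughout this file: the caller
 * owns a struct adb_request and either passes a completion callback or
 * waits synchronously. A minimal sketch of the synchronous form:
 *
 *	struct adb_request req;
 *
 *	pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
 *	pmu_wait_complete(&req);
 *
 * The byte count covers the PMU command byte plus its arguments; passing
 * a done() callback instead of NULL makes the request asynchronous.
 */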