// SPDX-License-Identifier: GPL-2.0
/*
 * Device driver for the PMU in Apple PowerBooks and PowerMacs.
 *
 * The VIA (versatile interface adapter) interfaces to the PMU,
 * a 6805 microprocessor core whose primary function is to control
 * battery charging and system power on the PowerBook 3400 and 2400.
 * The PMU also controls the ADB (Apple Desktop Bus) which connects
 * to the keyboard and mouse, as well as the non-volatile RAM
 * and the RTC (real time clock) chip.
 *
 * Copyright (C) 1998 Paul Mackerras and Fabio Riccardi.
 * Copyright (C) 2001-2002 Benjamin Herrenschmidt
 * Copyright (C) 2006-2007 Johannes Berg
 *
 * THIS DRIVER IS BECOMING A TOTAL MESS !
 *  - Cleanup atomically disabling reply to PMU events after
 *    a sleep or a freq. switch
 *
 */
#include <stdarg.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched/signal.h>
#include <linux/miscdevice.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/cuda.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/freezer.h>
#include <linux/syscalls.h>
#include <linux/suspend.h>
#include <linux/cpu.h>
#include <linux/compat.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>
#include <linux/pgtable.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/sections.h>
#include <asm/irq.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#include <asm/pmac_pfunc.h>
#include <asm/pmac_low_i2c.h>
#include <asm/prom.h>
#include <asm/mmu_context.h>
#include <asm/cputable.h>
#include <asm/time.h>
#include <asm/backlight.h>
#else
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_via.h>
#endif

#include "via-pmu-event.h"

/* Some compile options */
#undef DEBUG_SLEEP

/* How many iterations between battery polls */
#define BATTERY_POLLING_COUNT	2

static DEFINE_MUTEX(pmu_info_proc_mutex);

/* VIA registers - spaced 0x200 bytes apart */
#define RS		0x200		/* skip between registers */
#define B		0		/* B-side data */
#define A		RS		/* A-side data */
#define DIRB		(2*RS)		/* B-side direction (1=output) */
#define DIRA		(3*RS)		/* A-side direction (1=output) */
#define T1CL		(4*RS)		/* Timer 1 ctr/latch (low 8 bits) */
#define T1CH		(5*RS)		/* Timer 1 counter (high 8 bits) */
#define T1LL		(6*RS)		/* Timer 1 latch (low 8 bits) */
#define T1LH		(7*RS)		/* Timer 1 latch (high 8 bits) */
#define T2CL		(8*RS)		/* Timer 2 ctr/latch (low 8 bits) */
#define T2CH		(9*RS)		/* Timer 2 counter (high 8 bits) */
#define SR		(10*RS)		/* Shift register */
#define ACR		(11*RS)		/* Auxiliary control register */
#define PCR		(12*RS)		/* Peripheral control register */
#define IFR		(13*RS)		/* Interrupt flag register */
#define IER		(14*RS)		/* Interrupt enable register */
#define ANH		(15*RS)		/* A-side data, no handshake */

/* Bits in B data register: both active low */
#ifdef CONFIG_PPC_PMAC
#define TACK		0x08		/* Transfer acknowledge (input) */
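/*
 * A brief sketch of how these handshake bits are used (for orientation
 * only; see wait_for_ack(), send_byte(), recv_byte() and pmu_sr_intr()
 * further down for the real code): send_byte()/recv_byte() assert TREQ by
 * driving it low, pmu_sr_intr() raises it again once a byte has been
 * shifted, and wait_for_ack() polls TACK until the PMU releases it,
 * roughly:
 *
 *	while ((in_8(&via2[B]) & TACK) == 0)
 *		udelay(10);
 */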
#define TREQ 0x10 /* Transfer request (output) */ 106 #else 107 #define TACK 0x02 108 #define TREQ 0x04 109 #endif 110 111 /* Bits in ACR */ 112 #define SR_CTRL 0x1c /* Shift register control bits */ 113 #define SR_EXT 0x0c /* Shift on external clock */ 114 #define SR_OUT 0x10 /* Shift out if 1 */ 115 116 /* Bits in IFR and IER */ 117 #define IER_SET 0x80 /* set bits in IER */ 118 #define IER_CLR 0 /* clear bits in IER */ 119 #define SR_INT 0x04 /* Shift register full/empty */ 120 #define CB2_INT 0x08 121 #define CB1_INT 0x10 /* transition on CB1 input */ 122 123 static volatile enum pmu_state { 124 uninitialized = 0, 125 idle, 126 sending, 127 intack, 128 reading, 129 reading_intr, 130 locked, 131 } pmu_state; 132 133 static volatile enum int_data_state { 134 int_data_empty, 135 int_data_fill, 136 int_data_ready, 137 int_data_flush 138 } int_data_state[2] = { int_data_empty, int_data_empty }; 139 140 static struct adb_request *current_req; 141 static struct adb_request *last_req; 142 static struct adb_request *req_awaiting_reply; 143 static unsigned char interrupt_data[2][32]; 144 static int interrupt_data_len[2]; 145 static int int_data_last; 146 static unsigned char *reply_ptr; 147 static int data_index; 148 static int data_len; 149 static volatile int adb_int_pending; 150 static volatile int disable_poll; 151 static int pmu_kind = PMU_UNKNOWN; 152 static int pmu_fully_inited; 153 static int pmu_has_adb; 154 #ifdef CONFIG_PPC_PMAC 155 static volatile unsigned char __iomem *via1; 156 static volatile unsigned char __iomem *via2; 157 static struct device_node *vias; 158 static struct device_node *gpio_node; 159 #endif 160 static unsigned char __iomem *gpio_reg; 161 static int gpio_irq = 0; 162 static int gpio_irq_enabled = -1; 163 static volatile int pmu_suspended; 164 static spinlock_t pmu_lock; 165 static u8 pmu_intr_mask; 166 static int pmu_version; 167 static int drop_interrupts; 168 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 169 static int option_lid_wakeup = 1; 170 #endif /* CONFIG_SUSPEND && CONFIG_PPC32 */ 171 static unsigned long async_req_locks; 172 173 #define NUM_IRQ_STATS 13 174 static unsigned int pmu_irq_stats[NUM_IRQ_STATS]; 175 176 static struct proc_dir_entry *proc_pmu_root; 177 static struct proc_dir_entry *proc_pmu_info; 178 static struct proc_dir_entry *proc_pmu_irqstats; 179 static struct proc_dir_entry *proc_pmu_options; 180 static int option_server_mode; 181 182 int pmu_battery_count; 183 static int pmu_cur_battery; 184 unsigned int pmu_power_flags = PMU_PWR_AC_PRESENT; 185 struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES]; 186 static int query_batt_timer = BATTERY_POLLING_COUNT; 187 static struct adb_request batt_req; 188 static struct proc_dir_entry *proc_pmu_batt[PMU_MAX_BATTERIES]; 189 190 int asleep; 191 192 #ifdef CONFIG_ADB 193 static int adb_dev_map; 194 static int pmu_adb_flags; 195 196 static int pmu_probe(void); 197 static int pmu_init(void); 198 static int pmu_send_request(struct adb_request *req, int sync); 199 static int pmu_adb_autopoll(int devs); 200 static int pmu_adb_reset_bus(void); 201 #endif /* CONFIG_ADB */ 202 203 static int init_pmu(void); 204 static void pmu_start(void); 205 static irqreturn_t via_pmu_interrupt(int irq, void *arg); 206 static irqreturn_t gpio1_interrupt(int irq, void *arg); 207 static int pmu_info_proc_show(struct seq_file *m, void *v); 208 static int pmu_irqstats_proc_show(struct seq_file *m, void *v); 209 static int pmu_battery_proc_show(struct seq_file *m, void *v); 210 static void pmu_pass_intr(unsigned 
char *data, int len); 211 static const struct proc_ops pmu_options_proc_ops; 212 213 #ifdef CONFIG_ADB 214 const struct adb_driver via_pmu_driver = { 215 .name = "PMU", 216 .probe = pmu_probe, 217 .init = pmu_init, 218 .send_request = pmu_send_request, 219 .autopoll = pmu_adb_autopoll, 220 .poll = pmu_poll_adb, 221 .reset_bus = pmu_adb_reset_bus, 222 }; 223 #endif /* CONFIG_ADB */ 224 225 extern void low_sleep_handler(void); 226 extern void enable_kernel_altivec(void); 227 extern void enable_kernel_fp(void); 228 229 #ifdef DEBUG_SLEEP 230 int pmu_polled_request(struct adb_request *req); 231 void pmu_blink(int n); 232 #endif 233 234 /* 235 * This table indicates for each PMU opcode: 236 * - the number of data bytes to be sent with the command, or -1 237 * if a length byte should be sent, 238 * - the number of response bytes which the PMU will return, or 239 * -1 if it will send a length byte. 240 */ 241 static const s8 pmu_data_len[256][2] = { 242 /* 0 1 2 3 4 5 6 7 */ 243 /*00*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 244 /*08*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 245 /*10*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 246 /*18*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0, 0}, 247 /*20*/ {-1, 0},{ 0, 0},{ 2, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0}, 248 /*28*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0,-1}, 249 /*30*/ { 4, 0},{20, 0},{-1, 0},{ 3, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 250 /*38*/ { 0, 4},{ 0,20},{ 2,-1},{ 2, 1},{ 3,-1},{-1,-1},{-1,-1},{ 4, 0}, 251 /*40*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 252 /*48*/ { 0, 1},{ 0, 1},{-1,-1},{ 1, 0},{ 1, 0},{-1,-1},{-1,-1},{-1,-1}, 253 /*50*/ { 1, 0},{ 0, 0},{ 2, 0},{ 2, 0},{-1, 0},{ 1, 0},{ 3, 0},{ 1, 0}, 254 /*58*/ { 0, 1},{ 1, 0},{ 0, 2},{ 0, 2},{ 0,-1},{-1,-1},{-1,-1},{-1,-1}, 255 /*60*/ { 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 256 /*68*/ { 0, 3},{ 0, 3},{ 0, 2},{ 0, 8},{ 0,-1},{ 0,-1},{-1,-1},{-1,-1}, 257 /*70*/ { 1, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 258 /*78*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{ 5, 1},{ 4, 1},{ 4, 1}, 259 /*80*/ { 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 260 /*88*/ { 0, 5},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 261 /*90*/ { 1, 0},{ 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 262 /*98*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 263 /*a0*/ { 2, 0},{ 2, 0},{ 2, 0},{ 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0}, 264 /*a8*/ { 1, 1},{ 1, 0},{ 3, 0},{ 2, 0},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 265 /*b0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 266 /*b8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 267 /*c0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 268 /*c8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 269 /*d0*/ { 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 270 /*d8*/ { 1, 1},{ 1, 1},{-1,-1},{-1,-1},{ 0, 1},{ 0,-1},{-1,-1},{-1,-1}, 271 /*e0*/ {-1, 0},{ 4, 0},{ 0, 1},{-1, 0},{-1, 0},{ 4, 0},{-1, 0},{-1, 0}, 272 /*e8*/ { 3,-1},{-1,-1},{ 0, 1},{-1,-1},{ 0,-1},{-1,-1},{-1,-1},{ 0, 0}, 273 /*f0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 274 /*f8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 275 }; 276 277 static char *pbook_type[] = { 278 "Unknown PowerBook", 279 "PowerBook 2400/3400/3500(G3)", 280 "PowerBook G3 Series", 281 
"1999 PowerBook G3", 282 "Core99" 283 }; 284 285 int __init find_via_pmu(void) 286 { 287 #ifdef CONFIG_PPC_PMAC 288 u64 taddr; 289 const u32 *reg; 290 291 if (pmu_state != uninitialized) 292 return 1; 293 vias = of_find_node_by_name(NULL, "via-pmu"); 294 if (vias == NULL) 295 return 0; 296 297 reg = of_get_property(vias, "reg", NULL); 298 if (reg == NULL) { 299 printk(KERN_ERR "via-pmu: No \"reg\" property !\n"); 300 goto fail; 301 } 302 taddr = of_translate_address(vias, reg); 303 if (taddr == OF_BAD_ADDR) { 304 printk(KERN_ERR "via-pmu: Can't translate address !\n"); 305 goto fail; 306 } 307 308 spin_lock_init(&pmu_lock); 309 310 pmu_has_adb = 1; 311 312 pmu_intr_mask = PMU_INT_PCEJECT | 313 PMU_INT_SNDBRT | 314 PMU_INT_ADB | 315 PMU_INT_TICK; 316 317 if (of_node_name_eq(vias->parent, "ohare") || 318 of_device_is_compatible(vias->parent, "ohare")) 319 pmu_kind = PMU_OHARE_BASED; 320 else if (of_device_is_compatible(vias->parent, "paddington")) 321 pmu_kind = PMU_PADDINGTON_BASED; 322 else if (of_device_is_compatible(vias->parent, "heathrow")) 323 pmu_kind = PMU_HEATHROW_BASED; 324 else if (of_device_is_compatible(vias->parent, "Keylargo") 325 || of_device_is_compatible(vias->parent, "K2-Keylargo")) { 326 struct device_node *gpiop; 327 struct device_node *adbp; 328 u64 gaddr = OF_BAD_ADDR; 329 330 pmu_kind = PMU_KEYLARGO_BASED; 331 adbp = of_find_node_by_type(NULL, "adb"); 332 pmu_has_adb = (adbp != NULL); 333 of_node_put(adbp); 334 pmu_intr_mask = PMU_INT_PCEJECT | 335 PMU_INT_SNDBRT | 336 PMU_INT_ADB | 337 PMU_INT_TICK | 338 PMU_INT_ENVIRONMENT; 339 340 gpiop = of_find_node_by_name(NULL, "gpio"); 341 if (gpiop) { 342 reg = of_get_property(gpiop, "reg", NULL); 343 if (reg) 344 gaddr = of_translate_address(gpiop, reg); 345 if (gaddr != OF_BAD_ADDR) 346 gpio_reg = ioremap(gaddr, 0x10); 347 of_node_put(gpiop); 348 } 349 if (gpio_reg == NULL) { 350 printk(KERN_ERR "via-pmu: Can't find GPIO reg !\n"); 351 goto fail; 352 } 353 } else 354 pmu_kind = PMU_UNKNOWN; 355 356 via1 = via2 = ioremap(taddr, 0x2000); 357 if (via1 == NULL) { 358 printk(KERN_ERR "via-pmu: Can't map address !\n"); 359 goto fail_via_remap; 360 } 361 362 out_8(&via1[IER], IER_CLR | 0x7f); /* disable all intrs */ 363 out_8(&via1[IFR], 0x7f); /* clear IFR */ 364 365 pmu_state = idle; 366 367 if (!init_pmu()) 368 goto fail_init; 369 370 sys_ctrler = SYS_CTRLER_PMU; 371 372 return 1; 373 374 fail_init: 375 iounmap(via1); 376 via1 = via2 = NULL; 377 fail_via_remap: 378 iounmap(gpio_reg); 379 gpio_reg = NULL; 380 fail: 381 of_node_put(vias); 382 vias = NULL; 383 pmu_state = uninitialized; 384 return 0; 385 #else 386 if (macintosh_config->adb_type != MAC_ADB_PB2) 387 return 0; 388 389 pmu_kind = PMU_UNKNOWN; 390 391 spin_lock_init(&pmu_lock); 392 393 pmu_has_adb = 1; 394 395 pmu_intr_mask = PMU_INT_PCEJECT | 396 PMU_INT_SNDBRT | 397 PMU_INT_ADB | 398 PMU_INT_TICK; 399 400 pmu_state = idle; 401 402 if (!init_pmu()) { 403 pmu_state = uninitialized; 404 return 0; 405 } 406 407 return 1; 408 #endif /* !CONFIG_PPC_PMAC */ 409 } 410 411 #ifdef CONFIG_ADB 412 static int pmu_probe(void) 413 { 414 return pmu_state == uninitialized ? -ENODEV : 0; 415 } 416 417 static int pmu_init(void) 418 { 419 return pmu_state == uninitialized ? -ENODEV : 0; 420 } 421 #endif /* CONFIG_ADB */ 422 423 /* 424 * We can't wait until pmu_init gets called, that happens too late. 425 * It happens after IDE and SCSI initialization, which can take a few 426 * seconds, and by that time the PMU could have given up on us and 427 * turned us off. 
 * Thus this is called with arch_initcall rather than device_initcall.
 */
static int __init via_pmu_start(void)
{
	unsigned int __maybe_unused irq;

	if (pmu_state == uninitialized)
		return -ENODEV;

	batt_req.complete = 1;

#ifdef CONFIG_PPC_PMAC
	irq = irq_of_parse_and_map(vias, 0);
	if (!irq) {
		printk(KERN_ERR "via-pmu: can't map interrupt\n");
		return -ENODEV;
	}
	/* We set IRQF_NO_SUSPEND because we don't want the interrupt
	 * to be disabled between the two passes of driver suspend;
	 * we control that disabling ourselves.
	 */
	if (request_irq(irq, via_pmu_interrupt, IRQF_NO_SUSPEND,
			"VIA-PMU", (void *)0)) {
		printk(KERN_ERR "via-pmu: can't request irq %d\n", irq);
		return -ENODEV;
	}

	if (pmu_kind == PMU_KEYLARGO_BASED) {
		gpio_node = of_find_node_by_name(NULL, "extint-gpio1");
		if (gpio_node == NULL)
			gpio_node = of_find_node_by_name(NULL,
							 "pmu-interrupt");
		if (gpio_node)
			gpio_irq = irq_of_parse_and_map(gpio_node, 0);

		if (gpio_irq) {
			if (request_irq(gpio_irq, gpio1_interrupt,
					IRQF_NO_SUSPEND, "GPIO1 ADB",
					(void *)0))
				printk(KERN_ERR "pmu: can't get irq %d"
				       " (GPIO1)\n", gpio_irq);
			else
				gpio_irq_enabled = 1;
		}
	}

	/* Enable interrupts */
	out_8(&via1[IER], IER_SET | SR_INT | CB1_INT);
#else
	if (request_irq(IRQ_MAC_ADB_SR, via_pmu_interrupt, IRQF_NO_SUSPEND,
			"VIA-PMU-SR", NULL)) {
		pr_err("%s: couldn't get SR irq\n", __func__);
		return -ENODEV;
	}
	if (request_irq(IRQ_MAC_ADB_CL, via_pmu_interrupt, IRQF_NO_SUSPEND,
			"VIA-PMU-CL", NULL)) {
		pr_err("%s: couldn't get CL irq\n", __func__);
		free_irq(IRQ_MAC_ADB_SR, NULL);
		return -ENODEV;
	}
#endif /* !CONFIG_PPC_PMAC */

	pmu_fully_inited = 1;

	/* Make sure the PMU settles down before continuing. This is _very_
	 * important since the IDE probe may shut interrupts down for quite
	 * a bit of time. If a PMU communication is pending while this
	 * happens, the PMU may time out. Note that on Core99 machines, the
	 * PMU keeps sending us environment messages; we should find a way
	 * to either fix IDE or make it call pmu_suspend() before masking
	 * interrupts. This can also happen while scrolling with some fbdevs.
	 */
	do {
		pmu_poll();
	} while (pmu_state != idle);

	return 0;
}

arch_initcall(via_pmu_start);

/*
 * This has to be done after pci_init, which is a subsys_initcall.
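 *
 * via_pmu_dev_init() below registers the PMU backlight (when configured),
 * works out how many batteries the machine has, and creates the /proc/pmu
 * entries.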
511 */ 512 static int __init via_pmu_dev_init(void) 513 { 514 if (pmu_state == uninitialized) 515 return -ENODEV; 516 517 #ifdef CONFIG_PMAC_BACKLIGHT 518 /* Initialize backlight */ 519 pmu_backlight_init(); 520 #endif 521 522 #ifdef CONFIG_PPC32 523 if (of_machine_is_compatible("AAPL,3400/2400") || 524 of_machine_is_compatible("AAPL,3500")) { 525 int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO, 526 NULL, PMAC_MB_INFO_MODEL, 0); 527 pmu_battery_count = 1; 528 if (mb == PMAC_TYPE_COMET) 529 pmu_batteries[0].flags |= PMU_BATT_TYPE_COMET; 530 else 531 pmu_batteries[0].flags |= PMU_BATT_TYPE_HOOPER; 532 } else if (of_machine_is_compatible("AAPL,PowerBook1998") || 533 of_machine_is_compatible("PowerBook1,1")) { 534 pmu_battery_count = 2; 535 pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART; 536 pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART; 537 } else { 538 struct device_node* prim = 539 of_find_node_by_name(NULL, "power-mgt"); 540 const u32 *prim_info = NULL; 541 if (prim) 542 prim_info = of_get_property(prim, "prim-info", NULL); 543 if (prim_info) { 544 /* Other stuffs here yet unknown */ 545 pmu_battery_count = (prim_info[6] >> 16) & 0xff; 546 pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART; 547 if (pmu_battery_count > 1) 548 pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART; 549 } 550 of_node_put(prim); 551 } 552 #endif /* CONFIG_PPC32 */ 553 554 /* Create /proc/pmu */ 555 proc_pmu_root = proc_mkdir("pmu", NULL); 556 if (proc_pmu_root) { 557 long i; 558 559 for (i=0; i<pmu_battery_count; i++) { 560 char title[16]; 561 sprintf(title, "battery_%ld", i); 562 proc_pmu_batt[i] = proc_create_single_data(title, 0, 563 proc_pmu_root, pmu_battery_proc_show, 564 (void *)i); 565 } 566 567 proc_pmu_info = proc_create_single("info", 0, proc_pmu_root, 568 pmu_info_proc_show); 569 proc_pmu_irqstats = proc_create_single("interrupts", 0, 570 proc_pmu_root, pmu_irqstats_proc_show); 571 proc_pmu_options = proc_create("options", 0600, proc_pmu_root, 572 &pmu_options_proc_ops); 573 } 574 return 0; 575 } 576 577 device_initcall(via_pmu_dev_init); 578 579 static int 580 init_pmu(void) 581 { 582 int timeout; 583 struct adb_request req; 584 585 /* Negate TREQ. Set TACK to input and TREQ to output. */ 586 out_8(&via2[B], in_8(&via2[B]) | TREQ); 587 out_8(&via2[DIRB], (in_8(&via2[DIRB]) | TREQ) & ~TACK); 588 589 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); 590 timeout = 100000; 591 while (!req.complete) { 592 if (--timeout < 0) { 593 printk(KERN_ERR "init_pmu: no response from PMU\n"); 594 return 0; 595 } 596 udelay(10); 597 pmu_poll(); 598 } 599 600 /* ack all pending interrupts */ 601 timeout = 100000; 602 interrupt_data[0][0] = 1; 603 while (interrupt_data[0][0] || pmu_state != idle) { 604 if (--timeout < 0) { 605 printk(KERN_ERR "init_pmu: timed out acking intrs\n"); 606 return 0; 607 } 608 if (pmu_state == idle) 609 adb_int_pending = 1; 610 via_pmu_interrupt(0, NULL); 611 udelay(10); 612 } 613 614 /* Tell PMU we are ready. 
*/ 615 if (pmu_kind == PMU_KEYLARGO_BASED) { 616 pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2); 617 while (!req.complete) 618 pmu_poll(); 619 } 620 621 /* Read PMU version */ 622 pmu_request(&req, NULL, 1, PMU_GET_VERSION); 623 pmu_wait_complete(&req); 624 if (req.reply_len > 0) 625 pmu_version = req.reply[0]; 626 627 /* Read server mode setting */ 628 if (pmu_kind == PMU_KEYLARGO_BASED) { 629 pmu_request(&req, NULL, 2, PMU_POWER_EVENTS, 630 PMU_PWR_GET_POWERUP_EVENTS); 631 pmu_wait_complete(&req); 632 if (req.reply_len == 2) { 633 if (req.reply[1] & PMU_PWR_WAKEUP_AC_INSERT) 634 option_server_mode = 1; 635 printk(KERN_INFO "via-pmu: Server Mode is %s\n", 636 option_server_mode ? "enabled" : "disabled"); 637 } 638 } 639 640 printk(KERN_INFO "PMU driver v%d initialized for %s, firmware: %02x\n", 641 PMU_DRIVER_VERSION, pbook_type[pmu_kind], pmu_version); 642 643 return 1; 644 } 645 646 int 647 pmu_get_model(void) 648 { 649 return pmu_kind; 650 } 651 652 static void pmu_set_server_mode(int server_mode) 653 { 654 struct adb_request req; 655 656 if (pmu_kind != PMU_KEYLARGO_BASED) 657 return; 658 659 option_server_mode = server_mode; 660 pmu_request(&req, NULL, 2, PMU_POWER_EVENTS, PMU_PWR_GET_POWERUP_EVENTS); 661 pmu_wait_complete(&req); 662 if (req.reply_len < 2) 663 return; 664 if (server_mode) 665 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, 666 PMU_PWR_SET_POWERUP_EVENTS, 667 req.reply[0], PMU_PWR_WAKEUP_AC_INSERT); 668 else 669 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, 670 PMU_PWR_CLR_POWERUP_EVENTS, 671 req.reply[0], PMU_PWR_WAKEUP_AC_INSERT); 672 pmu_wait_complete(&req); 673 } 674 675 /* This new version of the code for 2400/3400/3500 powerbooks 676 * is inspired from the implementation in gkrellm-pmu 677 */ 678 static void 679 done_battery_state_ohare(struct adb_request* req) 680 { 681 #ifdef CONFIG_PPC_PMAC 682 /* format: 683 * [0] : flags 684 * 0x01 : AC indicator 685 * 0x02 : charging 686 * 0x04 : battery exist 687 * 0x08 : 688 * 0x10 : 689 * 0x20 : full charged 690 * 0x40 : pcharge reset 691 * 0x80 : battery exist 692 * 693 * [1][2] : battery voltage 694 * [3] : CPU temperature 695 * [4] : battery temperature 696 * [5] : current 697 * [6][7] : pcharge 698 * --tkoba 699 */ 700 unsigned int bat_flags = PMU_BATT_TYPE_HOOPER; 701 long pcharge, charge, vb, vmax, lmax; 702 long vmax_charging, vmax_charged; 703 long amperage, voltage, time, max; 704 int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO, 705 NULL, PMAC_MB_INFO_MODEL, 0); 706 707 if (req->reply[0] & 0x01) 708 pmu_power_flags |= PMU_PWR_AC_PRESENT; 709 else 710 pmu_power_flags &= ~PMU_PWR_AC_PRESENT; 711 712 if (mb == PMAC_TYPE_COMET) { 713 vmax_charged = 189; 714 vmax_charging = 213; 715 lmax = 6500; 716 } else { 717 vmax_charged = 330; 718 vmax_charging = 330; 719 lmax = 6500; 720 } 721 vmax = vmax_charged; 722 723 /* If battery installed */ 724 if (req->reply[0] & 0x04) { 725 bat_flags |= PMU_BATT_PRESENT; 726 if (req->reply[0] & 0x02) 727 bat_flags |= PMU_BATT_CHARGING; 728 vb = (req->reply[1] << 8) | req->reply[2]; 729 voltage = (vb * 265 + 72665) / 10; 730 amperage = req->reply[5]; 731 if ((req->reply[0] & 0x01) == 0) { 732 if (amperage > 200) 733 vb += ((amperage - 200) * 15)/100; 734 } else if (req->reply[0] & 0x02) { 735 vb = (vb * 97) / 100; 736 vmax = vmax_charging; 737 } 738 charge = (100 * vb) / vmax; 739 if (req->reply[0] & 0x40) { 740 pcharge = (req->reply[6] << 8) + req->reply[7]; 741 if (pcharge > lmax) 742 pcharge = lmax; 743 pcharge *= 100; 744 pcharge = 100 - pcharge / lmax; 745 if (pcharge < charge) 746 
charge = pcharge; 747 } 748 if (amperage > 0) 749 time = (charge * 16440) / amperage; 750 else 751 time = 0; 752 max = 100; 753 amperage = -amperage; 754 } else 755 charge = max = amperage = voltage = time = 0; 756 757 pmu_batteries[pmu_cur_battery].flags = bat_flags; 758 pmu_batteries[pmu_cur_battery].charge = charge; 759 pmu_batteries[pmu_cur_battery].max_charge = max; 760 pmu_batteries[pmu_cur_battery].amperage = amperage; 761 pmu_batteries[pmu_cur_battery].voltage = voltage; 762 pmu_batteries[pmu_cur_battery].time_remaining = time; 763 #endif /* CONFIG_PPC_PMAC */ 764 765 clear_bit(0, &async_req_locks); 766 } 767 768 static void 769 done_battery_state_smart(struct adb_request* req) 770 { 771 /* format: 772 * [0] : format of this structure (known: 3,4,5) 773 * [1] : flags 774 * 775 * format 3 & 4: 776 * 777 * [2] : charge 778 * [3] : max charge 779 * [4] : current 780 * [5] : voltage 781 * 782 * format 5: 783 * 784 * [2][3] : charge 785 * [4][5] : max charge 786 * [6][7] : current 787 * [8][9] : voltage 788 */ 789 790 unsigned int bat_flags = PMU_BATT_TYPE_SMART; 791 int amperage; 792 unsigned int capa, max, voltage; 793 794 if (req->reply[1] & 0x01) 795 pmu_power_flags |= PMU_PWR_AC_PRESENT; 796 else 797 pmu_power_flags &= ~PMU_PWR_AC_PRESENT; 798 799 800 capa = max = amperage = voltage = 0; 801 802 if (req->reply[1] & 0x04) { 803 bat_flags |= PMU_BATT_PRESENT; 804 switch(req->reply[0]) { 805 case 3: 806 case 4: capa = req->reply[2]; 807 max = req->reply[3]; 808 amperage = *((signed char *)&req->reply[4]); 809 voltage = req->reply[5]; 810 break; 811 case 5: capa = (req->reply[2] << 8) | req->reply[3]; 812 max = (req->reply[4] << 8) | req->reply[5]; 813 amperage = *((signed short *)&req->reply[6]); 814 voltage = (req->reply[8] << 8) | req->reply[9]; 815 break; 816 default: 817 pr_warn("pmu.c: unrecognized battery info, " 818 "len: %d, %4ph\n", req->reply_len, 819 req->reply); 820 break; 821 } 822 } 823 824 if ((req->reply[1] & 0x01) && (amperage > 0)) 825 bat_flags |= PMU_BATT_CHARGING; 826 827 pmu_batteries[pmu_cur_battery].flags = bat_flags; 828 pmu_batteries[pmu_cur_battery].charge = capa; 829 pmu_batteries[pmu_cur_battery].max_charge = max; 830 pmu_batteries[pmu_cur_battery].amperage = amperage; 831 pmu_batteries[pmu_cur_battery].voltage = voltage; 832 if (amperage) { 833 if ((req->reply[1] & 0x01) && (amperage > 0)) 834 pmu_batteries[pmu_cur_battery].time_remaining 835 = ((max-capa) * 3600) / amperage; 836 else 837 pmu_batteries[pmu_cur_battery].time_remaining 838 = (capa * 3600) / (-amperage); 839 } else 840 pmu_batteries[pmu_cur_battery].time_remaining = 0; 841 842 pmu_cur_battery = (pmu_cur_battery + 1) % pmu_battery_count; 843 844 clear_bit(0, &async_req_locks); 845 } 846 847 static void 848 query_battery_state(void) 849 { 850 if (test_and_set_bit(0, &async_req_locks)) 851 return; 852 if (pmu_kind == PMU_OHARE_BASED) 853 pmu_request(&batt_req, done_battery_state_ohare, 854 1, PMU_BATTERY_STATE); 855 else 856 pmu_request(&batt_req, done_battery_state_smart, 857 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1); 858 } 859 860 static int pmu_info_proc_show(struct seq_file *m, void *v) 861 { 862 seq_printf(m, "PMU driver version : %d\n", PMU_DRIVER_VERSION); 863 seq_printf(m, "PMU firmware version : %02x\n", pmu_version); 864 seq_printf(m, "AC Power : %d\n", 865 ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0) || pmu_battery_count == 0); 866 seq_printf(m, "Battery count : %d\n", pmu_battery_count); 867 868 return 0; 869 } 870 871 static int pmu_irqstats_proc_show(struct seq_file *m, void 
*v) 872 { 873 int i; 874 static const char *irq_names[NUM_IRQ_STATS] = { 875 "Unknown interrupt (type 0)", 876 "Unknown interrupt (type 1)", 877 "PC-Card eject button", 878 "Sound/Brightness button", 879 "ADB message", 880 "Battery state change", 881 "Environment interrupt", 882 "Tick timer", 883 "Ghost interrupt (zero len)", 884 "Empty interrupt (empty mask)", 885 "Max irqs in a row", 886 "Total CB1 triggered events", 887 "Total GPIO1 triggered events", 888 }; 889 890 for (i = 0; i < NUM_IRQ_STATS; i++) { 891 seq_printf(m, " %2u: %10u (%s)\n", 892 i, pmu_irq_stats[i], irq_names[i]); 893 } 894 return 0; 895 } 896 897 static int pmu_battery_proc_show(struct seq_file *m, void *v) 898 { 899 long batnum = (long)m->private; 900 901 seq_putc(m, '\n'); 902 seq_printf(m, "flags : %08x\n", pmu_batteries[batnum].flags); 903 seq_printf(m, "charge : %d\n", pmu_batteries[batnum].charge); 904 seq_printf(m, "max_charge : %d\n", pmu_batteries[batnum].max_charge); 905 seq_printf(m, "current : %d\n", pmu_batteries[batnum].amperage); 906 seq_printf(m, "voltage : %d\n", pmu_batteries[batnum].voltage); 907 seq_printf(m, "time rem. : %d\n", pmu_batteries[batnum].time_remaining); 908 return 0; 909 } 910 911 static int pmu_options_proc_show(struct seq_file *m, void *v) 912 { 913 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 914 if (pmu_kind == PMU_KEYLARGO_BASED && 915 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0) 916 seq_printf(m, "lid_wakeup=%d\n", option_lid_wakeup); 917 #endif 918 if (pmu_kind == PMU_KEYLARGO_BASED) 919 seq_printf(m, "server_mode=%d\n", option_server_mode); 920 921 return 0; 922 } 923 924 static int pmu_options_proc_open(struct inode *inode, struct file *file) 925 { 926 return single_open(file, pmu_options_proc_show, NULL); 927 } 928 929 static ssize_t pmu_options_proc_write(struct file *file, 930 const char __user *buffer, size_t count, loff_t *pos) 931 { 932 char tmp[33]; 933 char *label, *val; 934 size_t fcount = count; 935 936 if (!count) 937 return -EINVAL; 938 if (count > 32) 939 count = 32; 940 if (copy_from_user(tmp, buffer, count)) 941 return -EFAULT; 942 tmp[count] = 0; 943 944 label = tmp; 945 while(*label == ' ') 946 label++; 947 val = label; 948 while(*val && (*val != '=')) { 949 if (*val == ' ') 950 *val = 0; 951 val++; 952 } 953 if ((*val) == 0) 954 return -EINVAL; 955 *(val++) = 0; 956 while(*val == ' ') 957 val++; 958 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 959 if (pmu_kind == PMU_KEYLARGO_BASED && 960 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0) 961 if (!strcmp(label, "lid_wakeup")) 962 option_lid_wakeup = ((*val) == '1'); 963 #endif 964 if (pmu_kind == PMU_KEYLARGO_BASED && !strcmp(label, "server_mode")) { 965 int new_value; 966 new_value = ((*val) == '1'); 967 if (new_value != option_server_mode) 968 pmu_set_server_mode(new_value); 969 } 970 return fcount; 971 } 972 973 static const struct proc_ops pmu_options_proc_ops = { 974 .proc_open = pmu_options_proc_open, 975 .proc_read = seq_read, 976 .proc_lseek = seq_lseek, 977 .proc_release = single_release, 978 .proc_write = pmu_options_proc_write, 979 }; 980 981 #ifdef CONFIG_ADB 982 /* Send an ADB command */ 983 static int pmu_send_request(struct adb_request *req, int sync) 984 { 985 int i, ret; 986 987 if (pmu_state == uninitialized || !pmu_fully_inited) { 988 req->complete = 1; 989 return -ENXIO; 990 } 991 992 ret = -EINVAL; 993 994 switch (req->data[0]) { 995 case PMU_PACKET: 996 for (i = 0; i < req->nbytes - 1; ++i) 997 req->data[i] = req->data[i+1]; 998 --req->nbytes; 999 if 
(pmu_data_len[req->data[0]][1] != 0) { 1000 req->reply[0] = ADB_RET_OK; 1001 req->reply_len = 1; 1002 } else 1003 req->reply_len = 0; 1004 ret = pmu_queue_request(req); 1005 break; 1006 case CUDA_PACKET: 1007 switch (req->data[1]) { 1008 case CUDA_GET_TIME: 1009 if (req->nbytes != 2) 1010 break; 1011 req->data[0] = PMU_READ_RTC; 1012 req->nbytes = 1; 1013 req->reply_len = 3; 1014 req->reply[0] = CUDA_PACKET; 1015 req->reply[1] = 0; 1016 req->reply[2] = CUDA_GET_TIME; 1017 ret = pmu_queue_request(req); 1018 break; 1019 case CUDA_SET_TIME: 1020 if (req->nbytes != 6) 1021 break; 1022 req->data[0] = PMU_SET_RTC; 1023 req->nbytes = 5; 1024 for (i = 1; i <= 4; ++i) 1025 req->data[i] = req->data[i+1]; 1026 req->reply_len = 3; 1027 req->reply[0] = CUDA_PACKET; 1028 req->reply[1] = 0; 1029 req->reply[2] = CUDA_SET_TIME; 1030 ret = pmu_queue_request(req); 1031 break; 1032 } 1033 break; 1034 case ADB_PACKET: 1035 if (!pmu_has_adb) 1036 return -ENXIO; 1037 for (i = req->nbytes - 1; i > 1; --i) 1038 req->data[i+2] = req->data[i]; 1039 req->data[3] = req->nbytes - 2; 1040 req->data[2] = pmu_adb_flags; 1041 /*req->data[1] = req->data[1];*/ 1042 req->data[0] = PMU_ADB_CMD; 1043 req->nbytes += 2; 1044 req->reply_expected = 1; 1045 req->reply_len = 0; 1046 ret = pmu_queue_request(req); 1047 break; 1048 } 1049 if (ret) { 1050 req->complete = 1; 1051 return ret; 1052 } 1053 1054 if (sync) 1055 while (!req->complete) 1056 pmu_poll(); 1057 1058 return 0; 1059 } 1060 1061 /* Enable/disable autopolling */ 1062 static int __pmu_adb_autopoll(int devs) 1063 { 1064 struct adb_request req; 1065 1066 if (devs) { 1067 pmu_request(&req, NULL, 5, PMU_ADB_CMD, 0, 0x86, 1068 adb_dev_map >> 8, adb_dev_map); 1069 pmu_adb_flags = 2; 1070 } else { 1071 pmu_request(&req, NULL, 1, PMU_ADB_POLL_OFF); 1072 pmu_adb_flags = 0; 1073 } 1074 while (!req.complete) 1075 pmu_poll(); 1076 return 0; 1077 } 1078 1079 static int pmu_adb_autopoll(int devs) 1080 { 1081 if (pmu_state == uninitialized || !pmu_fully_inited || !pmu_has_adb) 1082 return -ENXIO; 1083 1084 adb_dev_map = devs; 1085 return __pmu_adb_autopoll(devs); 1086 } 1087 1088 /* Reset the ADB bus */ 1089 static int pmu_adb_reset_bus(void) 1090 { 1091 struct adb_request req; 1092 int save_autopoll = adb_dev_map; 1093 1094 if (pmu_state == uninitialized || !pmu_fully_inited || !pmu_has_adb) 1095 return -ENXIO; 1096 1097 /* anyone got a better idea?? */ 1098 __pmu_adb_autopoll(0); 1099 1100 req.nbytes = 4; 1101 req.done = NULL; 1102 req.data[0] = PMU_ADB_CMD; 1103 req.data[1] = ADB_BUSRESET; 1104 req.data[2] = 0; 1105 req.data[3] = 0; 1106 req.data[4] = 0; 1107 req.reply_len = 0; 1108 req.reply_expected = 1; 1109 if (pmu_queue_request(&req) != 0) { 1110 printk(KERN_ERR "pmu_adb_reset_bus: pmu_queue_request failed\n"); 1111 return -EIO; 1112 } 1113 pmu_wait_complete(&req); 1114 1115 if (save_autopoll != 0) 1116 __pmu_adb_autopoll(save_autopoll); 1117 1118 return 0; 1119 } 1120 #endif /* CONFIG_ADB */ 1121 1122 /* Construct and send a pmu request */ 1123 int 1124 pmu_request(struct adb_request *req, void (*done)(struct adb_request *), 1125 int nbytes, ...) 
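/*
 * Calling convention (illustrative; both patterns already appear in this
 * file): the varargs are the PMU command byte followed by its data bytes,
 * and nbytes counts all of them, e.g.
 *
 *	struct adb_request req;
 *
 *	pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
 *	pmu_wait_complete(&req);
 *
 * Passing a completion callback instead of NULL makes the request
 * asynchronous, as query_battery_state() does above.
 */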
1126 { 1127 va_list list; 1128 int i; 1129 1130 if (pmu_state == uninitialized) 1131 return -ENXIO; 1132 1133 if (nbytes < 0 || nbytes > 32) { 1134 printk(KERN_ERR "pmu_request: bad nbytes (%d)\n", nbytes); 1135 req->complete = 1; 1136 return -EINVAL; 1137 } 1138 req->nbytes = nbytes; 1139 req->done = done; 1140 va_start(list, nbytes); 1141 for (i = 0; i < nbytes; ++i) 1142 req->data[i] = va_arg(list, int); 1143 va_end(list); 1144 req->reply_len = 0; 1145 req->reply_expected = 0; 1146 return pmu_queue_request(req); 1147 } 1148 1149 int 1150 pmu_queue_request(struct adb_request *req) 1151 { 1152 unsigned long flags; 1153 int nsend; 1154 1155 if (pmu_state == uninitialized) { 1156 req->complete = 1; 1157 return -ENXIO; 1158 } 1159 if (req->nbytes <= 0) { 1160 req->complete = 1; 1161 return 0; 1162 } 1163 nsend = pmu_data_len[req->data[0]][0]; 1164 if (nsend >= 0 && req->nbytes != nsend + 1) { 1165 req->complete = 1; 1166 return -EINVAL; 1167 } 1168 1169 req->next = NULL; 1170 req->sent = 0; 1171 req->complete = 0; 1172 1173 spin_lock_irqsave(&pmu_lock, flags); 1174 if (current_req) { 1175 last_req->next = req; 1176 last_req = req; 1177 } else { 1178 current_req = req; 1179 last_req = req; 1180 if (pmu_state == idle) 1181 pmu_start(); 1182 } 1183 spin_unlock_irqrestore(&pmu_lock, flags); 1184 1185 return 0; 1186 } 1187 1188 static inline void 1189 wait_for_ack(void) 1190 { 1191 /* Sightly increased the delay, I had one occurrence of the message 1192 * reported 1193 */ 1194 int timeout = 4000; 1195 while ((in_8(&via2[B]) & TACK) == 0) { 1196 if (--timeout < 0) { 1197 printk(KERN_ERR "PMU not responding (!ack)\n"); 1198 return; 1199 } 1200 udelay(10); 1201 } 1202 } 1203 1204 /* New PMU seems to be very sensitive to those timings, so we make sure 1205 * PCI is flushed immediately */ 1206 static inline void 1207 send_byte(int x) 1208 { 1209 out_8(&via1[ACR], in_8(&via1[ACR]) | SR_OUT | SR_EXT); 1210 out_8(&via1[SR], x); 1211 out_8(&via2[B], in_8(&via2[B]) & ~TREQ); /* assert TREQ */ 1212 (void)in_8(&via2[B]); 1213 } 1214 1215 static inline void 1216 recv_byte(void) 1217 { 1218 out_8(&via1[ACR], (in_8(&via1[ACR]) & ~SR_OUT) | SR_EXT); 1219 in_8(&via1[SR]); /* resets SR */ 1220 out_8(&via2[B], in_8(&via2[B]) & ~TREQ); 1221 (void)in_8(&via2[B]); 1222 } 1223 1224 static inline void 1225 pmu_done(struct adb_request *req) 1226 { 1227 void (*done)(struct adb_request *) = req->done; 1228 mb(); 1229 req->complete = 1; 1230 /* Here, we assume that if the request has a done member, the 1231 * struct request will survive to setting req->complete to 1 1232 */ 1233 if (done) 1234 (*done)(req); 1235 } 1236 1237 static void 1238 pmu_start(void) 1239 { 1240 struct adb_request *req; 1241 1242 /* assert pmu_state == idle */ 1243 /* get the packet to send */ 1244 req = current_req; 1245 if (!req || pmu_state != idle 1246 || (/*req->reply_expected && */req_awaiting_reply)) 1247 return; 1248 1249 pmu_state = sending; 1250 data_index = 1; 1251 data_len = pmu_data_len[req->data[0]][0]; 1252 1253 /* Sounds safer to make sure ACK is high before writing. 
This helped 1254 * kill a problem with ADB and some iBooks 1255 */ 1256 wait_for_ack(); 1257 /* set the shift register to shift out and send a byte */ 1258 send_byte(req->data[0]); 1259 } 1260 1261 void 1262 pmu_poll(void) 1263 { 1264 if (pmu_state == uninitialized) 1265 return; 1266 if (disable_poll) 1267 return; 1268 via_pmu_interrupt(0, NULL); 1269 } 1270 1271 void 1272 pmu_poll_adb(void) 1273 { 1274 if (pmu_state == uninitialized) 1275 return; 1276 if (disable_poll) 1277 return; 1278 /* Kicks ADB read when PMU is suspended */ 1279 adb_int_pending = 1; 1280 do { 1281 via_pmu_interrupt(0, NULL); 1282 } while (pmu_suspended && (adb_int_pending || pmu_state != idle 1283 || req_awaiting_reply)); 1284 } 1285 1286 void 1287 pmu_wait_complete(struct adb_request *req) 1288 { 1289 if (pmu_state == uninitialized) 1290 return; 1291 while((pmu_state != idle && pmu_state != locked) || !req->complete) 1292 via_pmu_interrupt(0, NULL); 1293 } 1294 1295 /* This function loops until the PMU is idle and prevents it from 1296 * anwsering to ADB interrupts. pmu_request can still be called. 1297 * This is done to avoid spurrious shutdowns when we know we'll have 1298 * interrupts switched off for a long time 1299 */ 1300 void 1301 pmu_suspend(void) 1302 { 1303 unsigned long flags; 1304 1305 if (pmu_state == uninitialized) 1306 return; 1307 1308 spin_lock_irqsave(&pmu_lock, flags); 1309 pmu_suspended++; 1310 if (pmu_suspended > 1) { 1311 spin_unlock_irqrestore(&pmu_lock, flags); 1312 return; 1313 } 1314 1315 do { 1316 spin_unlock_irqrestore(&pmu_lock, flags); 1317 if (req_awaiting_reply) 1318 adb_int_pending = 1; 1319 via_pmu_interrupt(0, NULL); 1320 spin_lock_irqsave(&pmu_lock, flags); 1321 if (!adb_int_pending && pmu_state == idle && !req_awaiting_reply) { 1322 if (gpio_irq >= 0) 1323 disable_irq_nosync(gpio_irq); 1324 out_8(&via1[IER], CB1_INT | IER_CLR); 1325 spin_unlock_irqrestore(&pmu_lock, flags); 1326 break; 1327 } 1328 } while (1); 1329 } 1330 1331 void 1332 pmu_resume(void) 1333 { 1334 unsigned long flags; 1335 1336 if (pmu_state == uninitialized || pmu_suspended < 1) 1337 return; 1338 1339 spin_lock_irqsave(&pmu_lock, flags); 1340 pmu_suspended--; 1341 if (pmu_suspended > 0) { 1342 spin_unlock_irqrestore(&pmu_lock, flags); 1343 return; 1344 } 1345 adb_int_pending = 1; 1346 if (gpio_irq >= 0) 1347 enable_irq(gpio_irq); 1348 out_8(&via1[IER], CB1_INT | IER_SET); 1349 spin_unlock_irqrestore(&pmu_lock, flags); 1350 pmu_poll(); 1351 } 1352 1353 /* Interrupt data could be the result data from an ADB cmd */ 1354 static void 1355 pmu_handle_data(unsigned char *data, int len) 1356 { 1357 unsigned char ints; 1358 int idx; 1359 int i = 0; 1360 1361 asleep = 0; 1362 if (drop_interrupts || len < 1) { 1363 adb_int_pending = 0; 1364 pmu_irq_stats[8]++; 1365 return; 1366 } 1367 1368 /* Get PMU interrupt mask */ 1369 ints = data[0]; 1370 1371 /* Record zero interrupts for stats */ 1372 if (ints == 0) 1373 pmu_irq_stats[9]++; 1374 1375 /* Hack to deal with ADB autopoll flag */ 1376 if (ints & PMU_INT_ADB) 1377 ints &= ~(PMU_INT_ADB_AUTO | PMU_INT_AUTO_SRQ_POLL); 1378 1379 next: 1380 if (ints == 0) { 1381 if (i > pmu_irq_stats[10]) 1382 pmu_irq_stats[10] = i; 1383 return; 1384 } 1385 i++; 1386 1387 idx = ffs(ints) - 1; 1388 ints &= ~BIT(idx); 1389 1390 pmu_irq_stats[idx]++; 1391 1392 /* Note: for some reason, we get an interrupt with len=1, 1393 * data[0]==0 after each normal ADB interrupt, at least 1394 * on the Pismo. Still investigating... 
--BenH 1395 */ 1396 switch (BIT(idx)) { 1397 case PMU_INT_ADB: 1398 if ((data[0] & PMU_INT_ADB_AUTO) == 0) { 1399 struct adb_request *req = req_awaiting_reply; 1400 if (!req) { 1401 printk(KERN_ERR "PMU: extra ADB reply\n"); 1402 return; 1403 } 1404 req_awaiting_reply = NULL; 1405 if (len <= 2) 1406 req->reply_len = 0; 1407 else { 1408 memcpy(req->reply, data + 1, len - 1); 1409 req->reply_len = len - 1; 1410 } 1411 pmu_done(req); 1412 } else { 1413 #ifdef CONFIG_XMON 1414 if (len == 4 && data[1] == 0x2c) { 1415 extern int xmon_wants_key, xmon_adb_keycode; 1416 if (xmon_wants_key) { 1417 xmon_adb_keycode = data[2]; 1418 return; 1419 } 1420 } 1421 #endif /* CONFIG_XMON */ 1422 #ifdef CONFIG_ADB 1423 /* 1424 * XXX On the [23]400 the PMU gives us an up 1425 * event for keycodes 0x74 or 0x75 when the PC 1426 * card eject buttons are released, so we 1427 * ignore those events. 1428 */ 1429 if (!(pmu_kind == PMU_OHARE_BASED && len == 4 1430 && data[1] == 0x2c && data[3] == 0xff 1431 && (data[2] & ~1) == 0xf4)) 1432 adb_input(data+1, len-1, 1); 1433 #endif /* CONFIG_ADB */ 1434 } 1435 break; 1436 1437 /* Sound/brightness button pressed */ 1438 case PMU_INT_SNDBRT: 1439 #ifdef CONFIG_PMAC_BACKLIGHT 1440 if (len == 3) 1441 pmac_backlight_set_legacy_brightness_pmu(data[1] >> 4); 1442 #endif 1443 break; 1444 1445 /* Tick interrupt */ 1446 case PMU_INT_TICK: 1447 /* Environment or tick interrupt, query batteries */ 1448 if (pmu_battery_count) { 1449 if ((--query_batt_timer) == 0) { 1450 query_battery_state(); 1451 query_batt_timer = BATTERY_POLLING_COUNT; 1452 } 1453 } 1454 break; 1455 1456 case PMU_INT_ENVIRONMENT: 1457 if (pmu_battery_count) 1458 query_battery_state(); 1459 pmu_pass_intr(data, len); 1460 /* len == 6 is probably a bad check. But how do I 1461 * know what PMU versions send what events here? 
*/ 1462 if (len == 6) { 1463 via_pmu_event(PMU_EVT_POWER, !!(data[1]&8)); 1464 via_pmu_event(PMU_EVT_LID, data[1]&1); 1465 } 1466 break; 1467 1468 default: 1469 pmu_pass_intr(data, len); 1470 } 1471 goto next; 1472 } 1473 1474 static struct adb_request* 1475 pmu_sr_intr(void) 1476 { 1477 struct adb_request *req; 1478 int bite = 0; 1479 1480 if (in_8(&via2[B]) & TREQ) { 1481 printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via2[B])); 1482 return NULL; 1483 } 1484 /* The ack may not yet be low when we get the interrupt */ 1485 while ((in_8(&via2[B]) & TACK) != 0) 1486 ; 1487 1488 /* if reading grab the byte, and reset the interrupt */ 1489 if (pmu_state == reading || pmu_state == reading_intr) 1490 bite = in_8(&via1[SR]); 1491 1492 /* reset TREQ and wait for TACK to go high */ 1493 out_8(&via2[B], in_8(&via2[B]) | TREQ); 1494 wait_for_ack(); 1495 1496 switch (pmu_state) { 1497 case sending: 1498 req = current_req; 1499 if (data_len < 0) { 1500 data_len = req->nbytes - 1; 1501 send_byte(data_len); 1502 break; 1503 } 1504 if (data_index <= data_len) { 1505 send_byte(req->data[data_index++]); 1506 break; 1507 } 1508 req->sent = 1; 1509 data_len = pmu_data_len[req->data[0]][1]; 1510 if (data_len == 0) { 1511 pmu_state = idle; 1512 current_req = req->next; 1513 if (req->reply_expected) 1514 req_awaiting_reply = req; 1515 else 1516 return req; 1517 } else { 1518 pmu_state = reading; 1519 data_index = 0; 1520 reply_ptr = req->reply + req->reply_len; 1521 recv_byte(); 1522 } 1523 break; 1524 1525 case intack: 1526 data_index = 0; 1527 data_len = -1; 1528 pmu_state = reading_intr; 1529 reply_ptr = interrupt_data[int_data_last]; 1530 recv_byte(); 1531 if (gpio_irq >= 0 && !gpio_irq_enabled) { 1532 enable_irq(gpio_irq); 1533 gpio_irq_enabled = 1; 1534 } 1535 break; 1536 1537 case reading: 1538 case reading_intr: 1539 if (data_len == -1) { 1540 data_len = bite; 1541 if (bite > 32) 1542 printk(KERN_ERR "PMU: bad reply len %d\n", bite); 1543 } else if (data_index < 32) { 1544 reply_ptr[data_index++] = bite; 1545 } 1546 if (data_index < data_len) { 1547 recv_byte(); 1548 break; 1549 } 1550 1551 if (pmu_state == reading_intr) { 1552 pmu_state = idle; 1553 int_data_state[int_data_last] = int_data_ready; 1554 interrupt_data_len[int_data_last] = data_len; 1555 } else { 1556 req = current_req; 1557 /* 1558 * For PMU sleep and freq change requests, we lock the 1559 * PMU until it's explicitly unlocked. This avoids any 1560 * spurrious event polling getting in 1561 */ 1562 current_req = req->next; 1563 req->reply_len += data_index; 1564 if (req->data[0] == PMU_SLEEP || req->data[0] == PMU_CPU_SPEED) 1565 pmu_state = locked; 1566 else 1567 pmu_state = idle; 1568 return req; 1569 } 1570 break; 1571 1572 default: 1573 printk(KERN_ERR "via_pmu_interrupt: unknown state %d?\n", 1574 pmu_state); 1575 } 1576 return NULL; 1577 } 1578 1579 static irqreturn_t 1580 via_pmu_interrupt(int irq, void *arg) 1581 { 1582 unsigned long flags; 1583 int intr; 1584 int nloop = 0; 1585 int int_data = -1; 1586 struct adb_request *req = NULL; 1587 int handled = 0; 1588 1589 /* This is a bit brutal, we can probably do better */ 1590 spin_lock_irqsave(&pmu_lock, flags); 1591 ++disable_poll; 1592 1593 for (;;) { 1594 /* On 68k Macs, VIA interrupts are dispatched individually. 1595 * Unless we are polling, the relevant IRQ flag has already 1596 * been cleared. 
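 * On PowerMac (and on 68k when polling with irq == 0) both SR_INT and
 * CB1_INT are instead read from the IFR and acknowledged by writing them
 * back, which is what the first branch below does; the 68k interrupt path
 * then derives the pending source from the irq number alone.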
1597 */ 1598 intr = 0; 1599 if (IS_ENABLED(CONFIG_PPC_PMAC) || !irq) { 1600 intr = in_8(&via1[IFR]) & (SR_INT | CB1_INT); 1601 out_8(&via1[IFR], intr); 1602 } 1603 #ifndef CONFIG_PPC_PMAC 1604 switch (irq) { 1605 case IRQ_MAC_ADB_CL: 1606 intr = CB1_INT; 1607 break; 1608 case IRQ_MAC_ADB_SR: 1609 intr = SR_INT; 1610 break; 1611 } 1612 #endif 1613 if (intr == 0) 1614 break; 1615 handled = 1; 1616 if (++nloop > 1000) { 1617 printk(KERN_DEBUG "PMU: stuck in intr loop, " 1618 "intr=%x, ier=%x pmu_state=%d\n", 1619 intr, in_8(&via1[IER]), pmu_state); 1620 break; 1621 } 1622 if (intr & CB1_INT) { 1623 adb_int_pending = 1; 1624 pmu_irq_stats[11]++; 1625 } 1626 if (intr & SR_INT) { 1627 req = pmu_sr_intr(); 1628 if (req) 1629 break; 1630 } 1631 #ifndef CONFIG_PPC_PMAC 1632 break; 1633 #endif 1634 } 1635 1636 recheck: 1637 if (pmu_state == idle) { 1638 if (adb_int_pending) { 1639 if (int_data_state[0] == int_data_empty) 1640 int_data_last = 0; 1641 else if (int_data_state[1] == int_data_empty) 1642 int_data_last = 1; 1643 else 1644 goto no_free_slot; 1645 pmu_state = intack; 1646 int_data_state[int_data_last] = int_data_fill; 1647 /* Sounds safer to make sure ACK is high before writing. 1648 * This helped kill a problem with ADB and some iBooks 1649 */ 1650 wait_for_ack(); 1651 send_byte(PMU_INT_ACK); 1652 adb_int_pending = 0; 1653 } else if (current_req) 1654 pmu_start(); 1655 } 1656 no_free_slot: 1657 /* Mark the oldest buffer for flushing */ 1658 if (int_data_state[!int_data_last] == int_data_ready) { 1659 int_data_state[!int_data_last] = int_data_flush; 1660 int_data = !int_data_last; 1661 } else if (int_data_state[int_data_last] == int_data_ready) { 1662 int_data_state[int_data_last] = int_data_flush; 1663 int_data = int_data_last; 1664 } 1665 --disable_poll; 1666 spin_unlock_irqrestore(&pmu_lock, flags); 1667 1668 /* Deal with completed PMU requests outside of the lock */ 1669 if (req) { 1670 pmu_done(req); 1671 req = NULL; 1672 } 1673 1674 /* Deal with interrupt datas outside of the lock */ 1675 if (int_data >= 0) { 1676 pmu_handle_data(interrupt_data[int_data], interrupt_data_len[int_data]); 1677 spin_lock_irqsave(&pmu_lock, flags); 1678 ++disable_poll; 1679 int_data_state[int_data] = int_data_empty; 1680 int_data = -1; 1681 goto recheck; 1682 } 1683 1684 return IRQ_RETVAL(handled); 1685 } 1686 1687 void 1688 pmu_unlock(void) 1689 { 1690 unsigned long flags; 1691 1692 spin_lock_irqsave(&pmu_lock, flags); 1693 if (pmu_state == locked) 1694 pmu_state = idle; 1695 adb_int_pending = 1; 1696 spin_unlock_irqrestore(&pmu_lock, flags); 1697 } 1698 1699 1700 static __maybe_unused irqreturn_t 1701 gpio1_interrupt(int irq, void *arg) 1702 { 1703 unsigned long flags; 1704 1705 if ((in_8(gpio_reg + 0x9) & 0x02) == 0) { 1706 spin_lock_irqsave(&pmu_lock, flags); 1707 if (gpio_irq_enabled > 0) { 1708 disable_irq_nosync(gpio_irq); 1709 gpio_irq_enabled = 0; 1710 } 1711 pmu_irq_stats[12]++; 1712 adb_int_pending = 1; 1713 spin_unlock_irqrestore(&pmu_lock, flags); 1714 via_pmu_interrupt(0, NULL); 1715 return IRQ_HANDLED; 1716 } 1717 return IRQ_NONE; 1718 } 1719 1720 void 1721 pmu_enable_irled(int on) 1722 { 1723 struct adb_request req; 1724 1725 if (pmu_state == uninitialized) 1726 return ; 1727 if (pmu_kind == PMU_KEYLARGO_BASED) 1728 return ; 1729 1730 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, PMU_POW_IRLED | 1731 (on ? 
PMU_POW_ON : PMU_POW_OFF)); 1732 pmu_wait_complete(&req); 1733 } 1734 1735 /* Offset between Unix time (1970-based) and Mac time (1904-based) */ 1736 #define RTC_OFFSET 2082844800 1737 1738 time64_t pmu_get_time(void) 1739 { 1740 struct adb_request req; 1741 u32 now; 1742 1743 if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) 1744 return 0; 1745 pmu_wait_complete(&req); 1746 if (req.reply_len != 4) 1747 pr_err("%s: got %d byte reply\n", __func__, req.reply_len); 1748 now = (req.reply[0] << 24) + (req.reply[1] << 16) + 1749 (req.reply[2] << 8) + req.reply[3]; 1750 return (time64_t)now - RTC_OFFSET; 1751 } 1752 1753 int pmu_set_rtc_time(struct rtc_time *tm) 1754 { 1755 u32 now; 1756 struct adb_request req; 1757 1758 now = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET); 1759 if (pmu_request(&req, NULL, 5, PMU_SET_RTC, 1760 now >> 24, now >> 16, now >> 8, now) < 0) 1761 return -ENXIO; 1762 pmu_wait_complete(&req); 1763 if (req.reply_len != 0) 1764 pr_err("%s: got %d byte reply\n", __func__, req.reply_len); 1765 return 0; 1766 } 1767 1768 void 1769 pmu_restart(void) 1770 { 1771 struct adb_request req; 1772 1773 if (pmu_state == uninitialized) 1774 return; 1775 1776 local_irq_disable(); 1777 1778 drop_interrupts = 1; 1779 1780 if (pmu_kind != PMU_KEYLARGO_BASED) { 1781 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB | 1782 PMU_INT_TICK ); 1783 while(!req.complete) 1784 pmu_poll(); 1785 } 1786 1787 pmu_request(&req, NULL, 1, PMU_RESET); 1788 pmu_wait_complete(&req); 1789 for (;;) 1790 ; 1791 } 1792 1793 void 1794 pmu_shutdown(void) 1795 { 1796 struct adb_request req; 1797 1798 if (pmu_state == uninitialized) 1799 return; 1800 1801 local_irq_disable(); 1802 1803 drop_interrupts = 1; 1804 1805 if (pmu_kind != PMU_KEYLARGO_BASED) { 1806 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB | 1807 PMU_INT_TICK ); 1808 pmu_wait_complete(&req); 1809 } else { 1810 /* Disable server mode on shutdown or we'll just 1811 * wake up again 1812 */ 1813 pmu_set_server_mode(0); 1814 } 1815 1816 pmu_request(&req, NULL, 5, PMU_SHUTDOWN, 1817 'M', 'A', 'T', 'T'); 1818 pmu_wait_complete(&req); 1819 for (;;) 1820 ; 1821 } 1822 1823 int 1824 pmu_present(void) 1825 { 1826 return pmu_state != uninitialized; 1827 } 1828 1829 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 1830 /* 1831 * Put the powerbook to sleep. 
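 *
 * The machine-specific variants below follow broadly similar steps:
 * quiesce the PMU, save VIA (and cache) state where needed, send PMU_SLEEP
 * with the 'MATT' signature, enter the low-power loop (low_sleep_handler()
 * or the MSR_POW loop on the 3400), then restore the chipset, VIA and
 * caches and re-enable PMU interrupts on wake.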
 */

static u32 save_via[8];
static int __fake_sleep;

static void
save_via_state(void)
{
	save_via[0] = in_8(&via1[ANH]);
	save_via[1] = in_8(&via1[DIRA]);
	save_via[2] = in_8(&via1[B]);
	save_via[3] = in_8(&via1[DIRB]);
	save_via[4] = in_8(&via1[PCR]);
	save_via[5] = in_8(&via1[ACR]);
	save_via[6] = in_8(&via1[T1CL]);
	save_via[7] = in_8(&via1[T1CH]);
}
static void
restore_via_state(void)
{
	out_8(&via1[ANH], save_via[0]);
	out_8(&via1[DIRA], save_via[1]);
	out_8(&via1[B], save_via[2]);
	out_8(&via1[DIRB], save_via[3]);
	out_8(&via1[PCR], save_via[4]);
	out_8(&via1[ACR], save_via[5]);
	out_8(&via1[T1CL], save_via[6]);
	out_8(&via1[T1CH], save_via[7]);
	out_8(&via1[IER], IER_CLR | 0x7f);	/* disable all intrs */
	out_8(&via1[IFR], 0x7f);		/* clear IFR */
	out_8(&via1[IER], IER_SET | SR_INT | CB1_INT);
}

#define GRACKLE_PM	(1<<7)
#define GRACKLE_DOZE	(1<<5)
#define GRACKLE_NAP	(1<<4)
#define GRACKLE_SLEEP	(1<<3)

static int powerbook_sleep_grackle(void)
{
	unsigned long save_l2cr;
	unsigned short pmcr1;
	struct adb_request req;
	struct pci_dev *grackle;

	grackle = pci_get_domain_bus_and_slot(0, 0, 0);
	if (!grackle)
		return -ENODEV;

	/* Turn off various things. Darwin does some retry tests here... */
	pmu_request(&req, NULL, 2, PMU_POWER_CTRL0, PMU_POW0_OFF|PMU_POW0_HARD_DRIVE);
	pmu_wait_complete(&req);
	pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
		    PMU_POW_OFF|PMU_POW_BACKLIGHT|PMU_POW_IRLED|PMU_POW_MEDIABAY);
	pmu_wait_complete(&req);

	/* For 750, save backside cache setting and disable it */
	save_l2cr = _get_L2CR();	/* (returns -1 if not available) */

	if (!__fake_sleep) {
		/* Ask the PMU to put us to sleep */
		pmu_request(&req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T');
		pmu_wait_complete(&req);
	}

	/* The VIA state is not expected to survive sleep, so save it now */
	save_via_state();
	/* We shut down some HW */
	pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,1);

	pci_read_config_word(grackle, 0x70, &pmcr1);
	/* Apparently, MacOS uses NAP mode for Grackle ???
*/ 1904 pmcr1 &= ~(GRACKLE_DOZE|GRACKLE_SLEEP); 1905 pmcr1 |= GRACKLE_PM|GRACKLE_NAP; 1906 pci_write_config_word(grackle, 0x70, pmcr1); 1907 1908 /* Call low-level ASM sleep handler */ 1909 if (__fake_sleep) 1910 mdelay(5000); 1911 else 1912 low_sleep_handler(); 1913 1914 /* We're awake again, stop grackle PM */ 1915 pci_read_config_word(grackle, 0x70, &pmcr1); 1916 pmcr1 &= ~(GRACKLE_PM|GRACKLE_DOZE|GRACKLE_SLEEP|GRACKLE_NAP); 1917 pci_write_config_word(grackle, 0x70, pmcr1); 1918 1919 pci_dev_put(grackle); 1920 1921 /* Make sure the PMU is idle */ 1922 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,0); 1923 restore_via_state(); 1924 1925 /* Restore L2 cache */ 1926 if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0) 1927 _set_L2CR(save_l2cr); 1928 1929 /* Restore userland MMU context */ 1930 switch_mmu_context(NULL, current->active_mm, NULL); 1931 1932 /* Power things up */ 1933 pmu_unlock(); 1934 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); 1935 pmu_wait_complete(&req); 1936 pmu_request(&req, NULL, 2, PMU_POWER_CTRL0, 1937 PMU_POW0_ON|PMU_POW0_HARD_DRIVE); 1938 pmu_wait_complete(&req); 1939 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, 1940 PMU_POW_ON|PMU_POW_BACKLIGHT|PMU_POW_CHARGER|PMU_POW_IRLED|PMU_POW_MEDIABAY); 1941 pmu_wait_complete(&req); 1942 1943 return 0; 1944 } 1945 1946 static int 1947 powerbook_sleep_Core99(void) 1948 { 1949 unsigned long save_l2cr; 1950 unsigned long save_l3cr; 1951 struct adb_request req; 1952 1953 if (pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) < 0) { 1954 printk(KERN_ERR "Sleep mode not supported on this machine\n"); 1955 return -ENOSYS; 1956 } 1957 1958 if (num_online_cpus() > 1 || cpu_is_offline(0)) 1959 return -EAGAIN; 1960 1961 /* Stop environment and ADB interrupts */ 1962 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, 0); 1963 pmu_wait_complete(&req); 1964 1965 /* Tell PMU what events will wake us up */ 1966 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_CLR_WAKEUP_EVENTS, 1967 0xff, 0xff); 1968 pmu_wait_complete(&req); 1969 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_SET_WAKEUP_EVENTS, 1970 0, PMU_PWR_WAKEUP_KEY | 1971 (option_lid_wakeup ? PMU_PWR_WAKEUP_LID_OPEN : 0)); 1972 pmu_wait_complete(&req); 1973 1974 /* Save the state of the L2 and L3 caches */ 1975 save_l3cr = _get_L3CR(); /* (returns -1 if not available) */ 1976 save_l2cr = _get_L2CR(); /* (returns -1 if not available) */ 1977 1978 if (!__fake_sleep) { 1979 /* Ask the PMU to put us to sleep */ 1980 pmu_request(&req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T'); 1981 pmu_wait_complete(&req); 1982 } 1983 1984 /* The VIA is supposed not to be restored correctly*/ 1985 save_via_state(); 1986 1987 /* Shut down various ASICs. There's a chance that we can no longer 1988 * talk to the PMU after this, so I moved it to _after_ sending the 1989 * sleep command to it. Still need to be checked. 
1990 */ 1991 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 1); 1992 1993 /* Call low-level ASM sleep handler */ 1994 if (__fake_sleep) 1995 mdelay(5000); 1996 else 1997 low_sleep_handler(); 1998 1999 /* Restore Apple core ASICs state */ 2000 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 0); 2001 2002 /* Restore VIA */ 2003 restore_via_state(); 2004 2005 /* tweak LPJ before cpufreq is there */ 2006 loops_per_jiffy *= 2; 2007 2008 /* Restore video */ 2009 pmac_call_early_video_resume(); 2010 2011 /* Restore L2 cache */ 2012 if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0) 2013 _set_L2CR(save_l2cr); 2014 /* Restore L3 cache */ 2015 if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0) 2016 _set_L3CR(save_l3cr); 2017 2018 /* Restore userland MMU context */ 2019 switch_mmu_context(NULL, current->active_mm, NULL); 2020 2021 /* Tell PMU we are ready */ 2022 pmu_unlock(); 2023 pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2); 2024 pmu_wait_complete(&req); 2025 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); 2026 pmu_wait_complete(&req); 2027 2028 /* Restore LPJ, cpufreq will adjust the cpu frequency */ 2029 loops_per_jiffy /= 2; 2030 2031 return 0; 2032 } 2033 2034 #define PB3400_MEM_CTRL 0xf8000000 2035 #define PB3400_MEM_CTRL_SLEEP 0x70 2036 2037 static void __iomem *pb3400_mem_ctrl; 2038 2039 static void powerbook_sleep_init_3400(void) 2040 { 2041 /* map in the memory controller registers */ 2042 pb3400_mem_ctrl = ioremap(PB3400_MEM_CTRL, 0x100); 2043 if (pb3400_mem_ctrl == NULL) 2044 printk(KERN_WARNING "ioremap failed: sleep won't be possible"); 2045 } 2046 2047 static int powerbook_sleep_3400(void) 2048 { 2049 int i, x; 2050 unsigned int hid0; 2051 unsigned long msr; 2052 struct adb_request sleep_req; 2053 unsigned int __iomem *mem_ctrl_sleep; 2054 2055 if (pb3400_mem_ctrl == NULL) 2056 return -ENOMEM; 2057 mem_ctrl_sleep = pb3400_mem_ctrl + PB3400_MEM_CTRL_SLEEP; 2058 2059 /* Set the memory controller to keep the memory refreshed 2060 while we're asleep */ 2061 for (i = 0x403f; i >= 0x4000; --i) { 2062 out_be32(mem_ctrl_sleep, i); 2063 do { 2064 x = (in_be32(mem_ctrl_sleep) >> 16) & 0x3ff; 2065 } while (x == 0); 2066 if (x >= 0x100) 2067 break; 2068 } 2069 2070 /* Ask the PMU to put us to sleep */ 2071 pmu_request(&sleep_req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T'); 2072 pmu_wait_complete(&sleep_req); 2073 pmu_unlock(); 2074 2075 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 1); 2076 2077 asleep = 1; 2078 2079 /* Put the CPU into sleep mode */ 2080 hid0 = mfspr(SPRN_HID0); 2081 hid0 = (hid0 & ~(HID0_NAP | HID0_DOZE)) | HID0_SLEEP; 2082 mtspr(SPRN_HID0, hid0); 2083 local_irq_enable(); 2084 msr = mfmsr() | MSR_POW; 2085 while (asleep) { 2086 mb(); 2087 mtmsr(msr); 2088 isync(); 2089 } 2090 local_irq_disable(); 2091 2092 /* OK, we're awake again, start restoring things */ 2093 out_be32(mem_ctrl_sleep, 0x3f); 2094 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 0); 2095 2096 return 0; 2097 } 2098 2099 #endif /* CONFIG_SUSPEND && CONFIG_PPC32 */ 2100 2101 /* 2102 * Support for /dev/pmu device 2103 */ 2104 #define RB_SIZE 0x10 2105 struct pmu_private { 2106 struct list_head list; 2107 int rb_get; 2108 int rb_put; 2109 struct rb_entry { 2110 unsigned short len; 2111 unsigned char data[16]; 2112 } rb_buf[RB_SIZE]; 2113 wait_queue_head_t wait; 2114 spinlock_t lock; 2115 #if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT) 2116 int backlight_locker; 2117 #endif 2118 }; 2119 2120 static LIST_HEAD(all_pmu_pvt); 2121 static 

static void
pmu_pass_intr(unsigned char *data, int len)
{
        struct pmu_private *pp;
        struct list_head *list;
        int i;
        unsigned long flags;

        if (len > sizeof(pp->rb_buf[0].data))
                len = sizeof(pp->rb_buf[0].data);
        spin_lock_irqsave(&all_pvt_lock, flags);
        for (list = &all_pmu_pvt; (list = list->next) != &all_pmu_pvt; ) {
                pp = list_entry(list, struct pmu_private, list);
                spin_lock(&pp->lock);
                i = pp->rb_put + 1;
                if (i >= RB_SIZE)
                        i = 0;
                if (i != pp->rb_get) {
                        struct rb_entry *rp = &pp->rb_buf[pp->rb_put];
                        rp->len = len;
                        memcpy(rp->data, data, len);
                        pp->rb_put = i;
                        wake_up_interruptible(&pp->wait);
                }
                spin_unlock(&pp->lock);
        }
        spin_unlock_irqrestore(&all_pvt_lock, flags);
}

static int
pmu_open(struct inode *inode, struct file *file)
{
        struct pmu_private *pp;
        unsigned long flags;

        pp = kmalloc(sizeof(struct pmu_private), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;
        pp->rb_get = pp->rb_put = 0;
        spin_lock_init(&pp->lock);
        init_waitqueue_head(&pp->wait);
        mutex_lock(&pmu_info_proc_mutex);
        spin_lock_irqsave(&all_pvt_lock, flags);
#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
        pp->backlight_locker = 0;
#endif
        list_add(&pp->list, &all_pmu_pvt);
        spin_unlock_irqrestore(&all_pvt_lock, flags);
        file->private_data = pp;
        mutex_unlock(&pmu_info_proc_mutex);
        return 0;
}

static ssize_t
pmu_read(struct file *file, char __user *buf,
         size_t count, loff_t *ppos)
{
        struct pmu_private *pp = file->private_data;
        DECLARE_WAITQUEUE(wait, current);
        unsigned long flags;
        int ret = 0;

        if (count < 1 || !pp)
                return -EINVAL;

        spin_lock_irqsave(&pp->lock, flags);
        add_wait_queue(&pp->wait, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        for (;;) {
                ret = -EAGAIN;
                if (pp->rb_get != pp->rb_put) {
                        int i = pp->rb_get;
                        struct rb_entry *rp = &pp->rb_buf[i];
                        ret = rp->len;
                        spin_unlock_irqrestore(&pp->lock, flags);
                        if (ret > count)
                                ret = count;
                        if (ret > 0 && copy_to_user(buf, rp->data, ret))
                                ret = -EFAULT;
                        if (++i >= RB_SIZE)
                                i = 0;
                        spin_lock_irqsave(&pp->lock, flags);
                        pp->rb_get = i;
                }
                if (ret >= 0)
                        break;
                if (file->f_flags & O_NONBLOCK)
                        break;
                ret = -ERESTARTSYS;
                if (signal_pending(current))
                        break;
                spin_unlock_irqrestore(&pp->lock, flags);
                schedule();
                spin_lock_irqsave(&pp->lock, flags);
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&pp->wait, &wait);
        spin_unlock_irqrestore(&pp->lock, flags);

        return ret;
}

static ssize_t
pmu_write(struct file *file, const char __user *buf,
          size_t count, loff_t *ppos)
{
        return 0;
}

static __poll_t
pmu_fpoll(struct file *filp, poll_table *wait)
{
        struct pmu_private *pp = filp->private_data;
        __poll_t mask = 0;
        unsigned long flags;

        if (!pp)
                return 0;
        poll_wait(filp, &pp->wait, wait);
        spin_lock_irqsave(&pp->lock, flags);
        if (pp->rb_get != pp->rb_put)
                mask |= EPOLLIN;
        spin_unlock_irqrestore(&pp->lock, flags);
        return mask;
}
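
/*
 * Rough sketch of how userland consumes PMU events through this device
 * (illustrative only, not part of the driver):
 *
 *      int fd = open("/dev/pmu", O_RDONLY);
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *      while (poll(&pfd, 1, -1) > 0) {
 *              unsigned char ev[16];
 *              int n = read(fd, ev, sizeof(ev));  // one queued event per read
 *              // with O_NONBLOCK, read() returns -EAGAIN when the ring is empty
 *      }
 */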

static int
pmu_release(struct inode *inode, struct file *file)
{
        struct pmu_private *pp = file->private_data;
        unsigned long flags;

        if (pp) {
                file->private_data = NULL;
                spin_lock_irqsave(&all_pvt_lock, flags);
                list_del(&pp->list);
                spin_unlock_irqrestore(&all_pvt_lock, flags);

#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
                if (pp->backlight_locker)
                        pmac_backlight_enable();
#endif

                kfree(pp);
        }
        return 0;
}

#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
static void pmac_suspend_disable_irqs(void)
{
        /* Call platform functions marked "on sleep" */
        pmac_pfunc_i2c_suspend();
        pmac_pfunc_base_suspend();
}

static int powerbook_sleep(suspend_state_t state)
{
        int error = 0;

        /* Wait for completion of async requests */
        while (!batt_req.complete)
                pmu_poll();

        /* Give up the lazy FPU & vec so we don't have to back them
         * up from the low level code
         */
        enable_kernel_fp();

#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                enable_kernel_altivec();
#endif /* CONFIG_ALTIVEC */

        switch (pmu_kind) {
        case PMU_OHARE_BASED:
                error = powerbook_sleep_3400();
                break;
        case PMU_HEATHROW_BASED:
        case PMU_PADDINGTON_BASED:
                error = powerbook_sleep_grackle();
                break;
        case PMU_KEYLARGO_BASED:
                error = powerbook_sleep_Core99();
                break;
        default:
                return -ENOSYS;
        }

        if (error)
                return error;

        mdelay(100);

        return 0;
}

static void pmac_suspend_enable_irqs(void)
{
        /* Force a poll of ADB interrupts */
        adb_int_pending = 1;
        via_pmu_interrupt(0, NULL);

        mdelay(10);

        /* Call platform functions marked "on wake" */
        pmac_pfunc_base_resume();
        pmac_pfunc_i2c_resume();
}

static int pmu_sleep_valid(suspend_state_t state)
{
        return state == PM_SUSPEND_MEM
                && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
}

static const struct platform_suspend_ops pmu_pm_ops = {
        .enter = powerbook_sleep,
        .valid = pmu_sleep_valid,
};

static int register_pmu_pm_ops(void)
{
        if (pmu_kind == PMU_OHARE_BASED)
                powerbook_sleep_init_3400();
        ppc_md.suspend_disable_irqs = pmac_suspend_disable_irqs;
        ppc_md.suspend_enable_irqs = pmac_suspend_enable_irqs;
        suspend_set_ops(&pmu_pm_ops);

        return 0;
}

device_initcall(register_pmu_pm_ops);
#endif
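
/*
 * ioctl interface of /dev/pmu: PMU_IOC_SLEEP triggers a suspend-to-RAM
 * (CAP_SYS_ADMIN only), PMU_IOC_CAN_SLEEP / PMU_IOC_GET_MODEL /
 * PMU_IOC_HAS_ADB are simple queries, and the remaining calls are legacy
 * backlight compatibility hooks.
 */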

static int pmu_ioctl(struct file *filp,
                     u_int cmd, u_long arg)
{
        __u32 __user *argp = (__u32 __user *)arg;
        int error = -EINVAL;

        switch (cmd) {
#ifdef CONFIG_PPC_PMAC
        case PMU_IOC_SLEEP:
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                return pm_suspend(PM_SUSPEND_MEM);
        case PMU_IOC_CAN_SLEEP:
                if (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) < 0)
                        return put_user(0, argp);
                else
                        return put_user(1, argp);
#endif

#ifdef CONFIG_PMAC_BACKLIGHT_LEGACY
        /* Compatibility ioctl's for backlight */
        case PMU_IOC_GET_BACKLIGHT:
        {
                int brightness;

                brightness = pmac_backlight_get_legacy_brightness();
                if (brightness < 0)
                        return brightness;
                else
                        return put_user(brightness, argp);

        }
        case PMU_IOC_SET_BACKLIGHT:
        {
                int brightness;

                error = get_user(brightness, argp);
                if (error)
                        return error;

                return pmac_backlight_set_legacy_brightness(brightness);
        }
#ifdef CONFIG_INPUT_ADBHID
        case PMU_IOC_GRAB_BACKLIGHT: {
                struct pmu_private *pp = filp->private_data;

                if (pp->backlight_locker)
                        return 0;

                pp->backlight_locker = 1;
                pmac_backlight_disable();

                return 0;
        }
#endif /* CONFIG_INPUT_ADBHID */
#endif /* CONFIG_PMAC_BACKLIGHT_LEGACY */

        case PMU_IOC_GET_MODEL:
                return put_user(pmu_kind, argp);
        case PMU_IOC_HAS_ADB:
                return put_user(pmu_has_adb, argp);
        }
        return error;
}

static long pmu_unlocked_ioctl(struct file *filp,
                               u_int cmd, u_long arg)
{
        int ret;

        mutex_lock(&pmu_info_proc_mutex);
        ret = pmu_ioctl(filp, cmd, arg);
        mutex_unlock(&pmu_info_proc_mutex);

        return ret;
}

#ifdef CONFIG_COMPAT
#define PMU_IOC_GET_BACKLIGHT32 _IOR('B', 1, compat_size_t)
#define PMU_IOC_SET_BACKLIGHT32 _IOW('B', 2, compat_size_t)
#define PMU_IOC_GET_MODEL32     _IOR('B', 3, compat_size_t)
#define PMU_IOC_HAS_ADB32       _IOR('B', 4, compat_size_t)
#define PMU_IOC_CAN_SLEEP32     _IOR('B', 5, compat_size_t)
#define PMU_IOC_GRAB_BACKLIGHT32 _IOR('B', 6, compat_size_t)

static long compat_pmu_ioctl (struct file *filp, u_int cmd, u_long arg)
{
        switch (cmd) {
        case PMU_IOC_SLEEP:
                break;
        case PMU_IOC_GET_BACKLIGHT32:
                cmd = PMU_IOC_GET_BACKLIGHT;
                break;
        case PMU_IOC_SET_BACKLIGHT32:
                cmd = PMU_IOC_SET_BACKLIGHT;
                break;
        case PMU_IOC_GET_MODEL32:
                cmd = PMU_IOC_GET_MODEL;
                break;
        case PMU_IOC_HAS_ADB32:
                cmd = PMU_IOC_HAS_ADB;
                break;
        case PMU_IOC_CAN_SLEEP32:
                cmd = PMU_IOC_CAN_SLEEP;
                break;
        case PMU_IOC_GRAB_BACKLIGHT32:
                cmd = PMU_IOC_GRAB_BACKLIGHT;
                break;
        default:
                return -ENOIOCTLCMD;
        }
        return pmu_unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations pmu_device_fops = {
        .read           = pmu_read,
        .write          = pmu_write,
        .poll           = pmu_fpoll,
        .unlocked_ioctl = pmu_unlocked_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_pmu_ioctl,
#endif
        .open           = pmu_open,
        .release        = pmu_release,
        .llseek         = noop_llseek,
};

static struct miscdevice pmu_device = {
        PMU_MINOR, "pmu", &pmu_device_fops
};

static int pmu_device_init(void)
{
        if (pmu_state == uninitialized)
                return 0;
        if (misc_register(&pmu_device) < 0)
                printk(KERN_ERR "via-pmu: cannot register misc device.\n");
        return 0;
}
device_initcall(pmu_device_init);


#ifdef DEBUG_SLEEP
static inline void
polled_handshake(void)
{
        via2[B] &= ~TREQ; eieio();
        while ((via2[B] & TACK) != 0)
                ;
        via2[B] |= TREQ; eieio();
        while ((via2[B] & TACK) == 0)
                ;
}

static inline void
polled_send_byte(int x)
{
        via1[ACR] |= SR_OUT | SR_EXT; eieio();
        via1[SR] = x; eieio();
        polled_handshake();
}

static inline int
polled_recv_byte(void)
{
        int x;

        via1[ACR] = (via1[ACR] & ~SR_OUT) | SR_EXT; eieio();
        x = via1[SR]; eieio();
        polled_handshake();
        x = via1[SR]; eieio();
        return x;
}
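
/*
 * The polled helpers above bit-bang the PMU transfer with interrupts off:
 * each byte is clocked through the VIA shift register and handshaken by
 * toggling TREQ and waiting for TACK. They are only built when DEBUG_SLEEP
 * is defined, e.g. for pmu_blink() below as a crude debugging aid.
 */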

int
pmu_polled_request(struct adb_request *req)
{
        unsigned long flags;
        int i, l, c;

        req->complete = 1;
        c = req->data[0];
        l = pmu_data_len[c][0];
        if (l >= 0 && req->nbytes != l + 1)
                return -EINVAL;

        local_irq_save(flags);
        while (pmu_state != idle)
                pmu_poll();

        while ((via2[B] & TACK) == 0)
                ;
        polled_send_byte(c);
        if (l < 0) {
                l = req->nbytes - 1;
                polled_send_byte(l);
        }
        for (i = 1; i <= l; ++i)
                polled_send_byte(req->data[i]);

        l = pmu_data_len[c][1];
        if (l < 0)
                l = polled_recv_byte();
        for (i = 0; i < l; ++i)
                req->reply[i + req->reply_len] = polled_recv_byte();

        if (req->done)
                (*req->done)(req);

        local_irq_restore(flags);
        return 0;
}

/* N.B. This doesn't work on the 3400 */
void pmu_blink(int n)
{
        struct adb_request req;

        memset(&req, 0, sizeof(req));

        for (; n > 0; --n) {
                req.nbytes = 4;
                req.done = NULL;
                req.data[0] = 0xee;
                req.data[1] = 4;
                req.data[2] = 0;
                req.data[3] = 1;
                req.reply[0] = ADB_RET_OK;
                req.reply_len = 1;
                req.reply_expected = 0;
                pmu_polled_request(&req);
                mdelay(50);
                req.nbytes = 4;
                req.done = NULL;
                req.data[0] = 0xee;
                req.data[1] = 4;
                req.data[2] = 0;
                req.data[3] = 0;
                req.reply[0] = ADB_RET_OK;
                req.reply_len = 1;
                req.reply_expected = 0;
                pmu_polled_request(&req);
                mdelay(50);
        }
        mdelay(50);
}
#endif /* DEBUG_SLEEP */

#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
int pmu_sys_suspended;

static int pmu_syscore_suspend(void)
{
        /* Suspend PMU event interrupts */
        pmu_suspend();
        pmu_sys_suspended = 1;

#ifdef CONFIG_PMAC_BACKLIGHT
        /* Tell backlight code not to muck around with the chip anymore */
        pmu_backlight_set_sleep(1);
#endif

        return 0;
}

static void pmu_syscore_resume(void)
{
        struct adb_request req;

        if (!pmu_sys_suspended)
                return;

        /* Tell PMU we are ready */
        pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
        pmu_wait_complete(&req);

#ifdef CONFIG_PMAC_BACKLIGHT
        /* Tell backlight code it can use the chip again */
        pmu_backlight_set_sleep(0);
#endif
        /* Resume PMU event interrupts */
        pmu_resume();
        pmu_sys_suspended = 0;
}

static struct syscore_ops pmu_syscore_ops = {
        .suspend = pmu_syscore_suspend,
        .resume = pmu_syscore_resume,
};

static int pmu_syscore_register(void)
{
        register_syscore_ops(&pmu_syscore_ops);

        return 0;
}
subsys_initcall(pmu_syscore_register);
#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */

EXPORT_SYMBOL(pmu_request);
EXPORT_SYMBOL(pmu_queue_request);
EXPORT_SYMBOL(pmu_poll);
EXPORT_SYMBOL(pmu_poll_adb);
EXPORT_SYMBOL(pmu_wait_complete);
EXPORT_SYMBOL(pmu_suspend);
EXPORT_SYMBOL(pmu_resume);
EXPORT_SYMBOL(pmu_unlock);
#if defined(CONFIG_PPC32)
EXPORT_SYMBOL(pmu_enable_irled);
EXPORT_SYMBOL(pmu_battery_count);
EXPORT_SYMBOL(pmu_batteries);
EXPORT_SYMBOL(pmu_power_flags);
#endif /* CONFIG_PPC32 */
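
/*
 * The exports above form the in-kernel PMU interface used by other
 * macintosh drivers (such as ADB, battery monitoring and backlight code).
 * The usual calling pattern, as used throughout this file, is roughly:
 *
 *      struct adb_request req;
 *
 *      pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
 *      pmu_wait_complete(&req);
 */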