1 /* 2 * NVEC: NVIDIA compliant embedded controller interface 3 * 4 * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net> 5 * 6 * Authors: Pierre-Hugues Husson <phhusson@free.fr> 7 * Ilya Petrov <ilya.muromec@gmail.com> 8 * Marc Dietrich <marvin24@gmx.de> 9 * Julian Andres Klode <jak@jak-linux.org> 10 * 11 * This file is subject to the terms and conditions of the GNU General Public 12 * License. See the file "COPYING" in the main directory of this archive 13 * for more details. 14 * 15 */ 16 17 #include <linux/kernel.h> 18 #include <linux/module.h> 19 #include <linux/atomic.h> 20 #include <linux/clk.h> 21 #include <linux/completion.h> 22 #include <linux/delay.h> 23 #include <linux/err.h> 24 #include <linux/gpio.h> 25 #include <linux/interrupt.h> 26 #include <linux/io.h> 27 #include <linux/irq.h> 28 #include <linux/of.h> 29 #include <linux/of_gpio.h> 30 #include <linux/list.h> 31 #include <linux/mfd/core.h> 32 #include <linux/mutex.h> 33 #include <linux/notifier.h> 34 #include <linux/slab.h> 35 #include <linux/spinlock.h> 36 #include <linux/workqueue.h> 37 38 #include "nvec.h" 39 40 #define I2C_CNFG 0x00 41 #define I2C_CNFG_PACKET_MODE_EN BIT(10) 42 #define I2C_CNFG_NEW_MASTER_SFM BIT(11) 43 #define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12 44 45 #define I2C_SL_CNFG 0x20 46 #define I2C_SL_NEWSL BIT(2) 47 #define I2C_SL_NACK BIT(1) 48 #define I2C_SL_RESP BIT(0) 49 #define I2C_SL_IRQ BIT(3) 50 #define END_TRANS BIT(4) 51 #define RCVD BIT(2) 52 #define RNW BIT(1) 53 54 #define I2C_SL_RCVD 0x24 55 #define I2C_SL_STATUS 0x28 56 #define I2C_SL_ADDR1 0x2c 57 #define I2C_SL_ADDR2 0x30 58 #define I2C_SL_DELAY_COUNT 0x3c 59 60 /** 61 * enum nvec_msg_category - Message categories for nvec_msg_alloc() 62 * @NVEC_MSG_RX: The message is an incoming message (from EC) 63 * @NVEC_MSG_TX: The message is an outgoing message (to EC) 64 */ 65 enum nvec_msg_category { 66 NVEC_MSG_RX, 67 NVEC_MSG_TX, 68 }; 69 70 enum nvec_sleep_subcmds { 71 GLOBAL_EVENTS, 72 AP_PWR_DOWN, 73 AP_SUSPEND, 74 }; 75 76 #define CNF_EVENT_REPORTING 0x01 77 #define GET_FIRMWARE_VERSION 0x15 78 #define LID_SWITCH BIT(1) 79 #define PWR_BUTTON BIT(15) 80 81 static struct nvec_chip *nvec_power_handle; 82 83 static const struct mfd_cell nvec_devices[] = { 84 { 85 .name = "nvec-kbd", 86 }, 87 { 88 .name = "nvec-mouse", 89 }, 90 { 91 .name = "nvec-power", 92 .id = 0, 93 }, 94 { 95 .name = "nvec-power", 96 .id = 1, 97 }, 98 { 99 .name = "nvec-paz00", 100 }, 101 }; 102 103 /** 104 * nvec_register_notifier - Register a notifier with nvec 105 * @nvec: A &struct nvec_chip 106 * @nb: The notifier block to register 107 * 108 * Registers a notifier with @nvec. The notifier will be added to an atomic 109 * notifier chain that is called for all received messages except those that 110 * correspond to a request initiated by nvec_write_sync(). 111 */ 112 int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb, 113 unsigned int events) 114 { 115 return atomic_notifier_chain_register(&nvec->notifier_list, nb); 116 } 117 EXPORT_SYMBOL_GPL(nvec_register_notifier); 118 119 /** 120 * nvec_unregister_notifier - Unregister a notifier with nvec 121 * @nvec: A &struct nvec_chip 122 * @nb: The notifier block to unregister 123 * 124 * Unregisters a notifier with @nvec. The notifier will be removed from the 125 * atomic notifier chain. 126 */ 127 int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb) 128 { 129 return atomic_notifier_chain_unregister(&nvec->notifier_list, nb); 130 } 131 EXPORT_SYMBOL_GPL(nvec_unregister_notifier); 132 133 /** 134 * nvec_status_notifier - The final notifier 135 * 136 * Prints a message about control events not handled in the notifier 137 * chain. 138 */ 139 static int nvec_status_notifier(struct notifier_block *nb, 140 unsigned long event_type, void *data) 141 { 142 struct nvec_chip *nvec = container_of(nb, struct nvec_chip, 143 nvec_status_notifier); 144 unsigned char *msg = data; 145 146 if (event_type != NVEC_CNTL) 147 return NOTIFY_DONE; 148 149 dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type); 150 print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1, 151 msg, msg[1] + 2, true); 152 153 return NOTIFY_OK; 154 } 155 156 /** 157 * nvec_msg_alloc: 158 * @nvec: A &struct nvec_chip 159 * @category: Pool category, see &enum nvec_msg_category 160 * 161 * Allocate a single &struct nvec_msg object from the message pool of 162 * @nvec. The result shall be passed to nvec_msg_free() if no longer 163 * used. 164 * 165 * Outgoing messages are placed in the upper 75% of the pool, keeping the 166 * lower 25% available for RX buffers only. The reason is to prevent a 167 * situation where all buffers are full and a message is thus endlessly 168 * retried because the response could never be processed. 169 */ 170 static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec, 171 enum nvec_msg_category category) 172 { 173 int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0; 174 175 for (; i < NVEC_POOL_SIZE; i++) { 176 if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) { 177 dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i); 178 return &nvec->msg_pool[i]; 179 } 180 } 181 182 dev_err(nvec->dev, "could not allocate %s buffer\n", 183 (category == NVEC_MSG_TX) ? "TX" : "RX"); 184 185 return NULL; 186 } 187 188 /** 189 * nvec_msg_free: 190 * @nvec: A &struct nvec_chip 191 * @msg: A message (must be allocated by nvec_msg_alloc() and belong to @nvec) 192 * 193 * Free the given message 194 */ 195 void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg) 196 { 197 if (msg != &nvec->tx_scratch) 198 dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool); 199 atomic_set(&msg->used, 0); 200 } 201 EXPORT_SYMBOL_GPL(nvec_msg_free); 202 203 /** 204 * nvec_msg_is_event - Return %true if @msg is an event 205 * @msg: A message 206 */ 207 static bool nvec_msg_is_event(struct nvec_msg *msg) 208 { 209 return msg->data[0] >> 7; 210 } 211 212 /** 213 * nvec_msg_size - Get the size of a message 214 * @msg: The message to get the size for 215 * 216 * This only works for received messages, not for outgoing messages. 217 */ 218 static size_t nvec_msg_size(struct nvec_msg *msg) 219 { 220 bool is_event = nvec_msg_is_event(msg); 221 int event_length = (msg->data[0] & 0x60) >> 5; 222 223 /* for variable size, payload size in byte 1 + count (1) + cmd (1) */ 224 if (!is_event || event_length == NVEC_VAR_SIZE) 225 return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0; 226 else if (event_length == NVEC_2BYTES) 227 return 2; 228 else if (event_length == NVEC_3BYTES) 229 return 3; 230 return 0; 231 } 232 233 /** 234 * nvec_gpio_set_value - Set the GPIO value 235 * @nvec: A &struct nvec_chip 236 * @value: The value to write (0 or 1) 237 * 238 * Like gpio_set_value(), but generating debugging information 239 */ 240 static void nvec_gpio_set_value(struct nvec_chip *nvec, int value) 241 { 242 dev_dbg(nvec->dev, "GPIO changed from %u to %u\n", 243 gpio_get_value(nvec->gpio), value); 244 gpio_set_value(nvec->gpio, value); 245 } 246 247 /** 248 * nvec_write_async - Asynchronously write a message to NVEC 249 * @nvec: An nvec_chip instance 250 * @data: The message data, starting with the request type 251 * @size: The size of @data 252 * 253 * Queue a single message to be transferred to the embedded controller 254 * and return immediately. 255 * 256 * Returns: 0 on success, a negative error code on failure. If a failure 257 * occurred, the nvec driver may print an error. 258 */ 259 int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data, 260 short size) 261 { 262 struct nvec_msg *msg; 263 unsigned long flags; 264 265 msg = nvec_msg_alloc(nvec, NVEC_MSG_TX); 266 267 if (!msg) 268 return -ENOMEM; 269 270 msg->data[0] = size; 271 memcpy(msg->data + 1, data, size); 272 msg->size = size + 1; 273 274 spin_lock_irqsave(&nvec->tx_lock, flags); 275 list_add_tail(&msg->node, &nvec->tx_data); 276 spin_unlock_irqrestore(&nvec->tx_lock, flags); 277 278 schedule_work(&nvec->tx_work); 279 280 return 0; 281 } 282 EXPORT_SYMBOL(nvec_write_async); 283 284 /** 285 * nvec_write_sync - Write a message to nvec and read the response 286 * @nvec: An &struct nvec_chip 287 * @data: The data to write 288 * @size: The size of @data 289 * @msg: The response message received 290 * 291 * This is similar to nvec_write_async(), but waits for the 292 * request to be answered before returning. This function 293 * uses a mutex and can thus not be called from e.g. 294 * interrupt handlers. 295 * 296 * Returns: 0 on success, a negative error code on failure. 297 * The response message is returned in @msg. Shall be freed with 298 * with nvec_msg_free() once no longer used. 299 * 300 */ 301 int nvec_write_sync(struct nvec_chip *nvec, 302 const unsigned char *data, short size, 303 struct nvec_msg **msg) 304 { 305 mutex_lock(&nvec->sync_write_mutex); 306 307 *msg = NULL; 308 nvec->sync_write_pending = (data[1] << 8) + data[0]; 309 310 if (nvec_write_async(nvec, data, size) < 0) { 311 mutex_unlock(&nvec->sync_write_mutex); 312 return -ENOMEM; 313 } 314 315 dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n", 316 nvec->sync_write_pending); 317 if (!(wait_for_completion_timeout(&nvec->sync_write, 318 msecs_to_jiffies(2000)))) { 319 dev_warn(nvec->dev, 320 "timeout waiting for sync write to complete\n"); 321 mutex_unlock(&nvec->sync_write_mutex); 322 return -ETIMEDOUT; 323 } 324 325 dev_dbg(nvec->dev, "nvec_sync_write: pong!\n"); 326 327 *msg = nvec->last_sync_msg; 328 329 mutex_unlock(&nvec->sync_write_mutex); 330 331 return 0; 332 } 333 EXPORT_SYMBOL(nvec_write_sync); 334 335 /** 336 * nvec_toggle_global_events - enables or disables global event reporting 337 * @nvec: nvec handle 338 * @state: true for enable, false for disable 339 * 340 * This switches on/off global event reports by the embedded controller. 341 */ 342 static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state) 343 { 344 unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, state }; 345 346 nvec_write_async(nvec, global_events, 3); 347 } 348 349 /** 350 * nvec_event_mask - fill the command string with event bitfield 351 * ev: points to event command string 352 * mask: bit to insert into the event mask 353 * 354 * Configure event command expects a 32 bit bitfield which describes 355 * which events to enable. The bitfield has the following structure 356 * (from highest byte to lowest): 357 * system state bits 7-0 358 * system state bits 15-8 359 * oem system state bits 7-0 360 * oem system state bits 15-8 361 */ 362 static void nvec_event_mask(char *ev, u32 mask) 363 { 364 ev[3] = mask >> 16 & 0xff; 365 ev[4] = mask >> 24 & 0xff; 366 ev[5] = mask >> 0 & 0xff; 367 ev[6] = mask >> 8 & 0xff; 368 } 369 370 /** 371 * nvec_request_master - Process outgoing messages 372 * @work: A &struct work_struct (the tx_worker member of &struct nvec_chip) 373 * 374 * Processes all outgoing requests by sending the request and awaiting the 375 * response, then continuing with the next request. Once a request has a 376 * matching response, it will be freed and removed from the list. 377 */ 378 static void nvec_request_master(struct work_struct *work) 379 { 380 struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work); 381 unsigned long flags; 382 long err; 383 struct nvec_msg *msg; 384 385 spin_lock_irqsave(&nvec->tx_lock, flags); 386 while (!list_empty(&nvec->tx_data)) { 387 msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node); 388 spin_unlock_irqrestore(&nvec->tx_lock, flags); 389 nvec_gpio_set_value(nvec, 0); 390 err = wait_for_completion_interruptible_timeout( 391 &nvec->ec_transfer, msecs_to_jiffies(5000)); 392 393 if (err == 0) { 394 dev_warn(nvec->dev, "timeout waiting for ec transfer\n"); 395 nvec_gpio_set_value(nvec, 1); 396 msg->pos = 0; 397 } 398 399 spin_lock_irqsave(&nvec->tx_lock, flags); 400 401 if (err > 0) { 402 list_del_init(&msg->node); 403 nvec_msg_free(nvec, msg); 404 } 405 } 406 spin_unlock_irqrestore(&nvec->tx_lock, flags); 407 } 408 409 /** 410 * parse_msg - Print some information and call the notifiers on an RX message 411 * @nvec: A &struct nvec_chip 412 * @msg: A message received by @nvec 413 * 414 * Paarse some pieces of the message and then call the chain of notifiers 415 * registered via nvec_register_notifier. 416 */ 417 static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg) 418 { 419 if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) { 420 dev_err(nvec->dev, "ec responded %*ph\n", 4, msg->data); 421 return -EINVAL; 422 } 423 424 if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5) 425 print_hex_dump(KERN_WARNING, "ec system event ", 426 DUMP_PREFIX_NONE, 16, 1, msg->data, 427 msg->data[1] + 2, true); 428 429 atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f, 430 msg->data); 431 432 return 0; 433 } 434 435 /** 436 * nvec_dispatch - Process messages received from the EC 437 * @work: A &struct work_struct (the tx_worker member of &struct nvec_chip) 438 * 439 * Process messages previously received from the EC and put into the RX 440 * queue of the &struct nvec_chip instance associated with @work. 441 */ 442 static void nvec_dispatch(struct work_struct *work) 443 { 444 struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work); 445 unsigned long flags; 446 struct nvec_msg *msg; 447 448 spin_lock_irqsave(&nvec->rx_lock, flags); 449 while (!list_empty(&nvec->rx_data)) { 450 msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node); 451 list_del_init(&msg->node); 452 spin_unlock_irqrestore(&nvec->rx_lock, flags); 453 454 if (nvec->sync_write_pending == 455 (msg->data[2] << 8) + msg->data[0]) { 456 dev_dbg(nvec->dev, "sync write completed!\n"); 457 nvec->sync_write_pending = 0; 458 nvec->last_sync_msg = msg; 459 complete(&nvec->sync_write); 460 } else { 461 parse_msg(nvec, msg); 462 nvec_msg_free(nvec, msg); 463 } 464 spin_lock_irqsave(&nvec->rx_lock, flags); 465 } 466 spin_unlock_irqrestore(&nvec->rx_lock, flags); 467 } 468 469 /** 470 * nvec_tx_completed - Complete the current transfer 471 * @nvec: A &struct nvec_chip 472 * 473 * This is called when we have received an END_TRANS on a TX transfer. 474 */ 475 static void nvec_tx_completed(struct nvec_chip *nvec) 476 { 477 /* We got an END_TRANS, let's skip this, maybe there's an event */ 478 if (nvec->tx->pos != nvec->tx->size) { 479 dev_err(nvec->dev, "premature END_TRANS, resending\n"); 480 nvec->tx->pos = 0; 481 nvec_gpio_set_value(nvec, 0); 482 } else { 483 nvec->state = 0; 484 } 485 } 486 487 /** 488 * nvec_rx_completed - Complete the current transfer 489 * @nvec: A &struct nvec_chip 490 * 491 * This is called when we have received an END_TRANS on a RX transfer. 492 */ 493 static void nvec_rx_completed(struct nvec_chip *nvec) 494 { 495 if (nvec->rx->pos != nvec_msg_size(nvec->rx)) { 496 dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n", 497 (uint)nvec_msg_size(nvec->rx), 498 (uint)nvec->rx->pos); 499 500 nvec_msg_free(nvec, nvec->rx); 501 nvec->state = 0; 502 503 /* Battery quirk - Often incomplete, and likes to crash */ 504 if (nvec->rx->data[0] == NVEC_BAT) 505 complete(&nvec->ec_transfer); 506 507 return; 508 } 509 510 spin_lock(&nvec->rx_lock); 511 512 /* 513 * Add the received data to the work list and move the ring buffer 514 * pointer to the next entry. 515 */ 516 list_add_tail(&nvec->rx->node, &nvec->rx_data); 517 518 spin_unlock(&nvec->rx_lock); 519 520 nvec->state = 0; 521 522 if (!nvec_msg_is_event(nvec->rx)) 523 complete(&nvec->ec_transfer); 524 525 schedule_work(&nvec->rx_work); 526 } 527 528 /** 529 * nvec_invalid_flags - Send an error message about invalid flags and jump 530 * @nvec: The nvec device 531 * @status: The status flags 532 * @reset: Whether we shall jump to state 0. 533 */ 534 static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status, 535 bool reset) 536 { 537 dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n", 538 status, nvec->state); 539 if (reset) 540 nvec->state = 0; 541 } 542 543 /** 544 * nvec_tx_set - Set the message to transfer (nvec->tx) 545 * @nvec: A &struct nvec_chip 546 * 547 * Gets the first entry from the tx_data list of @nvec and sets the 548 * tx member to it. If the tx_data list is empty, this uses the 549 * tx_scratch message to send a no operation message. 550 */ 551 static void nvec_tx_set(struct nvec_chip *nvec) 552 { 553 spin_lock(&nvec->tx_lock); 554 if (list_empty(&nvec->tx_data)) { 555 dev_err(nvec->dev, "empty tx - sending no-op\n"); 556 memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3); 557 nvec->tx_scratch.size = 3; 558 nvec->tx_scratch.pos = 0; 559 nvec->tx = &nvec->tx_scratch; 560 list_add_tail(&nvec->tx->node, &nvec->tx_data); 561 } else { 562 nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg, 563 node); 564 nvec->tx->pos = 0; 565 } 566 spin_unlock(&nvec->tx_lock); 567 568 dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n", 569 (uint)nvec->tx->size, nvec->tx->data[1]); 570 } 571 572 /** 573 * nvec_interrupt - Interrupt handler 574 * @irq: The IRQ 575 * @dev: The nvec device 576 * 577 * Interrupt handler that fills our RX buffers and empties our TX 578 * buffers. This uses a finite state machine with ridiculous amounts 579 * of error checking, in order to be fairly reliable. 580 */ 581 static irqreturn_t nvec_interrupt(int irq, void *dev) 582 { 583 unsigned long status; 584 unsigned int received = 0; 585 unsigned char to_send = 0xff; 586 const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW; 587 struct nvec_chip *nvec = dev; 588 unsigned int state = nvec->state; 589 590 status = readl(nvec->base + I2C_SL_STATUS); 591 592 /* Filter out some errors */ 593 if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) { 594 dev_err(nvec->dev, "unexpected irq mask %lx\n", status); 595 return IRQ_HANDLED; 596 } 597 if ((status & I2C_SL_IRQ) == 0) { 598 dev_err(nvec->dev, "Spurious IRQ\n"); 599 return IRQ_HANDLED; 600 } 601 602 /* The EC did not request a read, so it send us something, read it */ 603 if ((status & RNW) == 0) { 604 received = readl(nvec->base + I2C_SL_RCVD); 605 if (status & RCVD) 606 writel(0, nvec->base + I2C_SL_RCVD); 607 } 608 609 if (status == (I2C_SL_IRQ | RCVD)) 610 nvec->state = 0; 611 612 switch (nvec->state) { 613 case 0: /* Verify that its a transfer start, the rest later */ 614 if (status != (I2C_SL_IRQ | RCVD)) 615 nvec_invalid_flags(nvec, status, false); 616 break; 617 case 1: /* command byte */ 618 if (status != I2C_SL_IRQ) { 619 nvec_invalid_flags(nvec, status, true); 620 } else { 621 nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX); 622 /* Should not happen in a normal world */ 623 if (unlikely(!nvec->rx)) { 624 nvec->state = 0; 625 break; 626 } 627 nvec->rx->data[0] = received; 628 nvec->rx->pos = 1; 629 nvec->state = 2; 630 } 631 break; 632 case 2: /* first byte after command */ 633 if (status == (I2C_SL_IRQ | RNW | RCVD)) { 634 udelay(33); 635 if (nvec->rx->data[0] != 0x01) { 636 dev_err(nvec->dev, 637 "Read without prior read command\n"); 638 nvec->state = 0; 639 break; 640 } 641 nvec_msg_free(nvec, nvec->rx); 642 nvec->state = 3; 643 nvec_tx_set(nvec); 644 to_send = nvec->tx->data[0]; 645 nvec->tx->pos = 1; 646 } else if (status == (I2C_SL_IRQ)) { 647 nvec->rx->data[1] = received; 648 nvec->rx->pos = 2; 649 nvec->state = 4; 650 } else { 651 nvec_invalid_flags(nvec, status, true); 652 } 653 break; 654 case 3: /* EC does a block read, we transmit data */ 655 if (status & END_TRANS) { 656 nvec_tx_completed(nvec); 657 } else if ((status & RNW) == 0 || (status & RCVD)) { 658 nvec_invalid_flags(nvec, status, true); 659 } else if (nvec->tx && nvec->tx->pos < nvec->tx->size) { 660 to_send = nvec->tx->data[nvec->tx->pos++]; 661 } else { 662 dev_err(nvec->dev, 663 "tx buffer underflow on %p (%u > %u)\n", 664 nvec->tx, 665 (uint)(nvec->tx ? nvec->tx->pos : 0), 666 (uint)(nvec->tx ? nvec->tx->size : 0)); 667 nvec->state = 0; 668 } 669 break; 670 case 4: /* EC does some write, we read the data */ 671 if ((status & (END_TRANS | RNW)) == END_TRANS) 672 nvec_rx_completed(nvec); 673 else if (status & (RNW | RCVD)) 674 nvec_invalid_flags(nvec, status, true); 675 else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE) 676 nvec->rx->data[nvec->rx->pos++] = received; 677 else 678 dev_err(nvec->dev, 679 "RX buffer overflow on %p: Trying to write byte %u of %u\n", 680 nvec->rx, nvec->rx ? nvec->rx->pos : 0, 681 NVEC_MSG_SIZE); 682 break; 683 default: 684 nvec->state = 0; 685 } 686 687 /* If we are told that a new transfer starts, verify it */ 688 if ((status & (RCVD | RNW)) == RCVD) { 689 if (received != nvec->i2c_addr) 690 dev_err(nvec->dev, 691 "received address 0x%02x, expected 0x%02x\n", 692 received, nvec->i2c_addr); 693 nvec->state = 1; 694 } 695 696 /* Send data if requested, but not on end of transmission */ 697 if ((status & (RNW | END_TRANS)) == RNW) 698 writel(to_send, nvec->base + I2C_SL_RCVD); 699 700 /* If we have send the first byte */ 701 if (status == (I2C_SL_IRQ | RNW | RCVD)) 702 nvec_gpio_set_value(nvec, 1); 703 704 dev_dbg(nvec->dev, 705 "Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n", 706 (status & RNW) == 0 ? "received" : "R=", 707 received, 708 (status & (RNW | END_TRANS)) ? "sent" : "S=", 709 to_send, 710 state, 711 status & END_TRANS ? " END_TRANS" : "", 712 status & RCVD ? " RCVD" : "", 713 status & RNW ? " RNW" : ""); 714 715 /* 716 * TODO: A correct fix needs to be found for this. 717 * 718 * We experience less incomplete messages with this delay than without 719 * it, but we don't know why. Help is appreciated. 720 */ 721 udelay(100); 722 723 return IRQ_HANDLED; 724 } 725 726 static void tegra_init_i2c_slave(struct nvec_chip *nvec) 727 { 728 u32 val; 729 730 clk_prepare_enable(nvec->i2c_clk); 731 732 reset_control_assert(nvec->rst); 733 udelay(2); 734 reset_control_deassert(nvec->rst); 735 736 val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN | 737 (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT); 738 writel(val, nvec->base + I2C_CNFG); 739 740 clk_set_rate(nvec->i2c_clk, 8 * 80000); 741 742 writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG); 743 writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT); 744 745 writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1); 746 writel(0, nvec->base + I2C_SL_ADDR2); 747 748 enable_irq(nvec->irq); 749 } 750 751 #ifdef CONFIG_PM_SLEEP 752 static void nvec_disable_i2c_slave(struct nvec_chip *nvec) 753 { 754 disable_irq(nvec->irq); 755 writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG); 756 clk_disable_unprepare(nvec->i2c_clk); 757 } 758 #endif 759 760 static void nvec_power_off(void) 761 { 762 char ap_pwr_down[] = { NVEC_SLEEP, AP_PWR_DOWN }; 763 764 nvec_toggle_global_events(nvec_power_handle, false); 765 nvec_write_async(nvec_power_handle, ap_pwr_down, 2); 766 } 767 768 /* 769 * Parse common device tree data 770 */ 771 static int nvec_i2c_parse_dt_pdata(struct nvec_chip *nvec) 772 { 773 nvec->gpio = of_get_named_gpio(nvec->dev->of_node, "request-gpios", 0); 774 775 if (nvec->gpio < 0) { 776 dev_err(nvec->dev, "no gpio specified"); 777 return -ENODEV; 778 } 779 780 if (of_property_read_u32(nvec->dev->of_node, "slave-addr", 781 &nvec->i2c_addr)) { 782 dev_err(nvec->dev, "no i2c address specified"); 783 return -ENODEV; 784 } 785 786 return 0; 787 } 788 789 static int tegra_nvec_probe(struct platform_device *pdev) 790 { 791 int err, ret; 792 struct clk *i2c_clk; 793 struct nvec_chip *nvec; 794 struct nvec_msg *msg; 795 struct resource *res; 796 void __iomem *base; 797 char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION }, 798 unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 }, 799 enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true }; 800 801 if (!pdev->dev.of_node) { 802 dev_err(&pdev->dev, "must be instantiated using device tree\n"); 803 return -ENODEV; 804 } 805 806 nvec = devm_kzalloc(&pdev->dev, sizeof(struct nvec_chip), GFP_KERNEL); 807 if (!nvec) 808 return -ENOMEM; 809 810 platform_set_drvdata(pdev, nvec); 811 nvec->dev = &pdev->dev; 812 813 err = nvec_i2c_parse_dt_pdata(nvec); 814 if (err < 0) 815 return err; 816 817 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 818 base = devm_ioremap_resource(&pdev->dev, res); 819 if (IS_ERR(base)) 820 return PTR_ERR(base); 821 822 nvec->irq = platform_get_irq(pdev, 0); 823 if (nvec->irq < 0) { 824 dev_err(&pdev->dev, "no irq resource?\n"); 825 return -ENODEV; 826 } 827 828 i2c_clk = devm_clk_get(&pdev->dev, "div-clk"); 829 if (IS_ERR(i2c_clk)) { 830 dev_err(nvec->dev, "failed to get controller clock\n"); 831 return -ENODEV; 832 } 833 834 nvec->rst = devm_reset_control_get(&pdev->dev, "i2c"); 835 if (IS_ERR(nvec->rst)) { 836 dev_err(nvec->dev, "failed to get controller reset\n"); 837 return PTR_ERR(nvec->rst); 838 } 839 840 nvec->base = base; 841 nvec->i2c_clk = i2c_clk; 842 nvec->rx = &nvec->msg_pool[0]; 843 844 ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list); 845 846 init_completion(&nvec->sync_write); 847 init_completion(&nvec->ec_transfer); 848 mutex_init(&nvec->sync_write_mutex); 849 spin_lock_init(&nvec->tx_lock); 850 spin_lock_init(&nvec->rx_lock); 851 INIT_LIST_HEAD(&nvec->rx_data); 852 INIT_LIST_HEAD(&nvec->tx_data); 853 INIT_WORK(&nvec->rx_work, nvec_dispatch); 854 INIT_WORK(&nvec->tx_work, nvec_request_master); 855 856 err = devm_gpio_request_one(&pdev->dev, nvec->gpio, GPIOF_OUT_INIT_HIGH, 857 "nvec gpio"); 858 if (err < 0) { 859 dev_err(nvec->dev, "couldn't request gpio\n"); 860 return -ENODEV; 861 } 862 863 err = devm_request_irq(&pdev->dev, nvec->irq, nvec_interrupt, 0, 864 "nvec", nvec); 865 if (err) { 866 dev_err(nvec->dev, "couldn't request irq\n"); 867 return -ENODEV; 868 } 869 disable_irq(nvec->irq); 870 871 tegra_init_i2c_slave(nvec); 872 873 /* enable event reporting */ 874 nvec_toggle_global_events(nvec, true); 875 876 nvec->nvec_status_notifier.notifier_call = nvec_status_notifier; 877 nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0); 878 879 nvec_power_handle = nvec; 880 pm_power_off = nvec_power_off; 881 882 /* Get Firmware Version */ 883 err = nvec_write_sync(nvec, get_firmware_version, 2, &msg); 884 885 if (!err) { 886 dev_warn(nvec->dev, 887 "ec firmware version %02x.%02x.%02x / %02x\n", 888 msg->data[4], msg->data[5], 889 msg->data[6], msg->data[7]); 890 891 nvec_msg_free(nvec, msg); 892 } 893 894 ret = mfd_add_devices(nvec->dev, 0, nvec_devices, 895 ARRAY_SIZE(nvec_devices), NULL, 0, NULL); 896 if (ret) 897 dev_err(nvec->dev, "error adding subdevices\n"); 898 899 /* unmute speakers? */ 900 nvec_write_async(nvec, unmute_speakers, 4); 901 902 /* enable lid switch event */ 903 nvec_event_mask(enable_event, LID_SWITCH); 904 nvec_write_async(nvec, enable_event, 7); 905 906 /* enable power button event */ 907 nvec_event_mask(enable_event, PWR_BUTTON); 908 nvec_write_async(nvec, enable_event, 7); 909 910 return 0; 911 } 912 913 static int tegra_nvec_remove(struct platform_device *pdev) 914 { 915 struct nvec_chip *nvec = platform_get_drvdata(pdev); 916 917 nvec_toggle_global_events(nvec, false); 918 mfd_remove_devices(nvec->dev); 919 nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier); 920 cancel_work_sync(&nvec->rx_work); 921 cancel_work_sync(&nvec->tx_work); 922 /* FIXME: needs check whether nvec is responsible for power off */ 923 pm_power_off = NULL; 924 925 return 0; 926 } 927 928 #ifdef CONFIG_PM_SLEEP 929 static int nvec_suspend(struct device *dev) 930 { 931 int err; 932 struct platform_device *pdev = to_platform_device(dev); 933 struct nvec_chip *nvec = platform_get_drvdata(pdev); 934 struct nvec_msg *msg; 935 char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND }; 936 937 dev_dbg(nvec->dev, "suspending\n"); 938 939 /* keep these sync or you'll break suspend */ 940 nvec_toggle_global_events(nvec, false); 941 942 err = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend), &msg); 943 if (!err) 944 nvec_msg_free(nvec, msg); 945 946 nvec_disable_i2c_slave(nvec); 947 948 return 0; 949 } 950 951 static int nvec_resume(struct device *dev) 952 { 953 struct platform_device *pdev = to_platform_device(dev); 954 struct nvec_chip *nvec = platform_get_drvdata(pdev); 955 956 dev_dbg(nvec->dev, "resuming\n"); 957 tegra_init_i2c_slave(nvec); 958 nvec_toggle_global_events(nvec, true); 959 960 return 0; 961 } 962 #endif 963 964 static SIMPLE_DEV_PM_OPS(nvec_pm_ops, nvec_suspend, nvec_resume); 965 966 /* Match table for of_platform binding */ 967 static const struct of_device_id nvidia_nvec_of_match[] = { 968 { .compatible = "nvidia,nvec", }, 969 {}, 970 }; 971 MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match); 972 973 static struct platform_driver nvec_device_driver = { 974 .probe = tegra_nvec_probe, 975 .remove = tegra_nvec_remove, 976 .driver = { 977 .name = "nvec", 978 .pm = &nvec_pm_ops, 979 .of_match_table = nvidia_nvec_of_match, 980 } 981 }; 982 983 module_platform_driver(nvec_device_driver); 984 985 MODULE_ALIAS("platform:nvec"); 986 MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface"); 987 MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>"); 988 MODULE_LICENSE("GPL"); 989