xref: /openbmc/linux/drivers/staging/nvec/nvec.c (revision 12b5a55d)
1 /*
2  * NVEC: NVIDIA compliant embedded controller interface
3  *
4  * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
5  *
6  * Authors:  Pierre-Hugues Husson <phhusson@free.fr>
7  *           Ilya Petrov <ilya.muromec@gmail.com>
8  *           Marc Dietrich <marvin24@gmx.de>
9  *           Julian Andres Klode <jak@jak-linux.org>
10  *
11  * This file is subject to the terms and conditions of the GNU General Public
12  * License.  See the file "COPYING" in the main directory of this archive
13  * for more details.
14  *
15  */
16 
17 /* #define DEBUG */
18 
19 #include <linux/kernel.h>
20 #include <linux/atomic.h>
21 #include <linux/clk.h>
22 #include <linux/completion.h>
23 #include <linux/delay.h>
24 #include <linux/err.h>
25 #include <linux/gpio.h>
26 #include <linux/interrupt.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/list.h>
30 #include <linux/mfd/core.h>
31 #include <linux/mutex.h>
32 #include <linux/notifier.h>
33 #include <linux/platform_device.h>
34 #include <linux/slab.h>
35 #include <linux/spinlock.h>
36 #include <linux/workqueue.h>
37 
38 #include <mach/clk.h>
39 #include <mach/iomap.h>
40 
41 #include "nvec.h"
42 
43 #define I2C_CNFG			0x00
44 #define I2C_CNFG_PACKET_MODE_EN		(1<<10)
45 #define I2C_CNFG_NEW_MASTER_SFM		(1<<11)
46 #define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12
47 
48 #define I2C_SL_CNFG		0x20
49 #define I2C_SL_NEWL		(1<<2)
50 #define I2C_SL_NACK		(1<<1)
51 #define I2C_SL_RESP		(1<<0)
52 #define I2C_SL_IRQ		(1<<3)
53 #define END_TRANS		(1<<4)
54 #define RCVD			(1<<2)
55 #define RNW			(1<<1)
56 
57 #define I2C_SL_RCVD		0x24
58 #define I2C_SL_STATUS		0x28
59 #define I2C_SL_ADDR1		0x2c
60 #define I2C_SL_ADDR2		0x30
61 #define I2C_SL_DELAY_COUNT	0x3c
62 
63 /**
64  * enum nvec_msg_category - Message categories for nvec_msg_alloc()
65  * @NVEC_MSG_RX: The message is an incoming message (from EC)
66  * @NVEC_MSG_TX: The message is an outgoing message (to EC)
67  */
enum nvec_msg_category  {
	NVEC_MSG_RX,	/* allocate from the whole pool (incoming data) */
	NVEC_MSG_TX,	/* allocate from the upper 75% only (outgoing data) */
};
72 
/* Fixed EC command strings, sent via nvec_write_async()/nvec_write_sync() */
static const unsigned char EC_DISABLE_EVENT_REPORTING[3] = "\x04\x00\x00";
static const unsigned char EC_ENABLE_EVENT_REPORTING[3]  = "\x04\x00\x01";
static const unsigned char EC_GET_FIRMWARE_VERSION[2]    = "\x07\x15";

/* chip instance used by the global pm_power_off hook (set in probe) */
static struct nvec_chip *nvec_power_handle;
78 
/* Sub-devices registered with the MFD core in tegra_nvec_probe() */
static struct mfd_cell nvec_devices[] = {
	{
		.name = "nvec-kbd",
		.id = 1,
	},
	{
		.name = "nvec-mouse",
		.id = 1,
	},
	{
		.name = "nvec-power",
		.id = 1,
	},
	{
		/* second power cell; presumably a second battery/supply
		 * instance -- confirm against the nvec-power driver */
		.name = "nvec-power",
		.id = 2,
	},
	{
		.name = "nvec-leds",
		.id = 1,
	},
};
101 
102 /**
103  * nvec_register_notifier - Register a notifier with nvec
104  * @nvec: A &struct nvec_chip
105  * @nb: The notifier block to register
106  *
107  * Registers a notifier with @nvec. The notifier will be added to an atomic
108  * notifier chain that is called for all received messages except those that
109  * correspond to a request initiated by nvec_write_sync().
110  */
int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
			   unsigned int events)
{
	/* NOTE: @events is currently ignored; every notifier in the chain
	 * sees every received message regardless of this mask */
	return atomic_notifier_chain_register(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_register_notifier);
117 
118 /**
119  * nvec_status_notifier - The final notifier
120  *
121  * Prints a message about control events not handled in the notifier
122  * chain.
123  */
124 static int nvec_status_notifier(struct notifier_block *nb,
125 				unsigned long event_type, void *data)
126 {
127 	unsigned char *msg = (unsigned char *)data;
128 
129 	if (event_type != NVEC_CNTL)
130 		return NOTIFY_DONE;
131 
132 	printk(KERN_WARNING "unhandled msg type %ld\n", event_type);
133 	print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
134 		msg, msg[1] + 2, true);
135 
136 	return NOTIFY_OK;
137 }
138 
139 /**
140  * nvec_msg_alloc:
141  * @nvec: A &struct nvec_chip
142  * @category: Pool category, see &enum nvec_msg_category
143  *
144  * Allocate a single &struct nvec_msg object from the message pool of
145  * @nvec. The result shall be passed to nvec_msg_free() if no longer
146  * used.
147  *
148  * Outgoing messages are placed in the upper 75% of the pool, keeping the
149  * lower 25% available for RX buffers only. The reason is to prevent a
150  * situation where all buffers are full and a message is thus endlessly
151  * retried because the response could never be processed.
152  */
153 static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
154 				       enum nvec_msg_category category)
155 {
156 	int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;
157 
158 	for (; i < NVEC_POOL_SIZE; i++) {
159 		if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
160 			dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
161 			return &nvec->msg_pool[i];
162 		}
163 	}
164 
165 	dev_err(nvec->dev, "could not allocate %s buffer\n",
166 		(category == NVEC_MSG_TX) ? "TX" : "RX");
167 
168 	return NULL;
169 }
170 
171 /**
172  * nvec_msg_free:
173  * @nvec: A &struct nvec_chip
174  * @msg:  A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
175  *
176  * Free the given message
177  */
inline void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	/* tx_scratch lives outside msg_pool, so no pool index to log */
	if (msg != &nvec->tx_scratch)
		dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
	/* clearing the 'used' flag returns the slot to the pool */
	atomic_set(&msg->used, 0);
}
EXPORT_SYMBOL_GPL(nvec_msg_free);
185 
186 /**
187  * nvec_msg_is_event - Return %true if @msg is an event
188  * @msg: A message
189  */
190 static bool nvec_msg_is_event(struct nvec_msg *msg)
191 {
192 	return msg->data[0] >> 7;
193 }
194 
195 /**
196  * nvec_msg_size - Get the size of a message
197  * @msg: The message to get the size for
198  *
199  * This only works for received messages, not for outgoing messages.
200  */
201 static size_t nvec_msg_size(struct nvec_msg *msg)
202 {
203 	bool is_event = nvec_msg_is_event(msg);
204 	int event_length = (msg->data[0] & 0x60) >> 5;
205 
206 	/* for variable size, payload size in byte 1 + count (1) + cmd (1) */
207 	if (!is_event || event_length == NVEC_VAR_SIZE)
208 		return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
209 	else if (event_length == NVEC_2BYTES)
210 		return 2;
211 	else if (event_length == NVEC_3BYTES)
212 		return 3;
213 	else
214 		return 0;
215 }
216 
217 /**
218  * nvec_gpio_set_value - Set the GPIO value
219  * @nvec: A &struct nvec_chip
220  * @value: The value to write (0 or 1)
221  *
222  * Like gpio_set_value(), but generating debugging information
223  */
static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
{
	/* log the transition (old -> new) before driving the line */
	dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
		gpio_get_value(nvec->gpio), value);
	gpio_set_value(nvec->gpio, value);
}
230 
231 /**
232  * nvec_write_async - Asynchronously write a message to NVEC
233  * @nvec: An nvec_chip instance
234  * @data: The message data, starting with the request type
235  * @size: The size of @data
236  *
237  * Queue a single message to be transferred to the embedded controller
238  * and return immediately.
239  *
240  * Returns: 0 on success, a negative error code on failure. If a failure
 * occurred, the nvec driver may print an error.
242  */
243 int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
244 			short size)
245 {
246 	struct nvec_msg *msg;
247 	unsigned long flags;
248 
249 	msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);
250 
251 	if (msg == NULL)
252 		return -ENOMEM;
253 
254 	msg->data[0] = size;
255 	memcpy(msg->data + 1, data, size);
256 	msg->size = size + 1;
257 
258 	spin_lock_irqsave(&nvec->tx_lock, flags);
259 	list_add_tail(&msg->node, &nvec->tx_data);
260 	spin_unlock_irqrestore(&nvec->tx_lock, flags);
261 
262 	queue_work(nvec->wq, &nvec->tx_work);
263 
264 	return 0;
265 }
266 EXPORT_SYMBOL(nvec_write_async);
267 
268 /**
269  * nvec_write_sync - Write a message to nvec and read the response
270  * @nvec: An &struct nvec_chip
271  * @data: The data to write
272  * @size: The size of @data
273  *
274  * This is similar to nvec_write_async(), but waits for the
275  * request to be answered before returning. This function
276  * uses a mutex and can thus not be called from e.g.
277  * interrupt handlers.
278  *
279  * Returns: A pointer to the response message on success,
280  * %NULL on failure. Free with nvec_msg_free() once no longer
281  * used.
282  */
283 struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
284 		const unsigned char *data, short size)
285 {
286 	struct nvec_msg *msg;
287 
288 	mutex_lock(&nvec->sync_write_mutex);
289 
290 	nvec->sync_write_pending = (data[1] << 8) + data[0];
291 
292 	if (nvec_write_async(nvec, data, size) < 0)
293 		return NULL;
294 
295 	dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
296 					nvec->sync_write_pending);
297 	if (!(wait_for_completion_timeout(&nvec->sync_write,
298 				msecs_to_jiffies(2000)))) {
299 		dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
300 		mutex_unlock(&nvec->sync_write_mutex);
301 		return NULL;
302 	}
303 
304 	dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");
305 
306 	msg = nvec->last_sync_msg;
307 
308 	mutex_unlock(&nvec->sync_write_mutex);
309 
310 	return msg;
311 }
312 EXPORT_SYMBOL(nvec_write_sync);
313 
314 /**
315  * nvec_request_master - Process outgoing messages
316  * @work: A &struct work_struct (the tx_worker member of &struct nvec_chip)
317  *
318  * Processes all outgoing requests by sending the request and awaiting the
319  * response, then continuing with the next request. Once a request has a
320  * matching response, it will be freed and removed from the list.
321  */
static void nvec_request_master(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
	unsigned long flags;
	long err;
	struct nvec_msg *msg;

	/* tx_lock protects the tx_data list; it is dropped around the
	 * sleeping wait below and re-taken before touching the list again */
	spin_lock_irqsave(&nvec->tx_lock, flags);
	while (!list_empty(&nvec->tx_data)) {
		msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
		spin_unlock_irqrestore(&nvec->tx_lock, flags);
		/* pull the request line low: asks the EC to do a master read */
		nvec_gpio_set_value(nvec, 0);
		err = wait_for_completion_interruptible_timeout(
				&nvec->ec_transfer, msecs_to_jiffies(5000));

		if (err == 0) {
			/* timeout: release the request line and rewind the
			 * message; it stays on the list and is retried */
			dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
			nvec_gpio_set_value(nvec, 1);
			msg->pos = 0;
		}

		spin_lock_irqsave(&nvec->tx_lock, flags);

		/* err > 0: transfer completed, retire the message
		 * (err < 0, i.e. interrupted, also leaves it queued) */
		if (err > 0) {
			list_del_init(&msg->node);
			nvec_msg_free(nvec, msg);
		}
	}
	spin_unlock_irqrestore(&nvec->tx_lock, flags);
}
352 
353 /**
354  * parse_msg - Print some information and call the notifiers on an RX message
355  * @nvec: A &struct nvec_chip
356  * @msg: A message received by @nvec
357  *
 * Parse some pieces of the message and then call the chain of notifiers
359  * registered via nvec_register_notifier.
360  */
361 static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
362 {
363 	if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
364 		dev_err(nvec->dev, "ec responded %02x %02x %02x %02x\n",
365 			msg->data[0], msg->data[1], msg->data[2], msg->data[3]);
366 		return -EINVAL;
367 	}
368 
369 	if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
370 		print_hex_dump(KERN_WARNING, "ec system event ",
371 				DUMP_PREFIX_NONE, 16, 1, msg->data,
372 				msg->data[1] + 2, true);
373 
374 	atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
375 				   msg->data);
376 
377 	return 0;
378 }
379 
380 /**
381  * nvec_dispatch - Process messages received from the EC
382  * @work: A &struct work_struct (the tx_worker member of &struct nvec_chip)
383  *
384  * Process messages previously received from the EC and put into the RX
385  * queue of the &struct nvec_chip instance associated with @work.
386  */
static void nvec_dispatch(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
	unsigned long flags;
	struct nvec_msg *msg;

	/* rx_lock guards rx_data; dropped while processing each message */
	spin_lock_irqsave(&nvec->rx_lock, flags);
	while (!list_empty(&nvec->rx_data)) {
		msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
		list_del_init(&msg->node);
		spin_unlock_irqrestore(&nvec->rx_lock, flags);

		/* a response echoes the request's command in byte 2 and its
		 * type in byte 0; match it against the pending sync write */
		if (nvec->sync_write_pending ==
		      (msg->data[2] << 8) + msg->data[0]) {
			dev_dbg(nvec->dev, "sync write completed!\n");
			nvec->sync_write_pending = 0;
			/* ownership passes to nvec_write_sync()'s caller */
			nvec->last_sync_msg = msg;
			complete(&nvec->sync_write);
		} else {
			parse_msg(nvec, msg);
			nvec_msg_free(nvec, msg);
		}
		spin_lock_irqsave(&nvec->rx_lock, flags);
	}
	spin_unlock_irqrestore(&nvec->rx_lock, flags);
}
413 
414 /**
415  * nvec_tx_completed - Complete the current transfer
416  * @nvec: A &struct nvec_chip
417  *
418  * This is called when we have received an END_TRANS on a TX transfer.
419  */
420 static void nvec_tx_completed(struct nvec_chip *nvec)
421 {
422 	/* We got an END_TRANS, let's skip this, maybe there's an event */
423 	if (nvec->tx->pos != nvec->tx->size) {
424 		dev_err(nvec->dev, "premature END_TRANS, resending\n");
425 		nvec->tx->pos = 0;
426 		nvec_gpio_set_value(nvec, 0);
427 	} else {
428 		nvec->state = 0;
429 	}
430 }
431 
432 /**
433  * nvec_rx_completed - Complete the current transfer
434  * @nvec: A &struct nvec_chip
435  *
436  * This is called when we have received an END_TRANS on a RX transfer.
437  */
438 static void nvec_rx_completed(struct nvec_chip *nvec)
439 {
440 	if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
441 		dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
442 			   (uint) nvec_msg_size(nvec->rx),
443 			   (uint) nvec->rx->pos);
444 
445 		nvec_msg_free(nvec, nvec->rx);
446 		nvec->state = 0;
447 
448 		/* Battery quirk - Often incomplete, and likes to crash */
449 		if (nvec->rx->data[0] == NVEC_BAT)
450 			complete(&nvec->ec_transfer);
451 
452 		return;
453 	}
454 
455 	spin_lock(&nvec->rx_lock);
456 
457 	/* add the received data to the work list
458 	   and move the ring buffer pointer to the next entry */
459 	list_add_tail(&nvec->rx->node, &nvec->rx_data);
460 
461 	spin_unlock(&nvec->rx_lock);
462 
463 	nvec->state = 0;
464 
465 	if (!nvec_msg_is_event(nvec->rx))
466 		complete(&nvec->ec_transfer);
467 
468 	queue_work(nvec->wq, &nvec->rx_work);
469 }
470 
471 /**
472  * nvec_invalid_flags - Send an error message about invalid flags and jump
473  * @nvec: The nvec device
474  * @status: The status flags
475  * @reset: Whether we shall jump to state 0.
476  */
static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
			       bool reset)
{
	dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
		status, nvec->state);
	/* optionally restart the state machine from idle */
	if (reset)
		nvec->state = 0;
}
485 
486 /**
487  * nvec_tx_set - Set the message to transfer (nvec->tx)
488  * @nvec: A &struct nvec_chip
489  *
490  * Gets the first entry from the tx_data list of @nvec and sets the
491  * tx member to it. If the tx_data list is empty, this uses the
492  * tx_scratch message to send a no operation message.
493  */
494 static void nvec_tx_set(struct nvec_chip *nvec)
495 {
496 	spin_lock(&nvec->tx_lock);
497 	if (list_empty(&nvec->tx_data)) {
498 		dev_err(nvec->dev, "empty tx - sending no-op\n");
499 		memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
500 		nvec->tx_scratch.size = 3;
501 		nvec->tx_scratch.pos = 0;
502 		nvec->tx = &nvec->tx_scratch;
503 		list_add_tail(&nvec->tx->node, &nvec->tx_data);
504 	} else {
505 		nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
506 					    node);
507 		nvec->tx->pos = 0;
508 	}
509 	spin_unlock(&nvec->tx_lock);
510 
511 	dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
512 		(uint)nvec->tx->size, nvec->tx->data[1]);
513 }
514 
515 /**
516  * nvec_interrupt - Interrupt handler
517  * @irq: The IRQ
518  * @dev: The nvec device
519  *
520  * Interrupt handler that fills our RX buffers and empties our TX
521  * buffers. This uses a finite state machine with ridiculous amounts
522  * of error checking, in order to be fairly reliable.
523  */
static irqreturn_t nvec_interrupt(int irq, void *dev)
{
	unsigned long status;
	unsigned int received = 0;
	unsigned char to_send = 0xff;	/* filler if nothing real to send */
	const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
	struct nvec_chip *nvec = dev;
	unsigned int state = nvec->state;	/* entry state, for the log */

	status = readl(nvec->base + I2C_SL_STATUS);

	/* Filter out some errors */
	if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
		dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
		return IRQ_HANDLED;
	}
	if ((status & I2C_SL_IRQ) == 0) {
		dev_err(nvec->dev, "Spurious IRQ\n");
		return IRQ_HANDLED;
	}

	/* The EC did not request a read, so it sent us something, read it */
	if ((status & RNW) == 0) {
		received = readl(nvec->base + I2C_SL_RCVD);
		/* new transfer start: ack by writing the RCVD register */
		if (status & RCVD)
			writel(0, nvec->base + I2C_SL_RCVD);
	}

	/* a bare transfer-start resets the state machine unconditionally */
	if (status == (I2C_SL_IRQ | RCVD))
		nvec->state = 0;

	switch (nvec->state) {
	case 0:		/* Verify that its a transfer start, the rest later */
		if (status != (I2C_SL_IRQ | RCVD))
			nvec_invalid_flags(nvec, status, false);
		break;
	case 1:		/* command byte */
		if (status != I2C_SL_IRQ) {
			nvec_invalid_flags(nvec, status, true);
		} else {
			nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
			/* Should not happen in a normal world */
			if (unlikely(nvec->rx == NULL)) {
				nvec->state = 0;
				break;
			}
			nvec->rx->data[0] = received;
			nvec->rx->pos = 1;
			nvec->state = 2;
		}
		break;
	case 2:		/* first byte after command */
		if (status == (I2C_SL_IRQ | RNW | RCVD)) {
			/* EC wants to read: switch from RX to TX mode.
			 * NOTE(review): the 33us delay looks like a hardware
			 * timing workaround -- confirm before removing */
			udelay(33);
			if (nvec->rx->data[0] != 0x01) {
				dev_err(nvec->dev,
					"Read without prior read command\n");
				nvec->state = 0;
				break;
			}
			/* the RX message only carried the read command;
			 * recycle it and pick the next TX message */
			nvec_msg_free(nvec, nvec->rx);
			nvec->state = 3;
			nvec_tx_set(nvec);
			BUG_ON(nvec->tx->size < 1);
			to_send = nvec->tx->data[0];
			nvec->tx->pos = 1;
		} else if (status == (I2C_SL_IRQ)) {
			/* EC keeps writing: this is a normal RX message */
			BUG_ON(nvec->rx == NULL);
			nvec->rx->data[1] = received;
			nvec->rx->pos = 2;
			nvec->state = 4;
		} else {
			nvec_invalid_flags(nvec, status, true);
		}
		break;
	case 3:		/* EC does a block read, we transmit data */
		if (status & END_TRANS) {
			nvec_tx_completed(nvec);
		} else if ((status & RNW) == 0 || (status & RCVD)) {
			nvec_invalid_flags(nvec, status, true);
		} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
			to_send = nvec->tx->data[nvec->tx->pos++];
		} else {
			dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
				nvec->tx,
				(uint) (nvec->tx ? nvec->tx->pos : 0),
				(uint) (nvec->tx ? nvec->tx->size : 0));
			nvec->state = 0;
		}
		break;
	case 4:		/* EC does some write, we read the data */
		if ((status & (END_TRANS | RNW)) == END_TRANS)
			nvec_rx_completed(nvec);
		else if (status & (RNW | RCVD))
			nvec_invalid_flags(nvec, status, true);
		else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
			nvec->rx->data[nvec->rx->pos++] = received;
		else
			dev_err(nvec->dev,
				"RX buffer overflow on %p: "
				"Trying to write byte %u of %u\n",
				nvec->rx, nvec->rx->pos, NVEC_MSG_SIZE);
		break;
	default:
		nvec->state = 0;
	}

	/* If we are told that a new transfer starts, verify it */
	if ((status & (RCVD | RNW)) == RCVD) {
		if (received != nvec->i2c_addr)
			dev_err(nvec->dev,
			"received address 0x%02x, expected 0x%02x\n",
			received, nvec->i2c_addr);
		nvec->state = 1;
	}

	/* Send data if requested, but not on end of transmission */
	if ((status & (RNW | END_TRANS)) == RNW)
		writel(to_send, nvec->base + I2C_SL_RCVD);

	/* If we have sent the first byte, release the request line */
	if (status == (I2C_SL_IRQ | RNW | RCVD))
		nvec_gpio_set_value(nvec, 1);

	dev_dbg(nvec->dev,
		"Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
		(status & RNW) == 0 ? "received" : "R=",
		received,
		(status & (RNW | END_TRANS)) ? "sent" : "S=",
		to_send,
		state,
		status & END_TRANS ? " END_TRANS" : "",
		status & RCVD ? " RCVD" : "",
		status & RNW ? " RNW" : "");

	return IRQ_HANDLED;
}
661 
/* Reset the controller and program it as an I2C slave at nvec->i2c_addr */
static void tegra_init_i2c_slave(struct nvec_chip *nvec)
{
	u32 val;

	clk_enable(nvec->i2c_clk);

	/* pulse the peripheral reset before reprogramming */
	tegra_periph_reset_assert(nvec->i2c_clk);
	udelay(2);
	tegra_periph_reset_deassert(nvec->i2c_clk);

	val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
	    (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
	writel(val, nvec->base + I2C_CNFG);

	/* 8 * 80kHz; presumably the controller divides this down to the
	 * 80kHz bus clock -- confirm against the Tegra I2C documentation */
	clk_set_rate(nvec->i2c_clk, 8 * 80000);

	writel(I2C_SL_NEWL, nvec->base + I2C_SL_CNFG);
	writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);

	/* slave address register holds the 7-bit address */
	writel(nvec->i2c_addr>>1, nvec->base + I2C_SL_ADDR1);
	writel(0, nvec->base + I2C_SL_ADDR2);

	enable_irq(nvec->irq);

	clk_disable(nvec->i2c_clk);
}
688 
/* Quiesce the slave interface: NACK further transfers, stop the clock */
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
	disable_irq(nvec->irq);
	writel(I2C_SL_NEWL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
	clk_disable(nvec->i2c_clk);
}
695 
/* pm_power_off hook: stop event reporting, then ask the EC to cut power.
 * "\x04\x01" is presumably the EC power-down command -- confirm against
 * the EC protocol documentation */
static void nvec_power_off(void)
{
	nvec_write_async(nvec_power_handle, EC_DISABLE_EVENT_REPORTING, 3);
	nvec_write_async(nvec_power_handle, "\x04\x01", 2);
}
701 
702 static int __devinit tegra_nvec_probe(struct platform_device *pdev)
703 {
704 	int err, ret;
705 	struct clk *i2c_clk;
706 	struct nvec_platform_data *pdata = pdev->dev.platform_data;
707 	struct nvec_chip *nvec;
708 	struct nvec_msg *msg;
709 	struct resource *res;
710 	struct resource *iomem;
711 	void __iomem *base;
712 
713 	nvec = kzalloc(sizeof(struct nvec_chip), GFP_KERNEL);
714 	if (nvec == NULL) {
715 		dev_err(&pdev->dev, "failed to reserve memory\n");
716 		return -ENOMEM;
717 	}
718 	platform_set_drvdata(pdev, nvec);
719 	nvec->dev = &pdev->dev;
720 	nvec->gpio = pdata->gpio;
721 	nvec->i2c_addr = pdata->i2c_addr;
722 
723 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
724 	if (!res) {
725 		dev_err(&pdev->dev, "no mem resource?\n");
726 		return -ENODEV;
727 	}
728 
729 	iomem = request_mem_region(res->start, resource_size(res), pdev->name);
730 	if (!iomem) {
731 		dev_err(&pdev->dev, "I2C region already claimed\n");
732 		return -EBUSY;
733 	}
734 
735 	base = ioremap(iomem->start, resource_size(iomem));
736 	if (!base) {
737 		dev_err(&pdev->dev, "Can't ioremap I2C region\n");
738 		return -ENOMEM;
739 	}
740 
741 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
742 	if (!res) {
743 		dev_err(&pdev->dev, "no irq resource?\n");
744 		ret = -ENODEV;
745 		goto err_iounmap;
746 	}
747 
748 	i2c_clk = clk_get_sys("tegra-i2c.2", NULL);
749 	if (IS_ERR(i2c_clk)) {
750 		dev_err(nvec->dev, "failed to get controller clock\n");
751 		goto err_iounmap;
752 	}
753 
754 	nvec->base = base;
755 	nvec->irq = res->start;
756 	nvec->i2c_clk = i2c_clk;
757 	nvec->rx = &nvec->msg_pool[0];
758 
759 	/* Set the gpio to low when we've got something to say */
760 	err = gpio_request(nvec->gpio, "nvec gpio");
761 	if (err < 0)
762 		dev_err(nvec->dev, "couldn't request gpio\n");
763 
764 	ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);
765 
766 	init_completion(&nvec->sync_write);
767 	init_completion(&nvec->ec_transfer);
768 	mutex_init(&nvec->sync_write_mutex);
769 	spin_lock_init(&nvec->tx_lock);
770 	spin_lock_init(&nvec->rx_lock);
771 	INIT_LIST_HEAD(&nvec->rx_data);
772 	INIT_LIST_HEAD(&nvec->tx_data);
773 	INIT_WORK(&nvec->rx_work, nvec_dispatch);
774 	INIT_WORK(&nvec->tx_work, nvec_request_master);
775 	nvec->wq = alloc_workqueue("nvec", WQ_NON_REENTRANT, 2);
776 
777 	err = request_irq(nvec->irq, nvec_interrupt, 0, "nvec", nvec);
778 	if (err) {
779 		dev_err(nvec->dev, "couldn't request irq\n");
780 		goto failed;
781 	}
782 	disable_irq(nvec->irq);
783 
784 	tegra_init_i2c_slave(nvec);
785 
786 	clk_enable(i2c_clk);
787 
788 	gpio_direction_output(nvec->gpio, 1);
789 	gpio_set_value(nvec->gpio, 1);
790 
791 	/* enable event reporting */
792 	nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
793 			 sizeof(EC_ENABLE_EVENT_REPORTING));
794 
795 	nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
796 	nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);
797 
798 	nvec_power_handle = nvec;
799 	pm_power_off = nvec_power_off;
800 
801 	/* Get Firmware Version */
802 	msg = nvec_write_sync(nvec, EC_GET_FIRMWARE_VERSION,
803 		sizeof(EC_GET_FIRMWARE_VERSION));
804 
805 	if (msg) {
806 		dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
807 			msg->data[4], msg->data[5], msg->data[6], msg->data[7]);
808 
809 		nvec_msg_free(nvec, msg);
810 	}
811 
812 	ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
813 			      ARRAY_SIZE(nvec_devices), base, 0);
814 	if (ret)
815 		dev_err(nvec->dev, "error adding subdevices\n");
816 
817 	/* unmute speakers? */
818 	nvec_write_async(nvec, "\x0d\x10\x59\x95", 4);
819 
820 	/* enable lid switch event */
821 	nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x02\x00", 7);
822 
823 	/* enable power button event */
824 	nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x80\x00", 7);
825 
826 	return 0;
827 
828 err_iounmap:
829 	iounmap(base);
830 failed:
831 	kfree(nvec);
832 	return -ENOMEM;
833 }
834 
835 static int __devexit tegra_nvec_remove(struct platform_device *pdev)
836 {
837 	struct nvec_chip *nvec = platform_get_drvdata(pdev);
838 
839 	nvec_write_async(nvec, EC_DISABLE_EVENT_REPORTING, 3);
840 	mfd_remove_devices(nvec->dev);
841 	free_irq(nvec->irq, &nvec_interrupt);
842 	iounmap(nvec->base);
843 	gpio_free(nvec->gpio);
844 	destroy_workqueue(nvec->wq);
845 	kfree(nvec);
846 
847 	return 0;
848 }
849 
850 #ifdef CONFIG_PM
851 
static int tegra_nvec_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	dev_dbg(nvec->dev, "suspending\n");
	/* silence the EC, then tell it we are suspending ("\x04\x02" is
	 * presumably the AP-suspend command -- confirm against EC docs) */
	nvec_write_async(nvec, EC_DISABLE_EVENT_REPORTING, 3);
	nvec_write_async(nvec, "\x04\x02", 2);
	nvec_disable_i2c_slave(nvec);

	return 0;
}
863 
static int tegra_nvec_resume(struct platform_device *pdev)
{
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	dev_dbg(nvec->dev, "resuming\n");
	/* reprogram the slave interface (it was reset/disabled in suspend)
	 * and switch event reporting back on */
	tegra_init_i2c_slave(nvec);
	nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING, 3);

	return 0;
}
874 
875 #else
876 #define tegra_nvec_suspend NULL
877 #define tegra_nvec_resume NULL
878 #endif
879 
/* Platform driver glue; uses the legacy suspend/resume callbacks */
static struct platform_driver nvec_device_driver = {
	.probe   = tegra_nvec_probe,
	.remove  = __devexit_p(tegra_nvec_remove),
	.suspend = tegra_nvec_suspend,
	.resume  = tegra_nvec_resume,
	.driver  = {
		.name = "nvec",
		.owner = THIS_MODULE,
	}
};
890 
/* Module entry point: register the platform driver */
static int __init tegra_nvec_init(void)
{
	return platform_driver_register(&nvec_device_driver);
}

module_init(tegra_nvec_init);
897 
898 MODULE_ALIAS("platform:nvec");
899 MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
900 MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
901 MODULE_LICENSE("GPL");
902