xref: /openbmc/linux/drivers/staging/nvec/nvec.c (revision bb0590e2)
1 /*
2  * NVEC: NVIDIA compliant embedded controller interface
3  *
4  * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
5  *
6  * Authors:  Pierre-Hugues Husson <phhusson@free.fr>
7  *           Ilya Petrov <ilya.muromec@gmail.com>
8  *           Marc Dietrich <marvin24@gmx.de>
9  *           Julian Andres Klode <jak@jak-linux.org>
10  *
11  * This file is subject to the terms and conditions of the GNU General Public
12  * License.  See the file "COPYING" in the main directory of this archive
13  * for more details.
14  *
15  */
16 
17 /* #define DEBUG */
18 
19 #include <asm/irq.h>
20 
21 #include <linux/atomic.h>
22 #include <linux/completion.h>
23 #include <linux/interrupt.h>
24 #include <linux/io.h>
25 #include <linux/irq.h>
26 #include <linux/slab.h>
27 #include <linux/gpio.h>
28 #include <linux/serio.h>
29 #include <linux/delay.h>
30 #include <linux/input.h>
31 #include <linux/workqueue.h>
32 #include <linux/clk.h>
33 
34 #include <linux/semaphore.h>
35 #include <linux/list.h>
36 #include <linux/notifier.h>
37 #include <linux/platform_device.h>
38 #include <linux/mfd/core.h>
39 
40 #include <mach/iomap.h>
41 #include <mach/clk.h>
42 
43 #include "nvec.h"
44 
45 #define I2C_CNFG			0x00
46 #define I2C_CNFG_PACKET_MODE_EN		(1<<10)
47 #define I2C_CNFG_NEW_MASTER_SFM		(1<<11)
48 #define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12
49 
50 #define I2C_SL_CNFG		0x20
51 #define I2C_SL_NEWL		(1<<2)
52 #define I2C_SL_NACK		(1<<1)
53 #define I2C_SL_RESP		(1<<0)
54 #define I2C_SL_IRQ		(1<<3)
55 #define END_TRANS		(1<<4)
56 #define RCVD			(1<<2)
57 #define RNW			(1<<1)
58 
59 #define I2C_SL_RCVD		0x24
60 #define I2C_SL_STATUS		0x28
61 #define I2C_SL_ADDR1		0x2c
62 #define I2C_SL_ADDR2		0x30
63 #define I2C_SL_DELAY_COUNT	0x3c
64 
65 /**
66  * enum nvec_msg_category - Message categories for nvec_msg_alloc()
67  * @NVEC_MSG_RX: The message is an incoming message (from EC)
68  * @NVEC_MSG_TX: The message is an outgoing message (to EC)
69  */
70 enum nvec_msg_category  {
71 	NVEC_MSG_RX,
72 	NVEC_MSG_TX,
73 };
74 
75 static const unsigned char EC_DISABLE_EVENT_REPORTING[3] = "\x04\x00\x00";
76 static const unsigned char EC_ENABLE_EVENT_REPORTING[3]  = "\x04\x00\x01";
77 static const unsigned char EC_GET_FIRMWARE_VERSION[2]    = "\x07\x15";
78 
79 static struct nvec_chip *nvec_power_handle;
80 
81 static struct mfd_cell nvec_devices[] = {
82 	{
83 		.name = "nvec-kbd",
84 		.id = 1,
85 	},
86 	{
87 		.name = "nvec-mouse",
88 		.id = 1,
89 	},
90 	{
91 		.name = "nvec-power",
92 		.id = 1,
93 	},
94 	{
95 		.name = "nvec-power",
96 		.id = 2,
97 	},
98 	{
99 		.name = "nvec-leds",
100 		.id = 1,
101 	},
102 };
103 
104 /**
105  * nvec_register_notifier - Register a notifier with nvec
106  * @nvec: A &struct nvec_chip
107  * @nb: The notifier block to register
 * @events: Unused by this driver
108  *
109  * Registers a notifier with @nvec. The notifier will be added to an atomic
110  * notifier chain that is called for all received messages except those that
111  * correspond to a request initiated by nvec_write_sync().
112  */
113 int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
114 			   unsigned int events)
115 {
116 	return atomic_notifier_chain_register(&nvec->notifier_list, nb);
117 }
118 EXPORT_SYMBOL_GPL(nvec_register_notifier);
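
/*
 * Usage sketch (not compiled; names other than nvec_register_notifier()
 * are placeholders): a sub-driver registers its handler the same way
 * tegra_nvec_probe() below registers nvec_status_notifier():
 *
 *	chip->notifier.notifier_call = my_event_handler;
 *	nvec_register_notifier(nvec, &chip->notifier, 0);
 *
 * The handler receives the raw message buffer as its data argument and
 * should return NOTIFY_DONE for events it does not care about, so the
 * rest of the chain still runs.
 */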
119 
120 /**
121  * nvec_status_notifier - The final notifier
122  *
123  * Prints a message about control events not handled in the notifier
124  * chain.
125  */
126 static int nvec_status_notifier(struct notifier_block *nb,
127 				unsigned long event_type, void *data)
128 {
129 	unsigned char *msg = (unsigned char *)data;
130 
131 	if (event_type != NVEC_CNTL)
132 		return NOTIFY_DONE;
133 
134 	printk(KERN_WARNING "unhandled msg type %ld\n", event_type);
135 	print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
136 		msg, msg[1] + 2, true);
137 
138 	return NOTIFY_OK;
139 }
140 
141 /**
142  * nvec_msg_alloc - Allocate a message from the pool
143  * @nvec: A &struct nvec_chip
144  * @category: Pool category, see &enum nvec_msg_category
145  *
146  * Allocate a single &struct nvec_msg object from the message pool of
147  * @nvec. The result shall be passed to nvec_msg_free() if no longer
148  * used.
149  *
150  * Outgoing messages are placed in the upper 75% of the pool, keeping the
151  * lower 25% available for RX buffers only. The reason is to prevent a
152  * situation where all buffers are full and a message is thus endlessly
153  * retried because the response could never be processed.
154  */
155 static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
156 				       enum nvec_msg_category category)
157 {
158 	int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;
159 
160 	for (; i < NVEC_POOL_SIZE; i++) {
161 		if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
162 			dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
163 			return &nvec->msg_pool[i];
164 		}
165 	}
166 
167 	dev_err(nvec->dev, "could not allocate %s buffer\n",
168 		(category == NVEC_MSG_TX) ? "TX" : "RX");
169 
170 	return NULL;
171 }
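
/*
 * Illustration of the pool split described above (derived from the index
 * arithmetic in nvec_msg_alloc(), no new behaviour implied): TX requests
 * only scan slots NVEC_POOL_SIZE/4 .. NVEC_POOL_SIZE-1, while RX requests
 * may use any slot from 0 upwards, so a burst of outgoing messages cannot
 * starve the receive path of buffers.
 */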
172 
173 /**
174  * nvec_msg_free - Free a message allocated from the pool
175  * @nvec: A &struct nvec_chip
176  * @msg:  A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
177  *
178  * Free the given message
179  */
180 inline void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
181 {
182 	if (msg != &nvec->tx_scratch)
183 		dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
184 	atomic_set(&msg->used, 0);
185 }
186 EXPORT_SYMBOL_GPL(nvec_msg_free);
187 
188 /**
189  * nvec_msg_is_event - Return %true if @msg is an event
190  * @msg: A message
191  */
192 static bool nvec_msg_is_event(struct nvec_msg *msg)
193 {
194 	return msg->data[0] >> 7;
195 }
196 
197 /**
198  * nvec_msg_size - Get the size of a message
199  * @msg: The message to get the size for
200  *
201  * This only works for received messages, not for outgoing messages.
202  */
203 static size_t nvec_msg_size(struct nvec_msg *msg)
204 {
205 	bool is_event = nvec_msg_is_event(msg);
206 	int event_length = (msg->data[0] & 0x60) >> 5;
207 
208 	/* for variable size, payload size in byte 1 + count (1) + cmd (1) */
209 	if (!is_event || event_length == NVEC_VAR_SIZE)
210 		return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
211 	else if (event_length == NVEC_2BYTES)
212 		return 2;
213 	else if (event_length == NVEC_3BYTES)
214 		return 3;
215 	else
216 		return 0;
217 }
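
/*
 * Worked example for the size calculation above (values chosen only for
 * illustration): a variable-size message whose count byte data[1] is 0x05
 * has nvec_msg_size() == 0x05 + 2 == 7, i.e. one command byte, one count
 * byte and five payload bytes, matching the "payload size in byte 1 +
 * count (1) + cmd (1)" rule noted in the code.
 */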
218 
219 /**
220  * nvec_gpio_set_value - Set the GPIO value
221  * @nvec: A &struct nvec_chip
222  * @value: The value to write (0 or 1)
223  *
224  * Like gpio_set_value(), but generating debugging information
225  */
226 static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
227 {
228 	dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
229 		gpio_get_value(nvec->gpio), value);
230 	gpio_set_value(nvec->gpio, value);
231 }
232 
233 /**
234  * nvec_write_async - Asynchronously write a message to NVEC
235  * @nvec: An nvec_chip instance
236  * @data: The message data, starting with the request type
237  * @size: The size of @data
238  *
239  * Queue a single message to be transferred to the embedded controller
240  * and return immediately.
241  *
242  * Returns: 0 on success, a negative error code on failure. If a failure
243  * occurred, the nvec driver may print an error.
244  */
245 int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
246 			short size)
247 {
248 	struct nvec_msg *msg;
249 	unsigned long flags;
250 
251 	msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);
252 
253 	if (msg == NULL)
254 		return -ENOMEM;
255 
256 	msg->data[0] = size;
257 	memcpy(msg->data + 1, data, size);
258 	msg->size = size + 1;
259 
260 	spin_lock_irqsave(&nvec->tx_lock, flags);
261 	list_add_tail(&msg->node, &nvec->tx_data);
262 	spin_unlock_irqrestore(&nvec->tx_lock, flags);
263 
264 	queue_work(nvec->wq, &nvec->tx_work);
265 
266 	return 0;
267 }
268 EXPORT_SYMBOL(nvec_write_async);
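
/*
 * Usage sketch, taken from tegra_nvec_probe() below, where enabling the
 * lid switch event is a single fire-and-forget request:
 *
 *	nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x02\x00", 7);
 *
 * The call only queues the message and kicks the tx workqueue; the actual
 * transfer is driven later by nvec_request_master() and nvec_interrupt().
 */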
269 
270 /**
271  * nvec_write_sync - Write a message to nvec and read the response
272  * @nvec: An &struct nvec_chip
273  * @data: The data to write
274  * @size: The size of @data
275  *
276  * This is similar to nvec_write_async(), but waits for the
277  * request to be answered before returning. This function
278  * uses a mutex and can thus not be called from e.g.
279  * interrupt handlers.
280  *
281  * Returns: A pointer to the response message on success,
282  * %NULL on failure. Free with nvec_msg_free() once no longer
283  * used.
284  */
285 struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
286 		const unsigned char *data, short size)
287 {
288 	struct nvec_msg *msg;
289 
290 	mutex_lock(&nvec->sync_write_mutex);
291 
292 	nvec->sync_write_pending = (data[1] << 8) + data[0];
293 
294 	if (nvec_write_async(nvec, data, size) < 0) {
		mutex_unlock(&nvec->sync_write_mutex);
295 		return NULL;
	}
296 
297 	dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
298 					nvec->sync_write_pending);
299 	if (!(wait_for_completion_timeout(&nvec->sync_write,
300 				msecs_to_jiffies(2000)))) {
301 		dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
302 		mutex_unlock(&nvec->sync_write_mutex);
303 		return NULL;
304 	}
305 
306 	dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");
307 
308 	msg = nvec->last_sync_msg;
309 
310 	mutex_unlock(&nvec->sync_write_mutex);
311 
312 	return msg;
313 }
314 EXPORT_SYMBOL(nvec_write_sync);
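
/*
 * Usage sketch, mirroring the firmware version query in tegra_nvec_probe()
 * below:
 *
 *	msg = nvec_write_sync(nvec, EC_GET_FIRMWARE_VERSION,
 *			      sizeof(EC_GET_FIRMWARE_VERSION));
 *	if (msg) {
 *		// probe reads the version from msg->data[4..7]
 *		nvec_msg_free(nvec, msg);
 *	}
 *
 * Because this sleeps on a mutex and a completion it must not be called
 * from atomic context.
 */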
315 
316 /**
317  * nvec_request_master - Process outgoing messages
318  * @work: A &struct work_struct (the tx_work member of &struct nvec_chip)
319  *
320  * Processes all outgoing requests by sending the request and awaiting the
321  * response, then continuing with the next request. Once a request has a
322  * matching response, it will be freed and removed from the list.
323  */
324 static void nvec_request_master(struct work_struct *work)
325 {
326 	struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
327 	unsigned long flags;
328 	long err;
329 	struct nvec_msg *msg;
330 
331 	spin_lock_irqsave(&nvec->tx_lock, flags);
332 	while (!list_empty(&nvec->tx_data)) {
333 		msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
334 		spin_unlock_irqrestore(&nvec->tx_lock, flags);
335 		nvec_gpio_set_value(nvec, 0);
336 		err = wait_for_completion_interruptible_timeout(
337 				&nvec->ec_transfer, msecs_to_jiffies(5000));
338 
339 		if (err == 0) {
340 			dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
341 			nvec_gpio_set_value(nvec, 1);
342 			msg->pos = 0;
343 		}
344 
345 		spin_lock_irqsave(&nvec->tx_lock, flags);
346 
347 		if (err > 0) {
348 			list_del_init(&msg->node);
349 			nvec_msg_free(nvec, msg);
350 		}
351 	}
352 	spin_unlock_irqrestore(&nvec->tx_lock, flags);
353 }
354 
355 /**
356  * parse_msg - Print some information and call the notifiers on an RX message
357  * @nvec: A &struct nvec_chip
358  * @msg: A message received by @nvec
359  *
360  * Parse some pieces of the message and then call the chain of notifiers
361  * registered via nvec_register_notifier().
362  */
363 static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
364 {
365 	if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
366 		dev_err(nvec->dev, "ec responded %02x %02x %02x %02x\n",
367 			msg->data[0], msg->data[1], msg->data[2], msg->data[3]);
368 		return -EINVAL;
369 	}
370 
371 	if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
372 		print_hex_dump(KERN_WARNING, "ec system event ",
373 				DUMP_PREFIX_NONE, 16, 1, msg->data,
374 				msg->data[1] + 2, true);
375 
376 	atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
377 				   msg->data);
378 
379 	return 0;
380 }
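
/*
 * Note on the 0x8f mask above: notifiers see the event bit (bit 7) plus
 * the low type nibble of data[0], with the length bits masked out. A
 * system event (type 5) with the event bit set is therefore delivered as
 * event_type 0x85, the same class of message the hex dump above prints.
 */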
381 
382 /**
383  * nvec_dispatch - Process messages received from the EC
384  * @work: A &struct work_struct (the rx_work member of &struct nvec_chip)
385  *
386  * Process messages previously received from the EC and put into the RX
387  * queue of the &struct nvec_chip instance associated with @work.
388  */
389 static void nvec_dispatch(struct work_struct *work)
390 {
391 	struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
392 	unsigned long flags;
393 	struct nvec_msg *msg;
394 
395 	spin_lock_irqsave(&nvec->rx_lock, flags);
396 	while (!list_empty(&nvec->rx_data)) {
397 		msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
398 		list_del_init(&msg->node);
399 		spin_unlock_irqrestore(&nvec->rx_lock, flags);
400 
401 		if (nvec->sync_write_pending ==
402 		      (msg->data[2] << 8) + msg->data[0]) {
403 			dev_dbg(nvec->dev, "sync write completed!\n");
404 			nvec->sync_write_pending = 0;
405 			nvec->last_sync_msg = msg;
406 			complete(&nvec->sync_write);
407 		} else {
408 			parse_msg(nvec, msg);
409 			nvec_msg_free(nvec, msg);
410 		}
411 		spin_lock_irqsave(&nvec->rx_lock, flags);
412 	}
413 	spin_unlock_irqrestore(&nvec->rx_lock, flags);
414 }
415 
416 /**
417  * nvec_tx_completed - Complete the current transfer
418  * @nvec: A &struct nvec_chip
419  *
420  * This is called when we have received an END_TRANS on a TX transfer.
421  */
422 static void nvec_tx_completed(struct nvec_chip *nvec)
423 {
424 	/* If END_TRANS arrived before the whole message was sent, resend it */
425 	if (nvec->tx->pos != nvec->tx->size) {
426 		dev_err(nvec->dev, "premature END_TRANS, resending\n");
427 		nvec->tx->pos = 0;
428 		nvec_gpio_set_value(nvec, 0);
429 	} else {
430 		nvec->state = 0;
431 	}
432 }
433 
434 /**
435  * nvec_rx_completed - Complete the current transfer
436  * @nvec: A &struct nvec_chip
437  *
438  * This is called when we have received an END_TRANS on an RX transfer.
439  */
440 static void nvec_rx_completed(struct nvec_chip *nvec)
441 {
442 	if (nvec->rx->pos != nvec_msg_size(nvec->rx))
443 		dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
444 			   (uint) nvec_msg_size(nvec->rx),
445 			   (uint) nvec->rx->pos);
446 
447 	spin_lock(&nvec->rx_lock);
448 
449 	/* add the received data to the work list
450 	   and move the ring buffer pointer to the next entry */
451 	list_add_tail(&nvec->rx->node, &nvec->rx_data);
452 
453 	spin_unlock(&nvec->rx_lock);
454 
455 	nvec->state = 0;
456 
457 	if (!nvec_msg_is_event(nvec->rx))
458 		complete(&nvec->ec_transfer);
459 
460 	queue_work(nvec->wq, &nvec->rx_work);
461 }
462 
463 /**
464  * nvec_invalid_flags - Log unexpected status flags, optionally resetting the state machine
465  * @nvec: The nvec device
466  * @status: The status flags
467  * @reset: Whether to reset the state machine back to state 0.
468  */
469 static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
470 			       bool reset)
471 {
472 	dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
473 		status, nvec->state);
474 	if (reset)
475 		nvec->state = 0;
476 }
477 
478 /**
479  * nvec_tx_set - Set the message to transfer (nvec->tx)
480  * @nvec: A &struct nvec_chip
481  *
482  * Gets the first entry from the tx_data list of @nvec and sets the
483  * tx member to it. If the tx_data list is empty, this uses the
484  * tx_scratch message to send a no operation message.
485  */
486 static void nvec_tx_set(struct nvec_chip *nvec)
487 {
488 	spin_lock(&nvec->tx_lock);
489 	if (list_empty(&nvec->tx_data)) {
490 		dev_err(nvec->dev, "empty tx - sending no-op\n");
491 		memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
492 		nvec->tx_scratch.size = 3;
493 		nvec->tx_scratch.pos = 0;
494 		nvec->tx = &nvec->tx_scratch;
495 		list_add_tail(&nvec->tx->node, &nvec->tx_data);
496 	} else {
497 		nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
498 					    node);
499 		nvec->tx->pos = 0;
500 	}
501 	spin_unlock(&nvec->tx_lock);
502 
503 	dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
504 		(uint)nvec->tx->size, nvec->tx->data[1]);
505 }
506 
507 /**
508  * nvec_interrupt - Interrupt handler
509  * @irq: The IRQ
510  * @dev: The nvec device
511  *
512  * Interrupt handler that fills our RX buffers and empties our TX
513  * buffers. This uses a finite state machine with ridiculous amounts
514  * of error checking, in order to be fairly reliable.
515  */
516 static irqreturn_t nvec_interrupt(int irq, void *dev)
517 {
518 	unsigned long status;
519 	unsigned int received = 0;
520 	unsigned char to_send = 0xff;
521 	const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
522 	struct nvec_chip *nvec = dev;
523 	unsigned int state = nvec->state;
524 
525 	status = readl(nvec->base + I2C_SL_STATUS);
526 
527 	/* Filter out some errors */
528 	if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
529 		dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
530 		return IRQ_HANDLED;
531 	}
532 	if ((status & I2C_SL_IRQ) == 0) {
533 		dev_err(nvec->dev, "Spurious IRQ\n");
534 		return IRQ_HANDLED;
535 	}
536 
537 	/* The EC did not request a read, so it sent us something; read it */
538 	if ((status & RNW) == 0) {
539 		received = readl(nvec->base + I2C_SL_RCVD);
540 		if (status & RCVD)
541 			writel(0, nvec->base + I2C_SL_RCVD);
542 	}
543 
544 	if (status == (I2C_SL_IRQ | RCVD))
545 		nvec->state = 0;
546 
547 	switch (nvec->state) {
548 	case 0:		/* Verify that it's a transfer start, the rest later */
549 		if (status != (I2C_SL_IRQ | RCVD))
550 			nvec_invalid_flags(nvec, status, false);
551 		break;
552 	case 1:		/* command byte */
553 		if (status != I2C_SL_IRQ) {
554 			nvec_invalid_flags(nvec, status, true);
555 		} else {
556 			nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
557 			nvec->rx->data[0] = received;
558 			nvec->rx->pos = 1;
559 			nvec->state = 2;
560 		}
561 		break;
562 	case 2:		/* first byte after command */
563 		if (status == (I2C_SL_IRQ | RNW | RCVD)) {
564 			udelay(33);
565 			if (nvec->rx->data[0] != 0x01) {
566 				dev_err(nvec->dev,
567 					"Read without prior read command\n");
568 				nvec->state = 0;
569 				break;
570 			}
571 			nvec_msg_free(nvec, nvec->rx);
572 			nvec->state = 3;
573 			nvec_tx_set(nvec);
574 			BUG_ON(nvec->tx->size < 1);
575 			to_send = nvec->tx->data[0];
576 			nvec->tx->pos = 1;
577 		} else if (status == (I2C_SL_IRQ)) {
578 			BUG_ON(nvec->rx == NULL);
579 			nvec->rx->data[1] = received;
580 			nvec->rx->pos = 2;
581 			nvec->state = 4;
582 		} else {
583 			nvec_invalid_flags(nvec, status, true);
584 		}
585 		break;
586 	case 3:		/* EC does a block read, we transmit data */
587 		if (status & END_TRANS) {
588 			nvec_tx_completed(nvec);
589 		} else if ((status & RNW) == 0 || (status & RCVD)) {
590 			nvec_invalid_flags(nvec, status, true);
591 		} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
592 			to_send = nvec->tx->data[nvec->tx->pos++];
593 		} else {
594 			dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
595 				nvec->tx,
596 				(uint) (nvec->tx ? nvec->tx->pos : 0),
597 				(uint) (nvec->tx ? nvec->tx->size : 0));
598 			nvec->state = 0;
599 		}
600 		break;
601 	case 4:		/* EC does some write, we read the data */
602 		if ((status & (END_TRANS | RNW)) == END_TRANS)
603 			nvec_rx_completed(nvec);
604 		else if (status & (RNW | RCVD))
605 			nvec_invalid_flags(nvec, status, true);
606 		else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
607 			nvec->rx->data[nvec->rx->pos++] = received;
608 		else
609 			dev_err(nvec->dev,
610 				"RX buffer overflow on %p: "
611 				"Trying to write byte %u of %u\n",
612 				nvec->rx, nvec->rx->pos, NVEC_MSG_SIZE);
613 		break;
614 	default:
615 		nvec->state = 0;
616 	}
617 
618 	/* If we are told that a new transfer starts, verify it */
619 	if ((status & (RCVD | RNW)) == RCVD) {
620 		if (received != nvec->i2c_addr)
621 			dev_err(nvec->dev,
622 			"received address 0x%02x, expected 0x%02x\n",
623 			received, nvec->i2c_addr);
624 		nvec->state = 1;
625 	}
626 
627 	/* Send data if requested, but not on end of transmission */
628 	if ((status & (RNW | END_TRANS)) == RNW)
629 		writel(to_send, nvec->base + I2C_SL_RCVD);
630 
631 	/* If we have sent the first byte */
632 	if (status == (I2C_SL_IRQ | RNW | RCVD))
633 		nvec_gpio_set_value(nvec, 1);
634 
635 	dev_dbg(nvec->dev,
636 		"Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
637 		(status & RNW) == 0 ? "received" : "R=",
638 		received,
639 		(status & (RNW | END_TRANS)) ? "sent" : "S=",
640 		to_send,
641 		state,
642 		status & END_TRANS ? " END_TRANS" : "",
643 		status & RCVD ? " RCVD" : "",
644 		status & RNW ? " RNW" : "");
645 
646 	return IRQ_HANDLED;
647 }
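
/*
 * Overview of the state machine above, derived from the switch statement
 * (documentation only, no behavioural meaning):
 *
 *	0: idle, expecting a transfer start (I2C_SL_IRQ | RCVD)
 *	1: our slave address matched, the next byte is the command byte
 *	2: first byte after the command; decides between an EC read
 *	   (we transmit, go to state 3) and an EC write (we receive,
 *	   go to state 4)
 *	3: EC block read, bytes are fed from nvec->tx until END_TRANS
 *	4: EC block write, bytes are stored in nvec->rx until END_TRANS
 */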
648 
649 static void tegra_init_i2c_slave(struct nvec_chip *nvec)
650 {
651 	u32 val;
652 
653 	clk_enable(nvec->i2c_clk);
654 
655 	tegra_periph_reset_assert(nvec->i2c_clk);
656 	udelay(2);
657 	tegra_periph_reset_deassert(nvec->i2c_clk);
658 
659 	val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
660 	    (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
661 	writel(val, nvec->base + I2C_CNFG);
662 
663 	clk_set_rate(nvec->i2c_clk, 8 * 80000);
664 
665 	writel(I2C_SL_NEWL, nvec->base + I2C_SL_CNFG);
666 	writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);
667 
668 	writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1);
669 	writel(0, nvec->base + I2C_SL_ADDR2);
670 
671 	enable_irq(nvec->irq);
672 
673 	clk_disable(nvec->i2c_clk);
674 }
675 
676 static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
677 {
678 	disable_irq(nvec->irq);
679 	writel(I2C_SL_NEWL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
680 	clk_disable(nvec->i2c_clk);
681 }
682 
683 static void nvec_power_off(void)
684 {
685 	nvec_write_async(nvec_power_handle, EC_DISABLE_EVENT_REPORTING, 3);
686 	nvec_write_async(nvec_power_handle, "\x04\x01", 2);
687 }
688 
689 static int __devinit tegra_nvec_probe(struct platform_device *pdev)
690 {
691 	int err, ret;
692 	struct clk *i2c_clk;
693 	struct nvec_platform_data *pdata = pdev->dev.platform_data;
694 	struct nvec_chip *nvec;
695 	struct nvec_msg *msg;
696 	struct resource *res;
697 	struct resource *iomem;
698 	void __iomem *base;
699 
700 	nvec = kzalloc(sizeof(struct nvec_chip), GFP_KERNEL);
701 	if (nvec == NULL) {
702 		dev_err(&pdev->dev, "failed to reserve memory\n");
703 		return -ENOMEM;
704 	}
705 	platform_set_drvdata(pdev, nvec);
706 	nvec->dev = &pdev->dev;
707 	nvec->gpio = pdata->gpio;
708 	nvec->i2c_addr = pdata->i2c_addr;
709 
710 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
711 	if (!res) {
712 		dev_err(&pdev->dev, "no mem resource?\n");
713 		return -ENODEV;
714 	}
715 
716 	iomem = request_mem_region(res->start, resource_size(res), pdev->name);
717 	if (!iomem) {
718 		dev_err(&pdev->dev, "I2C region already claimed\n");
719 		return -EBUSY;
720 	}
721 
722 	base = ioremap(iomem->start, resource_size(iomem));
723 	if (!base) {
724 		dev_err(&pdev->dev, "Can't ioremap I2C region\n");
725 		return -ENOMEM;
726 	}
727 
728 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
729 	if (!res) {
730 		dev_err(&pdev->dev, "no irq resource?\n");
731 		ret = -ENODEV;
732 		goto err_iounmap;
733 	}
734 
735 	i2c_clk = clk_get_sys("tegra-i2c.2", NULL);
736 	if (IS_ERR(i2c_clk)) {
737 		dev_err(nvec->dev, "failed to get controller clock\n");
738 		goto err_iounmap;
739 	}
740 
741 	nvec->base = base;
742 	nvec->irq = res->start;
743 	nvec->i2c_clk = i2c_clk;
744 	nvec->rx = &nvec->msg_pool[0];
745 
746 	/* Set the gpio to low when we've got something to say */
747 	err = gpio_request(nvec->gpio, "nvec gpio");
748 	if (err < 0)
749 		dev_err(nvec->dev, "couldn't request gpio\n");
750 
751 	ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);
752 
753 	init_completion(&nvec->sync_write);
754 	init_completion(&nvec->ec_transfer);
755 	mutex_init(&nvec->sync_write_mutex);
756 	spin_lock_init(&nvec->tx_lock);
757 	spin_lock_init(&nvec->rx_lock);
758 	INIT_LIST_HEAD(&nvec->rx_data);
759 	INIT_LIST_HEAD(&nvec->tx_data);
760 	INIT_WORK(&nvec->rx_work, nvec_dispatch);
761 	INIT_WORK(&nvec->tx_work, nvec_request_master);
762 	nvec->wq = alloc_workqueue("nvec", WQ_NON_REENTRANT, 2);
763 
764 	err = request_irq(nvec->irq, nvec_interrupt, 0, "nvec", nvec);
765 	if (err) {
766 		dev_err(nvec->dev, "couldn't request irq\n");
767 		goto failed;
768 	}
769 	disable_irq(nvec->irq);
770 
771 	tegra_init_i2c_slave(nvec);
772 
773 	clk_enable(i2c_clk);
774 
775 	gpio_direction_output(nvec->gpio, 1);
776 	gpio_set_value(nvec->gpio, 1);
777 
778 	/* enable event reporting */
779 	nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
780 			 sizeof(EC_ENABLE_EVENT_REPORTING));
781 
782 	nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
783 	nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);
784 
785 	nvec_power_handle = nvec;
786 	pm_power_off = nvec_power_off;
787 
788 	/* Get Firmware Version */
789 	msg = nvec_write_sync(nvec, EC_GET_FIRMWARE_VERSION,
790 		sizeof(EC_GET_FIRMWARE_VERSION));
791 
792 	if (msg) {
793 		dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
794 			msg->data[4], msg->data[5], msg->data[6], msg->data[7]);
795 
796 		nvec_msg_free(nvec, msg);
797 	}
798 
799 	ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
800 			      ARRAY_SIZE(nvec_devices), base, 0);
801 	if (ret)
802 		dev_err(nvec->dev, "error adding subdevices\n");
803 
804 	/* unmute speakers? */
805 	nvec_write_async(nvec, "\x0d\x10\x59\x95", 4);
806 
807 	/* enable lid switch event */
808 	nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x02\x00", 7);
809 
810 	/* enable power button event */
811 	nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x80\x00", 7);
812 
813 	return 0;
814 
815 err_iounmap:
816 	iounmap(base);
817 failed:
818 	kfree(nvec);
819 	return -ENOMEM;
820 }
821 
822 static int __devexit tegra_nvec_remove(struct platform_device *pdev)
823 {
824 	struct nvec_chip *nvec = platform_get_drvdata(pdev);
825 
826 	nvec_write_async(nvec, EC_DISABLE_EVENT_REPORTING, 3);
827 	mfd_remove_devices(nvec->dev);
828 	free_irq(nvec->irq, nvec);
829 	iounmap(nvec->base);
830 	gpio_free(nvec->gpio);
831 	destroy_workqueue(nvec->wq);
832 	kfree(nvec);
833 
834 	return 0;
835 }
836 
837 #ifdef CONFIG_PM
838 
839 static int tegra_nvec_suspend(struct platform_device *pdev, pm_message_t state)
840 {
841 	struct nvec_chip *nvec = platform_get_drvdata(pdev);
842 
843 	dev_dbg(nvec->dev, "suspending\n");
844 	nvec_write_async(nvec, EC_DISABLE_EVENT_REPORTING, 3);
845 	nvec_write_async(nvec, "\x04\x02", 2);
846 	nvec_disable_i2c_slave(nvec);
847 
848 	return 0;
849 }
850 
851 static int tegra_nvec_resume(struct platform_device *pdev)
852 {
853 	struct nvec_chip *nvec = platform_get_drvdata(pdev);
854 
855 	dev_dbg(nvec->dev, "resuming\n");
856 	tegra_init_i2c_slave(nvec);
857 	nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING, 3);
858 
859 	return 0;
860 }
861 
862 #else
863 #define tegra_nvec_suspend NULL
864 #define tegra_nvec_resume NULL
865 #endif
866 
867 static struct platform_driver nvec_device_driver = {
868 	.probe   = tegra_nvec_probe,
869 	.remove  = __devexit_p(tegra_nvec_remove),
870 	.suspend = tegra_nvec_suspend,
871 	.resume  = tegra_nvec_resume,
872 	.driver  = {
873 		.name = "nvec",
874 		.owner = THIS_MODULE,
875 	}
876 };
877 
878 static int __init tegra_nvec_init(void)
879 {
880 	return platform_driver_register(&nvec_device_driver);
881 }
882 
883 module_init(tegra_nvec_init);
884 
885 MODULE_ALIAS("platform:nvec");
886 MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
887 MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
888 MODULE_LICENSE("GPL");
889