xref: /openbmc/linux/drivers/staging/nvec/nvec.c (revision b77f2767)
1 /*
2  * NVEC: NVIDIA compliant embedded controller interface
3  *
4  * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
5  *
6  * Authors:  Pierre-Hugues Husson <phhusson@free.fr>
7  *           Ilya Petrov <ilya.muromec@gmail.com>
8  *           Marc Dietrich <marvin24@gmx.de>
9  *           Julian Andres Klode <jak@jak-linux.org>
10  *
11  * This file is subject to the terms and conditions of the GNU General Public
12  * License.  See the file "COPYING" in the main directory of this archive
13  * for more details.
14  *
15  */
16 
17 /* #define DEBUG */
18 
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/atomic.h>
22 #include <linux/clk.h>
23 #include <linux/completion.h>
24 #include <linux/delay.h>
25 #include <linux/err.h>
26 #include <linux/gpio.h>
27 #include <linux/interrupt.h>
28 #include <linux/io.h>
29 #include <linux/irq.h>
30 #include <linux/of.h>
31 #include <linux/of_gpio.h>
32 #include <linux/list.h>
33 #include <linux/mfd/core.h>
34 #include <linux/mutex.h>
35 #include <linux/notifier.h>
36 #include <linux/slab.h>
37 #include <linux/spinlock.h>
38 #include <linux/workqueue.h>
39 
40 #include "nvec.h"
41 
42 #define I2C_CNFG			0x00
43 #define I2C_CNFG_PACKET_MODE_EN		(1 << 10)
44 #define I2C_CNFG_NEW_MASTER_SFM		(1 << 11)
45 #define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12
46 
47 #define I2C_SL_CNFG		0x20
48 #define I2C_SL_NEWSL		(1 << 2)
49 #define I2C_SL_NACK		(1 << 1)
50 #define I2C_SL_RESP		(1 << 0)
51 #define I2C_SL_IRQ		(1 << 3)
52 #define END_TRANS		(1 << 4)
53 #define RCVD			(1 << 2)
54 #define RNW			(1 << 1)
55 
56 #define I2C_SL_RCVD		0x24
57 #define I2C_SL_STATUS		0x28
58 #define I2C_SL_ADDR1		0x2c
59 #define I2C_SL_ADDR2		0x30
60 #define I2C_SL_DELAY_COUNT	0x3c
61 
62 /**
63  * enum nvec_msg_category - Message categories for nvec_msg_alloc()
64  * @NVEC_MSG_RX: The message is an incoming message (from EC)
65  * @NVEC_MSG_TX: The message is an outgoing message (to EC)
66  */
67 enum nvec_msg_category  {
68 	NVEC_MSG_RX,
69 	NVEC_MSG_TX,
70 };
71 
72 enum nvec_sleep_subcmds {
73 	GLOBAL_EVENTS,
74 	AP_PWR_DOWN,
75 	AP_SUSPEND,
76 };
77 
78 #define CNF_EVENT_REPORTING 0x01
79 #define GET_FIRMWARE_VERSION 0x15
80 #define LID_SWITCH BIT(1)
81 #define PWR_BUTTON BIT(15)
82 
83 static struct nvec_chip *nvec_power_handle;
84 
85 static const struct mfd_cell nvec_devices[] = {
86 	{
87 		.name = "nvec-kbd",
88 		.id = 1,
89 	},
90 	{
91 		.name = "nvec-mouse",
92 		.id = 1,
93 	},
94 	{
95 		.name = "nvec-power",
96 		.id = 1,
97 	},
98 	{
99 		.name = "nvec-power",
100 		.id = 2,
101 	},
102 	{
103 		.name = "nvec-paz00",
104 		.id = 1,
105 	},
106 };
107 
108 /**
109  * nvec_register_notifier - Register a notifier with nvec
110  * @nvec: A &struct nvec_chip
111  * @nb: The notifier block to register
 * @events: Unused
112  *
113  * Registers a notifier with @nvec. The notifier will be added to an atomic
114  * notifier chain that is called for all received messages except those that
115  * correspond to a request initiated by nvec_write_sync().
116  */
117 int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
118 			   unsigned int events)
119 {
120 	return atomic_notifier_chain_register(&nvec->notifier_list, nb);
121 }
122 EXPORT_SYMBOL_GPL(nvec_register_notifier);
123 
124 /**
125  * nvec_unregister_notifier - Unregister a notifier with nvec
126  * @nvec: A &struct nvec_chip
127  * @nb: The notifier block to unregister
128  *
129  * Unregisters a notifier with @nvec. The notifier will be removed from the
130  * atomic notifier chain.
131  */
132 int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
133 {
134 	return atomic_notifier_chain_unregister(&nvec->notifier_list, nb);
135 }
136 EXPORT_SYMBOL_GPL(nvec_unregister_notifier);
137 
138 /**
139  * nvec_status_notifier - The final notifier
 * @nb: The notifier block
 * @event_type: The type of the received message
 * @data: The message data
140  *
141  * Prints a message about control events not handled in the notifier
142  * chain.
143  */
144 static int nvec_status_notifier(struct notifier_block *nb,
145 				unsigned long event_type, void *data)
146 {
147 	struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
148 						nvec_status_notifier);
149 	unsigned char *msg = (unsigned char *)data;
150 
151 	if (event_type != NVEC_CNTL)
152 		return NOTIFY_DONE;
153 
154 	dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
155 	print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
156 		msg, msg[1] + 2, true);
157 
158 	return NOTIFY_OK;
159 }
160 
161 /**
162  * nvec_msg_alloc - Allocate a message from the pool of @nvec
163  * @nvec: A &struct nvec_chip
164  * @category: Pool category, see &enum nvec_msg_category
165  *
166  * Allocate a single &struct nvec_msg object from the message pool of
167  * @nvec. The result shall be passed to nvec_msg_free() if no longer
168  * used.
169  *
170  * Outgoing messages are placed in the upper 75% of the pool, keeping the
171  * lower 25% available for RX buffers only. The reason is to prevent a
172  * situation where all buffers are full and a message is thus endlessly
173  * retried because the response could never be processed.
174  */
175 static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
176 				       enum nvec_msg_category category)
177 {
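	/* TX allocations skip the first quarter of the pool,
	 * which is reserved for RX buffers
	 */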
178 	int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;
179 
180 	for (; i < NVEC_POOL_SIZE; i++) {
181 		if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
182 			dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
183 			return &nvec->msg_pool[i];
184 		}
185 	}
186 
187 	dev_err(nvec->dev, "could not allocate %s buffer\n",
188 		(category == NVEC_MSG_TX) ? "TX" : "RX");
189 
190 	return NULL;
191 }
192 
193 /**
194  * nvec_msg_free - Free a message allocated by nvec_msg_alloc()
195  * @nvec: A &struct nvec_chip
196  * @msg:  A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
197  *
198  * Free the given message
199  */
200 void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
201 {
202 	if (msg != &nvec->tx_scratch)
203 		dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
204 	atomic_set(&msg->used, 0);
205 }
206 EXPORT_SYMBOL_GPL(nvec_msg_free);
207 
208 /**
209  * nvec_msg_is_event - Return %true if @msg is an event
210  * @msg: A message
211  */
212 static bool nvec_msg_is_event(struct nvec_msg *msg)
213 {
214 	return msg->data[0] >> 7;
215 }
216 
217 /**
218  * nvec_msg_size - Get the size of a message
219  * @msg: The message to get the size for
220  *
221  * This only works for received messages, not for outgoing messages.
222  */
223 static size_t nvec_msg_size(struct nvec_msg *msg)
224 {
225 	bool is_event = nvec_msg_is_event(msg);
226 	int event_length = (msg->data[0] & 0x60) >> 5;
227 
228 	/* for variable size, payload size in byte 1 + count (1) + cmd (1) */
229 	if (!is_event || event_length == NVEC_VAR_SIZE)
230 		return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
231 	else if (event_length == NVEC_2BYTES)
232 		return 2;
233 	else if (event_length == NVEC_3BYTES)
234 		return 3;
235 	else
236 		return 0;
237 }
238 
239 /**
240  * nvec_gpio_set_value - Set the GPIO value
241  * @nvec: A &struct nvec_chip
242  * @value: The value to write (0 or 1)
243  *
244  * Like gpio_set_value(), but generating debugging information
245  */
246 static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
247 {
248 	dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
249 		gpio_get_value(nvec->gpio), value);
250 	gpio_set_value(nvec->gpio, value);
251 }
252 
253 /**
254  * nvec_write_async - Asynchronously write a message to NVEC
255  * @nvec: An nvec_chip instance
256  * @data: The message data, starting with the request type
257  * @size: The size of @data
258  *
259  * Queue a single message to be transferred to the embedded controller
260  * and return immediately.
261  *
262  * Returns: 0 on success, a negative error code on failure. If a failure
263  * occurred, the nvec driver may print an error.
264  */
265 int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
266 			short size)
267 {
268 	struct nvec_msg *msg;
269 	unsigned long flags;
270 
271 	msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);
272 
273 	if (msg == NULL)
274 		return -ENOMEM;
275 
276 	msg->data[0] = size;
277 	memcpy(msg->data + 1, data, size);
278 	msg->size = size + 1;
279 
280 	spin_lock_irqsave(&nvec->tx_lock, flags);
281 	list_add_tail(&msg->node, &nvec->tx_data);
282 	spin_unlock_irqrestore(&nvec->tx_lock, flags);
283 
284 	schedule_work(&nvec->tx_work);
285 
286 	return 0;
287 }
288 EXPORT_SYMBOL(nvec_write_async);
289 
290 /**
291  * nvec_write_sync - Write a message to nvec and read the response
292  * @nvec: An &struct nvec_chip
293  * @data: The data to write
294  * @size: The size of @data
295  *
296  * This is similar to nvec_write_async(), but waits for the
297  * request to be answered before returning. This function
298  * uses a mutex and can thus not be called from e.g.
299  * interrupt handlers.
300  *
301  * Returns: A pointer to the response message on success,
302  * %NULL on failure. Free with nvec_msg_free() once no longer
303  * used.
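 *
 * A typical caller frees the returned message once it has read the
 * response, as the probe code does for GET_FIRMWARE_VERSION:
 *
 *	msg = nvec_write_sync(nvec, get_firmware_version, 2);
 *	if (msg)
 *		nvec_msg_free(nvec, msg);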
304  */
305 struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
306 		const unsigned char *data, short size)
307 {
308 	struct nvec_msg *msg;
309 
310 	mutex_lock(&nvec->sync_write_mutex);
311 
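	/*
	 * Tag the request so nvec_dispatch() can match the response: the tag
	 * combines byte 0 (type) and byte 1 (command), which nvec_dispatch()
	 * compares against bytes 0 and 2 of each received message.
	 */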
312 	nvec->sync_write_pending = (data[1] << 8) + data[0];
313 
314 	if (nvec_write_async(nvec, data, size) < 0) {
315 		mutex_unlock(&nvec->sync_write_mutex);
316 		return NULL;
317 	}
318 
319 	dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
320 					nvec->sync_write_pending);
321 	if (!(wait_for_completion_timeout(&nvec->sync_write,
322 				msecs_to_jiffies(2000)))) {
323 		dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
324 		mutex_unlock(&nvec->sync_write_mutex);
325 		return NULL;
326 	}
327 
328 	dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");
329 
330 	msg = nvec->last_sync_msg;
331 
332 	mutex_unlock(&nvec->sync_write_mutex);
333 
334 	return msg;
335 }
336 EXPORT_SYMBOL(nvec_write_sync);
337 
338 /**
339  * nvec_toggle_global_events - enables or disables global event reporting
340  * @nvec: nvec handle
341  * @state: true for enable, false for disable
342  *
343  * This switches on/off global event reports by the embedded controller.
344  */
345 static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state)
346 {
347 	unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, state };
348 
349 	nvec_write_async(nvec, global_events, 3);
350 }
351 
352 /**
353  * nvec_event_mask - fill the command string with the event bitfield
354  * @ev: points to the event command string
355  * @mask: bit to insert into the event mask
356  *
357  * Configure event command expects a 32 bit bitfield which describes
358  * which events to enable. The bitfield has the following structure
359  * (from highest byte to lowest):
360  *	system state bits 7-0
361  *	system state bits 15-8
362  *	oem system state bits 7-0
363  *	oem system state bits 15-8
364  */
365 static void nvec_event_mask(char *ev, u32 mask)
366 {
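	/* the mask is written into bytes 3..6 of the command,
	 * in the byte order described above
	 */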
367 	ev[3] = mask >> 16 & 0xff;
368 	ev[4] = mask >> 24 & 0xff;
369 	ev[5] = mask >> 0  & 0xff;
370 	ev[6] = mask >> 8  & 0xff;
371 }
372 
373 /**
374  * nvec_request_master - Process outgoing messages
375  * @work: A &struct work_struct (the tx_work member of &struct nvec_chip)
376  *
377  * Processes all outgoing requests by sending the request and awaiting the
378  * response, then continuing with the next request. Once a request has a
379  * matching response, it will be freed and removed from the list.
380  */
381 static void nvec_request_master(struct work_struct *work)
382 {
383 	struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
384 	unsigned long flags;
385 	long err;
386 	struct nvec_msg *msg;
387 
388 	spin_lock_irqsave(&nvec->tx_lock, flags);
389 	while (!list_empty(&nvec->tx_data)) {
390 		msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
391 		spin_unlock_irqrestore(&nvec->tx_lock, flags);
392 		nvec_gpio_set_value(nvec, 0);
393 		err = wait_for_completion_interruptible_timeout(
394 				&nvec->ec_transfer, msecs_to_jiffies(5000));
395 
396 		if (err == 0) {
397 			dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
398 			nvec_gpio_set_value(nvec, 1);
399 			msg->pos = 0;
400 		}
401 
402 		spin_lock_irqsave(&nvec->tx_lock, flags);
403 
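		/* only a completed transfer is removed from the list; on a
		 * timeout the message stays queued and is retried on the
		 * next loop iteration
		 */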
404 		if (err > 0) {
405 			list_del_init(&msg->node);
406 			nvec_msg_free(nvec, msg);
407 		}
408 	}
409 	spin_unlock_irqrestore(&nvec->tx_lock, flags);
410 }
411 
412 /**
413  * parse_msg - Print some information and call the notifiers on an RX message
414  * @nvec: A &struct nvec_chip
415  * @msg: A message received by @nvec
416  *
417  * Parse some pieces of the message and then call the chain of notifiers
418  * registered via nvec_register_notifier().
419  */
420 static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
421 {
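	/* a cleared bit 7 marks a response; the driver treats a non-zero
	 * byte 3 in a response as an error from the EC
	 */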
422 	if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
423 		dev_err(nvec->dev, "ec responded %*ph\n", 4, msg->data);
424 		return -EINVAL;
425 	}
426 
427 	if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
428 		print_hex_dump(KERN_WARNING, "ec system event ",
429 				DUMP_PREFIX_NONE, 16, 1, msg->data,
430 				msg->data[1] + 2, true);
431 
432 	atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
433 				   msg->data);
434 
435 	return 0;
436 }
437 
438 /**
439  * nvec_dispatch - Process messages received from the EC
440  * @work: A &struct work_struct (the rx_work member of &struct nvec_chip)
441  *
442  * Process messages previously received from the EC and put into the RX
443  * queue of the &struct nvec_chip instance associated with @work.
444  */
445 static void nvec_dispatch(struct work_struct *work)
446 {
447 	struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
448 	unsigned long flags;
449 	struct nvec_msg *msg;
450 
451 	spin_lock_irqsave(&nvec->rx_lock, flags);
452 	while (!list_empty(&nvec->rx_data)) {
453 		msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
454 		list_del_init(&msg->node);
455 		spin_unlock_irqrestore(&nvec->rx_lock, flags);
456 
457 		if (nvec->sync_write_pending ==
458 		      (msg->data[2] << 8) + msg->data[0]) {
459 			dev_dbg(nvec->dev, "sync write completed!\n");
460 			nvec->sync_write_pending = 0;
461 			nvec->last_sync_msg = msg;
462 			complete(&nvec->sync_write);
463 		} else {
464 			parse_msg(nvec, msg);
465 			nvec_msg_free(nvec, msg);
466 		}
467 		spin_lock_irqsave(&nvec->rx_lock, flags);
468 	}
469 	spin_unlock_irqrestore(&nvec->rx_lock, flags);
470 }
471 
472 /**
473  * nvec_tx_completed - Complete the current transfer
474  * @nvec: A &struct nvec_chip
475  *
476  * This is called when we have received an END_TRANS on a TX transfer.
477  */
478 static void nvec_tx_completed(struct nvec_chip *nvec)
479 {
480 	/* On END_TRANS, resend the message if it was not sent completely */
481 	if (nvec->tx->pos != nvec->tx->size) {
482 		dev_err(nvec->dev, "premature END_TRANS, resending\n");
483 		nvec->tx->pos = 0;
484 		nvec_gpio_set_value(nvec, 0);
485 	} else {
486 		nvec->state = 0;
487 	}
488 }
489 
490 /**
491  * nvec_rx_completed - Complete the current transfer
492  * @nvec: A &struct nvec_chip
493  *
494  * This is called when we have received an END_TRANS on a RX transfer.
495  */
496 static void nvec_rx_completed(struct nvec_chip *nvec)
497 {
498 	if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
499 		dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
500 			   (uint) nvec_msg_size(nvec->rx),
501 			   (uint) nvec->rx->pos);
502 
503 		/* Battery quirk - Often incomplete, and likes to crash */
504 		if (nvec->rx->data[0] == NVEC_BAT)
505 			complete(&nvec->ec_transfer);
506 
507 		nvec_msg_free(nvec, nvec->rx);
508 		nvec->state = 0;
509 
510 		return;
511 	}
512 
513 	spin_lock(&nvec->rx_lock);
514 
515 	/* add the received message to the rx_data list and let
516 	 * nvec_dispatch() pick it up from there */
517 	list_add_tail(&nvec->rx->node, &nvec->rx_data);
518 
519 	spin_unlock(&nvec->rx_lock);
520 
521 	nvec->state = 0;
522 
523 	if (!nvec_msg_is_event(nvec->rx))
524 		complete(&nvec->ec_transfer);
525 
526 	schedule_work(&nvec->rx_work);
527 }
528 
529 /**
530  * nvec_invalid_flags - Log unexpected status flags and optionally reset the state machine
531  * @nvec: The nvec device
532  * @status: The status flags
533  * @reset: Whether to reset the state machine to state 0
534  */
535 static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
536 			       bool reset)
537 {
538 	dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
539 		status, nvec->state);
540 	if (reset)
541 		nvec->state = 0;
542 }
543 
544 /**
545  * nvec_tx_set - Set the message to transfer (nvec->tx)
546  * @nvec: A &struct nvec_chip
547  *
548  * Gets the first entry from the tx_data list of @nvec and sets the
549  * tx member to it. If the tx_data list is empty, this uses the
550  * tx_scratch message to send a no operation message.
551  */
552 static void nvec_tx_set(struct nvec_chip *nvec)
553 {
554 	spin_lock(&nvec->tx_lock);
555 	if (list_empty(&nvec->tx_data)) {
556 		dev_err(nvec->dev, "empty tx - sending no-op\n");
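		/* byte 0 is the on-wire length, as in nvec_write_async();
		 * the remaining two bytes form the dummy command
		 */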
557 		memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
558 		nvec->tx_scratch.size = 3;
559 		nvec->tx_scratch.pos = 0;
560 		nvec->tx = &nvec->tx_scratch;
561 		list_add_tail(&nvec->tx->node, &nvec->tx_data);
562 	} else {
563 		nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
564 					    node);
565 		nvec->tx->pos = 0;
566 	}
567 	spin_unlock(&nvec->tx_lock);
568 
569 	dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
570 		(uint)nvec->tx->size, nvec->tx->data[1]);
571 }
572 
573 /**
574  * nvec_interrupt - Interrupt handler
575  * @irq: The IRQ
576  * @dev: The nvec device
577  *
578  * Interrupt handler that fills our RX buffers and empties our TX
579  * buffers. This uses a finite state machine with ridiculous amounts
580  * of error checking, in order to be fairly reliable.
581  */
582 static irqreturn_t nvec_interrupt(int irq, void *dev)
583 {
584 	unsigned long status;
585 	unsigned int received = 0;
586 	unsigned char to_send = 0xff;
587 	const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
588 	struct nvec_chip *nvec = dev;
589 	unsigned int state = nvec->state;
590 
591 	status = readl(nvec->base + I2C_SL_STATUS);
592 
593 	/* Filter out some errors */
594 	if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
595 		dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
596 		return IRQ_HANDLED;
597 	}
598 	if ((status & I2C_SL_IRQ) == 0) {
599 		dev_err(nvec->dev, "Spurious IRQ\n");
600 		return IRQ_HANDLED;
601 	}
602 
603 	/* The EC did not request a read, so it sent us something; read it */
604 	if ((status & RNW) == 0) {
605 		received = readl(nvec->base + I2C_SL_RCVD);
606 		if (status & RCVD)
607 			writel(0, nvec->base + I2C_SL_RCVD);
608 	}
609 
610 	if (status == (I2C_SL_IRQ | RCVD))
611 		nvec->state = 0;
612 
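	/*
	 * State machine: 0 = idle (expect a transfer start), 1 = expect the
	 * command byte, 2 = first byte after the command (decides between RX
	 * and TX), 3 = EC reads and we transmit, 4 = EC writes and we receive.
	 */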
613 	switch (nvec->state) {
614 	case 0:		/* Verify that it's a transfer start; the rest comes later */
615 		if (status != (I2C_SL_IRQ | RCVD))
616 			nvec_invalid_flags(nvec, status, false);
617 		break;
618 	case 1:		/* command byte */
619 		if (status != I2C_SL_IRQ) {
620 			nvec_invalid_flags(nvec, status, true);
621 		} else {
622 			nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
623 			/* Should not happen in a normal world */
624 			if (unlikely(nvec->rx == NULL)) {
625 				nvec->state = 0;
626 				break;
627 			}
628 			nvec->rx->data[0] = received;
629 			nvec->rx->pos = 1;
630 			nvec->state = 2;
631 		}
632 		break;
633 	case 2:		/* first byte after command */
634 		if (status == (I2C_SL_IRQ | RNW | RCVD)) {
635 			udelay(33);
636 			if (nvec->rx->data[0] != 0x01) {
637 				dev_err(nvec->dev,
638 					"Read without prior read command\n");
639 				nvec->state = 0;
640 				break;
641 			}
642 			nvec_msg_free(nvec, nvec->rx);
643 			nvec->state = 3;
644 			nvec_tx_set(nvec);
645 			BUG_ON(nvec->tx->size < 1);
646 			to_send = nvec->tx->data[0];
647 			nvec->tx->pos = 1;
648 		} else if (status == (I2C_SL_IRQ)) {
649 			BUG_ON(nvec->rx == NULL);
650 			nvec->rx->data[1] = received;
651 			nvec->rx->pos = 2;
652 			nvec->state = 4;
653 		} else {
654 			nvec_invalid_flags(nvec, status, true);
655 		}
656 		break;
657 	case 3:		/* EC does a block read, we transmit data */
658 		if (status & END_TRANS) {
659 			nvec_tx_completed(nvec);
660 		} else if ((status & RNW) == 0 || (status & RCVD)) {
661 			nvec_invalid_flags(nvec, status, true);
662 		} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
663 			to_send = nvec->tx->data[nvec->tx->pos++];
664 		} else {
665 			dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
666 				nvec->tx,
667 				(uint) (nvec->tx ? nvec->tx->pos : 0),
668 				(uint) (nvec->tx ? nvec->tx->size : 0));
669 			nvec->state = 0;
670 		}
671 		break;
672 	case 4:		/* EC does some write, we read the data */
673 		if ((status & (END_TRANS | RNW)) == END_TRANS)
674 			nvec_rx_completed(nvec);
675 		else if (status & (RNW | RCVD))
676 			nvec_invalid_flags(nvec, status, true);
677 		else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
678 			nvec->rx->data[nvec->rx->pos++] = received;
679 		else
680 			dev_err(nvec->dev,
681 				"RX buffer overflow on %p: Trying to write byte %u of %u\n",
682 				nvec->rx, nvec->rx ? nvec->rx->pos : 0,
683 				NVEC_MSG_SIZE);
684 		break;
685 	default:
686 		nvec->state = 0;
687 	}
688 
689 	/* If we are told that a new transfer starts, verify it */
690 	if ((status & (RCVD | RNW)) == RCVD) {
691 		if (received != nvec->i2c_addr)
692 			dev_err(nvec->dev,
693 			"received address 0x%02x, expected 0x%02x\n",
694 			received, nvec->i2c_addr);
695 		nvec->state = 1;
696 	}
697 
698 	/* Send data if requested, but not on end of transmission */
699 	if ((status & (RNW | END_TRANS)) == RNW)
700 		writel(to_send, nvec->base + I2C_SL_RCVD);
701 
702 	/* If we have sent the first byte */
703 	if (status == (I2C_SL_IRQ | RNW | RCVD))
704 		nvec_gpio_set_value(nvec, 1);
705 
706 	dev_dbg(nvec->dev,
707 		"Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
708 		(status & RNW) == 0 ? "received" : "R=",
709 		received,
710 		(status & (RNW | END_TRANS)) ? "sent" : "S=",
711 		to_send,
712 		state,
713 		status & END_TRANS ? " END_TRANS" : "",
714 		status & RCVD ? " RCVD" : "",
715 		status & RNW ? " RNW" : "");
716 
717 
718 	/*
719 	 * TODO: A correct fix needs to be found for this.
720 	 *
721 	 * We experience less incomplete messages with this delay than without
722 	 * it, but we don't know why. Help is appreciated.
723 	 */
724 	udelay(100);
725 
726 	return IRQ_HANDLED;
727 }
728 
729 static void tegra_init_i2c_slave(struct nvec_chip *nvec)
730 {
731 	u32 val;
732 
733 	clk_prepare_enable(nvec->i2c_clk);
734 
735 	reset_control_assert(nvec->rst);
736 	udelay(2);
737 	reset_control_deassert(nvec->rst);
738 
739 	val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
740 	    (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
741 	writel(val, nvec->base + I2C_CNFG);
742 
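	/* 8 * 80 kHz; presumably the controller divides this down to the
	 * 80 kHz slave bus clock
	 */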
743 	clk_set_rate(nvec->i2c_clk, 8 * 80000);
744 
745 	writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
746 	writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);
747 
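	/* the "slave-addr" DT property holds the full address byte;
	 * the register takes the 7-bit address, hence the shift
	 */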
748 	writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1);
749 	writel(0, nvec->base + I2C_SL_ADDR2);
750 
751 	enable_irq(nvec->irq);
752 }
753 
754 #ifdef CONFIG_PM_SLEEP
755 static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
756 {
757 	disable_irq(nvec->irq);
758 	writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
759 	clk_disable_unprepare(nvec->i2c_clk);
760 }
761 #endif
762 
763 static void nvec_power_off(void)
764 {
765 	char ap_pwr_down[] = { NVEC_SLEEP, AP_PWR_DOWN };
766 
767 	nvec_toggle_global_events(nvec_power_handle, false);
768 	nvec_write_async(nvec_power_handle, ap_pwr_down, 2);
769 }
770 
771 /*
772  *  Parse common device tree data
773  */
774 static int nvec_i2c_parse_dt_pdata(struct nvec_chip *nvec)
775 {
776 	nvec->gpio = of_get_named_gpio(nvec->dev->of_node, "request-gpios", 0);
777 
778 	if (nvec->gpio < 0) {
779 		dev_err(nvec->dev, "no gpio specified\n");
780 		return -ENODEV;
781 	}
782 
783 	if (of_property_read_u32(nvec->dev->of_node, "slave-addr",
784 				&nvec->i2c_addr)) {
785 		dev_err(nvec->dev, "no i2c address specified\n");
786 		return -ENODEV;
787 	}
788 
789 	return 0;
790 }
791 
792 static int tegra_nvec_probe(struct platform_device *pdev)
793 {
794 	int err, ret;
795 	struct clk *i2c_clk;
796 	struct nvec_chip *nvec;
797 	struct nvec_msg *msg;
798 	struct resource *res;
799 	void __iomem *base;
800 	char	get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION },
801 		unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
802 		enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };
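	/* bytes 3..6 of enable_event are filled in by nvec_event_mask() below */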
803 
804 	if (!pdev->dev.of_node) {
805 		dev_err(&pdev->dev, "must be instantiated using device tree\n");
806 		return -ENODEV;
807 	}
808 
809 	nvec = devm_kzalloc(&pdev->dev, sizeof(struct nvec_chip), GFP_KERNEL);
810 	if (nvec == NULL) {
811 		dev_err(&pdev->dev, "failed to reserve memory\n");
812 		return -ENOMEM;
813 	}
814 	platform_set_drvdata(pdev, nvec);
815 	nvec->dev = &pdev->dev;
816 
817 	err = nvec_i2c_parse_dt_pdata(nvec);
818 	if (err < 0)
819 		return err;
820 
821 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
822 	base = devm_ioremap_resource(&pdev->dev, res);
823 	if (IS_ERR(base))
824 		return PTR_ERR(base);
825 
826 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
827 	if (!res) {
828 		dev_err(&pdev->dev, "no irq resource?\n");
829 		return -ENODEV;
830 	}
831 
832 	i2c_clk = devm_clk_get(&pdev->dev, "div-clk");
833 	if (IS_ERR(i2c_clk)) {
834 		dev_err(nvec->dev, "failed to get controller clock\n");
835 		return -ENODEV;
836 	}
837 
838 	nvec->rst = devm_reset_control_get(&pdev->dev, "i2c");
839 	if (IS_ERR(nvec->rst)) {
840 		dev_err(nvec->dev, "failed to get controller reset\n");
841 		return PTR_ERR(nvec->rst);
842 	}
843 
844 	nvec->base = base;
845 	nvec->irq = res->start;
846 	nvec->i2c_clk = i2c_clk;
847 	nvec->rx = &nvec->msg_pool[0];
848 
849 	ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);
850 
851 	init_completion(&nvec->sync_write);
852 	init_completion(&nvec->ec_transfer);
853 	mutex_init(&nvec->sync_write_mutex);
854 	spin_lock_init(&nvec->tx_lock);
855 	spin_lock_init(&nvec->rx_lock);
856 	INIT_LIST_HEAD(&nvec->rx_data);
857 	INIT_LIST_HEAD(&nvec->tx_data);
858 	INIT_WORK(&nvec->rx_work, nvec_dispatch);
859 	INIT_WORK(&nvec->tx_work, nvec_request_master);
860 
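	/* the request line idles high; nvec_request_master() pulls it low
	 * to ask the EC to start a transfer
	 */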
861 	err = devm_gpio_request_one(&pdev->dev, nvec->gpio, GPIOF_OUT_INIT_HIGH,
862 					"nvec gpio");
863 	if (err < 0) {
864 		dev_err(nvec->dev, "couldn't request gpio\n");
865 		return -ENODEV;
866 	}
867 
868 	err = devm_request_irq(&pdev->dev, nvec->irq, nvec_interrupt, 0,
869 				"nvec", nvec);
870 	if (err) {
871 		dev_err(nvec->dev, "couldn't request irq\n");
872 		return -ENODEV;
873 	}
874 	disable_irq(nvec->irq);
875 
876 	tegra_init_i2c_slave(nvec);
877 
878 	/* enable event reporting */
879 	nvec_toggle_global_events(nvec, true);
880 
881 	nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
882 	nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);
883 
884 	nvec_power_handle = nvec;
885 	pm_power_off = nvec_power_off;
886 
887 	/* Get Firmware Version */
888 	msg = nvec_write_sync(nvec, get_firmware_version, 2);
889 
890 	if (msg) {
891 		dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
892 			msg->data[4], msg->data[5], msg->data[6], msg->data[7]);
893 
894 		nvec_msg_free(nvec, msg);
895 	}
896 
897 	ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
898 			      ARRAY_SIZE(nvec_devices), base, 0, NULL);
899 	if (ret)
900 		dev_err(nvec->dev, "error adding subdevices\n");
901 
902 	/* unmute speakers? */
903 	nvec_write_async(nvec, unmute_speakers, 4);
904 
905 	/* enable lid switch event */
906 	nvec_event_mask(enable_event, LID_SWITCH);
907 	nvec_write_async(nvec, enable_event, 7);
908 
909 	/* enable power button event */
910 	nvec_event_mask(enable_event, PWR_BUTTON);
911 	nvec_write_async(nvec, enable_event, 7);
912 
913 	return 0;
914 }
915 
916 static int tegra_nvec_remove(struct platform_device *pdev)
917 {
918 	struct nvec_chip *nvec = platform_get_drvdata(pdev);
919 
920 	nvec_toggle_global_events(nvec, false);
921 	mfd_remove_devices(nvec->dev);
922 	nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier);
923 	cancel_work_sync(&nvec->rx_work);
924 	cancel_work_sync(&nvec->tx_work);
925 	/* FIXME: needs to check whether nvec is responsible for power off */
926 	pm_power_off = NULL;
927 
928 	return 0;
929 }
930 
931 #ifdef CONFIG_PM_SLEEP
932 static int nvec_suspend(struct device *dev)
933 {
934 	struct platform_device *pdev = to_platform_device(dev);
935 	struct nvec_chip *nvec = platform_get_drvdata(pdev);
936 	struct nvec_msg *msg;
937 	char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND };
938 
939 	dev_dbg(nvec->dev, "suspending\n");
940 
941 	/* keep these calls synchronous or you'll break suspend */
942 	nvec_toggle_global_events(nvec, false);
943 
944 	msg = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend));
945 	if (msg)
		nvec_msg_free(nvec, msg);
946 
947 	nvec_disable_i2c_slave(nvec);
948 
949 	return 0;
950 }
951 
952 static int nvec_resume(struct device *dev)
953 {
954 	struct platform_device *pdev = to_platform_device(dev);
955 	struct nvec_chip *nvec = platform_get_drvdata(pdev);
956 
957 	dev_dbg(nvec->dev, "resuming\n");
958 	tegra_init_i2c_slave(nvec);
959 	nvec_toggle_global_events(nvec, true);
960 
961 	return 0;
962 }
963 #endif
964 
965 static const SIMPLE_DEV_PM_OPS(nvec_pm_ops, nvec_suspend, nvec_resume);
966 
967 /* Match table for of_platform binding */
968 static const struct of_device_id nvidia_nvec_of_match[] = {
969 	{ .compatible = "nvidia,nvec", },
970 	{},
971 };
972 MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);
973 
974 static struct platform_driver nvec_device_driver = {
975 	.probe   = tegra_nvec_probe,
976 	.remove  = tegra_nvec_remove,
977 	.driver  = {
978 		.name = "nvec",
979 		.owner = THIS_MODULE,
980 		.pm = &nvec_pm_ops,
981 		.of_match_table = nvidia_nvec_of_match,
982 	}
983 };
984 
985 module_platform_driver(nvec_device_driver);
986 
987 MODULE_ALIAS("platform:nvec");
988 MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
989 MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
990 MODULE_LICENSE("GPL");
991