/*
 * NVEC: NVIDIA compliant embedded controller interface
 *
 * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
 *
 * Authors:  Pierre-Hugues Husson <phhusson@free.fr>
 *           Ilya Petrov <ilya.muromec@gmail.com>
 *           Marc Dietrich <marvin24@gmx.de>
 *           Julian Andres Klode <jak@jak-linux.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */

/* #define DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/list.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/clk/tegra.h>

#include "nvec.h"

#define I2C_CNFG			0x00
#define I2C_CNFG_PACKET_MODE_EN		(1<<10)
#define I2C_CNFG_NEW_MASTER_SFM		(1<<11)
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12

#define I2C_SL_CNFG		0x20
#define I2C_SL_NEWSL		(1<<2)
#define I2C_SL_NACK		(1<<1)
#define I2C_SL_RESP		(1<<0)
#define I2C_SL_IRQ		(1<<3)
#define END_TRANS		(1<<4)
#define RCVD			(1<<2)
#define RNW			(1<<1)

#define I2C_SL_RCVD		0x24
#define I2C_SL_STATUS		0x28
#define I2C_SL_ADDR1		0x2c
#define I2C_SL_ADDR2		0x30
#define I2C_SL_DELAY_COUNT	0x3c

/**
 * enum nvec_msg_category - Message categories for nvec_msg_alloc()
 * @NVEC_MSG_RX: The message is an incoming message (from EC)
 * @NVEC_MSG_TX: The message is an outgoing message (to EC)
 */
enum nvec_msg_category {
	NVEC_MSG_RX,
	NVEC_MSG_TX,
};

enum nvec_sleep_subcmds {
	GLOBAL_EVENTS,
	AP_PWR_DOWN,
	AP_SUSPEND,
};

#define CNF_EVENT_REPORTING 0x01
#define GET_FIRMWARE_VERSION 0x15
#define LID_SWITCH BIT(1)
#define PWR_BUTTON BIT(15)

static struct nvec_chip *nvec_power_handle;

static struct mfd_cell nvec_devices[] = {
	{
		.name = "nvec-kbd",
		.id = 1,
	},
	{
		.name = "nvec-mouse",
		.id = 1,
	},
	{
		.name = "nvec-power",
		.id = 1,
	},
	{
		.name = "nvec-power",
		.id = 2,
	},
	{
		.name = "nvec-paz00",
		.id = 1,
	},
};

/**
 * nvec_register_notifier - Register a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to register
 * @events: Unused
 *
 * Registers a notifier with @nvec. The notifier will be added to an atomic
 * notifier chain that is called for all received messages except those that
 * correspond to a request initiated by nvec_write_sync().
 */
int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
			   unsigned int events)
{
	return atomic_notifier_chain_register(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_register_notifier);
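
/*
 * Usage sketch (illustrative, not part of the driver): a sub-device that
 * wants to see EC events registers a notifier block, much like
 * nvec_status_notifier is registered in tegra_nvec_probe(). The callback
 * name below is hypothetical.
 *
 *	nb->notifier_call = my_event_handler;
 *	nvec_register_notifier(nvec, nb, 0);
 */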

/**
 * nvec_status_notifier - The final notifier
 *
 * Prints a message about control events not handled in the notifier
 * chain.
 */
static int nvec_status_notifier(struct notifier_block *nb,
				unsigned long event_type, void *data)
{
	struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
						nvec_status_notifier);
	unsigned char *msg = (unsigned char *)data;

	if (event_type != NVEC_CNTL)
		return NOTIFY_DONE;

	dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
	print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
		msg, msg[1] + 2, true);

	return NOTIFY_OK;
}

/**
 * nvec_msg_alloc - Allocate a message from the message pool
 * @nvec: A &struct nvec_chip
 * @category: Pool category, see &enum nvec_msg_category
 *
 * Allocate a single &struct nvec_msg object from the message pool of
 * @nvec. The result shall be passed to nvec_msg_free() if no longer
 * used.
 *
 * Outgoing messages are placed in the upper 75% of the pool, keeping the
 * lower 25% available for RX buffers only. The reason is to prevent a
 * situation where all buffers are full and a message is thus endlessly
 * retried because the response could never be processed.
 */
static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
				       enum nvec_msg_category category)
{
	int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;

	for (; i < NVEC_POOL_SIZE; i++) {
		if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
			dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
			return &nvec->msg_pool[i];
		}
	}

	dev_err(nvec->dev, "could not allocate %s buffer\n",
		(category == NVEC_MSG_TX) ? "TX" : "RX");

	return NULL;
}

/**
 * nvec_msg_free - Free a message back to the message pool
 * @nvec: A &struct nvec_chip
 * @msg:  A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
 *
 * Free the given message
 */
inline void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if (msg != &nvec->tx_scratch)
		dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
	atomic_set(&msg->used, 0);
}
EXPORT_SYMBOL_GPL(nvec_msg_free);

/**
 * nvec_msg_is_event - Return %true if @msg is an event
 * @msg: A message
 */
static bool nvec_msg_is_event(struct nvec_msg *msg)
{
	return msg->data[0] >> 7;
}

/**
 * nvec_msg_size - Get the size of a message
 * @msg: The message to get the size for
 *
 * This only works for received messages, not for outgoing messages.
 */
static size_t nvec_msg_size(struct nvec_msg *msg)
{
	bool is_event = nvec_msg_is_event(msg);
	int event_length = (msg->data[0] & 0x60) >> 5;

	/* for variable size, payload size in byte 1 + count (1) + cmd (1) */
	if (!is_event || event_length == NVEC_VAR_SIZE)
		return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
	else if (event_length == NVEC_2BYTES)
		return 2;
	else if (event_length == NVEC_3BYTES)
		return 3;
	else
		return 0;
}

/**
 * nvec_gpio_set_value - Set the GPIO value
 * @nvec: A &struct nvec_chip
 * @value: The value to write (0 or 1)
 *
 * Like gpio_set_value(), but generating debugging information
 */
static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
{
	dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
		gpio_get_value(nvec->gpio), value);
	gpio_set_value(nvec->gpio, value);
}

/**
 * nvec_write_async - Asynchronously write a message to NVEC
 * @nvec: An nvec_chip instance
 * @data: The message data, starting with the request type
 * @size: The size of @data
 *
 * Queue a single message to be transferred to the embedded controller
 * and return immediately.
 *
 * Returns: 0 on success, a negative error code on failure. If a failure
 * occurred, the nvec driver may print an error.
 */
int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
			short size)
{
	struct nvec_msg *msg;
	unsigned long flags;

	msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);

	if (msg == NULL)
		return -ENOMEM;

	msg->data[0] = size;
	memcpy(msg->data + 1, data, size);
	msg->size = size + 1;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	list_add_tail(&msg->node, &nvec->tx_data);
	spin_unlock_irqrestore(&nvec->tx_lock, flags);

	schedule_work(&nvec->tx_work);

	return 0;
}
EXPORT_SYMBOL(nvec_write_async);
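
/*
 * Usage sketch (illustrative): queueing a request without waiting for the
 * EC's response. The bytes mirror the "unmute speakers" request sent from
 * tegra_nvec_probe().
 *
 *	static const unsigned char unmute[] = { NVEC_OEM0, 0x10, 0x59, 0x95 };
 *
 *	if (nvec_write_async(nvec, unmute, sizeof(unmute)) < 0)
 *		dev_err(nvec->dev, "could not queue unmute request\n");
 */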

/**
 * nvec_write_sync - Write a message to nvec and read the response
 * @nvec: An &struct nvec_chip
 * @data: The data to write
 * @size: The size of @data
 *
 * This is similar to nvec_write_async(), but waits for the
 * request to be answered before returning. This function
 * uses a mutex and can thus not be called from e.g.
 * interrupt handlers.
 *
 * Returns: A pointer to the response message on success,
 * %NULL on failure. Free with nvec_msg_free() once no longer
 * used.
 */
struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
		const unsigned char *data, short size)
{
	struct nvec_msg *msg;

	mutex_lock(&nvec->sync_write_mutex);

	nvec->sync_write_pending = (data[1] << 8) + data[0];

	if (nvec_write_async(nvec, data, size) < 0) {
		mutex_unlock(&nvec->sync_write_mutex);
		return NULL;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
					nvec->sync_write_pending);
	if (!(wait_for_completion_timeout(&nvec->sync_write,
				msecs_to_jiffies(2000)))) {
		dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
		mutex_unlock(&nvec->sync_write_mutex);
		return NULL;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");

	msg = nvec->last_sync_msg;

	mutex_unlock(&nvec->sync_write_mutex);

	return msg;
}
EXPORT_SYMBOL(nvec_write_sync);
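
/*
 * Usage sketch (illustrative): a synchronous request/response round trip,
 * mirroring the firmware version query in tegra_nvec_probe(). The caller
 * must be able to sleep and must free the response.
 *
 *	const unsigned char req[] = { NVEC_CNTL, GET_FIRMWARE_VERSION };
 *	struct nvec_msg *resp = nvec_write_sync(nvec, req, sizeof(req));
 *
 *	if (resp) {
 *		... evaluate resp->data ...
 *		nvec_msg_free(nvec, resp);
 *	}
 */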

/**
 * nvec_toggle_global_events - enables or disables global event reporting
 * @nvec: nvec handle
 * @state: true for enable, false for disable
 *
 * This switches on/off global event reports by the embedded controller.
 */
static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state)
{
	unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, state };

	nvec_write_async(nvec, global_events, 3);
}

/**
 * nvec_event_mask - fill the command string with event bitfield
 * @ev: points to event command string
 * @mask: bit to insert into the event mask
 *
 * Configure event command expects a 32 bit bitfield which describes
 * which events to enable. The bitfield has the following structure
 * (from highest byte to lowest):
 *	system state bits 7-0
 *	system state bits 15-8
 *	oem system state bits 7-0
 *	oem system state bits 15-8
 */
static void nvec_event_mask(char *ev, u32 mask)
{
	ev[3] = mask >> 16 & 0xff;
	ev[4] = mask >> 24 & 0xff;
	ev[5] = mask >> 0  & 0xff;
	ev[6] = mask >> 8  & 0xff;
}
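
/*
 * Example (derived from the code above): LID_SWITCH is BIT(1), so
 * nvec_event_mask() stores 0x02 in ev[5] (mask bits 7-0); PWR_BUTTON is
 * BIT(15), so it stores 0x80 in ev[6] (mask bits 15-8). ev[3] and ev[4]
 * receive mask bits 23-16 and 31-24 and stay zero for both events enabled
 * in tegra_nvec_probe().
 */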

/**
 * nvec_request_master - Process outgoing messages
 * @work: A &struct work_struct (the tx_work member of &struct nvec_chip)
 *
 * Processes all outgoing requests by sending the request and awaiting the
 * response, then continuing with the next request. Once a request has a
 * matching response, it will be freed and removed from the list.
 */
static void nvec_request_master(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
	unsigned long flags;
	long err;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	while (!list_empty(&nvec->tx_data)) {
		msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
		spin_unlock_irqrestore(&nvec->tx_lock, flags);
		nvec_gpio_set_value(nvec, 0);
		err = wait_for_completion_interruptible_timeout(
				&nvec->ec_transfer, msecs_to_jiffies(5000));

		if (err == 0) {
			dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
			nvec_gpio_set_value(nvec, 1);
			msg->pos = 0;
		}

		spin_lock_irqsave(&nvec->tx_lock, flags);

		if (err > 0) {
			list_del_init(&msg->node);
			nvec_msg_free(nvec, msg);
		}
	}
	spin_unlock_irqrestore(&nvec->tx_lock, flags);
}

/**
 * parse_msg - Print some information and call the notifiers on an RX message
 * @nvec: A &struct nvec_chip
 * @msg: A message received by @nvec
 *
 * Parse some pieces of the message and then call the chain of notifiers
 * registered via nvec_register_notifier().
 */
static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
		dev_err(nvec->dev, "ec responded %*ph\n", 4, msg->data);
		return -EINVAL;
	}

	if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
		print_hex_dump(KERN_WARNING, "ec system event ",
				DUMP_PREFIX_NONE, 16, 1, msg->data,
				msg->data[1] + 2, true);

	atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
				   msg->data);

	return 0;
}

/**
 * nvec_dispatch - Process messages received from the EC
 * @work: A &struct work_struct (the rx_work member of &struct nvec_chip)
 *
 * Process messages previously received from the EC and put into the RX
 * queue of the &struct nvec_chip instance associated with @work.
 */
static void nvec_dispatch(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
	unsigned long flags;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->rx_lock, flags);
	while (!list_empty(&nvec->rx_data)) {
		msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
		list_del_init(&msg->node);
		spin_unlock_irqrestore(&nvec->rx_lock, flags);

		if (nvec->sync_write_pending ==
		      (msg->data[2] << 8) + msg->data[0]) {
			dev_dbg(nvec->dev, "sync write completed!\n");
			nvec->sync_write_pending = 0;
			nvec->last_sync_msg = msg;
			complete(&nvec->sync_write);
		} else {
			parse_msg(nvec, msg);
			nvec_msg_free(nvec, msg);
		}
		spin_lock_irqsave(&nvec->rx_lock, flags);
	}
	spin_unlock_irqrestore(&nvec->rx_lock, flags);
}

/**
 * nvec_tx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on a TX transfer.
 */
static void nvec_tx_completed(struct nvec_chip *nvec)
{
	/* We got an END_TRANS, let's skip this, maybe there's an event */
	if (nvec->tx->pos != nvec->tx->size) {
		dev_err(nvec->dev, "premature END_TRANS, resending\n");
		nvec->tx->pos = 0;
		nvec_gpio_set_value(nvec, 0);
	} else {
		nvec->state = 0;
	}
}

/**
 * nvec_rx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on an RX transfer.
 */
static void nvec_rx_completed(struct nvec_chip *nvec)
{
	if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
		dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
			   (uint) nvec_msg_size(nvec->rx),
			   (uint) nvec->rx->pos);

		/* Battery quirk - Often incomplete, and likes to crash */
		if (nvec->rx->data[0] == NVEC_BAT)
			complete(&nvec->ec_transfer);

		nvec_msg_free(nvec, nvec->rx);
		nvec->state = 0;

		return;
	}

	spin_lock(&nvec->rx_lock);

	/*
	 * add the received data to the work list
	 * and move the ring buffer pointer to the next entry
	 */
	list_add_tail(&nvec->rx->node, &nvec->rx_data);

	spin_unlock(&nvec->rx_lock);

	nvec->state = 0;

	if (!nvec_msg_is_event(nvec->rx))
		complete(&nvec->ec_transfer);

	schedule_work(&nvec->rx_work);
}

/**
 * nvec_invalid_flags - Log an error about unexpected status flags
 * @nvec: The nvec device
 * @status: The status flags
 * @reset: Whether we shall jump to state 0.
 */
static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
			       bool reset)
{
	dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
		status, nvec->state);
	if (reset)
		nvec->state = 0;
}

/**
 * nvec_tx_set - Set the message to transfer (nvec->tx)
 * @nvec: A &struct nvec_chip
 *
 * Gets the first entry from the tx_data list of @nvec and sets the
 * tx member to it. If the tx_data list is empty, this uses the
 * tx_scratch message to send a no operation message.
 */
static void nvec_tx_set(struct nvec_chip *nvec)
{
	spin_lock(&nvec->tx_lock);
	if (list_empty(&nvec->tx_data)) {
		dev_err(nvec->dev, "empty tx - sending no-op\n");
		memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
		nvec->tx_scratch.size = 3;
		nvec->tx_scratch.pos = 0;
		nvec->tx = &nvec->tx_scratch;
		list_add_tail(&nvec->tx->node, &nvec->tx_data);
	} else {
		nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
					    node);
		nvec->tx->pos = 0;
	}
	spin_unlock(&nvec->tx_lock);

	dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
		(uint)nvec->tx->size, nvec->tx->data[1]);
}

/**
 * nvec_interrupt - Interrupt handler
 * @irq: The IRQ
 * @dev: The nvec device
 *
 * Interrupt handler that fills our RX buffers and empties our TX
 * buffers. This uses a finite state machine with ridiculous amounts
 * of error checking, in order to be fairly reliable.
 */
static irqreturn_t nvec_interrupt(int irq, void *dev)
{
	unsigned long status;
	unsigned int received = 0;
	unsigned char to_send = 0xff;
	const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
	struct nvec_chip *nvec = dev;
	unsigned int state = nvec->state;

	status = readl(nvec->base + I2C_SL_STATUS);

	/* Filter out some errors */
	if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
		dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
		return IRQ_HANDLED;
	}
	if ((status & I2C_SL_IRQ) == 0) {
		dev_err(nvec->dev, "Spurious IRQ\n");
		return IRQ_HANDLED;
	}

	/* The EC did not request a read, so it sent us something; read it */
	if ((status & RNW) == 0) {
		received = readl(nvec->base + I2C_SL_RCVD);
		if (status & RCVD)
			writel(0, nvec->base + I2C_SL_RCVD);
	}

	if (status == (I2C_SL_IRQ | RCVD))
		nvec->state = 0;

	switch (nvec->state) {
	case 0:		/* Verify that it's a transfer start, the rest later */
		if (status != (I2C_SL_IRQ | RCVD))
			nvec_invalid_flags(nvec, status, false);
		break;
	case 1:		/* command byte */
		if (status != I2C_SL_IRQ) {
			nvec_invalid_flags(nvec, status, true);
		} else {
			nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
			/* Should not happen in a normal world */
			if (unlikely(nvec->rx == NULL)) {
				nvec->state = 0;
				break;
			}
			nvec->rx->data[0] = received;
			nvec->rx->pos = 1;
			nvec->state = 2;
		}
		break;
	case 2:		/* first byte after command */
		if (status == (I2C_SL_IRQ | RNW | RCVD)) {
			udelay(33);
			if (nvec->rx->data[0] != 0x01) {
				dev_err(nvec->dev,
					"Read without prior read command\n");
				nvec->state = 0;
				break;
			}
			nvec_msg_free(nvec, nvec->rx);
			nvec->state = 3;
			nvec_tx_set(nvec);
			BUG_ON(nvec->tx->size < 1);
			to_send = nvec->tx->data[0];
			nvec->tx->pos = 1;
		} else if (status == (I2C_SL_IRQ)) {
			BUG_ON(nvec->rx == NULL);
			nvec->rx->data[1] = received;
			nvec->rx->pos = 2;
			nvec->state = 4;
		} else {
			nvec_invalid_flags(nvec, status, true);
		}
		break;
	case 3:		/* EC does a block read, we transmit data */
		if (status & END_TRANS) {
			nvec_tx_completed(nvec);
		} else if ((status & RNW) == 0 || (status & RCVD)) {
			nvec_invalid_flags(nvec, status, true);
		} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
			to_send = nvec->tx->data[nvec->tx->pos++];
		} else {
			dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
				nvec->tx,
				(uint) (nvec->tx ? nvec->tx->pos : 0),
				(uint) (nvec->tx ? nvec->tx->size : 0));
			nvec->state = 0;
		}
		break;
	case 4:		/* EC does some write, we read the data */
		if ((status & (END_TRANS | RNW)) == END_TRANS)
			nvec_rx_completed(nvec);
		else if (status & (RNW | RCVD))
			nvec_invalid_flags(nvec, status, true);
		else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
			nvec->rx->data[nvec->rx->pos++] = received;
		else
			dev_err(nvec->dev,
				"RX buffer overflow on %p: "
				"Trying to write byte %u of %u\n",
				nvec->rx, nvec->rx->pos, NVEC_MSG_SIZE);
		break;
	default:
		nvec->state = 0;
	}

	/* If we are told that a new transfer starts, verify it */
	if ((status & (RCVD | RNW)) == RCVD) {
		if (received != nvec->i2c_addr)
			dev_err(nvec->dev,
			"received address 0x%02x, expected 0x%02x\n",
			received, nvec->i2c_addr);
		nvec->state = 1;
	}

	/* Send data if requested, but not on end of transmission */
	if ((status & (RNW | END_TRANS)) == RNW)
		writel(to_send, nvec->base + I2C_SL_RCVD);

	/* If we have sent the first byte */
	if (status == (I2C_SL_IRQ | RNW | RCVD))
		nvec_gpio_set_value(nvec, 1);

	dev_dbg(nvec->dev,
		"Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
		(status & RNW) == 0 ? "received" : "R=",
		received,
		(status & (RNW | END_TRANS)) ? "sent" : "S=",
		to_send,
		state,
		status & END_TRANS ? " END_TRANS" : "",
		status & RCVD ? " RCVD" : "",
		status & RNW ? " RNW" : "");

	/*
	 * TODO: A correct fix needs to be found for this.
	 *
	 * We experience fewer incomplete messages with this delay than
	 * without it, but we don't know why. Help is appreciated.
	 */
	udelay(100);

	return IRQ_HANDLED;
}
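
/*
 * State machine summary (derived from the switch statement above, for
 * reference only):
 *
 *	0: idle; expect a transfer start (I2C_SL_IRQ | RCVD with our address)
 *	1: expect the command byte from the EC
 *	2: first byte after the command; a read request from the EC moves to
 *	   state 3, an incoming write continues in state 4
 *	3: the EC reads a block; we feed it bytes from nvec->tx
 *	4: the EC writes; we collect bytes into nvec->rx until END_TRANS
 */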

static void tegra_init_i2c_slave(struct nvec_chip *nvec)
{
	u32 val;

	clk_prepare_enable(nvec->i2c_clk);

	tegra_periph_reset_assert(nvec->i2c_clk);
	udelay(2);
	tegra_periph_reset_deassert(nvec->i2c_clk);

	val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
	    (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
	writel(val, nvec->base + I2C_CNFG);

	clk_set_rate(nvec->i2c_clk, 8 * 80000);

	writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
	writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);

	writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1);
	writel(0, nvec->base + I2C_SL_ADDR2);

	enable_irq(nvec->irq);

	clk_disable_unprepare(nvec->i2c_clk);
}

#ifdef CONFIG_PM_SLEEP
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
	disable_irq(nvec->irq);
	writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
	clk_disable_unprepare(nvec->i2c_clk);
}
#endif

static void nvec_power_off(void)
{
	char ap_pwr_down[] = { NVEC_SLEEP, AP_PWR_DOWN };

	nvec_toggle_global_events(nvec_power_handle, false);
	nvec_write_async(nvec_power_handle, ap_pwr_down, 2);
}

static int tegra_nvec_probe(struct platform_device *pdev)
{
	int err, ret;
	struct clk *i2c_clk;
	struct nvec_platform_data *pdata = pdev->dev.platform_data;
	struct nvec_chip *nvec;
	struct nvec_msg *msg;
	struct resource *res;
	void __iomem *base;
	char	get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION },
		unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
		enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };

	nvec = devm_kzalloc(&pdev->dev, sizeof(struct nvec_chip), GFP_KERNEL);
	if (nvec == NULL) {
		dev_err(&pdev->dev, "failed to reserve memory\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, nvec);
	nvec->dev = &pdev->dev;

	if (pdata) {
		nvec->gpio = pdata->gpio;
		nvec->i2c_addr = pdata->i2c_addr;
	} else if (nvec->dev->of_node) {
		nvec->gpio = of_get_named_gpio(nvec->dev->of_node,
					"request-gpios", 0);
		if (nvec->gpio < 0) {
			dev_err(&pdev->dev, "no gpio specified");
			return -ENODEV;
		}
		if (of_property_read_u32(nvec->dev->of_node,
					"slave-addr", &nvec->i2c_addr)) {
			dev_err(&pdev->dev, "no i2c address specified");
			return -ENODEV;
		}
	} else {
		dev_err(&pdev->dev, "no platform data\n");
		return -ENODEV;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -ENODEV;
	}

	i2c_clk = clk_get(&pdev->dev, "div-clk");
	if (IS_ERR(i2c_clk)) {
		dev_err(nvec->dev, "failed to get controller clock\n");
		return -ENODEV;
	}

	nvec->base = base;
	nvec->irq = res->start;
	nvec->i2c_clk = i2c_clk;
	nvec->rx = &nvec->msg_pool[0];

	ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);

	init_completion(&nvec->sync_write);
	init_completion(&nvec->ec_transfer);
	mutex_init(&nvec->sync_write_mutex);
	spin_lock_init(&nvec->tx_lock);
	spin_lock_init(&nvec->rx_lock);
	INIT_LIST_HEAD(&nvec->rx_data);
	INIT_LIST_HEAD(&nvec->tx_data);
	INIT_WORK(&nvec->rx_work, nvec_dispatch);
	INIT_WORK(&nvec->tx_work, nvec_request_master);

	err = devm_gpio_request_one(&pdev->dev, nvec->gpio, GPIOF_OUT_INIT_HIGH,
					"nvec gpio");
	if (err < 0) {
		dev_err(nvec->dev, "couldn't request gpio\n");
		return -ENODEV;
	}

	err = devm_request_irq(&pdev->dev, nvec->irq, nvec_interrupt, 0,
				"nvec", nvec);
	if (err) {
		dev_err(nvec->dev, "couldn't request irq\n");
		return -ENODEV;
	}
	disable_irq(nvec->irq);

	tegra_init_i2c_slave(nvec);

	clk_prepare_enable(i2c_clk);

	/* enable event reporting */
	nvec_toggle_global_events(nvec, true);

	nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
	nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);

	nvec_power_handle = nvec;
	pm_power_off = nvec_power_off;

	/* Get Firmware Version */
	msg = nvec_write_sync(nvec, get_firmware_version, 2);

	if (msg) {
		dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
			msg->data[4], msg->data[5], msg->data[6], msg->data[7]);

		nvec_msg_free(nvec, msg);
	}

	ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
			      ARRAY_SIZE(nvec_devices), base, 0, NULL);
	if (ret)
		dev_err(nvec->dev, "error adding subdevices\n");

	/* unmute speakers? */
	nvec_write_async(nvec, unmute_speakers, 4);

	/* enable lid switch event */
	nvec_event_mask(enable_event, LID_SWITCH);
	nvec_write_async(nvec, enable_event, 7);

	/* enable power button event */
	nvec_event_mask(enable_event, PWR_BUTTON);
	nvec_write_async(nvec, enable_event, 7);

	return 0;
}

static int tegra_nvec_remove(struct platform_device *pdev)
{
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	nvec_toggle_global_events(nvec, false);
	mfd_remove_devices(nvec->dev);
	cancel_work_sync(&nvec->rx_work);
	cancel_work_sync(&nvec->tx_work);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int nvec_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct nvec_chip *nvec = platform_get_drvdata(pdev);
	struct nvec_msg *msg;
	char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND };

	dev_dbg(nvec->dev, "suspending\n");

	/* keep these sync or you'll break suspend */
	nvec_toggle_global_events(nvec, false);

	msg = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend));
	if (msg)
		nvec_msg_free(nvec, msg);

	nvec_disable_i2c_slave(nvec);

	return 0;
}

static int nvec_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	dev_dbg(nvec->dev, "resuming\n");
	tegra_init_i2c_slave(nvec);
	nvec_toggle_global_events(nvec, true);

	return 0;
}
#endif

static const SIMPLE_DEV_PM_OPS(nvec_pm_ops, nvec_suspend, nvec_resume);

/* Match table for of_platform binding */
static const struct of_device_id nvidia_nvec_of_match[] = {
	{ .compatible = "nvidia,nvec", },
	{},
};
MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);

static struct platform_driver nvec_device_driver = {
	.probe   = tegra_nvec_probe,
	.remove  = tegra_nvec_remove,
	.driver  = {
		.name = "nvec",
		.owner = THIS_MODULE,
		.pm = &nvec_pm_ops,
		.of_match_table = nvidia_nvec_of_match,
	}
};

module_platform_driver(nvec_device_driver);

MODULE_ALIAS("platform:nvec");
MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
MODULE_LICENSE("GPL");