xref: /openbmc/linux/drivers/staging/nvec/nvec.c (revision d09c2a84)
1 /*
2  * NVEC: NVIDIA compliant embedded controller interface
3  *
4  * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
5  *
6  * Authors:  Pierre-Hugues Husson <phhusson@free.fr>
7  *           Ilya Petrov <ilya.muromec@gmail.com>
8  *           Marc Dietrich <marvin24@gmx.de>
9  *           Julian Andres Klode <jak@jak-linux.org>
10  *
11  * This file is subject to the terms and conditions of the GNU General Public
12  * License.  See the file "COPYING" in the main directory of this archive
13  * for more details.
14  *
15  */
16 
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/atomic.h>
20 #include <linux/clk.h>
21 #include <linux/completion.h>
22 #include <linux/delay.h>
23 #include <linux/err.h>
24 #include <linux/gpio.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/irq.h>
28 #include <linux/of.h>
29 #include <linux/of_gpio.h>
30 #include <linux/list.h>
31 #include <linux/mfd/core.h>
32 #include <linux/mutex.h>
33 #include <linux/notifier.h>
34 #include <linux/slab.h>
35 #include <linux/spinlock.h>
36 #include <linux/workqueue.h>
37 
38 #include "nvec.h"
39 
40 #define I2C_CNFG			0x00
41 #define I2C_CNFG_PACKET_MODE_EN		BIT(10)
42 #define I2C_CNFG_NEW_MASTER_SFM		BIT(11)
43 #define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12
44 
45 #define I2C_SL_CNFG		0x20
46 #define I2C_SL_NEWSL		BIT(2)
47 #define I2C_SL_NACK		BIT(1)
48 #define I2C_SL_RESP		BIT(0)
49 #define I2C_SL_IRQ		BIT(3)
50 #define END_TRANS		BIT(4)
51 #define RCVD			BIT(2)
52 #define RNW			BIT(1)
53 
54 #define I2C_SL_RCVD		0x24
55 #define I2C_SL_STATUS		0x28
56 #define I2C_SL_ADDR1		0x2c
57 #define I2C_SL_ADDR2		0x30
58 #define I2C_SL_DELAY_COUNT	0x3c
59 
60 /**
61  * enum nvec_msg_category - Message categories for nvec_msg_alloc()
62  * @NVEC_MSG_RX: The message is an incoming message (from EC)
63  * @NVEC_MSG_TX: The message is an outgoing message (to EC)
64  */
65 enum nvec_msg_category  {
66 	NVEC_MSG_RX,
67 	NVEC_MSG_TX,
68 };
69 
70 enum nvec_sleep_subcmds {
71 	GLOBAL_EVENTS,
72 	AP_PWR_DOWN,
73 	AP_SUSPEND,
74 };
75 
76 #define CNF_EVENT_REPORTING 0x01
77 #define GET_FIRMWARE_VERSION 0x15
78 #define LID_SWITCH BIT(1)
79 #define PWR_BUTTON BIT(15)
80 
81 static struct nvec_chip *nvec_power_handle;
82 
83 static const struct mfd_cell nvec_devices[] = {
84 	{
85 		.name = "nvec-kbd",
86 	},
87 	{
88 		.name = "nvec-mouse",
89 	},
90 	{
91 		.name = "nvec-power",
92 		.id = 0,
93 	},
94 	{
95 		.name = "nvec-power",
96 		.id = 1,
97 	},
98 	{
99 		.name = "nvec-paz00",
100 	},
101 };
102 
103 /**
104  * nvec_register_notifier - Register a notifier with nvec
105  * @nvec: A &struct nvec_chip
106  * @nb: The notifier block to register
107  *
108  * Registers a notifier with @nvec. The notifier will be added to an atomic
109  * notifier chain that is called for all received messages except those that
110  * correspond to a request initiated by nvec_write_sync(). @events is unused.
111  */
112 int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
113 			   unsigned int events)
114 {
115 	return atomic_notifier_chain_register(&nvec->notifier_list, nb);
116 }
117 EXPORT_SYMBOL_GPL(nvec_register_notifier);
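
/*
 * Illustrative sketch (not part of the driver): a sub-driver would typically
 * embed a struct notifier_block and register it as below; the names
 * "my_notifier" and "my_event_handler" are hypothetical.
 *
 *	static int my_event_handler(struct notifier_block *nb,
 *				    unsigned long event_type, void *data)
 *	{
 *		unsigned char *msg = data;
 *
 *		if (event_type != NVEC_SYS)
 *			return NOTIFY_DONE;
 *
 *		... handle msg here ...
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call = my_event_handler,
 *	};
 *
 *	nvec_register_notifier(nvec, &my_notifier, 0);
 */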
118 
119 /**
120  * nvec_unregister_notifier - Unregister a notifier with nvec
121  * @nvec: A &struct nvec_chip
122  * @nb: The notifier block to unregister
123  *
124  * Unregisters a notifier with @nvec. The notifier will be removed from the
125  * atomic notifier chain.
126  */
127 int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
128 {
129 	return atomic_notifier_chain_unregister(&nvec->notifier_list, nb);
130 }
131 EXPORT_SYMBOL_GPL(nvec_unregister_notifier);
132 
133 /**
134  * nvec_status_notifier - The final notifier
135  *
136  * Prints a message about control events not handled in the notifier
137  * chain.
138  */
139 static int nvec_status_notifier(struct notifier_block *nb,
140 				unsigned long event_type, void *data)
141 {
142 	struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
143 						nvec_status_notifier);
144 	unsigned char *msg = (unsigned char *)data;
145 
146 	if (event_type != NVEC_CNTL)
147 		return NOTIFY_DONE;
148 
149 	dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
150 	print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
151 		msg, msg[1] + 2, true);
152 
153 	return NOTIFY_OK;
154 }
155 
156 /**
157  * nvec_msg_alloc - Allocate a message from the pool
158  * @nvec: A &struct nvec_chip
159  * @category: Pool category, see &enum nvec_msg_category
160  *
161  * Allocate a single &struct nvec_msg object from the message pool of
162  * @nvec. The result shall be passed to nvec_msg_free() if no longer
163  * used.
164  *
165  * Outgoing messages are placed in the upper 75% of the pool, keeping the
166  * lower 25% available for RX buffers only. The reason is to prevent a
167  * situation where all buffers are full and a message is thus endlessly
168  * retried because the response could never be processed.
169  */
170 static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
171 				       enum nvec_msg_category category)
172 {
173 	int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;
174 
175 	for (; i < NVEC_POOL_SIZE; i++) {
176 		if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
177 			dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
178 			return &nvec->msg_pool[i];
179 		}
180 	}
181 
182 	dev_err(nvec->dev, "could not allocate %s buffer\n",
183 		(category == NVEC_MSG_TX) ? "TX" : "RX");
184 
185 	return NULL;
186 }
187 
188 /**
189  * nvec_msg_free - Free a message allocated with nvec_msg_alloc()
190  * @nvec: A &struct nvec_chip
191  * @msg:  A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
192  *
193  * Free the given message
194  */
195 void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
196 {
197 	if (msg != &nvec->tx_scratch)
198 		dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
199 	atomic_set(&msg->used, 0);
200 }
201 EXPORT_SYMBOL_GPL(nvec_msg_free);
202 
203 /**
204  * nvec_msg_is_event - Return %true if @msg is an event
205  * @msg: A message
206  */
207 static bool nvec_msg_is_event(struct nvec_msg *msg)
208 {
209 	return msg->data[0] >> 7;
210 }
211 
212 /**
213  * nvec_msg_size - Get the size of a message
214  * @msg: The message to get the size for
215  *
216  * This only works for received messages, not for outgoing messages.
217  */
218 static size_t nvec_msg_size(struct nvec_msg *msg)
219 {
220 	bool is_event = nvec_msg_is_event(msg);
221 	int event_length = (msg->data[0] & 0x60) >> 5;
222 
223 	/* for variable size, payload size in byte 1 + count (1) + cmd (1) */
224 	if (!is_event || event_length == NVEC_VAR_SIZE)
225 		return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
226 	else if (event_length == NVEC_2BYTES)
227 		return 2;
228 	else if (event_length == NVEC_3BYTES)
229 		return 3;
230 	return 0;
231 }
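
/*
 * Reading example (illustrative): a response message has bit 7 of data[0]
 * cleared, so its size comes from the length byte: data[1] payload bytes
 * plus the command and count bytes, i.e. data[1] + 2. Fixed-size events
 * (NVEC_2BYTES/NVEC_3BYTES) ignore the length byte entirely.
 */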
232 
233 /**
234  * nvec_gpio_set_value - Set the GPIO value
235  * @nvec: A &struct nvec_chip
236  * @value: The value to write (0 or 1)
237  *
238  * Like gpio_set_value(), but generating debugging information
239  */
240 static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
241 {
242 	dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
243 		gpio_get_value(nvec->gpio), value);
244 	gpio_set_value(nvec->gpio, value);
245 }
246 
247 /**
248  * nvec_write_async - Asynchronously write a message to NVEC
249  * @nvec: An nvec_chip instance
250  * @data: The message data, starting with the request type
251  * @size: The size of @data
252  *
253  * Queue a single message to be transferred to the embedded controller
254  * and return immediately.
255  *
256  * Returns: 0 on success, a negative error code on failure. If a failure
257  * occurred, the nvec driver may print an error.
258  */
259 int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
260 			short size)
261 {
262 	struct nvec_msg *msg;
263 	unsigned long flags;
264 
265 	msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);
266 
267 	if (msg == NULL)
268 		return -ENOMEM;
269 
270 	msg->data[0] = size;
271 	memcpy(msg->data + 1, data, size);
272 	msg->size = size + 1;
273 
274 	spin_lock_irqsave(&nvec->tx_lock, flags);
275 	list_add_tail(&msg->node, &nvec->tx_data);
276 	spin_unlock_irqrestore(&nvec->tx_lock, flags);
277 
278 	schedule_work(&nvec->tx_work);
279 
280 	return 0;
281 }
282 EXPORT_SYMBOL(nvec_write_async);
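
/*
 * Usage sketch (illustrative): queueing a fire-and-forget request, here the
 * same "enable global event reporting" command that
 * nvec_toggle_global_events() below builds. The first byte of @data selects
 * the request type, the remaining bytes are the payload.
 *
 *	unsigned char buf[] = { NVEC_SLEEP, GLOBAL_EVENTS, true };
 *
 *	if (nvec_write_async(nvec, buf, sizeof(buf)) < 0)
 *		dev_err(nvec->dev, "could not queue EC request\n");
 */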
283 
284 /**
285  * nvec_write_sync - Write a message to nvec and read the response
286  * @nvec: An &struct nvec_chip
287  * @data: The data to write
288  * @size: The size of @data
289  * @msg:  The response message received
290  *
291  * This is similar to nvec_write_async(), but waits for the
292  * request to be answered before returning. This function
293  * uses a mutex and can thus not be called from e.g.
294  * interrupt handlers.
295  *
296  * Returns: 0 on success, a negative error code on failure.
297  * The response message is returned in @msg. It shall be freed with
298  * nvec_msg_free() once no longer used.
299  *
300  */
301 int nvec_write_sync(struct nvec_chip *nvec,
302 		    const unsigned char *data, short size,
303 		    struct nvec_msg **msg)
304 {
305 	mutex_lock(&nvec->sync_write_mutex);
306 
307 	*msg = NULL;
308 	nvec->sync_write_pending = (data[1] << 8) + data[0];
309 
310 	if (nvec_write_async(nvec, data, size) < 0) {
311 		mutex_unlock(&nvec->sync_write_mutex);
312 		return -ENOMEM;
313 	}
314 
315 	dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
316 					nvec->sync_write_pending);
317 	if (!(wait_for_completion_timeout(&nvec->sync_write,
318 				msecs_to_jiffies(2000)))) {
319 		dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
320 		mutex_unlock(&nvec->sync_write_mutex);
321 		return -ETIMEDOUT;
322 	}
323 
324 	dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");
325 
326 	*msg = nvec->last_sync_msg;
327 
328 	mutex_unlock(&nvec->sync_write_mutex);
329 
330 	return 0;
331 }
332 EXPORT_SYMBOL(nvec_write_sync);
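
/*
 * Usage sketch (illustrative): a synchronous firmware version query,
 * mirroring what tegra_nvec_probe() does further below. The response must
 * be released with nvec_msg_free() once it has been consumed.
 *
 *	char req[] = { NVEC_CNTL, GET_FIRMWARE_VERSION };
 *	struct nvec_msg *resp;
 *
 *	if (!nvec_write_sync(nvec, req, sizeof(req), &resp)) {
 *		... inspect resp->data ...
 *		nvec_msg_free(nvec, resp);
 *	}
 */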
333 
334 /**
335  * nvec_toggle_global_events - enables or disables global event reporting
336  * @nvec: nvec handle
337  * @state: true for enable, false for disable
338  *
339  * This switches on/off global event reports by the embedded controller.
340  */
341 static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state)
342 {
343 	unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, state };
344 
345 	nvec_write_async(nvec, global_events, 3);
346 }
347 
348 /**
349  * nvec_event_mask - fill the command string with the event bitfield
350  * @ev: points to the event command string
351  * @mask: bit to insert into the event mask
352  *
353  * The configure event command expects a 32-bit bitfield which describes
354  * which events to enable. The bitfield has the following structure
355  * (from highest byte to lowest):
356  *	system state bits 7-0
357  *	system state bits 15-8
358  *	oem system state bits 7-0
359  *	oem system state bits 15-8
360  */
361 static void nvec_event_mask(char *ev, u32 mask)
362 {
363 	ev[3] = mask >> 16 & 0xff;
364 	ev[4] = mask >> 24 & 0xff;
365 	ev[5] = mask >> 0  & 0xff;
366 	ev[6] = mask >> 8  & 0xff;
367 }
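
/*
 * Worked example (illustrative): with the 7-byte command buffer set up in
 * tegra_nvec_probe() (NVEC_SYS, CNF_EVENT_REPORTING, enable flag, then four
 * mask bytes), a mask of LID_SWITCH (bit 1) yields ev[5] = 0x02 with ev[3],
 * ev[4] and ev[6] zero, while PWR_BUTTON (bit 15) yields ev[6] = 0x80.
 */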
368 
369 /**
370  * nvec_request_master - Process outgoing messages
371  * @work: A &struct work_struct (the tx_work member of &struct nvec_chip)
372  *
373  * Processes all outgoing requests by sending the request and awaiting the
374  * response, then continuing with the next request. Once a request has a
375  * matching response, it will be freed and removed from the list.
376  */
377 static void nvec_request_master(struct work_struct *work)
378 {
379 	struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
380 	unsigned long flags;
381 	long err;
382 	struct nvec_msg *msg;
383 
384 	spin_lock_irqsave(&nvec->tx_lock, flags);
385 	while (!list_empty(&nvec->tx_data)) {
386 		msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
387 		spin_unlock_irqrestore(&nvec->tx_lock, flags);
388 		nvec_gpio_set_value(nvec, 0);
389 		err = wait_for_completion_interruptible_timeout(
390 				&nvec->ec_transfer, msecs_to_jiffies(5000));
391 
392 		if (err == 0) {
393 			dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
394 			nvec_gpio_set_value(nvec, 1);
395 			msg->pos = 0;
396 		}
397 
398 		spin_lock_irqsave(&nvec->tx_lock, flags);
399 
400 		if (err > 0) {
401 			list_del_init(&msg->node);
402 			nvec_msg_free(nvec, msg);
403 		}
404 	}
405 	spin_unlock_irqrestore(&nvec->tx_lock, flags);
406 }
407 
408 /**
409  * parse_msg - Print some information and call the notifiers on an RX message
410  * @nvec: A &struct nvec_chip
411  * @msg: A message received by @nvec
412  *
413  * Parse some pieces of the message and then call the chain of notifiers
414  * registered via nvec_register_notifier().
415  */
416 static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
417 {
418 	if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
419 		dev_err(nvec->dev, "ec responded %*ph\n", 4, msg->data);
420 		return -EINVAL;
421 	}
422 
423 	if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
424 		print_hex_dump(KERN_WARNING, "ec system event ",
425 				DUMP_PREFIX_NONE, 16, 1, msg->data,
426 				msg->data[1] + 2, true);
427 
428 	atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
429 				   msg->data);
430 
431 	return 0;
432 }
433 
434 /**
435  * nvec_dispatch - Process messages received from the EC
436  * @work: A &struct work_struct (the rx_work member of &struct nvec_chip)
437  *
438  * Process messages previously received from the EC and put into the RX
439  * queue of the &struct nvec_chip instance associated with @work.
440  */
441 static void nvec_dispatch(struct work_struct *work)
442 {
443 	struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
444 	unsigned long flags;
445 	struct nvec_msg *msg;
446 
447 	spin_lock_irqsave(&nvec->rx_lock, flags);
448 	while (!list_empty(&nvec->rx_data)) {
449 		msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
450 		list_del_init(&msg->node);
451 		spin_unlock_irqrestore(&nvec->rx_lock, flags);
452 
453 		if (nvec->sync_write_pending ==
454 		      (msg->data[2] << 8) + msg->data[0]) {
455 			dev_dbg(nvec->dev, "sync write completed!\n");
456 			nvec->sync_write_pending = 0;
457 			nvec->last_sync_msg = msg;
458 			complete(&nvec->sync_write);
459 		} else {
460 			parse_msg(nvec, msg);
461 			nvec_msg_free(nvec, msg);
462 		}
463 		spin_lock_irqsave(&nvec->rx_lock, flags);
464 	}
465 	spin_unlock_irqrestore(&nvec->rx_lock, flags);
466 }
467 
468 /**
469  * nvec_tx_completed - Complete the current transfer
470  * @nvec: A &struct nvec_chip
471  *
472  * This is called when we have received an END_TRANS on a TX transfer.
473  */
474 static void nvec_tx_completed(struct nvec_chip *nvec)
475 {
476 	/* We got an END_TRANS; if the message was not fully sent, resend it */
477 	if (nvec->tx->pos != nvec->tx->size) {
478 		dev_err(nvec->dev, "premature END_TRANS, resending\n");
479 		nvec->tx->pos = 0;
480 		nvec_gpio_set_value(nvec, 0);
481 	} else {
482 		nvec->state = 0;
483 	}
484 }
485 
486 /**
487  * nvec_rx_completed - Complete the current transfer
488  * @nvec: A &struct nvec_chip
489  *
490  * This is called when we have received an END_TRANS on an RX transfer.
491  */
492 static void nvec_rx_completed(struct nvec_chip *nvec)
493 {
494 	if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
495 		dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
496 			   (uint) nvec_msg_size(nvec->rx),
497 			   (uint) nvec->rx->pos);
498 
499 		nvec_msg_free(nvec, nvec->rx);
500 		nvec->state = 0;
501 
502 		/* Battery quirk - Often incomplete, and likes to crash */
503 		if (nvec->rx->data[0] == NVEC_BAT)
504 			complete(&nvec->ec_transfer);
505 
506 		return;
507 	}
508 
509 	spin_lock(&nvec->rx_lock);
510 
511 	/*
512 	 * Add the received message to the rx work list; it will be parsed
513 	 * by nvec_dispatch().
514 	 */
515 	list_add_tail(&nvec->rx->node, &nvec->rx_data);
516 
517 	spin_unlock(&nvec->rx_lock);
518 
519 	nvec->state = 0;
520 
521 	if (!nvec_msg_is_event(nvec->rx))
522 		complete(&nvec->ec_transfer);
523 
524 	schedule_work(&nvec->rx_work);
525 }
526 
527 /**
528  * nvec_invalid_flags - Report invalid status flags and optionally reset the state
529  * @nvec: The nvec device
530  * @status: The status flags
531  * @reset: Whether to reset the state machine to state 0.
532  */
533 static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
534 			       bool reset)
535 {
536 	dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
537 		status, nvec->state);
538 	if (reset)
539 		nvec->state = 0;
540 }
541 
542 /**
543  * nvec_tx_set - Set the message to transfer (nvec->tx)
544  * @nvec: A &struct nvec_chip
545  *
546  * Gets the first entry from the tx_data list of @nvec and sets the
547  * tx member to it. If the tx_data list is empty, this uses the
548  * tx_scratch message to send a no operation message.
549  */
550 static void nvec_tx_set(struct nvec_chip *nvec)
551 {
552 	spin_lock(&nvec->tx_lock);
553 	if (list_empty(&nvec->tx_data)) {
554 		dev_err(nvec->dev, "empty tx - sending no-op\n");
555 		memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
556 		nvec->tx_scratch.size = 3;
557 		nvec->tx_scratch.pos = 0;
558 		nvec->tx = &nvec->tx_scratch;
559 		list_add_tail(&nvec->tx->node, &nvec->tx_data);
560 	} else {
561 		nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
562 					    node);
563 		nvec->tx->pos = 0;
564 	}
565 	spin_unlock(&nvec->tx_lock);
566 
567 	dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
568 		(uint)nvec->tx->size, nvec->tx->data[1]);
569 }
570 
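/*
 * Overview of the slave state machine in nvec_interrupt() below (a summary
 * of the case labels, not additional behaviour):
 *
 *	0: idle, waiting for the start of a new transfer
 *	1: our slave address was received, expecting the command byte
 *	2: first byte after the command, decides between an RX and a TX transfer
 *	3: the EC performs a block read, we transmit from nvec->tx
 *	4: the EC writes data, we receive into nvec->rx
 */
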
571 /**
572  * nvec_interrupt - Interrupt handler
573  * @irq: The IRQ
574  * @dev: The nvec device
575  *
576  * Interrupt handler that fills our RX buffers and empties our TX
577  * buffers. This uses a finite state machine with ridiculous amounts
578  * of error checking, in order to be fairly reliable.
579  */
580 static irqreturn_t nvec_interrupt(int irq, void *dev)
581 {
582 	unsigned long status;
583 	unsigned int received = 0;
584 	unsigned char to_send = 0xff;
585 	const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
586 	struct nvec_chip *nvec = dev;
587 	unsigned int state = nvec->state;
588 
589 	status = readl(nvec->base + I2C_SL_STATUS);
590 
591 	/* Filter out some errors */
592 	if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
593 		dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
594 		return IRQ_HANDLED;
595 	}
596 	if ((status & I2C_SL_IRQ) == 0) {
597 		dev_err(nvec->dev, "Spurious IRQ\n");
598 		return IRQ_HANDLED;
599 	}
600 
601 	/* The EC did not request a read, so it sent us something; read it */
602 	if ((status & RNW) == 0) {
603 		received = readl(nvec->base + I2C_SL_RCVD);
604 		if (status & RCVD)
605 			writel(0, nvec->base + I2C_SL_RCVD);
606 	}
607 
608 	if (status == (I2C_SL_IRQ | RCVD))
609 		nvec->state = 0;
610 
611 	switch (nvec->state) {
612 	case 0:		/* Verify that it's a transfer start, the rest later */
613 		if (status != (I2C_SL_IRQ | RCVD))
614 			nvec_invalid_flags(nvec, status, false);
615 		break;
616 	case 1:		/* command byte */
617 		if (status != I2C_SL_IRQ) {
618 			nvec_invalid_flags(nvec, status, true);
619 		} else {
620 			nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
621 			/* Should not happen in a normal world */
622 			if (unlikely(nvec->rx == NULL)) {
623 				nvec->state = 0;
624 				break;
625 			}
626 			nvec->rx->data[0] = received;
627 			nvec->rx->pos = 1;
628 			nvec->state = 2;
629 		}
630 		break;
631 	case 2:		/* first byte after command */
632 		if (status == (I2C_SL_IRQ | RNW | RCVD)) {
633 			udelay(33);
634 			if (nvec->rx->data[0] != 0x01) {
635 				dev_err(nvec->dev,
636 					"Read without prior read command\n");
637 				nvec->state = 0;
638 				break;
639 			}
640 			nvec_msg_free(nvec, nvec->rx);
641 			nvec->state = 3;
642 			nvec_tx_set(nvec);
643 			BUG_ON(nvec->tx->size < 1);
644 			to_send = nvec->tx->data[0];
645 			nvec->tx->pos = 1;
646 		} else if (status == (I2C_SL_IRQ)) {
647 			BUG_ON(nvec->rx == NULL);
648 			nvec->rx->data[1] = received;
649 			nvec->rx->pos = 2;
650 			nvec->state = 4;
651 		} else {
652 			nvec_invalid_flags(nvec, status, true);
653 		}
654 		break;
655 	case 3:		/* EC does a block read, we transmit data */
656 		if (status & END_TRANS) {
657 			nvec_tx_completed(nvec);
658 		} else if ((status & RNW) == 0 || (status & RCVD)) {
659 			nvec_invalid_flags(nvec, status, true);
660 		} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
661 			to_send = nvec->tx->data[nvec->tx->pos++];
662 		} else {
663 			dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
664 				nvec->tx,
665 				(uint) (nvec->tx ? nvec->tx->pos : 0),
666 				(uint) (nvec->tx ? nvec->tx->size : 0));
667 			nvec->state = 0;
668 		}
669 		break;
670 	case 4:		/* EC does some write, we read the data */
671 		if ((status & (END_TRANS | RNW)) == END_TRANS)
672 			nvec_rx_completed(nvec);
673 		else if (status & (RNW | RCVD))
674 			nvec_invalid_flags(nvec, status, true);
675 		else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
676 			nvec->rx->data[nvec->rx->pos++] = received;
677 		else
678 			dev_err(nvec->dev,
679 				"RX buffer overflow on %p: Trying to write byte %u of %u\n",
680 				nvec->rx, nvec->rx ? nvec->rx->pos : 0,
681 				NVEC_MSG_SIZE);
682 		break;
683 	default:
684 		nvec->state = 0;
685 	}
686 
687 	/* If we are told that a new transfer starts, verify it */
688 	if ((status & (RCVD | RNW)) == RCVD) {
689 		if (received != nvec->i2c_addr)
690 			dev_err(nvec->dev,
691 			"received address 0x%02x, expected 0x%02x\n",
692 			received, nvec->i2c_addr);
693 		nvec->state = 1;
694 	}
695 
696 	/* Send data if requested, but not on end of transmission */
697 	if ((status & (RNW | END_TRANS)) == RNW)
698 		writel(to_send, nvec->base + I2C_SL_RCVD);
699 
700 	/* If we have sent the first byte */
701 	if (status == (I2C_SL_IRQ | RNW | RCVD))
702 		nvec_gpio_set_value(nvec, 1);
703 
704 	dev_dbg(nvec->dev,
705 		"Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
706 		(status & RNW) == 0 ? "received" : "R=",
707 		received,
708 		(status & (RNW | END_TRANS)) ? "sent" : "S=",
709 		to_send,
710 		state,
711 		status & END_TRANS ? " END_TRANS" : "",
712 		status & RCVD ? " RCVD" : "",
713 		status & RNW ? " RNW" : "");
714 
715 	/*
716 	 * TODO: A correct fix needs to be found for this.
717 	 *
718 	 * We experience less incomplete messages with this delay than without
719 	 * it, but we don't know why. Help is appreciated.
720 	 */
721 	udelay(100);
722 
723 	return IRQ_HANDLED;
724 }
725 
726 static void tegra_init_i2c_slave(struct nvec_chip *nvec)
727 {
728 	u32 val;
729 
730 	clk_prepare_enable(nvec->i2c_clk);
731 
732 	reset_control_assert(nvec->rst);
733 	udelay(2);
734 	reset_control_deassert(nvec->rst);
735 
736 	val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
737 	    (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
738 	writel(val, nvec->base + I2C_CNFG);
739 
740 	clk_set_rate(nvec->i2c_clk, 8 * 80000);
741 
742 	writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
743 	writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);
744 
745 	writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1);
746 	writel(0, nvec->base + I2C_SL_ADDR2);
747 
748 	enable_irq(nvec->irq);
749 }
750 
751 #ifdef CONFIG_PM_SLEEP
752 static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
753 {
754 	disable_irq(nvec->irq);
755 	writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
756 	clk_disable_unprepare(nvec->i2c_clk);
757 }
758 #endif
759 
760 static void nvec_power_off(void)
761 {
762 	char ap_pwr_down[] = { NVEC_SLEEP, AP_PWR_DOWN };
763 
764 	nvec_toggle_global_events(nvec_power_handle, false);
765 	nvec_write_async(nvec_power_handle, ap_pwr_down, 2);
766 }
767 
768 /*
769  *  Parse common device tree data
770  */
771 static int nvec_i2c_parse_dt_pdata(struct nvec_chip *nvec)
772 {
773 	nvec->gpio = of_get_named_gpio(nvec->dev->of_node, "request-gpios", 0);
774 
775 	if (nvec->gpio < 0) {
776 		dev_err(nvec->dev, "no gpio specified");
777 		return -ENODEV;
778 	}
779 
780 	if (of_property_read_u32(nvec->dev->of_node, "slave-addr",
781 				&nvec->i2c_addr)) {
782 		dev_err(nvec->dev, "no i2c address specified");
783 		return -ENODEV;
784 	}
785 
786 	return 0;
787 }
788 
789 static int tegra_nvec_probe(struct platform_device *pdev)
790 {
791 	int err, ret;
792 	struct clk *i2c_clk;
793 	struct nvec_chip *nvec;
794 	struct nvec_msg *msg;
795 	struct resource *res;
796 	void __iomem *base;
797 	char	get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION },
798 		unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
799 		enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };
800 
801 	if (!pdev->dev.of_node) {
802 		dev_err(&pdev->dev, "must be instantiated using device tree\n");
803 		return -ENODEV;
804 	}
805 
806 	nvec = devm_kzalloc(&pdev->dev, sizeof(struct nvec_chip), GFP_KERNEL);
807 	if (!nvec)
808 		return -ENOMEM;
809 
810 	platform_set_drvdata(pdev, nvec);
811 	nvec->dev = &pdev->dev;
812 
813 	err = nvec_i2c_parse_dt_pdata(nvec);
814 	if (err < 0)
815 		return err;
816 
817 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
818 	base = devm_ioremap_resource(&pdev->dev, res);
819 	if (IS_ERR(base))
820 		return PTR_ERR(base);
821 
822 	nvec->irq = platform_get_irq(pdev, 0);
823 	if (nvec->irq < 0) {
824 		dev_err(&pdev->dev, "no irq resource?\n");
825 		return -ENODEV;
826 	}
827 
828 	i2c_clk = devm_clk_get(&pdev->dev, "div-clk");
829 	if (IS_ERR(i2c_clk)) {
830 		dev_err(nvec->dev, "failed to get controller clock\n");
831 		return -ENODEV;
832 	}
833 
834 	nvec->rst = devm_reset_control_get(&pdev->dev, "i2c");
835 	if (IS_ERR(nvec->rst)) {
836 		dev_err(nvec->dev, "failed to get controller reset\n");
837 		return PTR_ERR(nvec->rst);
838 	}
839 
840 	nvec->base = base;
841 	nvec->i2c_clk = i2c_clk;
842 	nvec->rx = &nvec->msg_pool[0];
843 
844 	ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);
845 
846 	init_completion(&nvec->sync_write);
847 	init_completion(&nvec->ec_transfer);
848 	mutex_init(&nvec->sync_write_mutex);
849 	spin_lock_init(&nvec->tx_lock);
850 	spin_lock_init(&nvec->rx_lock);
851 	INIT_LIST_HEAD(&nvec->rx_data);
852 	INIT_LIST_HEAD(&nvec->tx_data);
853 	INIT_WORK(&nvec->rx_work, nvec_dispatch);
854 	INIT_WORK(&nvec->tx_work, nvec_request_master);
855 
856 	err = devm_gpio_request_one(&pdev->dev, nvec->gpio, GPIOF_OUT_INIT_HIGH,
857 					"nvec gpio");
858 	if (err < 0) {
859 		dev_err(nvec->dev, "couldn't request gpio\n");
860 		return -ENODEV;
861 	}
862 
863 	err = devm_request_irq(&pdev->dev, nvec->irq, nvec_interrupt, 0,
864 				"nvec", nvec);
865 	if (err) {
866 		dev_err(nvec->dev, "couldn't request irq\n");
867 		return -ENODEV;
868 	}
869 	disable_irq(nvec->irq);
870 
871 	tegra_init_i2c_slave(nvec);
872 
873 	/* enable event reporting */
874 	nvec_toggle_global_events(nvec, true);
875 
876 	nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
877 	nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);
878 
879 	nvec_power_handle = nvec;
880 	pm_power_off = nvec_power_off;
881 
882 	/* Get Firmware Version */
883 	err = nvec_write_sync(nvec, get_firmware_version, 2, &msg);
884 
885 	if (!err) {
886 		dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
887 			msg->data[4], msg->data[5], msg->data[6], msg->data[7]);
888 
889 		nvec_msg_free(nvec, msg);
890 	}
891 
892 	ret = mfd_add_devices(nvec->dev, 0, nvec_devices,
893 			      ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
894 	if (ret)
895 		dev_err(nvec->dev, "error adding subdevices\n");
896 
897 	/* unmute speakers? */
898 	nvec_write_async(nvec, unmute_speakers, 4);
899 
900 	/* enable lid switch event */
901 	nvec_event_mask(enable_event, LID_SWITCH);
902 	nvec_write_async(nvec, enable_event, 7);
903 
904 	/* enable power button event */
905 	nvec_event_mask(enable_event, PWR_BUTTON);
906 	nvec_write_async(nvec, enable_event, 7);
907 
908 	return 0;
909 }
910 
911 static int tegra_nvec_remove(struct platform_device *pdev)
912 {
913 	struct nvec_chip *nvec = platform_get_drvdata(pdev);
914 
915 	nvec_toggle_global_events(nvec, false);
916 	mfd_remove_devices(nvec->dev);
917 	nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier);
918 	cancel_work_sync(&nvec->rx_work);
919 	cancel_work_sync(&nvec->tx_work);
920 	/* FIXME: needs check whether nvec is responsible for power off */
921 	pm_power_off = NULL;
922 
923 	return 0;
924 }
925 
926 #ifdef CONFIG_PM_SLEEP
927 static int nvec_suspend(struct device *dev)
928 {
929 	int err;
930 	struct platform_device *pdev = to_platform_device(dev);
931 	struct nvec_chip *nvec = platform_get_drvdata(pdev);
932 	struct nvec_msg *msg;
933 	char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND };
934 
935 	dev_dbg(nvec->dev, "suspending\n");
936 
937 	/* keep these calls synchronous or you'll break suspend */
938 	nvec_toggle_global_events(nvec, false);
939 
940 	err = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend), &msg);
941 	if (!err)
942 		nvec_msg_free(nvec, msg);
943 
944 	nvec_disable_i2c_slave(nvec);
945 
946 	return 0;
947 }
948 
949 static int nvec_resume(struct device *dev)
950 {
951 	struct platform_device *pdev = to_platform_device(dev);
952 	struct nvec_chip *nvec = platform_get_drvdata(pdev);
953 
954 	dev_dbg(nvec->dev, "resuming\n");
955 	tegra_init_i2c_slave(nvec);
956 	nvec_toggle_global_events(nvec, true);
957 
958 	return 0;
959 }
960 #endif
961 
962 static SIMPLE_DEV_PM_OPS(nvec_pm_ops, nvec_suspend, nvec_resume);
963 
964 /* Match table for of_platform binding */
965 static const struct of_device_id nvidia_nvec_of_match[] = {
966 	{ .compatible = "nvidia,nvec", },
967 	{},
968 };
969 MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);
970 
971 static struct platform_driver nvec_device_driver = {
972 	.probe   = tegra_nvec_probe,
973 	.remove  = tegra_nvec_remove,
974 	.driver  = {
975 		.name = "nvec",
976 		.pm = &nvec_pm_ops,
977 		.of_match_table = nvidia_nvec_of_match,
978 	}
979 };
980 
981 module_platform_driver(nvec_device_driver);
982 
983 MODULE_ALIAS("platform:nvec");
984 MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
985 MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
986 MODULE_LICENSE("GPL");
987