// SPDX-License-Identifier: GPL-2.0
/*
 * OMAP mailbox driver
 *
 * Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
 * Copyright (C) 2013-2021 Texas Instruments Incorporated - https://www.ti.com
 *
 * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *          Suman Anna <s-anna@ti.com>
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/omap-mailbox.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>

#include "mailbox.h"

#define MAILBOX_REVISION		0x000
#define MAILBOX_MESSAGE(m)		(0x040 + 4 * (m))
#define MAILBOX_FIFOSTATUS(m)		(0x080 + 4 * (m))
#define MAILBOX_MSGSTATUS(m)		(0x0c0 + 4 * (m))

#define OMAP2_MAILBOX_IRQSTATUS(u)	(0x100 + 8 * (u))
#define OMAP2_MAILBOX_IRQENABLE(u)	(0x104 + 8 * (u))

#define OMAP4_MAILBOX_IRQSTATUS(u)	(0x104 + 0x10 * (u))
#define OMAP4_MAILBOX_IRQENABLE(u)	(0x108 + 0x10 * (u))
#define OMAP4_MAILBOX_IRQENABLE_CLR(u)	(0x10c + 0x10 * (u))

#define MAILBOX_IRQSTATUS(type, u)	(type ? OMAP4_MAILBOX_IRQSTATUS(u) : \
						OMAP2_MAILBOX_IRQSTATUS(u))
#define MAILBOX_IRQENABLE(type, u)	(type ? OMAP4_MAILBOX_IRQENABLE(u) : \
						OMAP2_MAILBOX_IRQENABLE(u))
#define MAILBOX_IRQDISABLE(type, u)	(type ? OMAP4_MAILBOX_IRQENABLE_CLR(u) \
						: OMAP2_MAILBOX_IRQENABLE(u))

#define MAILBOX_IRQ_NEWMSG(m)		(1 << (2 * (m)))
#define MAILBOX_IRQ_NOTFULL(m)		(1 << (2 * (m) + 1))

/* Interrupt register configuration types */
#define MBOX_INTR_CFG_TYPE1		0
#define MBOX_INTR_CFG_TYPE2		1

struct omap_mbox_fifo {
	unsigned long msg;
	unsigned long fifo_stat;
	unsigned long msg_stat;
	unsigned long irqenable;
	unsigned long irqstatus;
	unsigned long irqdisable;
	u32 intr_bit;
};

struct omap_mbox_queue {
	spinlock_t		lock;
	struct kfifo		fifo;
	struct work_struct	work;
	struct omap_mbox	*mbox;
	bool full;
};

struct omap_mbox_match_data {
	u32 intr_type;
};

struct omap_mbox_device {
	struct device *dev;
	struct mutex cfg_lock;
	void __iomem *mbox_base;
	u32 *irq_ctx;
	u32 num_users;
	u32 num_fifos;
	u32 intr_type;
	struct omap_mbox **mboxes;
	struct mbox_controller controller;
	struct list_head elem;
};

struct omap_mbox_fifo_info {
	int tx_id;
	int tx_usr;
	int tx_irq;

	int rx_id;
	int rx_usr;
	int rx_irq;

	const char *name;
	bool send_no_irq;
};

struct omap_mbox {
	const char		*name;
	int			irq;
	struct omap_mbox_queue	*rxq;
	struct device		*dev;
	struct omap_mbox_device *parent;
	struct omap_mbox_fifo	tx_fifo;
	struct omap_mbox_fifo	rx_fifo;
	u32			intr_type;
	struct mbox_chan	*chan;
	bool			send_no_irq;
};

/* global variables for the mailbox devices */
static DEFINE_MUTEX(omap_mbox_devices_lock);
static LIST_HEAD(omap_mbox_devices);

static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
module_param(mbox_kfifo_size, uint, S_IRUGO);
MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");

static struct omap_mbox *mbox_chan_to_omap_mbox(struct mbox_chan *chan)
{
	if (!chan || !chan->con_priv)
		return NULL;

	return (struct omap_mbox *)chan->con_priv;
}

static inline
unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
{
	return __raw_readl(mdev->mbox_base + ofs);
}

static inline
void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs)
{
	__raw_writel(val, mdev->mbox_base + ofs);
}

/* Mailbox FIFO handle functions */
static u32 mbox_fifo_read(struct omap_mbox *mbox)
{
	struct omap_mbox_fifo *fifo = &mbox->rx_fifo;

	return mbox_read_reg(mbox->parent, fifo->msg);
}

static void mbox_fifo_write(struct omap_mbox *mbox, u32 msg)
{
	struct omap_mbox_fifo *fifo = &mbox->tx_fifo;

	mbox_write_reg(mbox->parent, msg, fifo->msg);
}

static int mbox_fifo_empty(struct omap_mbox *mbox)
{
	struct omap_mbox_fifo *fifo = &mbox->rx_fifo;

	return (mbox_read_reg(mbox->parent, fifo->msg_stat) == 0);
}

static int mbox_fifo_full(struct omap_mbox *mbox)
{
	struct omap_mbox_fifo *fifo = &mbox->tx_fifo;

	return mbox_read_reg(mbox->parent, fifo->fifo_stat);
}

/* Mailbox IRQ handle functions */
static void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
				&mbox->tx_fifo : &mbox->rx_fifo;
	u32 bit = fifo->intr_bit;
	u32 irqstatus = fifo->irqstatus;

	mbox_write_reg(mbox->parent, bit, irqstatus);

	/* Flush posted write for irq status to avoid spurious interrupts */
	mbox_read_reg(mbox->parent, irqstatus);
}

static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
				&mbox->tx_fifo : &mbox->rx_fifo;
	u32 bit = fifo->intr_bit;
	u32 irqenable = fifo->irqenable;
	u32 irqstatus = fifo->irqstatus;

	u32 enable = mbox_read_reg(mbox->parent, irqenable);
	u32 status = mbox_read_reg(mbox->parent, irqstatus);

	return (int)(enable & status & bit);
}

static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	u32 l;
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
				&mbox->tx_fifo : &mbox->rx_fifo;
	u32 bit = fifo->intr_bit;
	u32 irqenable = fifo->irqenable;

	l = mbox_read_reg(mbox->parent, irqenable);
	l |= bit;
	mbox_write_reg(mbox->parent, l, irqenable);
}

static void _omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
				&mbox->tx_fifo : &mbox->rx_fifo;
	u32 bit = fifo->intr_bit;
	u32 irqdisable = fifo->irqdisable;

	/*
	 * Read and update the interrupt configuration register for pre-OMAP4.
	 * OMAP4 and later SoCs have a dedicated interrupt disabling register.
	 */
	if (!mbox->intr_type)
		bit = mbox_read_reg(mbox->parent, irqdisable) & ~bit;

	mbox_write_reg(mbox->parent, bit, irqdisable);
}

void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
{
	struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);

	if (WARN_ON(!mbox))
		return;

	_omap_mbox_enable_irq(mbox, irq);
}
EXPORT_SYMBOL(omap_mbox_enable_irq);

void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
{
	struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);

	if (WARN_ON(!mbox))
		return;

	_omap_mbox_disable_irq(mbox, irq);
}
EXPORT_SYMBOL(omap_mbox_disable_irq);

/*
 * Message receiver (workqueue)
 */
static void mbox_rx_work(struct work_struct *work)
{
	struct omap_mbox_queue *mq =
			container_of(work, struct omap_mbox_queue, work);
	mbox_msg_t data;
	u32 msg;
	int len;

	while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
		len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
		WARN_ON(len != sizeof(msg));
		data = msg;

		mbox_chan_received_data(mq->mbox->chan, (void *)data);
		spin_lock_irq(&mq->lock);
		if (mq->full) {
			mq->full = false;
			_omap_mbox_enable_irq(mq->mbox, IRQ_RX);
		}
		spin_unlock_irq(&mq->lock);
	}
}

/*
 * Mailbox interrupt handler
 */
static void __mbox_tx_interrupt(struct omap_mbox *mbox)
{
	_omap_mbox_disable_irq(mbox, IRQ_TX);
	ack_mbox_irq(mbox, IRQ_TX);
	mbox_chan_txdone(mbox->chan, 0);
}

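/*
 * Drain all pending messages from the h/w FIFO into the kfifo. If the
 * kfifo runs out of room, the RX interrupt is left masked and mq->full
 * is set so that mbox_rx_work() can re-enable it once space frees up.
 */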
static void __mbox_rx_interrupt(struct omap_mbox *mbox)
{
	struct omap_mbox_queue *mq = mbox->rxq;
	u32 msg;
	int len;

	while (!mbox_fifo_empty(mbox)) {
		if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
			_omap_mbox_disable_irq(mbox, IRQ_RX);
			mq->full = true;
			goto nomem;
		}

		msg = mbox_fifo_read(mbox);

		len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
		WARN_ON(len != sizeof(msg));
	}

	/* no more messages in the fifo. clear IRQ source. */
	ack_mbox_irq(mbox, IRQ_RX);
nomem:
	schedule_work(&mbox->rxq->work);
}

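/*
 * The same interrupt line may be shared by several sub-mailboxes and can
 * signal both directions, so check the Tx-ready and Rx-newmsg causes
 * independently.
 */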
static irqreturn_t mbox_interrupt(int irq, void *p)
{
	struct omap_mbox *mbox = p;

	if (is_mbox_irq(mbox, IRQ_TX))
		__mbox_tx_interrupt(mbox);

	if (is_mbox_irq(mbox, IRQ_RX))
		__mbox_rx_interrupt(mbox);

	return IRQ_HANDLED;
}

static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
					void (*work)(struct work_struct *))
{
	struct omap_mbox_queue *mq;

	if (!work)
		return NULL;

	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		return NULL;

	spin_lock_init(&mq->lock);

	if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
		goto error;

	INIT_WORK(&mq->work, work);
	return mq;

error:
	kfree(mq);
	return NULL;
}

static void mbox_queue_free(struct omap_mbox_queue *q)
{
	kfifo_free(&q->fifo);
	kfree(q);
}

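/*
 * Per-channel startup: allocate the RX kfifo/workqueue pair, request the
 * interrupt line and unmask the Rx-newmsg interrupt. Tx completion is
 * reported by IRQ unless the channel uses the "send-noirq" mode, in which
 * case the framework's TXDONE_BY_ACK method is used instead.
 */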
static int omap_mbox_startup(struct omap_mbox *mbox)
{
	int ret = 0;
	struct omap_mbox_queue *mq;

	mq = mbox_queue_alloc(mbox, mbox_rx_work);
	if (!mq)
		return -ENOMEM;
	mbox->rxq = mq;
	mq->mbox = mbox;

	ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
			  mbox->name, mbox);
	if (unlikely(ret)) {
		pr_err("failed to register mailbox interrupt: %d\n", ret);
		goto fail_request_irq;
	}

	if (mbox->send_no_irq)
		mbox->chan->txdone_method = TXDONE_BY_ACK;

	_omap_mbox_enable_irq(mbox, IRQ_RX);

	return 0;

fail_request_irq:
	mbox_queue_free(mbox->rxq);
	return ret;
}

static void omap_mbox_fini(struct omap_mbox *mbox)
{
	_omap_mbox_disable_irq(mbox, IRQ_RX);
	free_irq(mbox->irq, mbox);
	flush_work(&mbox->rxq->work);
	mbox_queue_free(mbox->rxq);
}

static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
					       const char *mbox_name)
{
	struct omap_mbox *_mbox, *mbox = NULL;
	struct omap_mbox **mboxes = mdev->mboxes;
	int i;

	if (!mboxes)
		return NULL;

	for (i = 0; (_mbox = mboxes[i]); i++) {
		if (!strcmp(_mbox->name, mbox_name)) {
			mbox = _mbox;
			break;
		}
	}
	return mbox;
}

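/*
 * Legacy channel lookup by name for non-DT OMAP users. DT-based clients
 * must use the standard mbox_request_channel() API instead. The channel
 * state is (re)initialized here and the channel started up directly
 * through the controller ops, bypassing the framework's of_xlate path.
 */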
struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
					    const char *chan_name)
{
	struct device *dev = cl->dev;
	struct omap_mbox *mbox = NULL;
	struct omap_mbox_device *mdev;
	struct mbox_chan *chan;
	unsigned long flags;
	int ret;

	if (!dev)
		return ERR_PTR(-ENODEV);

	if (dev->of_node) {
		pr_err("%s: please use mbox_request_channel(), this API is supported only for OMAP non-DT usage\n",
		       __func__);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&omap_mbox_devices_lock);
	list_for_each_entry(mdev, &omap_mbox_devices, elem) {
		mbox = omap_mbox_device_find(mdev, chan_name);
		if (mbox)
			break;
	}
	mutex_unlock(&omap_mbox_devices_lock);

	if (!mbox || !mbox->chan)
		return ERR_PTR(-ENOENT);

	chan = mbox->chan;
	spin_lock_irqsave(&chan->lock, flags);
	chan->msg_free = 0;
	chan->msg_count = 0;
	chan->active_req = NULL;
	chan->cl = cl;
	init_completion(&chan->tx_complete);
	spin_unlock_irqrestore(&chan->lock, flags);

	ret = chan->mbox->ops->startup(chan);
	if (ret) {
		pr_err("Unable to startup the chan (%d)\n", ret);
		mbox_free_channel(chan);
		chan = ERR_PTR(ret);
	}

	return chan;
}
EXPORT_SYMBOL(omap_mbox_request_channel);

static struct class omap_mbox_class = { .name = "mbox", };

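/*
 * Create a class device for every sub-mailbox, add the controller to the
 * global device list used by omap_mbox_request_channel(), and register
 * the mailbox controller with the framework.
 */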
static int omap_mbox_register(struct omap_mbox_device *mdev)
{
	int ret;
	int i;
	struct omap_mbox **mboxes;

	if (!mdev || !mdev->mboxes)
		return -EINVAL;

	mboxes = mdev->mboxes;
	for (i = 0; mboxes[i]; i++) {
		struct omap_mbox *mbox = mboxes[i];

		mbox->dev = device_create(&omap_mbox_class, mdev->dev,
					0, mbox, "%s", mbox->name);
		if (IS_ERR(mbox->dev)) {
			ret = PTR_ERR(mbox->dev);
			goto err_out;
		}
	}

	mutex_lock(&omap_mbox_devices_lock);
	list_add(&mdev->elem, &omap_mbox_devices);
	mutex_unlock(&omap_mbox_devices_lock);

	ret = devm_mbox_controller_register(mdev->dev, &mdev->controller);

err_out:
	if (ret) {
		while (i--)
			device_unregister(mboxes[i]->dev);
	}
	return ret;
}

static int omap_mbox_unregister(struct omap_mbox_device *mdev)
{
	int i;
	struct omap_mbox **mboxes;

	if (!mdev || !mdev->mboxes)
		return -EINVAL;

	mutex_lock(&omap_mbox_devices_lock);
	list_del(&mdev->elem);
	mutex_unlock(&omap_mbox_devices_lock);

	mboxes = mdev->mboxes;
	for (i = 0; mboxes[i]; i++)
		device_unregister(mboxes[i]->dev);
	return 0;
}

static int omap_mbox_chan_startup(struct mbox_chan *chan)
{
	struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
	struct omap_mbox_device *mdev = mbox->parent;
	int ret = 0;

	mutex_lock(&mdev->cfg_lock);
	pm_runtime_get_sync(mdev->dev);
	ret = omap_mbox_startup(mbox);
	if (ret)
		pm_runtime_put_sync(mdev->dev);
	mutex_unlock(&mdev->cfg_lock);
	return ret;
}

static void omap_mbox_chan_shutdown(struct mbox_chan *chan)
{
	struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
	struct omap_mbox_device *mdev = mbox->parent;

	mutex_lock(&mdev->cfg_lock);
	omap_mbox_fini(mbox);
	pm_runtime_put_sync(mdev->dev);
	mutex_unlock(&mdev->cfg_lock);
}

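/*
 * Transmit path for "ti,mbox-send-noirq" channels: the Tx-ready interrupt
 * is not used. The message is written only if the Tx FIFO has room, and
 * the Rx FIFO is read back and its newmsg interrupt acked immediately so
 * no interrupt is left pending locally. Tx completion is reported by the
 * client through TXDONE_BY_ACK (set up in omap_mbox_startup()).
 */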
static int omap_mbox_chan_send_noirq(struct omap_mbox *mbox, u32 msg)
{
	int ret = -EBUSY;

	if (!mbox_fifo_full(mbox)) {
		_omap_mbox_enable_irq(mbox, IRQ_RX);
		mbox_fifo_write(mbox, msg);
		ret = 0;
		_omap_mbox_disable_irq(mbox, IRQ_RX);

		/* we must read and ack the interrupt directly from here */
		mbox_fifo_read(mbox);
		ack_mbox_irq(mbox, IRQ_RX);
	}

	return ret;
}

static int omap_mbox_chan_send(struct omap_mbox *mbox, u32 msg)
{
	int ret = -EBUSY;

	if (!mbox_fifo_full(mbox)) {
		mbox_fifo_write(mbox, msg);
		ret = 0;
	}

	/* always enable the interrupt */
	_omap_mbox_enable_irq(mbox, IRQ_TX);
	return ret;
}

static int omap_mbox_chan_send_data(struct mbox_chan *chan, void *data)
{
	struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
	int ret;
	u32 msg = omap_mbox_message(data);

	if (!mbox)
		return -EINVAL;

	if (mbox->send_no_irq)
		ret = omap_mbox_chan_send_noirq(mbox, msg);
	else
		ret = omap_mbox_chan_send(mbox, msg);

	return ret;
}

static const struct mbox_chan_ops omap_mbox_chan_ops = {
	.startup        = omap_mbox_chan_startup,
	.send_data      = omap_mbox_chan_send_data,
	.shutdown       = omap_mbox_chan_shutdown,
};

#ifdef CONFIG_PM_SLEEP
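/*
 * System suspend: refuse to suspend while any FIFO still holds unread
 * messages, then save the per-user IRQENABLE registers so they can be
 * restored on resume. Skipped entirely when the device is already
 * runtime-suspended.
 */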
static int omap_mbox_suspend(struct device *dev)
{
	struct omap_mbox_device *mdev = dev_get_drvdata(dev);
	u32 usr, fifo, reg;

	if (pm_runtime_status_suspended(dev))
		return 0;

	for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
		if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
			dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
				fifo);
			return -EBUSY;
		}
	}

	for (usr = 0; usr < mdev->num_users; usr++) {
		reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
		mdev->irq_ctx[usr] = mbox_read_reg(mdev, reg);
	}

	return 0;
}

static int omap_mbox_resume(struct device *dev)
{
	struct omap_mbox_device *mdev = dev_get_drvdata(dev);
	u32 usr, reg;

	if (pm_runtime_status_suspended(dev))
		return 0;

	for (usr = 0; usr < mdev->num_users; usr++) {
		reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
		mbox_write_reg(mdev, mdev->irq_ctx[usr], reg);
	}

	return 0;
}
#endif

static const struct dev_pm_ops omap_mbox_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume)
};

static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 };
static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 };

static const struct of_device_id omap_mailbox_of_match[] = {
	{
		.compatible	= "ti,omap2-mailbox",
		.data		= &omap2_data,
	},
	{
		.compatible	= "ti,omap3-mailbox",
		.data		= &omap2_data,
	},
	{
		.compatible	= "ti,omap4-mailbox",
		.data		= &omap4_data,
	},
	{
		.compatible	= "ti,am654-mailbox",
		.data		= &omap4_data,
	},
	{
		.compatible	= "ti,am64-mailbox",
		.data		= &omap4_data,
	},
	{
		/* end */
	},
};
MODULE_DEVICE_TABLE(of, omap_mailbox_of_match);

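/*
 * Custom of_xlate: the mbox specifier cell carries the phandle of the
 * requested sub-mailbox child node; the channel is then looked up by
 * that node's name.
 */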
static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
					    const struct of_phandle_args *sp)
{
	phandle phandle = sp->args[0];
	struct device_node *node;
	struct omap_mbox_device *mdev;
	struct omap_mbox *mbox;

	mdev = container_of(controller, struct omap_mbox_device, controller);
	if (WARN_ON(!mdev))
		return ERR_PTR(-EINVAL);

	node = of_find_node_by_phandle(phandle);
	if (!node) {
		pr_err("%s: could not find node phandle 0x%x\n",
		       __func__, phandle);
		return ERR_PTR(-ENODEV);
	}

	mbox = omap_mbox_device_find(mdev, node->name);
	of_node_put(node);
	return mbox ? mbox->chan : ERR_PTR(-ENOENT);
}

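/*
 * Probe parses the controller node plus one child node per sub-mailbox.
 * An illustrative (not SoC-accurate) device tree fragment matching the
 * properties read below; node names, addresses and cell values are only
 * examples:
 *
 *	mailbox: mailbox@4a0f4000 {
 *		compatible = "ti,omap4-mailbox";
 *		reg = <0x4a0f4000 0x200>;
 *		interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
 *		#mbox-cells = <1>;
 *		ti,mbox-num-users = <3>;
 *		ti,mbox-num-fifos = <8>;
 *
 *		mbox_ipu: mbox-ipu {
 *			ti,mbox-tx = <0 0 0>;	// <fifo_id irq_id usr_id>
 *			ti,mbox-rx = <1 0 0>;
 *			// optional: ti,mbox-send-noirq;
 *		};
 *	};
 */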
static int omap_mbox_probe(struct platform_device *pdev)
{
	int ret;
	struct mbox_chan *chnls;
	struct omap_mbox **list, *mbox, *mboxblk;
	struct omap_mbox_fifo_info *finfo, *finfoblk;
	struct omap_mbox_device *mdev;
	struct omap_mbox_fifo *fifo;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *child;
	const struct omap_mbox_match_data *match_data;
	u32 intr_type, info_count;
	u32 num_users, num_fifos;
	u32 tmp[3];
	u32 l;
	int i;

	if (!node) {
		pr_err("%s: only DT-based devices are supported\n", __func__);
		return -ENODEV;
	}

	match_data = of_device_get_match_data(&pdev->dev);
	if (!match_data)
		return -ENODEV;
	intr_type = match_data->intr_type;

	if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
		return -ENODEV;

	if (of_property_read_u32(node, "ti,mbox-num-fifos", &num_fifos))
		return -ENODEV;

	info_count = of_get_available_child_count(node);
	if (!info_count) {
		dev_err(&pdev->dev, "no available mbox devices found\n");
		return -ENODEV;
	}

	finfoblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*finfoblk),
				GFP_KERNEL);
	if (!finfoblk)
		return -ENOMEM;

	finfo = finfoblk;
	child = NULL;
	for (i = 0; i < info_count; i++, finfo++) {
		child = of_get_next_available_child(node, child);
		ret = of_property_read_u32_array(child, "ti,mbox-tx", tmp,
						 ARRAY_SIZE(tmp));
		if (ret)
			return ret;
		finfo->tx_id = tmp[0];
		finfo->tx_irq = tmp[1];
		finfo->tx_usr = tmp[2];

		ret = of_property_read_u32_array(child, "ti,mbox-rx", tmp,
						 ARRAY_SIZE(tmp));
		if (ret)
			return ret;
		finfo->rx_id = tmp[0];
		finfo->rx_irq = tmp[1];
		finfo->rx_usr = tmp[2];

		finfo->name = child->name;

		if (of_find_property(child, "ti,mbox-send-noirq", NULL))
			finfo->send_no_irq = true;

		if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
		    finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)
			return -EINVAL;
	}

	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;

	mdev->mbox_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdev->mbox_base))
		return PTR_ERR(mdev->mbox_base);

	mdev->irq_ctx = devm_kcalloc(&pdev->dev, num_users, sizeof(u32),
				     GFP_KERNEL);
	if (!mdev->irq_ctx)
		return -ENOMEM;

	/* allocate one extra for marking end of list */
	list = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*list),
			    GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	chnls = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*chnls),
			     GFP_KERNEL);
	if (!chnls)
		return -ENOMEM;

	mboxblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*mbox),
			       GFP_KERNEL);
	if (!mboxblk)
		return -ENOMEM;

	mbox = mboxblk;
	finfo = finfoblk;
	for (i = 0; i < info_count; i++, finfo++) {
		fifo = &mbox->tx_fifo;
		fifo->msg = MAILBOX_MESSAGE(finfo->tx_id);
		fifo->fifo_stat = MAILBOX_FIFOSTATUS(finfo->tx_id);
		fifo->intr_bit = MAILBOX_IRQ_NOTFULL(finfo->tx_id);
		fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->tx_usr);
		fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->tx_usr);
		fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->tx_usr);

		fifo = &mbox->rx_fifo;
		fifo->msg = MAILBOX_MESSAGE(finfo->rx_id);
		fifo->msg_stat = MAILBOX_MSGSTATUS(finfo->rx_id);
		fifo->intr_bit = MAILBOX_IRQ_NEWMSG(finfo->rx_id);
		fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->rx_usr);
		fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->rx_usr);
		fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->rx_usr);

		mbox->send_no_irq = finfo->send_no_irq;
		mbox->intr_type = intr_type;

		mbox->parent = mdev;
		mbox->name = finfo->name;
		mbox->irq = platform_get_irq(pdev, finfo->tx_irq);
		if (mbox->irq < 0)
			return mbox->irq;
		mbox->chan = &chnls[i];
		chnls[i].con_priv = mbox;
		list[i] = mbox++;
	}

	mutex_init(&mdev->cfg_lock);
	mdev->dev = &pdev->dev;
	mdev->num_users = num_users;
	mdev->num_fifos = num_fifos;
	mdev->intr_type = intr_type;
	mdev->mboxes = list;

	/*
	 * The OMAP/K3 mailbox IP does not have a Tx-Done IRQ, but rather a
	 * Tx-Ready IRQ, which is used here to run the Tx state machine.
	 */
	mdev->controller.txdone_irq = true;
	mdev->controller.dev = mdev->dev;
	mdev->controller.ops = &omap_mbox_chan_ops;
	mdev->controller.chans = chnls;
	mdev->controller.num_chans = info_count;
	mdev->controller.of_xlate = omap_mbox_of_xlate;
	ret = omap_mbox_register(mdev);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, mdev);
	pm_runtime_enable(mdev->dev);

	ret = pm_runtime_get_sync(mdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(mdev->dev);
		goto unregister;
	}

	/*
	 * Just print the raw revision register; the format is not uniform
	 * across all SoCs.
	 */
	l = mbox_read_reg(mdev, MAILBOX_REVISION);
	dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l);

	ret = pm_runtime_put_sync(mdev->dev);
	if (ret < 0 && ret != -ENOSYS)
		goto unregister;

	devm_kfree(&pdev->dev, finfoblk);
	return 0;

unregister:
	pm_runtime_disable(mdev->dev);
	omap_mbox_unregister(mdev);
	return ret;
}

static int omap_mbox_remove(struct platform_device *pdev)
{
	struct omap_mbox_device *mdev = platform_get_drvdata(pdev);

	pm_runtime_disable(mdev->dev);
	omap_mbox_unregister(mdev);

	return 0;
}

static struct platform_driver omap_mbox_driver = {
	.probe	= omap_mbox_probe,
	.remove	= omap_mbox_remove,
	.driver	= {
		.name = "omap-mailbox",
		.pm = &omap_mbox_pm_ops,
		.of_match_table = of_match_ptr(omap_mailbox_of_match),
	},
};

static int __init omap_mbox_init(void)
{
	int err;

	err = class_register(&omap_mbox_class);
	if (err)
		return err;

	/* kfifo size sanity check: alignment and minimal size */
	mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(u32));
	mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, sizeof(u32));

	err = platform_driver_register(&omap_mbox_driver);
	if (err)
		class_unregister(&omap_mbox_class);

	return err;
}
subsys_initcall(omap_mbox_init);

static void __exit omap_mbox_exit(void)
{
	platform_driver_unregister(&omap_mbox_driver);
	class_unregister(&omap_mbox_class);
}
module_exit(omap_mbox_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("omap mailbox: interrupt-driven messaging");
MODULE_AUTHOR("Toshihiro Kobayashi");
MODULE_AUTHOR("Hiroshi DOYU");