1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
22b6d83e2SJassi Brar /*
32b6d83e2SJassi Brar * Mailbox: Common code for Mailbox controllers and users
42b6d83e2SJassi Brar *
52b6d83e2SJassi Brar * Copyright (C) 2013-2014 Linaro Ltd.
62b6d83e2SJassi Brar * Author: Jassi Brar <jassisinghbrar@gmail.com>
72b6d83e2SJassi Brar */
82b6d83e2SJassi Brar
92b6d83e2SJassi Brar #include <linux/interrupt.h>
102b6d83e2SJassi Brar #include <linux/spinlock.h>
112b6d83e2SJassi Brar #include <linux/mutex.h>
122b6d83e2SJassi Brar #include <linux/delay.h>
132b6d83e2SJassi Brar #include <linux/slab.h>
142b6d83e2SJassi Brar #include <linux/err.h>
152b6d83e2SJassi Brar #include <linux/module.h>
162b6d83e2SJassi Brar #include <linux/device.h>
172b6d83e2SJassi Brar #include <linux/bitops.h>
182b6d83e2SJassi Brar #include <linux/mailbox_client.h>
192b6d83e2SJassi Brar #include <linux/mailbox_controller.h>
20*e9803aacSRob Herring #include <linux/of.h>
212b6d83e2SJassi Brar
2286c22f8cSAshwin Chaugule #include "mailbox.h"
232b6d83e2SJassi Brar
242b6d83e2SJassi Brar static LIST_HEAD(mbox_cons);
252b6d83e2SJassi Brar static DEFINE_MUTEX(con_mutex);
262b6d83e2SJassi Brar
add_to_rbuf(struct mbox_chan * chan,void * mssg)272b6d83e2SJassi Brar static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
282b6d83e2SJassi Brar {
292b6d83e2SJassi Brar int idx;
302b6d83e2SJassi Brar unsigned long flags;
312b6d83e2SJassi Brar
322b6d83e2SJassi Brar spin_lock_irqsave(&chan->lock, flags);
332b6d83e2SJassi Brar
342b6d83e2SJassi Brar /* See if there is any space left */
352b6d83e2SJassi Brar if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
362b6d83e2SJassi Brar spin_unlock_irqrestore(&chan->lock, flags);
372b6d83e2SJassi Brar return -ENOBUFS;
382b6d83e2SJassi Brar }
392b6d83e2SJassi Brar
402b6d83e2SJassi Brar idx = chan->msg_free;
412b6d83e2SJassi Brar chan->msg_data[idx] = mssg;
422b6d83e2SJassi Brar chan->msg_count++;
432b6d83e2SJassi Brar
442b6d83e2SJassi Brar if (idx == MBOX_TX_QUEUE_LEN - 1)
452b6d83e2SJassi Brar chan->msg_free = 0;
462b6d83e2SJassi Brar else
472b6d83e2SJassi Brar chan->msg_free++;
482b6d83e2SJassi Brar
492b6d83e2SJassi Brar spin_unlock_irqrestore(&chan->lock, flags);
502b6d83e2SJassi Brar
512b6d83e2SJassi Brar return idx;
522b6d83e2SJassi Brar }
532b6d83e2SJassi Brar
/*
 * Try to push the oldest queued message to the controller.
 *
 * No-op if nothing is queued or a request is already in flight.
 * On successful submission the message becomes the channel's
 * active_req; for polled-txdone controllers the poll hrtimer is
 * kicked immediately so completion is detected without delay.
 */
static void msg_submit(struct mbox_chan *chan)
{
	unsigned count, idx;
	unsigned long flags;
	void *data;
	int err = -EBUSY;

	spin_lock_irqsave(&chan->lock, flags);

	if (!chan->msg_count || chan->active_req)
		goto exit;

	/*
	 * Locate the oldest queued message: msg_free points one past the
	 * newest entry, so step back msg_count slots modulo the ring size.
	 */
	count = chan->msg_count;
	idx = chan->msg_free;
	if (idx >= count)
		idx -= count;
	else
		idx += MBOX_TX_QUEUE_LEN - count;

	data = chan->msg_data[idx];

	/* Give the client a last chance to touch the message before TX */
	if (chan->cl->tx_prepare)
		chan->cl->tx_prepare(chan->cl, data);
	/* Try to submit a message to the MBOX controller */
	err = chan->mbox->ops->send_data(chan, data);
	if (!err) {
		chan->active_req = data;
		chan->msg_count--;
	}
exit:
	spin_unlock_irqrestore(&chan->lock, flags);

	if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
		/* kick start the timer immediately to avoid delays */
		spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
		hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
		spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
	}
}
932b6d83e2SJassi Brar
tx_tick(struct mbox_chan * chan,int r)942b6d83e2SJassi Brar static void tx_tick(struct mbox_chan *chan, int r)
952b6d83e2SJassi Brar {
962b6d83e2SJassi Brar unsigned long flags;
972b6d83e2SJassi Brar void *mssg;
982b6d83e2SJassi Brar
992b6d83e2SJassi Brar spin_lock_irqsave(&chan->lock, flags);
1002b6d83e2SJassi Brar mssg = chan->active_req;
1012b6d83e2SJassi Brar chan->active_req = NULL;
1022b6d83e2SJassi Brar spin_unlock_irqrestore(&chan->lock, flags);
1032b6d83e2SJassi Brar
1042b6d83e2SJassi Brar /* Submit next message */
1052b6d83e2SJassi Brar msg_submit(chan);
1062b6d83e2SJassi Brar
107cb710ab1SSudeep Holla if (!mssg)
108cb710ab1SSudeep Holla return;
109cb710ab1SSudeep Holla
1102b6d83e2SJassi Brar /* Notify the client */
111cb710ab1SSudeep Holla if (chan->cl->tx_done)
1122b6d83e2SJassi Brar chan->cl->tx_done(chan->cl, mssg, r);
1132b6d83e2SJassi Brar
114cc6eeaa3SSudeep Holla if (r != -ETIME && chan->cl->tx_block)
1152b6d83e2SJassi Brar complete(&chan->tx_complete);
1162b6d83e2SJassi Brar }
1172b6d83e2SJassi Brar
/*
 * Poll-mode TX-done tick: scan every channel of the controller and
 * complete those whose last transmission has finished, as reported by
 * the controller's last_tx_done().  Re-arms itself every
 * txpoll_period ms while any transmission is still pending.
 */
static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
{
	struct mbox_controller *mbox =
		container_of(hrtimer, struct mbox_controller, poll_hrt);
	bool txdone, resched = false;
	int i;
	unsigned long flags;

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		if (chan->active_req && chan->cl) {
			txdone = chan->mbox->ops->last_tx_done(chan);
			if (txdone)
				tx_tick(chan, 0);
			else
				resched = true;
		}
	}

	if (resched) {
		/*
		 * msg_submit() may be restarting this timer concurrently
		 * (it holds poll_hrt_lock around hrtimer_start()); only
		 * forward the expiry if the timer hasn't already been
		 * re-queued, otherwise we would push out that fresh start.
		 */
		spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
		if (!hrtimer_is_queued(hrtimer))
			hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
		spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);

		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}
1482b6d83e2SJassi Brar
1492b6d83e2SJassi Brar /**
1502b6d83e2SJassi Brar * mbox_chan_received_data - A way for controller driver to push data
1512b6d83e2SJassi Brar * received from remote to the upper layer.
1522b6d83e2SJassi Brar * @chan: Pointer to the mailbox channel on which RX happened.
1532b6d83e2SJassi Brar * @mssg: Client specific message typecasted as void *
1542b6d83e2SJassi Brar *
1552b6d83e2SJassi Brar * After startup and before shutdown any data received on the chan
1562b6d83e2SJassi Brar * is passed on to the API via atomic mbox_chan_received_data().
1572b6d83e2SJassi Brar * The controller should ACK the RX only after this call returns.
1582b6d83e2SJassi Brar */
mbox_chan_received_data(struct mbox_chan * chan,void * mssg)1592b6d83e2SJassi Brar void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
1602b6d83e2SJassi Brar {
1612b6d83e2SJassi Brar /* No buffering the received data */
1622b6d83e2SJassi Brar if (chan->cl->rx_callback)
1632b6d83e2SJassi Brar chan->cl->rx_callback(chan->cl, mssg);
1642b6d83e2SJassi Brar }
1652b6d83e2SJassi Brar EXPORT_SYMBOL_GPL(mbox_chan_received_data);
1662b6d83e2SJassi Brar
1672b6d83e2SJassi Brar /**
1682b6d83e2SJassi Brar * mbox_chan_txdone - A way for controller driver to notify the
1692b6d83e2SJassi Brar * framework that the last TX has completed.
1702b6d83e2SJassi Brar * @chan: Pointer to the mailbox chan on which TX happened.
1712b6d83e2SJassi Brar * @r: Status of last TX - OK or ERROR
1722b6d83e2SJassi Brar *
1732b6d83e2SJassi Brar * The controller that has IRQ for TX ACK calls this atomic API
1742b6d83e2SJassi Brar * to tick the TX state machine. It works only if txdone_irq
1752b6d83e2SJassi Brar * is set by the controller.
1762b6d83e2SJassi Brar */
mbox_chan_txdone(struct mbox_chan * chan,int r)1772b6d83e2SJassi Brar void mbox_chan_txdone(struct mbox_chan *chan, int r)
1782b6d83e2SJassi Brar {
1792b6d83e2SJassi Brar if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
1802b6d83e2SJassi Brar dev_err(chan->mbox->dev,
1812b6d83e2SJassi Brar "Controller can't run the TX ticker\n");
1822b6d83e2SJassi Brar return;
1832b6d83e2SJassi Brar }
1842b6d83e2SJassi Brar
1852b6d83e2SJassi Brar tx_tick(chan, r);
1862b6d83e2SJassi Brar }
1872b6d83e2SJassi Brar EXPORT_SYMBOL_GPL(mbox_chan_txdone);
1882b6d83e2SJassi Brar
1892b6d83e2SJassi Brar /**
1902b6d83e2SJassi Brar * mbox_client_txdone - The way for a client to run the TX state machine.
1912b6d83e2SJassi Brar * @chan: Mailbox channel assigned to this client.
1922b6d83e2SJassi Brar * @r: Success status of last transmission.
1932b6d83e2SJassi Brar *
1942b6d83e2SJassi Brar * The client/protocol had received some 'ACK' packet and it notifies
1952b6d83e2SJassi Brar * the API that the last packet was sent successfully. This only works
1962b6d83e2SJassi Brar * if the controller can't sense TX-Done.
1972b6d83e2SJassi Brar */
mbox_client_txdone(struct mbox_chan * chan,int r)1982b6d83e2SJassi Brar void mbox_client_txdone(struct mbox_chan *chan, int r)
1992b6d83e2SJassi Brar {
2002b6d83e2SJassi Brar if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
2012b6d83e2SJassi Brar dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
2022b6d83e2SJassi Brar return;
2032b6d83e2SJassi Brar }
2042b6d83e2SJassi Brar
2052b6d83e2SJassi Brar tx_tick(chan, r);
2062b6d83e2SJassi Brar }
2072b6d83e2SJassi Brar EXPORT_SYMBOL_GPL(mbox_client_txdone);
2082b6d83e2SJassi Brar
2092b6d83e2SJassi Brar /**
2102b6d83e2SJassi Brar * mbox_client_peek_data - A way for client driver to pull data
2112b6d83e2SJassi Brar * received from remote by the controller.
2122b6d83e2SJassi Brar * @chan: Mailbox channel assigned to this client.
2132b6d83e2SJassi Brar *
2142b6d83e2SJassi Brar * A poke to controller driver for any received data.
2152b6d83e2SJassi Brar * The data is actually passed onto client via the
2162b6d83e2SJassi Brar * mbox_chan_received_data()
2172b6d83e2SJassi Brar * The call can be made from atomic context, so the controller's
2182b6d83e2SJassi Brar * implementation of peek_data() must not sleep.
2192b6d83e2SJassi Brar *
2202b6d83e2SJassi Brar * Return: True, if controller has, and is going to push after this,
2212b6d83e2SJassi Brar * some data.
2222b6d83e2SJassi Brar * False, if controller doesn't have any data to be read.
2232b6d83e2SJassi Brar */
mbox_client_peek_data(struct mbox_chan * chan)2242b6d83e2SJassi Brar bool mbox_client_peek_data(struct mbox_chan *chan)
2252b6d83e2SJassi Brar {
2262b6d83e2SJassi Brar if (chan->mbox->ops->peek_data)
2272b6d83e2SJassi Brar return chan->mbox->ops->peek_data(chan);
2282b6d83e2SJassi Brar
2292b6d83e2SJassi Brar return false;
2302b6d83e2SJassi Brar }
2312b6d83e2SJassi Brar EXPORT_SYMBOL_GPL(mbox_client_peek_data);
2322b6d83e2SJassi Brar
2332b6d83e2SJassi Brar /**
2342b6d83e2SJassi Brar * mbox_send_message - For client to submit a message to be
2352b6d83e2SJassi Brar * sent to the remote.
2362b6d83e2SJassi Brar * @chan: Mailbox channel assigned to this client.
2372b6d83e2SJassi Brar * @mssg: Client specific message typecasted.
2382b6d83e2SJassi Brar *
2392b6d83e2SJassi Brar * For client to submit data to the controller destined for a remote
2402b6d83e2SJassi Brar * processor. If the client had set 'tx_block', the call will return
2412b6d83e2SJassi Brar * either when the remote receives the data or when 'tx_tout' millisecs
2422b6d83e2SJassi Brar * run out.
2432b6d83e2SJassi Brar * In non-blocking mode, the requests are buffered by the API and a
2442b6d83e2SJassi Brar * non-negative token is returned for each queued request. If the request
2452b6d83e2SJassi Brar * is not queued, a negative token is returned. Upon failure or successful
2462b6d83e2SJassi Brar * TX, the API calls 'tx_done' from atomic context, from which the client
2472b6d83e2SJassi Brar * could submit yet another request.
2482b6d83e2SJassi Brar * The pointer to message should be preserved until it is sent
2492b6d83e2SJassi Brar * over the chan, i.e, tx_done() is made.
2502b6d83e2SJassi Brar * This function could be called from atomic context as it simply
2512b6d83e2SJassi Brar * queues the data and returns a token against the request.
2522b6d83e2SJassi Brar *
2532b6d83e2SJassi Brar * Return: Non-negative integer for successful submission (non-blocking mode)
2542b6d83e2SJassi Brar * or transmission over chan (blocking mode).
2552b6d83e2SJassi Brar * Negative value denotes failure.
2562b6d83e2SJassi Brar */
int mbox_send_message(struct mbox_chan *chan, void *mssg)
{
	int t;

	if (!chan || !chan->cl)
		return -EINVAL;

	/* Queue the message; t is the ring-buffer slot, returned as token */
	t = add_to_rbuf(chan, mssg);
	if (t < 0) {
		dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
		return t;
	}

	/* Kick off transmission if the channel is currently idle */
	msg_submit(chan);

	if (chan->cl->tx_block) {
		unsigned long wait;
		int ret;

		/* "forever" is capped at one hour to avoid jiffies overflow */
		if (!chan->cl->tx_tout) /* wait forever */
			wait = msecs_to_jiffies(3600000);
		else
			wait = msecs_to_jiffies(chan->cl->tx_tout);

		ret = wait_for_completion_timeout(&chan->tx_complete, wait);
		if (ret == 0) {
			/* Timed out: abort the request and report -ETIME */
			t = -ETIME;
			tx_tick(chan, t);
		}
	}

	return t;
}
EXPORT_SYMBOL_GPL(mbox_send_message);
2912b6d83e2SJassi Brar
2922b6d83e2SJassi Brar /**
293a8803d74SThierry Reding * mbox_flush - flush a mailbox channel
294a8803d74SThierry Reding * @chan: mailbox channel to flush
295a8803d74SThierry Reding * @timeout: time, in milliseconds, to allow the flush operation to succeed
296a8803d74SThierry Reding *
297a8803d74SThierry Reding * Mailbox controllers that need to work in atomic context can implement the
298a8803d74SThierry Reding * ->flush() callback to busy loop until a transmission has been completed.
299a8803d74SThierry Reding * The implementation must call mbox_chan_txdone() upon success. Clients can
300a8803d74SThierry Reding * call the mbox_flush() function at any time after mbox_send_message() to
301a8803d74SThierry Reding * flush the transmission. After the function returns success, the mailbox
302a8803d74SThierry Reding * transmission is guaranteed to have completed.
303a8803d74SThierry Reding *
304a8803d74SThierry Reding * Returns: 0 on success or a negative error code on failure.
305a8803d74SThierry Reding */
mbox_flush(struct mbox_chan * chan,unsigned long timeout)306a8803d74SThierry Reding int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
307a8803d74SThierry Reding {
308a8803d74SThierry Reding int ret;
309a8803d74SThierry Reding
310a8803d74SThierry Reding if (!chan->mbox->ops->flush)
311a8803d74SThierry Reding return -ENOTSUPP;
312a8803d74SThierry Reding
313a8803d74SThierry Reding ret = chan->mbox->ops->flush(chan, timeout);
314a8803d74SThierry Reding if (ret < 0)
315a8803d74SThierry Reding tx_tick(chan, ret);
316a8803d74SThierry Reding
317a8803d74SThierry Reding return ret;
318a8803d74SThierry Reding }
3194f055779SThierry Reding EXPORT_SYMBOL_GPL(mbox_flush);
320a8803d74SThierry Reding
/*
 * Attach client @cl to channel @chan and start the channel.
 *
 * Caller must hold con_mutex.  Returns -EBUSY if the channel is
 * already taken, or the controller's startup() error code (in which
 * case the channel is released again via mbox_free_channel()).
 * On success a reference is held on the controller driver's module
 * for the lifetime of the binding.
 */
static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
	struct device *dev = cl->dev;
	unsigned long flags;
	int ret;

	if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
		dev_dbg(dev, "%s: mailbox not free\n", __func__);
		return -EBUSY;
	}

	spin_lock_irqsave(&chan->lock, flags);
	chan->msg_free = 0;
	chan->msg_count = 0;
	chan->active_req = NULL;
	chan->cl = cl;
	init_completion(&chan->tx_complete);

	/* A client that can signal TX-done itself upgrades POLL mode to ACK */
	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
		chan->txdone_method = TXDONE_BY_ACK;

	spin_unlock_irqrestore(&chan->lock, flags);

	if (chan->mbox->ops->startup) {
		ret = chan->mbox->ops->startup(chan);

		if (ret) {
			dev_err(dev, "Unable to startup the chan (%d)\n", ret);
			mbox_free_channel(chan);
			return ret;
		}
	}

	return 0;
}
35685a95380SElliot Berman
35785a95380SElliot Berman /**
35885a95380SElliot Berman * mbox_bind_client - Request a mailbox channel.
35985a95380SElliot Berman * @chan: The mailbox channel to bind the client to.
36085a95380SElliot Berman * @cl: Identity of the client requesting the channel.
36185a95380SElliot Berman *
36285a95380SElliot Berman * The Client specifies its requirements and capabilities while asking for
36385a95380SElliot Berman * a mailbox channel. It can't be called from atomic context.
36485a95380SElliot Berman * The channel is exclusively allocated and can't be used by another
36585a95380SElliot Berman * client before the owner calls mbox_free_channel.
36685a95380SElliot Berman * After assignment, any packet received on this channel will be
36785a95380SElliot Berman * handed over to the client via the 'rx_callback'.
36885a95380SElliot Berman * The framework holds reference to the client, so the mbox_client
36985a95380SElliot Berman * structure shouldn't be modified until the mbox_free_channel returns.
37085a95380SElliot Berman *
37185a95380SElliot Berman * Return: 0 if the channel was assigned to the client successfully.
37285a95380SElliot Berman * <0 for request failure.
37385a95380SElliot Berman */
mbox_bind_client(struct mbox_chan * chan,struct mbox_client * cl)37485a95380SElliot Berman int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
37585a95380SElliot Berman {
37685a95380SElliot Berman int ret;
37785a95380SElliot Berman
37885a95380SElliot Berman mutex_lock(&con_mutex);
37985a95380SElliot Berman ret = __mbox_bind_client(chan, cl);
38085a95380SElliot Berman mutex_unlock(&con_mutex);
38185a95380SElliot Berman
38285a95380SElliot Berman return ret;
38385a95380SElliot Berman }
38485a95380SElliot Berman EXPORT_SYMBOL_GPL(mbox_bind_client);
38585a95380SElliot Berman
386a8803d74SThierry Reding /**
3872b6d83e2SJassi Brar * mbox_request_channel - Request a mailbox channel.
3882b6d83e2SJassi Brar * @cl: Identity of the client requesting the channel.
3892b6d83e2SJassi Brar * @index: Index of mailbox specifier in 'mboxes' property.
3902b6d83e2SJassi Brar *
3912b6d83e2SJassi Brar * The Client specifies its requirements and capabilities while asking for
3922b6d83e2SJassi Brar * a mailbox channel. It can't be called from atomic context.
3932b6d83e2SJassi Brar * The channel is exclusively allocated and can't be used by another
3942b6d83e2SJassi Brar * client before the owner calls mbox_free_channel.
3952b6d83e2SJassi Brar * After assignment, any packet received on this channel will be
3962b6d83e2SJassi Brar * handed over to the client via the 'rx_callback'.
3972b6d83e2SJassi Brar * The framework holds reference to the client, so the mbox_client
3982b6d83e2SJassi Brar * structure shouldn't be modified until the mbox_free_channel returns.
3992b6d83e2SJassi Brar *
4002b6d83e2SJassi Brar * Return: Pointer to the channel assigned to the client if successful.
4012b6d83e2SJassi Brar * ERR_PTR for request failure.
4022b6d83e2SJassi Brar */
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
	struct device *dev = cl->dev;
	struct mbox_controller *mbox;
	struct of_phandle_args spec;
	struct mbox_chan *chan;
	int ret;

	if (!dev || !dev->of_node) {
		pr_debug("%s: No owner device node\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&con_mutex);

	/* Resolve the index'th "mboxes" phandle of the client's DT node */
	if (of_parse_phandle_with_args(dev->of_node, "mboxes",
				       "#mbox-cells", index, &spec)) {
		dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
		mutex_unlock(&con_mutex);
		return ERR_PTR(-ENODEV);
	}

	/*
	 * Default to -EPROBE_DEFER: the phandle may point at a controller
	 * whose driver simply hasn't registered yet.
	 */
	chan = ERR_PTR(-EPROBE_DEFER);
	list_for_each_entry(mbox, &mbox_cons, node)
		if (mbox->dev->of_node == spec.np) {
			chan = mbox->of_xlate(mbox, &spec);
			if (!IS_ERR(chan))
				break;
		}

	of_node_put(spec.np);

	if (IS_ERR(chan)) {
		mutex_unlock(&con_mutex);
		return chan;
	}

	ret = __mbox_bind_client(chan, cl);
	if (ret)
		chan = ERR_PTR(ret);

	mutex_unlock(&con_mutex);
	return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);
4482b6d83e2SJassi Brar
mbox_request_channel_byname(struct mbox_client * cl,const char * name)449dfabde20SLee Jones struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
450dfabde20SLee Jones const char *name)
451dfabde20SLee Jones {
452dfabde20SLee Jones struct device_node *np = cl->dev->of_node;
453dfabde20SLee Jones struct property *prop;
454dfabde20SLee Jones const char *mbox_name;
455dfabde20SLee Jones int index = 0;
456dfabde20SLee Jones
457dfabde20SLee Jones if (!np) {
458dfabde20SLee Jones dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
4590c44d789SLee Jones return ERR_PTR(-EINVAL);
460dfabde20SLee Jones }
461dfabde20SLee Jones
462dfabde20SLee Jones if (!of_get_property(np, "mbox-names", NULL)) {
463dfabde20SLee Jones dev_err(cl->dev,
464dfabde20SLee Jones "%s() requires an \"mbox-names\" property\n", __func__);
4650c44d789SLee Jones return ERR_PTR(-EINVAL);
466dfabde20SLee Jones }
467dfabde20SLee Jones
468dfabde20SLee Jones of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
469dfabde20SLee Jones if (!strncmp(name, mbox_name, strlen(name)))
47025777e57Smorten petersen return mbox_request_channel(cl, index);
471dfabde20SLee Jones index++;
472dfabde20SLee Jones }
473dfabde20SLee Jones
47425777e57Smorten petersen dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
47525777e57Smorten petersen __func__, name);
47625777e57Smorten petersen return ERR_PTR(-EINVAL);
477dfabde20SLee Jones }
478dfabde20SLee Jones EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
479dfabde20SLee Jones
4802b6d83e2SJassi Brar /**
4812b6d83e2SJassi Brar * mbox_free_channel - The client relinquishes control of a mailbox
4822b6d83e2SJassi Brar * channel by this call.
4832b6d83e2SJassi Brar * @chan: The mailbox channel to be freed.
4842b6d83e2SJassi Brar */
void mbox_free_channel(struct mbox_chan *chan)
{
	unsigned long flags;

	if (!chan || !chan->cl)
		return;

	if (chan->mbox->ops->shutdown)
		chan->mbox->ops->shutdown(chan);

	/* The queued TX requests are simply aborted, no callbacks are made */
	spin_lock_irqsave(&chan->lock, flags);
	chan->cl = NULL;
	chan->active_req = NULL;
	/* Undo the ACK upgrade applied at bind time for knows_txdone clients */
	if (chan->txdone_method == TXDONE_BY_ACK)
		chan->txdone_method = TXDONE_BY_POLL;

	/* Drop the controller-module reference taken by __mbox_bind_client() */
	module_put(chan->mbox->dev->driver->owner);
	spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);
5062b6d83e2SJassi Brar
5072b6d83e2SJassi Brar static struct mbox_chan *
of_mbox_index_xlate(struct mbox_controller * mbox,const struct of_phandle_args * sp)5082b6d83e2SJassi Brar of_mbox_index_xlate(struct mbox_controller *mbox,
5092b6d83e2SJassi Brar const struct of_phandle_args *sp)
5102b6d83e2SJassi Brar {
5112b6d83e2SJassi Brar int ind = sp->args[0];
5122b6d83e2SJassi Brar
5132b6d83e2SJassi Brar if (ind >= mbox->num_chans)
5142d805fc1SBenson Leung return ERR_PTR(-EINVAL);
5152b6d83e2SJassi Brar
5162b6d83e2SJassi Brar return &mbox->chans[ind];
5172b6d83e2SJassi Brar }
5182b6d83e2SJassi Brar
5192b6d83e2SJassi Brar /**
5202b6d83e2SJassi Brar * mbox_controller_register - Register the mailbox controller
5212b6d83e2SJassi Brar * @mbox: Pointer to the mailbox controller.
5222b6d83e2SJassi Brar *
5232b6d83e2SJassi Brar * The controller driver registers its communication channels
5242b6d83e2SJassi Brar */
int mbox_controller_register(struct mbox_controller *mbox)
{
	int i, txdone;

	/* Sanity check */
	if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
		return -EINVAL;

	/* Pick the TX-done discipline from the controller's capabilities */
	if (mbox->txdone_irq)
		txdone = TXDONE_BY_IRQ;
	else if (mbox->txdone_poll)
		txdone = TXDONE_BY_POLL;
	else /* It has to be ACK then */
		txdone = TXDONE_BY_ACK;

	if (txdone == TXDONE_BY_POLL) {

		/* Polling is impossible without a way to query TX state */
		if (!mbox->ops->last_tx_done) {
			dev_err(mbox->dev, "last_tx_done method is absent\n");
			return -EINVAL;
		}

		hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		mbox->poll_hrt.function = txdone_hrtimer;
		spin_lock_init(&mbox->poll_hrt_lock);
	}

	/* Initialize every channel as free with the chosen txdone method */
	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		chan->cl = NULL;
		chan->mbox = mbox;
		chan->txdone_method = txdone;
		spin_lock_init(&chan->lock);
	}

	/* Default DT translation: cell 0 is a plain channel index */
	if (!mbox->of_xlate)
		mbox->of_xlate = of_mbox_index_xlate;

	mutex_lock(&con_mutex);
	list_add_tail(&mbox->node, &mbox_cons);
	mutex_unlock(&con_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mbox_controller_register);
5722b6d83e2SJassi Brar
5732b6d83e2SJassi Brar /**
5742b6d83e2SJassi Brar * mbox_controller_unregister - Unregister the mailbox controller
5752b6d83e2SJassi Brar * @mbox: Pointer to the mailbox controller.
5762b6d83e2SJassi Brar */
mbox_controller_unregister(struct mbox_controller * mbox)5772b6d83e2SJassi Brar void mbox_controller_unregister(struct mbox_controller *mbox)
5782b6d83e2SJassi Brar {
5792b6d83e2SJassi Brar int i;
5802b6d83e2SJassi Brar
5812b6d83e2SJassi Brar if (!mbox)
5822b6d83e2SJassi Brar return;
5832b6d83e2SJassi Brar
5842b6d83e2SJassi Brar mutex_lock(&con_mutex);
5852b6d83e2SJassi Brar
5862b6d83e2SJassi Brar list_del(&mbox->node);
5872b6d83e2SJassi Brar
5882b6d83e2SJassi Brar for (i = 0; i < mbox->num_chans; i++)
5892b6d83e2SJassi Brar mbox_free_channel(&mbox->chans[i]);
5902b6d83e2SJassi Brar
5912b6d83e2SJassi Brar if (mbox->txdone_poll)
5920cc67945SSudeep Holla hrtimer_cancel(&mbox->poll_hrt);
5932b6d83e2SJassi Brar
5942b6d83e2SJassi Brar mutex_unlock(&con_mutex);
5952b6d83e2SJassi Brar }
5962b6d83e2SJassi Brar EXPORT_SYMBOL_GPL(mbox_controller_unregister);
597e898d9cdSThierry Reding
/* devres release callback: @res holds a pointer to the controller */
static void __devm_mbox_controller_unregister(struct device *dev, void *res)
{
	mbox_controller_unregister(*(struct mbox_controller **)res);
}
604e898d9cdSThierry Reding
/* devres match callback: true when @res refers to the controller @data */
static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
{
	struct mbox_controller **slot = res;

	/* A devres entry without a controller pointer is a bug */
	if (WARN_ON(!slot || !*slot))
		return 0;

	return *slot == data;
}
614e898d9cdSThierry Reding
615e898d9cdSThierry Reding /**
616e898d9cdSThierry Reding * devm_mbox_controller_register() - managed mbox_controller_register()
617e898d9cdSThierry Reding * @dev: device owning the mailbox controller being registered
618e898d9cdSThierry Reding * @mbox: mailbox controller being registered
619e898d9cdSThierry Reding *
620e898d9cdSThierry Reding * This function adds a device-managed resource that will make sure that the
621e898d9cdSThierry Reding * mailbox controller, which is registered using mbox_controller_register()
622e898d9cdSThierry Reding * as part of this function, will be unregistered along with the rest of
623e898d9cdSThierry Reding * device-managed resources upon driver probe failure or driver removal.
624e898d9cdSThierry Reding *
625e898d9cdSThierry Reding * Returns 0 on success or a negative error code on failure.
626e898d9cdSThierry Reding */
devm_mbox_controller_register(struct device * dev,struct mbox_controller * mbox)627e898d9cdSThierry Reding int devm_mbox_controller_register(struct device *dev,
628e898d9cdSThierry Reding struct mbox_controller *mbox)
629e898d9cdSThierry Reding {
630e898d9cdSThierry Reding struct mbox_controller **ptr;
631e898d9cdSThierry Reding int err;
632e898d9cdSThierry Reding
633e898d9cdSThierry Reding ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
634e898d9cdSThierry Reding GFP_KERNEL);
635e898d9cdSThierry Reding if (!ptr)
636e898d9cdSThierry Reding return -ENOMEM;
637e898d9cdSThierry Reding
638e898d9cdSThierry Reding err = mbox_controller_register(mbox);
639e898d9cdSThierry Reding if (err < 0) {
640e898d9cdSThierry Reding devres_free(ptr);
641e898d9cdSThierry Reding return err;
642e898d9cdSThierry Reding }
643e898d9cdSThierry Reding
644e898d9cdSThierry Reding devres_add(dev, ptr);
645e898d9cdSThierry Reding *ptr = mbox;
646e898d9cdSThierry Reding
647e898d9cdSThierry Reding return 0;
648e898d9cdSThierry Reding }
649e898d9cdSThierry Reding EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
650e898d9cdSThierry Reding
651e898d9cdSThierry Reding /**
652e898d9cdSThierry Reding * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
653e898d9cdSThierry Reding * @dev: device owning the mailbox controller being unregistered
654e898d9cdSThierry Reding * @mbox: mailbox controller being unregistered
655e898d9cdSThierry Reding *
656e898d9cdSThierry Reding * This function unregisters the mailbox controller and removes the device-
657e898d9cdSThierry Reding * managed resource that was set up to automatically unregister the mailbox
658e898d9cdSThierry Reding * controller on driver probe failure or driver removal. It's typically not
659e898d9cdSThierry Reding * necessary to call this function.
660e898d9cdSThierry Reding */
devm_mbox_controller_unregister(struct device * dev,struct mbox_controller * mbox)661e898d9cdSThierry Reding void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
662e898d9cdSThierry Reding {
663e898d9cdSThierry Reding WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
664e898d9cdSThierry Reding devm_mbox_controller_match, mbox));
665e898d9cdSThierry Reding }
666e898d9cdSThierry Reding EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);
667