xref: /openbmc/linux/drivers/mailbox/ti-msgmgr.c (revision a90bb65a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Texas Instruments' Message Manager Driver
4  *
5  * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
6  *	Nishanth Menon
7  */
8 
9 #define pr_fmt(fmt) "%s: " fmt, __func__
10 
11 #include <linux/device.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/iopoll.h>
15 #include <linux/kernel.h>
16 #include <linux/mailbox_controller.h>
17 #include <linux/module.h>
18 #include <linux/of_device.h>
19 #include <linux/of.h>
20 #include <linux/of_irq.h>
21 #include <linux/platform_device.h>
22 #include <linux/soc/ti/ti-msgmgr.h>
23 
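/*
 * A summary of the Message Manager layout encoded by the macros below:
 * each proxy owns a 0x10000 window in the queue proxy region, each queue
 * a 0x80 window within it, and data registers are 4 bytes wide. Each
 * queue also has a 4-byte state register in the status region, with the
 * pending message count in bits [23:12].
 */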
24 #define Q_DATA_OFFSET(proxy, queue, reg)	\
25 		     ((0x10000 * (proxy)) + (0x80 * (queue)) + ((reg) * 4))
26 #define Q_STATE_OFFSET(queue)			((queue) * 0x4)
27 #define Q_STATE_ENTRY_COUNT_MASK		(0xFFF000)
28 
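/*
 * A summary of the Secure Proxy layout encoded by the macros below: each
 * proxy thread owns a 0x1000 window. Data registers start at offset 0x4
 * of the thread window in the target data region, the thread status
 * register (message count in the low byte) sits at the start of the
 * window in the RT region, and the thread control register (direction in
 * bit 31) sits 0x1000 past the thread base in the SCFG region.
 */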
29 #define SPROXY_THREAD_OFFSET(tid) (0x1000 * (tid))
30 #define SPROXY_THREAD_DATA_OFFSET(tid, reg) \
31 	(SPROXY_THREAD_OFFSET(tid) + ((reg) * 0x4) + 0x4)
32 
33 #define SPROXY_THREAD_STATUS_OFFSET(tid) (SPROXY_THREAD_OFFSET(tid))
34 
35 #define SPROXY_THREAD_STATUS_COUNT_MASK (0xFF)
36 
37 #define SPROXY_THREAD_CTRL_OFFSET(tid) (0x1000 + SPROXY_THREAD_OFFSET(tid))
38 #define SPROXY_THREAD_CTRL_DIR_MASK (0x1 << 31)
39 
40 /**
41  * struct ti_msgmgr_valid_queue_desc - SoC valid queues meant for this processor
42  * @queue_id:	Queue Number for this path
43  * @proxy_id:	Proxy ID representing the processor in SoC
44  * @is_tx:	Is this a transmit path?
45  */
46 struct ti_msgmgr_valid_queue_desc {
47 	u8 queue_id;
48 	u8 proxy_id;
49 	bool is_tx;
50 };
51 
52 /**
53  * struct ti_msgmgr_desc - Description of message manager integration
54  * @queue_count:	Number of Queues
55  * @max_message_size:	Maximum message size in bytes
56  * @max_messages:	Maximum number of messages
57  * @data_first_reg:	First data register for proxy data region
58  * @data_last_reg:	Last data register for proxy data region
59  * @status_cnt_mask:	Mask for getting the status value
60  * @status_err_mask:	Mask for getting the error value, if applicable
61  * @tx_polled:		Do I need to use polled mechanism for tx
62  * @tx_poll_timeout_ms: Timeout in ms if polled
63  * @valid_queues:	List of Valid queues that the processor can access
64  * @data_region_name:	Name of the proxy data region
65  * @status_region_name:	Name of the proxy status region
66  * @ctrl_region_name:	Name of the proxy control region
67  * @num_valid_queues:	Number of valid queues
68  * @is_sproxy:		Is this a Secure Proxy instance?
69  *
70  * This structure is used in of match data to describe how integration
71  * for a specific compatible SoC is done.
72  */
73 struct ti_msgmgr_desc {
74 	u8 queue_count;
75 	u8 max_message_size;
76 	u8 max_messages;
77 	u8 data_first_reg;
78 	u8 data_last_reg;
79 	u32 status_cnt_mask;
80 	u32 status_err_mask;
81 	bool tx_polled;
82 	int tx_poll_timeout_ms;
83 	const struct ti_msgmgr_valid_queue_desc *valid_queues;
84 	const char *data_region_name;
85 	const char *status_region_name;
86 	const char *ctrl_region_name;
87 	int num_valid_queues;
88 	bool is_sproxy;
89 };
90 
91 /**
92  * struct ti_queue_inst - Description of a queue instance
93  * @name:	Queue Name
94  * @queue_id:	Queue Identifier as mapped on SoC
95  * @proxy_id:	Proxy Identifier as mapped on SoC
96  * @irq:	IRQ for Rx Queue
97  * @is_tx:	'true' if transmit queue, else 'false'
98  * @queue_buff_start: First register of Data Buffer
99  * @queue_buff_end: Last (or confirmation) register of Data buffer
100  * @queue_state: Queue status register
101  * @queue_ctrl: Queue Control register
102  * @chan:	Mailbox channel
103  * @rx_buff:	Receive buffer pointer allocated at queue startup, max_message_size bytes
104  * @polled_rx_mode: Use polling for rx instead of interrupts
105  */
106 struct ti_queue_inst {
107 	char name[30];
108 	u8 queue_id;
109 	u8 proxy_id;
110 	int irq;
111 	bool is_tx;
112 	void __iomem *queue_buff_start;
113 	void __iomem *queue_buff_end;
114 	void __iomem *queue_state;
115 	void __iomem *queue_ctrl;
116 	struct mbox_chan *chan;
117 	u32 *rx_buff;
118 	bool polled_rx_mode;
119 };
120 
121 /**
122  * struct ti_msgmgr_inst - Description of a Message Manager Instance
123  * @dev:	device pointer corresponding to the Message Manager instance
124  * @desc:	Description of the SoC integration
125  * @queue_proxy_region:	Queue proxy region where queue buffers are located
126  * @queue_state_debug_region:	Queue status register regions
127  * @queue_ctrl_region:	Queue Control register regions
128  * @num_valid_queues:	Number of valid queues defined for the processor
129  *		Note: other queues are probably reserved for other processors
130  *		in the SoC.
131  * @qinsts:	Array of valid Queue Instances for the Processor
132  * @mbox:	Mailbox Controller
133  * @chans:	Array for channels corresponding to the Queue Instances.
134  */
135 struct ti_msgmgr_inst {
136 	struct device *dev;
137 	const struct ti_msgmgr_desc *desc;
138 	void __iomem *queue_proxy_region;
139 	void __iomem *queue_state_debug_region;
140 	void __iomem *queue_ctrl_region;
141 	u8 num_valid_queues;
142 	struct ti_queue_inst *qinsts;
143 	struct mbox_controller mbox;
144 	struct mbox_chan *chans;
145 };
146 
147 /**
148  * ti_msgmgr_queue_get_num_messages() - Get the number of pending messages
149  * @d:		Description of message manager
150  * @qinst:	Queue instance for which we check the number of pending messages
151  *
152  * Return: number of messages pending in the queue (0 == no pending messages)
153  */
154 static inline int
155 ti_msgmgr_queue_get_num_messages(const struct ti_msgmgr_desc *d,
156 				 struct ti_queue_inst *qinst)
157 {
158 	u32 val;
159 	u32 status_cnt_mask = d->status_cnt_mask;
160 
161 	/*
162 	 * We cannot use relaxed operation here - update may happen
163 	 * real-time.
164 	 */
165 	val = readl(qinst->queue_state) & status_cnt_mask;
166 	val >>= __ffs(status_cnt_mask);
167 
168 	return val;
169 }
170 
171 /**
172  * ti_msgmgr_queue_is_error() - Check to see if there is queue error
173  * @d:		Description of message manager
174  * @qinst:	Queue instance for which we check for errors
175  *
176  * Return: true if error, else false
177  */
178 static inline bool ti_msgmgr_queue_is_error(const struct ti_msgmgr_desc *d,
179 					    struct ti_queue_inst *qinst)
180 {
181 	u32 val;
182 
183 	/* Msgmgr has no error detection */
184 	if (!d->is_sproxy)
185 		return false;
186 
187 	/*
188 	 * We cannot use relaxed operation here - update may happen
189 	 * real-time.
190 	 */
191 	val = readl(qinst->queue_state) & d->status_err_mask;
192 
193 	return val ? true : false;
194 }
195 
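/**
 * ti_msgmgr_queue_rx_data() - Read a full message out of a receive queue
 * @chan:	Channel Pointer
 * @qinst:	Queue instance to read from
 * @desc:	Description of the SoC integration
 *
 * Reads max_message_size bytes word by word from the queue data registers
 * and hands the message to the client via mbox_chan_received_data().
 *
 * Return: 0 always.
 */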
196 static int ti_msgmgr_queue_rx_data(struct mbox_chan *chan, struct ti_queue_inst *qinst,
197 				   const struct ti_msgmgr_desc *desc)
198 {
199 	int num_words;
200 	struct ti_msgmgr_message message;
201 	void __iomem *data_reg;
202 	u32 *word_data;
203 
204 	/*
205 	 * I have no idea about the protocol being used to communicate with the
206 	 * remote producer - 0 could be valid data, so I wont make a judgement
207 	 * of how many bytes I should be reading. Let the client figure this
208 	 * out.. I just read the full message and pass it on..
209 	 */
210 	message.len = desc->max_message_size;
211 	message.buf = (u8 *)qinst->rx_buff;
212 
213 	/*
214 	 * NOTE about register access involved here:
215 	 * the hardware block is implemented with 32bit access operations and no
216 	 * support for data splitting.  We don't want the hardware to misbehave
217 	 * with sub 32bit access - For example: if the last register read is
218 	 * split into byte wise access, it can result in the queue getting
219 	 * stuck or indeterminate behavior. An out of order read operation may
220 	 * result in weird data results as well.
221 	 * Hence, we do not use memcpy_fromio or __ioread32_copy here, instead
222 	 * we depend on readl for the purpose.
223 	 *
224 	 * Also note that the final register read automatically marks the
225 	 * queue message as read.
226 	 */
227 	for (data_reg = qinst->queue_buff_start, word_data = qinst->rx_buff,
228 	     num_words = (desc->max_message_size / sizeof(u32));
229 	     num_words; num_words--, data_reg += sizeof(u32), word_data++)
230 		*word_data = readl(data_reg);
231 
232 	/*
233 	 * Last register read automatically clears the IRQ if only 1 message
234 	 * is pending - so send the data up the stack..
235 	 * NOTE: Client is expected to be as optimal as possible, since
236 	 * we invoke the handler in IRQ context.
237 	 */
238 	mbox_chan_received_data(chan, (void *)&message);
239 
240 	return 0;
241 }
242 
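/**
 * ti_msgmgr_queue_rx_poll_timeout() - Poll for and read a single rx message
 * @chan:	Channel Pointer
 * @timeout_us:	Time in microseconds to wait for a message to arrive
 *
 * Busy-waits (in 10us steps) until the queue state reports a pending
 * message or the timeout expires, then reads the message.
 *
 * Return: 0 on success, -ETIMEDOUT if no message arrived in time.
 */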
243 static int ti_msgmgr_queue_rx_poll_timeout(struct mbox_chan *chan, int timeout_us)
244 {
245 	struct device *dev = chan->mbox->dev;
246 	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
247 	struct ti_queue_inst *qinst = chan->con_priv;
248 	const struct ti_msgmgr_desc *desc = inst->desc;
249 	int msg_count;
250 	int ret;
251 
252 	ret = readl_poll_timeout_atomic(qinst->queue_state, msg_count,
253 					(msg_count & desc->status_cnt_mask),
254 					10, timeout_us);
255 	if (ret != 0)
256 		return ret;
257 
258 	ti_msgmgr_queue_rx_data(chan, qinst, desc);
259 
260 	return 0;
261 }
262 
263 /**
264  * ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue
265  * @irq:	Interrupt number
266  * @p:		Channel Pointer
267  *
268  * Return: IRQ_NONE if there is no instance or if the interrupt is
269  * not ours.
270  * IRQ_HANDLED if the rx interrupt was successfully handled.
271  */
272 static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
273 {
274 	struct mbox_chan *chan = p;
275 	struct device *dev = chan->mbox->dev;
276 	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
277 	struct ti_queue_inst *qinst = chan->con_priv;
278 	const struct ti_msgmgr_desc *desc;
279 	int msg_count;
280 
281 	if (WARN_ON(!inst)) {
282 		dev_err(dev, "no platform drv data??\n");
283 		return IRQ_NONE;
284 	}
285 
286 	/* Do I have an invalid interrupt source? */
287 	if (qinst->is_tx) {
288 		dev_err(dev, "Cannot handle rx interrupt on tx channel %s\n",
289 			qinst->name);
290 		return IRQ_NONE;
291 	}
292 
293 	desc = inst->desc;
294 	if (ti_msgmgr_queue_is_error(desc, qinst)) {
295 		dev_err(dev, "Error on Rx channel %s\n", qinst->name);
296 		return IRQ_NONE;
297 	}
298 
299 	/* Do I actually have messages to read? */
300 	msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
301 	if (!msg_count) {
302 		/* Shared IRQ? */
303 		dev_dbg(dev, "Spurious event - 0 pending data!\n");
304 		return IRQ_NONE;
305 	}
306 
307 	ti_msgmgr_queue_rx_data(chan, qinst, desc);
308 
309 	return IRQ_HANDLED;
310 }
311 
312 /**
313  * ti_msgmgr_queue_peek_data() - Peek to see if there are any rx messages.
314  * @chan:	Channel Pointer
315  *
316  * Return: 'true' if there is pending rx data, 'false' if there is none.
317  */
318 static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan)
319 {
320 	struct ti_queue_inst *qinst = chan->con_priv;
321 	struct device *dev = chan->mbox->dev;
322 	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
323 	const struct ti_msgmgr_desc *desc = inst->desc;
324 	int msg_count;
325 
326 	if (qinst->is_tx)
327 		return false;
328 
329 	if (ti_msgmgr_queue_is_error(desc, qinst)) {
330 		dev_err(dev, "Error on channel %s\n", qinst->name);
331 		return false;
332 	}
333 
334 	msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
335 
336 	return msg_count ? true : false;
337 }
338 
339 /**
340  * ti_msgmgr_last_tx_done() - See if all the tx messages are sent
341  * @chan:	Channel pointer
342  *
343  * Return: 'true' if no pending tx data, 'false' if there are any.
344  */
345 static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan)
346 {
347 	struct ti_queue_inst *qinst = chan->con_priv;
348 	struct device *dev = chan->mbox->dev;
349 	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
350 	const struct ti_msgmgr_desc *desc = inst->desc;
351 	int msg_count;
352 
353 	if (!qinst->is_tx)
354 		return false;
355 
356 	if (ti_msgmgr_queue_is_error(desc, qinst)) {
357 		dev_err(dev, "Error on channel %s\n", qinst->name);
358 		return false;
359 	}
360 
361 	msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
362 
363 	if (desc->is_sproxy) {
364 		/* In secure proxy, msg_count indicates how many we can send */
365 		return msg_count ? true : false;
366 	}
367 
368 	/* if we have any messages pending.. */
369 	return msg_count ? false : true;
370 }
371 
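/**
 * ti_msgmgr_chan_has_polled_queue_rx() - Check if a channel uses polled rx
 * @chan:	Channel Pointer (may be NULL)
 *
 * Return: 'true' if the channel's rx queue is in polled mode, else 'false'.
 */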
372 static bool ti_msgmgr_chan_has_polled_queue_rx(struct mbox_chan *chan)
373 {
374 	struct ti_queue_inst *qinst;
375 
376 	if (!chan)
377 		return false;
378 
379 	qinst = chan->con_priv;
380 	return qinst->polled_rx_mode;
381 }
382 
383 /**
384  * ti_msgmgr_send_data() - Send data
385  * @chan:	Channel Pointer
386  * @data:	ti_msgmgr_message * Message Pointer
387  *
388  * Return: 0 if all goes well, else an appropriate error value.
389  */
390 static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
391 {
392 	struct device *dev = chan->mbox->dev;
393 	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
394 	const struct ti_msgmgr_desc *desc;
395 	struct ti_queue_inst *qinst = chan->con_priv;
396 	int num_words, trail_bytes;
397 	struct ti_msgmgr_message *message = data;
398 	void __iomem *data_reg;
399 	u32 *word_data;
400 	int ret = 0;
401 
402 	if (WARN_ON(!inst)) {
403 		dev_err(dev, "no platform drv data??\n");
404 		return -EINVAL;
405 	}
406 	desc = inst->desc;
407 
408 	if (ti_msgmgr_queue_is_error(desc, qinst)) {
409 		dev_err(dev, "Error on channel %s\n", qinst->name);
410 		return -EIO;
411 	}
412 
413 	if (desc->max_message_size < message->len) {
414 		dev_err(dev, "Queue %s message length %zu > max %d\n",
415 			qinst->name, message->len, desc->max_message_size);
416 		return -EINVAL;
417 	}
418 
419 	/* NOTE: Constraints similar to rx path exist here as well */
420 	for (data_reg = qinst->queue_buff_start,
421 	     num_words = message->len / sizeof(u32),
422 	     word_data = (u32 *)message->buf;
423 	     num_words; num_words--, data_reg += sizeof(u32), word_data++)
424 		writel(*word_data, data_reg);
425 
426 	trail_bytes = message->len % sizeof(u32);
427 	if (trail_bytes) {
428 		u32 data_trail = *word_data;
429 
430 		/* Ensure all unused data is 0 */
431 		data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes));
432 		writel(data_trail, data_reg);
433 		data_reg++;
434 	}
435 	/*
436 	 * 'data_reg' indicates the next register to write. If we did not already
437 	 * write the tx complete reg (last reg), we must do so to trigger transmit
438 	 */
439 	if (data_reg <= qinst->queue_buff_end)
440 		writel(0, qinst->queue_buff_end);
441 
442 	/* If we are in polled mode, wait for a response before proceeding */
443 	if (ti_msgmgr_chan_has_polled_queue_rx(message->chan_rx))
444 		ret = ti_msgmgr_queue_rx_poll_timeout(message->chan_rx,
445 						      message->timeout_rx_ms * 1000);
446 
447 	return ret;
448 }
449 
450 /**
451  *  ti_msgmgr_queue_rx_irq_req() - RX IRQ request
452  *  @dev:	device pointer
453  *  @d:		descriptor for ti_msgmgr
454  *  @qinst:	Queue instance
455  *  @chan:	Channel pointer
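 *
 *  Return: 0 if all goes well, else corresponding error value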
456  */
457 static int ti_msgmgr_queue_rx_irq_req(struct device *dev,
458 				      const struct ti_msgmgr_desc *d,
459 				      struct ti_queue_inst *qinst,
460 				      struct mbox_chan *chan)
461 {
462 	int ret = 0;
463 	char of_rx_irq_name[7];
464 	struct device_node *np;
465 
466 	snprintf(of_rx_irq_name, sizeof(of_rx_irq_name),
467 		 "rx_%03d", d->is_sproxy ? qinst->proxy_id : qinst->queue_id);
468 
469 	/* Get the IRQ if not already acquired */
470 	if (qinst->irq < 0) {
471 		np = of_node_get(dev->of_node);
472 		if (!np)
473 			return -ENODATA;
474 		qinst->irq = of_irq_get_byname(np, of_rx_irq_name);
475 		of_node_put(np);
476 
477 		if (qinst->irq < 0) {
478 			dev_err(dev,
479 				"QID %d PID %d:No IRQ[%s]: %d\n",
480 				qinst->queue_id, qinst->proxy_id,
481 				of_rx_irq_name, qinst->irq);
482 			return qinst->irq;
483 		}
484 	}
485 
486 	/* With the expectation that the IRQ might be shared in the SoC */
487 	ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt,
488 			  IRQF_SHARED, qinst->name, chan);
489 	if (ret) {
490 		dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n",
491 			qinst->irq, qinst->name, ret);
492 	}
493 
494 	return ret;
495 }
496 
497 /**
498  * ti_msgmgr_queue_startup() - Startup queue
499  * @chan:	Channel pointer
500  *
501  * Return: 0 if all goes well, else corresponding error value
502  */
503 static int ti_msgmgr_queue_startup(struct mbox_chan *chan)
504 {
505 	struct device *dev = chan->mbox->dev;
506 	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
507 	struct ti_queue_inst *qinst = chan->con_priv;
508 	const struct ti_msgmgr_desc *d = inst->desc;
509 	int ret;
510 	int msg_count;
511 
512 	/*
513 	 * If sproxy is starting and can send messages, we are a Tx thread,
514 	 * else Rx
515 	 */
516 	if (d->is_sproxy) {
517 		qinst->is_tx = (readl(qinst->queue_ctrl) &
518 				SPROXY_THREAD_CTRL_DIR_MASK) ? false : true;
519 
520 		msg_count = ti_msgmgr_queue_get_num_messages(d, qinst);
521 
522 		if (!msg_count && qinst->is_tx) {
523 			dev_err(dev, "%s: Cannot transmit with 0 credits!\n",
524 				qinst->name);
525 			return -EINVAL;
526 		}
527 	}
528 
529 	if (!qinst->is_tx) {
530 		/* Allocate usage buffer for rx */
531 		qinst->rx_buff = kzalloc(d->max_message_size, GFP_KERNEL);
532 		if (!qinst->rx_buff)
533 			return -ENOMEM;
534 		/* Request IRQ */
535 		ret = ti_msgmgr_queue_rx_irq_req(dev, d, qinst, chan);
536 		if (ret) {
537 			kfree(qinst->rx_buff);
538 			return ret;
539 		}
540 	}
541 
542 	return 0;
543 }
544 
545 /**
546  * ti_msgmgr_queue_shutdown() - Shutdown the queue
547  * @chan:	Channel pointer
548  */
549 static void ti_msgmgr_queue_shutdown(struct mbox_chan *chan)
550 {
551 	struct ti_queue_inst *qinst = chan->con_priv;
552 
553 	if (!qinst->is_tx) {
554 		free_irq(qinst->irq, chan);
555 		kfree(qinst->rx_buff);
556 	}
557 }
558 
559 /**
560  * ti_msgmgr_of_xlate() - Translation of phandle to queue
561  * @mbox:	Mailbox controller
562  * @p:		phandle pointer
563  *
564  * Return: Mailbox channel corresponding to the queue, else return error
565  * pointer.
566  */
567 static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
568 					    const struct of_phandle_args *p)
569 {
570 	struct ti_msgmgr_inst *inst;
571 	int req_qid, req_pid;
572 	struct ti_queue_inst *qinst;
573 	const struct ti_msgmgr_desc *d;
574 	int i, ncells;
575 
576 	inst = container_of(mbox, struct ti_msgmgr_inst, mbox);
577 	if (WARN_ON(!inst))
578 		return ERR_PTR(-EINVAL);
579 
580 	d = inst->desc;
581 
582 	if (d->is_sproxy)
583 		ncells = 1;
584 	else
585 		ncells = 2;
586 	if (p->args_count != ncells) {
587 		dev_err(inst->dev, "Invalid arguments in dt[%d]. Must be %d\n",
588 			p->args_count, ncells);
589 		return ERR_PTR(-EINVAL);
590 	}
591 	if (ncells == 1) {
592 		req_qid = 0;
593 		req_pid = p->args[0];
594 	} else {
595 		req_qid = p->args[0];
596 		req_pid = p->args[1];
597 	}
598 
599 	if (d->is_sproxy) {
600 		if (req_pid >= d->num_valid_queues)
601 			goto err;
602 		qinst = &inst->qinsts[req_pid];
603 		return qinst->chan;
604 	}
605 
606 	for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues;
607 	     i++, qinst++) {
608 		if (req_qid == qinst->queue_id && req_pid == qinst->proxy_id)
609 			return qinst->chan;
610 	}
611 
612 err:
613 	dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %pOFn\n",
614 		req_qid, req_pid, p->np);
615 	return ERR_PTR(-ENOENT);
616 }
617 
618 /**
619  * ti_msgmgr_queue_setup() - Setup data structures for each queue instance
620  * @idx:	index of the queue
621  * @dev:	pointer to the message manager device
622  * @np:		pointer to the of node
623  * @inst:	Message Manager instance pointer
624  * @d:		Message Manager instance description data
625  * @qd:		Queue description data
626  * @qinst:	Queue instance pointer
627  * @chan:	pointer to mailbox channel
628  *
629  * Return: 0 if all went well, else return corresponding error
630  */
631 static int ti_msgmgr_queue_setup(int idx, struct device *dev,
632 				 struct device_node *np,
633 				 struct ti_msgmgr_inst *inst,
634 				 const struct ti_msgmgr_desc *d,
635 				 const struct ti_msgmgr_valid_queue_desc *qd,
636 				 struct ti_queue_inst *qinst,
637 				 struct mbox_chan *chan)
638 {
639 	char *dir;
640 
641 	qinst->proxy_id = qd->proxy_id;
642 	qinst->queue_id = qd->queue_id;
643 
644 	if (qinst->queue_id > d->queue_count) {
645 		dev_err(dev, "Queue Data [idx=%d] queue_id %d > %d\n",
646 			idx, qinst->queue_id, d->queue_count);
647 		return -ERANGE;
648 	}
649 
650 	if (d->is_sproxy) {
651 		qinst->queue_buff_start = inst->queue_proxy_region +
652 		    SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
653 					      d->data_first_reg);
654 		qinst->queue_buff_end = inst->queue_proxy_region +
655 		    SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
656 					      d->data_last_reg);
657 		qinst->queue_state = inst->queue_state_debug_region +
658 		    SPROXY_THREAD_STATUS_OFFSET(qinst->proxy_id);
659 		qinst->queue_ctrl = inst->queue_ctrl_region +
660 		    SPROXY_THREAD_CTRL_OFFSET(qinst->proxy_id);
661 
662 		/* XXX: DO NOT read registers here! Some may be unusable */
663 		dir = "thr";
664 		snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d",
665 			 dev_name(dev), dir, qinst->proxy_id);
666 	} else {
667 		qinst->queue_buff_start = inst->queue_proxy_region +
668 		    Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
669 				  d->data_first_reg);
670 		qinst->queue_buff_end = inst->queue_proxy_region +
671 		    Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
672 				  d->data_last_reg);
673 		qinst->queue_state =
674 		    inst->queue_state_debug_region +
675 		    Q_STATE_OFFSET(qinst->queue_id);
676 		qinst->is_tx = qd->is_tx;
677 		dir = qinst->is_tx ? "tx" : "rx";
678 		snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d",
679 			 dev_name(dev), dir, qinst->queue_id, qinst->proxy_id);
680 	}
681 
682 	qinst->chan = chan;
683 
684 	/* Setup an error value for IRQ - Lazy allocation */
685 	qinst->irq = -EINVAL;
686 
687 	chan->con_priv = qinst;
688 
689 	dev_dbg(dev, "[%d] qidx=%d pidx=%d irq=%d q_s=%p q_e = %p\n",
690 		idx, qinst->queue_id, qinst->proxy_id, qinst->irq,
691 		qinst->queue_buff_start, qinst->queue_buff_end);
692 	return 0;
693 }
694 
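/**
 * ti_msgmgr_queue_rx_set_polled_mode() - Switch an rx queue between IRQ
 *					   and polled operation
 * @qinst:	Queue instance to reconfigure
 * @enable:	'true' to disable the rx IRQ and poll instead, 'false' to
 *		re-enable the IRQ
 *
 * Return: 0 always.
 */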
695 static int ti_msgmgr_queue_rx_set_polled_mode(struct ti_queue_inst *qinst, bool enable)
696 {
697 	if (enable) {
698 		disable_irq(qinst->irq);
699 		qinst->polled_rx_mode = true;
700 	} else {
701 		enable_irq(qinst->irq);
702 		qinst->polled_rx_mode = false;
703 	}
704 
705 	return 0;
706 }
707 
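/**
 * ti_msgmgr_suspend() - Put all rx queues into polled mode for suspend
 * @dev:	device pointer
 *
 * Return: 0 always.
 */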
708 static int ti_msgmgr_suspend(struct device *dev)
709 {
710 	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
711 	struct ti_queue_inst *qinst;
712 	int i;
713 
714 	/*
715 	 * We must switch operation to polled mode now as drivers and the genpd
716 	 * layer may make late TI SCI calls to change clock and device states
717 	 * from the noirq phase of suspend.
718 	 */
719 	for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) {
720 		if (!qinst->is_tx)
721 			ti_msgmgr_queue_rx_set_polled_mode(qinst, true);
722 	}
723 
724 	return 0;
725 }
726 
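/**
 * ti_msgmgr_resume() - Switch rx queues back to interrupt mode on resume
 * @dev:	device pointer
 *
 * Return: 0 always.
 */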
727 static int ti_msgmgr_resume(struct device *dev)
728 {
729 	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
730 	struct ti_queue_inst *qinst;
731 	int i;
732 
733 	for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) {
734 		if (!qinst->is_tx)
735 			ti_msgmgr_queue_rx_set_polled_mode(qinst, false);
736 	}
737 
738 	return 0;
739 }
740 
741 static DEFINE_SIMPLE_DEV_PM_OPS(ti_msgmgr_pm_ops, ti_msgmgr_suspend, ti_msgmgr_resume);
742 
743 /* Queue operations */
744 static const struct mbox_chan_ops ti_msgmgr_chan_ops = {
745 	.startup = ti_msgmgr_queue_startup,
746 	.shutdown = ti_msgmgr_queue_shutdown,
747 	.peek_data = ti_msgmgr_queue_peek_data,
748 	.last_tx_done = ti_msgmgr_last_tx_done,
749 	.send_data = ti_msgmgr_send_data,
750 };
751 
752 /* Keystone K2G SoC integration details */
753 static const struct ti_msgmgr_valid_queue_desc k2g_valid_queues[] = {
754 	{.queue_id = 0, .proxy_id = 0, .is_tx = true,},
755 	{.queue_id = 1, .proxy_id = 0, .is_tx = true,},
756 	{.queue_id = 2, .proxy_id = 0, .is_tx = true,},
757 	{.queue_id = 3, .proxy_id = 0, .is_tx = true,},
758 	{.queue_id = 5, .proxy_id = 2, .is_tx = false,},
759 	{.queue_id = 56, .proxy_id = 1, .is_tx = true,},
760 	{.queue_id = 57, .proxy_id = 2, .is_tx = false,},
761 	{.queue_id = 58, .proxy_id = 3, .is_tx = true,},
762 	{.queue_id = 59, .proxy_id = 4, .is_tx = true,},
763 	{.queue_id = 60, .proxy_id = 5, .is_tx = true,},
764 	{.queue_id = 61, .proxy_id = 6, .is_tx = true,},
765 };
766 
767 static const struct ti_msgmgr_desc k2g_desc = {
768 	.queue_count = 64,
769 	.max_message_size = 64,
770 	.max_messages = 128,
771 	.data_region_name = "queue_proxy_region",
772 	.status_region_name = "queue_state_debug_region",
773 	.data_first_reg = 16,
774 	.data_last_reg = 31,
775 	.status_cnt_mask = Q_STATE_ENTRY_COUNT_MASK,
776 	.tx_polled = false,
777 	.valid_queues = k2g_valid_queues,
778 	.num_valid_queues = ARRAY_SIZE(k2g_valid_queues),
779 	.is_sproxy = false,
780 };
781 
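/* AM654 SoC Secure Proxy integration details */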
782 static const struct ti_msgmgr_desc am654_desc = {
783 	.queue_count = 190,
784 	.num_valid_queues = 190,
785 	.max_message_size = 60,
786 	.data_region_name = "target_data",
787 	.status_region_name = "rt",
788 	.ctrl_region_name = "scfg",
789 	.data_first_reg = 0,
790 	.data_last_reg = 14,
791 	.status_cnt_mask = SPROXY_THREAD_STATUS_COUNT_MASK,
792 	.tx_polled = false,
793 	.is_sproxy = true,
794 };
795 
796 static const struct of_device_id ti_msgmgr_of_match[] = {
797 	{.compatible = "ti,k2g-message-manager", .data = &k2g_desc},
798 	{.compatible = "ti,am654-secure-proxy", .data = &am654_desc},
799 	{ /* Sentinel */ }
800 };
801 
802 MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
803 
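/**
 * ti_msgmgr_probe() - Probe and register a message manager instance
 * @pdev:	Platform device for this message manager / secure proxy
 *
 * Maps the regions named by the matched SoC description, sets up each
 * valid queue instance and registers the set as a mailbox controller.
 *
 * Return: 0 if all goes well, else corresponding error value.
 */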
804 static int ti_msgmgr_probe(struct platform_device *pdev)
805 {
806 	struct device *dev = &pdev->dev;
807 	const struct of_device_id *of_id;
808 	struct device_node *np;
809 	struct resource *res;
810 	const struct ti_msgmgr_desc *desc;
811 	struct ti_msgmgr_inst *inst;
812 	struct ti_queue_inst *qinst;
813 	struct mbox_controller *mbox;
814 	struct mbox_chan *chans;
815 	int queue_count;
816 	int i;
817 	int ret = -EINVAL;
818 	const struct ti_msgmgr_valid_queue_desc *queue_desc;
819 
820 	if (!dev->of_node) {
821 		dev_err(dev, "no OF information\n");
822 		return -EINVAL;
823 	}
824 	np = dev->of_node;
825 
826 	of_id = of_match_device(ti_msgmgr_of_match, dev);
827 	if (!of_id) {
828 		dev_err(dev, "OF data missing\n");
829 		return -EINVAL;
830 	}
831 	desc = of_id->data;
832 
833 	inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
834 	if (!inst)
835 		return -ENOMEM;
836 
837 	inst->dev = dev;
838 	inst->desc = desc;
839 
840 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
841 					   desc->data_region_name);
842 	inst->queue_proxy_region = devm_ioremap_resource(dev, res);
843 	if (IS_ERR(inst->queue_proxy_region))
844 		return PTR_ERR(inst->queue_proxy_region);
845 
846 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
847 					   desc->status_region_name);
848 	inst->queue_state_debug_region = devm_ioremap_resource(dev, res);
849 	if (IS_ERR(inst->queue_state_debug_region))
850 		return PTR_ERR(inst->queue_state_debug_region);
851 
852 	if (desc->is_sproxy) {
853 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
854 						   desc->ctrl_region_name);
855 		inst->queue_ctrl_region = devm_ioremap_resource(dev, res);
856 		if (IS_ERR(inst->queue_ctrl_region))
857 			return PTR_ERR(inst->queue_ctrl_region);
858 	}
859 
860 	dev_dbg(dev, "proxy region=%p, queue_state=%p\n",
861 		inst->queue_proxy_region, inst->queue_state_debug_region);
862 
863 	queue_count = desc->num_valid_queues;
864 	if (!queue_count || queue_count > desc->queue_count) {
865 		dev_crit(dev, "Invalid Number of queues %d. Max %d\n",
866 			 queue_count, desc->queue_count);
867 		return -ERANGE;
868 	}
869 	inst->num_valid_queues = queue_count;
870 
871 	qinst = devm_kcalloc(dev, queue_count, sizeof(*qinst), GFP_KERNEL);
872 	if (!qinst)
873 		return -ENOMEM;
874 	inst->qinsts = qinst;
875 
876 	chans = devm_kcalloc(dev, queue_count, sizeof(*chans), GFP_KERNEL);
877 	if (!chans)
878 		return -ENOMEM;
879 	inst->chans = chans;
880 
881 	if (desc->is_sproxy) {
882 		struct ti_msgmgr_valid_queue_desc sproxy_desc;
883 
884 		/* All proxies may be valid in Secure Proxy instance */
885 		for (i = 0; i < queue_count; i++, qinst++, chans++) {
886 			sproxy_desc.queue_id = 0;
887 			sproxy_desc.proxy_id = i;
888 			ret = ti_msgmgr_queue_setup(i, dev, np, inst,
889 						    desc, &sproxy_desc, qinst,
890 						    chans);
891 			if (ret)
892 				return ret;
893 		}
894 	} else {
895 		/* Only some proxies are valid in Message Manager */
896 		for (i = 0, queue_desc = desc->valid_queues;
897 		     i < queue_count; i++, qinst++, chans++, queue_desc++) {
898 			ret = ti_msgmgr_queue_setup(i, dev, np, inst,
899 						    desc, queue_desc, qinst,
900 						    chans);
901 			if (ret)
902 				return ret;
903 		}
904 	}
905 
906 	mbox = &inst->mbox;
907 	mbox->dev = dev;
908 	mbox->ops = &ti_msgmgr_chan_ops;
909 	mbox->chans = inst->chans;
910 	mbox->num_chans = inst->num_valid_queues;
911 	mbox->txdone_irq = false;
912 	mbox->txdone_poll = desc->tx_polled;
913 	if (desc->tx_polled)
914 		mbox->txpoll_period = desc->tx_poll_timeout_ms;
915 	mbox->of_xlate = ti_msgmgr_of_xlate;
916 
917 	platform_set_drvdata(pdev, inst);
918 	ret = devm_mbox_controller_register(dev, mbox);
919 	if (ret)
920 		dev_err(dev, "Failed to register mbox_controller(%d)\n", ret);
921 
922 	return ret;
923 }
924 
925 static struct platform_driver ti_msgmgr_driver = {
926 	.probe = ti_msgmgr_probe,
927 	.driver = {
928 		   .name = "ti-msgmgr",
929 		   .of_match_table = of_match_ptr(ti_msgmgr_of_match),
930 		   .pm = &ti_msgmgr_pm_ops,
931 	},
932 };
933 module_platform_driver(ti_msgmgr_driver);
934 
935 MODULE_LICENSE("GPL v2");
936 MODULE_DESCRIPTION("TI message manager driver");
937 MODULE_AUTHOR("Nishanth Menon");
938 MODULE_ALIAS("platform:ti-msgmgr");
939