// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments System Control Interface Protocol Driver
 *
 * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
 *	Nishanth Menon
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/reboot.h>

#include "ti_sci.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
static DEFINE_MUTEX(ti_sci_list_mutex);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 * @xfer_buf:	Preallocated buffer to store receive message
 *		Since we work with a request-ACK protocol, we can
 *		reuse the same buffer for the rx path as we
 *		use for the tx path.
 * @done:	completion event
 */
struct ti_sci_xfer {
	struct ti_msgmgr_message tx_message;
	u8 rx_len;
	u8 *xfer_buf;
	struct completion done;
};

/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count:	Counting semaphore for managing the maximum number of
 *			simultaneous messages.
 * @xfer_block:		Preallocated Message array
 * @xfer_alloc_table:	Bitmap table for allocated messages.
 *			Index of this bitmap table is also used for message
 *			sequence identifier.
 * @xfer_lock:		Protection for message allocation
 */
struct ti_sci_xfers_info {
	struct semaphore sem_xfer_count;
	struct ti_sci_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation */
	spinlock_t xfer_lock;
};

/**
 * struct ti_sci_rm_type_map - Structure representing TISCI Resource
 *				management representation of dev_ids.
 * @dev_id:	TISCI device ID
 * @type:	Corresponding id as identified by TISCI RM.
 *
 * Note: This is used only as a workaround for using the RM range APIs
 *	on the AM654 SoC. For future SoCs, dev_id will be used as the type
 *	for the RM range APIs. In order to maintain ABI backward compatibility,
 *	the type is not being changed for the AM654 SoC.
 */
struct ti_sci_rm_type_map {
	u32 dev_id;
	u16 type;
};

/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
 * @max_msgs: Maximum number of messages that can be pending
 *		  simultaneously in the system
 * @max_msg_size: Maximum size of data per message that can be handled.
 * @rm_type_map: RM resource type mapping structure.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
	struct ti_sci_rm_type_map *rm_type_map;
};

/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @nb:		Reboot Notifier block
 * @desc:	SoC description for this instance
 * @d:		Debugfs file entry
 * @debug_region: Memory region where the debug messages are available
 * @debug_buffer: Buffer allocated to copy debug messages.
 * @debug_region_size: Debug region size
 * @handle:	Instance of TI SCI handle to send to clients.
 * @cl:		Mailbox Client
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @minfo:	Message info
 * @node:	list head
 * @host_id:	Host ID
 * @users:	Number of users of this instance
 */
struct ti_sci_info {
	struct device *dev;
	struct notifier_block nb;
	const struct ti_sci_desc *desc;
	struct dentry *d;
	void __iomem *debug_region;
	char *debug_buffer;
	size_t debug_region_size;
	struct ti_sci_handle handle;
	struct mbox_client cl;
	struct mbox_chan *chan_tx;
	struct mbox_chan *chan_rx;
	struct ti_sci_xfers_info minfo;
	struct list_head node;
	u8 host_id;
	/* protected by ti_sci_list_mutex */
	int users;
};

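/*
 * Convenience accessors: the mailbox client, the TI SCI handle and the
 * reboot notifier are all embedded in struct ti_sci_info, so container_of()
 * maps a framework callback argument back to the driver instance.
 */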
#define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)

#ifdef CONFIG_DEBUG_FS

/**
 * ti_sci_debug_show() - Helper to dump the debug log
 * @s:	sequence file pointer
 * @unused:	unused.
 *
 * Return: 0
 */
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
	struct ti_sci_info *info = s->private;

	memcpy_fromio(info->debug_buffer, info->debug_region,
		      info->debug_region_size);
	/*
	 * We don't trust the firmware to leave the last byte NUL-terminated
	 * (hence we have allocated 1 extra 0 byte). Since we cannot guarantee
	 * any specific data format for debug messages, we just present the
	 * data in the buffer as-is - we expect the messages to be
	 * self-explanatory.
	 */
	seq_puts(s, info->debug_buffer);
	return 0;
}

/* Provide the log file operations interface */
DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);

/**
 * ti_sci_debugfs_create() - Create log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 *
 * Return: 0 if all went fine, else corresponding error.
 */
static int ti_sci_debugfs_create(struct platform_device *pdev,
				 struct ti_sci_info *info)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	char debug_name[50] = "ti_sci_debug@";

	/* Debug region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "debug_messages");
	info->debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(info->debug_region))
		return 0;
	info->debug_region_size = resource_size(res);

	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
					  sizeof(char), GFP_KERNEL);
	if (!info->debug_buffer)
		return -ENOMEM;
	/* Setup NULL termination */
	info->debug_buffer[info->debug_region_size] = 0;

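	/*
	 * Append the device name to the "ti_sci_debug@" prefix; the strncat
	 * bound leaves room for the prefix plus the terminating NUL, which
	 * is already counted in sizeof("ti_sci_debug@").
	 */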
	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
					      sizeof(debug_name) -
					      sizeof("ti_sci_debug@")),
				      0444, NULL, info, &ti_sci_debug_fops);
	if (IS_ERR(info->d))
		return PTR_ERR(info->d);

	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
		info->debug_region, info->debug_region_size, res);
	return 0;
}

/**
 * ti_sci_debugfs_destroy() - clean up log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 */
static void ti_sci_debugfs_destroy(struct platform_device *pdev,
				   struct ti_sci_info *info)
{
	if (IS_ERR(info->debug_region))
		return;

	debugfs_remove(info->d);
}
#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
					struct ti_sci_info *info)
{
	return 0;
}

static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
					  struct ti_sci_info *info)
{
}
#endif /* CONFIG_DEBUG_FS */

/**
 * ti_sci_dump_header_dbg() - Helper to dump a message header.
 * @dev:	Device pointer corresponding to the SCI entity
 * @hdr:	pointer to header.
 */
static inline void ti_sci_dump_header_dbg(struct device *dev,
					  struct ti_sci_msg_hdr *hdr)
{
	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
		hdr->type, hdr->host, hdr->seq, hdr->flags);
}

/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl:	client pointer
 * @m:	mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
	struct device *dev = info->dev;
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_msgmgr_message *mbox_msg = m;
	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
	struct ti_sci_xfer *xfer;
	u8 xfer_id;

	xfer_id = hdr->seq;

	/*
	 * Are we even expecting this?
	 * NOTE: barriers were implicit in locks used for modifying the bitmap
	 */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	/* Is the message of valid length? */
	if (mbox_msg->len > info->desc->max_msg_size) {
		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
			mbox_msg->len, info->desc->max_msg_size);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}
	if (mbox_msg->len < xfer->rx_len) {
		dev_err(dev, "Recv xfer %zu < expected %d length\n",
			mbox_msg->len, xfer->rx_len);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}

	ti_sci_dump_header_dbg(dev, hdr);
	/* Take a copy into the rx buffer */
	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
	complete(&xfer->done);
}

/**
 * ti_sci_get_one_xfer() - Allocate one message
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCI entity. Further, this also holds a spinlock to maintain integrity
 * of internal data structures.
 *
 * Return: Valid ti_sci_xfer pointer if all went fine, else corresponding
 *	   error pointer (ERR_PTR).
 */
static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
					       u16 msg_type, u32 msg_flags,
					       size_t tx_message_size,
					       size_t rx_message_size)
{
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_sci_xfer *xfer;
	struct ti_sci_msg_hdr *hdr;
	unsigned long flags;
	unsigned long bit_pos;
	u8 xfer_id;
	int ret;
	int timeout;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
		return ERR_PTR(-ERANGE);

	/*
	 * Ensure we have only a controlled number of pending messages.
	 * Ideally, we might just have to wait for a single message;
	 * be conservative and wait 5 times that.
	 */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
	ret = down_timeout(&minfo->sem_xfer_count, timeout);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msgs);
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
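	/*
	 * A free slot is guaranteed at this point: the counting semaphore
	 * above admits at most as many callers as there are transfer slots,
	 * so find_first_zero_bit() always finds a clear bit here.
	 */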

	/*
	 * We already ensured in probe that we can have max messages that can
	 * fit in hdr.seq - NOTE: this improves access latencies to a
	 * predictable O(1) lookup, BUT it opens us up to risk if the remote
	 * misbehaves with corrupted message sequence responses. If that
	 * happens, we are going to be messed up anyway.
	 */
	xfer_id = (u8)bit_pos;

	xfer = &minfo->xfer_block[xfer_id];

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer->tx_message.len = tx_message_size;
	xfer->rx_len = (u8)rx_message_size;

	reinit_completion(&xfer->done);

	hdr->seq = xfer_id;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}

/**
 * ti_sci_put_one_xfer() - Release a message
 * @minfo:	transfer info pointer
 * @xfer:	message that was reserved by ti_sci_get_one_xfer
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
				struct ti_sci_xfer *xfer)
{
	unsigned long flags;
	struct ti_sci_msg_hdr *hdr;
	u8 xfer_id;

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer_id = hdr->seq;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer_id, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* Increment the count for the next user to get through */
	up(&minfo->sem_xfer_count);
}

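/*
 * A minimal sketch of how the command helpers below drive one message
 * through these primitives (struct ti_sci_msg_req_xyz is a placeholder
 * request type; see ti_sci_cmd_get_revision() for a concrete instance):
 *
 *	xfer = ti_sci_get_one_xfer(info, msg_type, flags,
 *				   sizeof(*req), sizeof(*resp));
 *	req = (struct ti_sci_msg_req_xyz *)xfer->xfer_buf;
 *	... fill in request-specific fields ...
 *	ret = ti_sci_do_xfer(info, xfer);
 *	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
 *	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
 *	ti_sci_put_one_xfer(&info->minfo, xfer);
 */
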
/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response; on transmit error, the
 *	   corresponding error; else 0 if all goes well.
 */
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
				 struct ti_sci_xfer *xfer)
{
	int ret;
	int timeout;
	struct device *dev = info->dev;

	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
	if (ret < 0)
		return ret;

	ret = 0;

	/* And we wait for the response. */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
			(void *)_RET_IP_);
		ret = -ETIMEDOUT;
	}
	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(info->chan_tx, ret);

	return ret;
}

/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @info:	Pointer to SCI entity information
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
{
	struct device *dev = info->dev;
	struct ti_sci_handle *handle = &info->handle;
	struct ti_sci_version_info *ver = &handle->version;
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_xfer *xfer;
	int ret;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(struct ti_sci_msg_hdr),
				   sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	strncpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}

/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static inline bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}

/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
	req->id = id;
	req->state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id,  u32 *clcnt,  u32 *resets,
				    u8 *p_state,  u8 *c_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_device_state *req;
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
	req->id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;
fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
 *
 * NOTE: The request is for exclusive access for the processor.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}

/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 * appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state,  bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be ON
 * @curr_state:	true if currently ON and active
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
				bool *r_state,  bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @curr_state:	true if currently transitioning.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
				   bool *curr_state)
{
	int ret;
	u8 state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}

/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_resets *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
	req->id = id;
	req->resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 *				    by TISCI
 * @handle:		Pointer to TISCI handle
 * @id:			Device Identifier
 * @reset_state:	Pointer to reset state to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 *reset_state)
{
	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
				       NULL);
}

/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
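	/*
	 * The message carries the clock identifier in a u8 field:
	 * identifiers below 255 go in clk_id directly, while larger ones
	 * are flagged with clk_id = 255 and carried in the 32-bit
	 * clk_id_32 field (the same scheme is used throughout this file).
	 */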
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @programmed_state:	State requested for clock to move to
 * @current_state:	State that the clock is currently in
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
				      u32 dev_id, u32 clk_id,
				      u8 *programmed_state, u8 *current_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_state *req;
	struct ti_sci_msg_resp_get_clock_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!programmed_state && !current_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (programmed_state)
		*programmed_state = resp->programmed_state;
	if (current_state)
		*current_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
 * @can_change_freq: 'true' if frequency change is desired, else 'false'
 * @enable_input_term: 'true' if input termination is desired, else 'false'
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool needs_ssc,
				bool can_change_freq, bool enable_input_term)
{
	u32 flags = 0;

	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
				      MSG_CLOCK_SW_STATE_REQ);
}

/**
 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
				 u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
				      MSG_CLOCK_SW_STATE_UNREQ);
}

/**
 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
				u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
				      MSG_CLOCK_SW_STATE_AUTO);
}

/**
 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is auto managed
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id, bool *req_state)
{
	u8 state = 0;
	int ret;

	if (!req_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
	if (ret)
		return ret;

	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_on() - Is the clock ON
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and enabled
 * @curr_state: state indicating if the clock is ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_off() - Is the clock OFF
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and disabled
 * @curr_state: state indicating if the clock is NOT ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
				 u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Parent clock identifier to set
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_parent *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	if (parent_id < 255) {
		req->parent_id = parent_id;
	} else {
		req->parent_id = 255;
		req->parent_id_32 = parent_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Current clock parent
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 *parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_parent *req;
	struct ti_sci_msg_resp_get_clock_parent *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !parent_id)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->parent_id < 255)
			*parent_id = resp->parent_id;
		else
			*parent_id = resp->parent_id_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @num_parents: Returns the number of parents of the current clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
					  u32 dev_id, u32 clk_id,
					  u32 *num_parents)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_num_parents *req;
	struct ti_sci_msg_resp_get_clock_num_parents *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !num_parents)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->num_parents < 255)
			*num_parents = resp->num_parents;
		else
			*num_parents = resp->num_parents_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @match_freq:	Frequency match in Hz response.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
					 u32 dev_id, u32 clk_id, u64 min_freq,
					 u64 target_freq, u64 max_freq,
					 u64 *match_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_query_clock_freq *req;
	struct ti_sci_msg_resp_query_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !match_freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*match_freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 min_freq,
				   u64 target_freq, u64 max_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_freq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_freq() - Get current frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @freq:	Current frequency in Hz
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 *freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_freq *req;
	struct ti_sci_msg_resp_get_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

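/**
 * ti_sci_cmd_core_reboot() - Command to request a system reset
 * @handle:	pointer to TI SCI handle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */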
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_reboot *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		ret = 0;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

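/**
 * ti_sci_get_resource_type() - Translate a TISCI device ID to an RM
 *				resource type
 * @info:	Pointer to SCI entity information
 * @dev_id:	TISCI device ID
 * @type:	Pointer to the resource type to populate
 *
 * Return: 0 if a mapping was found (or none is needed), else -EINVAL.
 */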
static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
				    u16 *type)
{
	struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
	bool found = false;
	int i;

	/* If map is not provided then assume dev_id is used as type */
	if (!rm_type_map) {
		*type = dev_id;
		return 0;
	}

	for (i = 0; rm_type_map[i].dev_id; i++) {
		if (rm_type_map[i].dev_id == dev_id) {
			*type = rm_type_map[i].type;
			found = true;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	return 0;
}

/**
 * ti_sci_get_resource_range - Helper to get a range of resources assigned
 *			       to a host. Resource is uniquely identified by
 *			       type and subtype.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @s_host:		Host processor ID to which the resources are allocated
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 subtype, u8 s_host,
				     u16 *range_start, u16 *range_num)
{
	struct ti_sci_msg_resp_get_resource_range *resp;
	struct ti_sci_msg_req_get_resource_range *req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	u16 type;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	ret = ti_sci_get_resource_type(info, dev_id, &type);
	if (ret) {
		dev_err(dev, "rm type lookup failed for %u\n", dev_id);
		goto fail;
	}

	req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
	req->secondary_host = s_host;
	req->type = type & MSG_RM_RESOURCE_TYPE_MASK;
	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else if (!resp->range_start && !resp->range_num) {
		ret = -ENODEV;
	} else {
		*range_start = resp->range_start;
		*range_num = resp->range_num;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
1779  * ti_sci_cmd_get_resource_range - Get a range of resources assigned to the
1780  *				   host that this TI SCI interface represents.
1781  * @handle:		Pointer to TISCI handle.
1782  * @dev_id:		TISCI device ID.
1783  * @subtype:		Resource assignment subtype that is being requested
1784  *			from the given device.
1785  * @range_start:	Start index of the resource range
1786  * @range_num:		Number of resources in the range
1787  *
1788  * Return: 0 if all went fine, else return appropriate error.
1789  */
1790 static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1791 					 u32 dev_id, u8 subtype,
1792 					 u16 *range_start, u16 *range_num)
1793 {
1794 	return ti_sci_get_resource_range(handle, dev_id, subtype,
1795 					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1796 					 range_start, range_num);
1797 }
1798 
1799 /**
1800  * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
1801  *					      assigned to a specified host.
1802  * @handle:		Pointer to TISCI handle.
1803  * @dev_id:		TISCI device ID.
1804  * @subtype:		Resource assignment subtype that is being requested
1805  *			from the given device.
1806  * @s_host:		Host processor ID to which the resources are allocated
1807  * @range_start:	Start index of the resource range
1808  * @range_num:		Number of resources in the range
1809  *
1810  * Return: 0 if all went fine, else return appropriate error.
1811  */
1812 static
1813 int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1814 					     u32 dev_id, u8 subtype, u8 s_host,
1815 					     u16 *range_start, u16 *range_num)
1816 {
1817 	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1818 					 range_start, range_num);
1819 }
1820 
1821 /**
1822  * ti_sci_manage_irq() - Helper api to configure/release the irq route between
1823  *			 the requested source and destination
1824  * @handle:		Pointer to TISCI handle.
1825  * @valid_params:	Bit fields defining the validity of certain params
1826  * @src_id:		Device ID of the IRQ source
1827  * @src_index:		IRQ source index within the source device
1828  * @dst_id:		Device ID of the IRQ destination
1829  * @dst_host_irq:	IRQ number of the destination device
1830  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1831  * @vint:		Virtual interrupt to be used within the IA
1832  * @global_event:	Global event number to be used for the requesting event
1833  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1834  * @s_host:		Secondary host ID for which the irq/event is being
1835  *			requested.
1836  * @type:		Request type irq set or release.
1837  *
1838  * Return: 0 if all went fine, else return appropriate error.
1839  */
1840 static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
1841 			     u32 valid_params, u16 src_id, u16 src_index,
1842 			     u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
1843 			     u16 global_event, u8 vint_status_bit, u8 s_host,
1844 			     u16 type)
1845 {
1846 	struct ti_sci_msg_req_manage_irq *req;
1847 	struct ti_sci_msg_hdr *resp;
1848 	struct ti_sci_xfer *xfer;
1849 	struct ti_sci_info *info;
1850 	struct device *dev;
1851 	int ret = 0;
1852 
1853 	if (IS_ERR(handle))
1854 		return PTR_ERR(handle);
1855 	if (!handle)
1856 		return -EINVAL;
1857 
1858 	info = handle_to_ti_sci_info(handle);
1859 	dev = info->dev;
1860 
1861 	xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1862 				   sizeof(*req), sizeof(*resp));
1863 	if (IS_ERR(xfer)) {
1864 		ret = PTR_ERR(xfer);
1865 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1866 		return ret;
1867 	}
1868 	req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
1869 	req->valid_params = valid_params;
1870 	req->src_id = src_id;
1871 	req->src_index = src_index;
1872 	req->dst_id = dst_id;
1873 	req->dst_host_irq = dst_host_irq;
1874 	req->ia_id = ia_id;
1875 	req->vint = vint;
1876 	req->global_event = global_event;
1877 	req->vint_status_bit = vint_status_bit;
1878 	req->secondary_host = s_host;
1879 
1880 	ret = ti_sci_do_xfer(info, xfer);
1881 	if (ret) {
1882 		dev_err(dev, "Mbox send fail %d\n", ret);
1883 		goto fail;
1884 	}
1885 
1886 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1887 
1888 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1889 
1890 fail:
1891 	ti_sci_put_one_xfer(&info->minfo, xfer);
1892 
1893 	return ret;
1894 }
1895 
1896 /**
1897  * ti_sci_set_irq() - Helper api to configure the irq route between the
1898  *		      requested source and destination
1899  * @handle:		Pointer to TISCI handle.
1900  * @valid_params:	Bit fields defining the validity of certain params
1901  * @src_id:		Device ID of the IRQ source
1902  * @src_index:		IRQ source index within the source device
1903  * @dst_id:		Device ID of the IRQ destination
1904  * @dst_host_irq:	IRQ number of the destination device
1905  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1906  * @vint:		Virtual interrupt to be used within the IA
1907  * @global_event:	Global event number to be used for the requesting event
1908  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1909  * @s_host:		Secondary host ID for which the irq/event is being
1910  *			requested.
1911  *
1912  * Return: 0 if all went fine, else return appropriate error.
1913  */
1914 static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
1915 			  u16 src_id, u16 src_index, u16 dst_id,
1916 			  u16 dst_host_irq, u16 ia_id, u16 vint,
1917 			  u16 global_event, u8 vint_status_bit, u8 s_host)
1918 {
1919 	pr_debug("IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
1920 		 valid_params, src_id, src_index,
1921 		 dst_id, dst_host_irq, ia_id, vint, global_event,
1922 		 vint_status_bit);
1923 
1924 	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1925 				 dst_id, dst_host_irq, ia_id, vint,
1926 				 global_event, vint_status_bit, s_host,
1927 				 TI_SCI_MSG_SET_IRQ);
1928 }
1929 
1930 /**
1931  * ti_sci_free_irq() - Helper api to free the irq route between the
1932  *			   requested source and destination
1933  * @handle:		Pointer to TISCI handle.
1934  * @valid_params:	Bit fields defining the validity of certain params
1935  * @src_id:		Device ID of the IRQ source
1936  * @src_index:		IRQ source index within the source device
1937  * @dst_id:		Device ID of the IRQ destination
1938  * @dst_host_irq:	IRQ number of the destination device
1939  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1940  * @vint:		Virtual interrupt to be used within the IA
1941  * @global_event:	Global event number to be used for the requesting event
1942  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1943  * @s_host:		Secondary host ID for which the irq/event is being
1944  *			requested.
1945  *
1946  * Return: 0 if all went fine, else return appropriate error.
1947  */
1948 static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
1949 			   u16 src_id, u16 src_index, u16 dst_id,
1950 			   u16 dst_host_irq, u16 ia_id, u16 vint,
1951 			   u16 global_event, u8 vint_status_bit, u8 s_host)
1952 {
1953 	pr_debug("IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
1954 		 valid_params, src_id, src_index,
1955 		 dst_id, dst_host_irq, ia_id, vint, global_event,
1956 		 vint_status_bit);
1957 
1958 	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1959 				 dst_id, dst_host_irq, ia_id, vint,
1960 				 global_event, vint_status_bit, s_host,
1961 				 TI_SCI_MSG_FREE_IRQ);
1962 }
1963 
1964 /**
1965  * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
1966  *			  source and destination.
1967  * @handle:		Pointer to TISCI handle.
1968  * @src_id:		Device ID of the IRQ source
1969  * @src_index:		IRQ source index within the source device
1970  * @dst_id:		Device ID of the IRQ destination
1971  * @dst_host_irq:	IRQ number of the destination device
1974  *
1975  * Return: 0 if all went fine, else return appropriate error.
1976  */
1977 static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
1978 			      u16 src_index, u16 dst_id, u16 dst_host_irq)
1979 {
1980 	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
1981 
1982 	return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
1983 			      dst_host_irq, 0, 0, 0, 0, 0);
1984 }
1985 
1986 /**
1987  * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
1988  *				requested source and Interrupt Aggregator.
1989  * @handle:		Pointer to TISCI handle.
1990  * @src_id:		Device ID of the IRQ source
1991  * @src_index:		IRQ source index within the source device
1992  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1993  * @vint:		Virtual interrupt to be used within the IA
1994  * @global_event:	Global event number to be used for the requesting event
1995  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1996  *
1997  * Return: 0 if all went fine, else return appropriate error.
1998  */
1999 static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
2000 				    u16 src_id, u16 src_index, u16 ia_id,
2001 				    u16 vint, u16 global_event,
2002 				    u8 vint_status_bit)
2003 {
2004 	u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
2005 			   MSG_FLAG_GLB_EVNT_VALID |
2006 			   MSG_FLAG_VINT_STS_BIT_VALID;
2007 
2008 	return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
2009 			      ia_id, vint, global_event, vint_status_bit, 0);
2010 }
2011 
2012 /**
2013  * ti_sci_cmd_free_irq() - Free a host irq route between the requested
2014  *			   source and destination.
2015  * @handle:		Pointer to TISCI handle.
2016  * @src_id:		Device ID of the IRQ source
2017  * @src_index:		IRQ source index within the source device
2018  * @dst_id:		Device ID of the IRQ destination
2019  * @dst_host_irq:	IRQ number of the destination device
2022  *
2023  * Return: 0 if all went fine, else return appropriate error.
2024  */
2025 static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
2026 			       u16 src_index, u16 dst_id, u16 dst_host_irq)
2027 {
2028 	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
2029 
2030 	return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
2031 			       dst_host_irq, 0, 0, 0, 0, 0);
2032 }
2033 
2034 /**
2035  * ti_sci_cmd_free_event_map() - Free an event map between the requested source
2036  *				 and Interrupt Aggregator.
2037  * @handle:		Pointer to TISCI handle.
2038  * @src_id:		Device ID of the IRQ source
2039  * @src_index:		IRQ source index within the source device
2040  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2041  * @vint:		Virtual interrupt to be used within the IA
2042  * @global_event:	Global event number to be used for the requesting event
2043  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2044  *
2045  * Return: 0 if all went fine, else return appropriate error.
2046  */
2047 static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
2048 				     u16 src_id, u16 src_index, u16 ia_id,
2049 				     u16 vint, u16 global_event,
2050 				     u8 vint_status_bit)
2051 {
2052 	u32 valid_params = MSG_FLAG_IA_ID_VALID |
2053 			   MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
2054 			   MSG_FLAG_VINT_STS_BIT_VALID;
2055 
2056 	return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
2057 			       ia_id, vint, global_event, vint_status_bit, 0);
2058 }
2059 
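/*
 * Usage sketch (hypothetical IDs): clients reach the four helpers above
 * through the rm_irq_ops installed in ti_sci_setup_ops() below. An event
 * map set up with one tuple must be freed with the same tuple:
 *
 *	const struct ti_sci_rm_irq_ops *iops = &handle->ops.rm_irq_ops;
 *	int ret;
 *
 *	ret = iops->set_event_map(handle, src_id, src_index, ia_id,
 *				  vint, global_event, vint_status_bit);
 *	...
 *	ret = iops->free_event_map(handle, src_id, src_index, ia_id,
 *				   vint, global_event, vint_status_bit);
 */
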
2060 /**
2061  * ti_sci_cmd_ring_config() - configure RA ring
2062  * @handle:		Pointer to TI SCI handle.
2063  * @valid_params:	Bitfield defining validity of ring configuration
2064  *			parameters
2065  * @nav_id:		Device ID of Navigator Subsystem from which the ring is
2066  *			allocated
2067  * @index:		Ring index
2068  * @addr_lo:		The ring base address lo 32 bits
2069  * @addr_hi:		The ring base address hi 32 bits
2070  * @count:		Number of ring elements
2071  * @mode:		The mode of the ring
2072  * @size:		The ring element size.
2073  * @order_id:		Specifies the ring's bus order ID
2074  *
2075  * Return: 0 if all went well, else returns appropriate error value.
2076  *
2077  * See @ti_sci_msg_rm_ring_cfg_req for more info.
2078  */
2079 static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2080 				  u32 valid_params, u16 nav_id, u16 index,
2081 				  u32 addr_lo, u32 addr_hi, u32 count,
2082 				  u8 mode, u8 size, u8 order_id)
2083 {
2084 	struct ti_sci_msg_rm_ring_cfg_req *req;
2085 	struct ti_sci_msg_hdr *resp;
2086 	struct ti_sci_xfer *xfer;
2087 	struct ti_sci_info *info;
2088 	struct device *dev;
2089 	int ret = 0;
2090 
2091 	if (IS_ERR_OR_NULL(handle))
2092 		return -EINVAL;
2093 
2094 	info = handle_to_ti_sci_info(handle);
2095 	dev = info->dev;
2096 
2097 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2098 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2099 				   sizeof(*req), sizeof(*resp));
2100 	if (IS_ERR(xfer)) {
2101 		ret = PTR_ERR(xfer);
2102 		dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
2103 		return ret;
2104 	}
2105 	req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
2106 	req->valid_params = valid_params;
2107 	req->nav_id = nav_id;
2108 	req->index = index;
2109 	req->addr_lo = addr_lo;
2110 	req->addr_hi = addr_hi;
2111 	req->count = count;
2112 	req->mode = mode;
2113 	req->size = size;
2114 	req->order_id = order_id;
2115 
2116 	ret = ti_sci_do_xfer(info, xfer);
2117 	if (ret) {
2118 		dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
2119 		goto fail;
2120 	}
2121 
2122 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2123 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2124 
2125 fail:
2126 	ti_sci_put_one_xfer(&info->minfo, xfer);
2127 	dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2128 	return ret;
2129 }
2130 
2131 /**
2132  * ti_sci_cmd_ring_get_config() - get RA ring configuration
2133  * @handle:	Pointer to TI SCI handle.
2134  * @nav_id:	Device ID of Navigator Subsystem from which the ring is
2135  *		allocated
2136  * @index:	Ring index
2137  * @addr_lo:	Returns ring's base address lo 32 bits
2138  * @addr_hi:	Returns ring's base address hi 32 bits
2139  * @count:	Returns number of ring elements
2140  * @mode:	Returns mode of the ring
2141  * @size:	Returns ring element size
2142  * @order_id:	Returns ring's bus order ID
2143  *
2144  * Return: 0 if all went well, else returns appropriate error value.
2145  *
2146  * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
2147  */
2148 static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
2149 				      u32 nav_id, u32 index, u8 *mode,
2150 				      u32 *addr_lo, u32 *addr_hi,
2151 				      u32 *count, u8 *size, u8 *order_id)
2152 {
2153 	struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
2154 	struct ti_sci_msg_rm_ring_get_cfg_req *req;
2155 	struct ti_sci_xfer *xfer;
2156 	struct ti_sci_info *info;
2157 	struct device *dev;
2158 	int ret = 0;
2159 
2160 	if (IS_ERR_OR_NULL(handle))
2161 		return -EINVAL;
2162 
2163 	info = handle_to_ti_sci_info(handle);
2164 	dev = info->dev;
2165 
2166 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
2167 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2168 				   sizeof(*req), sizeof(*resp));
2169 	if (IS_ERR(xfer)) {
2170 		ret = PTR_ERR(xfer);
2171 		dev_err(dev, "RM_RA:Message alloc failed(%d)\n", ret);
2173 		return ret;
2174 	}
2175 	req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
2176 	req->nav_id = nav_id;
2177 	req->index = index;
2178 
2179 	ret = ti_sci_do_xfer(info, xfer);
2180 	if (ret) {
2181 		dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret);
2182 		goto fail;
2183 	}
2184 
2185 	resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;
2186 
2187 	if (!ti_sci_is_response_ack(resp)) {
2188 		ret = -ENODEV;
2189 	} else {
2190 		if (mode)
2191 			*mode = resp->mode;
2192 		if (addr_lo)
2193 			*addr_lo = resp->addr_lo;
2194 		if (addr_hi)
2195 			*addr_hi = resp->addr_hi;
2196 		if (count)
2197 			*count = resp->count;
2198 		if (size)
2199 			*size = resp->size;
2200 		if (order_id)
2201 			*order_id = resp->order_id;
2202 	}
2203 
2204 fail:
2205 	ti_sci_put_one_xfer(&info->minfo, xfer);
2206 	dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
2207 	return ret;
2208 }
2209 
2210 /**
2211  * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
2212  * @handle:	Pointer to TI SCI handle.
2213  * @nav_id:	Device ID of Navigator Subsystem which should be used for
2214  *		pairing
2215  * @src_thread:	Source PSI-L thread ID
2216  * @dst_thread: Destination PSI-L thread ID
2217  *
2218  * Return: 0 if all went well, else returns appropriate error value.
2219  */
2220 static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2221 				   u32 nav_id, u32 src_thread, u32 dst_thread)
2222 {
2223 	struct ti_sci_msg_psil_pair *req;
2224 	struct ti_sci_msg_hdr *resp;
2225 	struct ti_sci_xfer *xfer;
2226 	struct ti_sci_info *info;
2227 	struct device *dev;
2228 	int ret = 0;
2229 
2230 	if (IS_ERR(handle))
2231 		return PTR_ERR(handle);
2232 	if (!handle)
2233 		return -EINVAL;
2234 
2235 	info = handle_to_ti_sci_info(handle);
2236 	dev = info->dev;
2237 
2238 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2239 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2240 				   sizeof(*req), sizeof(*resp));
2241 	if (IS_ERR(xfer)) {
2242 		ret = PTR_ERR(xfer);
2243 		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
2244 		return ret;
2245 	}
2246 	req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
2247 	req->nav_id = nav_id;
2248 	req->src_thread = src_thread;
2249 	req->dst_thread = dst_thread;
2250 
2251 	ret = ti_sci_do_xfer(info, xfer);
2252 	if (ret) {
2253 		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2254 		goto fail;
2255 	}
2256 
2257 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2258 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2259 
2260 fail:
2261 	ti_sci_put_one_xfer(&info->minfo, xfer);
2262 
2263 	return ret;
2264 }
2265 
2266 /**
2267  * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
2268  * @handle:	Pointer to TI SCI handle.
2269  * @nav_id:	Device ID of Navigator Subsystem which should be used for
2270  *		unpairing
2271  * @src_thread:	Source PSI-L thread ID
2272  * @dst_thread:	Destination PSI-L thread ID
2273  *
2274  * Return: 0 if all went well, else returns appropriate error value.
2275  */
2276 static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2277 				     u32 nav_id, u32 src_thread, u32 dst_thread)
2278 {
2279 	struct ti_sci_msg_psil_unpair *req;
2280 	struct ti_sci_msg_hdr *resp;
2281 	struct ti_sci_xfer *xfer;
2282 	struct ti_sci_info *info;
2283 	struct device *dev;
2284 	int ret = 0;
2285 
2286 	if (IS_ERR(handle))
2287 		return PTR_ERR(handle);
2288 	if (!handle)
2289 		return -EINVAL;
2290 
2291 	info = handle_to_ti_sci_info(handle);
2292 	dev = info->dev;
2293 
2294 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2295 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2296 				   sizeof(*req), sizeof(*resp));
2297 	if (IS_ERR(xfer)) {
2298 		ret = PTR_ERR(xfer);
2299 		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
2300 		return ret;
2301 	}
2302 	req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
2303 	req->nav_id = nav_id;
2304 	req->src_thread = src_thread;
2305 	req->dst_thread = dst_thread;
2306 
2307 	ret = ti_sci_do_xfer(info, xfer);
2308 	if (ret) {
2309 		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2310 		goto fail;
2311 	}
2312 
2313 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2314 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2315 
2316 fail:
2317 	ti_sci_put_one_xfer(&info->minfo, xfer);
2318 
2319 	return ret;
2320 }
2321 
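/*
 * Usage sketch (hypothetical thread IDs): a PSI-L pairing made through
 * rm_psil_ops must be torn down with the same nav_id/thread tuple:
 *
 *	ret = handle->ops.rm_psil_ops.pair(handle, nav_id,
 *					   src_thread, dst_thread);
 *	...
 *	ret = handle->ops.rm_psil_ops.unpair(handle, nav_id,
 *					     src_thread, dst_thread);
 */
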
2322 /**
2323  * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
2324  * @handle:	Pointer to TI SCI handle.
2325  * @params:	Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
2326  *		structure
2327  *
2328  * Return: 0 if all went well, else returns appropriate error value.
2329  *
2330  * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
2331  * more info.
2332  */
2333 static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
2334 			const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2335 {
2336 	struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
2337 	struct ti_sci_msg_hdr *resp;
2338 	struct ti_sci_xfer *xfer;
2339 	struct ti_sci_info *info;
2340 	struct device *dev;
2341 	int ret = 0;
2342 
2343 	if (IS_ERR_OR_NULL(handle))
2344 		return -EINVAL;
2345 
2346 	info = handle_to_ti_sci_info(handle);
2347 	dev = info->dev;
2348 
2349 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2350 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2351 				   sizeof(*req), sizeof(*resp));
2352 	if (IS_ERR(xfer)) {
2353 		ret = PTR_ERR(xfer);
2354 		dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2355 		return ret;
2356 	}
2357 	req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
2358 	req->valid_params = params->valid_params;
2359 	req->nav_id = params->nav_id;
2360 	req->index = params->index;
2361 	req->tx_pause_on_err = params->tx_pause_on_err;
2362 	req->tx_filt_einfo = params->tx_filt_einfo;
2363 	req->tx_filt_pswords = params->tx_filt_pswords;
2364 	req->tx_atype = params->tx_atype;
2365 	req->tx_chan_type = params->tx_chan_type;
2366 	req->tx_supr_tdpkt = params->tx_supr_tdpkt;
2367 	req->tx_fetch_size = params->tx_fetch_size;
2368 	req->tx_credit_count = params->tx_credit_count;
2369 	req->txcq_qnum = params->txcq_qnum;
2370 	req->tx_priority = params->tx_priority;
2371 	req->tx_qos = params->tx_qos;
2372 	req->tx_orderid = params->tx_orderid;
2373 	req->fdepth = params->fdepth;
2374 	req->tx_sched_priority = params->tx_sched_priority;
2375 	req->tx_burst_size = params->tx_burst_size;
2376 
2377 	ret = ti_sci_do_xfer(info, xfer);
2378 	if (ret) {
2379 		dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2380 		goto fail;
2381 	}
2382 
2383 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2384 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2385 
2386 fail:
2387 	ti_sci_put_one_xfer(&info->minfo, xfer);
2388 	dev_dbg(dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
2389 	return ret;
2390 }
2391 
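/*
 * Usage sketch (hypothetical field values): UDMAP channels are described
 * with a parameter structure rather than a long argument list; only the
 * fields flagged in valid_params are treated as meaningful:
 *
 *	struct ti_sci_msg_rm_udmap_tx_ch_cfg cfg = {
 *		.valid_params = ...,
 *		.nav_id = ...,
 *		.index = ...,
 *	};
 *
 *	ret = handle->ops.rm_udmap_ops.tx_ch_cfg(handle, &cfg);
 */
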
2392 /**
2393  * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
2394  * @handle:	Pointer to TI SCI handle.
2395  * @params:	Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
2396  *		structure
2397  *
2398  * Return: 0 if all went well, else returns appropriate error value.
2399  *
2400  * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
2401  * more info.
2402  */
2403 static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
2404 			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2405 {
2406 	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
2407 	struct ti_sci_msg_hdr *resp;
2408 	struct ti_sci_xfer *xfer;
2409 	struct ti_sci_info *info;
2410 	struct device *dev;
2411 	int ret = 0;
2412 
2413 	if (IS_ERR_OR_NULL(handle))
2414 		return -EINVAL;
2415 
2416 	info = handle_to_ti_sci_info(handle);
2417 	dev = info->dev;
2418 
2419 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2420 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2421 				   sizeof(*req), sizeof(*resp));
2422 	if (IS_ERR(xfer)) {
2423 		ret = PTR_ERR(xfer);
2424 		dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2425 		return ret;
2426 	}
2427 	req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
2428 	req->valid_params = params->valid_params;
2429 	req->nav_id = params->nav_id;
2430 	req->index = params->index;
2431 	req->rx_fetch_size = params->rx_fetch_size;
2432 	req->rxcq_qnum = params->rxcq_qnum;
2433 	req->rx_priority = params->rx_priority;
2434 	req->rx_qos = params->rx_qos;
2435 	req->rx_orderid = params->rx_orderid;
2436 	req->rx_sched_priority = params->rx_sched_priority;
2437 	req->flowid_start = params->flowid_start;
2438 	req->flowid_cnt = params->flowid_cnt;
2439 	req->rx_pause_on_err = params->rx_pause_on_err;
2440 	req->rx_atype = params->rx_atype;
2441 	req->rx_chan_type = params->rx_chan_type;
2442 	req->rx_ignore_short = params->rx_ignore_short;
2443 	req->rx_ignore_long = params->rx_ignore_long;
2444 	req->rx_burst_size = params->rx_burst_size;
2445 
2446 	ret = ti_sci_do_xfer(info, xfer);
2447 	if (ret) {
2448 		dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2449 		goto fail;
2450 	}
2451 
2452 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2453 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2454 
2455 fail:
2456 	ti_sci_put_one_xfer(&info->minfo, xfer);
2457 	dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2458 	return ret;
2459 }
2460 
2461 /**
2462  * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
2463  * @handle:	Pointer to TI SCI handle.
2464  * @params:	Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
2465  *		structure
2466  *
2467  * Return: 0 if all went well, else returns appropriate error value.
2468  *
2469  * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
2470  * more info.
2471  */
2472 static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
2473 			const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2474 {
2475 	struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
2476 	struct ti_sci_msg_hdr *resp;
2477 	struct ti_sci_xfer *xfer;
2478 	struct ti_sci_info *info;
2479 	struct device *dev;
2480 	int ret = 0;
2481 
2482 	if (IS_ERR_OR_NULL(handle))
2483 		return -EINVAL;
2484 
2485 	info = handle_to_ti_sci_info(handle);
2486 	dev = info->dev;
2487 
2488 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2489 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2490 				   sizeof(*req), sizeof(*resp));
2491 	if (IS_ERR(xfer)) {
2492 		ret = PTR_ERR(xfer);
2493 		dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2494 		return ret;
2495 	}
2496 	req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
2497 	req->valid_params = params->valid_params;
2498 	req->nav_id = params->nav_id;
2499 	req->flow_index = params->flow_index;
2500 	req->rx_einfo_present = params->rx_einfo_present;
2501 	req->rx_psinfo_present = params->rx_psinfo_present;
2502 	req->rx_error_handling = params->rx_error_handling;
2503 	req->rx_desc_type = params->rx_desc_type;
2504 	req->rx_sop_offset = params->rx_sop_offset;
2505 	req->rx_dest_qnum = params->rx_dest_qnum;
2506 	req->rx_src_tag_hi = params->rx_src_tag_hi;
2507 	req->rx_src_tag_lo = params->rx_src_tag_lo;
2508 	req->rx_dest_tag_hi = params->rx_dest_tag_hi;
2509 	req->rx_dest_tag_lo = params->rx_dest_tag_lo;
2510 	req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2511 	req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2512 	req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2513 	req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2514 	req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2515 	req->rx_fdq1_qnum = params->rx_fdq1_qnum;
2516 	req->rx_fdq2_qnum = params->rx_fdq2_qnum;
2517 	req->rx_fdq3_qnum = params->rx_fdq3_qnum;
2518 	req->rx_ps_location = params->rx_ps_location;
2519 
2520 	ret = ti_sci_do_xfer(info, xfer);
2521 	if (ret) {
2522 		dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2523 		goto fail;
2524 	}
2525 
2526 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2527 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2528 
2529 fail:
2530 	ti_sci_put_one_xfer(&info->minfo, xfer);
2531 	dev_dbg(dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2532 	return ret;
2533 }
2534 
2535 /**
2536  * ti_sci_cmd_proc_request() - Command to request control of a physical processor
2537  * @handle:	Pointer to TI SCI handle
2538  * @proc_id:	Processor ID this request is for
2539  *
2540  * Return: 0 if all went well, else returns appropriate error value.
2541  */
2542 static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
2543 				   u8 proc_id)
2544 {
2545 	struct ti_sci_msg_req_proc_request *req;
2546 	struct ti_sci_msg_hdr *resp;
2547 	struct ti_sci_info *info;
2548 	struct ti_sci_xfer *xfer;
2549 	struct device *dev;
2550 	int ret = 0;
2551 
2552 	if (!handle)
2553 		return -EINVAL;
2554 	if (IS_ERR(handle))
2555 		return PTR_ERR(handle);
2556 
2557 	info = handle_to_ti_sci_info(handle);
2558 	dev = info->dev;
2559 
2560 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
2561 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2562 				   sizeof(*req), sizeof(*resp));
2563 	if (IS_ERR(xfer)) {
2564 		ret = PTR_ERR(xfer);
2565 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2566 		return ret;
2567 	}
2568 	req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
2569 	req->processor_id = proc_id;
2570 
2571 	ret = ti_sci_do_xfer(info, xfer);
2572 	if (ret) {
2573 		dev_err(dev, "Mbox send fail %d\n", ret);
2574 		goto fail;
2575 	}
2576 
2577 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2578 
2579 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2580 
2581 fail:
2582 	ti_sci_put_one_xfer(&info->minfo, xfer);
2583 
2584 	return ret;
2585 }
2586 
2587 /**
2588  * ti_sci_cmd_proc_release() - Command to release control of a physical processor
2589  * @handle:	Pointer to TI SCI handle
2590  * @proc_id:	Processor ID this request is for
2591  *
2592  * Return: 0 if all went well, else returns appropriate error value.
2593  */
2594 static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
2595 				   u8 proc_id)
2596 {
2597 	struct ti_sci_msg_req_proc_release *req;
2598 	struct ti_sci_msg_hdr *resp;
2599 	struct ti_sci_info *info;
2600 	struct ti_sci_xfer *xfer;
2601 	struct device *dev;
2602 	int ret = 0;
2603 
2604 	if (!handle)
2605 		return -EINVAL;
2606 	if (IS_ERR(handle))
2607 		return PTR_ERR(handle);
2608 
2609 	info = handle_to_ti_sci_info(handle);
2610 	dev = info->dev;
2611 
2612 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
2613 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2614 				   sizeof(*req), sizeof(*resp));
2615 	if (IS_ERR(xfer)) {
2616 		ret = PTR_ERR(xfer);
2617 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2618 		return ret;
2619 	}
2620 	req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
2621 	req->processor_id = proc_id;
2622 
2623 	ret = ti_sci_do_xfer(info, xfer);
2624 	if (ret) {
2625 		dev_err(dev, "Mbox send fail %d\n", ret);
2626 		goto fail;
2627 	}
2628 
2629 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2630 
2631 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2632 
2633 fail:
2634 	ti_sci_put_one_xfer(&info->minfo, xfer);
2635 
2636 	return ret;
2637 }
2638 
2639 /**
2640  * ti_sci_cmd_proc_handover() - Command to hand over control of a physical
2641  *				processor to a host in the processor's
2642  *				access control list.
2643  * @handle:	Pointer to TI SCI handle
2644  * @proc_id:	Processor ID this request is for
2645  * @host_id:	Host ID to get the control of the processor
2646  *
2647  * Return: 0 if all went well, else returns appropriate error value.
2648  */
2649 static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
2650 				    u8 proc_id, u8 host_id)
2651 {
2652 	struct ti_sci_msg_req_proc_handover *req;
2653 	struct ti_sci_msg_hdr *resp;
2654 	struct ti_sci_info *info;
2655 	struct ti_sci_xfer *xfer;
2656 	struct device *dev;
2657 	int ret = 0;
2658 
2659 	if (!handle)
2660 		return -EINVAL;
2661 	if (IS_ERR(handle))
2662 		return PTR_ERR(handle);
2663 
2664 	info = handle_to_ti_sci_info(handle);
2665 	dev = info->dev;
2666 
2667 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
2668 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2669 				   sizeof(*req), sizeof(*resp));
2670 	if (IS_ERR(xfer)) {
2671 		ret = PTR_ERR(xfer);
2672 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2673 		return ret;
2674 	}
2675 	req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
2676 	req->processor_id = proc_id;
2677 	req->host_id = host_id;
2678 
2679 	ret = ti_sci_do_xfer(info, xfer);
2680 	if (ret) {
2681 		dev_err(dev, "Mbox send fail %d\n", ret);
2682 		goto fail;
2683 	}
2684 
2685 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2686 
2687 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2688 
2689 fail:
2690 	ti_sci_put_one_xfer(&info->minfo, xfer);
2691 
2692 	return ret;
2693 }
2694 
2695 /**
2696  * ti_sci_cmd_proc_set_config() - Command to set the processor boot
2697  *				    configuration flags
2698  * @handle:		Pointer to TI SCI handle
2699  * @proc_id:		Processor ID this request is for
 * @bootvector:		Processor Boot vector (start address)
2700  * @config_flags_set:	Configuration flags to be set
2701  * @config_flags_clear:	Configuration flags to be cleared.
2702  *
2703  * Return: 0 if all went well, else returns appropriate error value.
2704  */
2705 static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
2706 				      u8 proc_id, u64 bootvector,
2707 				      u32 config_flags_set,
2708 				      u32 config_flags_clear)
2709 {
2710 	struct ti_sci_msg_req_set_config *req;
2711 	struct ti_sci_msg_hdr *resp;
2712 	struct ti_sci_info *info;
2713 	struct ti_sci_xfer *xfer;
2714 	struct device *dev;
2715 	int ret = 0;
2716 
2717 	if (!handle)
2718 		return -EINVAL;
2719 	if (IS_ERR(handle))
2720 		return PTR_ERR(handle);
2721 
2722 	info = handle_to_ti_sci_info(handle);
2723 	dev = info->dev;
2724 
2725 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
2726 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2727 				   sizeof(*req), sizeof(*resp));
2728 	if (IS_ERR(xfer)) {
2729 		ret = PTR_ERR(xfer);
2730 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2731 		return ret;
2732 	}
2733 	req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
2734 	req->processor_id = proc_id;
2735 	req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
2736 	req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
2737 				TI_SCI_ADDR_HIGH_SHIFT;
2738 	req->config_flags_set = config_flags_set;
2739 	req->config_flags_clear = config_flags_clear;
2740 
2741 	ret = ti_sci_do_xfer(info, xfer);
2742 	if (ret) {
2743 		dev_err(dev, "Mbox send fail %d\n", ret);
2744 		goto fail;
2745 	}
2746 
2747 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2748 
2749 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2750 
2751 fail:
2752 	ti_sci_put_one_xfer(&info->minfo, xfer);
2753 
2754 	return ret;
2755 }
2756 
2757 /**
2758  * ti_sci_cmd_proc_set_control() - Command to set the processor boot
2759  *				     control flags
2760  * @handle:			Pointer to TI SCI handle
2761  * @proc_id:			Processor ID this request is for
2762  * @control_flags_set:		Control flags to be set
2763  * @control_flags_clear:	Control flags to be cleared
2764  *
2765  * Return: 0 if all went well, else returns appropriate error value.
2766  */
2767 static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
2768 				       u8 proc_id, u32 control_flags_set,
2769 				       u32 control_flags_clear)
2770 {
2771 	struct ti_sci_msg_req_set_ctrl *req;
2772 	struct ti_sci_msg_hdr *resp;
2773 	struct ti_sci_info *info;
2774 	struct ti_sci_xfer *xfer;
2775 	struct device *dev;
2776 	int ret = 0;
2777 
2778 	if (!handle)
2779 		return -EINVAL;
2780 	if (IS_ERR(handle))
2781 		return PTR_ERR(handle);
2782 
2783 	info = handle_to_ti_sci_info(handle);
2784 	dev = info->dev;
2785 
2786 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
2787 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2788 				   sizeof(*req), sizeof(*resp));
2789 	if (IS_ERR(xfer)) {
2790 		ret = PTR_ERR(xfer);
2791 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2792 		return ret;
2793 	}
2794 	req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
2795 	req->processor_id = proc_id;
2796 	req->control_flags_set = control_flags_set;
2797 	req->control_flags_clear = control_flags_clear;
2798 
2799 	ret = ti_sci_do_xfer(info, xfer);
2800 	if (ret) {
2801 		dev_err(dev, "Mbox send fail %d\n", ret);
2802 		goto fail;
2803 	}
2804 
2805 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2806 
2807 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2808 
2809 fail:
2810 	ti_sci_put_one_xfer(&info->minfo, xfer);
2811 
2812 	return ret;
2813 }
2814 
2815 /**
2816  * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
2817  * @handle:	Pointer to TI SCI handle
2818  * @proc_id:	Processor ID this request is for
 * @bv:		Processor Boot vector (start address)
 * @cfg_flags:	Processor specific configuration flags
 * @ctrl_flags:	Processor specific control flags
 * @sts_flags:	Processor specific status flags
2819  *
2820  * Return: 0 if all went well, else returns appropriate error value.
2821  */
2822 static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
2823 				      u8 proc_id, u64 *bv, u32 *cfg_flags,
2824 				      u32 *ctrl_flags, u32 *sts_flags)
2825 {
2826 	struct ti_sci_msg_resp_get_status *resp;
2827 	struct ti_sci_msg_req_get_status *req;
2828 	struct ti_sci_info *info;
2829 	struct ti_sci_xfer *xfer;
2830 	struct device *dev;
2831 	int ret = 0;
2832 
2833 	if (!handle)
2834 		return -EINVAL;
2835 	if (IS_ERR(handle))
2836 		return PTR_ERR(handle);
2837 
2838 	info = handle_to_ti_sci_info(handle);
2839 	dev = info->dev;
2840 
2841 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
2842 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2843 				   sizeof(*req), sizeof(*resp));
2844 	if (IS_ERR(xfer)) {
2845 		ret = PTR_ERR(xfer);
2846 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2847 		return ret;
2848 	}
2849 	req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
2850 	req->processor_id = proc_id;
2851 
2852 	ret = ti_sci_do_xfer(info, xfer);
2853 	if (ret) {
2854 		dev_err(dev, "Mbox send fail %d\n", ret);
2855 		goto fail;
2856 	}
2857 
2858 	resp = (struct ti_sci_msg_resp_get_status *)xfer->xfer_buf;
2859 
2860 	if (!ti_sci_is_response_ack(resp)) {
2861 		ret = -ENODEV;
2862 	} else {
2863 		*bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
2864 		      (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
2865 		       TI_SCI_ADDR_HIGH_MASK);
2866 		*cfg_flags = resp->config_flags;
2867 		*ctrl_flags = resp->control_flags;
2868 		*sts_flags = resp->status_flags;
2869 	}
2870 
2871 fail:
2872 	ti_sci_put_one_xfer(&info->minfo, xfer);
2873 
2874 	return ret;
2875 }
2876 
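/*
 * Usage sketch (hypothetical IDs): a typical boot sequence through the
 * proc_ops installed in ti_sci_setup_ops() below:
 *
 *	const struct ti_sci_proc_ops *pops = &handle->ops.proc_ops;
 *
 *	ret = pops->request(handle, proc_id);
 *	ret = pops->set_config(handle, proc_id, boot_vector,
 *			       cfg_set, cfg_clear);
 *	ret = pops->set_control(handle, proc_id, ctrl_set, ctrl_clear);
 *	...
 *	ret = pops->release(handle, proc_id);
 */
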
2877 /**
2878  * ti_sci_setup_ops() - Setup the operations structures
2879  * @info:	pointer to TISCI instance
2880  */
2881 static void ti_sci_setup_ops(struct ti_sci_info *info)
2882 {
2883 	struct ti_sci_ops *ops = &info->handle.ops;
2884 	struct ti_sci_core_ops *core_ops = &ops->core_ops;
2885 	struct ti_sci_dev_ops *dops = &ops->dev_ops;
2886 	struct ti_sci_clk_ops *cops = &ops->clk_ops;
2887 	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
2888 	struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
2889 	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2890 	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2891 	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
2892 	struct ti_sci_proc_ops *pops = &ops->proc_ops;
2893 
2894 	core_ops->reboot_device = ti_sci_cmd_core_reboot;
2895 
2896 	dops->get_device = ti_sci_cmd_get_device;
2897 	dops->idle_device = ti_sci_cmd_idle_device;
2898 	dops->put_device = ti_sci_cmd_put_device;
2899 
2900 	dops->is_valid = ti_sci_cmd_dev_is_valid;
2901 	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
2902 	dops->is_idle = ti_sci_cmd_dev_is_idle;
2903 	dops->is_stop = ti_sci_cmd_dev_is_stop;
2904 	dops->is_on = ti_sci_cmd_dev_is_on;
2905 	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
2906 	dops->set_device_resets = ti_sci_cmd_set_device_resets;
2907 	dops->get_device_resets = ti_sci_cmd_get_device_resets;
2908 
2909 	cops->get_clock = ti_sci_cmd_get_clock;
2910 	cops->idle_clock = ti_sci_cmd_idle_clock;
2911 	cops->put_clock = ti_sci_cmd_put_clock;
2912 	cops->is_auto = ti_sci_cmd_clk_is_auto;
2913 	cops->is_on = ti_sci_cmd_clk_is_on;
2914 	cops->is_off = ti_sci_cmd_clk_is_off;
2915 
2916 	cops->set_parent = ti_sci_cmd_clk_set_parent;
2917 	cops->get_parent = ti_sci_cmd_clk_get_parent;
2918 	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
2919 
2920 	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
2921 	cops->set_freq = ti_sci_cmd_clk_set_freq;
2922 	cops->get_freq = ti_sci_cmd_clk_get_freq;
2923 
2924 	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2925 	rm_core_ops->get_range_from_shost =
2926 				ti_sci_cmd_get_resource_range_from_shost;
2927 
2928 	iops->set_irq = ti_sci_cmd_set_irq;
2929 	iops->set_event_map = ti_sci_cmd_set_event_map;
2930 	iops->free_irq = ti_sci_cmd_free_irq;
2931 	iops->free_event_map = ti_sci_cmd_free_event_map;
2932 
2933 	rops->config = ti_sci_cmd_ring_config;
2934 	rops->get_config = ti_sci_cmd_ring_get_config;
2935 
2936 	psilops->pair = ti_sci_cmd_rm_psil_pair;
2937 	psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2938 
2939 	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2940 	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2941 	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
2942 
2943 	pops->request = ti_sci_cmd_proc_request;
2944 	pops->release = ti_sci_cmd_proc_release;
2945 	pops->handover = ti_sci_cmd_proc_handover;
2946 	pops->set_config = ti_sci_cmd_proc_set_config;
2947 	pops->set_control = ti_sci_cmd_proc_set_control;
2948 	pops->get_status = ti_sci_cmd_proc_get_status;
2949 }
2950 
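/*
 * Note: once the ops above are installed, clients invoke everything
 * through the handle rather than by calling the ti_sci_cmd_*() helpers
 * directly, e.g. (sketch):
 *
 *	handle->ops.core_ops.reboot_device(handle);
 */
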
2951 /**
2952  * ti_sci_get_handle() - Get the TI SCI handle for a device
2953  * @dev:	Pointer to device for which we want SCI handle
2954  *
2955  * NOTE: The function does not track individual clients of the framework
2956  * and is expected to be maintained by caller of TI SCI protocol library.
2957  * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
2958  * Return: pointer to handle if successful, else:
2959  * -EPROBE_DEFER if the instance is not ready
2960  * -ENODEV if the required node handler is missing
2961  * -EINVAL if invalid conditions are encountered.
2962  */
2963 const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
2964 {
2965 	struct device_node *ti_sci_np;
2966 	struct list_head *p;
2967 	struct ti_sci_handle *handle = NULL;
2968 	struct ti_sci_info *info;
2969 
2970 	if (!dev) {
2971 		pr_err("I need a device pointer\n");
2972 		return ERR_PTR(-EINVAL);
2973 	}
2974 	ti_sci_np = of_get_parent(dev->of_node);
2975 	if (!ti_sci_np) {
2976 		dev_err(dev, "No OF information\n");
2977 		return ERR_PTR(-EINVAL);
2978 	}
2979 
2980 	mutex_lock(&ti_sci_list_mutex);
2981 	list_for_each(p, &ti_sci_list) {
2982 		info = list_entry(p, struct ti_sci_info, node);
2983 		if (ti_sci_np == info->dev->of_node) {
2984 			handle = &info->handle;
2985 			info->users++;
2986 			break;
2987 		}
2988 	}
2989 	mutex_unlock(&ti_sci_list_mutex);
2990 	of_node_put(ti_sci_np);
2991 
2992 	if (!handle)
2993 		return ERR_PTR(-EPROBE_DEFER);
2994 
2995 	return handle;
2996 }
2997 EXPORT_SYMBOL_GPL(ti_sci_get_handle);
2998 
2999 /**
3000  * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
3001  * @handle:	Handle acquired by ti_sci_get_handle
3002  *
3003  * NOTE: The function does not track individual clients of the framework
3004  * and is expected to be maintained by caller of TI SCI protocol library.
3005  * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
3006  *
3007  * Return: 0 if successfully released,
3008  * if an error pointer was passed, it returns the error value back,
3009  * if null was passed, it returns -EINVAL.
3010  */
3011 int ti_sci_put_handle(const struct ti_sci_handle *handle)
3012 {
3013 	struct ti_sci_info *info;
3014 
3015 	if (IS_ERR(handle))
3016 		return PTR_ERR(handle);
3017 	if (!handle)
3018 		return -EINVAL;
3019 
3020 	info = handle_to_ti_sci_info(handle);
3021 	mutex_lock(&ti_sci_list_mutex);
3022 	if (!WARN_ON(!info->users))
3023 		info->users--;
3024 	mutex_unlock(&ti_sci_list_mutex);
3025 
3026 	return 0;
3027 }
3028 EXPORT_SYMBOL_GPL(ti_sci_put_handle);
3029 
3030 static void devm_ti_sci_release(struct device *dev, void *res)
3031 {
3032 	const struct ti_sci_handle **ptr = res;
3033 	const struct ti_sci_handle *handle = *ptr;
3034 	int ret;
3035 
3036 	ret = ti_sci_put_handle(handle);
3037 	if (ret)
3038 		dev_err(dev, "failed to put handle %d\n", ret);
3039 }
3040 
3041 /**
3042  * devm_ti_sci_get_handle() - Managed get handle
3043  * @dev:	device for which we want the SCI handle.
3044  *
3045  * NOTE: This releases the handle once the device resources are
3046  * no longer needed. MUST NOT BE released with ti_sci_put_handle.
3047  * The function does not track individual clients of the framework
3048  * and is expected to be maintained by caller of TI SCI protocol library.
3049  *
3050  * Return: pointer to handle if successful, else corresponding error pointer.
3051  */
3052 const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
3053 {
3054 	const struct ti_sci_handle **ptr;
3055 	const struct ti_sci_handle *handle;
3056 
3057 	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
3058 	if (!ptr)
3059 		return ERR_PTR(-ENOMEM);
3060 	handle = ti_sci_get_handle(dev);
3061 
3062 	if (!IS_ERR(handle)) {
3063 		*ptr = handle;
3064 		devres_add(dev, ptr);
3065 	} else {
3066 		devres_free(ptr);
3067 	}
3068 
3069 	return handle;
3070 }
3071 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
3072 
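/*
 * Usage sketch: typical managed acquisition from a client driver's
 * probe, deferring until this TISCI instance has registered:
 *
 *	const struct ti_sci_handle *handle;
 *
 *	handle = devm_ti_sci_get_handle(&pdev->dev);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle); // -EPROBE_DEFER until ready
 */
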
3073 /**
3074  * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
3075  * @np:		device node
3076  * @property:	property name containing phandle on TISCI node
3077  *
3078  * NOTE: The function does not track individual clients of the framework
3079  * and is expected to be maintained by caller of TI SCI protocol library.
3080  * ti_sci_put_handle must be balanced with successful ti_sci_get_by_phandle
3081  * Return: pointer to handle if successful, else:
3082  * -EPROBE_DEFER if the instance is not ready
3083  * -ENODEV if the required node handler is missing
3084  * -EINVAL if invalid conditions are encountered.
3085  */
3086 const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
3087 						  const char *property)
3088 {
3089 	struct ti_sci_handle *handle = NULL;
3090 	struct device_node *ti_sci_np;
3091 	struct ti_sci_info *info;
3092 	struct list_head *p;
3093 
3094 	if (!np) {
3095 		pr_err("I need a device pointer\n");
3096 		return ERR_PTR(-EINVAL);
3097 	}
3098 
3099 	ti_sci_np = of_parse_phandle(np, property, 0);
3100 	if (!ti_sci_np)
3101 		return ERR_PTR(-ENODEV);
3102 
3103 	mutex_lock(&ti_sci_list_mutex);
3104 	list_for_each(p, &ti_sci_list) {
3105 		info = list_entry(p, struct ti_sci_info, node);
3106 		if (ti_sci_np == info->dev->of_node) {
3107 			handle = &info->handle;
3108 			info->users++;
3109 			break;
3110 		}
3111 	}
3112 	mutex_unlock(&ti_sci_list_mutex);
3113 	of_node_put(ti_sci_np);
3114 
3115 	if (!handle)
3116 		return ERR_PTR(-EPROBE_DEFER);
3117 
3118 	return handle;
3119 }
3120 EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
3121 
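/*
 * Usage sketch (property name is illustrative; the real name comes from
 * the consumer's DT binding): with a consumer node such as
 *
 *	client-node {
 *		ti,sci = <&dmsc>;
 *	};
 *
 * the handle is obtained with:
 *
 *	handle = ti_sci_get_by_phandle(np, "ti,sci");
 */
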
3122 /**
3123  * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
3124  * @dev:	Device pointer requesting TISCI handle
3125  * @property:	property name containing phandle on TISCI node
3126  *
3127  * NOTE: This releases the handle once the device resources are
3128  * no longer needed. MUST NOT BE released with ti_sci_put_handle.
3129  * The function does not track individual clients of the framework
3130  * and is expected to be maintained by caller of TI SCI protocol library.
3131  *
3132  * Return: pointer to handle if successful, else corresponding error pointer.
3133  */
3134 const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
3135 						       const char *property)
3136 {
3137 	const struct ti_sci_handle *handle;
3138 	const struct ti_sci_handle **ptr;
3139 
3140 	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
3141 	if (!ptr)
3142 		return ERR_PTR(-ENOMEM);
3143 	handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
3144 
3145 	if (!IS_ERR(handle)) {
3146 		*ptr = handle;
3147 		devres_add(dev, ptr);
3148 	} else {
3149 		devres_free(ptr);
3150 	}
3151 
3152 	return handle;
3153 }
3154 EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
3155 
3156 /**
3157  * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
3158  * @res:	Pointer to the TISCI resource
3159  *
3160  * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
3161  */
3162 u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
3163 {
3164 	unsigned long flags;
3165 	u16 set, free_bit;
3166 
3167 	raw_spin_lock_irqsave(&res->lock, flags);
3168 	for (set = 0; set < res->sets; set++) {
3169 		free_bit = find_first_zero_bit(res->desc[set].res_map,
3170 					       res->desc[set].num);
3171 		if (free_bit != res->desc[set].num) {
3172 			set_bit(free_bit, res->desc[set].res_map);
3173 			raw_spin_unlock_irqrestore(&res->lock, flags);
3174 			return res->desc[set].start + free_bit;
3175 		}
3176 	}
3177 	raw_spin_unlock_irqrestore(&res->lock, flags);
3178 
3179 	return TI_SCI_RESOURCE_NULL;
3180 }
3181 EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
3182 
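/*
 * Usage sketch: grab one free resource from the pool and hand it back
 * with ti_sci_release_resource() below when done:
 *
 *	u16 id = ti_sci_get_free_resource(res);
 *
 *	if (id == TI_SCI_RESOURCE_NULL)
 *		return -ENOSPC; // error code is the caller's choice
 *	...
 *	ti_sci_release_resource(res, id);
 */
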
3183 /**
3184  * ti_sci_release_resource() - Release a resource from TISCI resource.
3185  * @res:	Pointer to the TISCI resource
3186  * @id:		Resource id to be released.
3187  */
3188 void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
3189 {
3190 	unsigned long flags;
3191 	u16 set;
3192 
3193 	raw_spin_lock_irqsave(&res->lock, flags);
3194 	for (set = 0; set < res->sets; set++) {
3195 		if (res->desc[set].start <= id &&
3196 		    (res->desc[set].num + res->desc[set].start) > id)
3197 			clear_bit(id - res->desc[set].start,
3198 				  res->desc[set].res_map);
3199 	}
3200 	raw_spin_unlock_irqrestore(&res->lock, flags);
3201 }
3202 EXPORT_SYMBOL_GPL(ti_sci_release_resource);
3203 
3204 /**
3205  * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
3206  * @res:	Pointer to the TISCI resource
3207  *
3208  * Return: Total number of available resources.
3209  */
3210 u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
3211 {
3212 	u32 set, count = 0;
3213 
3214 	for (set = 0; set < res->sets; set++)
3215 		count += res->desc[set].num;
3216 
3217 	return count;
3218 }
3219 EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
3220 
3221 /**
3222  * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
3223  * @handle:	TISCI handle
3224  * @dev:	Device pointer to which the resource is assigned
3225  * @dev_id:	TISCI device id to which the resource is assigned
3226  * @of_prop:	property name by which the resource are represented
3227  *
3228  * Return: Pointer to ti_sci_resource if all went well else appropriate
3229  *	   error pointer.
3230  */
3231 struct ti_sci_resource *
3232 devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
3233 			    struct device *dev, u32 dev_id, char *of_prop)
3234 {
3235 	struct ti_sci_resource *res;
3236 	bool valid_set = false;
3237 	u32 resource_subtype;
3238 	int i, ret;
3239 
3240 	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
3241 	if (!res)
3242 		return ERR_PTR(-ENOMEM);
3243 
3244 	ret = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
3245 					      sizeof(u32));
3246 	if (ret < 0) {
3247 		dev_err(dev, "%s resource type ids not available\n", of_prop);
3248 		return ERR_PTR(ret);
3249 	}
3250 	res->sets = ret;
3251 
3252 	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
3253 				 GFP_KERNEL);
3254 	if (!res->desc)
3255 		return ERR_PTR(-ENOMEM);
3256 
3257 	for (i = 0; i < res->sets; i++) {
3258 		ret = of_property_read_u32_index(dev_of_node(dev), of_prop, i,
3259 						 &resource_subtype);
3260 		if (ret)
3261 			return ERR_PTR(-EINVAL);
3262 
3263 		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
3264 							resource_subtype,
3265 							&res->desc[i].start,
3266 							&res->desc[i].num);
3267 		if (ret) {
3268 			dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
3269 				dev_id, resource_subtype);
3270 			res->desc[i].start = 0;
3271 			res->desc[i].num = 0;
3272 			continue;
3273 		}
3274 
3275 		dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
3276 			dev_id, resource_subtype, res->desc[i].start,
3277 			res->desc[i].num);
3278 
3279 		valid_set = true;
3280 		res->desc[i].res_map =
3281 			devm_kcalloc(dev, BITS_TO_LONGS(res->desc[i].num),
3282 				     sizeof(*res->desc[i].res_map), GFP_KERNEL);
3283 		if (!res->desc[i].res_map)
3284 			return ERR_PTR(-ENOMEM);
3285 	}
3286 	raw_spin_lock_init(&res->lock);
3287 
3288 	if (valid_set)
3289 		return res;
3290 
3291 	return ERR_PTR(-EINVAL);
3292 }
3293 
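/*
 * Usage sketch (property name and dev_id are illustrative): the client
 * lists the resource subtypes it wants in a u32 array property, e.g.
 *
 *	ti,sci-rm-range-vint = <0x0a>;
 *
 * and maps them with:
 *
 *	res = devm_ti_sci_get_of_resource(handle, dev, dev_id,
 *					  "ti,sci-rm-range-vint");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */
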
3294 static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
3295 				void *cmd)
3296 {
3297 	struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
3298 	const struct ti_sci_handle *handle = &info->handle;
3299 
3300 	ti_sci_cmd_core_reboot(handle);
3301 
3302 	/* whether the call fails or passes, we should not still be running */
3303 	return NOTIFY_BAD;
3304 }
3305 
3306 /* Description for K2G */
3307 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
3308 	.default_host_id = 2,
3309 	/* Conservative duration */
3310 	.max_rx_timeout_ms = 1000,
3311 	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
3312 	.max_msgs = 20,
3313 	.max_msg_size = 64,
3314 	.rm_type_map = NULL,
3315 };
3316 
3317 static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
3318 	{.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */
3319 	{.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */
3320 	{.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */
3321 	{.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */
3322 	{.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */
3323 	{.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */
3324 	{.dev_id = 0, .type = 0x000}, /* end of table */
3325 };
3326 
3327 /* Description for AM654 */
3328 static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
3329 	.default_host_id = 12,
3330 	/* Conservative duration */
3331 	.max_rx_timeout_ms = 10000,
3332 	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
3333 	.max_msgs = 20,
3334 	.max_msg_size = 60,
3335 	.rm_type_map = ti_sci_am654_rm_type_map,
3336 };
3337 
3338 static const struct of_device_id ti_sci_of_match[] = {
3339 	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
3340 	{.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
3341 	{ /* Sentinel */ },
3342 };
3343 MODULE_DEVICE_TABLE(of, ti_sci_of_match);
3344 
3345 static int ti_sci_probe(struct platform_device *pdev)
3346 {
3347 	struct device *dev = &pdev->dev;
3348 	const struct of_device_id *of_id;
3349 	const struct ti_sci_desc *desc;
3350 	struct ti_sci_xfer *xfer;
3351 	struct ti_sci_info *info = NULL;
3352 	struct ti_sci_xfers_info *minfo;
3353 	struct mbox_client *cl;
3354 	int ret = -EINVAL;
3355 	int i;
3356 	int reboot = 0;
3357 	u32 h_id;
3358 
3359 	of_id = of_match_device(ti_sci_of_match, dev);
3360 	if (!of_id) {
3361 		dev_err(dev, "OF data missing\n");
3362 		return -EINVAL;
3363 	}
3364 	desc = of_id->data;
3365 
3366 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
3367 	if (!info)
3368 		return -ENOMEM;
3369 
3370 	info->dev = dev;
3371 	info->desc = desc;
3372 	ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
3373 	/* if the property is not present in DT, use a default from desc */
3374 	if (ret < 0) {
3375 		info->host_id = info->desc->default_host_id;
3376 	} else {
3377 		if (!h_id) {
3378 			dev_warn(dev, "Host ID 0 is reserved for firmware\n");
3379 			info->host_id = info->desc->default_host_id;
3380 		} else {
3381 			info->host_id = h_id;
3382 		}
3383 	}
3384 
3385 	reboot = of_property_read_bool(dev->of_node,
3386 				       "ti,system-reboot-controller");
3387 	INIT_LIST_HEAD(&info->node);
3388 	minfo = &info->minfo;
3389 
3390 	/*
3391 	 * Pre-allocate messages
3392 	 * NEVER allocate more than what we can indicate in hdr.seq
3393 	 * if we have data description bug, force a fix..
3394 	 */
3395 	if (WARN_ON(desc->max_msgs >=
3396 		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
3397 		return -EINVAL;
3398 
3399 	minfo->xfer_block = devm_kcalloc(dev,
3400 					 desc->max_msgs,
3401 					 sizeof(*minfo->xfer_block),
3402 					 GFP_KERNEL);
3403 	if (!minfo->xfer_block)
3404 		return -ENOMEM;
3405 
3406 	minfo->xfer_alloc_table = devm_kcalloc(dev,
3407 					       BITS_TO_LONGS(desc->max_msgs),
3408 					       sizeof(unsigned long),
3409 					       GFP_KERNEL);
3410 	if (!minfo->xfer_alloc_table)
3411 		return -ENOMEM;
3412 	bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);
3413 
3414 	/* Point each transfer's buffer pointer at its pre-allocated buffer */
3415 	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
3416 		xfer->xfer_buf = devm_kzalloc(dev, desc->max_msg_size,
3417 					      GFP_KERNEL);
3418 		if (!xfer->xfer_buf)
3419 			return -ENOMEM;
3420 
3421 		xfer->tx_message.buf = xfer->xfer_buf;
3422 		init_completion(&xfer->done);
3423 	}
3424 
3425 	ret = ti_sci_debugfs_create(pdev, info);
3426 	if (ret)
3427 		dev_warn(dev, "Failed to create debug file\n");
3428 
3429 	platform_set_drvdata(pdev, info);
3430 
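	/*
	 * Mailbox client configuration: sends do not block, and TX
	 * completion is inferred from the received response rather
	 * than reported by the mailbox framework (knows_txdone).
	 */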
3431 	cl = &info->cl;
3432 	cl->dev = dev;
3433 	cl->tx_block = false;
3434 	cl->rx_callback = ti_sci_rx_callback;
3435 	cl->knows_txdone = true;
3436 
3437 	spin_lock_init(&minfo->xfer_lock);
3438 	sema_init(&minfo->sem_xfer_count, desc->max_msgs);
3439 
3440 	info->chan_rx = mbox_request_channel_byname(cl, "rx");
3441 	if (IS_ERR(info->chan_rx)) {
3442 		ret = PTR_ERR(info->chan_rx);
3443 		goto out;
3444 	}
3445 
3446 	info->chan_tx = mbox_request_channel_byname(cl, "tx");
3447 	if (IS_ERR(info->chan_tx)) {
3448 		ret = PTR_ERR(info->chan_tx);
3449 		goto out;
3450 	}
3451 	ret = ti_sci_cmd_get_revision(info);
3452 	if (ret) {
3453 		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
3454 		goto out;
3455 	}
3456 
3457 	ti_sci_setup_ops(info);
3458 
3459 	if (reboot) {
3460 		info->nb.notifier_call = tisci_reboot_handler;
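		/* 128 is the default priority for restart handlers */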
3461 		info->nb.priority = 128;
3462 
3463 		ret = register_restart_handler(&info->nb);
3464 		if (ret) {
3465 			dev_err(dev, "reboot registration failed (%d)\n", ret);
3466 			goto out;
3467 		}
3468 	}
3469 
3470 	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
3471 		 info->handle.version.abi_major, info->handle.version.abi_minor,
3472 		 info->handle.version.firmware_revision,
3473 		 info->handle.version.firmware_description);
3474 
3475 	mutex_lock(&ti_sci_list_mutex);
3476 	list_add_tail(&info->node, &ti_sci_list);
3477 	mutex_unlock(&ti_sci_list_mutex);
3478 
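	/* Register platform devices for any child nodes of this controller */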
3479 	return of_platform_populate(dev->of_node, NULL, NULL, dev);
3480 out:
3481 	if (!IS_ERR(info->chan_tx))
3482 		mbox_free_channel(info->chan_tx);
3483 	if (!IS_ERR(info->chan_rx))
3484 		mbox_free_channel(info->chan_rx);
3485 	debugfs_remove(info->d);
3486 	return ret;
3487 }
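
/*
 * Illustrative sketch only (hypothetical helper, not this driver's code):
 * how one of the transfer slots pre-allocated in ti_sci_probe() above is
 * typically claimed at message-send time. The counting semaphore bounds
 * the number of in-flight messages to max_msgs, and the reserved bitmap
 * index doubles as the sequence number matched by the rx callback.
 */
static inline struct ti_sci_xfer *
ti_sci_example_claim_xfer(struct ti_sci_xfers_info *minfo, int max_msgs)
{
	unsigned long flags;
	unsigned long bit;

	/* Wait until one of the max_msgs transfer slots is available */
	if (down_interruptible(&minfo->sem_xfer_count))
		return ERR_PTR(-ERESTARTSYS);

	/* Reserve a free slot; its index serves as the message sequence id */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit = find_first_zero_bit(minfo->xfer_alloc_table, max_msgs);
	set_bit(bit, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return &minfo->xfer_block[bit];
}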
3488 
3489 static int ti_sci_remove(struct platform_device *pdev)
3490 {
3491 	struct ti_sci_info *info;
3492 	struct device *dev = &pdev->dev;
3493 	int ret = 0;
3494 
3495 	of_platform_depopulate(dev);
3496 
3497 	info = platform_get_drvdata(pdev);
3498 
3499 	if (info->nb.notifier_call)
3500 		unregister_restart_handler(&info->nb);
3501 
3502 	mutex_lock(&ti_sci_list_mutex);
3503 	if (info->users)
3504 		ret = -EBUSY;
3505 	else
3506 		list_del(&info->node);
3507 	mutex_unlock(&ti_sci_list_mutex);
3508 
3509 	if (!ret) {
3510 		ti_sci_debugfs_destroy(pdev, info);
3511 
3512 		/* Safe to free channels since no more users */
3513 		mbox_free_channel(info->chan_tx);
3514 		mbox_free_channel(info->chan_rx);
3515 	}
3516 
3517 	return ret;
3518 }
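
/*
 * Illustrative sketch only (hypothetical client, not part of this file):
 * a consumer driver resolves this provider through its "ti,sci" phandle
 * with devm_ti_sci_get_handle(), which also increments the info->users
 * count that ti_sci_remove() above checks before tearing things down.
 */
static inline int ti_sci_example_client_probe(struct device *dev, u32 dev_id)
{
	const struct ti_sci_handle *handle;

	/* Look up the TI SCI handle; pins this provider via info->users */
	handle = devm_ti_sci_get_handle(dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Ask system firmware to power up the device identified by dev_id */
	return handle->ops.dev_ops.get_device(handle, dev_id);
}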
3519 
3520 static struct platform_driver ti_sci_driver = {
3521 	.probe = ti_sci_probe,
3522 	.remove = ti_sci_remove,
3523 	.driver = {
3524 		   .name = "ti-sci",
3525 		   .of_match_table = of_match_ptr(ti_sci_of_match),
3526 	},
3527 };
3528 module_platform_driver(ti_sci_driver);
3529 
3530 MODULE_LICENSE("GPL v2");
3531 MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
3532 MODULE_AUTHOR("Nishanth Menon");
3533 MODULE_ALIAS("platform:ti-sci");
3534