// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments System Control Interface Protocol Driver
 *
 * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
 *	Nishanth Menon
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/reboot.h>

#include "ti_sci.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
static DEFINE_MUTEX(ti_sci_list_mutex);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 * @xfer_buf:	Preallocated buffer to store receive message
 *		Since we work with a request-ACK protocol, we can
 *		reuse the same buffer for the rx path as we
 *		use for the tx path.
 * @done:	completion event
 */
struct ti_sci_xfer {
	struct ti_msgmgr_message tx_message;
	u8 rx_len;
	u8 *xfer_buf;
	struct completion done;
};

/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count:	Counting semaphore for managing the maximum number
 *			of simultaneous messages.
 * @xfer_block:		Preallocated message array
 * @xfer_alloc_table:	Bitmap table for allocated messages.
 *			Index of this bitmap table is also used for message
 *			sequence identifier.
 * @xfer_lock:		Protection for message allocation
 */
struct ti_sci_xfers_info {
	struct semaphore sem_xfer_count;
	struct ti_sci_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation */
	spinlock_t xfer_lock;
};

/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in milliseconds)
 * @max_msgs: Maximum number of messages that can be pending
 *		  simultaneously in the system
 * @max_msg_size: Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};

/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @nb:	Reboot Notifier block
 * @d:		Debugfs file entry
 * @debug_region: Memory region where the debug messages are available
 * @debug_region_size: Debug region size
 * @debug_buffer: Buffer allocated to copy debug messages.
 * @handle:	Instance of TI SCI handle to send to clients.
 * @cl:		Mailbox Client
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @minfo:	Message info
 * @node:	list head
 * @host_id:	Host ID
 * @users:	Number of users of this instance
 * @is_suspending: Flag set to indicate we are in the suspend path.
 */
struct ti_sci_info {
	struct device *dev;
	struct notifier_block nb;
	const struct ti_sci_desc *desc;
	struct dentry *d;
	void __iomem *debug_region;
	char *debug_buffer;
	size_t debug_region_size;
	struct ti_sci_handle handle;
	struct mbox_client cl;
	struct mbox_chan *chan_tx;
	struct mbox_chan *chan_rx;
	struct ti_sci_xfers_info minfo;
	struct list_head node;
	u8 host_id;
	/* protected by ti_sci_list_mutex */
	int users;
	bool is_suspending;
};

#define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)

#ifdef CONFIG_DEBUG_FS

/**
 * ti_sci_debug_show() - Helper to dump the debug log
 * @s:	sequence file pointer
 * @unused:	unused.
 *
 * Return: 0
 */
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
	struct ti_sci_info *info = s->private;

	memcpy_fromio(info->debug_buffer, info->debug_region,
		      info->debug_region_size);
	/*
	 * We don't trust firmware to NUL-terminate the last byte (hence
	 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
	 * specific data format for debug messages, we just present the data
	 * in the buffer as is - we expect the messages to be self-explanatory.
	 */
	seq_puts(s, info->debug_buffer);
	return 0;
}

/* Provide the log file operations interface */
DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);

/**
 * ti_sci_debugfs_create() - Create log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 *
 * Return: 0 if all went fine, else corresponding error.
 */
static int ti_sci_debugfs_create(struct platform_device *pdev,
				 struct ti_sci_info *info)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	char debug_name[50] = "ti_sci_debug@";

	/* Debug region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "debug_messages");
	info->debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(info->debug_region))
		return 0;
	info->debug_region_size = resource_size(res);

	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
					  sizeof(char), GFP_KERNEL);
	if (!info->debug_buffer)
		return -ENOMEM;
	/* Set up NUL termination */
	info->debug_buffer[info->debug_region_size] = 0;

	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
					      sizeof(debug_name) -
					      sizeof("ti_sci_debug@")),
				      0444, NULL, info, &ti_sci_debug_fops);
	if (IS_ERR(info->d))
		return PTR_ERR(info->d);

	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
		info->debug_region, info->debug_region_size, res);
	return 0;
}

/**
 * ti_sci_debugfs_destroy() - clean up log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 */
static void ti_sci_debugfs_destroy(struct platform_device *pdev,
				   struct ti_sci_info *info)
{
	if (IS_ERR(info->debug_region))
		return;

	debugfs_remove(info->d);
}
#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
					struct ti_sci_info *info)
{
	return 0;
}

static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
					  struct ti_sci_info *info)
{
}
#endif /* CONFIG_DEBUG_FS */

/**
 * ti_sci_dump_header_dbg() - Helper to dump a message header.
 * @dev:	Device pointer corresponding to the SCI entity
 * @hdr:	pointer to header.
 */
static inline void ti_sci_dump_header_dbg(struct device *dev,
					  struct ti_sci_msg_hdr *hdr)
{
	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
		hdr->type, hdr->host, hdr->seq, hdr->flags);
}

/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl:	client pointer
 * @m:	mailbox message
 *
 * Processes one received message, copies it to the appropriate transfer
 * buffer and signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence it should be
 * as optimal as possible.
 */
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
	struct device *dev = info->dev;
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_msgmgr_message *mbox_msg = m;
	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
	struct ti_sci_xfer *xfer;
	u8 xfer_id;

	xfer_id = hdr->seq;

	/*
	 * Are we even expecting this?
	 * NOTE: barriers were implicit in locks used for modifying the bitmap
	 */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	/* Is the message of valid length? */
	if (mbox_msg->len > info->desc->max_msg_size) {
		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
			mbox_msg->len, info->desc->max_msg_size);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}
	if (mbox_msg->len < xfer->rx_len) {
		dev_err(dev, "Recv xfer %zu < expected %d length\n",
			mbox_msg->len, xfer->rx_len);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}

	ti_sci_dump_header_dbg(dev, hdr);
	/* Take a copy into the rx buffer. */
	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
	complete(&xfer->done);
}

/**
 * ti_sci_get_one_xfer() - Allocate one message
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size
 *
 * Helper function used by the various command functions that are
 * exposed to clients of this driver for allocating a message.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCI entity. Further, this also holds a spinlock to maintain integrity
 * of internal data structures.
 *
 * Return: Valid ti_sci_xfer pointer if all went fine, else corresponding
 *	   error pointer.
 */
static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
					       u16 msg_type, u32 msg_flags,
					       size_t tx_message_size,
					       size_t rx_message_size)
{
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_sci_xfer *xfer;
	struct ti_sci_msg_hdr *hdr;
	unsigned long flags;
	unsigned long bit_pos;
	u8 xfer_id;
	int ret;
	int timeout;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
		return ERR_PTR(-ERANGE);

	/*
	 * Ensure we have only a controlled number of pending messages.
	 * Ideally, we might just have to wait for a single message; be
	 * conservative and wait 5 times that..
	 */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
	ret = down_timeout(&minfo->sem_xfer_count, timeout);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msgs);
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/*
	 * We already ensured in probe that the maximum number of messages
	 * fits in hdr.seq - NOTE: this improves access latencies to
	 * predictable O(1) access, BUT it opens us to risk if the remote
	 * misbehaves with corrupted message sequence responses. If that
	 * happens, we are going to be messed up anyway..
	 */
	xfer_id = (u8)bit_pos;

	xfer = &minfo->xfer_block[xfer_id];

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer->tx_message.len = tx_message_size;
	xfer->tx_message.chan_rx = info->chan_rx;
	xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms;
	xfer->rx_len = (u8)rx_message_size;

	reinit_completion(&xfer->done);

	hdr->seq = xfer_id;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}

/**
 * ti_sci_put_one_xfer() - Release a message
 * @minfo:	transfer info pointer
 * @xfer:	message that was reserved by ti_sci_get_one_xfer
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
				struct ti_sci_xfer *xfer)
{
	unsigned long flags;
	struct ti_sci_msg_hdr *hdr;
	u8 xfer_id;

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer_id = hdr->seq;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might get away with an smp_mb() and no lock here, but
	 * just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer_id, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* Increment the count for the next user to get through */
	up(&minfo->sem_xfer_count);
}

/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response; if there was a transmit
 *	   error, the corresponding error; else 0 if all went well.
 */
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
				 struct ti_sci_xfer *xfer)
{
	int ret;
	int timeout;
	struct device *dev = info->dev;
	bool done_state = true;

	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
	if (ret < 0)
		return ret;

	ret = 0;

	if (!info->is_suspending) {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout))
			ret = -ETIMEDOUT;
	} else {
		/*
		 * If we are suspending, we cannot use wait_for_completion_timeout
		 * during noirq phase, so we must manually poll the completion.
		 */
		ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
					       done_state, 1,
					       info->desc->max_rx_timeout_ms * 1000,
					       false, &xfer->done);
	}

	if (ret == -ETIMEDOUT)
		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
			(void *)_RET_IP_);

	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(info->chan_tx, ret);

	return ret;
}
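
/*
 * Illustrative sketch (not part of the driver): every command function
 * below wraps ti_sci_do_xfer() in the same allocate/transfer/release
 * pattern; only the request/response types and fields differ. The shape,
 * with placeholder types, is roughly:
 *
 *	xfer = ti_sci_get_one_xfer(info, msg_type,
 *				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 *				   sizeof(*req), sizeof(*resp));
 *	if (IS_ERR(xfer))
 *		return PTR_ERR(xfer);
 *	req = (struct some_req *)xfer->xfer_buf;
 *	... fill in the request fields ...
 *	ret = ti_sci_do_xfer(info, xfer);
 *	if (!ret && !ti_sci_is_response_ack(xfer->xfer_buf))
 *		ret = -ENODEV;
 *	ti_sci_put_one_xfer(&info->minfo, xfer);
 *	return ret;
 */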

/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @info:	Pointer to SCI entity information
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
{
	struct device *dev = info->dev;
	struct ti_sci_handle *handle = &info->handle;
	struct ti_sci_version_info *ver = &handle->version;
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_xfer *xfer;
	int ret;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(struct ti_sci_msg_hdr),
				   sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	strncpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}
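
/*
 * Illustrative sketch (not part of the driver): the revision data cached
 * above is available to clients straight from the handle; e.g. assuming a
 * client with its own struct device *dev:
 *
 *	const struct ti_sci_version_info *ver = &handle->version;
 *
 *	dev_info(dev, "ABI %u.%u, firmware rev 0x%04x '%s'\n",
 *		 ver->abi_major, ver->abi_minor,
 *		 ver->firmware_revision, ver->firmware_description);
 */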

/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message check
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static inline bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}

/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to set up for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
	req->id = id;
	req->state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_device_state *req;
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
	req->id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;
fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 *			     that can be shared with other hosts.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_get_device_exclusive() - command to request for device managed by
 *				       TISCI that is exclusively owned by the
 *				       requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
					   u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request to idle the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
 *					TISCI that is exclusively owned by
 *					requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request to idle the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
					    u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Release the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}
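
/*
 * Illustrative sketch (not part of the driver): clients reach the above
 * through the ops embedded in the handle rather than by calling these
 * statics. Powering a device up and back down (device ID 42 and the
 * busy-work function are made-up placeholders):
 *
 *	const struct ti_sci_dev_ops *dops = &handle->ops.dev_ops;
 *	int ret;
 *
 *	ret = dops->get_device(handle, 42);
 *	if (ret)
 *		return ret;
 *	use_the_device();
 *	dops->put_device(handle, 42);
 */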

/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 * appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be ON
 * @curr_state:	true if currently ON and active
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
				bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}
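
/*
 * Illustrative sketch (not part of the driver): the paired booleans let a
 * client tell a requested state apart from an achieved one (device ID 42
 * is a made-up placeholder):
 *
 *	const struct ti_sci_dev_ops *dops = &handle->ops.dev_ops;
 *	bool requested, on;
 *
 *	ret = dops->is_on(handle, 42, &requested, &on);
 *	if (!ret && requested && !on)
 *		... the device was asked to power on but has not reached
 *		the ON hardware state yet, likely still transitioning ...
 */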

/**
 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @curr_state:	true if currently transitioning.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
				   bool *curr_state)
{
	int ret;
	u8 state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}

/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_resets *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
	req->id = id;
	req->resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 *				    by TISCI
 * @handle:		Pointer to TISCI handle
 * @id:			Device Identifier
 * @reset_state:	Pointer to reset state to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 *reset_state)
{
	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
				       NULL);
}

/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
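	/*
	 * Clock identifiers are natively 8 bits in the message; the value
	 * 255 acts as an escape marker telling firmware to use the extended
	 * 32-bit clk_id_32 field instead. The same encoding is used by all
	 * the clock commands below.
	 */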
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @programmed_state:	State requested for clock to move to
 * @current_state:	State that the clock is currently in
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
				      u32 dev_id, u32 clk_id,
				      u8 *programmed_state, u8 *current_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_state *req;
	struct ti_sci_msg_resp_get_clock_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!programmed_state && !current_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (programmed_state)
		*programmed_state = resp->programmed_state;
	if (current_state)
		*current_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
 * @can_change_freq: 'true' if frequency change is desired, else 'false'
 * @enable_input_term: 'true' if input termination is desired, else 'false'
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool needs_ssc,
				bool can_change_freq, bool enable_input_term)
{
	u32 flags = 0;

	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
				      MSG_CLOCK_SW_STATE_REQ);
}

/**
 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
				 u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_UNREQ);
}

/**
 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
				u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_AUTO);
}
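
/*
 * Illustrative sketch (not part of the driver): clock control follows the
 * same get/idle/put discipline through the exported clk ops (device and
 * clock IDs are made-up placeholders):
 *
 *	const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
 *
 *	ret = cops->get_clock(handle, 42, 3, false, false, false);
 *	... use the clock ...
 *	cops->idle_clock(handle, 42, 3);
 *	... done with it entirely ...
 *	cops->put_clock(handle, 42, 3);
 */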

/**
 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is auto managed
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id, bool *req_state)
{
	u8 state = 0;
	int ret;

	if (!req_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
	if (ret)
		return ret;

	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_on() - Is the clock ON
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and enabled
 * @curr_state: state indicating if the clock is ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_off() - Is the clock OFF
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and disabled
 * @curr_state: state indicating if the clock is NOT ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
				 u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Parent clock identifier to set
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_parent *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	if (parent_id < 255) {
		req->parent_id = parent_id;
	} else {
		req->parent_id = 255;
		req->parent_id_32 = parent_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Current clock parent
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 *parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_parent *req;
	struct ti_sci_msg_resp_get_clock_parent *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !parent_id)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->parent_id < 255)
			*parent_id = resp->parent_id;
		else
			*parent_id = resp->parent_id_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @num_parents: Returns the number of parents of the current clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
					  u32 dev_id, u32 clk_id,
					  u32 *num_parents)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_num_parents *req;
	struct ti_sci_msg_resp_get_clock_num_parents *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !num_parents)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->num_parents < 255)
			*num_parents = resp->num_parents;
		else
			*num_parents = resp->num_parents_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		selected as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @match_freq:	Frequency match in Hz response.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
					 u32 dev_id, u32 clk_id, u64 min_freq,
					 u64 target_freq, u64 max_freq,
					 u64 *match_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_query_clock_freq *req;
	struct ti_sci_msg_resp_query_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !match_freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*match_freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
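
/*
 * Illustrative sketch (not part of the driver): a client would typically
 * query for the best achievable frequency before committing to it (IDs
 * and frequencies are made-up placeholders):
 *
 *	const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
 *	u64 match;
 *
 *	ret = cops->get_best_match_freq(handle, 42, 3, 96000000, 100000000,
 *					104000000, &match);
 *	if (!ret)
 *		ret = cops->set_freq(handle, 42, 3, 96000000, match,
 *				     104000000);
 */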

/**
 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		selected as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 min_freq,
				   u64 target_freq, u64 max_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_freq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_freq() - Get current frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @freq:	Current frequency in Hz
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 *freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_freq *req;
	struct ti_sci_msg_resp_get_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

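/**
 * ti_sci_cmd_core_reboot() - Command to request a reset of the SoC
 * @handle:	pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */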
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_reboot *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		ret = 0;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
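
/*
 * Illustrative sketch (not part of the driver): this is exported to
 * clients as the core op, e.g. from a restart handler:
 *
 *	ret = handle->ops.core_ops.reboot_device(handle);
 */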
1714 
1715 /**
1716  * ti_sci_get_resource_range - Helper to get a range of resources assigned
1717  *			       to a host. Resource is uniquely identified by
1718  *			       type and subtype.
1719  * @handle:		Pointer to TISCI handle.
1720  * @dev_id:		TISCI device ID.
1721  * @subtype:		Resource assignment subtype that is being requested
1722  *			from the given device.
1723  * @s_host:		Host processor ID to which the resources are allocated
1724  * @desc:		Pointer to ti_sci_resource_desc to be updated with the
1725  *			resource range start index and number of resources
1726  *
1727  * Return: 0 if all went fine, else return appropriate error.
1728  */
1729 static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1730 				     u32 dev_id, u8 subtype, u8 s_host,
1731 				     struct ti_sci_resource_desc *desc)
1732 {
1733 	struct ti_sci_msg_resp_get_resource_range *resp;
1734 	struct ti_sci_msg_req_get_resource_range *req;
1735 	struct ti_sci_xfer *xfer;
1736 	struct ti_sci_info *info;
1737 	struct device *dev;
1738 	int ret = 0;
1739 
1740 	if (IS_ERR(handle))
1741 		return PTR_ERR(handle);
1742 	if (!handle || !desc)
1743 		return -EINVAL;
1744 
1745 	info = handle_to_ti_sci_info(handle);
1746 	dev = info->dev;
1747 
1748 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1749 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1750 				   sizeof(*req), sizeof(*resp));
1751 	if (IS_ERR(xfer)) {
1752 		ret = PTR_ERR(xfer);
1753 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1754 		return ret;
1755 	}
1756 
1757 	req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
1758 	req->secondary_host = s_host;
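	/* The TISCI device ID doubles as the resource type in this request. */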
1759 	req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
1760 	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1761 
1762 	ret = ti_sci_do_xfer(info, xfer);
1763 	if (ret) {
1764 		dev_err(dev, "Mbox send fail %d\n", ret);
1765 		goto fail;
1766 	}
1767 
1768 	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
1769 
1770 	if (!ti_sci_is_response_ack(resp)) {
1771 		ret = -ENODEV;
1772 	} else if (!resp->range_num && !resp->range_num_sec) {
1773 		/* Neither of the two resource ranges is valid */
1774 		ret = -ENODEV;
1775 	} else {
1776 		desc->start = resp->range_start;
1777 		desc->num = resp->range_num;
1778 		desc->start_sec = resp->range_start_sec;
1779 		desc->num_sec = resp->range_num_sec;
1780 	}
1781 
1782 fail:
1783 	ti_sci_put_one_xfer(&info->minfo, xfer);
1784 
1785 	return ret;
1786 }
1787 
1788 /**
1789  * ti_sci_cmd_get_resource_range - Get a range of resources assigned to the
1790  *				   host that is the same as the TI SCI interface host.
1791  * @handle:		Pointer to TISCI handle.
1792  * @dev_id:		TISCI device ID.
1793  * @subtype:		Resource assignment subtype that is being requested
1794  *			from the given device.
1795  * @desc:		Pointer to ti_sci_resource_desc to be updated with the
1796  *			resource range start index and number of resources
1797  *
1798  * Return: 0 if all went fine, else return appropriate error.
1799  */
1800 static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1801 					 u32 dev_id, u8 subtype,
1802 					 struct ti_sci_resource_desc *desc)
1803 {
1804 	return ti_sci_get_resource_range(handle, dev_id, subtype,
1805 					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1806 					 desc);
1807 }
1808 
1809 /**
1810  * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
1811  *					      assigned to a specified host.
1812  * @handle:		Pointer to TISCI handle.
1813  * @dev_id:		TISCI device ID.
1814  * @subtype:		Resource assignment subtype that is being requested
1815  *			from the given device.
1816  * @s_host:		Host processor ID to which the resources are allocated
1817  * @desc:		Pointer to ti_sci_resource_desc to be updated with the
1818  *			resource range start index and number of resources
1819  *
1820  * Return: 0 if all went fine, else return appropriate error.
1821  */
1822 static
1823 int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1824 					     u32 dev_id, u8 subtype, u8 s_host,
1825 					     struct ti_sci_resource_desc *desc)
1826 {
1827 	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, desc);
1828 }
1829 
1830 /**
1831  * ti_sci_manage_irq() - Helper API to configure/release the irq route between
1832  *			 the requested source and destination
1833  * @handle:		Pointer to TISCI handle.
1834  * @valid_params:	Bit fields defining the validity of certain params
1835  * @src_id:		Device ID of the IRQ source
1836  * @src_index:		IRQ source index within the source device
1837  * @dst_id:		Device ID of the IRQ destination
1838  * @dst_host_irq:	IRQ number of the destination device
1839  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1840  * @vint:		Virtual interrupt to be used within the IA
1841  * @global_event:	Global event number to be used for the requesting event
1842  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1843  * @s_host:		Secondary host ID for which the irq/event is being
1844  *			requested.
1845  * @type:		Request type irq set or release.
1846  *
1847  * Return: 0 if all went fine, else return appropriate error.
1848  */
1849 static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
1850 			     u32 valid_params, u16 src_id, u16 src_index,
1851 			     u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
1852 			     u16 global_event, u8 vint_status_bit, u8 s_host,
1853 			     u16 type)
1854 {
1855 	struct ti_sci_msg_req_manage_irq *req;
1856 	struct ti_sci_msg_hdr *resp;
1857 	struct ti_sci_xfer *xfer;
1858 	struct ti_sci_info *info;
1859 	struct device *dev;
1860 	int ret = 0;
1861 
1862 	if (IS_ERR(handle))
1863 		return PTR_ERR(handle);
1864 	if (!handle)
1865 		return -EINVAL;
1866 
1867 	info = handle_to_ti_sci_info(handle);
1868 	dev = info->dev;
1869 
1870 	xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1871 				   sizeof(*req), sizeof(*resp));
1872 	if (IS_ERR(xfer)) {
1873 		ret = PTR_ERR(xfer);
1874 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1875 		return ret;
1876 	}
1877 	req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
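	/*
	 * All routing fields are filled in unconditionally; the firmware
	 * only consumes those flagged as valid in valid_params.
	 */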
1878 	req->valid_params = valid_params;
1879 	req->src_id = src_id;
1880 	req->src_index = src_index;
1881 	req->dst_id = dst_id;
1882 	req->dst_host_irq = dst_host_irq;
1883 	req->ia_id = ia_id;
1884 	req->vint = vint;
1885 	req->global_event = global_event;
1886 	req->vint_status_bit = vint_status_bit;
1887 	req->secondary_host = s_host;
1888 
1889 	ret = ti_sci_do_xfer(info, xfer);
1890 	if (ret) {
1891 		dev_err(dev, "Mbox send fail %d\n", ret);
1892 		goto fail;
1893 	}
1894 
1895 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1896 
1897 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1898 
1899 fail:
1900 	ti_sci_put_one_xfer(&info->minfo, xfer);
1901 
1902 	return ret;
1903 }
1904 
1905 /**
1906  * ti_sci_set_irq() - Helper API to configure the irq route between the
1907  *		      requested source and destination
1908  * @handle:		Pointer to TISCI handle.
1909  * @valid_params:	Bit fields defining the validity of certain params
1910  * @src_id:		Device ID of the IRQ source
1911  * @src_index:		IRQ source index within the source device
1912  * @dst_id:		Device ID of the IRQ destination
1913  * @dst_host_irq:	IRQ number of the destination device
1914  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1915  * @vint:		Virtual interrupt to be used within the IA
1916  * @global_event:	Global event number to be used for the requesting event
1917  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1918  * @s_host:		Secondary host ID for which the irq/event is being
1919  *			requested.
1920  *
1921  * Return: 0 if all went fine, else return appropriate error.
1922  */
1923 static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
1924 			  u16 src_id, u16 src_index, u16 dst_id,
1925 			  u16 dst_host_irq, u16 ia_id, u16 vint,
1926 			  u16 global_event, u8 vint_status_bit, u8 s_host)
1927 {
1928 	pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
1929 		 __func__, valid_params, src_id, src_index,
1930 		 dst_id, dst_host_irq, ia_id, vint, global_event,
1931 		 vint_status_bit);
1932 
1933 	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1934 				 dst_id, dst_host_irq, ia_id, vint,
1935 				 global_event, vint_status_bit, s_host,
1936 				 TI_SCI_MSG_SET_IRQ);
1937 }
1938 
1939 /**
1940  * ti_sci_free_irq() - Helper API to free the irq route between the
1941  *			   requested source and destination.
1942  * @handle:		Pointer to TISCI handle.
1943  * @valid_params:	Bit fields defining the validity of certain params
1944  * @src_id:		Device ID of the IRQ source
1945  * @src_index:		IRQ source index within the source device
1946  * @dst_id:		Device ID of the IRQ destination
1947  * @dst_host_irq:	IRQ number of the destination device
1948  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1949  * @vint:		Virtual interrupt to be used within the IA
1950  * @global_event:	Global event number to be used for the requesting event
1951  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1952  * @s_host:		Secondary host ID for which the irq/event is being
1953  *			requested.
1954  *
1955  * Return: 0 if all went fine, else return appropriate error.
1956  */
1957 static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
1958 			   u16 src_id, u16 src_index, u16 dst_id,
1959 			   u16 dst_host_irq, u16 ia_id, u16 vint,
1960 			   u16 global_event, u8 vint_status_bit, u8 s_host)
1961 {
1962 	pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
1963 		 __func__, valid_params, src_id, src_index,
1964 		 dst_id, dst_host_irq, ia_id, vint, global_event,
1965 		 vint_status_bit);
1966 
1967 	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1968 				 dst_id, dst_host_irq, ia_id, vint,
1969 				 global_event, vint_status_bit, s_host,
1970 				 TI_SCI_MSG_FREE_IRQ);
1971 }
1972 
1973 /**
1974  * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
1975  *			  source and destination.
1976  * @handle:		Pointer to TISCI handle.
1977  * @src_id:		Device ID of the IRQ source
1978  * @src_index:		IRQ source index within the source device
1979  * @dst_id:		Device ID of the IRQ destination
1980  * @dst_host_irq:	IRQ number of the destination device
1983  *
1984  * Return: 0 if all went fine, else return appropriate error.
1985  */
1986 static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
1987 			      u16 src_index, u16 dst_id, u16 dst_host_irq)
1988 {
1989 	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
1990 
1991 	return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
1992 			      dst_host_irq, 0, 0, 0, 0, 0);
1993 }
1994 
1995 /**
1996  * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
1997  *				requested source and Interrupt Aggregator.
1998  * @handle:		Pointer to TISCI handle.
1999  * @src_id:		Device ID of the IRQ source
2000  * @src_index:		IRQ source index within the source device
2001  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2002  * @vint:		Virtual interrupt to be used within the IA
2003  * @global_event:	Global event number to be used for the requesting event
2004  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2005  *
2006  * Return: 0 if all went fine, else return appropriate error.
2007  */
2008 static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
2009 				    u16 src_id, u16 src_index, u16 ia_id,
2010 				    u16 vint, u16 global_event,
2011 				    u8 vint_status_bit)
2012 {
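	/*
	 * An event route is described by the IA, vint, global event and
	 * status bit; no direct destination fields are needed here.
	 */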
2013 	u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
2014 			   MSG_FLAG_GLB_EVNT_VALID |
2015 			   MSG_FLAG_VINT_STS_BIT_VALID;
2016 
2017 	return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
2018 			      ia_id, vint, global_event, vint_status_bit, 0);
2019 }
2020 
2021 /**
2022  * ti_sci_cmd_free_irq() - Free a host irq route between the
2023  *			   requested source and destination.
2024  * @handle:		Pointer to TISCI handle.
2025  * @src_id:		Device ID of the IRQ source
2026  * @src_index:		IRQ source index within the source device
2027  * @dst_id:		Device ID of the IRQ destination
2028  * @dst_host_irq:	IRQ number of the destination device
2031  *
2032  * Return: 0 if all went fine, else return appropriate error.
2033  */
2034 static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
2035 			       u16 src_index, u16 dst_id, u16 dst_host_irq)
2036 {
2037 	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
2038 
2039 	return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
2040 			       dst_host_irq, 0, 0, 0, 0, 0);
2041 }
2042 
2043 /**
2044  * ti_sci_cmd_free_event_map() - Free an event map between the requested source
2045  *				 and Interrupt Aggregator.
2046  * @handle:		Pointer to TISCI handle.
2047  * @src_id:		Device ID of the IRQ source
2048  * @src_index:		IRQ source index within the source device
2049  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2050  * @vint:		Virtual interrupt to be used within the IA
2051  * @global_event:	Global event number to be used for the requesting event
2052  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2053  *
2054  * Return: 0 if all went fine, else return appropriate error.
2055  */
2056 static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
2057 				     u16 src_id, u16 src_index, u16 ia_id,
2058 				     u16 vint, u16 global_event,
2059 				     u8 vint_status_bit)
2060 {
2061 	u32 valid_params = MSG_FLAG_IA_ID_VALID |
2062 			   MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
2063 			   MSG_FLAG_VINT_STS_BIT_VALID;
2064 
2065 	return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
2066 			       ia_id, vint, global_event, vint_status_bit, 0);
2067 }
2068 
2069 /**
2070  * ti_sci_cmd_rm_ring_cfg() - Configure a NAVSS ring
2071  * @handle:	Pointer to TI SCI handle.
2072  * @params:	Pointer to ti_sci_msg_rm_ring_cfg ring config structure
2073  *
2074  * Return: 0 if all went well, else returns appropriate error value.
2075  *
2076  * See @ti_sci_msg_rm_ring_cfg and @ti_sci_msg_rm_ring_cfg_req for
2077  * more info.
2078  */
2079 static int ti_sci_cmd_rm_ring_cfg(const struct ti_sci_handle *handle,
2080 				  const struct ti_sci_msg_rm_ring_cfg *params)
2081 {
2082 	struct ti_sci_msg_rm_ring_cfg_req *req;
2083 	struct ti_sci_msg_hdr *resp;
2084 	struct ti_sci_xfer *xfer;
2085 	struct ti_sci_info *info;
2086 	struct device *dev;
2087 	int ret = 0;
2088 
2089 	if (IS_ERR_OR_NULL(handle))
2090 		return -EINVAL;
2091 
2092 	info = handle_to_ti_sci_info(handle);
2093 	dev = info->dev;
2094 
2095 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2096 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2097 				   sizeof(*req), sizeof(*resp));
2098 	if (IS_ERR(xfer)) {
2099 		ret = PTR_ERR(xfer);
2100 		dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
2101 		return ret;
2102 	}
2103 	req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
2104 	req->valid_params = params->valid_params;
2105 	req->nav_id = params->nav_id;
2106 	req->index = params->index;
2107 	req->addr_lo = params->addr_lo;
2108 	req->addr_hi = params->addr_hi;
2109 	req->count = params->count;
2110 	req->mode = params->mode;
2111 	req->size = params->size;
2112 	req->order_id = params->order_id;
2113 	req->virtid = params->virtid;
2114 	req->asel = params->asel;
2115 
2116 	ret = ti_sci_do_xfer(info, xfer);
2117 	if (ret) {
2118 		dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
2119 		goto fail;
2120 	}
2121 
2122 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2123 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2124 
2125 fail:
2126 	ti_sci_put_one_xfer(&info->minfo, xfer);
2127 	dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", params->index, ret);
2128 	return ret;
2129 }
2130 
2131 /**
2132  * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
2133  * @handle:	Pointer to TI SCI handle.
2134  * @nav_id:	Device ID of Navigator Subsystem which should be used for
2135  *		pairing
2136  * @src_thread:	Source PSI-L thread ID
2137  * @dst_thread: Destination PSI-L thread ID
2138  *
2139  * Return: 0 if all went well, else returns appropriate error value.
2140  */
2141 static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2142 				   u32 nav_id, u32 src_thread, u32 dst_thread)
2143 {
2144 	struct ti_sci_msg_psil_pair *req;
2145 	struct ti_sci_msg_hdr *resp;
2146 	struct ti_sci_xfer *xfer;
2147 	struct ti_sci_info *info;
2148 	struct device *dev;
2149 	int ret = 0;
2150 
2151 	if (IS_ERR(handle))
2152 		return PTR_ERR(handle);
2153 	if (!handle)
2154 		return -EINVAL;
2155 
2156 	info = handle_to_ti_sci_info(handle);
2157 	dev = info->dev;
2158 
2159 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2160 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2161 				   sizeof(*req), sizeof(*resp));
2162 	if (IS_ERR(xfer)) {
2163 		ret = PTR_ERR(xfer);
2164 		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
2165 		return ret;
2166 	}
2167 	req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
2168 	req->nav_id = nav_id;
2169 	req->src_thread = src_thread;
2170 	req->dst_thread = dst_thread;
2171 
2172 	ret = ti_sci_do_xfer(info, xfer);
2173 	if (ret) {
2174 		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2175 		goto fail;
2176 	}
2177 
2178 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2179 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2180 
2181 fail:
2182 	ti_sci_put_one_xfer(&info->minfo, xfer);
2183 
2184 	return ret;
2185 }
2186 
2187 /**
2188  * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
2189  * @handle:	Pointer to TI SCI handle.
2190  * @nav_id:	Device ID of Navigator Subsystem which should be used for
2191  *		unpairing
2192  * @src_thread:	Source PSI-L thread ID
2193  * @dst_thread:	Destination PSI-L thread ID
2194  *
2195  * Return: 0 if all went well, else returns appropriate error value.
2196  */
2197 static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2198 				     u32 nav_id, u32 src_thread, u32 dst_thread)
2199 {
2200 	struct ti_sci_msg_psil_unpair *req;
2201 	struct ti_sci_msg_hdr *resp;
2202 	struct ti_sci_xfer *xfer;
2203 	struct ti_sci_info *info;
2204 	struct device *dev;
2205 	int ret = 0;
2206 
2207 	if (IS_ERR(handle))
2208 		return PTR_ERR(handle);
2209 	if (!handle)
2210 		return -EINVAL;
2211 
2212 	info = handle_to_ti_sci_info(handle);
2213 	dev = info->dev;
2214 
2215 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2216 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2217 				   sizeof(*req), sizeof(*resp));
2218 	if (IS_ERR(xfer)) {
2219 		ret = PTR_ERR(xfer);
2220 		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
2221 		return ret;
2222 	}
2223 	req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
2224 	req->nav_id = nav_id;
2225 	req->src_thread = src_thread;
2226 	req->dst_thread = dst_thread;
2227 
2228 	ret = ti_sci_do_xfer(info, xfer);
2229 	if (ret) {
2230 		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2231 		goto fail;
2232 	}
2233 
2234 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2235 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2236 
2237 fail:
2238 	ti_sci_put_one_xfer(&info->minfo, xfer);
2239 
2240 	return ret;
2241 }
2242 
2243 /**
2244  * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
2245  * @handle:	Pointer to TI SCI handle.
2246  * @params:	Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
2247  *		structure
2248  *
2249  * Return: 0 if all went well, else returns appropriate error value.
2250  *
2251  * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
2252  * more info.
2253  */
2254 static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
2255 			const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2256 {
2257 	struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
2258 	struct ti_sci_msg_hdr *resp;
2259 	struct ti_sci_xfer *xfer;
2260 	struct ti_sci_info *info;
2261 	struct device *dev;
2262 	int ret = 0;
2263 
2264 	if (IS_ERR_OR_NULL(handle))
2265 		return -EINVAL;
2266 
2267 	info = handle_to_ti_sci_info(handle);
2268 	dev = info->dev;
2269 
2270 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2271 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2272 				   sizeof(*req), sizeof(*resp));
2273 	if (IS_ERR(xfer)) {
2274 		ret = PTR_ERR(xfer);
2275 		dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2276 		return ret;
2277 	}
2278 	req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
2279 	req->valid_params = params->valid_params;
2280 	req->nav_id = params->nav_id;
2281 	req->index = params->index;
2282 	req->tx_pause_on_err = params->tx_pause_on_err;
2283 	req->tx_filt_einfo = params->tx_filt_einfo;
2284 	req->tx_filt_pswords = params->tx_filt_pswords;
2285 	req->tx_atype = params->tx_atype;
2286 	req->tx_chan_type = params->tx_chan_type;
2287 	req->tx_supr_tdpkt = params->tx_supr_tdpkt;
2288 	req->tx_fetch_size = params->tx_fetch_size;
2289 	req->tx_credit_count = params->tx_credit_count;
2290 	req->txcq_qnum = params->txcq_qnum;
2291 	req->tx_priority = params->tx_priority;
2292 	req->tx_qos = params->tx_qos;
2293 	req->tx_orderid = params->tx_orderid;
2294 	req->fdepth = params->fdepth;
2295 	req->tx_sched_priority = params->tx_sched_priority;
2296 	req->tx_burst_size = params->tx_burst_size;
2297 	req->tx_tdtype = params->tx_tdtype;
2298 	req->extended_ch_type = params->extended_ch_type;
2299 
2300 	ret = ti_sci_do_xfer(info, xfer);
2301 	if (ret) {
2302 		dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2303 		goto fail;
2304 	}
2305 
2306 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2307 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2308 
2309 fail:
2310 	ti_sci_put_one_xfer(&info->minfo, xfer);
2311 	dev_dbg(dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
2312 	return ret;
2313 }
2314 
2315 /**
2316  * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
2317  * @handle:	Pointer to TI SCI handle.
2318  * @params:	Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
2319  *		structure
2320  *
2321  * Return: 0 if all went well, else returns appropriate error value.
2322  *
2323  * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
2324  * more info.
2325  */
2326 static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
2327 			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2328 {
2329 	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
2330 	struct ti_sci_msg_hdr *resp;
2331 	struct ti_sci_xfer *xfer;
2332 	struct ti_sci_info *info;
2333 	struct device *dev;
2334 	int ret = 0;
2335 
2336 	if (IS_ERR_OR_NULL(handle))
2337 		return -EINVAL;
2338 
2339 	info = handle_to_ti_sci_info(handle);
2340 	dev = info->dev;
2341 
2342 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2343 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2344 				   sizeof(*req), sizeof(*resp));
2345 	if (IS_ERR(xfer)) {
2346 		ret = PTR_ERR(xfer);
2347 		dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2348 		return ret;
2349 	}
2350 	req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
2351 	req->valid_params = params->valid_params;
2352 	req->nav_id = params->nav_id;
2353 	req->index = params->index;
2354 	req->rx_fetch_size = params->rx_fetch_size;
2355 	req->rxcq_qnum = params->rxcq_qnum;
2356 	req->rx_priority = params->rx_priority;
2357 	req->rx_qos = params->rx_qos;
2358 	req->rx_orderid = params->rx_orderid;
2359 	req->rx_sched_priority = params->rx_sched_priority;
2360 	req->flowid_start = params->flowid_start;
2361 	req->flowid_cnt = params->flowid_cnt;
2362 	req->rx_pause_on_err = params->rx_pause_on_err;
2363 	req->rx_atype = params->rx_atype;
2364 	req->rx_chan_type = params->rx_chan_type;
2365 	req->rx_ignore_short = params->rx_ignore_short;
2366 	req->rx_ignore_long = params->rx_ignore_long;
2367 	req->rx_burst_size = params->rx_burst_size;
2368 
2369 	ret = ti_sci_do_xfer(info, xfer);
2370 	if (ret) {
2371 		dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2372 		goto fail;
2373 	}
2374 
2375 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2376 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2377 
2378 fail:
2379 	ti_sci_put_one_xfer(&info->minfo, xfer);
2380 	dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2381 	return ret;
2382 }
2383 
2384 /**
2385  * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
2386  * @handle:	Pointer to TI SCI handle.
2387  * @params:	Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
2388  *		structure
2389  *
2390  * Return: 0 if all went well, else returns appropriate error value.
2391  *
2392  * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
2393  * more info.
2394  */
2395 static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
2396 			const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2397 {
2398 	struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
2399 	struct ti_sci_msg_hdr *resp;
2400 	struct ti_sci_xfer *xfer;
2401 	struct ti_sci_info *info;
2402 	struct device *dev;
2403 	int ret = 0;
2404 
2405 	if (IS_ERR_OR_NULL(handle))
2406 		return -EINVAL;
2407 
2408 	info = handle_to_ti_sci_info(handle);
2409 	dev = info->dev;
2410 
2411 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2412 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2413 				   sizeof(*req), sizeof(*resp));
2414 	if (IS_ERR(xfer)) {
2415 		ret = PTR_ERR(xfer);
2416 		dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2417 		return ret;
2418 	}
2419 	req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
2420 	req->valid_params = params->valid_params;
2421 	req->nav_id = params->nav_id;
2422 	req->flow_index = params->flow_index;
2423 	req->rx_einfo_present = params->rx_einfo_present;
2424 	req->rx_psinfo_present = params->rx_psinfo_present;
2425 	req->rx_error_handling = params->rx_error_handling;
2426 	req->rx_desc_type = params->rx_desc_type;
2427 	req->rx_sop_offset = params->rx_sop_offset;
2428 	req->rx_dest_qnum = params->rx_dest_qnum;
2429 	req->rx_src_tag_hi = params->rx_src_tag_hi;
2430 	req->rx_src_tag_lo = params->rx_src_tag_lo;
2431 	req->rx_dest_tag_hi = params->rx_dest_tag_hi;
2432 	req->rx_dest_tag_lo = params->rx_dest_tag_lo;
2433 	req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2434 	req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2435 	req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2436 	req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2437 	req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2438 	req->rx_fdq1_qnum = params->rx_fdq1_qnum;
2439 	req->rx_fdq2_qnum = params->rx_fdq2_qnum;
2440 	req->rx_fdq3_qnum = params->rx_fdq3_qnum;
2441 	req->rx_ps_location = params->rx_ps_location;
2442 
2443 	ret = ti_sci_do_xfer(info, xfer);
2444 	if (ret) {
2445 		dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2446 		goto fail;
2447 	}
2448 
2449 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2450 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2451 
2452 fail:
2453 	ti_sci_put_one_xfer(&info->minfo, xfer);
2454 	dev_dbg(dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2455 	return ret;
2456 }
2457 
2458 /**
2459  * ti_sci_cmd_proc_request() - Command to request control of a physical processor
2460  * @handle:	Pointer to TI SCI handle
2461  * @proc_id:	Processor ID this request is for
2462  *
2463  * Return: 0 if all went well, else returns appropriate error value.
2464  */
2465 static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
2466 				   u8 proc_id)
2467 {
2468 	struct ti_sci_msg_req_proc_request *req;
2469 	struct ti_sci_msg_hdr *resp;
2470 	struct ti_sci_info *info;
2471 	struct ti_sci_xfer *xfer;
2472 	struct device *dev;
2473 	int ret = 0;
2474 
2475 	if (!handle)
2476 		return -EINVAL;
2477 	if (IS_ERR(handle))
2478 		return PTR_ERR(handle);
2479 
2480 	info = handle_to_ti_sci_info(handle);
2481 	dev = info->dev;
2482 
2483 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
2484 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2485 				   sizeof(*req), sizeof(*resp));
2486 	if (IS_ERR(xfer)) {
2487 		ret = PTR_ERR(xfer);
2488 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2489 		return ret;
2490 	}
2491 	req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
2492 	req->processor_id = proc_id;
2493 
2494 	ret = ti_sci_do_xfer(info, xfer);
2495 	if (ret) {
2496 		dev_err(dev, "Mbox send fail %d\n", ret);
2497 		goto fail;
2498 	}
2499 
2500 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2501 
2502 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2503 
2504 fail:
2505 	ti_sci_put_one_xfer(&info->minfo, xfer);
2506 
2507 	return ret;
2508 }
2509 
2510 /**
2511  * ti_sci_cmd_proc_release() - Command to release control of a physical processor
2512  * @handle:	Pointer to TI SCI handle
2513  * @proc_id:	Processor ID this request is for
2514  *
2515  * Return: 0 if all went well, else returns appropriate error value.
2516  */
2517 static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
2518 				   u8 proc_id)
2519 {
2520 	struct ti_sci_msg_req_proc_release *req;
2521 	struct ti_sci_msg_hdr *resp;
2522 	struct ti_sci_info *info;
2523 	struct ti_sci_xfer *xfer;
2524 	struct device *dev;
2525 	int ret = 0;
2526 
2527 	if (!handle)
2528 		return -EINVAL;
2529 	if (IS_ERR(handle))
2530 		return PTR_ERR(handle);
2531 
2532 	info = handle_to_ti_sci_info(handle);
2533 	dev = info->dev;
2534 
2535 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
2536 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2537 				   sizeof(*req), sizeof(*resp));
2538 	if (IS_ERR(xfer)) {
2539 		ret = PTR_ERR(xfer);
2540 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2541 		return ret;
2542 	}
2543 	req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
2544 	req->processor_id = proc_id;
2545 
2546 	ret = ti_sci_do_xfer(info, xfer);
2547 	if (ret) {
2548 		dev_err(dev, "Mbox send fail %d\n", ret);
2549 		goto fail;
2550 	}
2551 
2552 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2553 
2554 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2555 
2556 fail:
2557 	ti_sci_put_one_xfer(&info->minfo, xfer);
2558 
2559 	return ret;
2560 }
2561 
2562 /**
2563  * ti_sci_cmd_proc_handover() - Command to hand over control of a physical
2564  *				processor to a host in the processor's access
2565  *				control list.
2566  * @handle:	Pointer to TI SCI handle
2567  * @proc_id:	Processor ID this request is for
2568  * @host_id:	Host ID to get the control of the processor
2569  *
2570  * Return: 0 if all went well, else returns appropriate error value.
2571  */
2572 static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
2573 				    u8 proc_id, u8 host_id)
2574 {
2575 	struct ti_sci_msg_req_proc_handover *req;
2576 	struct ti_sci_msg_hdr *resp;
2577 	struct ti_sci_info *info;
2578 	struct ti_sci_xfer *xfer;
2579 	struct device *dev;
2580 	int ret = 0;
2581 
2582 	if (!handle)
2583 		return -EINVAL;
2584 	if (IS_ERR(handle))
2585 		return PTR_ERR(handle);
2586 
2587 	info = handle_to_ti_sci_info(handle);
2588 	dev = info->dev;
2589 
2590 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
2591 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2592 				   sizeof(*req), sizeof(*resp));
2593 	if (IS_ERR(xfer)) {
2594 		ret = PTR_ERR(xfer);
2595 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2596 		return ret;
2597 	}
2598 	req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
2599 	req->processor_id = proc_id;
2600 	req->host_id = host_id;
2601 
2602 	ret = ti_sci_do_xfer(info, xfer);
2603 	if (ret) {
2604 		dev_err(dev, "Mbox send fail %d\n", ret);
2605 		goto fail;
2606 	}
2607 
2608 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2609 
2610 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2611 
2612 fail:
2613 	ti_sci_put_one_xfer(&info->minfo, xfer);
2614 
2615 	return ret;
2616 }
2617 
2618 /**
2619  * ti_sci_cmd_proc_set_config() - Command to set the processor boot
2620  *				    configuration flags
2621  * @handle:		Pointer to TI SCI handle
2622  * @proc_id:		Processor ID this request is for
 * @bootvector:		Processor boot vector (start address)
2623  * @config_flags_set:	Configuration flags to be set
2624  * @config_flags_clear:	Configuration flags to be cleared.
2625  *
2626  * Return: 0 if all went well, else returns appropriate error value.
2627  */
2628 static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
2629 				      u8 proc_id, u64 bootvector,
2630 				      u32 config_flags_set,
2631 				      u32 config_flags_clear)
2632 {
2633 	struct ti_sci_msg_req_set_config *req;
2634 	struct ti_sci_msg_hdr *resp;
2635 	struct ti_sci_info *info;
2636 	struct ti_sci_xfer *xfer;
2637 	struct device *dev;
2638 	int ret = 0;
2639 
2640 	if (!handle)
2641 		return -EINVAL;
2642 	if (IS_ERR(handle))
2643 		return PTR_ERR(handle);
2644 
2645 	info = handle_to_ti_sci_info(handle);
2646 	dev = info->dev;
2647 
2648 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
2649 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2650 				   sizeof(*req), sizeof(*resp));
2651 	if (IS_ERR(xfer)) {
2652 		ret = PTR_ERR(xfer);
2653 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2654 		return ret;
2655 	}
2656 	req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
2657 	req->processor_id = proc_id;
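	/* Split the 64-bit boot vector across the low/high message fields. */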
2658 	req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
2659 	req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
2660 				TI_SCI_ADDR_HIGH_SHIFT;
2661 	req->config_flags_set = config_flags_set;
2662 	req->config_flags_clear = config_flags_clear;
2663 
2664 	ret = ti_sci_do_xfer(info, xfer);
2665 	if (ret) {
2666 		dev_err(dev, "Mbox send fail %d\n", ret);
2667 		goto fail;
2668 	}
2669 
2670 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2671 
2672 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2673 
2674 fail:
2675 	ti_sci_put_one_xfer(&info->minfo, xfer);
2676 
2677 	return ret;
2678 }
2679 
2680 /**
2681  * ti_sci_cmd_proc_set_control() - Command to set the processor boot
2682  *				     control flags
2683  * @handle:			Pointer to TI SCI handle
2684  * @proc_id:			Processor ID this request is for
2685  * @control_flags_set:		Control flags to be set
2686  * @control_flags_clear:	Control flags to be cleared
2687  *
2688  * Return: 0 if all went well, else returns appropriate error value.
2689  */
2690 static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
2691 				       u8 proc_id, u32 control_flags_set,
2692 				       u32 control_flags_clear)
2693 {
2694 	struct ti_sci_msg_req_set_ctrl *req;
2695 	struct ti_sci_msg_hdr *resp;
2696 	struct ti_sci_info *info;
2697 	struct ti_sci_xfer *xfer;
2698 	struct device *dev;
2699 	int ret = 0;
2700 
2701 	if (!handle)
2702 		return -EINVAL;
2703 	if (IS_ERR(handle))
2704 		return PTR_ERR(handle);
2705 
2706 	info = handle_to_ti_sci_info(handle);
2707 	dev = info->dev;
2708 
2709 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
2710 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2711 				   sizeof(*req), sizeof(*resp));
2712 	if (IS_ERR(xfer)) {
2713 		ret = PTR_ERR(xfer);
2714 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2715 		return ret;
2716 	}
2717 	req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
2718 	req->processor_id = proc_id;
2719 	req->control_flags_set = control_flags_set;
2720 	req->control_flags_clear = control_flags_clear;
2721 
2722 	ret = ti_sci_do_xfer(info, xfer);
2723 	if (ret) {
2724 		dev_err(dev, "Mbox send fail %d\n", ret);
2725 		goto fail;
2726 	}
2727 
2728 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2729 
2730 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2731 
2732 fail:
2733 	ti_sci_put_one_xfer(&info->minfo, xfer);
2734 
2735 	return ret;
2736 }
2737 
2738 /**
2739  * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
2740  * @handle:	Pointer to TI SCI handle
2741  * @proc_id:	Processor ID this request is for
 * @bv:		Pointer to store the processor boot vector
 * @cfg_flags:	Pointer to store the processor configuration flags
 * @ctrl_flags:	Pointer to store the processor control flags
 * @sts_flags:	Pointer to store the processor status flags
2742  *
2743  * Return: 0 if all went well, else returns appropriate error value.
2744  */
2745 static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
2746 				      u8 proc_id, u64 *bv, u32 *cfg_flags,
2747 				      u32 *ctrl_flags, u32 *sts_flags)
2748 {
2749 	struct ti_sci_msg_resp_get_status *resp;
2750 	struct ti_sci_msg_req_get_status *req;
2751 	struct ti_sci_info *info;
2752 	struct ti_sci_xfer *xfer;
2753 	struct device *dev;
2754 	int ret = 0;
2755 
2756 	if (!handle)
2757 		return -EINVAL;
2758 	if (IS_ERR(handle))
2759 		return PTR_ERR(handle);
2760 
2761 	info = handle_to_ti_sci_info(handle);
2762 	dev = info->dev;
2763 
2764 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
2765 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2766 				   sizeof(*req), sizeof(*resp));
2767 	if (IS_ERR(xfer)) {
2768 		ret = PTR_ERR(xfer);
2769 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2770 		return ret;
2771 	}
2772 	req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
2773 	req->processor_id = proc_id;
2774 
2775 	ret = ti_sci_do_xfer(info, xfer);
2776 	if (ret) {
2777 		dev_err(dev, "Mbox send fail %d\n", ret);
2778 		goto fail;
2779 	}
2780 
2781 	resp = (struct ti_sci_msg_resp_get_status *)xfer->xfer_buf;
2782 
2783 	if (!ti_sci_is_response_ack(resp)) {
2784 		ret = -ENODEV;
2785 	} else {
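		/* Reassemble the 64-bit boot vector from the low/high fields. */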
2786 		*bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
2787 		      (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
2788 		       TI_SCI_ADDR_HIGH_MASK);
2789 		*cfg_flags = resp->config_flags;
2790 		*ctrl_flags = resp->control_flags;
2791 		*sts_flags = resp->status_flags;
2792 	}
2793 
2794 fail:
2795 	ti_sci_put_one_xfer(&info->minfo, xfer);
2796 
2797 	return ret;
2798 }
2799 
2800 /*
2801  * ti_sci_setup_ops() - Setup the operations structures
2802  * @info:	pointer to TISCI instance
2803  */
2804 static void ti_sci_setup_ops(struct ti_sci_info *info)
2805 {
2806 	struct ti_sci_ops *ops = &info->handle.ops;
2807 	struct ti_sci_core_ops *core_ops = &ops->core_ops;
2808 	struct ti_sci_dev_ops *dops = &ops->dev_ops;
2809 	struct ti_sci_clk_ops *cops = &ops->clk_ops;
2810 	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
2811 	struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
2812 	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2813 	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2814 	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
2815 	struct ti_sci_proc_ops *pops = &ops->proc_ops;
2816 
2817 	core_ops->reboot_device = ti_sci_cmd_core_reboot;
2818 
2819 	dops->get_device = ti_sci_cmd_get_device;
2820 	dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
2821 	dops->idle_device = ti_sci_cmd_idle_device;
2822 	dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
2823 	dops->put_device = ti_sci_cmd_put_device;
2824 
2825 	dops->is_valid = ti_sci_cmd_dev_is_valid;
2826 	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
2827 	dops->is_idle = ti_sci_cmd_dev_is_idle;
2828 	dops->is_stop = ti_sci_cmd_dev_is_stop;
2829 	dops->is_on = ti_sci_cmd_dev_is_on;
2830 	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
2831 	dops->set_device_resets = ti_sci_cmd_set_device_resets;
2832 	dops->get_device_resets = ti_sci_cmd_get_device_resets;
2833 
2834 	cops->get_clock = ti_sci_cmd_get_clock;
2835 	cops->idle_clock = ti_sci_cmd_idle_clock;
2836 	cops->put_clock = ti_sci_cmd_put_clock;
2837 	cops->is_auto = ti_sci_cmd_clk_is_auto;
2838 	cops->is_on = ti_sci_cmd_clk_is_on;
2839 	cops->is_off = ti_sci_cmd_clk_is_off;
2840 
2841 	cops->set_parent = ti_sci_cmd_clk_set_parent;
2842 	cops->get_parent = ti_sci_cmd_clk_get_parent;
2843 	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
2844 
2845 	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
2846 	cops->set_freq = ti_sci_cmd_clk_set_freq;
2847 	cops->get_freq = ti_sci_cmd_clk_get_freq;
2848 
2849 	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2850 	rm_core_ops->get_range_from_shost =
2851 				ti_sci_cmd_get_resource_range_from_shost;
2852 
2853 	iops->set_irq = ti_sci_cmd_set_irq;
2854 	iops->set_event_map = ti_sci_cmd_set_event_map;
2855 	iops->free_irq = ti_sci_cmd_free_irq;
2856 	iops->free_event_map = ti_sci_cmd_free_event_map;
2857 
2858 	rops->set_cfg = ti_sci_cmd_rm_ring_cfg;
2859 
2860 	psilops->pair = ti_sci_cmd_rm_psil_pair;
2861 	psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2862 
2863 	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2864 	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2865 	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
2866 
2867 	pops->request = ti_sci_cmd_proc_request;
2868 	pops->release = ti_sci_cmd_proc_release;
2869 	pops->handover = ti_sci_cmd_proc_handover;
2870 	pops->set_config = ti_sci_cmd_proc_set_config;
2871 	pops->set_control = ti_sci_cmd_proc_set_control;
2872 	pops->get_status = ti_sci_cmd_proc_get_status;
2873 }
2874 
2875 /**
2876  * ti_sci_get_handle() - Get the TI SCI handle for a device
2877  * @dev:	Pointer to device for which we want SCI handle
2878  *
2879  * NOTE: The function does not track individual clients of the framework
2880  * and is expected to be maintained by caller of TI SCI protocol library.
2881  * ti_sci_put_handle must be balanced with a successful ti_sci_get_handle.
2882  * Return: pointer to handle if successful, else:
2883  * -EPROBE_DEFER if the instance is not ready
2884  * -ENODEV if the required node handler is missing
2885  * -EINVAL if invalid conditions are encountered.
2886  */
2887 const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
2888 {
2889 	struct device_node *ti_sci_np;
2890 	struct list_head *p;
2891 	struct ti_sci_handle *handle = NULL;
2892 	struct ti_sci_info *info;
2893 
2894 	if (!dev) {
2895 		pr_err("I need a device pointer\n");
2896 		return ERR_PTR(-EINVAL);
2897 	}
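	/* Clients are expected to sit directly under the TISCI node in DT. */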
2898 	ti_sci_np = of_get_parent(dev->of_node);
2899 	if (!ti_sci_np) {
2900 		dev_err(dev, "No OF information\n");
2901 		return ERR_PTR(-EINVAL);
2902 	}
2903 
2904 	mutex_lock(&ti_sci_list_mutex);
2905 	list_for_each(p, &ti_sci_list) {
2906 		info = list_entry(p, struct ti_sci_info, node);
2907 		if (ti_sci_np == info->dev->of_node) {
2908 			handle = &info->handle;
2909 			info->users++;
2910 			break;
2911 		}
2912 	}
2913 	mutex_unlock(&ti_sci_list_mutex);
2914 	of_node_put(ti_sci_np);
2915 
2916 	if (!handle)
2917 		return ERR_PTR(-EPROBE_DEFER);
2918 
2919 	return handle;
2920 }
2921 EXPORT_SYMBOL_GPL(ti_sci_get_handle);
2922 
2923 /**
2924  * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
2925  * @handle:	Handle acquired by ti_sci_get_handle
2926  *
2927  * NOTE: The function does not track individual clients of the framework
2928  * and is expected to be maintained by caller of TI SCI protocol library.
2929  * ti_sci_put_handle must be balanced with a successful ti_sci_get_handle.
2930  *
2931  * Return: 0 if successfully released;
2932  * if an error pointer was passed, the error value is returned back;
2933  * if NULL was passed, -EINVAL is returned.
2934  */
2935 int ti_sci_put_handle(const struct ti_sci_handle *handle)
2936 {
2937 	struct ti_sci_info *info;
2938 
2939 	if (IS_ERR(handle))
2940 		return PTR_ERR(handle);
2941 	if (!handle)
2942 		return -EINVAL;
2943 
2944 	info = handle_to_ti_sci_info(handle);
2945 	mutex_lock(&ti_sci_list_mutex);
2946 	if (!WARN_ON(!info->users))
2947 		info->users--;
2948 	mutex_unlock(&ti_sci_list_mutex);
2949 
2950 	return 0;
2951 }
2952 EXPORT_SYMBOL_GPL(ti_sci_put_handle);
2953 
2954 static void devm_ti_sci_release(struct device *dev, void *res)
2955 {
2956 	const struct ti_sci_handle **ptr = res;
2957 	const struct ti_sci_handle *handle = *ptr;
2958 	int ret;
2959 
2960 	ret = ti_sci_put_handle(handle);
2961 	if (ret)
2962 		dev_err(dev, "failed to put handle %d\n", ret);
2963 }
2964 
2965 /**
2966  * devm_ti_sci_get_handle() - Managed get handle
2967  * @dev:	device for which we want the SCI handle.
2968  *
2969  * NOTE: This releases the handle once the device resources are
2970  * no longer needed. MUST NOT BE released with ti_sci_put_handle.
2971  * The function does not track individual clients of the framework
2972  * and is expected to be maintained by caller of TI SCI protocol library.
2973  *
2974  * Return: pointer to handle if successful, else corresponding error pointer.
2975  */
2976 const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
2977 {
2978 	const struct ti_sci_handle **ptr;
2979 	const struct ti_sci_handle *handle;
2980 
2981 	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
2982 	if (!ptr)
2983 		return ERR_PTR(-ENOMEM);
2984 	handle = ti_sci_get_handle(dev);
2985 
2986 	if (!IS_ERR(handle)) {
2987 		*ptr = handle;
2988 		devres_add(dev, ptr);
2989 	} else {
2990 		devres_free(ptr);
2991 	}
2992 
2993 	return handle;
2994 }
2995 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
2996 
2997 /**
2998  * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
2999  * @np:		device node
3000  * @property:	property name containing phandle on TISCI node
3001  *
3002  * NOTE: The function does not track individual clients of the framework
3003  * and is expected to be maintained by caller of TI SCI protocol library.
3004  * ti_sci_put_handle must be balanced with a successful ti_sci_get_by_phandle.
3005  * Return: pointer to handle if successful, else:
3006  * -EPROBE_DEFER if the instance is not ready
3007  * -ENODEV if the required node handler is missing
3008  * -EINVAL if invalid conditions are encountered.
3009  */
3010 const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
3011 						  const char *property)
3012 {
3013 	struct ti_sci_handle *handle = NULL;
3014 	struct device_node *ti_sci_np;
3015 	struct ti_sci_info *info;
3016 	struct list_head *p;
3017 
3018 	if (!np) {
3019 		pr_err("I need a device node pointer\n");
3020 		return ERR_PTR(-EINVAL);
3021 	}
3022 
3023 	ti_sci_np = of_parse_phandle(np, property, 0);
3024 	if (!ti_sci_np)
3025 		return ERR_PTR(-ENODEV);
3026 
3027 	mutex_lock(&ti_sci_list_mutex);
3028 	list_for_each(p, &ti_sci_list) {
3029 		info = list_entry(p, struct ti_sci_info, node);
3030 		if (ti_sci_np == info->dev->of_node) {
3031 			handle = &info->handle;
3032 			info->users++;
3033 			break;
3034 		}
3035 	}
3036 	mutex_unlock(&ti_sci_list_mutex);
3037 	of_node_put(ti_sci_np);
3038 
3039 	if (!handle)
3040 		return ERR_PTR(-EPROBE_DEFER);
3041 
3042 	return handle;
3043 }
3044 EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
3045 
3046 /**
3047  * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
3048  * @dev:	Device pointer requesting TISCI handle
3049  * @property:	property name containing phandle on TISCI node
3050  *
3051  * NOTE: This releases the handle once the device resources are
3052  * no longer needed. MUST NOT BE released with ti_sci_put_handle.
3053  * The function does not track individual clients of the framework
3054  * and is expected to be maintained by caller of TI SCI protocol library.
3055  *
3056  * Return: pointer to handle if successful, else corresponding error pointer.
3057  */
3058 const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
3059 						       const char *property)
3060 {
3061 	const struct ti_sci_handle *handle;
3062 	const struct ti_sci_handle **ptr;
3063 
3064 	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
3065 	if (!ptr)
3066 		return ERR_PTR(-ENOMEM);
3067 	handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
3068 
3069 	if (!IS_ERR(handle)) {
3070 		*ptr = handle;
3071 		devres_add(dev, ptr);
3072 	} else {
3073 		devres_free(ptr);
3074 	}
3075 
3076 	return handle;
3077 }
3078 EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
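
/*
 * A minimal usage sketch for a client driver (hypothetical example; the
 * "ti,sci" property name and the dev_id value are assumptions, not part
 * of this driver):
 *
 *	const struct ti_sci_handle *handle;
 *	int ret;
 *
 *	handle = devm_ti_sci_get_by_phandle(dev, "ti,sci");
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	ret = handle->ops.dev_ops.get_device(handle, dev_id);
 */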
3079 
3080 /**
3081  * ti_sci_get_free_resource() - Get a free resource from a TISCI resource.
3082  * @res:	Pointer to the TISCI resource
3083  *
3084  * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
3085  */
3086 u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
3087 {
3088 	unsigned long flags;
3089 	u16 set, free_bit;
3090 
3091 	raw_spin_lock_irqsave(&res->lock, flags);
3092 	for (set = 0; set < res->sets; set++) {
3093 		struct ti_sci_resource_desc *desc = &res->desc[set];
3094 		int res_count = desc->num + desc->num_sec;
3095 
3096 		free_bit = find_first_zero_bit(desc->res_map, res_count);
3097 		if (free_bit != res_count) {
3098 			__set_bit(free_bit, desc->res_map);
3099 			raw_spin_unlock_irqrestore(&res->lock, flags);
3100 
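			/*
			 * Bits below desc->num map into the primary range;
			 * the rest map into the secondary range.
			 */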
3101 			if (desc->num && free_bit < desc->num)
3102 				return desc->start + free_bit;
3103 			else
3104 				return desc->start_sec + free_bit;
3105 		}
3106 	}
3107 	raw_spin_unlock_irqrestore(&res->lock, flags);
3108 
3109 	return TI_SCI_RESOURCE_NULL;
3110 }
3111 EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
3112 
3113 /**
3114  * ti_sci_release_resource() - Release a resource from a TISCI resource.
3115  * @res:	Pointer to the TISCI resource
3116  * @id:		Resource id to be released.
3117  */
3118 void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
3119 {
3120 	unsigned long flags;
3121 	u16 set;
3122 
3123 	raw_spin_lock_irqsave(&res->lock, flags);
3124 	for (set = 0; set < res->sets; set++) {
3125 		struct ti_sci_resource_desc *desc = &res->desc[set];
3126 
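		/* The id may lie in either the primary or the secondary range. */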
3127 		if (desc->num && desc->start <= id &&
3128 		    (desc->start + desc->num) > id)
3129 			__clear_bit(id - desc->start, desc->res_map);
3130 		else if (desc->num_sec && desc->start_sec <= id &&
3131 			 (desc->start_sec + desc->num_sec) > id)
3132 			__clear_bit(id - desc->start_sec, desc->res_map);
3133 	}
3134 	raw_spin_unlock_irqrestore(&res->lock, flags);
3135 }
3136 EXPORT_SYMBOL_GPL(ti_sci_release_resource);
3137 
3138 /**
3139  * ti_sci_get_num_resources() - Get the number of resources in a TISCI resource
3140  * @res:	Pointer to the TISCI resource
3141  *
3142  * Return: Total number of available resources.
3143  */
3144 u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
3145 {
3146 	u32 set, count = 0;
3147 
3148 	for (set = 0; set < res->sets; set++)
3149 		count += res->desc[set].num + res->desc[set].num_sec;
3150 
3151 	return count;
3152 }
3153 EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
3154 
3155 /**
3156  * devm_ti_sci_get_resource_sets() - Get TISCI resources assigned to a device
3157  * @handle:	TISCI handle
3158  * @dev:	Device pointer to which the resource is assigned
3159  * @dev_id:	TISCI device id to which the resource is assigned
3160  * @sub_types:	Array of sub_types assigned corresponding to device
3161  * @sets:	Number of sub_types
3162  *
3163  * Return: Pointer to ti_sci_resource if all went well else appropriate
3164  *	   error pointer.
3165  */
3166 static struct ti_sci_resource *
3167 devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
3168 			      struct device *dev, u32 dev_id, u32 *sub_types,
3169 			      u32 sets)
3170 {
3171 	struct ti_sci_resource *res;
3172 	bool valid_set = false;
3173 	int i, ret, res_count;
3174 
3175 	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
3176 	if (!res)
3177 		return ERR_PTR(-ENOMEM);
3178 
3179 	res->sets = sets;
3180 	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
3181 				 GFP_KERNEL);
3182 	if (!res->desc)
3183 		return ERR_PTR(-ENOMEM);
3184 
3185 	for (i = 0; i < res->sets; i++) {
3186 		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
3187 							sub_types[i],
3188 							&res->desc[i]);
3189 		if (ret) {
3190 			dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
3191 				dev_id, sub_types[i]);
3192 			memset(&res->desc[i], 0, sizeof(res->desc[i]));
3193 			continue;
3194 		}
3195 
3196 		dev_dbg(dev, "dev/sub_type: %d/%d, start/num: %d/%d | %d/%d\n",
3197 			dev_id, sub_types[i], res->desc[i].start,
3198 			res->desc[i].num, res->desc[i].start_sec,
3199 			res->desc[i].num_sec);
3200 
3201 		valid_set = true;
3202 		res_count = res->desc[i].num + res->desc[i].num_sec;
3203 		res->desc[i].res_map = devm_bitmap_zalloc(dev, res_count,
3204 							  GFP_KERNEL);
3205 		if (!res->desc[i].res_map)
3206 			return ERR_PTR(-ENOMEM);
3207 	}
3208 	raw_spin_lock_init(&res->lock);
3209 
3210 	if (valid_set)
3211 		return res;
3212 
3213 	return ERR_PTR(-EINVAL);
3214 }
3215 
3216 /**
3217  * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
3218  * @handle:	TISCI handle
3219  * @dev:	Device pointer to which the resource is assigned
3220  * @dev_id:	TISCI device id to which the resource is assigned
3221  * @of_prop:	property name by which the resources are represented
3222  *
3223  * Return: Pointer to ti_sci_resource if all went well else appropriate
3224  *	   error pointer.
3225  */
3226 struct ti_sci_resource *
3227 devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
3228 			    struct device *dev, u32 dev_id, char *of_prop)
3229 {
3230 	struct ti_sci_resource *res;
3231 	u32 *sub_types;
3232 	int sets;
3233 
3234 	sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
3235 					       sizeof(u32));
3236 	if (sets < 0) {
3237 		dev_err(dev, "%s resource type ids not available\n", of_prop);
3238 		return ERR_PTR(sets);
3239 	}
3240 
3241 	sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
3242 	if (!sub_types)
3243 		return ERR_PTR(-ENOMEM);
3244 
3245 	of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
3246 	res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
3247 					    sets);
3248 
3249 	kfree(sub_types);
3250 	return res;
3251 }
3252 EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
3253 
3254 /**
3255  * devm_ti_sci_get_resource() - Get a resource range assigned to the device
3256  * @handle:	TISCI handle
3257  * @dev:	Device pointer to which the resource is assigned
3258  * @dev_id:	TISCI device id to which the resource is assigned
3259  * @sub_type:	TISCI resource subtype representing the resource.
3260  *
3261  * Return: Pointer to ti_sci_resource if all went well else appropriate
3262  *	   error pointer.
3263  */
3264 struct ti_sci_resource *
3265 devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
3266 			 u32 dev_id, u32 sub_type)
3267 {
3268 	return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
3269 }
3270 EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
3271 
3272 static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
3273 				void *cmd)
3274 {
3275 	struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
3276 	const struct ti_sci_handle *handle = &info->handle;
3277 
3278 	ti_sci_cmd_core_reboot(handle);
3279 
3280 	/* Whether the call fails or passes, we should not be here in the first place */
3281 	return NOTIFY_BAD;
3282 }
3283 
3284 static void ti_sci_set_is_suspending(struct ti_sci_info *info, bool is_suspending)
3285 {
3286 	info->is_suspending = is_suspending;
3287 }
3288 
3289 static int ti_sci_suspend(struct device *dev)
3290 {
3291 	struct ti_sci_info *info = dev_get_drvdata(dev);
3292 	/*
3293 	 * We must switch operation to polled mode now as drivers and the genpd
3294 	 * layer may make late TI SCI calls to change clock and device states
3295 	 * from the noirq phase of suspend.
3296 	 */
3297 	ti_sci_set_is_suspending(info, true);
3298 
3299 	return 0;
3300 }
3301 
3302 static int ti_sci_resume(struct device *dev)
3303 {
3304 	struct ti_sci_info *info = dev_get_drvdata(dev);
3305 
3306 	ti_sci_set_is_suspending(info, false);
3307 
3308 	return 0;
3309 }
3310 
3311 static DEFINE_SIMPLE_DEV_PM_OPS(ti_sci_pm_ops, ti_sci_suspend, ti_sci_resume);
3312 
3313 /* Description for K2G */
3314 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
3315 	.default_host_id = 2,
3316 	/* Conservative duration */
3317 	.max_rx_timeout_ms = 1000,
3318 	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
3319 	.max_msgs = 20,
3320 	.max_msg_size = 64,
3321 };
3322 
3323 /* Description for AM654 */
3324 static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
3325 	.default_host_id = 12,
3326 	/* Conservative duration */
3327 	.max_rx_timeout_ms = 10000,
3328 	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
3329 	.max_msgs = 20,
3330 	.max_msg_size = 60,
3331 };
3332 
3333 static const struct of_device_id ti_sci_of_match[] = {
3334 	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
3335 	{.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
3336 	{ /* Sentinel */ },
3337 };
3338 MODULE_DEVICE_TABLE(of, ti_sci_of_match);
3339 
3340 static int ti_sci_probe(struct platform_device *pdev)
3341 {
3342 	struct device *dev = &pdev->dev;
3343 	const struct of_device_id *of_id;
3344 	const struct ti_sci_desc *desc;
3345 	struct ti_sci_xfer *xfer;
3346 	struct ti_sci_info *info = NULL;
3347 	struct ti_sci_xfers_info *minfo;
3348 	struct mbox_client *cl;
3349 	int ret = -EINVAL;
3350 	int i;
3351 	int reboot = 0;
3352 	u32 h_id;
3353 
3354 	of_id = of_match_device(ti_sci_of_match, dev);
3355 	if (!of_id) {
3356 		dev_err(dev, "OF data missing\n");
3357 		return -EINVAL;
3358 	}
3359 	desc = of_id->data;
3360 
3361 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
3362 	if (!info)
3363 		return -ENOMEM;
3364 
3365 	info->dev = dev;
3366 	info->desc = desc;
3367 	ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
3368 	/* if the property is not present in DT, use a default from desc */
3369 	if (ret < 0) {
3370 		info->host_id = info->desc->default_host_id;
3371 	} else {
3372 		if (!h_id) {
3373 			dev_warn(dev, "Host ID 0 is reserved for firmware\n");
3374 			info->host_id = info->desc->default_host_id;
3375 		} else {
3376 			info->host_id = h_id;
3377 		}
3378 	}
3379 
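	/* Opt in as the system restart handler only when DT says so */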
3380 	reboot = of_property_read_bool(dev->of_node,
3381 				       "ti,system-reboot-controller");
3382 	INIT_LIST_HEAD(&info->node);
3383 	minfo = &info->minfo;
3384 
3385 	/*
3386 	 * Pre-allocate messages.
3387 	 * NEVER allocate more messages than we can index in hdr.seq;
3388 	 * if the SoC description has a bug, force a fix.
3389 	 */
3390 	if (WARN_ON(desc->max_msgs >=
3391 		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
3392 		return -EINVAL;
3393 
3394 	minfo->xfer_block = devm_kcalloc(dev,
3395 					 desc->max_msgs,
3396 					 sizeof(*minfo->xfer_block),
3397 					 GFP_KERNEL);
3398 	if (!minfo->xfer_block)
3399 		return -ENOMEM;
3400 
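	/* The bitmap index doubles as the message sequence identifier */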
3401 	minfo->xfer_alloc_table = devm_bitmap_zalloc(dev,
3402 						     desc->max_msgs,
3403 						     GFP_KERNEL);
3404 	if (!minfo->xfer_alloc_table)
3405 		return -ENOMEM;
3406 
3407 	/* Pre-initialize the buffer pointer to pre-allocated buffers */
3408 	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
3409 		xfer->xfer_buf = devm_kzalloc(dev, desc->max_msg_size,
3410 					      GFP_KERNEL);
3411 		if (!xfer->xfer_buf)
3412 			return -ENOMEM;
3413 
3414 		xfer->tx_message.buf = xfer->xfer_buf;
3415 		init_completion(&xfer->done);
3416 	}
3417 
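	/* Debugfs is best-effort: warn on failure but keep probing */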
3418 	ret = ti_sci_debugfs_create(pdev, info);
3419 	if (ret)
3420 		dev_warn(dev, "Failed to create debug file\n");
3421 
3422 	platform_set_drvdata(pdev, info);
3423 
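	/*
	 * Mailbox client setup: TX is non-blocking, and TX-done is signalled
	 * by the protocol (response receipt), not by the mailbox controller.
	 */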
3424 	cl = &info->cl;
3425 	cl->dev = dev;
3426 	cl->tx_block = false;
3427 	cl->rx_callback = ti_sci_rx_callback;
3428 	cl->knows_txdone = true;
3429 
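	/* Allow at most max_msgs transfers in flight at any time */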
3430 	spin_lock_init(&minfo->xfer_lock);
3431 	sema_init(&minfo->sem_xfer_count, desc->max_msgs);
3432 
3433 	info->chan_rx = mbox_request_channel_byname(cl, "rx");
3434 	if (IS_ERR(info->chan_rx)) {
3435 		ret = PTR_ERR(info->chan_rx);
3436 		goto out;
3437 	}
3438 
3439 	info->chan_tx = mbox_request_channel_byname(cl, "tx");
3440 	if (IS_ERR(info->chan_tx)) {
3441 		ret = PTR_ERR(info->chan_tx);
3442 		goto out;
3443 	}
3444 	ret = ti_sci_cmd_get_revision(info);
3445 	if (ret) {
3446 		dev_err(dev, "Unable to communicate with TISCI (%d)\n", ret);
3447 		goto out;
3448 	}
3449 
3450 	ti_sci_setup_ops(info);
3451 
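	/* Priority 128 is the documented default for restart handlers */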
3452 	if (reboot) {
3453 		info->nb.notifier_call = tisci_reboot_handler;
3454 		info->nb.priority = 128;
3455 
3456 		ret = register_restart_handler(&info->nb);
3457 		if (ret) {
3458 			dev_err(dev, "reboot handler registration failed (%d)\n", ret);
3459 			goto out;
3460 		}
3461 	}
3462 
3463 	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
3464 		 info->handle.version.abi_major, info->handle.version.abi_minor,
3465 		 info->handle.version.firmware_revision,
3466 		 info->handle.version.firmware_description);
3467 
3468 	mutex_lock(&ti_sci_list_mutex);
3469 	list_add_tail(&info->node, &ti_sci_list);
3470 	mutex_unlock(&ti_sci_list_mutex);
3471 
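	/* Populate platform devices for any children described in our DT node */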
3472 	return of_platform_populate(dev->of_node, NULL, NULL, dev);
3473 out:
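	/* Error-valued channels are skipped; mbox_free_channel() handles NULL */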
3474 	if (!IS_ERR(info->chan_tx))
3475 		mbox_free_channel(info->chan_tx);
3476 	if (!IS_ERR(info->chan_rx))
3477 		mbox_free_channel(info->chan_rx);
3478 	debugfs_remove(info->d);
3479 	return ret;
3480 }
3481 
3482 static int ti_sci_remove(struct platform_device *pdev)
3483 {
3484 	struct ti_sci_info *info;
3485 	struct device *dev = &pdev->dev;
3486 	int ret = 0;
3487 
3488 	of_platform_depopulate(dev);
3489 
3490 	info = platform_get_drvdata(pdev);
3491 
3492 	if (info->nb.notifier_call)
3493 		unregister_restart_handler(&info->nb);
3494 
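	/* Refuse to remove while clients still hold references to us */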
3495 	mutex_lock(&ti_sci_list_mutex);
3496 	if (info->users)
3497 		ret = -EBUSY;
3498 	else
3499 		list_del(&info->node);
3500 	mutex_unlock(&ti_sci_list_mutex);
3501 
3502 	if (!ret) {
3503 		ti_sci_debugfs_destroy(pdev, info);
3504 
3505 		/* Safe to free channels since no more users */
3506 		mbox_free_channel(info->chan_tx);
3507 		mbox_free_channel(info->chan_rx);
3508 	}
3509 
3510 	return ret;
3511 }
3512 
3513 static struct platform_driver ti_sci_driver = {
3514 	.probe = ti_sci_probe,
3515 	.remove = ti_sci_remove,
3516 	.driver = {
3517 		   .name = "ti-sci",
3518 		   .of_match_table = of_match_ptr(ti_sci_of_match),
3519 		   .pm = &ti_sci_pm_ops,
3520 	},
3521 };
3522 module_platform_driver(ti_sci_driver);
3523 
3524 MODULE_LICENSE("GPL v2");
3525 MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
3526 MODULE_AUTHOR("Nishanth Menon");
3527 MODULE_ALIAS("platform:ti-sci");
3528