1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Texas Instruments System Control Interface Protocol Driver
4  *
5  * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
6  *	Nishanth Menon
7  */
8 
9 #define pr_fmt(fmt) "%s: " fmt, __func__
10 
11 #include <linux/bitmap.h>
12 #include <linux/debugfs.h>
13 #include <linux/export.h>
14 #include <linux/io.h>
15 #include <linux/kernel.h>
16 #include <linux/mailbox_client.h>
17 #include <linux/module.h>
18 #include <linux/of_device.h>
19 #include <linux/semaphore.h>
20 #include <linux/slab.h>
21 #include <linux/soc/ti/ti-msgmgr.h>
22 #include <linux/soc/ti/ti_sci_protocol.h>
23 #include <linux/reboot.h>
24 
25 #include "ti_sci.h"
26 
27 /* List of all TI SCI devices active in system */
28 static LIST_HEAD(ti_sci_list);
29 /* Protection for the entire list */
30 static DEFINE_MUTEX(ti_sci_list_mutex);
31 
32 /**
33  * struct ti_sci_xfer - Structure representing a message flow
34  * @tx_message:	Transmit message
35  * @rx_len:	Receive message length
36  * @xfer_buf:	Preallocated buffer to store receive message
37  *		Since we work with a request-ACK protocol, we can
38  *		reuse the same buffer for the rx path that we
39  *		use for the tx path.
40  * @done:	completion event
41  */
42 struct ti_sci_xfer {
43 	struct ti_msgmgr_message tx_message;
44 	u8 rx_len;
45 	u8 *xfer_buf;
46 	struct completion done;
47 };
48 
49 /**
50  * struct ti_sci_xfers_info - Structure to manage transfer information
51  * @sem_xfer_count:	Counting semaphore for managing the maximum
52  *			number of simultaneous messages.
53  * @xfer_block:		Preallocated Message array
54  * @xfer_alloc_table:	Bitmap table for allocated messages.
55  *			Index of this bitmap table is also used for message
56  *			sequence identifier.
57  * @xfer_lock:		Protection for message allocation
58  */
59 struct ti_sci_xfers_info {
60 	struct semaphore sem_xfer_count;
61 	struct ti_sci_xfer *xfer_block;
62 	unsigned long *xfer_alloc_table;
63 	/* protect transfer allocation */
64 	spinlock_t xfer_lock;
65 };
66 
67 /**
68  * struct ti_sci_rm_type_map - Structure representing TISCI Resource
69  *				management representation of dev_ids.
70  * @dev_id:	TISCI device ID
71  * @type:	Corresponding id as identified by TISCI RM.
72  *
73  * Note: This is used only as a workaround for using the RM range APIs
74  *	on the AM654 SoC. For future SoCs, dev_id will be used as the type
75  *	for the RM range APIs. In order to maintain ABI backward
76  *	compatibility, the type is not being changed for the AM654 SoC.
77  */
78 struct ti_sci_rm_type_map {
79 	u32 dev_id;
80 	u16 type;
81 };
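/*
 * A minimal sketch of how an SoC description might populate this map.
 * The dev_id and type values below are hypothetical, purely for
 * illustration; the real table lives in the SoC-specific ti_sci_desc:
 *
 *	static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
 *		{ .dev_id = 56,  .type = 0x00b },
 *		{ .dev_id = 179, .type = 0x00c },
 *		{ .dev_id = 0,   .type = 0x000 },
 *	};
 *
 * ti_sci_get_resource_type() below walks such a table until it matches
 * dev_id and stops at the zero dev_id entry, so the table must end with
 * a zeroed terminator.
 */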
82 
83 /**
84  * struct ti_sci_desc - Description of SoC integration
85  * @default_host_id:	Host identifier representing the compute entity
86  * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
87  * @max_msgs: Maximum number of messages that can be pending
88  *		  simultaneously in the system
89  * @max_msg_size: Maximum size of data per message that can be handled.
90  * @rm_type_map: RM resource type mapping structure.
91  */
92 struct ti_sci_desc {
93 	u8 default_host_id;
94 	int max_rx_timeout_ms;
95 	int max_msgs;
96 	int max_msg_size;
97 	struct ti_sci_rm_type_map *rm_type_map;
98 };
99 
100 /**
101  * struct ti_sci_info - Structure representing a TI SCI instance
102  * @dev:	Device pointer
103  * @desc:	SoC description for this instance
104  * @nb:	Reboot Notifier block
105  * @d:		Debugfs file entry
106  * @debug_region: Memory region where the debug message are available
107  * @debug_region_size: Debug region size
108  * @debug_buffer: Buffer allocated to copy debug messages.
109  * @handle:	Instance of TI SCI handle to send to clients.
110  * @cl:		Mailbox Client
111  * @chan_tx:	Transmit mailbox channel
112  * @chan_rx:	Receive mailbox channel
113  * @minfo:	Message info
114  * @node:	list head
115  * @host_id:	Host ID
116  * @users:	Number of users of this instance
117  */
118 struct ti_sci_info {
119 	struct device *dev;
120 	struct notifier_block nb;
121 	const struct ti_sci_desc *desc;
122 	struct dentry *d;
123 	void __iomem *debug_region;
124 	char *debug_buffer;
125 	size_t debug_region_size;
126 	struct ti_sci_handle handle;
127 	struct mbox_client cl;
128 	struct mbox_chan *chan_tx;
129 	struct mbox_chan *chan_rx;
130 	struct ti_sci_xfers_info minfo;
131 	struct list_head node;
132 	u8 host_id;
133 	/* protected by ti_sci_list_mutex */
134 	int users;
136 };
137 
138 #define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
139 #define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
140 #define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
141 
142 #ifdef CONFIG_DEBUG_FS
143 
144 /**
145  * ti_sci_debug_show() - Helper to dump the debug log
146  * @s:	sequence file pointer
147  * @unused:	unused.
148  *
149  * Return: 0
150  */
151 static int ti_sci_debug_show(struct seq_file *s, void *unused)
152 {
153 	struct ti_sci_info *info = s->private;
154 
155 	memcpy_fromio(info->debug_buffer, info->debug_region,
156 		      info->debug_region_size);
157 	/*
158 	 * We don't trust the firmware to NUL-terminate the last byte (hence
159 	 * we have allocated 1 extra zeroed byte). Since we cannot guarantee
160 	 * any specific data format for debug messages, we just present the
161 	 * data in the buffer as is - we expect them to be self-explanatory.
162 	 */
163 	seq_puts(s, info->debug_buffer);
164 	return 0;
165 }
166 
167 /* Provide the log file operations interface */
168 DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
169 
170 /**
171  * ti_sci_debugfs_create() - Create log debug file
172  * @pdev:	platform device pointer
173  * @info:	Pointer to SCI entity information
174  *
175  * Return: 0 if all went fine, else corresponding error.
176  */
177 static int ti_sci_debugfs_create(struct platform_device *pdev,
178 				 struct ti_sci_info *info)
179 {
180 	struct device *dev = &pdev->dev;
181 	struct resource *res;
182 	char debug_name[50] = "ti_sci_debug@";
183 
184 	/* Debug region is optional */
185 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
186 					   "debug_messages");
187 	info->debug_region = devm_ioremap_resource(dev, res);
188 	if (IS_ERR(info->debug_region))
189 		return 0;
190 	info->debug_region_size = resource_size(res);
191 
192 	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
193 					  sizeof(char), GFP_KERNEL);
194 	if (!info->debug_buffer)
195 		return -ENOMEM;
196 	/* Set up NUL termination */
197 	info->debug_buffer[info->debug_region_size] = 0;
198 
199 	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
200 					      sizeof(debug_name) -
201 					      sizeof("ti_sci_debug@")),
202 				      0444, NULL, info, &ti_sci_debug_fops);
203 	if (IS_ERR(info->d))
204 		return PTR_ERR(info->d);
205 
206 	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
207 		info->debug_region, info->debug_region_size, res);
208 	return 0;
209 }
210 
211 /**
212  * ti_sci_debugfs_destroy() - clean up log debug file
213  * @pdev:	platform device pointer
214  * @info:	Pointer to SCI entity information
215  */
216 static void ti_sci_debugfs_destroy(struct platform_device *pdev,
217 				   struct ti_sci_info *info)
218 {
219 	if (IS_ERR(info->debug_region))
220 		return;
221 
222 	debugfs_remove(info->d);
223 }
224 #else /* CONFIG_DEBUG_FS */
225 static inline int ti_sci_debugfs_create(struct platform_device *dev,
226 					struct ti_sci_info *info)
227 {
228 	return 0;
229 }
230 
231 static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
232 					  struct ti_sci_info *info)
233 {
234 }
235 #endif /* CONFIG_DEBUG_FS */
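/*
 * The optional debug region is discovered via a named memory resource.
 * A device-tree sketch of how a platform might provide it, loosely
 * following the ti,sci binding (node name and addresses are
 * illustrative only):
 *
 *	dmsc: dmsc {
 *		compatible = "ti,k2g-sci";
 *		mboxes = <&msgmgr 0>, <&msgmgr 1>;
 *		mbox-names = "rx", "tx";
 *		reg-names = "debug_messages";
 *		reg = <0x02921800 0x800>;
 *	};
 */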
236 
237 /**
238  * ti_sci_dump_header_dbg() - Helper to dump a message header.
239  * @dev:	Device pointer corresponding to the SCI entity
240  * @hdr:	pointer to header.
241  */
242 static inline void ti_sci_dump_header_dbg(struct device *dev,
243 					  struct ti_sci_msg_hdr *hdr)
244 {
245 	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
246 		hdr->type, hdr->host, hdr->seq, hdr->flags);
247 }
248 
249 /**
250  * ti_sci_rx_callback() - mailbox client callback for receive messages
251  * @cl:	client pointer
252  * @m:	mailbox message
253  *
254  * Processes one received message, matches it to the corresponding
255  * transfer information and signals completion of the transfer.
256  *
257  * NOTE: This function will be invoked in IRQ context, hence it should
258  * do as little processing as possible.
259  */
260 static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
261 {
262 	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
263 	struct device *dev = info->dev;
264 	struct ti_sci_xfers_info *minfo = &info->minfo;
265 	struct ti_msgmgr_message *mbox_msg = m;
266 	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
267 	struct ti_sci_xfer *xfer;
268 	u8 xfer_id;
269 
270 	xfer_id = hdr->seq;
271 
272 	/*
273 	 * Are we even expecting this?
274 	 * NOTE: barriers were implicit in locks used for modifying the bitmap
275 	 */
276 	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
277 		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
278 		return;
279 	}
280 
281 	xfer = &minfo->xfer_block[xfer_id];
282 
283 	/* Is the message of valid length? */
284 	if (mbox_msg->len > info->desc->max_msg_size) {
285 		dev_err(dev, "Unable to handle %zu xfer (max %d)\n",
286 			mbox_msg->len, info->desc->max_msg_size);
287 		ti_sci_dump_header_dbg(dev, hdr);
288 		return;
289 	}
290 	if (mbox_msg->len < xfer->rx_len) {
291 		dev_err(dev, "Recv xfer %zu < expected %d length\n",
292 			mbox_msg->len, xfer->rx_len);
293 		ti_sci_dump_header_dbg(dev, hdr);
294 		return;
295 	}
296 
297 	ti_sci_dump_header_dbg(dev, hdr);
298 	/* Take a copy into the rx buffer */
299 	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
300 	complete(&xfer->done);
301 }
302 
303 /**
304  * ti_sci_get_one_xfer() - Allocate one message
305  * @info:	Pointer to SCI entity information
306  * @msg_type:	Message type
307  * @msg_flags:	Flag to set for the message
308  * @tx_message_size: transmit message size
309  * @rx_message_size: receive message size
310  *
311  * Helper function which is used by various command functions that are
312  * exposed to clients of this driver for allocating a message traffic event.
313  *
314  * This function can sleep depending on pending requests already in the system
315  * for the SCI entity. Further, this also holds a spinlock to maintain integrity
316  * of internal data structures.
317  *
318  * Return: Valid ti_sci_xfer pointer on success, else corresponding ERR_PTR.
319  */
320 static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
321 					       u16 msg_type, u32 msg_flags,
322 					       size_t tx_message_size,
323 					       size_t rx_message_size)
324 {
325 	struct ti_sci_xfers_info *minfo = &info->minfo;
326 	struct ti_sci_xfer *xfer;
327 	struct ti_sci_msg_hdr *hdr;
328 	unsigned long flags;
329 	unsigned long bit_pos;
330 	u8 xfer_id;
331 	int ret;
332 	int timeout;
333 
334 	/* Ensure we have sane transfer sizes */
335 	if (rx_message_size > info->desc->max_msg_size ||
336 	    tx_message_size > info->desc->max_msg_size ||
337 	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
338 		return ERR_PTR(-ERANGE);
339 
340 	/*
341 	 * Ensure we have only a controlled number of pending messages.
342 	 * Ideally, we might just have to wait for a single message, but
343 	 * be conservative and wait 5 times that.
344 	 */
345 	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
346 	ret = down_timeout(&minfo->sem_xfer_count, timeout);
347 	if (ret < 0)
348 		return ERR_PTR(ret);
349 
350 	/* Keep the locked section as small as possible */
351 	spin_lock_irqsave(&minfo->xfer_lock, flags);
352 	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
353 				      info->desc->max_msgs);
354 	set_bit(bit_pos, minfo->xfer_alloc_table);
355 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
356 
357 	 * We already ensured in probe that the max number of messages can
358 	 * fit in hdr.seq - NOTE: this improves access latencies
359 	 * to predictable O(1) access, BUT, it opens us to risk if
360 	 * the remote misbehaves with corrupted message sequence responses.
361 	 * If that happens, we are in trouble anyway.
362 	 * If that happens, we are going to be messed up anyways..
363 	 */
364 	xfer_id = (u8)bit_pos;
365 
366 	xfer = &minfo->xfer_block[xfer_id];
367 
368 	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
369 	xfer->tx_message.len = tx_message_size;
370 	xfer->rx_len = (u8)rx_message_size;
371 
372 	reinit_completion(&xfer->done);
373 
374 	hdr->seq = xfer_id;
375 	hdr->type = msg_type;
376 	hdr->host = info->host_id;
377 	hdr->flags = msg_flags;
378 
379 	return xfer;
380 }
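/*
 * A minimal usage sketch of the allocate/transfer/release pattern that
 * every command helper below follows (error handling elided;
 * ti_sci_msg_req_xyz is a placeholder for a real request type):
 *
 *	xfer = ti_sci_get_one_xfer(info, msg_type, flags,
 *				   sizeof(*req), sizeof(*resp));
 *	req = (struct ti_sci_msg_req_xyz *)xfer->xfer_buf;
 *	... fill in the request fields ...
 *	ret = ti_sci_do_xfer(info, xfer);
 *	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
 *	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
 *	ti_sci_put_one_xfer(&info->minfo, xfer);
 */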
381 
382 /**
383  * ti_sci_put_one_xfer() - Release a message
384  * @minfo:	transfer info pointer
385  * @xfer:	message that was reserved by ti_sci_get_one_xfer
386  *
387  * This holds a spinlock to maintain integrity of internal data structures.
388  */
389 static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
390 				struct ti_sci_xfer *xfer)
391 {
392 	unsigned long flags;
393 	struct ti_sci_msg_hdr *hdr;
394 	u8 xfer_id;
395 
396 	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
397 	xfer_id = hdr->seq;
398 
399 	/*
400 	 * NOTE: we might get away with an smp_mb() and no lock here,
401 	 * NOTE: we might escape with smp_mb and no lock here..
402 	 * but just be conservative and symmetric.
403 	 */
404 	spin_lock_irqsave(&minfo->xfer_lock, flags);
405 	clear_bit(xfer_id, minfo->xfer_alloc_table);
406 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
407 
408 	/* Increment the count for the next user to get through */
409 	up(&minfo->sem_xfer_count);
410 }
411 
412 /**
413  * ti_sci_do_xfer() - Do one transfer
414  * @info:	Pointer to SCI entity information
415  * @xfer:	Transfer to initiate and wait for response
416  *
417  * Return: -ETIMEDOUT in case of no response; in case of a transmit
418  *	   error, the corresponding error; else, if all goes well,
419  *	   return 0.
420  */
421 static inline int ti_sci_do_xfer(struct ti_sci_info *info,
422 				 struct ti_sci_xfer *xfer)
423 {
424 	int ret;
425 	int timeout;
426 	struct device *dev = info->dev;
427 
428 	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
429 	if (ret < 0)
430 		return ret;
431 
432 	ret = 0;
433 
434 	/* And we wait for the response. */
435 	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
436 	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
437 		dev_err(dev, "Mbox timed out in resp (caller: %pS)\n",
438 			(void *)_RET_IP_);
439 		ret = -ETIMEDOUT;
440 	}
441 	/*
442 	 * NOTE: we might prefer not to need the mailbox ticker to manage the
443 	 * transfer queueing since the protocol layer queues things by itself.
444 	 * Unfortunately, we have to kick the mailbox framework after we have
445 	 * received our message.
446 	 */
447 	mbox_client_txdone(info->chan_tx, ret);
448 
449 	return ret;
450 }
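/*
 * Note: the mbox_client_txdone() kick above is only valid when the
 * mailbox client is configured for client-driven TX acknowledgment.
 * A sketch of the expected client setup (done at probe time, outside
 * this excerpt):
 *
 *	cl->dev = dev;
 *	cl->tx_block = false;
 *	cl->rx_callback = ti_sci_rx_callback;
 *	cl->knows_txdone = true;
 */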
451 
452 /**
453  * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
454  * @info:	Pointer to SCI entity information
455  *
456  * Updates the SCI information in the internal data structure.
457  *
458  * Return: 0 if all went fine, else return appropriate error.
459  */
460 static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
461 {
462 	struct device *dev = info->dev;
463 	struct ti_sci_handle *handle = &info->handle;
464 	struct ti_sci_version_info *ver = &handle->version;
465 	struct ti_sci_msg_resp_version *rev_info;
466 	struct ti_sci_xfer *xfer;
467 	int ret;
468 
469 	/* No need to set up flags since it is expected to respond */
470 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
471 				   0x0, sizeof(struct ti_sci_msg_hdr),
472 				   sizeof(*rev_info));
473 	if (IS_ERR(xfer)) {
474 		ret = PTR_ERR(xfer);
475 		dev_err(dev, "Message alloc failed(%d)\n", ret);
476 		return ret;
477 	}
478 
479 	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
480 
481 	ret = ti_sci_do_xfer(info, xfer);
482 	if (ret) {
483 		dev_err(dev, "Mbox send fail %d\n", ret);
484 		goto fail;
485 	}
486 
487 	ver->abi_major = rev_info->abi_major;
488 	ver->abi_minor = rev_info->abi_minor;
489 	ver->firmware_revision = rev_info->firmware_revision;
490 	strscpy(ver->firmware_description, rev_info->firmware_description,
491 		sizeof(ver->firmware_description));
492 
493 fail:
494 	ti_sci_put_one_xfer(&info->minfo, xfer);
495 	return ret;
496 }
497 
498 /**
499  * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
500  * @r:	pointer to response buffer
501  *
502  * Return: true if the response was an ACK, else returns false.
503  */
504 static inline bool ti_sci_is_response_ack(void *r)
505 {
506 	struct ti_sci_msg_hdr *hdr = r;
507 
508 	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK;
509 }
510 
511 /**
512  * ti_sci_set_device_state() - Set device state helper
513  * @handle:	pointer to TI SCI handle
514  * @id:		Device identifier
515  * @flags:	flags to setup for the device
516  * @state:	State to move the device to
517  *
518  * Return: 0 if all went well, else returns appropriate error value.
519  */
520 static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
521 				   u32 id, u32 flags, u8 state)
522 {
523 	struct ti_sci_info *info;
524 	struct ti_sci_msg_req_set_device_state *req;
525 	struct ti_sci_msg_hdr *resp;
526 	struct ti_sci_xfer *xfer;
527 	struct device *dev;
528 	int ret = 0;
529 
530 	if (IS_ERR(handle))
531 		return PTR_ERR(handle);
532 	if (!handle)
533 		return -EINVAL;
534 
535 	info = handle_to_ti_sci_info(handle);
536 	dev = info->dev;
537 
538 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
539 				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
540 				   sizeof(*req), sizeof(*resp));
541 	if (IS_ERR(xfer)) {
542 		ret = PTR_ERR(xfer);
543 		dev_err(dev, "Message alloc failed(%d)\n", ret);
544 		return ret;
545 	}
546 	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
547 	req->id = id;
548 	req->state = state;
549 
550 	ret = ti_sci_do_xfer(info, xfer);
551 	if (ret) {
552 		dev_err(dev, "Mbox send fail %d\n", ret);
553 		goto fail;
554 	}
555 
556 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
557 
558 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
559 
560 fail:
561 	ti_sci_put_one_xfer(&info->minfo, xfer);
562 
563 	return ret;
564 }
565 
566 /**
567  * ti_sci_get_device_state() - Get device state helper
568  * @handle:	Handle to the device
569  * @id:		Device Identifier
570  * @clcnt:	Pointer to Context Loss Count
571  * @resets:	pointer to resets
572  * @p_state:	pointer to p_state
573  * @c_state:	pointer to c_state
574  *
575  * Return: 0 if all went fine, else return appropriate error.
576  */
577 static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
578 				   u32 id,  u32 *clcnt,  u32 *resets,
579 				    u8 *p_state,  u8 *c_state)
580 {
581 	struct ti_sci_info *info;
582 	struct ti_sci_msg_req_get_device_state *req;
583 	struct ti_sci_msg_resp_get_device_state *resp;
584 	struct ti_sci_xfer *xfer;
585 	struct device *dev;
586 	int ret = 0;
587 
588 	if (IS_ERR(handle))
589 		return PTR_ERR(handle);
590 	if (!handle)
591 		return -EINVAL;
592 
593 	if (!clcnt && !resets && !p_state && !c_state)
594 		return -EINVAL;
595 
596 	info = handle_to_ti_sci_info(handle);
597 	dev = info->dev;
598 
599 	/* Response is expected, so no need of any flags */
600 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
601 				   0, sizeof(*req), sizeof(*resp));
602 	if (IS_ERR(xfer)) {
603 		ret = PTR_ERR(xfer);
604 		dev_err(dev, "Message alloc failed(%d)\n", ret);
605 		return ret;
606 	}
607 	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
608 	req->id = id;
609 
610 	ret = ti_sci_do_xfer(info, xfer);
611 	if (ret) {
612 		dev_err(dev, "Mbox send fail %d\n", ret);
613 		goto fail;
614 	}
615 
616 	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
617 	if (!ti_sci_is_response_ack(resp)) {
618 		ret = -ENODEV;
619 		goto fail;
620 	}
621 
622 	if (clcnt)
623 		*clcnt = resp->context_loss_count;
624 	if (resets)
625 		*resets = resp->resets;
626 	if (p_state)
627 		*p_state = resp->programmed_state;
628 	if (c_state)
629 		*c_state = resp->current_state;
630 fail:
631 	ti_sci_put_one_xfer(&info->minfo, xfer);
632 
633 	return ret;
634 }
635 
636 /**
637  * ti_sci_cmd_get_device() - command to request for device managed by TISCI
638  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
639  * @id:		Device Identifier
640  *
641  * Request for the device - NOTE: the client MUST maintain integrity of
642  * usage count by balancing get_device with put_device. No refcounting is
643  * managed by the driver for that purpose.
644  *
645  * NOTE: The request is for exclusive access for the processor.
646  *
647  * Return: 0 if all went fine, else return appropriate error.
648  */
649 static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
650 {
651 	return ti_sci_set_device_state(handle, id,
652 				       MSG_FLAG_DEVICE_EXCLUSIVE,
653 				       MSG_DEVICE_SW_STATE_ON);
654 }
655 
656 /**
657  * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
658  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
659  * @id:		Device Identifier
660  *
661  * Request for the device - NOTE: the client MUST maintain integrity of
662  * usage count by balancing get_device with put_device. No refcounting is
663  * managed by the driver for that purpose.
664  *
665  * Return: 0 if all went fine, else return appropriate error.
666  */
667 static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
668 {
669 	return ti_sci_set_device_state(handle, id,
670 				       MSG_FLAG_DEVICE_EXCLUSIVE,
671 				       MSG_DEVICE_SW_STATE_RETENTION);
672 }
673 
674 /**
675  * ti_sci_cmd_put_device() - command to release a device managed by TISCI
676  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
677  * @id:		Device Identifier
678  *
679  * Request for the device - NOTE: the client MUST maintain integrity of
680  * usage count by balancing get_device with put_device. No refcounting is
681  * managed by the driver for that purpose.
682  *
683  * Return: 0 if all went fine, else return appropriate error.
684  */
685 static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
686 {
687 	return ti_sci_set_device_state(handle, id,
688 				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
689 }
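/*
 * Per the notes above, clients must keep get/idle and put balanced
 * themselves. A hypothetical consumer sequence via the handle ops
 * (SOME_DEV_ID stands in for a real TISCI device identifier):
 *
 *	ret = handle->ops.dev_ops.get_device(handle, SOME_DEV_ID);
 *	... use the device ...
 *	ret = handle->ops.dev_ops.put_device(handle, SOME_DEV_ID);
 */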
690 
691 /**
692  * ti_sci_cmd_dev_is_valid() - Is the device valid
693  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
694  * @id:		Device Identifier
695  *
696  * Return: 0 if all went fine and the device ID is valid, else return
697  * appropriate error.
698  */
699 static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
700 {
701 	u8 unused;
702 
703 	/* check the device state which will also tell us if the ID is valid */
704 	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
705 }
706 
707 /**
708  * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
709  * @handle:	Pointer to TISCI handle
710  * @id:		Device Identifier
711  * @count:	Pointer to Context Loss counter to populate
712  *
713  * Return: 0 if all went fine, else return appropriate error.
714  */
715 static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
716 				    u32 *count)
717 {
718 	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
719 }
720 
721 /**
722  * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
723  * @handle:	Pointer to TISCI handle
724  * @id:		Device Identifier
725  * @r_state:	true if requested to be idle
726  *
727  * Return: 0 if all went fine, else return appropriate error.
728  */
729 static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
730 				  bool *r_state)
731 {
732 	int ret;
733 	u8 state;
734 
735 	if (!r_state)
736 		return -EINVAL;
737 
738 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
739 	if (ret)
740 		return ret;
741 
742 	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
743 
744 	return 0;
745 }
746 
747 /**
748  * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
749  * @handle:	Pointer to TISCI handle
750  * @id:		Device Identifier
751  * @r_state:	true if requested to be stopped
752  * @curr_state:	true if currently stopped.
753  *
754  * Return: 0 if all went fine, else return appropriate error.
755  */
756 static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
757 				  bool *r_state,  bool *curr_state)
758 {
759 	int ret;
760 	u8 p_state, c_state;
761 
762 	if (!r_state && !curr_state)
763 		return -EINVAL;
764 
765 	ret = ti_sci_get_device_state(handle, id, NULL, NULL,
766 				      &p_state, &c_state);
767 	if (ret)
768 		return ret;
769 
770 	if (r_state)
771 		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
772 	if (curr_state)
773 		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
774 
775 	return 0;
776 }
777 
778 /**
779  * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
780  * @handle:	Pointer to TISCI handle
781  * @id:		Device Identifier
782  * @r_state:	true if requested to be ON
783  * @curr_state:	true if currently ON and active
784  *
785  * Return: 0 if all went fine, else return appropriate error.
786  */
787 static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
788 				bool *r_state,  bool *curr_state)
789 {
790 	int ret;
791 	u8 p_state, c_state;
792 
793 	if (!r_state && !curr_state)
794 		return -EINVAL;
795 
796 	ret = ti_sci_get_device_state(handle, id, NULL, NULL,
797 				      &p_state, &c_state);
798 	if (ret)
799 		return ret;
800 
801 	if (r_state)
802 		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
803 	if (curr_state)
804 		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
805 
806 	return 0;
807 }
808 
809 /**
810  * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
811  * @handle:	Pointer to TISCI handle
812  * @id:		Device Identifier
813  * @curr_state:	true if currently transitioning.
814  *
815  * Return: 0 if all went fine, else return appropriate error.
816  */
817 static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
818 				   bool *curr_state)
819 {
820 	int ret;
821 	u8 state;
822 
823 	if (!curr_state)
824 		return -EINVAL;
825 
826 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
827 	if (ret)
828 		return ret;
829 
830 	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
831 
832 	return 0;
833 }
834 
835 /**
836  * ti_sci_cmd_set_device_resets() - command to set resets for device managed
837  *				    by TISCI
838  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
839  * @id:		Device Identifier
840  * @reset_state: Device specific reset bit field
841  *
842  * Return: 0 if all went fine, else return appropriate error.
843  */
844 static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
845 					u32 id, u32 reset_state)
846 {
847 	struct ti_sci_info *info;
848 	struct ti_sci_msg_req_set_device_resets *req;
849 	struct ti_sci_msg_hdr *resp;
850 	struct ti_sci_xfer *xfer;
851 	struct device *dev;
852 	int ret = 0;
853 
854 	if (IS_ERR(handle))
855 		return PTR_ERR(handle);
856 	if (!handle)
857 		return -EINVAL;
858 
859 	info = handle_to_ti_sci_info(handle);
860 	dev = info->dev;
861 
862 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
863 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
864 				   sizeof(*req), sizeof(*resp));
865 	if (IS_ERR(xfer)) {
866 		ret = PTR_ERR(xfer);
867 		dev_err(dev, "Message alloc failed(%d)\n", ret);
868 		return ret;
869 	}
870 	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
871 	req->id = id;
872 	req->resets = reset_state;
873 
874 	ret = ti_sci_do_xfer(info, xfer);
875 	if (ret) {
876 		dev_err(dev, "Mbox send fail %d\n", ret);
877 		goto fail;
878 	}
879 
880 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
881 
882 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
883 
884 fail:
885 	ti_sci_put_one_xfer(&info->minfo, xfer);
886 
887 	return ret;
888 }
889 
890 /**
891  * ti_sci_cmd_get_device_resets() - Get reset state for device managed
892  *				    by TISCI
893  * @handle:		Pointer to TISCI handle
894  * @id:			Device Identifier
895  * @reset_state:	Pointer to reset state to populate
896  *
897  * Return: 0 if all went fine, else return appropriate error.
898  */
899 static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
900 					u32 id, u32 *reset_state)
901 {
902 	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
903 				       NULL);
904 }
905 
906 /**
907  * ti_sci_set_clock_state() - Set clock state helper
908  * @handle:	pointer to TI SCI handle
909  * @dev_id:	Device identifier this request is for
910  * @clk_id:	Clock identifier for the device for this request.
911  *		Each device has its own set of clock inputs. This indexes
912  *		which clock input to modify.
913  * @flags:	Header flags as needed
914  * @state:	State to request for the clock.
915  *
916  * Return: 0 if all went well, else returns appropriate error value.
917  */
918 static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
919 				  u32 dev_id, u8 clk_id,
920 				  u32 flags, u8 state)
921 {
922 	struct ti_sci_info *info;
923 	struct ti_sci_msg_req_set_clock_state *req;
924 	struct ti_sci_msg_hdr *resp;
925 	struct ti_sci_xfer *xfer;
926 	struct device *dev;
927 	int ret = 0;
928 
929 	if (IS_ERR(handle))
930 		return PTR_ERR(handle);
931 	if (!handle)
932 		return -EINVAL;
933 
934 	info = handle_to_ti_sci_info(handle);
935 	dev = info->dev;
936 
937 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
938 				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
939 				   sizeof(*req), sizeof(*resp));
940 	if (IS_ERR(xfer)) {
941 		ret = PTR_ERR(xfer);
942 		dev_err(dev, "Message alloc failed(%d)\n", ret);
943 		return ret;
944 	}
945 	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
946 	req->dev_id = dev_id;
947 	req->clk_id = clk_id;
948 	req->request_state = state;
949 
950 	ret = ti_sci_do_xfer(info, xfer);
951 	if (ret) {
952 		dev_err(dev, "Mbox send fail %d\n", ret);
953 		goto fail;
954 	}
955 
956 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
957 
958 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
959 
960 fail:
961 	ti_sci_put_one_xfer(&info->minfo, xfer);
962 
963 	return ret;
964 }
965 
966 /**
967  * ti_sci_cmd_get_clock_state() - Get clock state helper
968  * @handle:	pointer to TI SCI handle
969  * @dev_id:	Device identifier this request is for
970  * @clk_id:	Clock identifier for the device for this request.
971  *		Each device has its own set of clock inputs. This indexes
972  *		which clock input to modify.
973  * @programmed_state:	State requested for clock to move to
974  * @current_state:	State that the clock is currently in
975  *
976  * Return: 0 if all went well, else returns appropriate error value.
977  */
978 static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
979 				      u32 dev_id, u8 clk_id,
980 				      u8 *programmed_state, u8 *current_state)
981 {
982 	struct ti_sci_info *info;
983 	struct ti_sci_msg_req_get_clock_state *req;
984 	struct ti_sci_msg_resp_get_clock_state *resp;
985 	struct ti_sci_xfer *xfer;
986 	struct device *dev;
987 	int ret = 0;
988 
989 	if (IS_ERR(handle))
990 		return PTR_ERR(handle);
991 	if (!handle)
992 		return -EINVAL;
993 
994 	if (!programmed_state && !current_state)
995 		return -EINVAL;
996 
997 	info = handle_to_ti_sci_info(handle);
998 	dev = info->dev;
999 
1000 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
1001 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1002 				   sizeof(*req), sizeof(*resp));
1003 	if (IS_ERR(xfer)) {
1004 		ret = PTR_ERR(xfer);
1005 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1006 		return ret;
1007 	}
1008 	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
1009 	req->dev_id = dev_id;
1010 	req->clk_id = clk_id;
1011 
1012 	ret = ti_sci_do_xfer(info, xfer);
1013 	if (ret) {
1014 		dev_err(dev, "Mbox send fail %d\n", ret);
1015 		goto fail;
1016 	}
1017 
1018 	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
1019 
1020 	if (!ti_sci_is_response_ack(resp)) {
1021 		ret = -ENODEV;
1022 		goto fail;
1023 	}
1024 
1025 	if (programmed_state)
1026 		*programmed_state = resp->programmed_state;
1027 	if (current_state)
1028 		*current_state = resp->current_state;
1029 
1030 fail:
1031 	ti_sci_put_one_xfer(&info->minfo, xfer);
1032 
1033 	return ret;
1034 }
1035 
1036 /**
1037  * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
1038  * @handle:	pointer to TI SCI handle
1039  * @dev_id:	Device identifier this request is for
1040  * @clk_id:	Clock identifier for the device for this request.
1041  *		Each device has its own set of clock inputs. This indexes
1042  *		which clock input to modify.
1043  * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
1044  * @can_change_freq: 'true' if frequency change is desired, else 'false'
1045  * @enable_input_term: 'true' if input termination is desired, else 'false'
1046  *
1047  * Return: 0 if all went well, else returns appropriate error value.
1048  */
1049 static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
1050 				u8 clk_id, bool needs_ssc, bool can_change_freq,
1051 				bool enable_input_term)
1052 {
1053 	u32 flags = 0;
1054 
1055 	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
1056 	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
1057 	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
1058 
1059 	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
1060 				      MSG_CLOCK_SW_STATE_REQ);
1061 }
1062 
1063 /**
1064  * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
1065  * @handle:	pointer to TI SCI handle
1066  * @dev_id:	Device identifier this request is for
1067  * @clk_id:	Clock identifier for the device for this request.
1068  *		Each device has its own set of clock inputs. This indexes
1069  *		which clock input to modify.
1070  *
1071  * NOTE: This clock must have been requested by get_clock previously.
1072  *
1073  * Return: 0 if all went well, else returns appropriate error value.
1074  */
1075 static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
1076 				 u32 dev_id, u8 clk_id)
1077 {
1078 	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1079 				      MSG_CLOCK_SW_STATE_UNREQ);
1080 }
1081 
1082 /**
1083  * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
1084  * @handle:	pointer to TI SCI handle
1085  * @dev_id:	Device identifier this request is for
1086  * @clk_id:	Clock identifier for the device for this request.
1087  *		Each device has its own set of clock inputs. This indexes
1088  *		which clock input to modify.
1089  *
1090  * NOTE: This clock must have been requested by get_clock previously.
1091  *
1092  * Return: 0 if all went well, else returns appropriate error value.
1093  */
1094 static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
1095 				u32 dev_id, u8 clk_id)
1096 {
1097 	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1098 				      MSG_CLOCK_SW_STATE_AUTO);
1099 }
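/*
 * The three clock state helpers above form a simple lifecycle. A
 * hypothetical consumer flow via the handle ops (SOME_DEV_ID and
 * SOME_CLK_ID are placeholders):
 *
 *	ret = handle->ops.clk_ops.get_clock(handle, SOME_DEV_ID,
 *					    SOME_CLK_ID, false, false, false);
 *	... clock is requested and kept enabled ...
 *	ret = handle->ops.clk_ops.idle_clock(handle, SOME_DEV_ID, SOME_CLK_ID);
 *	ret = handle->ops.clk_ops.put_clock(handle, SOME_DEV_ID, SOME_CLK_ID);
 */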
1100 
1101 /**
1102  * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
1103  * @handle:	pointer to TI SCI handle
1104  * @dev_id:	Device identifier this request is for
1105  * @clk_id:	Clock identifier for the device for this request.
1106  *		Each device has its own set of clock inputs. This indexes
1107  *		which clock input to modify.
1108  * @req_state: state indicating if the clock is auto managed
1109  *
1110  * Return: 0 if all went well, else returns appropriate error value.
1111  */
1112 static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
1113 				  u32 dev_id, u8 clk_id, bool *req_state)
1114 {
1115 	u8 state = 0;
1116 	int ret;
1117 
1118 	if (!req_state)
1119 		return -EINVAL;
1120 
1121 	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
1122 	if (ret)
1123 		return ret;
1124 
1125 	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
1126 	return 0;
1127 }
1128 
1129 /**
1130  * ti_sci_cmd_clk_is_on() - Is the clock ON
1131  * @handle:	pointer to TI SCI handle
1132  * @dev_id:	Device identifier this request is for
1133  * @clk_id:	Clock identifier for the device for this request.
1134  *		Each device has its own set of clock inputs. This indexes
1135  *		which clock input to modify.
1136  * @req_state: state indicating if the clock is managed by us and enabled
1137  * @curr_state: state indicating if the clock is ready for operation
1138  *
1139  * Return: 0 if all went well, else returns appropriate error value.
1140  */
1141 static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
1142 				u8 clk_id, bool *req_state, bool *curr_state)
1143 {
1144 	u8 c_state = 0, r_state = 0;
1145 	int ret;
1146 
1147 	if (!req_state && !curr_state)
1148 		return -EINVAL;
1149 
1150 	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1151 					 &r_state, &c_state);
1152 	if (ret)
1153 		return ret;
1154 
1155 	if (req_state)
1156 		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
1157 	if (curr_state)
1158 		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
1159 	return 0;
1160 }
1161 
1162 /**
1163  * ti_sci_cmd_clk_is_off() - Is the clock OFF
1164  * @handle:	pointer to TI SCI handle
1165  * @dev_id:	Device identifier this request is for
1166  * @clk_id:	Clock identifier for the device for this request.
1167  *		Each device has its own set of clock inputs. This indexes
1168  *		which clock input to modify.
1169  * @req_state: state indicating if the clock is managed by us and disabled
1170  * @curr_state: state indicating if the clock is NOT ready for operation
1171  *
1172  * Return: 0 if all went well, else returns appropriate error value.
1173  */
1174 static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
1175 				 u8 clk_id, bool *req_state, bool *curr_state)
1176 {
1177 	u8 c_state = 0, r_state = 0;
1178 	int ret;
1179 
1180 	if (!req_state && !curr_state)
1181 		return -EINVAL;
1182 
1183 	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1184 					 &r_state, &c_state);
1185 	if (ret)
1186 		return ret;
1187 
1188 	if (req_state)
1189 		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
1190 	if (curr_state)
1191 		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
1192 	return 0;
1193 }
1194 
1195 /**
1196  * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
1197  * @handle:	pointer to TI SCI handle
1198  * @dev_id:	Device identifier this request is for
1199  * @clk_id:	Clock identifier for the device for this request.
1200  *		Each device has its own set of clock inputs. This indexes
1201  *		which clock input to modify.
1202  * @parent_id:	Parent clock identifier to set
1203  *
1204  * Return: 0 if all went well, else returns appropriate error value.
1205  */
1206 static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
1207 				     u32 dev_id, u8 clk_id, u8 parent_id)
1208 {
1209 	struct ti_sci_info *info;
1210 	struct ti_sci_msg_req_set_clock_parent *req;
1211 	struct ti_sci_msg_hdr *resp;
1212 	struct ti_sci_xfer *xfer;
1213 	struct device *dev;
1214 	int ret = 0;
1215 
1216 	if (IS_ERR(handle))
1217 		return PTR_ERR(handle);
1218 	if (!handle)
1219 		return -EINVAL;
1220 
1221 	info = handle_to_ti_sci_info(handle);
1222 	dev = info->dev;
1223 
1224 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
1225 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1226 				   sizeof(*req), sizeof(*resp));
1227 	if (IS_ERR(xfer)) {
1228 		ret = PTR_ERR(xfer);
1229 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1230 		return ret;
1231 	}
1232 	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
1233 	req->dev_id = dev_id;
1234 	req->clk_id = clk_id;
1235 	req->parent_id = parent_id;
1236 
1237 	ret = ti_sci_do_xfer(info, xfer);
1238 	if (ret) {
1239 		dev_err(dev, "Mbox send fail %d\n", ret);
1240 		goto fail;
1241 	}
1242 
1243 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1244 
1245 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1246 
1247 fail:
1248 	ti_sci_put_one_xfer(&info->minfo, xfer);
1249 
1250 	return ret;
1251 }
1252 
1253 /**
1254  * ti_sci_cmd_clk_get_parent() - Get current parent clock source
1255  * @handle:	pointer to TI SCI handle
1256  * @dev_id:	Device identifier this request is for
1257  * @clk_id:	Clock identifier for the device for this request.
1258  *		Each device has its own set of clock inputs. This indexes
1259  *		which clock input to modify.
1260  * @parent_id:	Current clock parent
1261  *
1262  * Return: 0 if all went well, else returns appropriate error value.
1263  */
1264 static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
1265 				     u32 dev_id, u8 clk_id, u8 *parent_id)
1266 {
1267 	struct ti_sci_info *info;
1268 	struct ti_sci_msg_req_get_clock_parent *req;
1269 	struct ti_sci_msg_resp_get_clock_parent *resp;
1270 	struct ti_sci_xfer *xfer;
1271 	struct device *dev;
1272 	int ret = 0;
1273 
1274 	if (IS_ERR(handle))
1275 		return PTR_ERR(handle);
1276 	if (!handle || !parent_id)
1277 		return -EINVAL;
1278 
1279 	info = handle_to_ti_sci_info(handle);
1280 	dev = info->dev;
1281 
1282 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
1283 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1284 				   sizeof(*req), sizeof(*resp));
1285 	if (IS_ERR(xfer)) {
1286 		ret = PTR_ERR(xfer);
1287 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1288 		return ret;
1289 	}
1290 	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
1291 	req->dev_id = dev_id;
1292 	req->clk_id = clk_id;
1293 
1294 	ret = ti_sci_do_xfer(info, xfer);
1295 	if (ret) {
1296 		dev_err(dev, "Mbox send fail %d\n", ret);
1297 		goto fail;
1298 	}
1299 
1300 	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
1301 
1302 	if (!ti_sci_is_response_ack(resp))
1303 		ret = -ENODEV;
1304 	else
1305 		*parent_id = resp->parent_id;
1306 
1307 fail:
1308 	ti_sci_put_one_xfer(&info->minfo, xfer);
1309 
1310 	return ret;
1311 }
1312 
1313 /**
1314  * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
1315  * @handle:	pointer to TI SCI handle
1316  * @dev_id:	Device identifier this request is for
1317  * @clk_id:	Clock identifier for the device for this request.
1318  *		Each device has its own set of clock inputs. This indexes
1319  *		which clock input to modify.
1320  * @num_parents: Returns the number of parents of the current clock.
1321  *
1322  * Return: 0 if all went well, else returns appropriate error value.
1323  */
1324 static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
1325 					  u32 dev_id, u8 clk_id,
1326 					  u8 *num_parents)
1327 {
1328 	struct ti_sci_info *info;
1329 	struct ti_sci_msg_req_get_clock_num_parents *req;
1330 	struct ti_sci_msg_resp_get_clock_num_parents *resp;
1331 	struct ti_sci_xfer *xfer;
1332 	struct device *dev;
1333 	int ret = 0;
1334 
1335 	if (IS_ERR(handle))
1336 		return PTR_ERR(handle);
1337 	if (!handle || !num_parents)
1338 		return -EINVAL;
1339 
1340 	info = handle_to_ti_sci_info(handle);
1341 	dev = info->dev;
1342 
1343 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
1344 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1345 				   sizeof(*req), sizeof(*resp));
1346 	if (IS_ERR(xfer)) {
1347 		ret = PTR_ERR(xfer);
1348 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1349 		return ret;
1350 	}
1351 	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
1352 	req->dev_id = dev_id;
1353 	req->clk_id = clk_id;
1354 
1355 	ret = ti_sci_do_xfer(info, xfer);
1356 	if (ret) {
1357 		dev_err(dev, "Mbox send fail %d\n", ret);
1358 		goto fail;
1359 	}
1360 
1361 	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
1362 
1363 	if (!ti_sci_is_response_ack(resp))
1364 		ret = -ENODEV;
1365 	else
1366 		*num_parents = resp->num_parents;
1367 
1368 fail:
1369 	ti_sci_put_one_xfer(&info->minfo, xfer);
1370 
1371 	return ret;
1372 }
1373 
1374 /**
1375  * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
1376  * @handle:	pointer to TI SCI handle
1377  * @dev_id:	Device identifier this request is for
1378  * @clk_id:	Clock identifier for the device for this request.
1379  *		Each device has its own set of clock inputs. This indexes
1380  *		which clock input to modify.
1381  * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
1382  *		allowable programmed frequency and does not account for clock
1383  *		tolerances and jitter.
1384  * @target_freq: The target clock frequency in Hz. A frequency will be
1385  *		processed as close to this target frequency as possible.
1386  * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
1387  *		allowable programmed frequency and does not account for clock
1388  *		tolerances and jitter.
1389  * @match_freq:	Matched frequency in Hz returned in the response.
1390  *
1391  * Return: 0 if all went well, else returns appropriate error value.
1392  */
1393 static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
1394 					 u32 dev_id, u8 clk_id, u64 min_freq,
1395 					 u64 target_freq, u64 max_freq,
1396 					 u64 *match_freq)
1397 {
1398 	struct ti_sci_info *info;
1399 	struct ti_sci_msg_req_query_clock_freq *req;
1400 	struct ti_sci_msg_resp_query_clock_freq *resp;
1401 	struct ti_sci_xfer *xfer;
1402 	struct device *dev;
1403 	int ret = 0;
1404 
1405 	if (IS_ERR(handle))
1406 		return PTR_ERR(handle);
1407 	if (!handle || !match_freq)
1408 		return -EINVAL;
1409 
1410 	info = handle_to_ti_sci_info(handle);
1411 	dev = info->dev;
1412 
1413 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
1414 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1415 				   sizeof(*req), sizeof(*resp));
1416 	if (IS_ERR(xfer)) {
1417 		ret = PTR_ERR(xfer);
1418 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1419 		return ret;
1420 	}
1421 	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
1422 	req->dev_id = dev_id;
1423 	req->clk_id = clk_id;
1424 	req->min_freq_hz = min_freq;
1425 	req->target_freq_hz = target_freq;
1426 	req->max_freq_hz = max_freq;
1427 
1428 	ret = ti_sci_do_xfer(info, xfer);
1429 	if (ret) {
1430 		dev_err(dev, "Mbox send fail %d\n", ret);
1431 		goto fail;
1432 	}
1433 
1434 	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
1435 
1436 	if (!ti_sci_is_response_ack(resp))
1437 		ret = -ENODEV;
1438 	else
1439 		*match_freq = resp->freq_hz;
1440 
1441 fail:
1442 	ti_sci_put_one_xfer(&info->minfo, xfer);
1443 
1444 	return ret;
1445 }
1446 
1447 /**
1448  * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
1449  * @handle:	pointer to TI SCI handle
1450  * @dev_id:	Device identifier this request is for
1451  * @clk_id:	Clock identifier for the device for this request.
1452  *		Each device has its own set of clock inputs. This indexes
1453  *		which clock input to modify.
1454  * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
1455  *		allowable programmed frequency and does not account for clock
1456  *		tolerances and jitter.
1457  * @target_freq: The target clock frequency in Hz. A frequency will be
1458  *		processed as close to this target frequency as possible.
1459  * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
1460  *		allowable programmed frequency and does not account for clock
1461  *		tolerances and jitter.
1462  *
1463  * Return: 0 if all went well, else returns appropriate error value.
1464  */
1465 static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
1466 				   u32 dev_id, u8 clk_id, u64 min_freq,
1467 				   u64 target_freq, u64 max_freq)
1468 {
1469 	struct ti_sci_info *info;
1470 	struct ti_sci_msg_req_set_clock_freq *req;
1471 	struct ti_sci_msg_hdr *resp;
1472 	struct ti_sci_xfer *xfer;
1473 	struct device *dev;
1474 	int ret = 0;
1475 
1476 	if (IS_ERR(handle))
1477 		return PTR_ERR(handle);
1478 	if (!handle)
1479 		return -EINVAL;
1480 
1481 	info = handle_to_ti_sci_info(handle);
1482 	dev = info->dev;
1483 
1484 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
1485 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1486 				   sizeof(*req), sizeof(*resp));
1487 	if (IS_ERR(xfer)) {
1488 		ret = PTR_ERR(xfer);
1489 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1490 		return ret;
1491 	}
1492 	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
1493 	req->dev_id = dev_id;
1494 	req->clk_id = clk_id;
1495 	req->min_freq_hz = min_freq;
1496 	req->target_freq_hz = target_freq;
1497 	req->max_freq_hz = max_freq;
1498 
1499 	ret = ti_sci_do_xfer(info, xfer);
1500 	if (ret) {
1501 		dev_err(dev, "Mbox send fail %d\n", ret);
1502 		goto fail;
1503 	}
1504 
1505 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1506 
1507 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1508 
1509 fail:
1510 	ti_sci_put_one_xfer(&info->minfo, xfer);
1511 
1512 	return ret;
1513 }
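/*
 * A sketch of how the query and set helpers combine: ask the firmware
 * for the closest achievable frequency within a window, then program
 * it. The +/-10% window below is purely illustrative:
 *
 *	u64 match;
 *
 *	ret = ti_sci_cmd_clk_get_match_freq(handle, dev_id, clk_id,
 *					    target - target / 10, target,
 *					    target + target / 10, &match);
 *	if (!ret)
 *		ret = ti_sci_cmd_clk_set_freq(handle, dev_id, clk_id,
 *					      target - target / 10, match,
 *					      target + target / 10);
 */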
1514 
1515 /**
1516  * ti_sci_cmd_clk_get_freq() - Get current frequency
1517  * @handle:	pointer to TI SCI handle
1518  * @dev_id:	Device identifier this request is for
1519  * @clk_id:	Clock identifier for the device for this request.
1520  *		Each device has its own set of clock inputs. This indexes
1521  *		which clock input to modify.
1522  * @freq:	Current frequency in Hz
1523  *
1524  * Return: 0 if all went well, else returns appropriate error value.
1525  */
1526 static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1527 				   u32 dev_id, u8 clk_id, u64 *freq)
1528 {
1529 	struct ti_sci_info *info;
1530 	struct ti_sci_msg_req_get_clock_freq *req;
1531 	struct ti_sci_msg_resp_get_clock_freq *resp;
1532 	struct ti_sci_xfer *xfer;
1533 	struct device *dev;
1534 	int ret = 0;
1535 
1536 	if (IS_ERR(handle))
1537 		return PTR_ERR(handle);
1538 	if (!handle || !freq)
1539 		return -EINVAL;
1540 
1541 	info = handle_to_ti_sci_info(handle);
1542 	dev = info->dev;
1543 
1544 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1545 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1546 				   sizeof(*req), sizeof(*resp));
1547 	if (IS_ERR(xfer)) {
1548 		ret = PTR_ERR(xfer);
1549 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1550 		return ret;
1551 	}
1552 	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
1553 	req->dev_id = dev_id;
1554 	req->clk_id = clk_id;
1555 
1556 	ret = ti_sci_do_xfer(info, xfer);
1557 	if (ret) {
1558 		dev_err(dev, "Mbox send fail %d\n", ret);
1559 		goto fail;
1560 	}
1561 
1562 	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
1563 
1564 	if (!ti_sci_is_response_ack(resp))
1565 		ret = -ENODEV;
1566 	else
1567 		*freq = resp->freq_hz;
1568 
1569 fail:
1570 	ti_sci_put_one_xfer(&info->minfo, xfer);
1571 
1572 	return ret;
1573 }
1574 
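/**
 * ti_sci_cmd_core_reboot() - Command to request system reset
 * @handle:	pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */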
1575 static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
1576 {
1577 	struct ti_sci_info *info;
1578 	struct ti_sci_msg_req_reboot *req;
1579 	struct ti_sci_msg_hdr *resp;
1580 	struct ti_sci_xfer *xfer;
1581 	struct device *dev;
1582 	int ret = 0;
1583 
1584 	if (IS_ERR(handle))
1585 		return PTR_ERR(handle);
1586 	if (!handle)
1587 		return -EINVAL;
1588 
1589 	info = handle_to_ti_sci_info(handle);
1590 	dev = info->dev;
1591 
1592 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
1593 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1594 				   sizeof(*req), sizeof(*resp));
1595 	if (IS_ERR(xfer)) {
1596 		ret = PTR_ERR(xfer);
1597 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1598 		return ret;
1599 	}
1600 	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
1601 
1602 	ret = ti_sci_do_xfer(info, xfer);
1603 	if (ret) {
1604 		dev_err(dev, "Mbox send fail %d\n", ret);
1605 		goto fail;
1606 	}
1607 
1608 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1609 
1610 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1614 
1615 fail:
1616 	ti_sci_put_one_xfer(&info->minfo, xfer);
1617 
1618 	return ret;
1619 }
1620 
1621 static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
1622 				    u16 *type)
1623 {
1624 	struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
1625 	bool found = false;
1626 	int i;
1627 
1628 	/* If the map is not provided, then assume dev_id is used as the type */
1629 	if (!rm_type_map) {
1630 		*type = dev_id;
1631 		return 0;
1632 	}
1633 
1634 	for (i = 0; rm_type_map[i].dev_id; i++) {
1635 		if (rm_type_map[i].dev_id == dev_id) {
1636 			*type = rm_type_map[i].type;
1637 			found = true;
1638 			break;
1639 		}
1640 	}
1641 
1642 	if (!found)
1643 		return -EINVAL;
1644 
1645 	return 0;
1646 }
1647 
1648 /**
1649  * ti_sci_get_resource_range() - Helper to get a range of resources assigned
1650  *			       to a host. Resource is uniquely identified by
1651  *			       type and subtype.
1652  * @handle:		Pointer to TISCI handle.
1653  * @dev_id:		TISCI device ID.
1654  * @subtype:		Resource assignment subtype that is being requested
1655  *			from the given device.
1656  * @s_host:		Host processor ID to which the resources are allocated
1657  * @range_start:	Start index of the resource range
1658  * @range_num:		Number of resources in the range
1659  *
1660  * Return: 0 if all went fine, else return appropriate error.
1661  */
1662 static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1663 				     u32 dev_id, u8 subtype, u8 s_host,
1664 				     u16 *range_start, u16 *range_num)
1665 {
1666 	struct ti_sci_msg_resp_get_resource_range *resp;
1667 	struct ti_sci_msg_req_get_resource_range *req;
1668 	struct ti_sci_xfer *xfer;
1669 	struct ti_sci_info *info;
1670 	struct device *dev;
1671 	u16 type;
1672 	int ret = 0;
1673 
1674 	if (IS_ERR(handle))
1675 		return PTR_ERR(handle);
1676 	if (!handle)
1677 		return -EINVAL;
1678 
1679 	info = handle_to_ti_sci_info(handle);
1680 	dev = info->dev;
1681 
1682 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1683 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1684 				   sizeof(*req), sizeof(*resp));
1685 	if (IS_ERR(xfer)) {
1686 		ret = PTR_ERR(xfer);
1687 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1688 		return ret;
1689 	}
1690 
1691 	ret = ti_sci_get_resource_type(info, dev_id, &type);
1692 	if (ret) {
1693 		dev_err(dev, "rm type lookup failed for %u\n", dev_id);
1694 		goto fail;
1695 	}
1696 
1697 	req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
1698 	req->secondary_host = s_host;
1699 	req->type = type & MSG_RM_RESOURCE_TYPE_MASK;
1700 	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1701 
1702 	ret = ti_sci_do_xfer(info, xfer);
1703 	if (ret) {
1704 		dev_err(dev, "Mbox send fail %d\n", ret);
1705 		goto fail;
1706 	}
1707 
1708 	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
1709 
1710 	if (!ti_sci_is_response_ack(resp)) {
1711 		ret = -ENODEV;
1712 	} else if (!resp->range_start && !resp->range_num) {
1713 		ret = -ENODEV;
1714 	} else {
1715 		*range_start = resp->range_start;
1716 		*range_num = resp->range_num;
1717 	}
1718 
1719 fail:
1720 	ti_sci_put_one_xfer(&info->minfo, xfer);
1721 
1722 	return ret;
1723 }
1724 
1725 /**
1726  * ti_sci_cmd_get_resource_range() - Get a range of resources assigned
1727  *				   to the same host as the TI SCI interface host.
1728  * @handle:		Pointer to TISCI handle.
1729  * @dev_id:		TISCI device ID.
1730  * @subtype:		Resource assignment subtype that is being requested
1731  *			from the given device.
1732  * @range_start:	Start index of the resource range
1733  * @range_num:		Number of resources in the range
1734  *
1735  * Return: 0 if all went fine, else return appropriate error.
1736  */
1737 static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1738 					 u32 dev_id, u8 subtype,
1739 					 u16 *range_start, u16 *range_num)
1740 {
1741 	return ti_sci_get_resource_range(handle, dev_id, subtype,
1742 					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1743 					 range_start, range_num);
1744 }
1745 
1746 /**
1747  * ti_sci_cmd_get_resource_range_from_shost() - Get a range of resources
1748  *					      assigned to a specified host.
1749  * @handle:		Pointer to TISCI handle.
1750  * @dev_id:		TISCI device ID.
1751  * @subtype:		Resource assignment subtype that is being requested
1752  *			from the given device.
1753  * @s_host:		Host processor ID to which the resources are allocated
1754  * @range_start:	Start index of the resource range
1755  * @range_num:		Number of resources in the range
1756  *
1757  * Return: 0 if all went fine, else return appropriate error.
1758  */
1759 static
1760 int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1761 					     u32 dev_id, u8 subtype, u8 s_host,
1762 					     u16 *range_start, u16 *range_num)
1763 {
1764 	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1765 					 range_start, range_num);
1766 }
1767 
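/*
 * Usage sketch (hypothetical, not part of this driver): a client that
 * already holds a TISCI handle queries the resource range assigned to it
 * through the rm_core_ops installed by ti_sci_setup_ops() below; dev_id
 * and subtype here are placeholder values, not real allocations.
 *
 *	u16 start, num;
 *	int ret;
 *
 *	ret = handle->ops.rm_core_ops.get_range(handle, dev_id, subtype,
 *						&start, &num);
 *	if (!ret)
 *		dev_dbg(dev, "range [%u, %u)\n", start, start + num);
 */
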
1768 /**
1769  * ti_sci_manage_irq() - Helper API to configure/release the irq route between
1770  *			 the requested source and destination
1771  * @handle:		Pointer to TISCI handle.
1772  * @valid_params:	Bit fields defining the validity of certain params
1773  * @src_id:		Device ID of the IRQ source
1774  * @src_index:		IRQ source index within the source device
1775  * @dst_id:		Device ID of the IRQ destination
1776  * @dst_host_irq:	IRQ number of the destination device
1777  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1778  * @vint:		Virtual interrupt to be used within the IA
1779  * @global_event:	Global event number to be used for the requesting event
1780  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1781  * @s_host:		Secondary host ID for which the irq/event is being
1782  *			requested.
1783  * @type:		Request type: irq set or release.
1784  *
1785  * Return: 0 if all went fine, else return appropriate error.
1786  */
1787 static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
1788 			     u32 valid_params, u16 src_id, u16 src_index,
1789 			     u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
1790 			     u16 global_event, u8 vint_status_bit, u8 s_host,
1791 			     u16 type)
1792 {
1793 	struct ti_sci_msg_req_manage_irq *req;
1794 	struct ti_sci_msg_hdr *resp;
1795 	struct ti_sci_xfer *xfer;
1796 	struct ti_sci_info *info;
1797 	struct device *dev;
1798 	int ret = 0;
1799 
1800 	if (IS_ERR(handle))
1801 		return PTR_ERR(handle);
1802 	if (!handle)
1803 		return -EINVAL;
1804 
1805 	info = handle_to_ti_sci_info(handle);
1806 	dev = info->dev;
1807 
1808 	xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1809 				   sizeof(*req), sizeof(*resp));
1810 	if (IS_ERR(xfer)) {
1811 		ret = PTR_ERR(xfer);
1812 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1813 		return ret;
1814 	}
1815 	req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
1816 	req->valid_params = valid_params;
1817 	req->src_id = src_id;
1818 	req->src_index = src_index;
1819 	req->dst_id = dst_id;
1820 	req->dst_host_irq = dst_host_irq;
1821 	req->ia_id = ia_id;
1822 	req->vint = vint;
1823 	req->global_event = global_event;
1824 	req->vint_status_bit = vint_status_bit;
1825 	req->secondary_host = s_host;
1826 
1827 	ret = ti_sci_do_xfer(info, xfer);
1828 	if (ret) {
1829 		dev_err(dev, "Mbox send fail %d\n", ret);
1830 		goto fail;
1831 	}
1832 
1833 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1834 
1835 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1836 
1837 fail:
1838 	ti_sci_put_one_xfer(&info->minfo, xfer);
1839 
1840 	return ret;
1841 }
1842 
1843 /**
1844  * ti_sci_set_irq() - Helper API to configure the irq route between the
1845  *		      requested source and destination
1846  * @handle:		Pointer to TISCI handle.
1847  * @valid_params:	Bit fields defining the validity of certain params
1848  * @src_id:		Device ID of the IRQ source
1849  * @src_index:		IRQ source index within the source device
1850  * @dst_id:		Device ID of the IRQ destination
1851  * @dst_host_irq:	IRQ number of the destination device
1852  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1853  * @vint:		Virtual interrupt to be used within the IA
1854  * @global_event:	Global event number to be used for the requesting event
1855  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1856  * @s_host:		Secondary host ID for which the irq/event is being
1857  *			requested.
1858  *
1859  * Return: 0 if all went fine, else return appropriate error.
1860  */
1861 static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
1862 			  u16 src_id, u16 src_index, u16 dst_id,
1863 			  u16 dst_host_irq, u16 ia_id, u16 vint,
1864 			  u16 global_event, u8 vint_status_bit, u8 s_host)
1865 {
1866 	pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
1867 		 __func__, valid_params, src_id, src_index,
1868 		 dst_id, dst_host_irq, ia_id, vint, global_event,
1869 		 vint_status_bit);
1870 
1871 	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1872 				 dst_id, dst_host_irq, ia_id, vint,
1873 				 global_event, vint_status_bit, s_host,
1874 				 TI_SCI_MSG_SET_IRQ);
1875 }
1876 
1877 /**
1878  * ti_sci_free_irq() - Helper API to free the irq route between the
1879  *			   requested source and destination
1880  * @handle:		Pointer to TISCI handle.
1881  * @valid_params:	Bit fields defining the validity of certain params
1882  * @src_id:		Device ID of the IRQ source
1883  * @src_index:		IRQ source index within the source device
1884  * @dst_id:		Device ID of the IRQ destination
1885  * @dst_host_irq:	IRQ number of the destination device
1886  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1887  * @vint:		Virtual interrupt to be used within the IA
1888  * @global_event:	Global event number to be used for the requesting event
1889  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1890  * @s_host:		Secondary host ID for which the irq/event is being
1891  *			requested.
1892  *
1893  * Return: 0 if all went fine, else return appropriate error.
1894  */
1895 static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
1896 			   u16 src_id, u16 src_index, u16 dst_id,
1897 			   u16 dst_host_irq, u16 ia_id, u16 vint,
1898 			   u16 global_event, u8 vint_status_bit, u8 s_host)
1899 {
1900 	pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
1901 		 __func__, valid_params, src_id, src_index,
1902 		 dst_id, dst_host_irq, ia_id, vint, global_event,
1903 		 vint_status_bit);
1904 
1905 	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1906 				 dst_id, dst_host_irq, ia_id, vint,
1907 				 global_event, vint_status_bit, s_host,
1908 				 TI_SCI_MSG_FREE_IRQ);
1909 }
1910 
1911 /**
1912  * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
1913  *			  source and destination.
1914  * @handle:		Pointer to TISCI handle.
1915  * @src_id:		Device ID of the IRQ source
1916  * @src_index:		IRQ source index within the source device
1917  * @dst_id:		Device ID of the IRQ destination
1918  * @dst_host_irq:	IRQ number of the destination device
1921  *
1922  * Return: 0 if all went fine, else return appropriate error.
1923  */
1924 static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
1925 			      u16 src_index, u16 dst_id, u16 dst_host_irq)
1926 {
1927 	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
1928 
1929 	return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
1930 			      dst_host_irq, 0, 0, 0, 0, 0);
1931 }
1932 
1933 /**
1934  * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
1935  *				requested source and Interrupt Aggregator.
1936  * @handle:		Pointer to TISCI handle.
1937  * @src_id:		Device ID of the IRQ source
1938  * @src_index:		IRQ source index within the source device
1939  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1940  * @vint:		Virtual interrupt to be used within the IA
1941  * @global_event:	Global event number to be used for the requesting event
1942  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1943  *
1944  * Return: 0 if all went fine, else return appropriate error.
1945  */
1946 static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
1947 				    u16 src_id, u16 src_index, u16 ia_id,
1948 				    u16 vint, u16 global_event,
1949 				    u8 vint_status_bit)
1950 {
1951 	u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
1952 			   MSG_FLAG_GLB_EVNT_VALID |
1953 			   MSG_FLAG_VINT_STS_BIT_VALID;
1954 
1955 	return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
1956 			      ia_id, vint, global_event, vint_status_bit, 0);
1957 }
1958 
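/*
 * Usage sketch (hypothetical): an interrupt controller client routes a
 * global event through an Interrupt Aggregator vint/status-bit pair via
 * the rm_irq_ops exposed on the handle; every ID below is a placeholder.
 *
 *	ret = handle->ops.rm_irq_ops.set_event_map(handle, src_id,
 *						   src_index, ia_id, vint,
 *						   global_event,
 *						   vint_status_bit);
 */
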
1959 /**
1960  * ti_sci_cmd_free_irq() - Free a host irq route between the
1961  *			   requested source and destination.
1962  * @handle:		Pointer to TISCI handle.
1963  * @src_id:		Device ID of the IRQ source
1964  * @src_index:		IRQ source index within the source device
1965  * @dst_id:		Device ID of the IRQ destination
1966  * @dst_host_irq:	IRQ number of the destination device
1969  *
1970  * Return: 0 if all went fine, else return appropriate error.
1971  */
1972 static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
1973 			       u16 src_index, u16 dst_id, u16 dst_host_irq)
1974 {
1975 	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
1976 
1977 	return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
1978 			       dst_host_irq, 0, 0, 0, 0, 0);
1979 }
1980 
1981 /**
1982  * ti_sci_cmd_free_event_map() - Free an event map between the requested source
1983  *				 and Interrupt Aggregator.
1984  * @handle:		Pointer to TISCI handle.
1985  * @src_id:		Device ID of the IRQ source
1986  * @src_index:		IRQ source index within the source device
1987  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1988  * @vint:		Virtual interrupt to be used within the IA
1989  * @global_event:	Global event number to be used for the requesting event
1990  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1991  *
1992  * Return: 0 if all went fine, else return appropriate error.
1993  */
1994 static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
1995 				     u16 src_id, u16 src_index, u16 ia_id,
1996 				     u16 vint, u16 global_event,
1997 				     u8 vint_status_bit)
1998 {
1999 	u32 valid_params = MSG_FLAG_IA_ID_VALID |
2000 			   MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
2001 			   MSG_FLAG_VINT_STS_BIT_VALID;
2002 
2003 	return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
2004 			       ia_id, vint, global_event, vint_status_bit, 0);
2005 }
2006 
2007 /**
2008  * ti_sci_setup_ops() - Set up the operations structures
2009  * @info:	pointer to TISCI instance
2010  */
2011 static void ti_sci_setup_ops(struct ti_sci_info *info)
2012 {
2013 	struct ti_sci_ops *ops = &info->handle.ops;
2014 	struct ti_sci_core_ops *core_ops = &ops->core_ops;
2015 	struct ti_sci_dev_ops *dops = &ops->dev_ops;
2016 	struct ti_sci_clk_ops *cops = &ops->clk_ops;
2017 	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
2018 	struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
2019 
2020 	core_ops->reboot_device = ti_sci_cmd_core_reboot;
2021 
2022 	dops->get_device = ti_sci_cmd_get_device;
2023 	dops->idle_device = ti_sci_cmd_idle_device;
2024 	dops->put_device = ti_sci_cmd_put_device;
2025 
2026 	dops->is_valid = ti_sci_cmd_dev_is_valid;
2027 	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
2028 	dops->is_idle = ti_sci_cmd_dev_is_idle;
2029 	dops->is_stop = ti_sci_cmd_dev_is_stop;
2030 	dops->is_on = ti_sci_cmd_dev_is_on;
2031 	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
2032 	dops->set_device_resets = ti_sci_cmd_set_device_resets;
2033 	dops->get_device_resets = ti_sci_cmd_get_device_resets;
2034 
2035 	cops->get_clock = ti_sci_cmd_get_clock;
2036 	cops->idle_clock = ti_sci_cmd_idle_clock;
2037 	cops->put_clock = ti_sci_cmd_put_clock;
2038 	cops->is_auto = ti_sci_cmd_clk_is_auto;
2039 	cops->is_on = ti_sci_cmd_clk_is_on;
2040 	cops->is_off = ti_sci_cmd_clk_is_off;
2041 
2042 	cops->set_parent = ti_sci_cmd_clk_set_parent;
2043 	cops->get_parent = ti_sci_cmd_clk_get_parent;
2044 	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
2045 
2046 	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
2047 	cops->set_freq = ti_sci_cmd_clk_set_freq;
2048 	cops->get_freq = ti_sci_cmd_clk_get_freq;
2049 
2050 	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2051 	rm_core_ops->get_range_from_shost =
2052 				ti_sci_cmd_get_resource_range_from_shost;
2053 
2054 	iops->set_irq = ti_sci_cmd_set_irq;
2055 	iops->set_event_map = ti_sci_cmd_set_event_map;
2056 	iops->free_irq = ti_sci_cmd_free_irq;
2057 	iops->free_event_map = ti_sci_cmd_free_event_map;
2058 }
2059 
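/*
 * Usage sketch (hypothetical): once ti_sci_setup_ops() has populated the
 * handle, clients dispatch through these ops tables rather than calling
 * the ti_sci_cmd_*() functions directly; the device ID is a placeholder.
 *
 *	const struct ti_sci_dev_ops *dops = &handle->ops.dev_ops;
 *	int ret;
 *
 *	ret = dops->get_device(handle, dev_id);
 *	if (ret)
 *		return ret;
 *	...
 *	ret = dops->put_device(handle, dev_id);
 */
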
2060 /**
2061  * ti_sci_get_handle() - Get the TI SCI handle for a device
2062  * @dev:	Pointer to device for which we want SCI handle
2063  *
2064  * NOTE: The function does not track individual clients of the framework
2065  * and is expected to be maintained by the caller of the TI SCI protocol library.
2066  * ti_sci_put_handle() must be balanced with a successful ti_sci_get_handle().
2067  * Return: pointer to handle if successful, else:
2068  * -EPROBE_DEFER if the instance is not ready
2069  * -ENODEV if the required node handler is missing
2070  * -EINVAL if invalid conditions are encountered.
2071  */
2072 const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
2073 {
2074 	struct device_node *ti_sci_np;
2075 	struct list_head *p;
2076 	struct ti_sci_handle *handle = NULL;
2077 	struct ti_sci_info *info;
2078 
2079 	if (!dev) {
2080 		pr_err("I need a device pointer\n");
2081 		return ERR_PTR(-EINVAL);
2082 	}
2083 	ti_sci_np = of_get_parent(dev->of_node);
2084 	if (!ti_sci_np) {
2085 		dev_err(dev, "No OF information\n");
2086 		return ERR_PTR(-EINVAL);
2087 	}
2088 
2089 	mutex_lock(&ti_sci_list_mutex);
2090 	list_for_each(p, &ti_sci_list) {
2091 		info = list_entry(p, struct ti_sci_info, node);
2092 		if (ti_sci_np == info->dev->of_node) {
2093 			handle = &info->handle;
2094 			info->users++;
2095 			break;
2096 		}
2097 	}
2098 	mutex_unlock(&ti_sci_list_mutex);
2099 	of_node_put(ti_sci_np);
2100 
2101 	if (!handle)
2102 		return ERR_PTR(-EPROBE_DEFER);
2103 
2104 	return handle;
2105 }
2106 EXPORT_SYMBOL_GPL(ti_sci_get_handle);
2107 
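/*
 * Usage sketch (hypothetical, illustrative only): a client driver whose
 * DT node is a child of the TISCI node grabs the handle at probe time:
 *
 *	const struct ti_sci_handle *h;
 *
 *	h = ti_sci_get_handle(&pdev->dev);
 *	if (IS_ERR(h))
 *		return PTR_ERR(h);
 *
 * PTR_ERR(h) is typically -EPROBE_DEFER while this instance has not yet
 * probed.  Every successful get must later be balanced with
 * ti_sci_put_handle(h).
 */
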
2108 /**
2109  * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
2110  * @handle:	Handle acquired by ti_sci_get_handle
2111  *
2112  * NOTE: The function does not track individual clients of the framework
2113  * and is expected to be maintained by the caller of the TI SCI protocol library.
2114  * ti_sci_put_handle() must be balanced with a successful ti_sci_get_handle().
2115  *
2116  * Return: 0 if successfully released;
2117  * if an error pointer was passed, it returns the error value back;
2118  * if NULL was passed, it returns -EINVAL.
2119  */
2120 int ti_sci_put_handle(const struct ti_sci_handle *handle)
2121 {
2122 	struct ti_sci_info *info;
2123 
2124 	if (IS_ERR(handle))
2125 		return PTR_ERR(handle);
2126 	if (!handle)
2127 		return -EINVAL;
2128 
2129 	info = handle_to_ti_sci_info(handle);
2130 	mutex_lock(&ti_sci_list_mutex);
2131 	if (!WARN_ON(!info->users))
2132 		info->users--;
2133 	mutex_unlock(&ti_sci_list_mutex);
2134 
2135 	return 0;
2136 }
2137 EXPORT_SYMBOL_GPL(ti_sci_put_handle);
2138 
2139 static void devm_ti_sci_release(struct device *dev, void *res)
2140 {
2141 	const struct ti_sci_handle **ptr = res;
2142 	const struct ti_sci_handle *handle = *ptr;
2143 	int ret;
2144 
2145 	ret = ti_sci_put_handle(handle);
2146 	if (ret)
2147 		dev_err(dev, "failed to put handle %d\n", ret);
2148 }
2149 
2150 /**
2151  * devm_ti_sci_get_handle() - Managed get handle
2152  * @dev:	device for which we want the SCI handle.
2153  *
2154  * NOTE: This releases the handle once the device resources are
2155  * no longer needed. MUST NOT BE released with ti_sci_put_handle.
2156  * The function does not track individual clients of the framework
2157  * and is expected to be maintained by the caller of the TI SCI protocol library.
2158  *
2159  * Return: pointer to handle if successful, else corresponding error pointer.
2160  */
2161 const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
2162 {
2163 	const struct ti_sci_handle **ptr;
2164 	const struct ti_sci_handle *handle;
2165 
2166 	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
2167 	if (!ptr)
2168 		return ERR_PTR(-ENOMEM);
2169 	handle = ti_sci_get_handle(dev);
2170 
2171 	if (!IS_ERR(handle)) {
2172 		*ptr = handle;
2173 		devres_add(dev, ptr);
2174 	} else {
2175 		devres_free(ptr);
2176 	}
2177 
2178 	return handle;
2179 }
2180 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
2181 
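/*
 * Usage sketch (hypothetical): with the managed variant the release is
 * tied to the consumer's devres, so no explicit put is needed:
 *
 *	handle = devm_ti_sci_get_handle(&pdev->dev);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 */
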
2182 /**
2183  * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
2184  * @np:		device node
2185  * @property:	property name containing phandle on TISCI node
2186  *
2187  * NOTE: The function does not track individual clients of the framework
2188  * and is expected to be maintained by the caller of the TI SCI protocol library.
2189  * ti_sci_put_handle() must be balanced with a successful ti_sci_get_by_phandle().
2190  * Return: pointer to handle if successful, else:
2191  * -EPROBE_DEFER if the instance is not ready
2192  * -ENODEV if the required node handler is missing
2193  * -EINVAL if invalid conditions are encountered.
2194  */
2195 const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
2196 						  const char *property)
2197 {
2198 	struct ti_sci_handle *handle = NULL;
2199 	struct device_node *ti_sci_np;
2200 	struct ti_sci_info *info;
2201 	struct list_head *p;
2202 
2203 	if (!np) {
2204 		pr_err("I need a device node pointer\n");
2205 		return ERR_PTR(-EINVAL);
2206 	}
2207 
2208 	ti_sci_np = of_parse_phandle(np, property, 0);
2209 	if (!ti_sci_np)
2210 		return ERR_PTR(-ENODEV);
2211 
2212 	mutex_lock(&ti_sci_list_mutex);
2213 	list_for_each(p, &ti_sci_list) {
2214 		info = list_entry(p, struct ti_sci_info, node);
2215 		if (ti_sci_np == info->dev->of_node) {
2216 			handle = &info->handle;
2217 			info->users++;
2218 			break;
2219 		}
2220 	}
2221 	mutex_unlock(&ti_sci_list_mutex);
2222 	of_node_put(ti_sci_np);
2223 
2224 	if (!handle)
2225 		return ERR_PTR(-EPROBE_DEFER);
2226 
2227 	return handle;
2228 }
2229 EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
2230 
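/*
 * Usage sketch (hypothetical): clients that are not children of the
 * TISCI node reference it through a phandle property; "ti,sci" is the
 * property name conventionally used in TISCI client bindings, shown
 * here only as an example.
 *
 *	handle = ti_sci_get_by_phandle(dev_of_node(dev), "ti,sci");
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 */
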
2231 /**
2232  * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
2233  * @dev:	Device pointer requesting TISCI handle
2234  * @property:	property name containing phandle on TISCI node
2235  *
2236  * NOTE: This releases the handle once the device resources are
2237  * no longer needed. MUST NOT BE released with ti_sci_put_handle.
2238  * The function does not track individual clients of the framework
2239  * and is expected to be maintained by the caller of the TI SCI protocol library.
2240  *
2241  * Return: pointer to handle if successful, else corresponding error pointer.
2242  */
2243 const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
2244 						       const char *property)
2245 {
2246 	const struct ti_sci_handle *handle;
2247 	const struct ti_sci_handle **ptr;
2248 
2249 	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
2250 	if (!ptr)
2251 		return ERR_PTR(-ENOMEM);
2252 	handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
2253 
2254 	if (!IS_ERR(handle)) {
2255 		*ptr = handle;
2256 		devres_add(dev, ptr);
2257 	} else {
2258 		devres_free(ptr);
2259 	}
2260 
2261 	return handle;
2262 }
2263 EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
2264 
2265 /**
2266  * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
2267  * @res:	Pointer to the TISCI resource
2268  *
2269  * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
2270  */
2271 u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
2272 {
2273 	unsigned long flags;
2274 	u16 set, free_bit;
2275 
2276 	raw_spin_lock_irqsave(&res->lock, flags);
2277 	for (set = 0; set < res->sets; set++) {
2278 		free_bit = find_first_zero_bit(res->desc[set].res_map,
2279 					       res->desc[set].num);
2280 		if (free_bit != res->desc[set].num) {
2281 			set_bit(free_bit, res->desc[set].res_map);
2282 			raw_spin_unlock_irqrestore(&res->lock, flags);
2283 			return res->desc[set].start + free_bit;
2284 		}
2285 	}
2286 	raw_spin_unlock_irqrestore(&res->lock, flags);
2287 
2288 	return TI_SCI_RESOURCE_NULL;
2289 }
2290 EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
2291 
2292 /**
2293  * ti_sci_release_resource() - Release a resource from TISCI resource.
2294  * @res:	Pointer to the TISCI resource
2295  * @id:		Resource id to be released.
2296  */
2297 void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
2298 {
2299 	unsigned long flags;
2300 	u16 set;
2301 
2302 	raw_spin_lock_irqsave(&res->lock, flags);
2303 	for (set = 0; set < res->sets; set++) {
2304 		if (res->desc[set].start <= id &&
2305 		    (res->desc[set].num + res->desc[set].start) > id)
2306 			clear_bit(id - res->desc[set].start,
2307 				  res->desc[set].res_map);
2308 	}
2309 	raw_spin_unlock_irqrestore(&res->lock, flags);
2310 }
2311 EXPORT_SYMBOL_GPL(ti_sci_release_resource);
2312 
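/*
 * Usage sketch (hypothetical): allocation and release must be balanced;
 * TI_SCI_RESOURCE_NULL indicates that every set in the resource is
 * exhausted.
 *
 *	u16 vint = ti_sci_get_free_resource(res);
 *
 *	if (vint == TI_SCI_RESOURCE_NULL)
 *		return -ENOSPC;
 *	...
 *	ti_sci_release_resource(res, vint);
 */
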
2313 /**
2314  * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
2315  * @res:	Pointer to the TISCI resource
2316  *
2317  * Return: Total number of available resources.
2318  */
2319 u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
2320 {
2321 	u32 set, count = 0;
2322 
2323 	for (set = 0; set < res->sets; set++)
2324 		count += res->desc[set].num;
2325 
2326 	return count;
2327 }
2328 EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
2329 
2330 /**
2331  * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
2332  * @handle:	TISCI handle
2333  * @dev:	Device pointer to which the resource is assigned
2334  * @dev_id:	TISCI device id to which the resource is assigned
2335  * @of_prop:	property name by which the resource are represented
2336  *
2337  * Return: Pointer to ti_sci_resource if all went well else appropriate
2338  *	   error pointer.
2339  */
2340 struct ti_sci_resource *
2341 devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
2342 			    struct device *dev, u32 dev_id, char *of_prop)
2343 {
2344 	struct ti_sci_resource *res;
2345 	u32 resource_subtype;
2346 	int i, ret;
2347 
2348 	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
2349 	if (!res)
2350 		return ERR_PTR(-ENOMEM);
2351 
2352 	ret = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
2353 					      sizeof(u32));
2354 	if (ret < 0) {
2355 		dev_err(dev, "%s resource type ids not available\n", of_prop);
2356 		return ERR_PTR(ret);
2357 	}
2358 	res->sets = ret;
2358 
2359 	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
2360 				 GFP_KERNEL);
2361 	if (!res->desc)
2362 		return ERR_PTR(-ENOMEM);
2363 
2364 	for (i = 0; i < res->sets; i++) {
2365 		ret = of_property_read_u32_index(dev_of_node(dev), of_prop, i,
2366 						 &resource_subtype);
2367 		if (ret)
2368 			return ERR_PTR(-EINVAL);
2369 
2370 		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
2371 							resource_subtype,
2372 							&res->desc[i].start,
2373 							&res->desc[i].num);
2374 		if (ret) {
2375 			dev_err(dev, "dev = %d subtype %d not allocated for this host\n",
2376 				dev_id, resource_subtype);
2377 			return ERR_PTR(ret);
2378 		}
2379 
2380 		dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
2381 			dev_id, resource_subtype, res->desc[i].start,
2382 			res->desc[i].num);
2383 
2384 		res->desc[i].res_map =
2385 			devm_kcalloc(dev, BITS_TO_LONGS(res->desc[i].num),
2386 				     sizeof(*res->desc[i].res_map), GFP_KERNEL);
2387 		if (!res->desc[i].res_map)
2388 			return ERR_PTR(-ENOMEM);
2389 	}
2390 	raw_spin_lock_init(&res->lock);
2391 
2392 	return res;
2393 }
2394 
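/*
 * Usage sketch (hypothetical): a navigator/interrupt client builds its
 * resource pool from a DT property that lists the subtypes assigned to
 * it; the "ti,sci-rm-range-*" style property name below follows the
 * client binding convention but is only an example.
 *
 *	res = devm_ti_sci_get_of_resource(handle, dev, dev_id,
 *					  "ti,sci-rm-range-vint");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */
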
2395 static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
2396 				void *cmd)
2397 {
2398 	struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
2399 	const struct ti_sci_handle *handle = &info->handle;
2400 
2401 	ti_sci_cmd_core_reboot(handle);
2402 
2403 	/* Whether the call fails or passes, we should not be here in the first place */
2404 	return NOTIFY_BAD;
2405 }
2406 
2407 /* Description for K2G */
2408 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
2409 	.default_host_id = 2,
2410 	/* Conservative duration */
2411 	.max_rx_timeout_ms = 1000,
2412 	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
2413 	.max_msgs = 20,
2414 	.max_msg_size = 64,
2415 	.rm_type_map = NULL,
2416 };
2417 
2418 static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
2419 	{.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */
2420 	{.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */
2421 	{.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */
2422 	{.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */
2423 	{.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */
2424 	{.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */
2425 	{.dev_id = 0, .type = 0x000}, /* end of table */
2426 };
2427 
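/*
 * Example of the mapping above in action (values taken from the table
 * itself): on AM654 an RM range request for dev_id 188 (MAIN_NAV_UDMAP)
 * is sent with type 0x006 in the TI_SCI_MSG_GET_RESOURCE_RANGE message,
 * as looked up by ti_sci_get_resource_type().
 */
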
2428 /* Description for AM654 */
2429 static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
2430 	.default_host_id = 12,
2431 	/* Conservative duration */
2432 	.max_rx_timeout_ms = 10000,
2433 	/* Limited by MBOX_TX_QUEUE_LEN. AM654 can handle up to 128 messages! */
2434 	.max_msgs = 20,
2435 	.max_msg_size = 60,
2436 	.rm_type_map = ti_sci_am654_rm_type_map,
2437 };
2438 
2439 static const struct of_device_id ti_sci_of_match[] = {
2440 	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
2441 	{.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
2442 	{ /* Sentinel */ },
2443 };
2444 MODULE_DEVICE_TABLE(of, ti_sci_of_match);
2445 
2446 static int ti_sci_probe(struct platform_device *pdev)
2447 {
2448 	struct device *dev = &pdev->dev;
2449 	const struct of_device_id *of_id;
2450 	const struct ti_sci_desc *desc;
2451 	struct ti_sci_xfer *xfer;
2452 	struct ti_sci_info *info = NULL;
2453 	struct ti_sci_xfers_info *minfo;
2454 	struct mbox_client *cl;
2455 	int ret = -EINVAL;
2456 	int i;
2457 	int reboot = 0;
2458 	u32 h_id;
2459 
2460 	of_id = of_match_device(ti_sci_of_match, dev);
2461 	if (!of_id) {
2462 		dev_err(dev, "OF data missing\n");
2463 		return -EINVAL;
2464 	}
2465 	desc = of_id->data;
2466 
2467 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
2468 	if (!info)
2469 		return -ENOMEM;
2470 
2471 	info->dev = dev;
2472 	info->desc = desc;
2473 	ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
2474 	/* if the property is not present in DT, use a default from desc */
2475 	if (ret < 0) {
2476 		info->host_id = info->desc->default_host_id;
2477 	} else {
2478 		if (!h_id) {
2479 			dev_warn(dev, "Host ID 0 is reserved for firmware\n");
2480 			info->host_id = info->desc->default_host_id;
2481 		} else {
2482 			info->host_id = h_id;
2483 		}
2484 	}
2485 
2486 	reboot = of_property_read_bool(dev->of_node,
2487 				       "ti,system-reboot-controller");
2488 	INIT_LIST_HEAD(&info->node);
2489 	minfo = &info->minfo;
2490 
2491 	/*
2492 	 * Pre-allocate messages
2493 	 * NEVER allocate more than what we can indicate in hdr.seq
2494 	 * if we have data description bug, force a fix..
2495 	 */
2496 	if (WARN_ON(desc->max_msgs >=
2497 		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
2498 		return -EINVAL;
2499 
2500 	minfo->xfer_block = devm_kcalloc(dev,
2501 					 desc->max_msgs,
2502 					 sizeof(*minfo->xfer_block),
2503 					 GFP_KERNEL);
2504 	if (!minfo->xfer_block)
2505 		return -ENOMEM;
2506 
2507 	minfo->xfer_alloc_table = devm_kcalloc(dev,
2508 					       BITS_TO_LONGS(desc->max_msgs),
2509 					       sizeof(unsigned long),
2510 					       GFP_KERNEL);
2511 	if (!minfo->xfer_alloc_table)
2512 		return -ENOMEM;
2513 	bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);
2514 
2515 	/* Pre-initialize the buffer pointer to pre-allocated buffers */
2516 	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
2517 		xfer->xfer_buf = devm_kzalloc(dev, desc->max_msg_size,
2518 					      GFP_KERNEL);
2519 		if (!xfer->xfer_buf)
2520 			return -ENOMEM;
2521 
2522 		xfer->tx_message.buf = xfer->xfer_buf;
2523 		init_completion(&xfer->done);
2524 	}
2525 
2526 	ret = ti_sci_debugfs_create(pdev, info);
2527 	if (ret)
2528 		dev_warn(dev, "Failed to create debug file\n");
2529 
2530 	platform_set_drvdata(pdev, info);
2531 
2532 	cl = &info->cl;
2533 	cl->dev = dev;
2534 	cl->tx_block = false;
2535 	cl->rx_callback = ti_sci_rx_callback;
2536 	cl->knows_txdone = true;
2537 
2538 	spin_lock_init(&minfo->xfer_lock);
2539 	sema_init(&minfo->sem_xfer_count, desc->max_msgs);
2540 
2541 	info->chan_rx = mbox_request_channel_byname(cl, "rx");
2542 	if (IS_ERR(info->chan_rx)) {
2543 		ret = PTR_ERR(info->chan_rx);
2544 		goto out;
2545 	}
2546 
2547 	info->chan_tx = mbox_request_channel_byname(cl, "tx");
2548 	if (IS_ERR(info->chan_tx)) {
2549 		ret = PTR_ERR(info->chan_tx);
2550 		goto out;
2551 	}
2552 	ret = ti_sci_cmd_get_revision(info);
2553 	if (ret) {
2554 		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
2555 		goto out;
2556 	}
2557 
2558 	ti_sci_setup_ops(info);
2559 
2560 	if (reboot) {
2561 		info->nb.notifier_call = tisci_reboot_handler;
2562 		info->nb.priority = 128;
2563 
2564 		ret = register_restart_handler(&info->nb);
2565 		if (ret) {
2566 			dev_err(dev, "reboot registration fail(%d)\n", ret);
2567 			return ret;
2568 		}
2569 	}
2570 
2571 	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
2572 		 info->handle.version.abi_major, info->handle.version.abi_minor,
2573 		 info->handle.version.firmware_revision,
2574 		 info->handle.version.firmware_description);
2575 
2576 	mutex_lock(&ti_sci_list_mutex);
2577 	list_add_tail(&info->node, &ti_sci_list);
2578 	mutex_unlock(&ti_sci_list_mutex);
2579 
2580 	return of_platform_populate(dev->of_node, NULL, NULL, dev);
2581 out:
2582 	if (!IS_ERR(info->chan_tx))
2583 		mbox_free_channel(info->chan_tx);
2584 	if (!IS_ERR(info->chan_rx))
2585 		mbox_free_channel(info->chan_rx);
2586 	debugfs_remove(info->d);
2587 	return ret;
2588 }
2589 
2590 static int ti_sci_remove(struct platform_device *pdev)
2591 {
2592 	struct ti_sci_info *info;
2593 	struct device *dev = &pdev->dev;
2594 	int ret = 0;
2595 
2596 	of_platform_depopulate(dev);
2597 
2598 	info = platform_get_drvdata(pdev);
2599 
2600 	if (info->nb.notifier_call)
2601 		unregister_restart_handler(&info->nb);
2602 
2603 	mutex_lock(&ti_sci_list_mutex);
2604 	if (info->users)
2605 		ret = -EBUSY;
2606 	else
2607 		list_del(&info->node);
2608 	mutex_unlock(&ti_sci_list_mutex);
2609 
2610 	if (!ret) {
2611 		ti_sci_debugfs_destroy(pdev, info);
2612 
2613 		/* Safe to free channels since no more users */
2614 		mbox_free_channel(info->chan_tx);
2615 		mbox_free_channel(info->chan_rx);
2616 	}
2617 
2618 	return ret;
2619 }
2620 
2621 static struct platform_driver ti_sci_driver = {
2622 	.probe = ti_sci_probe,
2623 	.remove = ti_sci_remove,
2624 	.driver = {
2625 		   .name = "ti-sci",
2626 		   .of_match_table = of_match_ptr(ti_sci_of_match),
2627 	},
2628 };
2629 module_platform_driver(ti_sci_driver);
2630 
2631 MODULE_LICENSE("GPL v2");
2632 MODULE_DESCRIPTION("TI System Control Interface(SCI) driver");
2633 MODULE_AUTHOR("Nishanth Menon");
2634 MODULE_ALIAS("platform:ti-sci");
2635