xref: /openbmc/linux/drivers/firmware/ti_sci.c (revision 68608b5e5063dd12942f1118286c6f595d0c4a05)
11e0a6014SLokesh Vutla // SPDX-License-Identifier: GPL-2.0
2aa276781SNishanth Menon /*
3aa276781SNishanth Menon  * Texas Instruments System Control Interface Protocol Driver
4aa276781SNishanth Menon  *
5aa276781SNishanth Menon  * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
6aa276781SNishanth Menon  *	Nishanth Menon
7aa276781SNishanth Menon  */
8aa276781SNishanth Menon 
9aa276781SNishanth Menon #define pr_fmt(fmt) "%s: " fmt, __func__
10aa276781SNishanth Menon 
11aa276781SNishanth Menon #include <linux/bitmap.h>
12aa276781SNishanth Menon #include <linux/debugfs.h>
13aa276781SNishanth Menon #include <linux/export.h>
14aa276781SNishanth Menon #include <linux/io.h>
15aa276781SNishanth Menon #include <linux/kernel.h>
16aa276781SNishanth Menon #include <linux/mailbox_client.h>
17aa276781SNishanth Menon #include <linux/module.h>
18aa276781SNishanth Menon #include <linux/of_device.h>
19aa276781SNishanth Menon #include <linux/semaphore.h>
20aa276781SNishanth Menon #include <linux/slab.h>
21aa276781SNishanth Menon #include <linux/soc/ti/ti-msgmgr.h>
22aa276781SNishanth Menon #include <linux/soc/ti/ti_sci_protocol.h>
23912cffb4SNishanth Menon #include <linux/reboot.h>
24aa276781SNishanth Menon 
25aa276781SNishanth Menon #include "ti_sci.h"
26aa276781SNishanth Menon 
27aa276781SNishanth Menon /* List of all TI SCI devices active in system */
28aa276781SNishanth Menon static LIST_HEAD(ti_sci_list);
29aa276781SNishanth Menon /* Protection for the entire list */
30aa276781SNishanth Menon static DEFINE_MUTEX(ti_sci_list_mutex);
31aa276781SNishanth Menon 
32aa276781SNishanth Menon /**
33aa276781SNishanth Menon  * struct ti_sci_xfer - Structure representing a message flow
34aa276781SNishanth Menon  * @tx_message:	Transmit message
35aa276781SNishanth Menon  * @rx_len:	Receive message length
36aa276781SNishanth Menon  * @xfer_buf:	Preallocated buffer to store receive message
37aa276781SNishanth Menon  *		Since we work with a request-ACK protocol, we can
38aa276781SNishanth Menon  *		reuse the same buffer for the rx path that we
39aa276781SNishanth Menon  *		use for the tx path.
40aa276781SNishanth Menon  * @done:	completion event
41aa276781SNishanth Menon  */
42aa276781SNishanth Menon struct ti_sci_xfer {
43aa276781SNishanth Menon 	struct ti_msgmgr_message tx_message;
44aa276781SNishanth Menon 	u8 rx_len;
45aa276781SNishanth Menon 	u8 *xfer_buf;
46aa276781SNishanth Menon 	struct completion done;
47aa276781SNishanth Menon };
48aa276781SNishanth Menon 
49aa276781SNishanth Menon /**
50aa276781SNishanth Menon  * struct ti_sci_xfers_info - Structure to manage transfer information
51aa276781SNishanth Menon  * @sem_xfer_count:	Counting semaphore for managing max simultaneous
52aa276781SNishanth Menon  *			messages.
53aa276781SNishanth Menon  * @xfer_block:		Preallocated Message array
54aa276781SNishanth Menon  * @xfer_alloc_table:	Bitmap table for allocated messages.
55aa276781SNishanth Menon  *			Index of this bitmap table is also used for message
56aa276781SNishanth Menon  *			sequence identifier.
57aa276781SNishanth Menon  * @xfer_lock:		Protection for message allocation
58aa276781SNishanth Menon  */
59aa276781SNishanth Menon struct ti_sci_xfers_info {
60aa276781SNishanth Menon 	struct semaphore sem_xfer_count;
61aa276781SNishanth Menon 	struct ti_sci_xfer *xfer_block;
62aa276781SNishanth Menon 	unsigned long *xfer_alloc_table;
63aa276781SNishanth Menon 	/* protect transfer allocation */
64aa276781SNishanth Menon 	spinlock_t xfer_lock;
65aa276781SNishanth Menon };
66aa276781SNishanth Menon 
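/*
 * A minimal sketch (the helper name and the exact allocation calls below are
 * assumptions, not taken from this file) of how the bookkeeping above is
 * expected to be sized at probe time for a SoC description allowing
 * desc->max_msgs in-flight transfers: the counting semaphore starts at
 * max_msgs, the allocation bitmap carries one bit per transfer, and every
 * preallocated transfer gets a buffer of desc->max_msg_size bytes that is
 * shared between the tx and rx paths.
 *
 *	static int example_init_xfers(struct device *dev,
 *				      const struct ti_sci_desc *desc,
 *				      struct ti_sci_xfers_info *minfo)
 *	{
 *		int i;
 *
 *		sema_init(&minfo->sem_xfer_count, desc->max_msgs);
 *		spin_lock_init(&minfo->xfer_lock);
 *
 *		minfo->xfer_alloc_table = devm_kcalloc(dev,
 *						BITS_TO_LONGS(desc->max_msgs),
 *						sizeof(unsigned long),
 *						GFP_KERNEL);
 *		minfo->xfer_block = devm_kcalloc(dev, desc->max_msgs,
 *						 sizeof(*minfo->xfer_block),
 *						 GFP_KERNEL);
 *		if (!minfo->xfer_alloc_table || !minfo->xfer_block)
 *			return -ENOMEM;
 *
 *		for (i = 0; i < desc->max_msgs; i++) {
 *			struct ti_sci_xfer *xfer = &minfo->xfer_block[i];
 *
 *			xfer->xfer_buf = devm_kzalloc(dev, desc->max_msg_size,
 *						      GFP_KERNEL);
 *			if (!xfer->xfer_buf)
 *				return -ENOMEM;
 *			xfer->tx_message.buf = xfer->xfer_buf;
 *			init_completion(&xfer->done);
 *		}
 *
 *		return 0;
 *	}
 */
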
67aa276781SNishanth Menon /**
689c19fb68SLokesh Vutla  * struct ti_sci_rm_type_map - Structure representing TISCI Resource
699c19fb68SLokesh Vutla  *				management representation of dev_ids.
709c19fb68SLokesh Vutla  * @dev_id:	TISCI device ID
719c19fb68SLokesh Vutla  * @type:	Corresponding id as identified by TISCI RM.
729c19fb68SLokesh Vutla  *
739c19fb68SLokesh Vutla  * Note: This is used only as a workaround for using the RM range APIs
749c19fb68SLokesh Vutla  *	on the AM654 SoC. For future SoCs, dev_id will be used as the type
759c19fb68SLokesh Vutla  *	for the RM range APIs. In order to maintain ABI backward compatibility,
769c19fb68SLokesh Vutla  *	type is not being changed for the AM654 SoC.
779c19fb68SLokesh Vutla  */
789c19fb68SLokesh Vutla struct ti_sci_rm_type_map {
799c19fb68SLokesh Vutla 	u32 dev_id;
809c19fb68SLokesh Vutla 	u16 type;
819c19fb68SLokesh Vutla };
829c19fb68SLokesh Vutla 
839c19fb68SLokesh Vutla /**
84aa276781SNishanth Menon  * struct ti_sci_desc - Description of SoC integration
85e69a3553SNishanth Menon  * @default_host_id:	Host identifier representing the compute entity
86aa276781SNishanth Menon  * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
87aa276781SNishanth Menon  * @max_msgs: Maximum number of messages that can be pending
88aa276781SNishanth Menon  *		  simultaneously in the system
89aa276781SNishanth Menon  * @max_msg_size: Maximum size of data per message that can be handled.
909c19fb68SLokesh Vutla  * @rm_type_map: RM resource type mapping structure.
91aa276781SNishanth Menon  */
92aa276781SNishanth Menon struct ti_sci_desc {
93e69a3553SNishanth Menon 	u8 default_host_id;
94aa276781SNishanth Menon 	int max_rx_timeout_ms;
95aa276781SNishanth Menon 	int max_msgs;
96aa276781SNishanth Menon 	int max_msg_size;
979c19fb68SLokesh Vutla 	struct ti_sci_rm_type_map *rm_type_map;
98aa276781SNishanth Menon };
99aa276781SNishanth Menon 
100aa276781SNishanth Menon /**
101aa276781SNishanth Menon  * struct ti_sci_info - Structure representing a TI SCI instance
102aa276781SNishanth Menon  * @dev:	Device pointer
103aa276781SNishanth Menon  * @desc:	SoC description for this instance
104912cffb4SNishanth Menon  * @nb:	Reboot Notifier block
105aa276781SNishanth Menon  * @d:		Debugfs file entry
106aa276781SNishanth Menon  * @debug_region: Memory region where the debug messages are available
107aa276781SNishanth Menon  * @debug_region_size: Debug region size
108aa276781SNishanth Menon  * @debug_buffer: Buffer allocated to copy debug messages.
109aa276781SNishanth Menon  * @handle:	Instance of TI SCI handle to send to clients.
110aa276781SNishanth Menon  * @cl:		Mailbox Client
111aa276781SNishanth Menon  * @chan_tx:	Transmit mailbox channel
112aa276781SNishanth Menon  * @chan_rx:	Receive mailbox channel
113aa276781SNishanth Menon  * @minfo:	Message info
114aa276781SNishanth Menon  * @node:	list head
115e69a3553SNishanth Menon  * @host_id:	Host ID
116aa276781SNishanth Menon  * @users:	Number of users of this instance
117aa276781SNishanth Menon  */
118aa276781SNishanth Menon struct ti_sci_info {
119aa276781SNishanth Menon 	struct device *dev;
120912cffb4SNishanth Menon 	struct notifier_block nb;
121aa276781SNishanth Menon 	const struct ti_sci_desc *desc;
122aa276781SNishanth Menon 	struct dentry *d;
123aa276781SNishanth Menon 	void __iomem *debug_region;
124aa276781SNishanth Menon 	char *debug_buffer;
125aa276781SNishanth Menon 	size_t debug_region_size;
126aa276781SNishanth Menon 	struct ti_sci_handle handle;
127aa276781SNishanth Menon 	struct mbox_client cl;
128aa276781SNishanth Menon 	struct mbox_chan *chan_tx;
129aa276781SNishanth Menon 	struct mbox_chan *chan_rx;
130aa276781SNishanth Menon 	struct ti_sci_xfers_info minfo;
131aa276781SNishanth Menon 	struct list_head node;
132e69a3553SNishanth Menon 	u8 host_id;
133aa276781SNishanth Menon 	/* protected by ti_sci_list_mutex */
134aa276781SNishanth Menon 	int users;
135912cffb4SNishanth Menon 
136aa276781SNishanth Menon };
137aa276781SNishanth Menon 
138aa276781SNishanth Menon #define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
139aa276781SNishanth Menon #define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
140912cffb4SNishanth Menon #define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
141aa276781SNishanth Menon 
142aa276781SNishanth Menon #ifdef CONFIG_DEBUG_FS
143aa276781SNishanth Menon 
144aa276781SNishanth Menon /**
145aa276781SNishanth Menon  * ti_sci_debug_show() - Helper to dump the debug log
146aa276781SNishanth Menon  * @s:	sequence file pointer
147aa276781SNishanth Menon  * @unused:	unused.
148aa276781SNishanth Menon  *
149aa276781SNishanth Menon  * Return: 0
150aa276781SNishanth Menon  */
151aa276781SNishanth Menon static int ti_sci_debug_show(struct seq_file *s, void *unused)
152aa276781SNishanth Menon {
153aa276781SNishanth Menon 	struct ti_sci_info *info = s->private;
154aa276781SNishanth Menon 
155aa276781SNishanth Menon 	memcpy_fromio(info->debug_buffer, info->debug_region,
156aa276781SNishanth Menon 		      info->debug_region_size);
157aa276781SNishanth Menon 	/*
158aa276781SNishanth Menon 	 * We don't trust the firmware to NULL-terminate the last byte (hence
159aa276781SNishanth Menon 	 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
160aa276781SNishanth Menon 	 * specific data format for debug messages, we just present the data
161aa276781SNishanth Menon 	 * in the buffer as-is - we expect the messages to be self-explanatory.
162aa276781SNishanth Menon 	 */
163aa276781SNishanth Menon 	seq_puts(s, info->debug_buffer);
164aa276781SNishanth Menon 	return 0;
165aa276781SNishanth Menon }
166aa276781SNishanth Menon 
1675953c887SYangtao Li /* Provide the log file operations interface */
1685953c887SYangtao Li DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
169aa276781SNishanth Menon 
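/*
 * DEFINE_SHOW_ATTRIBUTE(ti_sci_debug) generates the ti_sci_debug_fops used
 * below. Roughly (see include/linux/seq_file.h for the authoritative macro),
 * it expands to a single_open() wrapper around ti_sci_debug_show():
 *
 *	static int ti_sci_debug_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, ti_sci_debug_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations ti_sci_debug_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= ti_sci_debug_open,
 *		.read		= seq_read,
 *		.llseek		= seq_lseek,
 *		.release	= single_release,
 *	};
 *
 * inode->i_private is the info pointer handed to debugfs_create_file() in
 * ti_sci_debugfs_create() below, which is how ti_sci_debug_show() finds it
 * again in s->private.
 */
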
170aa276781SNishanth Menon /**
171aa276781SNishanth Menon  * ti_sci_debugfs_create() - Create log debug file
172aa276781SNishanth Menon  * @pdev:	platform device pointer
173aa276781SNishanth Menon  * @info:	Pointer to SCI entity information
174aa276781SNishanth Menon  *
175aa276781SNishanth Menon  * Return: 0 if all went fine, else corresponding error.
176aa276781SNishanth Menon  */
177aa276781SNishanth Menon static int ti_sci_debugfs_create(struct platform_device *pdev,
178aa276781SNishanth Menon 				 struct ti_sci_info *info)
179aa276781SNishanth Menon {
180aa276781SNishanth Menon 	struct device *dev = &pdev->dev;
181aa276781SNishanth Menon 	struct resource *res;
182aa276781SNishanth Menon 	char debug_name[50] = "ti_sci_debug@";
183aa276781SNishanth Menon 
184aa276781SNishanth Menon 	/* Debug region is optional */
185aa276781SNishanth Menon 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
186aa276781SNishanth Menon 					   "debug_messages");
187aa276781SNishanth Menon 	info->debug_region = devm_ioremap_resource(dev, res);
188aa276781SNishanth Menon 	if (IS_ERR(info->debug_region))
189aa276781SNishanth Menon 		return 0;
190aa276781SNishanth Menon 	info->debug_region_size = resource_size(res);
191aa276781SNishanth Menon 
192aa276781SNishanth Menon 	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
193aa276781SNishanth Menon 					  sizeof(char), GFP_KERNEL);
194aa276781SNishanth Menon 	if (!info->debug_buffer)
195aa276781SNishanth Menon 		return -ENOMEM;
196aa276781SNishanth Menon 	/* Set up NULL termination */
197aa276781SNishanth Menon 	info->debug_buffer[info->debug_region_size] = 0;
198aa276781SNishanth Menon 
199aa276781SNishanth Menon 	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
20076cefef8SArnd Bergmann 					      sizeof(debug_name) -
20176cefef8SArnd Bergmann 					      sizeof("ti_sci_debug@")),
202aa276781SNishanth Menon 				      0444, NULL, info, &ti_sci_debug_fops);
203aa276781SNishanth Menon 	if (IS_ERR(info->d))
204aa276781SNishanth Menon 		return PTR_ERR(info->d);
205aa276781SNishanth Menon 
206aa276781SNishanth Menon 	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
207aa276781SNishanth Menon 		info->debug_region, info->debug_region_size, res);
208aa276781SNishanth Menon 	return 0;
209aa276781SNishanth Menon }
210aa276781SNishanth Menon 
211aa276781SNishanth Menon /**
212aa276781SNishanth Menon  * ti_sci_debugfs_destroy() - clean up log debug file
213aa276781SNishanth Menon  * @pdev:	platform device pointer
214aa276781SNishanth Menon  * @info:	Pointer to SCI entity information
215aa276781SNishanth Menon  */
216aa276781SNishanth Menon static void ti_sci_debugfs_destroy(struct platform_device *pdev,
217aa276781SNishanth Menon 				   struct ti_sci_info *info)
218aa276781SNishanth Menon {
219aa276781SNishanth Menon 	if (IS_ERR(info->debug_region))
220aa276781SNishanth Menon 		return;
221aa276781SNishanth Menon 
222aa276781SNishanth Menon 	debugfs_remove(info->d);
223aa276781SNishanth Menon }
224aa276781SNishanth Menon #else /* CONFIG_DEBUG_FS */
225aa276781SNishanth Menon static inline int ti_sci_debugfs_create(struct platform_device *dev,
226aa276781SNishanth Menon 					struct ti_sci_info *info)
227aa276781SNishanth Menon {
228aa276781SNishanth Menon 	return 0;
229aa276781SNishanth Menon }
230aa276781SNishanth Menon 
231aa276781SNishanth Menon static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
232aa276781SNishanth Menon 					  struct ti_sci_info *info)
233aa276781SNishanth Menon {
234aa276781SNishanth Menon }
235aa276781SNishanth Menon #endif /* CONFIG_DEBUG_FS */
236aa276781SNishanth Menon 
237aa276781SNishanth Menon /**
238aa276781SNishanth Menon  * ti_sci_dump_header_dbg() - Helper to dump a message header.
239aa276781SNishanth Menon  * @dev:	Device pointer corresponding to the SCI entity
240aa276781SNishanth Menon  * @hdr:	pointer to header.
241aa276781SNishanth Menon  */
242aa276781SNishanth Menon static inline void ti_sci_dump_header_dbg(struct device *dev,
243aa276781SNishanth Menon 					  struct ti_sci_msg_hdr *hdr)
244aa276781SNishanth Menon {
245aa276781SNishanth Menon 	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
246aa276781SNishanth Menon 		hdr->type, hdr->host, hdr->seq, hdr->flags);
247aa276781SNishanth Menon }
248aa276781SNishanth Menon 
249aa276781SNishanth Menon /**
250aa276781SNishanth Menon  * ti_sci_rx_callback() - mailbox client callback for receive messages
251aa276781SNishanth Menon  * @cl:	client pointer
252aa276781SNishanth Menon  * @m:	mailbox message
253aa276781SNishanth Menon  *
254aa276781SNishanth Menon  * Processes one received message, maps it to the appropriate transfer
255aa276781SNishanth Menon  * information, and signals completion of the transfer.
256aa276781SNishanth Menon  *
257aa276781SNishanth Menon  * NOTE: This function will be invoked in IRQ context, hence should be
258aa276781SNishanth Menon  * as optimal as possible.
259aa276781SNishanth Menon  */
260aa276781SNishanth Menon static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
261aa276781SNishanth Menon {
262aa276781SNishanth Menon 	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
263aa276781SNishanth Menon 	struct device *dev = info->dev;
264aa276781SNishanth Menon 	struct ti_sci_xfers_info *minfo = &info->minfo;
265aa276781SNishanth Menon 	struct ti_msgmgr_message *mbox_msg = m;
266aa276781SNishanth Menon 	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
267aa276781SNishanth Menon 	struct ti_sci_xfer *xfer;
268aa276781SNishanth Menon 	u8 xfer_id;
269aa276781SNishanth Menon 
270aa276781SNishanth Menon 	xfer_id = hdr->seq;
271aa276781SNishanth Menon 
272aa276781SNishanth Menon 	/*
273aa276781SNishanth Menon 	 * Are we even expecting this?
274aa276781SNishanth Menon 	 * NOTE: barriers were implicit in locks used for modifying the bitmap
275aa276781SNishanth Menon 	 */
276aa276781SNishanth Menon 	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
277aa276781SNishanth Menon 		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
278aa276781SNishanth Menon 		return;
279aa276781SNishanth Menon 	}
280aa276781SNishanth Menon 
281aa276781SNishanth Menon 	xfer = &minfo->xfer_block[xfer_id];
282aa276781SNishanth Menon 
283aa276781SNishanth Menon 	/* Is the message of valid length? */
284aa276781SNishanth Menon 	if (mbox_msg->len > info->desc->max_msg_size) {
285bd0fa74eSNishanth Menon 		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
286aa276781SNishanth Menon 			mbox_msg->len, info->desc->max_msg_size);
287aa276781SNishanth Menon 		ti_sci_dump_header_dbg(dev, hdr);
288aa276781SNishanth Menon 		return;
289aa276781SNishanth Menon 	}
290aa276781SNishanth Menon 	if (mbox_msg->len < xfer->rx_len) {
291bd0fa74eSNishanth Menon 		dev_err(dev, "Recv xfer %zu < expected %d length\n",
292aa276781SNishanth Menon 			mbox_msg->len, xfer->rx_len);
293aa276781SNishanth Menon 		ti_sci_dump_header_dbg(dev, hdr);
294aa276781SNishanth Menon 		return;
295aa276781SNishanth Menon 	}
296aa276781SNishanth Menon 
297aa276781SNishanth Menon 	ti_sci_dump_header_dbg(dev, hdr);
298aa276781SNishanth Menon 	/* Take a copy to the rx buffer.. */
299aa276781SNishanth Menon 	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
300aa276781SNishanth Menon 	complete(&xfer->done);
301aa276781SNishanth Menon }
302aa276781SNishanth Menon 
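/*
 * ti_sci_rx_callback() only fires once the mailbox client has been
 * registered for the receive channel. A condensed sketch of the wiring this
 * implies at probe time (the field values and the "rx"/"tx" channel names
 * are assumptions based on the TI message manager binding, not copied from
 * this section):
 *
 *	struct mbox_client *cl = &info->cl;
 *
 *	cl->dev = dev;
 *	cl->rx_callback = ti_sci_rx_callback;
 *	cl->tx_block = false;
 *	cl->knows_txdone = true;
 *
 *	info->chan_rx = mbox_request_channel_byname(cl, "rx");
 *	if (IS_ERR(info->chan_rx))
 *		return PTR_ERR(info->chan_rx);
 *
 *	info->chan_tx = mbox_request_channel_byname(cl, "tx");
 *	if (IS_ERR(info->chan_tx))
 *		return PTR_ERR(info->chan_tx);
 */
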
303aa276781SNishanth Menon /**
304aa276781SNishanth Menon  * ti_sci_get_one_xfer() - Allocate one message
305aa276781SNishanth Menon  * @info:	Pointer to SCI entity information
306aa276781SNishanth Menon  * @msg_type:	Message type
307aa276781SNishanth Menon  * @msg_flags:	Flag to set for the message
308aa276781SNishanth Menon  * @tx_message_size: transmit message size
309aa276781SNishanth Menon  * @rx_message_size: receive message size
310aa276781SNishanth Menon  *
311aa276781SNishanth Menon  * Helper function used by the various command functions that are
312aa276781SNishanth Menon  * exposed to clients of this driver to allocate a message transfer.
313aa276781SNishanth Menon  *
314aa276781SNishanth Menon  * This function can sleep depending on pending requests already in the system
315aa276781SNishanth Menon  * for the SCI entity. Further, this also holds a spinlock to maintain integrity
316aa276781SNishanth Menon  * of internal data structures.
317aa276781SNishanth Menon  *
318aa276781SNishanth Menon  * Return: Valid struct ti_sci_xfer pointer if all went fine, else a
319aa276781SNishanth Menon  *	   corresponding ERR_PTR-encoded error.
319aa276781SNishanth Menon  */
320aa276781SNishanth Menon static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
321aa276781SNishanth Menon 					       u16 msg_type, u32 msg_flags,
322aa276781SNishanth Menon 					       size_t tx_message_size,
323aa276781SNishanth Menon 					       size_t rx_message_size)
324aa276781SNishanth Menon {
325aa276781SNishanth Menon 	struct ti_sci_xfers_info *minfo = &info->minfo;
326aa276781SNishanth Menon 	struct ti_sci_xfer *xfer;
327aa276781SNishanth Menon 	struct ti_sci_msg_hdr *hdr;
328aa276781SNishanth Menon 	unsigned long flags;
329aa276781SNishanth Menon 	unsigned long bit_pos;
330aa276781SNishanth Menon 	u8 xfer_id;
331aa276781SNishanth Menon 	int ret;
332aa276781SNishanth Menon 	int timeout;
333aa276781SNishanth Menon 
334aa276781SNishanth Menon 	/* Ensure we have sane transfer sizes */
335aa276781SNishanth Menon 	if (rx_message_size > info->desc->max_msg_size ||
336aa276781SNishanth Menon 	    tx_message_size > info->desc->max_msg_size ||
337aa276781SNishanth Menon 	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
338aa276781SNishanth Menon 		return ERR_PTR(-ERANGE);
339aa276781SNishanth Menon 
340aa276781SNishanth Menon 	/*
341aa276781SNishanth Menon 	 * Ensure we have only a controlled number of pending messages.
342aa276781SNishanth Menon 	 * Ideally, we might just have to wait for a single message; be
343aa276781SNishanth Menon 	 * conservative and wait 5 times that.
344aa276781SNishanth Menon 	 */
345aa276781SNishanth Menon 	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
346aa276781SNishanth Menon 	ret = down_timeout(&minfo->sem_xfer_count, timeout);
347aa276781SNishanth Menon 	if (ret < 0)
348aa276781SNishanth Menon 		return ERR_PTR(ret);
349aa276781SNishanth Menon 
350aa276781SNishanth Menon 	/* Keep the locked section as small as possible */
351aa276781SNishanth Menon 	spin_lock_irqsave(&minfo->xfer_lock, flags);
352aa276781SNishanth Menon 	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
353aa276781SNishanth Menon 				      info->desc->max_msgs);
354aa276781SNishanth Menon 	set_bit(bit_pos, minfo->xfer_alloc_table);
355aa276781SNishanth Menon 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
356aa276781SNishanth Menon 
357aa276781SNishanth Menon 	/*
358aa276781SNishanth Menon 	 * We already ensured in probe that the maximum number of messages
359aa276781SNishanth Menon 	 * fits in hdr.seq - NOTE: this improves access latencies
360aa276781SNishanth Menon 	 * to predictable O(1) access, BUT, it opens us to risk if
361aa276781SNishanth Menon 	 * the remote misbehaves with corrupted message sequence responses.
362aa276781SNishanth Menon 	 * If that happens, we are going to be messed up anyway.
363aa276781SNishanth Menon 	 */
364aa276781SNishanth Menon 	xfer_id = (u8)bit_pos;
365aa276781SNishanth Menon 
366aa276781SNishanth Menon 	xfer = &minfo->xfer_block[xfer_id];
367aa276781SNishanth Menon 
368aa276781SNishanth Menon 	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
369aa276781SNishanth Menon 	xfer->tx_message.len = tx_message_size;
370aa276781SNishanth Menon 	xfer->rx_len = (u8)rx_message_size;
371aa276781SNishanth Menon 
372aa276781SNishanth Menon 	reinit_completion(&xfer->done);
373aa276781SNishanth Menon 
374aa276781SNishanth Menon 	hdr->seq = xfer_id;
375aa276781SNishanth Menon 	hdr->type = msg_type;
376e69a3553SNishanth Menon 	hdr->host = info->host_id;
377aa276781SNishanth Menon 	hdr->flags = msg_flags;
378aa276781SNishanth Menon 
379aa276781SNishanth Menon 	return xfer;
380aa276781SNishanth Menon }
381aa276781SNishanth Menon 
382aa276781SNishanth Menon /**
383aa276781SNishanth Menon  * ti_sci_put_one_xfer() - Release a message
384aa276781SNishanth Menon  * @minfo:	transfer info pointer
385aa276781SNishanth Menon  * @xfer:	message that was reserved by ti_sci_get_one_xfer
386aa276781SNishanth Menon  *
387aa276781SNishanth Menon  * This holds a spinlock to maintain integrity of internal data structures.
388aa276781SNishanth Menon  */
389aa276781SNishanth Menon static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
390aa276781SNishanth Menon 				struct ti_sci_xfer *xfer)
391aa276781SNishanth Menon {
392aa276781SNishanth Menon 	unsigned long flags;
393aa276781SNishanth Menon 	struct ti_sci_msg_hdr *hdr;
394aa276781SNishanth Menon 	u8 xfer_id;
395aa276781SNishanth Menon 
396aa276781SNishanth Menon 	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
397aa276781SNishanth Menon 	xfer_id = hdr->seq;
398aa276781SNishanth Menon 
399aa276781SNishanth Menon 	/*
400aa276781SNishanth Menon 	 * Keep the locked section as small as possible
401aa276781SNishanth Menon 	 * NOTE: we might escape with smp_mb and no lock here..
402aa276781SNishanth Menon 	 * but just be conservative and symmetric.
403aa276781SNishanth Menon 	 */
404aa276781SNishanth Menon 	spin_lock_irqsave(&minfo->xfer_lock, flags);
405aa276781SNishanth Menon 	clear_bit(xfer_id, minfo->xfer_alloc_table);
406aa276781SNishanth Menon 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
407aa276781SNishanth Menon 
408aa276781SNishanth Menon 	/* Increment the count for the next user to get through */
409aa276781SNishanth Menon 	up(&minfo->sem_xfer_count);
410aa276781SNishanth Menon }
411aa276781SNishanth Menon 
412aa276781SNishanth Menon /**
413aa276781SNishanth Menon  * ti_sci_do_xfer() - Do one transfer
414aa276781SNishanth Menon  * @info:	Pointer to SCI entity information
415aa276781SNishanth Menon  * @xfer:	Transfer to initiate and wait for response
416aa276781SNishanth Menon  *
417aa276781SNishanth Menon  * Return: -ETIMEDOUT in case of no response; if there is a transmit
418aa276781SNishanth Menon  *	   error, return the corresponding error; else, if all goes well,
419aa276781SNishanth Menon  *	   return 0.
420aa276781SNishanth Menon  */
421aa276781SNishanth Menon static inline int ti_sci_do_xfer(struct ti_sci_info *info,
422aa276781SNishanth Menon 				 struct ti_sci_xfer *xfer)
423aa276781SNishanth Menon {
424aa276781SNishanth Menon 	int ret;
425aa276781SNishanth Menon 	int timeout;
426aa276781SNishanth Menon 	struct device *dev = info->dev;
427aa276781SNishanth Menon 
428aa276781SNishanth Menon 	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
429aa276781SNishanth Menon 	if (ret < 0)
430aa276781SNishanth Menon 		return ret;
431aa276781SNishanth Menon 
432aa276781SNishanth Menon 	ret = 0;
433aa276781SNishanth Menon 
434aa276781SNishanth Menon 	/* And we wait for the response. */
435aa276781SNishanth Menon 	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
436aa276781SNishanth Menon 	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
437595f3a9dSHelge Deller 		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
438aa276781SNishanth Menon 			(void *)_RET_IP_);
439aa276781SNishanth Menon 		ret = -ETIMEDOUT;
440aa276781SNishanth Menon 	}
441aa276781SNishanth Menon 	/*
442aa276781SNishanth Menon 	 * NOTE: we might prefer not to need the mailbox ticker to manage the
443aa276781SNishanth Menon 	 * transfer queueing since the protocol layer queues things by itself.
444aa276781SNishanth Menon 	 * Unfortunately, we have to kick the mailbox framework after we have
445aa276781SNishanth Menon 	 * received our message.
446aa276781SNishanth Menon 	 */
447aa276781SNishanth Menon 	mbox_client_txdone(info->chan_tx, ret);
448aa276781SNishanth Menon 
449aa276781SNishanth Menon 	return ret;
450aa276781SNishanth Menon }
451aa276781SNishanth Menon 
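/*
 * Every command helper below follows the same allocate/fill/transfer/check/
 * release sequence built from ti_sci_get_one_xfer(), ti_sci_do_xfer(),
 * ti_sci_is_response_ack() and ti_sci_put_one_xfer(). A condensed sketch of
 * that pattern, with a hypothetical ti_sci_msg_req_example and placeholder
 * field names standing in for the real request structures:
 *
 *	xfer = ti_sci_get_one_xfer(info, msg_type,
 *				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 *				   sizeof(*req), sizeof(*resp));
 *	if (IS_ERR(xfer))
 *		return PTR_ERR(xfer);
 *
 *	req = (struct ti_sci_msg_req_example *)xfer->xfer_buf;
 *	req->some_field = some_value;
 *
 *	ret = ti_sci_do_xfer(info, xfer);
 *	if (!ret) {
 *		resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
 *		ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
 *	}
 *
 *	ti_sci_put_one_xfer(&info->minfo, xfer);
 *	return ret;
 */
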
452aa276781SNishanth Menon /**
453aa276781SNishanth Menon  * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
454aa276781SNishanth Menon  * @info:	Pointer to SCI entity information
455aa276781SNishanth Menon  *
456aa276781SNishanth Menon  * Updates the SCI information in the internal data structure.
457aa276781SNishanth Menon  *
458aa276781SNishanth Menon  * Return: 0 if all went fine, else return appropriate error.
459aa276781SNishanth Menon  */
460aa276781SNishanth Menon static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
461aa276781SNishanth Menon {
462aa276781SNishanth Menon 	struct device *dev = info->dev;
463aa276781SNishanth Menon 	struct ti_sci_handle *handle = &info->handle;
464aa276781SNishanth Menon 	struct ti_sci_version_info *ver = &handle->version;
465aa276781SNishanth Menon 	struct ti_sci_msg_resp_version *rev_info;
466aa276781SNishanth Menon 	struct ti_sci_xfer *xfer;
467aa276781SNishanth Menon 	int ret;
468aa276781SNishanth Menon 
469aa276781SNishanth Menon 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
47066f030eaSAndrew F. Davis 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
47166f030eaSAndrew F. Davis 				   sizeof(struct ti_sci_msg_hdr),
472aa276781SNishanth Menon 				   sizeof(*rev_info));
473aa276781SNishanth Menon 	if (IS_ERR(xfer)) {
474aa276781SNishanth Menon 		ret = PTR_ERR(xfer);
475aa276781SNishanth Menon 		dev_err(dev, "Message alloc failed(%d)\n", ret);
476aa276781SNishanth Menon 		return ret;
477aa276781SNishanth Menon 	}
478aa276781SNishanth Menon 
479aa276781SNishanth Menon 	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
480aa276781SNishanth Menon 
481aa276781SNishanth Menon 	ret = ti_sci_do_xfer(info, xfer);
482aa276781SNishanth Menon 	if (ret) {
483aa276781SNishanth Menon 		dev_err(dev, "Mbox send fail %d\n", ret);
484aa276781SNishanth Menon 		goto fail;
485aa276781SNishanth Menon 	}
486aa276781SNishanth Menon 
487aa276781SNishanth Menon 	ver->abi_major = rev_info->abi_major;
488aa276781SNishanth Menon 	ver->abi_minor = rev_info->abi_minor;
489aa276781SNishanth Menon 	ver->firmware_revision = rev_info->firmware_revision;
490aa276781SNishanth Menon 	strncpy(ver->firmware_description, rev_info->firmware_description,
491aa276781SNishanth Menon 		sizeof(ver->firmware_description));
492aa276781SNishanth Menon 
493aa276781SNishanth Menon fail:
494aa276781SNishanth Menon 	ti_sci_put_one_xfer(&info->minfo, xfer);
495aa276781SNishanth Menon 	return ret;
496aa276781SNishanth Menon }
497aa276781SNishanth Menon 
498aa276781SNishanth Menon /**
4999e7d756dSNishanth Menon  * ti_sci_is_response_ack() - Generic ACK/NACK message check
5009e7d756dSNishanth Menon  * @r:	pointer to response buffer
5019e7d756dSNishanth Menon  *
5029e7d756dSNishanth Menon  * Return: true if the response was an ACK, else returns false.
5039e7d756dSNishanth Menon  */
5049e7d756dSNishanth Menon static inline bool ti_sci_is_response_ack(void *r)
5059e7d756dSNishanth Menon {
5069e7d756dSNishanth Menon 	struct ti_sci_msg_hdr *hdr = r;
5079e7d756dSNishanth Menon 
5089e7d756dSNishanth Menon 	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
5099e7d756dSNishanth Menon }
5109e7d756dSNishanth Menon 
5119e7d756dSNishanth Menon /**
5129e7d756dSNishanth Menon  * ti_sci_set_device_state() - Set device state helper
5139e7d756dSNishanth Menon  * @handle:	pointer to TI SCI handle
5149e7d756dSNishanth Menon  * @id:		Device identifier
5159e7d756dSNishanth Menon  * @flags:	flags to setup for the device
5169e7d756dSNishanth Menon  * @state:	State to move the device to
5179e7d756dSNishanth Menon  *
5189e7d756dSNishanth Menon  * Return: 0 if all went well, else returns appropriate error value.
5199e7d756dSNishanth Menon  */
5209e7d756dSNishanth Menon static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
5219e7d756dSNishanth Menon 				   u32 id, u32 flags, u8 state)
5229e7d756dSNishanth Menon {
5239e7d756dSNishanth Menon 	struct ti_sci_info *info;
5249e7d756dSNishanth Menon 	struct ti_sci_msg_req_set_device_state *req;
5259e7d756dSNishanth Menon 	struct ti_sci_msg_hdr *resp;
5269e7d756dSNishanth Menon 	struct ti_sci_xfer *xfer;
5279e7d756dSNishanth Menon 	struct device *dev;
5289e7d756dSNishanth Menon 	int ret = 0;
5299e7d756dSNishanth Menon 
5309e7d756dSNishanth Menon 	if (IS_ERR(handle))
5319e7d756dSNishanth Menon 		return PTR_ERR(handle);
5329e7d756dSNishanth Menon 	if (!handle)
5339e7d756dSNishanth Menon 		return -EINVAL;
5349e7d756dSNishanth Menon 
5359e7d756dSNishanth Menon 	info = handle_to_ti_sci_info(handle);
5369e7d756dSNishanth Menon 	dev = info->dev;
5379e7d756dSNishanth Menon 
5389e7d756dSNishanth Menon 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
5399e7d756dSNishanth Menon 				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
5409e7d756dSNishanth Menon 				   sizeof(*req), sizeof(*resp));
5419e7d756dSNishanth Menon 	if (IS_ERR(xfer)) {
5429e7d756dSNishanth Menon 		ret = PTR_ERR(xfer);
5439e7d756dSNishanth Menon 		dev_err(dev, "Message alloc failed(%d)\n", ret);
5449e7d756dSNishanth Menon 		return ret;
5459e7d756dSNishanth Menon 	}
5469e7d756dSNishanth Menon 	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
5479e7d756dSNishanth Menon 	req->id = id;
5489e7d756dSNishanth Menon 	req->state = state;
5499e7d756dSNishanth Menon 
5509e7d756dSNishanth Menon 	ret = ti_sci_do_xfer(info, xfer);
5519e7d756dSNishanth Menon 	if (ret) {
5529e7d756dSNishanth Menon 		dev_err(dev, "Mbox send fail %d\n", ret);
5539e7d756dSNishanth Menon 		goto fail;
5549e7d756dSNishanth Menon 	}
5559e7d756dSNishanth Menon 
5569e7d756dSNishanth Menon 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
5579e7d756dSNishanth Menon 
5589e7d756dSNishanth Menon 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
5599e7d756dSNishanth Menon 
5609e7d756dSNishanth Menon fail:
5619e7d756dSNishanth Menon 	ti_sci_put_one_xfer(&info->minfo, xfer);
5629e7d756dSNishanth Menon 
5639e7d756dSNishanth Menon 	return ret;
5649e7d756dSNishanth Menon }
5659e7d756dSNishanth Menon 
5669e7d756dSNishanth Menon /**
5679e7d756dSNishanth Menon  * ti_sci_get_device_state() - Get device state helper
5689e7d756dSNishanth Menon  * @handle:	Pointer to TISCI handle
5699e7d756dSNishanth Menon  * @id:		Device Identifier
5709e7d756dSNishanth Menon  * @clcnt:	Pointer to Context Loss Count
5719e7d756dSNishanth Menon  * @resets:	pointer to resets
5729e7d756dSNishanth Menon  * @p_state:	pointer to p_state
5739e7d756dSNishanth Menon  * @c_state:	pointer to c_state
5749e7d756dSNishanth Menon  *
5759e7d756dSNishanth Menon  * Return: 0 if all went fine, else return appropriate error.
5769e7d756dSNishanth Menon  */
5779e7d756dSNishanth Menon static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
5789e7d756dSNishanth Menon 				   u32 id,  u32 *clcnt,  u32 *resets,
5799e7d756dSNishanth Menon 				    u8 *p_state,  u8 *c_state)
5809e7d756dSNishanth Menon {
5819e7d756dSNishanth Menon 	struct ti_sci_info *info;
5829e7d756dSNishanth Menon 	struct ti_sci_msg_req_get_device_state *req;
5839e7d756dSNishanth Menon 	struct ti_sci_msg_resp_get_device_state *resp;
5849e7d756dSNishanth Menon 	struct ti_sci_xfer *xfer;
5859e7d756dSNishanth Menon 	struct device *dev;
5869e7d756dSNishanth Menon 	int ret = 0;
5879e7d756dSNishanth Menon 
5889e7d756dSNishanth Menon 	if (IS_ERR(handle))
5899e7d756dSNishanth Menon 		return PTR_ERR(handle);
5909e7d756dSNishanth Menon 	if (!handle)
5919e7d756dSNishanth Menon 		return -EINVAL;
5929e7d756dSNishanth Menon 
5939e7d756dSNishanth Menon 	if (!clcnt && !resets && !p_state && !c_state)
5949e7d756dSNishanth Menon 		return -EINVAL;
5959e7d756dSNishanth Menon 
5969e7d756dSNishanth Menon 	info = handle_to_ti_sci_info(handle);
5979e7d756dSNishanth Menon 	dev = info->dev;
5989e7d756dSNishanth Menon 
5999e7d756dSNishanth Menon 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
60066f030eaSAndrew F. Davis 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
60166f030eaSAndrew F. Davis 				   sizeof(*req), sizeof(*resp));
6029e7d756dSNishanth Menon 	if (IS_ERR(xfer)) {
6039e7d756dSNishanth Menon 		ret = PTR_ERR(xfer);
6049e7d756dSNishanth Menon 		dev_err(dev, "Message alloc failed(%d)\n", ret);
6059e7d756dSNishanth Menon 		return ret;
6069e7d756dSNishanth Menon 	}
6079e7d756dSNishanth Menon 	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
6089e7d756dSNishanth Menon 	req->id = id;
6099e7d756dSNishanth Menon 
6109e7d756dSNishanth Menon 	ret = ti_sci_do_xfer(info, xfer);
6119e7d756dSNishanth Menon 	if (ret) {
6129e7d756dSNishanth Menon 		dev_err(dev, "Mbox send fail %d\n", ret);
6139e7d756dSNishanth Menon 		goto fail;
6149e7d756dSNishanth Menon 	}
6159e7d756dSNishanth Menon 
6169e7d756dSNishanth Menon 	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
6179e7d756dSNishanth Menon 	if (!ti_sci_is_response_ack(resp)) {
6189e7d756dSNishanth Menon 		ret = -ENODEV;
6199e7d756dSNishanth Menon 		goto fail;
6209e7d756dSNishanth Menon 	}
6219e7d756dSNishanth Menon 
6229e7d756dSNishanth Menon 	if (clcnt)
6239e7d756dSNishanth Menon 		*clcnt = resp->context_loss_count;
6249e7d756dSNishanth Menon 	if (resets)
6259e7d756dSNishanth Menon 		*resets = resp->resets;
6269e7d756dSNishanth Menon 	if (p_state)
6279e7d756dSNishanth Menon 		*p_state = resp->programmed_state;
6289e7d756dSNishanth Menon 	if (c_state)
6299e7d756dSNishanth Menon 		*c_state = resp->current_state;
6309e7d756dSNishanth Menon fail:
6319e7d756dSNishanth Menon 	ti_sci_put_one_xfer(&info->minfo, xfer);
6329e7d756dSNishanth Menon 
6339e7d756dSNishanth Menon 	return ret;
6349e7d756dSNishanth Menon }
6359e7d756dSNishanth Menon 
6369e7d756dSNishanth Menon /**
6379e7d756dSNishanth Menon  * ti_sci_cmd_get_device() - command to request for device managed by TISCI
6389e7d756dSNishanth Menon  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
6399e7d756dSNishanth Menon  * @id:		Device Identifier
6409e7d756dSNishanth Menon  *
6419e7d756dSNishanth Menon  * Request for the device - NOTE: the client MUST maintain integrity of
6429e7d756dSNishanth Menon  * usage count by balancing get_device with put_device. No refcounting is
6439e7d756dSNishanth Menon  * managed by the driver for that purpose.
6449e7d756dSNishanth Menon  *
6459e7d756dSNishanth Menon  * NOTE: The request is for exclusive access for the processor.
6469e7d756dSNishanth Menon  *
6479e7d756dSNishanth Menon  * Return: 0 if all went fine, else return appropriate error.
6489e7d756dSNishanth Menon  */
6499e7d756dSNishanth Menon static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
6509e7d756dSNishanth Menon {
6519e7d756dSNishanth Menon 	return ti_sci_set_device_state(handle, id,
6529e7d756dSNishanth Menon 				       MSG_FLAG_DEVICE_EXCLUSIVE,
6539e7d756dSNishanth Menon 				       MSG_DEVICE_SW_STATE_ON);
6549e7d756dSNishanth Menon }
6559e7d756dSNishanth Menon 
6569e7d756dSNishanth Menon /**
6579e7d756dSNishanth Menon  * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
6589e7d756dSNishanth Menon  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
6599e7d756dSNishanth Menon  * @id:		Device Identifier
6609e7d756dSNishanth Menon  *
6619e7d756dSNishanth Menon  * Request to idle the device - NOTE: the client MUST maintain integrity of
6629e7d756dSNishanth Menon  * usage count by balancing get_device with put_device. No refcounting is
6639e7d756dSNishanth Menon  * managed by the driver for that purpose.
6649e7d756dSNishanth Menon  *
6659e7d756dSNishanth Menon  * Return: 0 if all went fine, else return appropriate error.
6669e7d756dSNishanth Menon  */
6679e7d756dSNishanth Menon static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
6689e7d756dSNishanth Menon {
6699e7d756dSNishanth Menon 	return ti_sci_set_device_state(handle, id,
6709e7d756dSNishanth Menon 				       MSG_FLAG_DEVICE_EXCLUSIVE,
6719e7d756dSNishanth Menon 				       MSG_DEVICE_SW_STATE_RETENTION);
6729e7d756dSNishanth Menon }
6739e7d756dSNishanth Menon 
6749e7d756dSNishanth Menon /**
6759e7d756dSNishanth Menon  * ti_sci_cmd_put_device() - command to release a device managed by TISCI
6769e7d756dSNishanth Menon  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
6779e7d756dSNishanth Menon  * @id:		Device Identifier
6789e7d756dSNishanth Menon  *
6799e7d756dSNishanth Menon  * Release the device - NOTE: the client MUST maintain integrity of
6809e7d756dSNishanth Menon  * usage count by balancing get_device with put_device. No refcounting is
6819e7d756dSNishanth Menon  * managed by the driver for that purpose.
6829e7d756dSNishanth Menon  *
6839e7d756dSNishanth Menon  * Return: 0 if all went fine, else return appropriate error.
6849e7d756dSNishanth Menon  */
6859e7d756dSNishanth Menon static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
6869e7d756dSNishanth Menon {
6879e7d756dSNishanth Menon 	return ti_sci_set_device_state(handle, id,
6889e7d756dSNishanth Menon 				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
6899e7d756dSNishanth Menon }
6909e7d756dSNishanth Menon 
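/*
 * Taken together, ti_sci_cmd_get_device(), ti_sci_cmd_idle_device() and
 * ti_sci_cmd_put_device() give a client a request/idle/release lifetime for
 * a device. A hedged sketch of the intended call sequence, using the helpers
 * directly for clarity and a made-up EXAMPLE_DEV_ID (clients normally reach
 * these through the ops installed on struct ti_sci_handle):
 *
 *	ret = ti_sci_cmd_get_device(handle, EXAMPLE_DEV_ID);
 *	if (ret)
 *		return ret;
 *
 *	(program the hardware; optionally park it with
 *	 ti_sci_cmd_idle_device(handle, EXAMPLE_DEV_ID) while still claimed)
 *
 *	ti_sci_cmd_put_device(handle, EXAMPLE_DEV_ID);
 */
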
6919e7d756dSNishanth Menon /**
6929e7d756dSNishanth Menon  * ti_sci_cmd_dev_is_valid() - Is the device valid
6939e7d756dSNishanth Menon  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
6949e7d756dSNishanth Menon  * @id:		Device Identifier
6959e7d756dSNishanth Menon  *
6969e7d756dSNishanth Menon  * Return: 0 if all went fine and the device ID is valid, else return
6979e7d756dSNishanth Menon  * appropriate error.
6989e7d756dSNishanth Menon  */
6999e7d756dSNishanth Menon static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
7009e7d756dSNishanth Menon {
7019e7d756dSNishanth Menon 	u8 unused;
7029e7d756dSNishanth Menon 
7039e7d756dSNishanth Menon 	/* check the device state which will also tell us if the ID is valid */
7049e7d756dSNishanth Menon 	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
7059e7d756dSNishanth Menon }
7069e7d756dSNishanth Menon 
7079e7d756dSNishanth Menon /**
7089e7d756dSNishanth Menon  * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
7099e7d756dSNishanth Menon  * @handle:	Pointer to TISCI handle
7109e7d756dSNishanth Menon  * @id:		Device Identifier
7119e7d756dSNishanth Menon  * @count:	Pointer to Context Loss counter to populate
7129e7d756dSNishanth Menon  *
7139e7d756dSNishanth Menon  * Return: 0 if all went fine, else return appropriate error.
7149e7d756dSNishanth Menon  */
7159e7d756dSNishanth Menon static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
7169e7d756dSNishanth Menon 				    u32 *count)
7179e7d756dSNishanth Menon {
7189e7d756dSNishanth Menon 	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
7199e7d756dSNishanth Menon }
7209e7d756dSNishanth Menon 
7219e7d756dSNishanth Menon /**
7229e7d756dSNishanth Menon  * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
7239e7d756dSNishanth Menon  * @handle:	Pointer to TISCI handle
7249e7d756dSNishanth Menon  * @id:		Device Identifier
7259e7d756dSNishanth Menon  * @r_state:	true if requested to be idle
7269e7d756dSNishanth Menon  *
7279e7d756dSNishanth Menon  * Return: 0 if all went fine, else return appropriate error.
7289e7d756dSNishanth Menon  */
7299e7d756dSNishanth Menon static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
7309e7d756dSNishanth Menon 				  bool *r_state)
7319e7d756dSNishanth Menon {
7329e7d756dSNishanth Menon 	int ret;
7339e7d756dSNishanth Menon 	u8 state;
7349e7d756dSNishanth Menon 
7359e7d756dSNishanth Menon 	if (!r_state)
7369e7d756dSNishanth Menon 		return -EINVAL;
7379e7d756dSNishanth Menon 
7389e7d756dSNishanth Menon 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
7399e7d756dSNishanth Menon 	if (ret)
7409e7d756dSNishanth Menon 		return ret;
7419e7d756dSNishanth Menon 
7429e7d756dSNishanth Menon 	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
7439e7d756dSNishanth Menon 
7449e7d756dSNishanth Menon 	return 0;
7459e7d756dSNishanth Menon }
7469e7d756dSNishanth Menon 
7479e7d756dSNishanth Menon /**
7489e7d756dSNishanth Menon  * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
7499e7d756dSNishanth Menon  * @handle:	Pointer to TISCI handle
7509e7d756dSNishanth Menon  * @id:		Device Identifier
7519e7d756dSNishanth Menon  * @r_state:	true if requested to be stopped
7529e7d756dSNishanth Menon  * @curr_state:	true if currently stopped.
7539e7d756dSNishanth Menon  *
7549e7d756dSNishanth Menon  * Return: 0 if all went fine, else return appropriate error.
7559e7d756dSNishanth Menon  */
7569e7d756dSNishanth Menon static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
7579e7d756dSNishanth Menon 				  bool *r_state,  bool *curr_state)
7589e7d756dSNishanth Menon {
7599e7d756dSNishanth Menon 	int ret;
7609e7d756dSNishanth Menon 	u8 p_state, c_state;
7619e7d756dSNishanth Menon 
7629e7d756dSNishanth Menon 	if (!r_state && !curr_state)
7639e7d756dSNishanth Menon 		return -EINVAL;
7649e7d756dSNishanth Menon 
7659e7d756dSNishanth Menon 	ret =
7669e7d756dSNishanth Menon 	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
7679e7d756dSNishanth Menon 	if (ret)
7689e7d756dSNishanth Menon 		return ret;
7699e7d756dSNishanth Menon 
7709e7d756dSNishanth Menon 	if (r_state)
7719e7d756dSNishanth Menon 		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
7729e7d756dSNishanth Menon 	if (curr_state)
7739e7d756dSNishanth Menon 		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
7749e7d756dSNishanth Menon 
7759e7d756dSNishanth Menon 	return 0;
7769e7d756dSNishanth Menon }
7779e7d756dSNishanth Menon 
7789e7d756dSNishanth Menon /**
7799e7d756dSNishanth Menon  * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
7809e7d756dSNishanth Menon  * @handle:	Pointer to TISCI handle
7819e7d756dSNishanth Menon  * @id:		Device Identifier
7829e7d756dSNishanth Menon  * @r_state:	true if requested to be ON
7839e7d756dSNishanth Menon  * @curr_state:	true if currently ON and active
7849e7d756dSNishanth Menon  *
7859e7d756dSNishanth Menon  * Return: 0 if all went fine, else return appropriate error.
7869e7d756dSNishanth Menon  */
7879e7d756dSNishanth Menon static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
7889e7d756dSNishanth Menon 				bool *r_state,  bool *curr_state)
7899e7d756dSNishanth Menon {
7909e7d756dSNishanth Menon 	int ret;
7919e7d756dSNishanth Menon 	u8 p_state, c_state;
7929e7d756dSNishanth Menon 
7939e7d756dSNishanth Menon 	if (!r_state && !curr_state)
7949e7d756dSNishanth Menon 		return -EINVAL;
7959e7d756dSNishanth Menon 
7969e7d756dSNishanth Menon 	ret =
7979e7d756dSNishanth Menon 	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
7989e7d756dSNishanth Menon 	if (ret)
7999e7d756dSNishanth Menon 		return ret;
8009e7d756dSNishanth Menon 
8019e7d756dSNishanth Menon 	if (r_state)
8029e7d756dSNishanth Menon 		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
8039e7d756dSNishanth Menon 	if (curr_state)
8049e7d756dSNishanth Menon 		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
8059e7d756dSNishanth Menon 
8069e7d756dSNishanth Menon 	return 0;
8079e7d756dSNishanth Menon }
8089e7d756dSNishanth Menon 
8099e7d756dSNishanth Menon /**
8109e7d756dSNishanth Menon  * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
8119e7d756dSNishanth Menon  * @handle:	Pointer to TISCI handle
8129e7d756dSNishanth Menon  * @id:		Device Identifier
8139e7d756dSNishanth Menon  * @curr_state:	true if currently transitioning.
8149e7d756dSNishanth Menon  *
8159e7d756dSNishanth Menon  * Return: 0 if all went fine, else return appropriate error.
8169e7d756dSNishanth Menon  */
8179e7d756dSNishanth Menon static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
8189e7d756dSNishanth Menon 				   bool *curr_state)
8199e7d756dSNishanth Menon {
8209e7d756dSNishanth Menon 	int ret;
8219e7d756dSNishanth Menon 	u8 state;
8229e7d756dSNishanth Menon 
8239e7d756dSNishanth Menon 	if (!curr_state)
8249e7d756dSNishanth Menon 		return -EINVAL;
8259e7d756dSNishanth Menon 
8269e7d756dSNishanth Menon 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
8279e7d756dSNishanth Menon 	if (ret)
8289e7d756dSNishanth Menon 		return ret;
8299e7d756dSNishanth Menon 
8309e7d756dSNishanth Menon 	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
8319e7d756dSNishanth Menon 
8329e7d756dSNishanth Menon 	return 0;
8339e7d756dSNishanth Menon }
8349e7d756dSNishanth Menon 
8359e7d756dSNishanth Menon /**
8369e7d756dSNishanth Menon  * ti_sci_cmd_set_device_resets() - command to set resets for device managed
8379e7d756dSNishanth Menon  *				    by TISCI
8389e7d756dSNishanth Menon  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
8399e7d756dSNishanth Menon  * @id:		Device Identifier
8409e7d756dSNishanth Menon  * @reset_state: Device specific reset bit field
8419e7d756dSNishanth Menon  *
8429e7d756dSNishanth Menon  * Return: 0 if all went fine, else return appropriate error.
8439e7d756dSNishanth Menon  */
8449e7d756dSNishanth Menon static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
8459e7d756dSNishanth Menon 					u32 id, u32 reset_state)
8469e7d756dSNishanth Menon {
8479e7d756dSNishanth Menon 	struct ti_sci_info *info;
8489e7d756dSNishanth Menon 	struct ti_sci_msg_req_set_device_resets *req;
8499e7d756dSNishanth Menon 	struct ti_sci_msg_hdr *resp;
8509e7d756dSNishanth Menon 	struct ti_sci_xfer *xfer;
8519e7d756dSNishanth Menon 	struct device *dev;
8529e7d756dSNishanth Menon 	int ret = 0;
8539e7d756dSNishanth Menon 
8549e7d756dSNishanth Menon 	if (IS_ERR(handle))
8559e7d756dSNishanth Menon 		return PTR_ERR(handle);
8569e7d756dSNishanth Menon 	if (!handle)
8579e7d756dSNishanth Menon 		return -EINVAL;
8589e7d756dSNishanth Menon 
8599e7d756dSNishanth Menon 	info = handle_to_ti_sci_info(handle);
8609e7d756dSNishanth Menon 	dev = info->dev;
8619e7d756dSNishanth Menon 
8629e7d756dSNishanth Menon 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
8639e7d756dSNishanth Menon 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
8649e7d756dSNishanth Menon 				   sizeof(*req), sizeof(*resp));
8659e7d756dSNishanth Menon 	if (IS_ERR(xfer)) {
8669e7d756dSNishanth Menon 		ret = PTR_ERR(xfer);
8679e7d756dSNishanth Menon 		dev_err(dev, "Message alloc failed(%d)\n", ret);
8689e7d756dSNishanth Menon 		return ret;
8699e7d756dSNishanth Menon 	}
8709e7d756dSNishanth Menon 	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
8719e7d756dSNishanth Menon 	req->id = id;
8729e7d756dSNishanth Menon 	req->resets = reset_state;
8739e7d756dSNishanth Menon 
8749e7d756dSNishanth Menon 	ret = ti_sci_do_xfer(info, xfer);
8759e7d756dSNishanth Menon 	if (ret) {
8769e7d756dSNishanth Menon 		dev_err(dev, "Mbox send fail %d\n", ret);
8779e7d756dSNishanth Menon 		goto fail;
8789e7d756dSNishanth Menon 	}
8799e7d756dSNishanth Menon 
8809e7d756dSNishanth Menon 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
8819e7d756dSNishanth Menon 
8829e7d756dSNishanth Menon 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
8839e7d756dSNishanth Menon 
8849e7d756dSNishanth Menon fail:
8859e7d756dSNishanth Menon 	ti_sci_put_one_xfer(&info->minfo, xfer);
8869e7d756dSNishanth Menon 
8879e7d756dSNishanth Menon 	return ret;
8889e7d756dSNishanth Menon }
8899e7d756dSNishanth Menon 
8909e7d756dSNishanth Menon /**
8919e7d756dSNishanth Menon  * ti_sci_cmd_get_device_resets() - Get reset state for device managed
8929e7d756dSNishanth Menon  *				    by TISCI
8939e7d756dSNishanth Menon  * @handle:		Pointer to TISCI handle
8949e7d756dSNishanth Menon  * @id:			Device Identifier
8959e7d756dSNishanth Menon  * @reset_state:	Pointer to reset state to populate
8969e7d756dSNishanth Menon  *
8979e7d756dSNishanth Menon  * Return: 0 if all went fine, else return appropriate error.
8989e7d756dSNishanth Menon  */
8999e7d756dSNishanth Menon static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
9009e7d756dSNishanth Menon 					u32 id, u32 *reset_state)
9019e7d756dSNishanth Menon {
9029e7d756dSNishanth Menon 	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
9039e7d756dSNishanth Menon 				       NULL);
9049e7d756dSNishanth Menon }
9059e7d756dSNishanth Menon 
9069f723220SNishanth Menon /**
9079f723220SNishanth Menon  * ti_sci_set_clock_state() - Set clock state helper
9089f723220SNishanth Menon  * @handle:	pointer to TI SCI handle
9099f723220SNishanth Menon  * @dev_id:	Device identifier this request is for
9109f723220SNishanth Menon  * @clk_id:	Clock identifier for the device for this request.
9119f723220SNishanth Menon  *		Each device has its own set of clock inputs. This indexes
9129f723220SNishanth Menon  *		which clock input to modify.
9139f723220SNishanth Menon  * @flags:	Header flags as needed
9149f723220SNishanth Menon  * @state:	State to request for the clock.
9159f723220SNishanth Menon  *
9169f723220SNishanth Menon  * Return: 0 if all went well, else returns appropriate error value.
9179f723220SNishanth Menon  */
9189f723220SNishanth Menon static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
9199f723220SNishanth Menon 				  u32 dev_id, u8 clk_id,
9209f723220SNishanth Menon 				  u32 flags, u8 state)
9219f723220SNishanth Menon {
9229f723220SNishanth Menon 	struct ti_sci_info *info;
9239f723220SNishanth Menon 	struct ti_sci_msg_req_set_clock_state *req;
9249f723220SNishanth Menon 	struct ti_sci_msg_hdr *resp;
9259f723220SNishanth Menon 	struct ti_sci_xfer *xfer;
9269f723220SNishanth Menon 	struct device *dev;
9279f723220SNishanth Menon 	int ret = 0;
9289f723220SNishanth Menon 
9299f723220SNishanth Menon 	if (IS_ERR(handle))
9309f723220SNishanth Menon 		return PTR_ERR(handle);
9319f723220SNishanth Menon 	if (!handle)
9329f723220SNishanth Menon 		return -EINVAL;
9339f723220SNishanth Menon 
9349f723220SNishanth Menon 	info = handle_to_ti_sci_info(handle);
9359f723220SNishanth Menon 	dev = info->dev;
9369f723220SNishanth Menon 
9379f723220SNishanth Menon 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
9389f723220SNishanth Menon 				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
9399f723220SNishanth Menon 				   sizeof(*req), sizeof(*resp));
9409f723220SNishanth Menon 	if (IS_ERR(xfer)) {
9419f723220SNishanth Menon 		ret = PTR_ERR(xfer);
9429f723220SNishanth Menon 		dev_err(dev, "Message alloc failed(%d)\n", ret);
9439f723220SNishanth Menon 		return ret;
9449f723220SNishanth Menon 	}
9459f723220SNishanth Menon 	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
9469f723220SNishanth Menon 	req->dev_id = dev_id;
9479f723220SNishanth Menon 	req->clk_id = clk_id;
9489f723220SNishanth Menon 	req->request_state = state;
9499f723220SNishanth Menon 
9509f723220SNishanth Menon 	ret = ti_sci_do_xfer(info, xfer);
9519f723220SNishanth Menon 	if (ret) {
9529f723220SNishanth Menon 		dev_err(dev, "Mbox send fail %d\n", ret);
9539f723220SNishanth Menon 		goto fail;
9549f723220SNishanth Menon 	}
9559f723220SNishanth Menon 
9569f723220SNishanth Menon 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
9579f723220SNishanth Menon 
9589f723220SNishanth Menon 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
9599f723220SNishanth Menon 
9609f723220SNishanth Menon fail:
9619f723220SNishanth Menon 	ti_sci_put_one_xfer(&info->minfo, xfer);
9629f723220SNishanth Menon 
9639f723220SNishanth Menon 	return ret;
9649f723220SNishanth Menon }
9659f723220SNishanth Menon 
9669f723220SNishanth Menon /**
9679f723220SNishanth Menon  * ti_sci_cmd_get_clock_state() - Get clock state helper
9689f723220SNishanth Menon  * @handle:	pointer to TI SCI handle
9699f723220SNishanth Menon  * @dev_id:	Device identifier this request is for
9709f723220SNishanth Menon  * @clk_id:	Clock identifier for the device for this request.
9719f723220SNishanth Menon  *		Each device has its own set of clock inputs. This indexes
9729f723220SNishanth Menon  *		which clock input to modify.
9739f723220SNishanth Menon  * @programmed_state:	State requested for clock to move to
9749f723220SNishanth Menon  * @current_state:	State that the clock is currently in
9759f723220SNishanth Menon  *
9769f723220SNishanth Menon  * Return: 0 if all went well, else returns appropriate error value.
9779f723220SNishanth Menon  */
9789f723220SNishanth Menon static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
9799f723220SNishanth Menon 				      u32 dev_id, u8 clk_id,
9809f723220SNishanth Menon 				      u8 *programmed_state, u8 *current_state)
9819f723220SNishanth Menon {
9829f723220SNishanth Menon 	struct ti_sci_info *info;
9839f723220SNishanth Menon 	struct ti_sci_msg_req_get_clock_state *req;
9849f723220SNishanth Menon 	struct ti_sci_msg_resp_get_clock_state *resp;
9859f723220SNishanth Menon 	struct ti_sci_xfer *xfer;
9869f723220SNishanth Menon 	struct device *dev;
9879f723220SNishanth Menon 	int ret = 0;
9889f723220SNishanth Menon 
9899f723220SNishanth Menon 	if (IS_ERR(handle))
9909f723220SNishanth Menon 		return PTR_ERR(handle);
9919f723220SNishanth Menon 	if (!handle)
9929f723220SNishanth Menon 		return -EINVAL;
9939f723220SNishanth Menon 
9949f723220SNishanth Menon 	if (!programmed_state && !current_state)
9959f723220SNishanth Menon 		return -EINVAL;
9969f723220SNishanth Menon 
9979f723220SNishanth Menon 	info = handle_to_ti_sci_info(handle);
9989f723220SNishanth Menon 	dev = info->dev;
9999f723220SNishanth Menon 
10009f723220SNishanth Menon 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
10019f723220SNishanth Menon 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
10029f723220SNishanth Menon 				   sizeof(*req), sizeof(*resp));
10039f723220SNishanth Menon 	if (IS_ERR(xfer)) {
10049f723220SNishanth Menon 		ret = PTR_ERR(xfer);
10059f723220SNishanth Menon 		dev_err(dev, "Message alloc failed(%d)\n", ret);
10069f723220SNishanth Menon 		return ret;
10079f723220SNishanth Menon 	}
10089f723220SNishanth Menon 	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
10099f723220SNishanth Menon 	req->dev_id = dev_id;
10109f723220SNishanth Menon 	req->clk_id = clk_id;
10119f723220SNishanth Menon 
10129f723220SNishanth Menon 	ret = ti_sci_do_xfer(info, xfer);
10139f723220SNishanth Menon 	if (ret) {
10149f723220SNishanth Menon 		dev_err(dev, "Mbox send fail %d\n", ret);
10159f723220SNishanth Menon 		goto fail;
10169f723220SNishanth Menon 	}
10179f723220SNishanth Menon 
10189f723220SNishanth Menon 	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
10199f723220SNishanth Menon 
10209f723220SNishanth Menon 	if (!ti_sci_is_response_ack(resp)) {
10219f723220SNishanth Menon 		ret = -ENODEV;
10229f723220SNishanth Menon 		goto fail;
10239f723220SNishanth Menon 	}
10249f723220SNishanth Menon 
10259f723220SNishanth Menon 	if (programmed_state)
10269f723220SNishanth Menon 		*programmed_state = resp->programmed_state;
10279f723220SNishanth Menon 	if (current_state)
10289f723220SNishanth Menon 		*current_state = resp->current_state;
10299f723220SNishanth Menon 
10309f723220SNishanth Menon fail:
10319f723220SNishanth Menon 	ti_sci_put_one_xfer(&info->minfo, xfer);
10329f723220SNishanth Menon 
10339f723220SNishanth Menon 	return ret;
10349f723220SNishanth Menon }
10359f723220SNishanth Menon 
10369f723220SNishanth Menon /**
10379f723220SNishanth Menon  * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
10389f723220SNishanth Menon  * @handle:	pointer to TI SCI handle
10399f723220SNishanth Menon  * @dev_id:	Device identifier this request is for
10409f723220SNishanth Menon  * @clk_id:	Clock identifier for the device for this request.
10419f723220SNishanth Menon  *		Each device has its own set of clock inputs. This indexes
10429f723220SNishanth Menon  *		which clock input to modify.
10439f723220SNishanth Menon  * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
10449f723220SNishanth Menon  * @can_change_freq: 'true' if frequency change is desired, else 'false'
10459f723220SNishanth Menon  * @enable_input_term: 'true' if input termination is desired, else 'false'
10469f723220SNishanth Menon  *
10479f723220SNishanth Menon  * Return: 0 if all went well, else returns appropriate error value.
10489f723220SNishanth Menon  */
10499f723220SNishanth Menon static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
10509f723220SNishanth Menon 				u8 clk_id, bool needs_ssc, bool can_change_freq,
10519f723220SNishanth Menon 				bool enable_input_term)
10529f723220SNishanth Menon {
10539f723220SNishanth Menon 	u32 flags = 0;
10549f723220SNishanth Menon 
10559f723220SNishanth Menon 	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
10569f723220SNishanth Menon 	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
10579f723220SNishanth Menon 	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
10589f723220SNishanth Menon 
10599f723220SNishanth Menon 	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
10609f723220SNishanth Menon 				      MSG_CLOCK_SW_STATE_REQ);
10619f723220SNishanth Menon }
10629f723220SNishanth Menon 
10639f723220SNishanth Menon /**
10649f723220SNishanth Menon  * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
10659f723220SNishanth Menon  * @handle:	pointer to TI SCI handle
10669f723220SNishanth Menon  * @dev_id:	Device identifier this request is for
10679f723220SNishanth Menon  * @clk_id:	Clock identifier for the device for this request.
10689f723220SNishanth Menon  *		Each device has its own set of clock inputs. This indexes
10699f723220SNishanth Menon  *		which clock input to modify.
10709f723220SNishanth Menon  *
10719f723220SNishanth Menon  * NOTE: This clock must have been requested by get_clock previously.
10729f723220SNishanth Menon  *
10739f723220SNishanth Menon  * Return: 0 if all went well, else returns appropriate error value.
10749f723220SNishanth Menon  */
10759f723220SNishanth Menon static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
10769f723220SNishanth Menon 				 u32 dev_id, u8 clk_id)
10779f723220SNishanth Menon {
10789f723220SNishanth Menon 	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
10799f723220SNishanth Menon 				      MSG_CLOCK_SW_STATE_UNREQ);
10809f723220SNishanth Menon }
10819f723220SNishanth Menon 
10829f723220SNishanth Menon /**
10839f723220SNishanth Menon  * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
10849f723220SNishanth Menon  * @handle:	pointer to TI SCI handle
10859f723220SNishanth Menon  * @dev_id:	Device identifier this request is for
10869f723220SNishanth Menon  * @clk_id:	Clock identifier for the device for this request.
10879f723220SNishanth Menon  *		Each device has its own set of clock inputs. This indexes
10889f723220SNishanth Menon  *		which clock input to modify.
10899f723220SNishanth Menon  *
10909f723220SNishanth Menon  * NOTE: This clock must have been requested by get_clock previously.
10919f723220SNishanth Menon  *
10929f723220SNishanth Menon  * Return: 0 if all went well, else returns appropriate error value.
10939f723220SNishanth Menon  */
10949f723220SNishanth Menon static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
10959f723220SNishanth Menon 				u32 dev_id, u8 clk_id)
10969f723220SNishanth Menon {
10979f723220SNishanth Menon 	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
10989f723220SNishanth Menon 				      MSG_CLOCK_SW_STATE_AUTO);
10999f723220SNishanth Menon }
11009f723220SNishanth Menon 
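/*
 * Illustrative note (not part of the driver): the three helpers above map
 * directly onto the TISCI software states for a clock. Assuming a valid
 * handle/dev_id/clk_id obtained elsewhere, a typical consumer flow is:
 *
 *	ti_sci_cmd_get_clock(handle, dev_id, clk_id, false, true, false);
 *		-> MSG_CLOCK_SW_STATE_REQ: host owns the clock and wants it on
 *	ti_sci_cmd_idle_clock(handle, dev_id, clk_id);
 *		-> MSG_CLOCK_SW_STATE_UNREQ: host still owns it, keeps it idle
 *	ti_sci_cmd_put_clock(handle, dev_id, clk_id);
 *		-> MSG_CLOCK_SW_STATE_AUTO: firmware manages the clock again
 */
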
11019f723220SNishanth Menon /**
11029f723220SNishanth Menon  * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
11039f723220SNishanth Menon  * @handle:	pointer to TI SCI handle
11049f723220SNishanth Menon  * @dev_id:	Device identifier this request is for
11059f723220SNishanth Menon  * @clk_id:	Clock identifier for the device for this request.
11069f723220SNishanth Menon  *		Each device has its own set of clock inputs. This indexes
11079f723220SNishanth Menon  *		which clock input to modify.
11089f723220SNishanth Menon  * @req_state: state indicating if the clock is auto managed
11099f723220SNishanth Menon  *
11109f723220SNishanth Menon  * Return: 0 if all went well, else returns appropriate error value.
11119f723220SNishanth Menon  */
11129f723220SNishanth Menon static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
11139f723220SNishanth Menon 				  u32 dev_id, u8 clk_id, bool *req_state)
11149f723220SNishanth Menon {
11159f723220SNishanth Menon 	u8 state = 0;
11169f723220SNishanth Menon 	int ret;
11179f723220SNishanth Menon 
11189f723220SNishanth Menon 	if (!req_state)
11199f723220SNishanth Menon 		return -EINVAL;
11209f723220SNishanth Menon 
11219f723220SNishanth Menon 	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
11229f723220SNishanth Menon 	if (ret)
11239f723220SNishanth Menon 		return ret;
11249f723220SNishanth Menon 
11259f723220SNishanth Menon 	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
11269f723220SNishanth Menon 	return 0;
11279f723220SNishanth Menon }
11289f723220SNishanth Menon 
11299f723220SNishanth Menon /**
11309f723220SNishanth Menon  * ti_sci_cmd_clk_is_on() - Is the clock ON
11319f723220SNishanth Menon  * @handle:	pointer to TI SCI handle
11329f723220SNishanth Menon  * @dev_id:	Device identifier this request is for
11339f723220SNishanth Menon  * @clk_id:	Clock identifier for the device for this request.
11349f723220SNishanth Menon  *		Each device has its own set of clock inputs. This indexes
11359f723220SNishanth Menon  *		which clock input to modify.
11369f723220SNishanth Menon  * @req_state: state indicating if the clock is managed by us and enabled
11379f723220SNishanth Menon  * @curr_state: state indicating if the clock is ready for operation
11389f723220SNishanth Menon  *
11399f723220SNishanth Menon  * Return: 0 if all went well, else returns appropriate error value.
11409f723220SNishanth Menon  */
11419f723220SNishanth Menon static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
11429f723220SNishanth Menon 				u8 clk_id, bool *req_state, bool *curr_state)
11439f723220SNishanth Menon {
11449f723220SNishanth Menon 	u8 c_state = 0, r_state = 0;
11459f723220SNishanth Menon 	int ret;
11469f723220SNishanth Menon 
11479f723220SNishanth Menon 	if (!req_state && !curr_state)
11489f723220SNishanth Menon 		return -EINVAL;
11499f723220SNishanth Menon 
11509f723220SNishanth Menon 	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
11519f723220SNishanth Menon 					 &r_state, &c_state);
11529f723220SNishanth Menon 	if (ret)
11539f723220SNishanth Menon 		return ret;
11549f723220SNishanth Menon 
11559f723220SNishanth Menon 	if (req_state)
11569f723220SNishanth Menon 		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
11579f723220SNishanth Menon 	if (curr_state)
11589f723220SNishanth Menon 		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
11599f723220SNishanth Menon 	return 0;
11609f723220SNishanth Menon }
11619f723220SNishanth Menon 
11629f723220SNishanth Menon /**
11639f723220SNishanth Menon  * ti_sci_cmd_clk_is_off() - Is the clock OFF
11649f723220SNishanth Menon  * @handle:	pointer to TI SCI handle
11659f723220SNishanth Menon  * @dev_id:	Device identifier this request is for
11669f723220SNishanth Menon  * @clk_id:	Clock identifier for the device for this request.
11679f723220SNishanth Menon  *		Each device has its own set of clock inputs. This indexes
11689f723220SNishanth Menon  *		which clock input to modify.
11699f723220SNishanth Menon  * @req_state: state indicating if the clock is managed by us and disabled
11709f723220SNishanth Menon  * @curr_state: state indicating if the clock is NOT ready for operation
11719f723220SNishanth Menon  *
11729f723220SNishanth Menon  * Return: 0 if all went well, else returns appropriate error value.
11739f723220SNishanth Menon  */
11749f723220SNishanth Menon static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
11759f723220SNishanth Menon 				 u8 clk_id, bool *req_state, bool *curr_state)
11769f723220SNishanth Menon {
11779f723220SNishanth Menon 	u8 c_state = 0, r_state = 0;
11789f723220SNishanth Menon 	int ret;
11799f723220SNishanth Menon 
11809f723220SNishanth Menon 	if (!req_state && !curr_state)
11819f723220SNishanth Menon 		return -EINVAL;
11829f723220SNishanth Menon 
11839f723220SNishanth Menon 	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
11849f723220SNishanth Menon 					 &r_state, &c_state);
11859f723220SNishanth Menon 	if (ret)
11869f723220SNishanth Menon 		return ret;
11879f723220SNishanth Menon 
11889f723220SNishanth Menon 	if (req_state)
11899f723220SNishanth Menon 		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
11909f723220SNishanth Menon 	if (curr_state)
11919f723220SNishanth Menon 		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
11929f723220SNishanth Menon 	return 0;
11939f723220SNishanth Menon }
11949f723220SNishanth Menon 
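/*
 * Illustrative sketch (not part of the driver): how the three state queries
 * above compose for a caller. The handle, dev_id and clk_id are assumed to
 * have been obtained elsewhere; this helper exists only as an example.
 */
static int __maybe_unused ti_sci_example_clk_report(const struct ti_sci_handle *handle,
						    u32 dev_id, u8 clk_id)
{
	bool is_auto, req_on, hw_ready, req_off, hw_not_ready;
	int ret;

	ret = ti_sci_cmd_clk_is_auto(handle, dev_id, clk_id, &is_auto);
	if (ret)
		return ret;

	ret = ti_sci_cmd_clk_is_on(handle, dev_id, clk_id, &req_on, &hw_ready);
	if (ret)
		return ret;

	ret = ti_sci_cmd_clk_is_off(handle, dev_id, clk_id, &req_off,
				    &hw_not_ready);
	if (ret)
		return ret;

	/* Requested and hardware states can disagree while a transition is in flight */
	pr_debug("clk %u:%u auto=%d on(req=%d hw=%d) off(req=%d hw=%d)\n",
		 dev_id, clk_id, is_auto, req_on, hw_ready, req_off,
		 hw_not_ready);
	return 0;
}
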
11959f723220SNishanth Menon /**
11969f723220SNishanth Menon  * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
11979f723220SNishanth Menon  * @handle:	pointer to TI SCI handle
11989f723220SNishanth Menon  * @dev_id:	Device identifier this request is for
11999f723220SNishanth Menon  * @clk_id:	Clock identifier for the device for this request.
12009f723220SNishanth Menon  *		Each device has its own set of clock inputs. This indexes
12019f723220SNishanth Menon  *		which clock input to modify.
12029f723220SNishanth Menon  * @parent_id:	Parent clock identifier to set
12039f723220SNishanth Menon  *
12049f723220SNishanth Menon  * Return: 0 if all went well, else returns appropriate error value.
12059f723220SNishanth Menon  */
12069f723220SNishanth Menon static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
12079f723220SNishanth Menon 				     u32 dev_id, u8 clk_id, u8 parent_id)
12089f723220SNishanth Menon {
12099f723220SNishanth Menon 	struct ti_sci_info *info;
12109f723220SNishanth Menon 	struct ti_sci_msg_req_set_clock_parent *req;
12119f723220SNishanth Menon 	struct ti_sci_msg_hdr *resp;
12129f723220SNishanth Menon 	struct ti_sci_xfer *xfer;
12139f723220SNishanth Menon 	struct device *dev;
12149f723220SNishanth Menon 	int ret = 0;
12159f723220SNishanth Menon 
12169f723220SNishanth Menon 	if (IS_ERR(handle))
12179f723220SNishanth Menon 		return PTR_ERR(handle);
12189f723220SNishanth Menon 	if (!handle)
12199f723220SNishanth Menon 		return -EINVAL;
12209f723220SNishanth Menon 
12219f723220SNishanth Menon 	info = handle_to_ti_sci_info(handle);
12229f723220SNishanth Menon 	dev = info->dev;
12239f723220SNishanth Menon 
12249f723220SNishanth Menon 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
12259f723220SNishanth Menon 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
12269f723220SNishanth Menon 				   sizeof(*req), sizeof(*resp));
12279f723220SNishanth Menon 	if (IS_ERR(xfer)) {
12289f723220SNishanth Menon 		ret = PTR_ERR(xfer);
12299f723220SNishanth Menon 		dev_err(dev, "Message alloc failed(%d)\n", ret);
12309f723220SNishanth Menon 		return ret;
12319f723220SNishanth Menon 	}
12329f723220SNishanth Menon 	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
12339f723220SNishanth Menon 	req->dev_id = dev_id;
12349f723220SNishanth Menon 	req->clk_id = clk_id;
12359f723220SNishanth Menon 	req->parent_id = parent_id;
12369f723220SNishanth Menon 
12379f723220SNishanth Menon 	ret = ti_sci_do_xfer(info, xfer);
12389f723220SNishanth Menon 	if (ret) {
12399f723220SNishanth Menon 		dev_err(dev, "Mbox send fail %d\n", ret);
12409f723220SNishanth Menon 		goto fail;
12419f723220SNishanth Menon 	}
12429f723220SNishanth Menon 
12439f723220SNishanth Menon 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
12449f723220SNishanth Menon 
12459f723220SNishanth Menon 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
12469f723220SNishanth Menon 
12479f723220SNishanth Menon fail:
12489f723220SNishanth Menon 	ti_sci_put_one_xfer(&info->minfo, xfer);
12499f723220SNishanth Menon 
12509f723220SNishanth Menon 	return ret;
12519f723220SNishanth Menon }
12529f723220SNishanth Menon 
12539f723220SNishanth Menon /**
12549f723220SNishanth Menon  * ti_sci_cmd_clk_get_parent() - Get current parent clock source
12559f723220SNishanth Menon  * @handle:	pointer to TI SCI handle
12569f723220SNishanth Menon  * @dev_id:	Device identifier this request is for
12579f723220SNishanth Menon  * @clk_id:	Clock identifier for the device for this request.
12589f723220SNishanth Menon  *		Each device has its own set of clock inputs. This indexes
12599f723220SNishanth Menon  *		which clock input to modify.
12609f723220SNishanth Menon  * @parent_id:	Current clock parent
12619f723220SNishanth Menon  *
12629f723220SNishanth Menon  * Return: 0 if all went well, else returns appropriate error value.
12639f723220SNishanth Menon  */
12649f723220SNishanth Menon static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
12659f723220SNishanth Menon 				     u32 dev_id, u8 clk_id, u8 *parent_id)
12669f723220SNishanth Menon {
12679f723220SNishanth Menon 	struct ti_sci_info *info;
12689f723220SNishanth Menon 	struct ti_sci_msg_req_get_clock_parent *req;
12699f723220SNishanth Menon 	struct ti_sci_msg_resp_get_clock_parent *resp;
12709f723220SNishanth Menon 	struct ti_sci_xfer *xfer;
12719f723220SNishanth Menon 	struct device *dev;
12729f723220SNishanth Menon 	int ret = 0;
12739f723220SNishanth Menon 
12749f723220SNishanth Menon 	if (IS_ERR(handle))
12759f723220SNishanth Menon 		return PTR_ERR(handle);
12769f723220SNishanth Menon 	if (!handle || !parent_id)
12779f723220SNishanth Menon 		return -EINVAL;
12789f723220SNishanth Menon 
12799f723220SNishanth Menon 	info = handle_to_ti_sci_info(handle);
12809f723220SNishanth Menon 	dev = info->dev;
12819f723220SNishanth Menon 
12829f723220SNishanth Menon 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
12839f723220SNishanth Menon 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
12849f723220SNishanth Menon 				   sizeof(*req), sizeof(*resp));
12859f723220SNishanth Menon 	if (IS_ERR(xfer)) {
12869f723220SNishanth Menon 		ret = PTR_ERR(xfer);
12879f723220SNishanth Menon 		dev_err(dev, "Message alloc failed(%d)\n", ret);
12889f723220SNishanth Menon 		return ret;
12899f723220SNishanth Menon 	}
12909f723220SNishanth Menon 	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
12919f723220SNishanth Menon 	req->dev_id = dev_id;
12929f723220SNishanth Menon 	req->clk_id = clk_id;
12939f723220SNishanth Menon 
12949f723220SNishanth Menon 	ret = ti_sci_do_xfer(info, xfer);
12959f723220SNishanth Menon 	if (ret) {
12969f723220SNishanth Menon 		dev_err(dev, "Mbox send fail %d\n", ret);
12979f723220SNishanth Menon 		goto fail;
12989f723220SNishanth Menon 	}
12999f723220SNishanth Menon 
13009f723220SNishanth Menon 	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
13019f723220SNishanth Menon 
13029f723220SNishanth Menon 	if (!ti_sci_is_response_ack(resp))
13039f723220SNishanth Menon 		ret = -ENODEV;
13049f723220SNishanth Menon 	else
13059f723220SNishanth Menon 		*parent_id = resp->parent_id;
13069f723220SNishanth Menon 
13079f723220SNishanth Menon fail:
13089f723220SNishanth Menon 	ti_sci_put_one_xfer(&info->minfo, xfer);
13099f723220SNishanth Menon 
13109f723220SNishanth Menon 	return ret;
13119f723220SNishanth Menon }
13129f723220SNishanth Menon 
13139f723220SNishanth Menon /**
13149f723220SNishanth Menon  * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
13159f723220SNishanth Menon  * @handle:	pointer to TI SCI handle
13169f723220SNishanth Menon  * @dev_id:	Device identifier this request is for
13179f723220SNishanth Menon  * @clk_id:	Clock identifier for the device for this request.
13189f723220SNishanth Menon  *		Each device has its own set of clock inputs. This indexes
13199f723220SNishanth Menon  *		which clock input to modify.
13209f723220SNishanth Menon  * @num_parents: Returns the number of parents of the current clock.
13219f723220SNishanth Menon  *
13229f723220SNishanth Menon  * Return: 0 if all went well, else returns appropriate error value.
13239f723220SNishanth Menon  */
13249f723220SNishanth Menon static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
13259f723220SNishanth Menon 					  u32 dev_id, u8 clk_id,
13269f723220SNishanth Menon 					  u8 *num_parents)
13279f723220SNishanth Menon {
13289f723220SNishanth Menon 	struct ti_sci_info *info;
13299f723220SNishanth Menon 	struct ti_sci_msg_req_get_clock_num_parents *req;
13309f723220SNishanth Menon 	struct ti_sci_msg_resp_get_clock_num_parents *resp;
13319f723220SNishanth Menon 	struct ti_sci_xfer *xfer;
13329f723220SNishanth Menon 	struct device *dev;
13339f723220SNishanth Menon 	int ret = 0;
13349f723220SNishanth Menon 
13359f723220SNishanth Menon 	if (IS_ERR(handle))
13369f723220SNishanth Menon 		return PTR_ERR(handle);
13379f723220SNishanth Menon 	if (!handle || !num_parents)
13389f723220SNishanth Menon 		return -EINVAL;
13399f723220SNishanth Menon 
13409f723220SNishanth Menon 	info = handle_to_ti_sci_info(handle);
13419f723220SNishanth Menon 	dev = info->dev;
13429f723220SNishanth Menon 
13439f723220SNishanth Menon 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
13449f723220SNishanth Menon 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
13459f723220SNishanth Menon 				   sizeof(*req), sizeof(*resp));
13469f723220SNishanth Menon 	if (IS_ERR(xfer)) {
13479f723220SNishanth Menon 		ret = PTR_ERR(xfer);
13489f723220SNishanth Menon 		dev_err(dev, "Message alloc failed(%d)\n", ret);
13499f723220SNishanth Menon 		return ret;
13509f723220SNishanth Menon 	}
13519f723220SNishanth Menon 	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
13529f723220SNishanth Menon 	req->dev_id = dev_id;
13539f723220SNishanth Menon 	req->clk_id = clk_id;
13549f723220SNishanth Menon 
13559f723220SNishanth Menon 	ret = ti_sci_do_xfer(info, xfer);
13569f723220SNishanth Menon 	if (ret) {
13579f723220SNishanth Menon 		dev_err(dev, "Mbox send fail %d\n", ret);
13589f723220SNishanth Menon 		goto fail;
13599f723220SNishanth Menon 	}
13609f723220SNishanth Menon 
13619f723220SNishanth Menon 	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
13629f723220SNishanth Menon 
13639f723220SNishanth Menon 	if (!ti_sci_is_response_ack(resp))
13649f723220SNishanth Menon 		ret = -ENODEV;
13659f723220SNishanth Menon 	else
13669f723220SNishanth Menon 		*num_parents = resp->num_parents;
13679f723220SNishanth Menon 
13689f723220SNishanth Menon fail:
13699f723220SNishanth Menon 	ti_sci_put_one_xfer(&info->minfo, xfer);
13709f723220SNishanth Menon 
13719f723220SNishanth Menon 	return ret;
13729f723220SNishanth Menon }
13739f723220SNishanth Menon 
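/*
 * Illustrative sketch (not part of the driver): re-parenting a clock mux with
 * the helpers above. handle/dev_id/clk_id are assumed to come from elsewhere
 * and new_parent is a hypothetical index chosen by the caller.
 */
static int __maybe_unused ti_sci_example_clk_reparent(const struct ti_sci_handle *handle,
						      u32 dev_id, u8 clk_id,
						      u8 new_parent)
{
	u8 cur_parent, num_parents;
	int ret;

	ret = ti_sci_cmd_clk_get_num_parents(handle, dev_id, clk_id,
					     &num_parents);
	if (ret)
		return ret;

	/* Parent indices are zero based, so reject anything out of range */
	if (new_parent >= num_parents)
		return -EINVAL;

	ret = ti_sci_cmd_clk_get_parent(handle, dev_id, clk_id, &cur_parent);
	if (ret)
		return ret;

	/* Nothing to do if the requested parent is already selected */
	if (cur_parent == new_parent)
		return 0;

	return ti_sci_cmd_clk_set_parent(handle, dev_id, clk_id, new_parent);
}
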
13749f723220SNishanth Menon /**
13759f723220SNishanth Menon  * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
13769f723220SNishanth Menon  * @handle:	pointer to TI SCI handle
13779f723220SNishanth Menon  * @dev_id:	Device identifier this request is for
13789f723220SNishanth Menon  * @clk_id:	Clock identifier for the device for this request.
13799f723220SNishanth Menon  *		Each device has its own set of clock inputs. This indexes
13809f723220SNishanth Menon  *		which clock input to modify.
13819f723220SNishanth Menon  * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
13829f723220SNishanth Menon  *		allowable programmed frequency and does not account for clock
13839f723220SNishanth Menon  *		tolerances and jitter.
13849f723220SNishanth Menon  * @target_freq: The target clock frequency in Hz. A frequency will be
13859f723220SNishanth Menon  *		processed as close to this target frequency as possible.
13869f723220SNishanth Menon  * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
13879f723220SNishanth Menon  *		allowable programmed frequency and does not account for clock
13889f723220SNishanth Menon  *		tolerances and jitter.
13899f723220SNishanth Menon  * @match_freq:	Frequency match in Hz response.
13909f723220SNishanth Menon  *
13919f723220SNishanth Menon  * Return: 0 if all went well, else returns appropriate error value.
13929f723220SNishanth Menon  */
13939f723220SNishanth Menon static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
13949f723220SNishanth Menon 					 u32 dev_id, u8 clk_id, u64 min_freq,
13959f723220SNishanth Menon 					 u64 target_freq, u64 max_freq,
13969f723220SNishanth Menon 					 u64 *match_freq)
13979f723220SNishanth Menon {
13989f723220SNishanth Menon 	struct ti_sci_info *info;
13999f723220SNishanth Menon 	struct ti_sci_msg_req_query_clock_freq *req;
14009f723220SNishanth Menon 	struct ti_sci_msg_resp_query_clock_freq *resp;
14019f723220SNishanth Menon 	struct ti_sci_xfer *xfer;
14029f723220SNishanth Menon 	struct device *dev;
14039f723220SNishanth Menon 	int ret = 0;
14049f723220SNishanth Menon 
14059f723220SNishanth Menon 	if (IS_ERR(handle))
14069f723220SNishanth Menon 		return PTR_ERR(handle);
14079f723220SNishanth Menon 	if (!handle || !match_freq)
14089f723220SNishanth Menon 		return -EINVAL;
14099f723220SNishanth Menon 
14109f723220SNishanth Menon 	info = handle_to_ti_sci_info(handle);
14119f723220SNishanth Menon 	dev = info->dev;
14129f723220SNishanth Menon 
14139f723220SNishanth Menon 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
14149f723220SNishanth Menon 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
14159f723220SNishanth Menon 				   sizeof(*req), sizeof(*resp));
14169f723220SNishanth Menon 	if (IS_ERR(xfer)) {
14179f723220SNishanth Menon 		ret = PTR_ERR(xfer);
14189f723220SNishanth Menon 		dev_err(dev, "Message alloc failed(%d)\n", ret);
14199f723220SNishanth Menon 		return ret;
14209f723220SNishanth Menon 	}
14219f723220SNishanth Menon 	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
14229f723220SNishanth Menon 	req->dev_id = dev_id;
14239f723220SNishanth Menon 	req->clk_id = clk_id;
14249f723220SNishanth Menon 	req->min_freq_hz = min_freq;
14259f723220SNishanth Menon 	req->target_freq_hz = target_freq;
14269f723220SNishanth Menon 	req->max_freq_hz = max_freq;
14279f723220SNishanth Menon 
14289f723220SNishanth Menon 	ret = ti_sci_do_xfer(info, xfer);
14299f723220SNishanth Menon 	if (ret) {
14309f723220SNishanth Menon 		dev_err(dev, "Mbox send fail %d\n", ret);
14319f723220SNishanth Menon 		goto fail;
14329f723220SNishanth Menon 	}
14339f723220SNishanth Menon 
14349f723220SNishanth Menon 	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
14359f723220SNishanth Menon 
14369f723220SNishanth Menon 	if (!ti_sci_is_response_ack(resp))
14379f723220SNishanth Menon 		ret = -ENODEV;
14389f723220SNishanth Menon 	else
14399f723220SNishanth Menon 		*match_freq = resp->freq_hz;
14409f723220SNishanth Menon 
14419f723220SNishanth Menon fail:
14429f723220SNishanth Menon 	ti_sci_put_one_xfer(&info->minfo, xfer);
14439f723220SNishanth Menon 
14449f723220SNishanth Menon 	return ret;
14459f723220SNishanth Menon }
14469f723220SNishanth Menon 
14479f723220SNishanth Menon /**
14489f723220SNishanth Menon  * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
14499f723220SNishanth Menon  * @handle:	pointer to TI SCI handle
14509f723220SNishanth Menon  * @dev_id:	Device identifier this request is for
14519f723220SNishanth Menon  * @clk_id:	Clock identifier for the device for this request.
14529f723220SNishanth Menon  *		Each device has its own set of clock inputs. This indexes
14539f723220SNishanth Menon  *		which clock input to modify.
14549f723220SNishanth Menon  * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
14559f723220SNishanth Menon  *		allowable programmed frequency and does not account for clock
14569f723220SNishanth Menon  *		tolerances and jitter.
14579f723220SNishanth Menon  * @target_freq: The target clock frequency in Hz. A frequency will be
14589f723220SNishanth Menon  *		processed as close to this target frequency as possible.
14599f723220SNishanth Menon  * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
14609f723220SNishanth Menon  *		allowable programmed frequency and does not account for clock
14619f723220SNishanth Menon  *		tolerances and jitter.
14629f723220SNishanth Menon  *
14639f723220SNishanth Menon  * Return: 0 if all went well, else returns appropriate error value.
14649f723220SNishanth Menon  */
14659f723220SNishanth Menon static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
14669f723220SNishanth Menon 				   u32 dev_id, u8 clk_id, u64 min_freq,
14679f723220SNishanth Menon 				   u64 target_freq, u64 max_freq)
14689f723220SNishanth Menon {
14699f723220SNishanth Menon 	struct ti_sci_info *info;
14709f723220SNishanth Menon 	struct ti_sci_msg_req_set_clock_freq *req;
14719f723220SNishanth Menon 	struct ti_sci_msg_hdr *resp;
14729f723220SNishanth Menon 	struct ti_sci_xfer *xfer;
14739f723220SNishanth Menon 	struct device *dev;
14749f723220SNishanth Menon 	int ret = 0;
14759f723220SNishanth Menon 
14769f723220SNishanth Menon 	if (IS_ERR(handle))
14779f723220SNishanth Menon 		return PTR_ERR(handle);
14789f723220SNishanth Menon 	if (!handle)
14799f723220SNishanth Menon 		return -EINVAL;
14809f723220SNishanth Menon 
14819f723220SNishanth Menon 	info = handle_to_ti_sci_info(handle);
14829f723220SNishanth Menon 	dev = info->dev;
14839f723220SNishanth Menon 
14849f723220SNishanth Menon 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
14859f723220SNishanth Menon 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
14869f723220SNishanth Menon 				   sizeof(*req), sizeof(*resp));
14879f723220SNishanth Menon 	if (IS_ERR(xfer)) {
14889f723220SNishanth Menon 		ret = PTR_ERR(xfer);
14899f723220SNishanth Menon 		dev_err(dev, "Message alloc failed(%d)\n", ret);
14909f723220SNishanth Menon 		return ret;
14919f723220SNishanth Menon 	}
14929f723220SNishanth Menon 	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
14939f723220SNishanth Menon 	req->dev_id = dev_id;
14949f723220SNishanth Menon 	req->clk_id = clk_id;
14959f723220SNishanth Menon 	req->min_freq_hz = min_freq;
14969f723220SNishanth Menon 	req->target_freq_hz = target_freq;
14979f723220SNishanth Menon 	req->max_freq_hz = max_freq;
14989f723220SNishanth Menon 
14999f723220SNishanth Menon 	ret = ti_sci_do_xfer(info, xfer);
15009f723220SNishanth Menon 	if (ret) {
15019f723220SNishanth Menon 		dev_err(dev, "Mbox send fail %d\n", ret);
15029f723220SNishanth Menon 		goto fail;
15039f723220SNishanth Menon 	}
15049f723220SNishanth Menon 
15059f723220SNishanth Menon 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
15069f723220SNishanth Menon 
15079f723220SNishanth Menon 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
15089f723220SNishanth Menon 
15099f723220SNishanth Menon fail:
15109f723220SNishanth Menon 	ti_sci_put_one_xfer(&info->minfo, xfer);
15119f723220SNishanth Menon 
15129f723220SNishanth Menon 	return ret;
15139f723220SNishanth Menon }
15149f723220SNishanth Menon 
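/*
 * Illustrative sketch (not part of the driver): a query-then-program frequency
 * flow built on the two helpers above. The IDs and the frequency window are
 * assumed to be supplied by the caller; real consumers normally reach this
 * functionality through the clock ops hooked up on the TI SCI handle.
 */
static int __maybe_unused ti_sci_example_clk_retune(const struct ti_sci_handle *handle,
						    u32 dev_id, u8 clk_id,
						    u64 min_freq, u64 target_freq,
						    u64 max_freq)
{
	u64 match_freq;
	int ret;

	/* Ask firmware which frequency inside the window it can program */
	ret = ti_sci_cmd_clk_get_match_freq(handle, dev_id, clk_id, min_freq,
					    target_freq, max_freq, &match_freq);
	if (ret)
		return ret;

	/* Program the matched frequency, keeping the same allowable window */
	return ti_sci_cmd_clk_set_freq(handle, dev_id, clk_id, min_freq,
				       match_freq, max_freq);
}
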
15159f723220SNishanth Menon /**
15169f723220SNishanth Menon  * ti_sci_cmd_clk_get_freq() - Get current frequency
15179f723220SNishanth Menon  * @handle:	pointer to TI SCI handle
15189f723220SNishanth Menon  * @dev_id:	Device identifier this request is for
15199f723220SNishanth Menon  * @clk_id:	Clock identifier for the device for this request.
15209f723220SNishanth Menon  *		Each device has its own set of clock inputs. This indexes
15219f723220SNishanth Menon  *		which clock input to modify.
15229f723220SNishanth Menon  * @freq:	Current frequency in Hz
15239f723220SNishanth Menon  *
15249f723220SNishanth Menon  * Return: 0 if all went well, else returns appropriate error value.
15259f723220SNishanth Menon  */
15269f723220SNishanth Menon static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
15279f723220SNishanth Menon 				   u32 dev_id, u8 clk_id, u64 *freq)
15289f723220SNishanth Menon {
15299f723220SNishanth Menon 	struct ti_sci_info *info;
15309f723220SNishanth Menon 	struct ti_sci_msg_req_get_clock_freq *req;
15319f723220SNishanth Menon 	struct ti_sci_msg_resp_get_clock_freq *resp;
15329f723220SNishanth Menon 	struct ti_sci_xfer *xfer;
15339f723220SNishanth Menon 	struct device *dev;
15349f723220SNishanth Menon 	int ret = 0;
15359f723220SNishanth Menon 
15369f723220SNishanth Menon 	if (IS_ERR(handle))
15379f723220SNishanth Menon 		return PTR_ERR(handle);
15389f723220SNishanth Menon 	if (!handle || !freq)
15399f723220SNishanth Menon 		return -EINVAL;
15409f723220SNishanth Menon 
15419f723220SNishanth Menon 	info = handle_to_ti_sci_info(handle);
15429f723220SNishanth Menon 	dev = info->dev;
15439f723220SNishanth Menon 
15449f723220SNishanth Menon 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
15459f723220SNishanth Menon 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
15469f723220SNishanth Menon 				   sizeof(*req), sizeof(*resp));
15479f723220SNishanth Menon 	if (IS_ERR(xfer)) {
15489f723220SNishanth Menon 		ret = PTR_ERR(xfer);
15499f723220SNishanth Menon 		dev_err(dev, "Message alloc failed(%d)\n", ret);
15509f723220SNishanth Menon 		return ret;
15519f723220SNishanth Menon 	}
15529f723220SNishanth Menon 	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
15539f723220SNishanth Menon 	req->dev_id = dev_id;
15549f723220SNishanth Menon 	req->clk_id = clk_id;
15559f723220SNishanth Menon 
15569f723220SNishanth Menon 	ret = ti_sci_do_xfer(info, xfer);
15579f723220SNishanth Menon 	if (ret) {
15589f723220SNishanth Menon 		dev_err(dev, "Mbox send fail %d\n", ret);
15599f723220SNishanth Menon 		goto fail;
15609f723220SNishanth Menon 	}
15619f723220SNishanth Menon 
15629f723220SNishanth Menon 	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
15639f723220SNishanth Menon 
15649f723220SNishanth Menon 	if (!ti_sci_is_response_ack(resp))
15659f723220SNishanth Menon 		ret = -ENODEV;
15669f723220SNishanth Menon 	else
15679f723220SNishanth Menon 		*freq = resp->freq_hz;
15689f723220SNishanth Menon 
15699f723220SNishanth Menon fail:
15709f723220SNishanth Menon 	ti_sci_put_one_xfer(&info->minfo, xfer);
15719f723220SNishanth Menon 
15729f723220SNishanth Menon 	return ret;
15739f723220SNishanth Menon }
15749f723220SNishanth Menon 
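/**
 * ti_sci_cmd_core_reboot() - Command to request system reset
 * @handle:	pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */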
1575912cffb4SNishanth Menon static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
1576912cffb4SNishanth Menon {
1577912cffb4SNishanth Menon 	struct ti_sci_info *info;
1578912cffb4SNishanth Menon 	struct ti_sci_msg_req_reboot *req;
1579912cffb4SNishanth Menon 	struct ti_sci_msg_hdr *resp;
1580912cffb4SNishanth Menon 	struct ti_sci_xfer *xfer;
1581912cffb4SNishanth Menon 	struct device *dev;
1582912cffb4SNishanth Menon 	int ret = 0;
1583912cffb4SNishanth Menon 
1584912cffb4SNishanth Menon 	if (IS_ERR(handle))
1585912cffb4SNishanth Menon 		return PTR_ERR(handle);
1586912cffb4SNishanth Menon 	if (!handle)
1587912cffb4SNishanth Menon 		return -EINVAL;
1588912cffb4SNishanth Menon 
1589912cffb4SNishanth Menon 	info = handle_to_ti_sci_info(handle);
1590912cffb4SNishanth Menon 	dev = info->dev;
1591912cffb4SNishanth Menon 
1592912cffb4SNishanth Menon 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
1593912cffb4SNishanth Menon 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1594912cffb4SNishanth Menon 				   sizeof(*req), sizeof(*resp));
1595912cffb4SNishanth Menon 	if (IS_ERR(xfer)) {
1596912cffb4SNishanth Menon 		ret = PTR_ERR(xfer);
1597912cffb4SNishanth Menon 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1598912cffb4SNishanth Menon 		return ret;
1599912cffb4SNishanth Menon 	}
1600912cffb4SNishanth Menon 	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
1601912cffb4SNishanth Menon 
1602912cffb4SNishanth Menon 	ret = ti_sci_do_xfer(info, xfer);
1603912cffb4SNishanth Menon 	if (ret) {
1604912cffb4SNishanth Menon 		dev_err(dev, "Mbox send fail %d\n", ret);
1605912cffb4SNishanth Menon 		goto fail;
1606912cffb4SNishanth Menon 	}
1607912cffb4SNishanth Menon 
1608912cffb4SNishanth Menon 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1609912cffb4SNishanth Menon 
1610912cffb4SNishanth Menon 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1614912cffb4SNishanth Menon 
1615912cffb4SNishanth Menon fail:
1616912cffb4SNishanth Menon 	ti_sci_put_one_xfer(&info->minfo, xfer);
1617912cffb4SNishanth Menon 
1618912cffb4SNishanth Menon 	return ret;
1619912cffb4SNishanth Menon }
1620912cffb4SNishanth Menon 
16219c19fb68SLokesh Vutla static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
16229c19fb68SLokesh Vutla 				    u16 *type)
16239c19fb68SLokesh Vutla {
16249c19fb68SLokesh Vutla 	struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
16259c19fb68SLokesh Vutla 	bool found = false;
16269c19fb68SLokesh Vutla 	int i;
16279c19fb68SLokesh Vutla 
16289c19fb68SLokesh Vutla 	/* If map is not provided then assume dev_id is used as type */
16299c19fb68SLokesh Vutla 	if (!rm_type_map) {
16309c19fb68SLokesh Vutla 		*type = dev_id;
16319c19fb68SLokesh Vutla 		return 0;
16329c19fb68SLokesh Vutla 	}
16339c19fb68SLokesh Vutla 
16349c19fb68SLokesh Vutla 	for (i = 0; rm_type_map[i].dev_id; i++) {
16359c19fb68SLokesh Vutla 		if (rm_type_map[i].dev_id == dev_id) {
16369c19fb68SLokesh Vutla 			*type = rm_type_map[i].type;
16379c19fb68SLokesh Vutla 			found = true;
16389c19fb68SLokesh Vutla 			break;
16399c19fb68SLokesh Vutla 		}
16409c19fb68SLokesh Vutla 	}
16419c19fb68SLokesh Vutla 
16429c19fb68SLokesh Vutla 	if (!found)
16439c19fb68SLokesh Vutla 		return -EINVAL;
16449c19fb68SLokesh Vutla 
16459c19fb68SLokesh Vutla 	return 0;
16469c19fb68SLokesh Vutla }
16479c19fb68SLokesh Vutla 
16489c19fb68SLokesh Vutla /**
16499c19fb68SLokesh Vutla  * ti_sci_get_resource_range() - Helper to get a range of resources assigned
16509c19fb68SLokesh Vutla  *			       to a host. Resource is uniquely identified by
16519c19fb68SLokesh Vutla  *			       type and subtype.
16529c19fb68SLokesh Vutla  * @handle:		Pointer to TISCI handle.
16539c19fb68SLokesh Vutla  * @dev_id:		TISCI device ID.
16549c19fb68SLokesh Vutla  * @subtype:		Resource assignment subtype that is being requested
16559c19fb68SLokesh Vutla  *			from the given device.
16569c19fb68SLokesh Vutla  * @s_host:		Host processor ID to which the resources are allocated
16579c19fb68SLokesh Vutla  * @range_start:	Start index of the resource range
16589c19fb68SLokesh Vutla  * @range_num:		Number of resources in the range
16599c19fb68SLokesh Vutla  *
16609c19fb68SLokesh Vutla  * Return: 0 if all went fine, else return appropriate error.
16619c19fb68SLokesh Vutla  */
16629c19fb68SLokesh Vutla static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
16639c19fb68SLokesh Vutla 				     u32 dev_id, u8 subtype, u8 s_host,
16649c19fb68SLokesh Vutla 				     u16 *range_start, u16 *range_num)
16659c19fb68SLokesh Vutla {
16669c19fb68SLokesh Vutla 	struct ti_sci_msg_resp_get_resource_range *resp;
16679c19fb68SLokesh Vutla 	struct ti_sci_msg_req_get_resource_range *req;
16689c19fb68SLokesh Vutla 	struct ti_sci_xfer *xfer;
16699c19fb68SLokesh Vutla 	struct ti_sci_info *info;
16709c19fb68SLokesh Vutla 	struct device *dev;
16719c19fb68SLokesh Vutla 	u16 type;
16729c19fb68SLokesh Vutla 	int ret = 0;
16739c19fb68SLokesh Vutla 
16749c19fb68SLokesh Vutla 	if (IS_ERR(handle))
16759c19fb68SLokesh Vutla 		return PTR_ERR(handle);
16769c19fb68SLokesh Vutla 	if (!handle)
16779c19fb68SLokesh Vutla 		return -EINVAL;
16789c19fb68SLokesh Vutla 
16799c19fb68SLokesh Vutla 	info = handle_to_ti_sci_info(handle);
16809c19fb68SLokesh Vutla 	dev = info->dev;
16819c19fb68SLokesh Vutla 
16829c19fb68SLokesh Vutla 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
16839c19fb68SLokesh Vutla 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
16849c19fb68SLokesh Vutla 				   sizeof(*req), sizeof(*resp));
16859c19fb68SLokesh Vutla 	if (IS_ERR(xfer)) {
16869c19fb68SLokesh Vutla 		ret = PTR_ERR(xfer);
16879c19fb68SLokesh Vutla 		dev_err(dev, "Message alloc failed(%d)\n", ret);
16889c19fb68SLokesh Vutla 		return ret;
16899c19fb68SLokesh Vutla 	}
16909c19fb68SLokesh Vutla 
16919c19fb68SLokesh Vutla 	ret = ti_sci_get_resource_type(info, dev_id, &type);
16929c19fb68SLokesh Vutla 	if (ret) {
16939c19fb68SLokesh Vutla 		dev_err(dev, "rm type lookup failed for %u\n", dev_id);
16949c19fb68SLokesh Vutla 		goto fail;
16959c19fb68SLokesh Vutla 	}
16969c19fb68SLokesh Vutla 
16979c19fb68SLokesh Vutla 	req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
16989c19fb68SLokesh Vutla 	req->secondary_host = s_host;
16999c19fb68SLokesh Vutla 	req->type = type & MSG_RM_RESOURCE_TYPE_MASK;
17009c19fb68SLokesh Vutla 	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
17019c19fb68SLokesh Vutla 
17029c19fb68SLokesh Vutla 	ret = ti_sci_do_xfer(info, xfer);
17039c19fb68SLokesh Vutla 	if (ret) {
17049c19fb68SLokesh Vutla 		dev_err(dev, "Mbox send fail %d\n", ret);
17059c19fb68SLokesh Vutla 		goto fail;
17069c19fb68SLokesh Vutla 	}
17079c19fb68SLokesh Vutla 
17089c19fb68SLokesh Vutla 	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
17099c19fb68SLokesh Vutla 
17109c19fb68SLokesh Vutla 	if (!ti_sci_is_response_ack(resp)) {
17119c19fb68SLokesh Vutla 		ret = -ENODEV;
17129c19fb68SLokesh Vutla 	} else if (!resp->range_start && !resp->range_num) {
17139c19fb68SLokesh Vutla 		ret = -ENODEV;
17149c19fb68SLokesh Vutla 	} else {
17159c19fb68SLokesh Vutla 		*range_start = resp->range_start;
17169c19fb68SLokesh Vutla 		*range_num = resp->range_num;
17179c19fb68SLokesh Vutla 	}
17189c19fb68SLokesh Vutla 
17199c19fb68SLokesh Vutla fail:
17209c19fb68SLokesh Vutla 	ti_sci_put_one_xfer(&info->minfo, xfer);
17219c19fb68SLokesh Vutla 
17229c19fb68SLokesh Vutla 	return ret;
17239c19fb68SLokesh Vutla }
17249c19fb68SLokesh Vutla 
17259c19fb68SLokesh Vutla /**
17269c19fb68SLokesh Vutla  * ti_sci_cmd_get_resource_range() - Get a range of resources assigned to the
17279c19fb68SLokesh Vutla  *				     host that is the same as the TI SCI interface host.
17289c19fb68SLokesh Vutla  * @handle:		Pointer to TISCI handle.
17299c19fb68SLokesh Vutla  * @dev_id:		TISCI device ID.
17309c19fb68SLokesh Vutla  * @subtype:		Resource assignment subtype that is being requested
17319c19fb68SLokesh Vutla  *			from the given device.
17329c19fb68SLokesh Vutla  * @range_start:	Start index of the resource range
17339c19fb68SLokesh Vutla  * @range_num:		Number of resources in the range
17349c19fb68SLokesh Vutla  *
17359c19fb68SLokesh Vutla  * Return: 0 if all went fine, else return appropriate error.
17369c19fb68SLokesh Vutla  */
17379c19fb68SLokesh Vutla static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
17389c19fb68SLokesh Vutla 					 u32 dev_id, u8 subtype,
17399c19fb68SLokesh Vutla 					 u16 *range_start, u16 *range_num)
17409c19fb68SLokesh Vutla {
17419c19fb68SLokesh Vutla 	return ti_sci_get_resource_range(handle, dev_id, subtype,
17429c19fb68SLokesh Vutla 					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
17439c19fb68SLokesh Vutla 					 range_start, range_num);
17449c19fb68SLokesh Vutla }
17459c19fb68SLokesh Vutla 
17469c19fb68SLokesh Vutla /**
17479c19fb68SLokesh Vutla  * ti_sci_cmd_get_resource_range_from_shost() - Get a range of resources
17489c19fb68SLokesh Vutla  *					      assigned to a specified host.
17499c19fb68SLokesh Vutla  * @handle:		Pointer to TISCI handle.
17509c19fb68SLokesh Vutla  * @dev_id:		TISCI device ID.
17519c19fb68SLokesh Vutla  * @subtype:		Resource assignment subtype that is being requested
17529c19fb68SLokesh Vutla  *			from the given device.
17539c19fb68SLokesh Vutla  * @s_host:		Host processor ID to which the resources are allocated
17549c19fb68SLokesh Vutla  * @range_start:	Start index of the resource range
17559c19fb68SLokesh Vutla  * @range_num:		Number of resources in the range
17569c19fb68SLokesh Vutla  *
17579c19fb68SLokesh Vutla  * Return: 0 if all went fine, else return appropriate error.
17589c19fb68SLokesh Vutla  */
17599c19fb68SLokesh Vutla static
17609c19fb68SLokesh Vutla int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
17619c19fb68SLokesh Vutla 					     u32 dev_id, u8 subtype, u8 s_host,
17629c19fb68SLokesh Vutla 					     u16 *range_start, u16 *range_num)
17639c19fb68SLokesh Vutla {
17649c19fb68SLokesh Vutla 	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
17659c19fb68SLokesh Vutla 					 range_start, range_num);
17669c19fb68SLokesh Vutla }
17679c19fb68SLokesh Vutla 
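/*
 * Illustrative sketch (not part of the driver): fetching the resource range of
 * a hypothetical device/subtype pair for the current host with the helper
 * above and walking the returned indices.
 */
static int __maybe_unused ti_sci_example_walk_range(const struct ti_sci_handle *handle,
						    u32 dev_id, u8 subtype)
{
	u16 range_start, range_num, i;
	int ret;

	ret = ti_sci_cmd_get_resource_range(handle, dev_id, subtype,
					    &range_start, &range_num);
	if (ret)
		return ret;

	for (i = 0; i < range_num; i++)
		pr_debug("resource index %d owned by this host\n",
			 range_start + i);

	return 0;
}
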
1768997b001fSLokesh Vutla /**
1769997b001fSLokesh Vutla  * ti_sci_manage_irq() - Helper api to configure/release the irq route between
1770997b001fSLokesh Vutla  *			 the requested source and destination
1771997b001fSLokesh Vutla  * @handle:		Pointer to TISCI handle.
1772997b001fSLokesh Vutla  * @valid_params:	Bit fields defining the validity of certain params
1773997b001fSLokesh Vutla  * @src_id:		Device ID of the IRQ source
1774997b001fSLokesh Vutla  * @src_index:		IRQ source index within the source device
1775997b001fSLokesh Vutla  * @dst_id:		Device ID of the IRQ destination
1776997b001fSLokesh Vutla  * @dst_host_irq:	IRQ number of the destination device
1777997b001fSLokesh Vutla  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1778997b001fSLokesh Vutla  * @vint:		Virtual interrupt to be used within the IA
1779997b001fSLokesh Vutla  * @global_event:	Global event number to be used for the requesting event
1780997b001fSLokesh Vutla  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1781997b001fSLokesh Vutla  * @s_host:		Secondary host ID for which the irq/event is being
1782997b001fSLokesh Vutla  *			requested.
1783997b001fSLokesh Vutla  * @type:		Request type irq set or release.
1784997b001fSLokesh Vutla  *
1785997b001fSLokesh Vutla  * Return: 0 if all went fine, else return appropriate error.
1786997b001fSLokesh Vutla  */
1787997b001fSLokesh Vutla static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
1788997b001fSLokesh Vutla 			     u32 valid_params, u16 src_id, u16 src_index,
1789997b001fSLokesh Vutla 			     u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
1790997b001fSLokesh Vutla 			     u16 global_event, u8 vint_status_bit, u8 s_host,
1791997b001fSLokesh Vutla 			     u16 type)
1792997b001fSLokesh Vutla {
1793997b001fSLokesh Vutla 	struct ti_sci_msg_req_manage_irq *req;
1794997b001fSLokesh Vutla 	struct ti_sci_msg_hdr *resp;
1795997b001fSLokesh Vutla 	struct ti_sci_xfer *xfer;
1796997b001fSLokesh Vutla 	struct ti_sci_info *info;
1797997b001fSLokesh Vutla 	struct device *dev;
1798997b001fSLokesh Vutla 	int ret = 0;
1799997b001fSLokesh Vutla 
1800997b001fSLokesh Vutla 	if (IS_ERR(handle))
1801997b001fSLokesh Vutla 		return PTR_ERR(handle);
1802997b001fSLokesh Vutla 	if (!handle)
1803997b001fSLokesh Vutla 		return -EINVAL;
1804997b001fSLokesh Vutla 
1805997b001fSLokesh Vutla 	info = handle_to_ti_sci_info(handle);
1806997b001fSLokesh Vutla 	dev = info->dev;
1807997b001fSLokesh Vutla 
1808997b001fSLokesh Vutla 	xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1809997b001fSLokesh Vutla 				   sizeof(*req), sizeof(*resp));
1810997b001fSLokesh Vutla 	if (IS_ERR(xfer)) {
1811997b001fSLokesh Vutla 		ret = PTR_ERR(xfer);
1812997b001fSLokesh Vutla 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1813997b001fSLokesh Vutla 		return ret;
1814997b001fSLokesh Vutla 	}
1815997b001fSLokesh Vutla 	req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
1816997b001fSLokesh Vutla 	req->valid_params = valid_params;
1817997b001fSLokesh Vutla 	req->src_id = src_id;
1818997b001fSLokesh Vutla 	req->src_index = src_index;
1819997b001fSLokesh Vutla 	req->dst_id = dst_id;
1820997b001fSLokesh Vutla 	req->dst_host_irq = dst_host_irq;
1821997b001fSLokesh Vutla 	req->ia_id = ia_id;
1822997b001fSLokesh Vutla 	req->vint = vint;
1823997b001fSLokesh Vutla 	req->global_event = global_event;
1824997b001fSLokesh Vutla 	req->vint_status_bit = vint_status_bit;
1825997b001fSLokesh Vutla 	req->secondary_host = s_host;
1826997b001fSLokesh Vutla 
1827997b001fSLokesh Vutla 	ret = ti_sci_do_xfer(info, xfer);
1828997b001fSLokesh Vutla 	if (ret) {
1829997b001fSLokesh Vutla 		dev_err(dev, "Mbox send fail %d\n", ret);
1830997b001fSLokesh Vutla 		goto fail;
1831997b001fSLokesh Vutla 	}
1832997b001fSLokesh Vutla 
1833997b001fSLokesh Vutla 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1834997b001fSLokesh Vutla 
1835997b001fSLokesh Vutla 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1836997b001fSLokesh Vutla 
1837997b001fSLokesh Vutla fail:
1838997b001fSLokesh Vutla 	ti_sci_put_one_xfer(&info->minfo, xfer);
1839997b001fSLokesh Vutla 
1840997b001fSLokesh Vutla 	return ret;
1841997b001fSLokesh Vutla }
1842997b001fSLokesh Vutla 
1843997b001fSLokesh Vutla /**
1844997b001fSLokesh Vutla  * ti_sci_set_irq() - Helper api to configure the irq route between the
1845997b001fSLokesh Vutla  *		      requested source and destination
1846997b001fSLokesh Vutla  * @handle:		Pointer to TISCI handle.
1847997b001fSLokesh Vutla  * @valid_params:	Bit fields defining the validity of certain params
1848997b001fSLokesh Vutla  * @src_id:		Device ID of the IRQ source
1849997b001fSLokesh Vutla  * @src_index:		IRQ source index within the source device
1850997b001fSLokesh Vutla  * @dst_id:		Device ID of the IRQ destination
1851997b001fSLokesh Vutla  * @dst_host_irq:	IRQ number of the destination device
1852997b001fSLokesh Vutla  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1853997b001fSLokesh Vutla  * @vint:		Virtual interrupt to be used within the IA
1854997b001fSLokesh Vutla  * @global_event:	Global event number to be used for the requesting event
1855997b001fSLokesh Vutla  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1856997b001fSLokesh Vutla  * @s_host:		Secondary host ID for which the irq/event is being
1857997b001fSLokesh Vutla  *			requested.
1858997b001fSLokesh Vutla  *
1859997b001fSLokesh Vutla  * Return: 0 if all went fine, else return appropriate error.
1860997b001fSLokesh Vutla  */
1861997b001fSLokesh Vutla static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
1862997b001fSLokesh Vutla 			  u16 src_id, u16 src_index, u16 dst_id,
1863997b001fSLokesh Vutla 			  u16 dst_host_irq, u16 ia_id, u16 vint,
1864997b001fSLokesh Vutla 			  u16 global_event, u8 vint_status_bit, u8 s_host)
1865997b001fSLokesh Vutla {
1866997b001fSLokesh Vutla 	pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
1867997b001fSLokesh Vutla 		 __func__, valid_params, src_id, src_index,
1868997b001fSLokesh Vutla 		 dst_id, dst_host_irq, ia_id, vint, global_event,
1869997b001fSLokesh Vutla 		 vint_status_bit);
1870997b001fSLokesh Vutla 
1871997b001fSLokesh Vutla 	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1872997b001fSLokesh Vutla 				 dst_id, dst_host_irq, ia_id, vint,
1873997b001fSLokesh Vutla 				 global_event, vint_status_bit, s_host,
1874997b001fSLokesh Vutla 				 TI_SCI_MSG_SET_IRQ);
1875997b001fSLokesh Vutla }
1876997b001fSLokesh Vutla 
1877997b001fSLokesh Vutla /**
1878997b001fSLokesh Vutla  * ti_sci_free_irq() - Helper api to free the irq route between the
1879997b001fSLokesh Vutla  *			   requested source and destination
1880997b001fSLokesh Vutla  * @handle:		Pointer to TISCI handle.
1881997b001fSLokesh Vutla  * @valid_params:	Bit fields defining the validity of certain params
1882997b001fSLokesh Vutla  * @src_id:		Device ID of the IRQ source
1883997b001fSLokesh Vutla  * @src_index:		IRQ source index within the source device
1884997b001fSLokesh Vutla  * @dst_id:		Device ID of the IRQ destination
1885997b001fSLokesh Vutla  * @dst_host_irq:	IRQ number of the destination device
1886997b001fSLokesh Vutla  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1887997b001fSLokesh Vutla  * @vint:		Virtual interrupt to be used within the IA
1888997b001fSLokesh Vutla  * @global_event:	Global event number to be used for the requesting event
1889997b001fSLokesh Vutla  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1890997b001fSLokesh Vutla  * @s_host:		Secondary host ID for which the irq/event is being
1891997b001fSLokesh Vutla  *			requested.
1892997b001fSLokesh Vutla  *
1893997b001fSLokesh Vutla  * Return: 0 if all went fine, else return appropriate error.
1894997b001fSLokesh Vutla  */
1895997b001fSLokesh Vutla static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
1896997b001fSLokesh Vutla 			   u16 src_id, u16 src_index, u16 dst_id,
1897997b001fSLokesh Vutla 			   u16 dst_host_irq, u16 ia_id, u16 vint,
1898997b001fSLokesh Vutla 			   u16 global_event, u8 vint_status_bit, u8 s_host)
1899997b001fSLokesh Vutla {
1900997b001fSLokesh Vutla 	pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
1901997b001fSLokesh Vutla 		 __func__, valid_params, src_id, src_index,
1902997b001fSLokesh Vutla 		 dst_id, dst_host_irq, ia_id, vint, global_event,
1903997b001fSLokesh Vutla 		 vint_status_bit);
1904997b001fSLokesh Vutla 
1905997b001fSLokesh Vutla 	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1906997b001fSLokesh Vutla 				 dst_id, dst_host_irq, ia_id, vint,
1907997b001fSLokesh Vutla 				 global_event, vint_status_bit, s_host,
1908997b001fSLokesh Vutla 				 TI_SCI_MSG_FREE_IRQ);
1909997b001fSLokesh Vutla }
1910997b001fSLokesh Vutla 
1911997b001fSLokesh Vutla /**
1912997b001fSLokesh Vutla  * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
1913997b001fSLokesh Vutla  *			  source and destination.
1914997b001fSLokesh Vutla  * @handle:		Pointer to TISCI handle.
1915997b001fSLokesh Vutla  * @src_id:		Device ID of the IRQ source
1916997b001fSLokesh Vutla  * @src_index:		IRQ source index within the source device
1917997b001fSLokesh Vutla  * @dst_id:		Device ID of the IRQ destination
1918997b001fSLokesh Vutla  * @dst_host_irq:	IRQ number of the destination device
1921997b001fSLokesh Vutla  *
1922997b001fSLokesh Vutla  * Return: 0 if all went fine, else return appropriate error.
1923997b001fSLokesh Vutla  */
1924997b001fSLokesh Vutla static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
1925997b001fSLokesh Vutla 			      u16 src_index, u16 dst_id, u16 dst_host_irq)
1926997b001fSLokesh Vutla {
1927997b001fSLokesh Vutla 	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
1928997b001fSLokesh Vutla 
1929997b001fSLokesh Vutla 	return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
1930997b001fSLokesh Vutla 			      dst_host_irq, 0, 0, 0, 0, 0);
1931997b001fSLokesh Vutla }
1932997b001fSLokesh Vutla 
1933997b001fSLokesh Vutla /**
1934997b001fSLokesh Vutla  * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
1935997b001fSLokesh Vutla  *				requested source and Interrupt Aggregator.
1936997b001fSLokesh Vutla  * @handle:		Pointer to TISCI handle.
1937997b001fSLokesh Vutla  * @src_id:		Device ID of the IRQ source
1938997b001fSLokesh Vutla  * @src_index:		IRQ source index within the source device
1939997b001fSLokesh Vutla  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1940997b001fSLokesh Vutla  * @vint:		Virtual interrupt to be used within the IA
1941997b001fSLokesh Vutla  * @global_event:	Global event number to be used for the requesting event
1942997b001fSLokesh Vutla  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1943997b001fSLokesh Vutla  *
1944997b001fSLokesh Vutla  * Return: 0 if all went fine, else return appropriate error.
1945997b001fSLokesh Vutla  */
1946997b001fSLokesh Vutla static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
1947997b001fSLokesh Vutla 				    u16 src_id, u16 src_index, u16 ia_id,
1948997b001fSLokesh Vutla 				    u16 vint, u16 global_event,
1949997b001fSLokesh Vutla 				    u8 vint_status_bit)
1950997b001fSLokesh Vutla {
1951997b001fSLokesh Vutla 	u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
1952997b001fSLokesh Vutla 			   MSG_FLAG_GLB_EVNT_VALID |
1953997b001fSLokesh Vutla 			   MSG_FLAG_VINT_STS_BIT_VALID;
1954997b001fSLokesh Vutla 
1955997b001fSLokesh Vutla 	return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
1956997b001fSLokesh Vutla 			      ia_id, vint, global_event, vint_status_bit, 0);
1957997b001fSLokesh Vutla }
1958997b001fSLokesh Vutla 
1959997b001fSLokesh Vutla /**
1960997b001fSLokesh Vutla  * ti_sci_cmd_free_irq() - Free a host irq route between the requested
1961997b001fSLokesh Vutla  *			   source and destination.
1962997b001fSLokesh Vutla  * @handle:		Pointer to TISCI handle.
1963997b001fSLokesh Vutla  * @src_id:		Device ID of the IRQ source
1964997b001fSLokesh Vutla  * @src_index:		IRQ source index within the source device
1965997b001fSLokesh Vutla  * @dst_id:		Device ID of the IRQ destination
1966997b001fSLokesh Vutla  * @dst_host_irq:	IRQ number of the destination device
1969997b001fSLokesh Vutla  *
1970997b001fSLokesh Vutla  * Return: 0 if all went fine, else return appropriate error.
1971997b001fSLokesh Vutla  */
1972997b001fSLokesh Vutla static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
1973997b001fSLokesh Vutla 			       u16 src_index, u16 dst_id, u16 dst_host_irq)
1974997b001fSLokesh Vutla {
1975997b001fSLokesh Vutla 	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
1976997b001fSLokesh Vutla 
1977997b001fSLokesh Vutla 	return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
1978997b001fSLokesh Vutla 			       dst_host_irq, 0, 0, 0, 0, 0);
1979997b001fSLokesh Vutla }
1980997b001fSLokesh Vutla 
1981997b001fSLokesh Vutla /**
1982997b001fSLokesh Vutla  * ti_sci_cmd_free_event_map() - Free an event map between the requested source
1983997b001fSLokesh Vutla  *				 and Interrupt Aggregator.
1984997b001fSLokesh Vutla  * @handle:		Pointer to TISCI handle.
1985997b001fSLokesh Vutla  * @src_id:		Device ID of the IRQ source
1986997b001fSLokesh Vutla  * @src_index:		IRQ source index within the source device
1987997b001fSLokesh Vutla  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
1988997b001fSLokesh Vutla  * @vint:		Virtual interrupt to be used within the IA
1989997b001fSLokesh Vutla  * @global_event:	Global event number to be used for the requesting event
1990997b001fSLokesh Vutla  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
1991997b001fSLokesh Vutla  *
1992997b001fSLokesh Vutla  * Return: 0 if all went fine, else return appropriate error.
1993997b001fSLokesh Vutla  */
1994997b001fSLokesh Vutla static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
1995997b001fSLokesh Vutla 				     u16 src_id, u16 src_index, u16 ia_id,
1996997b001fSLokesh Vutla 				     u16 vint, u16 global_event,
1997997b001fSLokesh Vutla 				     u8 vint_status_bit)
1998997b001fSLokesh Vutla {
1999997b001fSLokesh Vutla 	u32 valid_params = MSG_FLAG_IA_ID_VALID |
2000997b001fSLokesh Vutla 			   MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
2001997b001fSLokesh Vutla 			   MSG_FLAG_VINT_STS_BIT_VALID;
2002997b001fSLokesh Vutla 
2003997b001fSLokesh Vutla 	return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
2004997b001fSLokesh Vutla 			       ia_id, vint, global_event, vint_status_bit, 0);
2005997b001fSLokesh Vutla }
2006997b001fSLokesh Vutla 
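/*
 * Illustrative note (not part of the driver), using hypothetical IDs: a direct
 * host interrupt route only needs the destination parameters,
 *
 *	ti_sci_cmd_set_irq(handle, src_id, src_index, dst_id, dst_host_irq);
 *
 * whereas an event routed through an Interrupt Aggregator supplies the IA,
 * vint, global event and status bit instead:
 *
 *	ti_sci_cmd_set_event_map(handle, src_id, src_index, ia_id, vint,
 *				 global_event, vint_status_bit);
 *
 * The matching free calls mirror the arguments of the corresponding set call.
 */
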
2007*68608b5eSPeter Ujfalusi /**
2008*68608b5eSPeter Ujfalusi  * ti_sci_cmd_ring_config() - configure RA ring
2009*68608b5eSPeter Ujfalusi  * @handle:		Pointer to TI SCI handle.
2010*68608b5eSPeter Ujfalusi  * @valid_params:	Bitfield defining validity of ring configuration
2011*68608b5eSPeter Ujfalusi  *			parameters
2012*68608b5eSPeter Ujfalusi  * @nav_id:		Device ID of Navigator Subsystem from which the ring is
2013*68608b5eSPeter Ujfalusi  *			allocated
2014*68608b5eSPeter Ujfalusi  * @index:		Ring index
2015*68608b5eSPeter Ujfalusi  * @addr_lo:		The ring base address lo 32 bits
2016*68608b5eSPeter Ujfalusi  * @addr_hi:		The ring base address hi 32 bits
2017*68608b5eSPeter Ujfalusi  * @count:		Number of ring elements
2018*68608b5eSPeter Ujfalusi  * @mode:		The mode of the ring
2019*68608b5eSPeter Ujfalusi  * @size:		The ring element size.
2020*68608b5eSPeter Ujfalusi  * @order_id:		Specifies the ring's bus order ID
2021*68608b5eSPeter Ujfalusi  *
2022*68608b5eSPeter Ujfalusi  * Return: 0 if all went well, else returns appropriate error value.
2023*68608b5eSPeter Ujfalusi  *
2024*68608b5eSPeter Ujfalusi  * See @ti_sci_msg_rm_ring_cfg_req for more info.
2025*68608b5eSPeter Ujfalusi  */
2026*68608b5eSPeter Ujfalusi static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2027*68608b5eSPeter Ujfalusi 				  u32 valid_params, u16 nav_id, u16 index,
2028*68608b5eSPeter Ujfalusi 				  u32 addr_lo, u32 addr_hi, u32 count,
2029*68608b5eSPeter Ujfalusi 				  u8 mode, u8 size, u8 order_id)
2030*68608b5eSPeter Ujfalusi {
2031*68608b5eSPeter Ujfalusi 	struct ti_sci_msg_rm_ring_cfg_req *req;
2032*68608b5eSPeter Ujfalusi 	struct ti_sci_msg_hdr *resp;
2033*68608b5eSPeter Ujfalusi 	struct ti_sci_xfer *xfer;
2034*68608b5eSPeter Ujfalusi 	struct ti_sci_info *info;
2035*68608b5eSPeter Ujfalusi 	struct device *dev;
2036*68608b5eSPeter Ujfalusi 	int ret = 0;
2037*68608b5eSPeter Ujfalusi 
2038*68608b5eSPeter Ujfalusi 	if (IS_ERR_OR_NULL(handle))
2039*68608b5eSPeter Ujfalusi 		return -EINVAL;
2040*68608b5eSPeter Ujfalusi 
2041*68608b5eSPeter Ujfalusi 	info = handle_to_ti_sci_info(handle);
2042*68608b5eSPeter Ujfalusi 	dev = info->dev;
2043*68608b5eSPeter Ujfalusi 
2044*68608b5eSPeter Ujfalusi 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2045*68608b5eSPeter Ujfalusi 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2046*68608b5eSPeter Ujfalusi 				   sizeof(*req), sizeof(*resp));
2047*68608b5eSPeter Ujfalusi 	if (IS_ERR(xfer)) {
2048*68608b5eSPeter Ujfalusi 		ret = PTR_ERR(xfer);
2049*68608b5eSPeter Ujfalusi 		dev_err(info->dev, "RM_RA:Message config failed(%d)\n", ret);
2050*68608b5eSPeter Ujfalusi 		return ret;
2051*68608b5eSPeter Ujfalusi 	}
2052*68608b5eSPeter Ujfalusi 	req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
2053*68608b5eSPeter Ujfalusi 	req->valid_params = valid_params;
2054*68608b5eSPeter Ujfalusi 	req->nav_id = nav_id;
2055*68608b5eSPeter Ujfalusi 	req->index = index;
2056*68608b5eSPeter Ujfalusi 	req->addr_lo = addr_lo;
2057*68608b5eSPeter Ujfalusi 	req->addr_hi = addr_hi;
2058*68608b5eSPeter Ujfalusi 	req->count = count;
2059*68608b5eSPeter Ujfalusi 	req->mode = mode;
2060*68608b5eSPeter Ujfalusi 	req->size = size;
2061*68608b5eSPeter Ujfalusi 	req->order_id = order_id;
2062*68608b5eSPeter Ujfalusi 
2063*68608b5eSPeter Ujfalusi 	ret = ti_sci_do_xfer(info, xfer);
2064*68608b5eSPeter Ujfalusi 	if (ret) {
2065*68608b5eSPeter Ujfalusi 		dev_err(info->dev, "RM_RA:Mbox config send fail %d\n", ret);
2066*68608b5eSPeter Ujfalusi 		goto fail;
2067*68608b5eSPeter Ujfalusi 	}
2068*68608b5eSPeter Ujfalusi 
2069*68608b5eSPeter Ujfalusi 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2070*68608b5eSPeter Ujfalusi 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2071*68608b5eSPeter Ujfalusi 
2072*68608b5eSPeter Ujfalusi fail:
2073*68608b5eSPeter Ujfalusi 	ti_sci_put_one_xfer(&info->minfo, xfer);
2074*68608b5eSPeter Ujfalusi 	dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2075*68608b5eSPeter Ujfalusi 	return ret;
2076*68608b5eSPeter Ujfalusi }
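
/*
 * Illustrative sketch (not part of the driver): a Navigator client reaches the
 * call above through the ringacc ops installed by ti_sci_setup_ops() further
 * below. The helper name and the caller-supplied valid_params mask are
 * assumptions made only for this example; real callers build the mask from the
 * ring-config "valid" flags that mirror struct ti_sci_msg_rm_ring_cfg_req.
 */
static int example_ring_setup(const struct ti_sci_handle *sci, u32 valid_params,
			      u16 nav_id, u16 ring_idx, dma_addr_t ring_dma,
			      u32 ring_elems, u8 mode, u8 elsize, u8 order_id)
{
	/* The 64-bit ring base address is split into the lo/hi halves the message expects */
	return sci->ops.rm_ring_ops.config(sci, valid_params, nav_id, ring_idx,
					   lower_32_bits(ring_dma),
					   upper_32_bits(ring_dma),
					   ring_elems, mode, elsize, order_id);
}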
2077*68608b5eSPeter Ujfalusi 
2078*68608b5eSPeter Ujfalusi /**
2079*68608b5eSPeter Ujfalusi  * ti_sci_cmd_ring_get_config() - get RA ring configuration
2080*68608b5eSPeter Ujfalusi  * @handle:	Pointer to TI SCI handle.
2081*68608b5eSPeter Ujfalusi  * @nav_id:	Device ID of Navigator Subsystem from which the ring is
2082*68608b5eSPeter Ujfalusi  *		allocated
2083*68608b5eSPeter Ujfalusi  * @index:	Ring index
2084*68608b5eSPeter Ujfalusi  * @addr_lo:	Returns ring's base address lo 32 bits
2085*68608b5eSPeter Ujfalusi  * @addr_hi:	Returns ring's base address hi 32 bits
2086*68608b5eSPeter Ujfalusi  * @count:	Returns number of ring elements
2087*68608b5eSPeter Ujfalusi  * @mode:	Returns mode of the ring
2088*68608b5eSPeter Ujfalusi  * @size:	Returns ring element size
2089*68608b5eSPeter Ujfalusi  * @order_id:	Returns ring's bus order ID
2090*68608b5eSPeter Ujfalusi  *
2091*68608b5eSPeter Ujfalusi  * Return: 0 if all went well, else returns appropriate error value.
2092*68608b5eSPeter Ujfalusi  *
2093*68608b5eSPeter Ujfalusi  * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
2094*68608b5eSPeter Ujfalusi  */
2095*68608b5eSPeter Ujfalusi static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
2096*68608b5eSPeter Ujfalusi 				      u32 nav_id, u32 index, u8 *mode,
2097*68608b5eSPeter Ujfalusi 				      u32 *addr_lo, u32 *addr_hi,
2098*68608b5eSPeter Ujfalusi 				      u32 *count, u8 *size, u8 *order_id)
2099*68608b5eSPeter Ujfalusi {
2100*68608b5eSPeter Ujfalusi 	struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
2101*68608b5eSPeter Ujfalusi 	struct ti_sci_msg_rm_ring_get_cfg_req *req;
2102*68608b5eSPeter Ujfalusi 	struct ti_sci_xfer *xfer;
2103*68608b5eSPeter Ujfalusi 	struct ti_sci_info *info;
2104*68608b5eSPeter Ujfalusi 	struct device *dev;
2105*68608b5eSPeter Ujfalusi 	int ret = 0;
2106*68608b5eSPeter Ujfalusi 
2107*68608b5eSPeter Ujfalusi 	if (IS_ERR_OR_NULL(handle))
2108*68608b5eSPeter Ujfalusi 		return -EINVAL;
2109*68608b5eSPeter Ujfalusi 
2110*68608b5eSPeter Ujfalusi 	info = handle_to_ti_sci_info(handle);
2111*68608b5eSPeter Ujfalusi 	dev = info->dev;
2112*68608b5eSPeter Ujfalusi 
2113*68608b5eSPeter Ujfalusi 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
2114*68608b5eSPeter Ujfalusi 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2115*68608b5eSPeter Ujfalusi 				   sizeof(*req), sizeof(*resp));
2116*68608b5eSPeter Ujfalusi 	if (IS_ERR(xfer)) {
2117*68608b5eSPeter Ujfalusi 		ret = PTR_ERR(xfer);
2118*68608b5eSPeter Ujfalusi 		dev_err(info->dev,
2119*68608b5eSPeter Ujfalusi 			"RM_RA:Message get config failed(%d)\n", ret);
2120*68608b5eSPeter Ujfalusi 		return ret;
2121*68608b5eSPeter Ujfalusi 	}
2122*68608b5eSPeter Ujfalusi 	req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
2123*68608b5eSPeter Ujfalusi 	req->nav_id = nav_id;
2124*68608b5eSPeter Ujfalusi 	req->index = index;
2125*68608b5eSPeter Ujfalusi 
2126*68608b5eSPeter Ujfalusi 	ret = ti_sci_do_xfer(info, xfer);
2127*68608b5eSPeter Ujfalusi 	if (ret) {
2128*68608b5eSPeter Ujfalusi 		dev_err(info->dev, "RM_RA:Mbox get config send fail %d\n", ret);
2129*68608b5eSPeter Ujfalusi 		goto fail;
2130*68608b5eSPeter Ujfalusi 	}
2131*68608b5eSPeter Ujfalusi 
2132*68608b5eSPeter Ujfalusi 	resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;
2133*68608b5eSPeter Ujfalusi 
2134*68608b5eSPeter Ujfalusi 	if (!ti_sci_is_response_ack(resp)) {
2135*68608b5eSPeter Ujfalusi 		ret = -ENODEV;
2136*68608b5eSPeter Ujfalusi 	} else {
2137*68608b5eSPeter Ujfalusi 		if (mode)
2138*68608b5eSPeter Ujfalusi 			*mode = resp->mode;
2139*68608b5eSPeter Ujfalusi 		if (addr_lo)
2140*68608b5eSPeter Ujfalusi 			*addr_lo = resp->addr_lo;
2141*68608b5eSPeter Ujfalusi 		if (addr_hi)
2142*68608b5eSPeter Ujfalusi 			*addr_hi = resp->addr_hi;
2143*68608b5eSPeter Ujfalusi 		if (count)
2144*68608b5eSPeter Ujfalusi 			*count = resp->count;
2145*68608b5eSPeter Ujfalusi 		if (size)
2146*68608b5eSPeter Ujfalusi 			*size = resp->size;
2147*68608b5eSPeter Ujfalusi 		if (order_id)
2148*68608b5eSPeter Ujfalusi 			*order_id = resp->order_id;
2149*68608b5eSPeter Ujfalusi 	}
2150*68608b5eSPeter Ujfalusi 
2151*68608b5eSPeter Ujfalusi fail:
2152*68608b5eSPeter Ujfalusi 	ti_sci_put_one_xfer(&info->minfo, xfer);
2153*68608b5eSPeter Ujfalusi 	dev_dbg(info->dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
2154*68608b5eSPeter Ujfalusi 	return ret;
2155*68608b5eSPeter Ujfalusi }
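
/*
 * Illustrative sketch (not part of the driver): reading a ring's current
 * programming back through the same ops structure. Note that the readback op
 * takes the mode pointer before the address/count pointers, matching the
 * function above. The helper name is an assumption for this example only.
 */
static void example_ring_dump(const struct ti_sci_handle *sci, struct device *dev,
			      u32 nav_id, u32 ring_idx)
{
	u32 lo, hi, count;
	u8 mode, size, order;
	int ret;

	ret = sci->ops.rm_ring_ops.get_config(sci, nav_id, ring_idx, &mode,
					      &lo, &hi, &count, &size, &order);
	if (ret)
		dev_warn(dev, "ring %u readback failed (%d)\n", ring_idx, ret);
	else
		dev_dbg(dev, "ring %u: base 0x%08x%08x, %u elements, mode %u\n",
			ring_idx, hi, lo, count, mode);
}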
2156*68608b5eSPeter Ujfalusi 
2157*68608b5eSPeter Ujfalusi /**
2158*68608b5eSPeter Ujfalusi  * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
2159*68608b5eSPeter Ujfalusi  * @handle:	Pointer to TI SCI handle.
2160*68608b5eSPeter Ujfalusi  * @nav_id:	Device ID of Navigator Subsystem which should be used for
2161*68608b5eSPeter Ujfalusi  *		pairing
2162*68608b5eSPeter Ujfalusi  * @src_thread:	Source PSI-L thread ID
2163*68608b5eSPeter Ujfalusi  * @dst_thread: Destination PSI-L thread ID
2164*68608b5eSPeter Ujfalusi  *
2165*68608b5eSPeter Ujfalusi  * Return: 0 if all went well, else returns appropriate error value.
2166*68608b5eSPeter Ujfalusi  */
2167*68608b5eSPeter Ujfalusi static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2168*68608b5eSPeter Ujfalusi 				   u32 nav_id, u32 src_thread, u32 dst_thread)
2169*68608b5eSPeter Ujfalusi {
2170*68608b5eSPeter Ujfalusi 	struct ti_sci_msg_psil_pair *req;
2171*68608b5eSPeter Ujfalusi 	struct ti_sci_msg_hdr *resp;
2172*68608b5eSPeter Ujfalusi 	struct ti_sci_xfer *xfer;
2173*68608b5eSPeter Ujfalusi 	struct ti_sci_info *info;
2174*68608b5eSPeter Ujfalusi 	struct device *dev;
2175*68608b5eSPeter Ujfalusi 	int ret = 0;
2176*68608b5eSPeter Ujfalusi 
2177*68608b5eSPeter Ujfalusi 	if (IS_ERR(handle))
2178*68608b5eSPeter Ujfalusi 		return PTR_ERR(handle);
2179*68608b5eSPeter Ujfalusi 	if (!handle)
2180*68608b5eSPeter Ujfalusi 		return -EINVAL;
2181*68608b5eSPeter Ujfalusi 
2182*68608b5eSPeter Ujfalusi 	info = handle_to_ti_sci_info(handle);
2183*68608b5eSPeter Ujfalusi 	dev = info->dev;
2184*68608b5eSPeter Ujfalusi 
2185*68608b5eSPeter Ujfalusi 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2186*68608b5eSPeter Ujfalusi 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2187*68608b5eSPeter Ujfalusi 				   sizeof(*req), sizeof(*resp));
2188*68608b5eSPeter Ujfalusi 	if (IS_ERR(xfer)) {
2189*68608b5eSPeter Ujfalusi 		ret = PTR_ERR(xfer);
2190*68608b5eSPeter Ujfalusi 		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
2191*68608b5eSPeter Ujfalusi 		return ret;
2192*68608b5eSPeter Ujfalusi 	}
2193*68608b5eSPeter Ujfalusi 	req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
2194*68608b5eSPeter Ujfalusi 	req->nav_id = nav_id;
2195*68608b5eSPeter Ujfalusi 	req->src_thread = src_thread;
2196*68608b5eSPeter Ujfalusi 	req->dst_thread = dst_thread;
2197*68608b5eSPeter Ujfalusi 
2198*68608b5eSPeter Ujfalusi 	ret = ti_sci_do_xfer(info, xfer);
2199*68608b5eSPeter Ujfalusi 	if (ret) {
2200*68608b5eSPeter Ujfalusi 		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2201*68608b5eSPeter Ujfalusi 		goto fail;
2202*68608b5eSPeter Ujfalusi 	}
2203*68608b5eSPeter Ujfalusi 
2204*68608b5eSPeter Ujfalusi 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2205*68608b5eSPeter Ujfalusi 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2206*68608b5eSPeter Ujfalusi 
2207*68608b5eSPeter Ujfalusi fail:
2208*68608b5eSPeter Ujfalusi 	ti_sci_put_one_xfer(&info->minfo, xfer);
2209*68608b5eSPeter Ujfalusi 
2210*68608b5eSPeter Ujfalusi 	return ret;
2211*68608b5eSPeter Ujfalusi }
2212*68608b5eSPeter Ujfalusi 
2213*68608b5eSPeter Ujfalusi /**
2214*68608b5eSPeter Ujfalusi  * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
2215*68608b5eSPeter Ujfalusi  * @handle:	Pointer to TI SCI handle.
2216*68608b5eSPeter Ujfalusi  * @nav_id:	Device ID of Navigator Subsystem which should be used for
2217*68608b5eSPeter Ujfalusi  *		unpairing
2218*68608b5eSPeter Ujfalusi  * @src_thread:	Source PSI-L thread ID
2219*68608b5eSPeter Ujfalusi  * @dst_thread:	Destination PSI-L thread ID
2220*68608b5eSPeter Ujfalusi  *
2221*68608b5eSPeter Ujfalusi  * Return: 0 if all went well, else returns appropriate error value.
2222*68608b5eSPeter Ujfalusi  */
2223*68608b5eSPeter Ujfalusi static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2224*68608b5eSPeter Ujfalusi 				     u32 nav_id, u32 src_thread, u32 dst_thread)
2225*68608b5eSPeter Ujfalusi {
2226*68608b5eSPeter Ujfalusi 	struct ti_sci_msg_psil_unpair *req;
2227*68608b5eSPeter Ujfalusi 	struct ti_sci_msg_hdr *resp;
2228*68608b5eSPeter Ujfalusi 	struct ti_sci_xfer *xfer;
2229*68608b5eSPeter Ujfalusi 	struct ti_sci_info *info;
2230*68608b5eSPeter Ujfalusi 	struct device *dev;
2231*68608b5eSPeter Ujfalusi 	int ret = 0;
2232*68608b5eSPeter Ujfalusi 
2233*68608b5eSPeter Ujfalusi 	if (IS_ERR(handle))
2234*68608b5eSPeter Ujfalusi 		return PTR_ERR(handle);
2235*68608b5eSPeter Ujfalusi 	if (!handle)
2236*68608b5eSPeter Ujfalusi 		return -EINVAL;
2237*68608b5eSPeter Ujfalusi 
2238*68608b5eSPeter Ujfalusi 	info = handle_to_ti_sci_info(handle);
2239*68608b5eSPeter Ujfalusi 	dev = info->dev;
2240*68608b5eSPeter Ujfalusi 
2241*68608b5eSPeter Ujfalusi 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2242*68608b5eSPeter Ujfalusi 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2243*68608b5eSPeter Ujfalusi 				   sizeof(*req), sizeof(*resp));
2244*68608b5eSPeter Ujfalusi 	if (IS_ERR(xfer)) {
2245*68608b5eSPeter Ujfalusi 		ret = PTR_ERR(xfer);
2246*68608b5eSPeter Ujfalusi 		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
2247*68608b5eSPeter Ujfalusi 		return ret;
2248*68608b5eSPeter Ujfalusi 	}
2249*68608b5eSPeter Ujfalusi 	req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
2250*68608b5eSPeter Ujfalusi 	req->nav_id = nav_id;
2251*68608b5eSPeter Ujfalusi 	req->src_thread = src_thread;
2252*68608b5eSPeter Ujfalusi 	req->dst_thread = dst_thread;
2253*68608b5eSPeter Ujfalusi 
2254*68608b5eSPeter Ujfalusi 	ret = ti_sci_do_xfer(info, xfer);
2255*68608b5eSPeter Ujfalusi 	if (ret) {
2256*68608b5eSPeter Ujfalusi 		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2257*68608b5eSPeter Ujfalusi 		goto fail;
2258*68608b5eSPeter Ujfalusi 	}
2259*68608b5eSPeter Ujfalusi 
2260*68608b5eSPeter Ujfalusi 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2261*68608b5eSPeter Ujfalusi 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2262*68608b5eSPeter Ujfalusi 
2263*68608b5eSPeter Ujfalusi fail:
2264*68608b5eSPeter Ujfalusi 	ti_sci_put_one_xfer(&info->minfo, xfer);
2265*68608b5eSPeter Ujfalusi 
2266*68608b5eSPeter Ujfalusi 	return ret;
2267*68608b5eSPeter Ujfalusi }
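
/*
 * Illustrative sketch (not part of the driver): pairing and unpairing are
 * symmetric, so a client typically wraps both ops behind one helper keyed on
 * whether the channel is being brought up or torn down. The helper name is an
 * assumption for this example only.
 */
static int example_psil_link(const struct ti_sci_handle *sci, u32 nav_id,
			     u32 src_thread, u32 dst_thread, bool enable)
{
	if (enable)
		return sci->ops.rm_psil_ops.pair(sci, nav_id, src_thread,
						 dst_thread);

	return sci->ops.rm_psil_ops.unpair(sci, nav_id, src_thread, dst_thread);
}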
2268*68608b5eSPeter Ujfalusi 
2269*68608b5eSPeter Ujfalusi /**
2270*68608b5eSPeter Ujfalusi  * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
2271*68608b5eSPeter Ujfalusi  * @handle:	Pointer to TI SCI handle.
2272*68608b5eSPeter Ujfalusi  * @params:	Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
2273*68608b5eSPeter Ujfalusi  *		structure
2274*68608b5eSPeter Ujfalusi  *
2275*68608b5eSPeter Ujfalusi  * Return: 0 if all went well, else returns appropriate error value.
2276*68608b5eSPeter Ujfalusi  *
2277*68608b5eSPeter Ujfalusi  * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
2278*68608b5eSPeter Ujfalusi  * more info.
2279*68608b5eSPeter Ujfalusi  */
2280*68608b5eSPeter Ujfalusi static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
2281*68608b5eSPeter Ujfalusi 			const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2282*68608b5eSPeter Ujfalusi {
2283*68608b5eSPeter Ujfalusi 	struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
2284*68608b5eSPeter Ujfalusi 	struct ti_sci_msg_hdr *resp;
2285*68608b5eSPeter Ujfalusi 	struct ti_sci_xfer *xfer;
2286*68608b5eSPeter Ujfalusi 	struct ti_sci_info *info;
2287*68608b5eSPeter Ujfalusi 	struct device *dev;
2288*68608b5eSPeter Ujfalusi 	int ret = 0;
2289*68608b5eSPeter Ujfalusi 
2290*68608b5eSPeter Ujfalusi 	if (IS_ERR_OR_NULL(handle))
2291*68608b5eSPeter Ujfalusi 		return -EINVAL;
2292*68608b5eSPeter Ujfalusi 
2293*68608b5eSPeter Ujfalusi 	info = handle_to_ti_sci_info(handle);
2294*68608b5eSPeter Ujfalusi 	dev = info->dev;
2295*68608b5eSPeter Ujfalusi 
2296*68608b5eSPeter Ujfalusi 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2297*68608b5eSPeter Ujfalusi 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2298*68608b5eSPeter Ujfalusi 				   sizeof(*req), sizeof(*resp));
2299*68608b5eSPeter Ujfalusi 	if (IS_ERR(xfer)) {
2300*68608b5eSPeter Ujfalusi 		ret = PTR_ERR(xfer);
2301*68608b5eSPeter Ujfalusi 		dev_err(info->dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2302*68608b5eSPeter Ujfalusi 		return ret;
2303*68608b5eSPeter Ujfalusi 	}
2304*68608b5eSPeter Ujfalusi 	req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
2305*68608b5eSPeter Ujfalusi 	req->valid_params = params->valid_params;
2306*68608b5eSPeter Ujfalusi 	req->nav_id = params->nav_id;
2307*68608b5eSPeter Ujfalusi 	req->index = params->index;
2308*68608b5eSPeter Ujfalusi 	req->tx_pause_on_err = params->tx_pause_on_err;
2309*68608b5eSPeter Ujfalusi 	req->tx_filt_einfo = params->tx_filt_einfo;
2310*68608b5eSPeter Ujfalusi 	req->tx_filt_pswords = params->tx_filt_pswords;
2311*68608b5eSPeter Ujfalusi 	req->tx_atype = params->tx_atype;
2312*68608b5eSPeter Ujfalusi 	req->tx_chan_type = params->tx_chan_type;
2313*68608b5eSPeter Ujfalusi 	req->tx_supr_tdpkt = params->tx_supr_tdpkt;
2314*68608b5eSPeter Ujfalusi 	req->tx_fetch_size = params->tx_fetch_size;
2315*68608b5eSPeter Ujfalusi 	req->tx_credit_count = params->tx_credit_count;
2316*68608b5eSPeter Ujfalusi 	req->txcq_qnum = params->txcq_qnum;
2317*68608b5eSPeter Ujfalusi 	req->tx_priority = params->tx_priority;
2318*68608b5eSPeter Ujfalusi 	req->tx_qos = params->tx_qos;
2319*68608b5eSPeter Ujfalusi 	req->tx_orderid = params->tx_orderid;
2320*68608b5eSPeter Ujfalusi 	req->fdepth = params->fdepth;
2321*68608b5eSPeter Ujfalusi 	req->tx_sched_priority = params->tx_sched_priority;
2322*68608b5eSPeter Ujfalusi 	req->tx_burst_size = params->tx_burst_size;
2323*68608b5eSPeter Ujfalusi 
2324*68608b5eSPeter Ujfalusi 	ret = ti_sci_do_xfer(info, xfer);
2325*68608b5eSPeter Ujfalusi 	if (ret) {
2326*68608b5eSPeter Ujfalusi 		dev_err(info->dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2327*68608b5eSPeter Ujfalusi 		goto fail;
2328*68608b5eSPeter Ujfalusi 	}
2329*68608b5eSPeter Ujfalusi 
2330*68608b5eSPeter Ujfalusi 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2331*68608b5eSPeter Ujfalusi 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2332*68608b5eSPeter Ujfalusi 
2333*68608b5eSPeter Ujfalusi fail:
2334*68608b5eSPeter Ujfalusi 	ti_sci_put_one_xfer(&info->minfo, xfer);
2335*68608b5eSPeter Ujfalusi 	dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2336*68608b5eSPeter Ujfalusi 	return ret;
2337*68608b5eSPeter Ujfalusi }
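
/*
 * Illustrative sketch (not part of the driver): a DMA client fills only the
 * fields it cares about in struct ti_sci_msg_rm_udmap_tx_ch_cfg and flags them
 * in valid_params so the firmware knows which ones to apply. The helper name
 * and the caller-supplied valid_params are assumptions made for this example.
 */
static int example_udma_tx_chan_setup(const struct ti_sci_handle *sci,
				      u32 valid_params, u16 nav_id, u16 chan,
				      u16 txcq_qnum)
{
	struct ti_sci_msg_rm_udmap_tx_ch_cfg cfg = {
		.valid_params = valid_params,	/* which of the fields below to apply */
		.nav_id = nav_id,
		.index = chan,
		.txcq_qnum = txcq_qnum,		/* completion queue for this TX channel */
	};

	return sci->ops.rm_udmap_ops.tx_ch_cfg(sci, &cfg);
}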
2338*68608b5eSPeter Ujfalusi 
2339*68608b5eSPeter Ujfalusi /**
2340*68608b5eSPeter Ujfalusi  * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
2341*68608b5eSPeter Ujfalusi  * @handle:	Pointer to TI SCI handle.
2342*68608b5eSPeter Ujfalusi  * @params:	Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
2343*68608b5eSPeter Ujfalusi  *		structure
2344*68608b5eSPeter Ujfalusi  *
2345*68608b5eSPeter Ujfalusi  * Return: 0 if all went well, else returns appropriate error value.
2346*68608b5eSPeter Ujfalusi  *
2347*68608b5eSPeter Ujfalusi  * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
2348*68608b5eSPeter Ujfalusi  * more info.
2349*68608b5eSPeter Ujfalusi  */
2350*68608b5eSPeter Ujfalusi static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
2351*68608b5eSPeter Ujfalusi 			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2352*68608b5eSPeter Ujfalusi {
2353*68608b5eSPeter Ujfalusi 	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
2354*68608b5eSPeter Ujfalusi 	struct ti_sci_msg_hdr *resp;
2355*68608b5eSPeter Ujfalusi 	struct ti_sci_xfer *xfer;
2356*68608b5eSPeter Ujfalusi 	struct ti_sci_info *info;
2357*68608b5eSPeter Ujfalusi 	struct device *dev;
2358*68608b5eSPeter Ujfalusi 	int ret = 0;
2359*68608b5eSPeter Ujfalusi 
2360*68608b5eSPeter Ujfalusi 	if (IS_ERR_OR_NULL(handle))
2361*68608b5eSPeter Ujfalusi 		return -EINVAL;
2362*68608b5eSPeter Ujfalusi 
2363*68608b5eSPeter Ujfalusi 	info = handle_to_ti_sci_info(handle);
2364*68608b5eSPeter Ujfalusi 	dev = info->dev;
2365*68608b5eSPeter Ujfalusi 
2366*68608b5eSPeter Ujfalusi 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2367*68608b5eSPeter Ujfalusi 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2368*68608b5eSPeter Ujfalusi 				   sizeof(*req), sizeof(*resp));
2369*68608b5eSPeter Ujfalusi 	if (IS_ERR(xfer)) {
2370*68608b5eSPeter Ujfalusi 		ret = PTR_ERR(xfer);
2371*68608b5eSPeter Ujfalusi 		dev_err(info->dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2372*68608b5eSPeter Ujfalusi 		return ret;
2373*68608b5eSPeter Ujfalusi 	}
2374*68608b5eSPeter Ujfalusi 	req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
2375*68608b5eSPeter Ujfalusi 	req->valid_params = params->valid_params;
2376*68608b5eSPeter Ujfalusi 	req->nav_id = params->nav_id;
2377*68608b5eSPeter Ujfalusi 	req->index = params->index;
2378*68608b5eSPeter Ujfalusi 	req->rx_fetch_size = params->rx_fetch_size;
2379*68608b5eSPeter Ujfalusi 	req->rxcq_qnum = params->rxcq_qnum;
2380*68608b5eSPeter Ujfalusi 	req->rx_priority = params->rx_priority;
2381*68608b5eSPeter Ujfalusi 	req->rx_qos = params->rx_qos;
2382*68608b5eSPeter Ujfalusi 	req->rx_orderid = params->rx_orderid;
2383*68608b5eSPeter Ujfalusi 	req->rx_sched_priority = params->rx_sched_priority;
2384*68608b5eSPeter Ujfalusi 	req->flowid_start = params->flowid_start;
2385*68608b5eSPeter Ujfalusi 	req->flowid_cnt = params->flowid_cnt;
2386*68608b5eSPeter Ujfalusi 	req->rx_pause_on_err = params->rx_pause_on_err;
2387*68608b5eSPeter Ujfalusi 	req->rx_atype = params->rx_atype;
2388*68608b5eSPeter Ujfalusi 	req->rx_chan_type = params->rx_chan_type;
2389*68608b5eSPeter Ujfalusi 	req->rx_ignore_short = params->rx_ignore_short;
2390*68608b5eSPeter Ujfalusi 	req->rx_ignore_long = params->rx_ignore_long;
2391*68608b5eSPeter Ujfalusi 	req->rx_burst_size = params->rx_burst_size;
2392*68608b5eSPeter Ujfalusi 
2393*68608b5eSPeter Ujfalusi 	ret = ti_sci_do_xfer(info, xfer);
2394*68608b5eSPeter Ujfalusi 	if (ret) {
2395*68608b5eSPeter Ujfalusi 		dev_err(info->dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2396*68608b5eSPeter Ujfalusi 		goto fail;
2397*68608b5eSPeter Ujfalusi 	}
2398*68608b5eSPeter Ujfalusi 
2399*68608b5eSPeter Ujfalusi 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2400*68608b5eSPeter Ujfalusi 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2401*68608b5eSPeter Ujfalusi 
2402*68608b5eSPeter Ujfalusi fail:
2403*68608b5eSPeter Ujfalusi 	ti_sci_put_one_xfer(&info->minfo, xfer);
2404*68608b5eSPeter Ujfalusi 	dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2405*68608b5eSPeter Ujfalusi 	return ret;
2406*68608b5eSPeter Ujfalusi }
2407*68608b5eSPeter Ujfalusi 
2408*68608b5eSPeter Ujfalusi /**
2409*68608b5eSPeter Ujfalusi  * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
2410*68608b5eSPeter Ujfalusi  * @handle:	Pointer to TI SCI handle.
2411*68608b5eSPeter Ujfalusi  * @params:	Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
2412*68608b5eSPeter Ujfalusi  *		structure
2413*68608b5eSPeter Ujfalusi  *
2414*68608b5eSPeter Ujfalusi  * Return: 0 if all went well, else returns appropriate error value.
2415*68608b5eSPeter Ujfalusi  *
2416*68608b5eSPeter Ujfalusi  * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
2417*68608b5eSPeter Ujfalusi  * more info.
2418*68608b5eSPeter Ujfalusi  */
2419*68608b5eSPeter Ujfalusi static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
2420*68608b5eSPeter Ujfalusi 			const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2421*68608b5eSPeter Ujfalusi {
2422*68608b5eSPeter Ujfalusi 	struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
2423*68608b5eSPeter Ujfalusi 	struct ti_sci_msg_hdr *resp;
2424*68608b5eSPeter Ujfalusi 	struct ti_sci_xfer *xfer;
2425*68608b5eSPeter Ujfalusi 	struct ti_sci_info *info;
2426*68608b5eSPeter Ujfalusi 	struct device *dev;
2427*68608b5eSPeter Ujfalusi 	int ret = 0;
2428*68608b5eSPeter Ujfalusi 
2429*68608b5eSPeter Ujfalusi 	if (IS_ERR_OR_NULL(handle))
2430*68608b5eSPeter Ujfalusi 		return -EINVAL;
2431*68608b5eSPeter Ujfalusi 
2432*68608b5eSPeter Ujfalusi 	info = handle_to_ti_sci_info(handle);
2433*68608b5eSPeter Ujfalusi 	dev = info->dev;
2434*68608b5eSPeter Ujfalusi 
2435*68608b5eSPeter Ujfalusi 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2436*68608b5eSPeter Ujfalusi 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2437*68608b5eSPeter Ujfalusi 				   sizeof(*req), sizeof(*resp));
2438*68608b5eSPeter Ujfalusi 	if (IS_ERR(xfer)) {
2439*68608b5eSPeter Ujfalusi 		ret = PTR_ERR(xfer);
2440*68608b5eSPeter Ujfalusi 		dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2441*68608b5eSPeter Ujfalusi 		return ret;
2442*68608b5eSPeter Ujfalusi 	}
2443*68608b5eSPeter Ujfalusi 	req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
2444*68608b5eSPeter Ujfalusi 	req->valid_params = params->valid_params;
2445*68608b5eSPeter Ujfalusi 	req->nav_id = params->nav_id;
2446*68608b5eSPeter Ujfalusi 	req->flow_index = params->flow_index;
2447*68608b5eSPeter Ujfalusi 	req->rx_einfo_present = params->rx_einfo_present;
2448*68608b5eSPeter Ujfalusi 	req->rx_psinfo_present = params->rx_psinfo_present;
2449*68608b5eSPeter Ujfalusi 	req->rx_error_handling = params->rx_error_handling;
2450*68608b5eSPeter Ujfalusi 	req->rx_desc_type = params->rx_desc_type;
2451*68608b5eSPeter Ujfalusi 	req->rx_sop_offset = params->rx_sop_offset;
2452*68608b5eSPeter Ujfalusi 	req->rx_dest_qnum = params->rx_dest_qnum;
2453*68608b5eSPeter Ujfalusi 	req->rx_src_tag_hi = params->rx_src_tag_hi;
2454*68608b5eSPeter Ujfalusi 	req->rx_src_tag_lo = params->rx_src_tag_lo;
2455*68608b5eSPeter Ujfalusi 	req->rx_dest_tag_hi = params->rx_dest_tag_hi;
2456*68608b5eSPeter Ujfalusi 	req->rx_dest_tag_lo = params->rx_dest_tag_lo;
2457*68608b5eSPeter Ujfalusi 	req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2458*68608b5eSPeter Ujfalusi 	req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2459*68608b5eSPeter Ujfalusi 	req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2460*68608b5eSPeter Ujfalusi 	req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2461*68608b5eSPeter Ujfalusi 	req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2462*68608b5eSPeter Ujfalusi 	req->rx_fdq1_qnum = params->rx_fdq1_qnum;
2463*68608b5eSPeter Ujfalusi 	req->rx_fdq2_qnum = params->rx_fdq2_qnum;
2464*68608b5eSPeter Ujfalusi 	req->rx_fdq3_qnum = params->rx_fdq3_qnum;
2465*68608b5eSPeter Ujfalusi 	req->rx_ps_location = params->rx_ps_location;
2466*68608b5eSPeter Ujfalusi 
2467*68608b5eSPeter Ujfalusi 	ret = ti_sci_do_xfer(info, xfer);
2468*68608b5eSPeter Ujfalusi 	if (ret) {
2469*68608b5eSPeter Ujfalusi 		dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2470*68608b5eSPeter Ujfalusi 		goto fail;
2471*68608b5eSPeter Ujfalusi 	}
2472*68608b5eSPeter Ujfalusi 
2473*68608b5eSPeter Ujfalusi 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2474*68608b5eSPeter Ujfalusi 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2475*68608b5eSPeter Ujfalusi 
2476*68608b5eSPeter Ujfalusi fail:
2477*68608b5eSPeter Ujfalusi 	ti_sci_put_one_xfer(&info->minfo, xfer);
2478*68608b5eSPeter Ujfalusi 	dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2479*68608b5eSPeter Ujfalusi 	return ret;
2480*68608b5eSPeter Ujfalusi }
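
/*
 * Illustrative sketch (not part of the driver): steering a receive flow to a
 * destination queue. As with the channel helpers above, only the fields
 * flagged in valid_params are applied by the firmware. The helper name and the
 * caller-supplied valid_params are assumptions made for this example.
 */
static int example_udma_rx_flow_setup(const struct ti_sci_handle *sci,
				      u32 valid_params, u16 nav_id, u16 flow,
				      u16 dest_qnum)
{
	struct ti_sci_msg_rm_udmap_flow_cfg cfg = {
		.valid_params = valid_params,
		.nav_id = nav_id,
		.flow_index = flow,
		.rx_dest_qnum = dest_qnum,	/* queue completed RX descriptors land on */
	};

	return sci->ops.rm_udmap_ops.rx_flow_cfg(sci, &cfg);
}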
2481*68608b5eSPeter Ujfalusi 
24829e7d756dSNishanth Menon /**
24839e7d756dSNishanth Menon  * ti_sci_setup_ops() - Setup the operations structures
24849e7d756dSNishanth Menon  * @info:	pointer to TISCI instance information
24859e7d756dSNishanth Menon  */
24869e7d756dSNishanth Menon static void ti_sci_setup_ops(struct ti_sci_info *info)
24879e7d756dSNishanth Menon {
24889e7d756dSNishanth Menon 	struct ti_sci_ops *ops = &info->handle.ops;
2489912cffb4SNishanth Menon 	struct ti_sci_core_ops *core_ops = &ops->core_ops;
24909e7d756dSNishanth Menon 	struct ti_sci_dev_ops *dops = &ops->dev_ops;
24919f723220SNishanth Menon 	struct ti_sci_clk_ops *cops = &ops->clk_ops;
24929c19fb68SLokesh Vutla 	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
2493997b001fSLokesh Vutla 	struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
2494*68608b5eSPeter Ujfalusi 	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2495*68608b5eSPeter Ujfalusi 	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2496*68608b5eSPeter Ujfalusi 	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
24979e7d756dSNishanth Menon 
2498912cffb4SNishanth Menon 	core_ops->reboot_device = ti_sci_cmd_core_reboot;
2499912cffb4SNishanth Menon 
25009e7d756dSNishanth Menon 	dops->get_device = ti_sci_cmd_get_device;
25019e7d756dSNishanth Menon 	dops->idle_device = ti_sci_cmd_idle_device;
25029e7d756dSNishanth Menon 	dops->put_device = ti_sci_cmd_put_device;
25039e7d756dSNishanth Menon 
25049e7d756dSNishanth Menon 	dops->is_valid = ti_sci_cmd_dev_is_valid;
25059e7d756dSNishanth Menon 	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
25069e7d756dSNishanth Menon 	dops->is_idle = ti_sci_cmd_dev_is_idle;
25079e7d756dSNishanth Menon 	dops->is_stop = ti_sci_cmd_dev_is_stop;
25089e7d756dSNishanth Menon 	dops->is_on = ti_sci_cmd_dev_is_on;
25099e7d756dSNishanth Menon 	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
25109e7d756dSNishanth Menon 	dops->set_device_resets = ti_sci_cmd_set_device_resets;
25119e7d756dSNishanth Menon 	dops->get_device_resets = ti_sci_cmd_get_device_resets;
25129f723220SNishanth Menon 
25139f723220SNishanth Menon 	cops->get_clock = ti_sci_cmd_get_clock;
25149f723220SNishanth Menon 	cops->idle_clock = ti_sci_cmd_idle_clock;
25159f723220SNishanth Menon 	cops->put_clock = ti_sci_cmd_put_clock;
25169f723220SNishanth Menon 	cops->is_auto = ti_sci_cmd_clk_is_auto;
25179f723220SNishanth Menon 	cops->is_on = ti_sci_cmd_clk_is_on;
25189f723220SNishanth Menon 	cops->is_off = ti_sci_cmd_clk_is_off;
25199f723220SNishanth Menon 
25209f723220SNishanth Menon 	cops->set_parent = ti_sci_cmd_clk_set_parent;
25219f723220SNishanth Menon 	cops->get_parent = ti_sci_cmd_clk_get_parent;
25229f723220SNishanth Menon 	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
25239f723220SNishanth Menon 
25249f723220SNishanth Menon 	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
25259f723220SNishanth Menon 	cops->set_freq = ti_sci_cmd_clk_set_freq;
25269f723220SNishanth Menon 	cops->get_freq = ti_sci_cmd_clk_get_freq;
25279c19fb68SLokesh Vutla 
25289c19fb68SLokesh Vutla 	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
25299c19fb68SLokesh Vutla 	rm_core_ops->get_range_from_shost =
25309c19fb68SLokesh Vutla 				ti_sci_cmd_get_resource_range_from_shost;
2531997b001fSLokesh Vutla 
2532997b001fSLokesh Vutla 	iops->set_irq = ti_sci_cmd_set_irq;
2533997b001fSLokesh Vutla 	iops->set_event_map = ti_sci_cmd_set_event_map;
2534997b001fSLokesh Vutla 	iops->free_irq = ti_sci_cmd_free_irq;
2535997b001fSLokesh Vutla 	iops->free_event_map = ti_sci_cmd_free_event_map;
2536*68608b5eSPeter Ujfalusi 
2537*68608b5eSPeter Ujfalusi 	rops->config = ti_sci_cmd_ring_config;
2538*68608b5eSPeter Ujfalusi 	rops->get_config = ti_sci_cmd_ring_get_config;
2539*68608b5eSPeter Ujfalusi 
2540*68608b5eSPeter Ujfalusi 	psilops->pair = ti_sci_cmd_rm_psil_pair;
2541*68608b5eSPeter Ujfalusi 	psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2542*68608b5eSPeter Ujfalusi 
2543*68608b5eSPeter Ujfalusi 	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2544*68608b5eSPeter Ujfalusi 	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2545*68608b5eSPeter Ujfalusi 	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
25469e7d756dSNishanth Menon }
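
/*
 * Illustrative sketch (not part of the driver): clients never call the
 * ti_sci_cmd_*() functions directly; they go through the ops table filled in
 * above, e.g. requesting that a device be powered on. The helper name and the
 * device ID parameter are assumptions made for this example.
 */
static int example_enable_device(const struct ti_sci_handle *sci, u32 dev_id)
{
	return sci->ops.dev_ops.get_device(sci, dev_id);
}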
25479e7d756dSNishanth Menon 
25489e7d756dSNishanth Menon /**
2549aa276781SNishanth Menon  * ti_sci_get_handle() - Get the TI SCI handle for a device
2550aa276781SNishanth Menon  * @dev:	Pointer to device for which we want SCI handle
2551aa276781SNishanth Menon  *
2552aa276781SNishanth Menon  * NOTE: The function does not track individual clients of the framework
2553aa276781SNishanth Menon  * and is expected to be maintained by caller of TI SCI protocol library.
2554aa276781SNishanth Menon  * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
2555aa276781SNishanth Menon  * Return: pointer to handle if successful, else:
2556aa276781SNishanth Menon  * -EPROBE_DEFER if the instance is not ready
2557aa276781SNishanth Menon  * -ENODEV if the required node handler is missing
2558aa276781SNishanth Menon  * -EINVAL if invalid conditions are encountered.
2559aa276781SNishanth Menon  */
2560aa276781SNishanth Menon const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
2561aa276781SNishanth Menon {
2562aa276781SNishanth Menon 	struct device_node *ti_sci_np;
2563aa276781SNishanth Menon 	struct list_head *p;
2564aa276781SNishanth Menon 	struct ti_sci_handle *handle = NULL;
2565aa276781SNishanth Menon 	struct ti_sci_info *info;
2566aa276781SNishanth Menon 
2567aa276781SNishanth Menon 	if (!dev) {
2568aa276781SNishanth Menon 		pr_err("I need a device pointer\n");
2569aa276781SNishanth Menon 		return ERR_PTR(-EINVAL);
2570aa276781SNishanth Menon 	}
2571aa276781SNishanth Menon 	ti_sci_np = of_get_parent(dev->of_node);
2572aa276781SNishanth Menon 	if (!ti_sci_np) {
2573aa276781SNishanth Menon 		dev_err(dev, "No OF information\n");
2574aa276781SNishanth Menon 		return ERR_PTR(-EINVAL);
2575aa276781SNishanth Menon 	}
2576aa276781SNishanth Menon 
2577aa276781SNishanth Menon 	mutex_lock(&ti_sci_list_mutex);
2578aa276781SNishanth Menon 	list_for_each(p, &ti_sci_list) {
2579aa276781SNishanth Menon 		info = list_entry(p, struct ti_sci_info, node);
2580aa276781SNishanth Menon 		if (ti_sci_np == info->dev->of_node) {
2581aa276781SNishanth Menon 			handle = &info->handle;
2582aa276781SNishanth Menon 			info->users++;
2583aa276781SNishanth Menon 			break;
2584aa276781SNishanth Menon 		}
2585aa276781SNishanth Menon 	}
2586aa276781SNishanth Menon 	mutex_unlock(&ti_sci_list_mutex);
2587aa276781SNishanth Menon 	of_node_put(ti_sci_np);
2588aa276781SNishanth Menon 
2589aa276781SNishanth Menon 	if (!handle)
2590aa276781SNishanth Menon 		return ERR_PTR(-EPROBE_DEFER);
2591aa276781SNishanth Menon 
2592aa276781SNishanth Menon 	return handle;
2593aa276781SNishanth Menon }
2594aa276781SNishanth Menon EXPORT_SYMBOL_GPL(ti_sci_get_handle);
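
/*
 * Illustrative sketch (not part of this file): typical use from a client
 * driver whose DT node sits directly under the TISCI node. Only the helper
 * name is an assumption; the calls and the -EPROBE_DEFER behaviour follow
 * from the function above.
 */
static int example_client_probe(struct device *dev)
{
	const struct ti_sci_handle *sci;

	sci = ti_sci_get_handle(dev);
	if (IS_ERR(sci))
		return PTR_ERR(sci);	/* -EPROBE_DEFER until the TISCI instance is up */

	dev_dbg(dev, "TISCI ABI %u.%u\n", sci->version.abi_major,
		sci->version.abi_minor);

	/* ... use sci->ops ... */

	return ti_sci_put_handle(sci);	/* must balance the successful get */
}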
2595aa276781SNishanth Menon 
2596aa276781SNishanth Menon /**
2597aa276781SNishanth Menon  * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
2598aa276781SNishanth Menon  * @handle:	Handle acquired by ti_sci_get_handle
2599aa276781SNishanth Menon  *
2600aa276781SNishanth Menon  * NOTE: The function does not track individual clients of the framework
2601aa276781SNishanth Menon  * and is expected to be maintained by caller of TI SCI protocol library.
2602aa276781SNishanth Menon  * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
2603aa276781SNishanth Menon  *
2604aa276781SNishanth Menon  * Return: 0 if successfully released;
2605aa276781SNishanth Menon  * if an error pointer was passed, the error value is returned back;
2606aa276781SNishanth Menon  * if NULL was passed, -EINVAL is returned.
2607aa276781SNishanth Menon  */
2608aa276781SNishanth Menon int ti_sci_put_handle(const struct ti_sci_handle *handle)
2609aa276781SNishanth Menon {
2610aa276781SNishanth Menon 	struct ti_sci_info *info;
2611aa276781SNishanth Menon 
2612aa276781SNishanth Menon 	if (IS_ERR(handle))
2613aa276781SNishanth Menon 		return PTR_ERR(handle);
2614aa276781SNishanth Menon 	if (!handle)
2615aa276781SNishanth Menon 		return -EINVAL;
2616aa276781SNishanth Menon 
2617aa276781SNishanth Menon 	info = handle_to_ti_sci_info(handle);
2618aa276781SNishanth Menon 	mutex_lock(&ti_sci_list_mutex);
2619aa276781SNishanth Menon 	if (!WARN_ON(!info->users))
2620aa276781SNishanth Menon 		info->users--;
2621aa276781SNishanth Menon 	mutex_unlock(&ti_sci_list_mutex);
2622aa276781SNishanth Menon 
2623aa276781SNishanth Menon 	return 0;
2624aa276781SNishanth Menon }
2625aa276781SNishanth Menon EXPORT_SYMBOL_GPL(ti_sci_put_handle);
2626aa276781SNishanth Menon 
2627aa276781SNishanth Menon static void devm_ti_sci_release(struct device *dev, void *res)
2628aa276781SNishanth Menon {
2629aa276781SNishanth Menon 	const struct ti_sci_handle **ptr = res;
2630aa276781SNishanth Menon 	const struct ti_sci_handle *handle = *ptr;
2631aa276781SNishanth Menon 	int ret;
2632aa276781SNishanth Menon 
2633aa276781SNishanth Menon 	ret = ti_sci_put_handle(handle);
2634aa276781SNishanth Menon 	if (ret)
2635aa276781SNishanth Menon 		dev_err(dev, "failed to put handle %d\n", ret);
2636aa276781SNishanth Menon }
2637aa276781SNishanth Menon 
2638aa276781SNishanth Menon /**
2639aa276781SNishanth Menon  * devm_ti_sci_get_handle() - Managed get handle
2640aa276781SNishanth Menon  * @dev:	device for which we want SCI handle for.
2641aa276781SNishanth Menon  *
2642aa276781SNishanth Menon  * NOTE: This releases the handle once the device resources are
2643aa276781SNishanth Menon  * no longer needed. MUST NOT BE released with ti_sci_put_handle.
2644aa276781SNishanth Menon  * The function does not track individual clients of the framework
2645aa276781SNishanth Menon  * and is expected to be maintained by caller of TI SCI protocol library.
2646aa276781SNishanth Menon  *
2647aa276781SNishanth Menon  * Return: pointer to handle if successful, else corresponding error pointer.
2648aa276781SNishanth Menon  */
2649aa276781SNishanth Menon const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
2650aa276781SNishanth Menon {
2651aa276781SNishanth Menon 	const struct ti_sci_handle **ptr;
2652aa276781SNishanth Menon 	const struct ti_sci_handle *handle;
2653aa276781SNishanth Menon 
2654aa276781SNishanth Menon 	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
2655aa276781SNishanth Menon 	if (!ptr)
2656aa276781SNishanth Menon 		return ERR_PTR(-ENOMEM);
2657aa276781SNishanth Menon 	handle = ti_sci_get_handle(dev);
2658aa276781SNishanth Menon 
2659aa276781SNishanth Menon 	if (!IS_ERR(handle)) {
2660aa276781SNishanth Menon 		*ptr = handle;
2661aa276781SNishanth Menon 		devres_add(dev, ptr);
2662aa276781SNishanth Menon 	} else {
2663aa276781SNishanth Menon 		devres_free(ptr);
2664aa276781SNishanth Menon 	}
2665aa276781SNishanth Menon 
2666aa276781SNishanth Menon 	return handle;
2667aa276781SNishanth Menon }
2668aa276781SNishanth Menon EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
2669aa276781SNishanth Menon 
2670905c3047SGrygorii Strashko /**
2671905c3047SGrygorii Strashko  * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
2672905c3047SGrygorii Strashko  * @np:		device node
2673905c3047SGrygorii Strashko  * @property:	property name containing phandle on TISCI node
2674905c3047SGrygorii Strashko  *
2675905c3047SGrygorii Strashko  * NOTE: The function does not track individual clients of the framework
2676905c3047SGrygorii Strashko  * and is expected to be maintained by caller of TI SCI protocol library.
2677905c3047SGrygorii Strashko  * ti_sci_put_handle must be balanced with successful ti_sci_get_by_phandle
2678905c3047SGrygorii Strashko  * Return: pointer to handle if successful, else:
2679905c3047SGrygorii Strashko  * -EPROBE_DEFER if the instance is not ready
2680905c3047SGrygorii Strashko  * -ENODEV if the required node handler is missing
2681905c3047SGrygorii Strashko  * -EINVAL if invalid conditions are encountered.
2682905c3047SGrygorii Strashko  */
2683905c3047SGrygorii Strashko const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
2684905c3047SGrygorii Strashko 						  const char *property)
2685905c3047SGrygorii Strashko {
2686905c3047SGrygorii Strashko 	struct ti_sci_handle *handle = NULL;
2687905c3047SGrygorii Strashko 	struct device_node *ti_sci_np;
2688905c3047SGrygorii Strashko 	struct ti_sci_info *info;
2689905c3047SGrygorii Strashko 	struct list_head *p;
2690905c3047SGrygorii Strashko 
2691905c3047SGrygorii Strashko 	if (!np) {
2692905c3047SGrygorii Strashko 		pr_err("I need a device node pointer\n");
2693905c3047SGrygorii Strashko 		return ERR_PTR(-EINVAL);
2694905c3047SGrygorii Strashko 	}
2695905c3047SGrygorii Strashko 
2696905c3047SGrygorii Strashko 	ti_sci_np = of_parse_phandle(np, property, 0);
2697905c3047SGrygorii Strashko 	if (!ti_sci_np)
2698905c3047SGrygorii Strashko 		return ERR_PTR(-ENODEV);
2699905c3047SGrygorii Strashko 
2700905c3047SGrygorii Strashko 	mutex_lock(&ti_sci_list_mutex);
2701905c3047SGrygorii Strashko 	list_for_each(p, &ti_sci_list) {
2702905c3047SGrygorii Strashko 		info = list_entry(p, struct ti_sci_info, node);
2703905c3047SGrygorii Strashko 		if (ti_sci_np == info->dev->of_node) {
2704905c3047SGrygorii Strashko 			handle = &info->handle;
2705905c3047SGrygorii Strashko 			info->users++;
2706905c3047SGrygorii Strashko 			break;
2707905c3047SGrygorii Strashko 		}
2708905c3047SGrygorii Strashko 	}
2709905c3047SGrygorii Strashko 	mutex_unlock(&ti_sci_list_mutex);
2710905c3047SGrygorii Strashko 	of_node_put(ti_sci_np);
2711905c3047SGrygorii Strashko 
2712905c3047SGrygorii Strashko 	if (!handle)
2713905c3047SGrygorii Strashko 		return ERR_PTR(-EPROBE_DEFER);
2714905c3047SGrygorii Strashko 
2715905c3047SGrygorii Strashko 	return handle;
2716905c3047SGrygorii Strashko }
2717905c3047SGrygorii Strashko EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
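
/*
 * Illustrative sketch (not part of this file): the phandle variant lets a
 * client live anywhere in the tree and point at the TISCI node through a
 * property. The helper name and the "ti,sci" property name are assumptions
 * made for this example.
 */
static int example_client_probe_by_phandle(struct device *dev)
{
	const struct ti_sci_handle *sci;

	sci = ti_sci_get_by_phandle(dev_of_node(dev), "ti,sci");
	if (IS_ERR(sci))
		return PTR_ERR(sci);

	/* ... use sci->ops ... */

	return ti_sci_put_handle(sci);
}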
2718905c3047SGrygorii Strashko 
2719905c3047SGrygorii Strashko /**
2720905c3047SGrygorii Strashko  * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
2721905c3047SGrygorii Strashko  * @dev:	Device pointer requesting TISCI handle
2722905c3047SGrygorii Strashko  * @property:	property name containing phandle on TISCI node
2723905c3047SGrygorii Strashko  *
2724905c3047SGrygorii Strashko  * NOTE: This releases the handle once the device resources are
2725905c3047SGrygorii Strashko  * no longer needed. MUST NOT BE released with ti_sci_put_handle.
2726905c3047SGrygorii Strashko  * The function does not track individual clients of the framework
2727905c3047SGrygorii Strashko  * and is expected to be maintained by caller of TI SCI protocol library.
2728905c3047SGrygorii Strashko  *
2729905c3047SGrygorii Strashko  * Return: pointer to handle if successful, else corresponding error pointer.
2730905c3047SGrygorii Strashko  */
2731905c3047SGrygorii Strashko const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
2732905c3047SGrygorii Strashko 						       const char *property)
2733905c3047SGrygorii Strashko {
2734905c3047SGrygorii Strashko 	const struct ti_sci_handle *handle;
2735905c3047SGrygorii Strashko 	const struct ti_sci_handle **ptr;
2736905c3047SGrygorii Strashko 
2737905c3047SGrygorii Strashko 	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
2738905c3047SGrygorii Strashko 	if (!ptr)
2739905c3047SGrygorii Strashko 		return ERR_PTR(-ENOMEM);
2740905c3047SGrygorii Strashko 	handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
2741905c3047SGrygorii Strashko 
2742905c3047SGrygorii Strashko 	if (!IS_ERR(handle)) {
2743905c3047SGrygorii Strashko 		*ptr = handle;
2744905c3047SGrygorii Strashko 		devres_add(dev, ptr);
2745905c3047SGrygorii Strashko 	} else {
2746905c3047SGrygorii Strashko 		devres_free(ptr);
2747905c3047SGrygorii Strashko 	}
2748905c3047SGrygorii Strashko 
2749905c3047SGrygorii Strashko 	return handle;
2750905c3047SGrygorii Strashko }
2751905c3047SGrygorii Strashko EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
2752905c3047SGrygorii Strashko 
2753032a1ec5SLokesh Vutla /**
2754032a1ec5SLokesh Vutla  * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
2755032a1ec5SLokesh Vutla  * @res:	Pointer to the TISCI resource
2756032a1ec5SLokesh Vutla  *
2757032a1ec5SLokesh Vutla  * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
2758032a1ec5SLokesh Vutla  */
2759032a1ec5SLokesh Vutla u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
2760032a1ec5SLokesh Vutla {
2761032a1ec5SLokesh Vutla 	unsigned long flags;
2762032a1ec5SLokesh Vutla 	u16 set, free_bit;
2763032a1ec5SLokesh Vutla 
2764032a1ec5SLokesh Vutla 	raw_spin_lock_irqsave(&res->lock, flags);
2765032a1ec5SLokesh Vutla 	for (set = 0; set < res->sets; set++) {
2766032a1ec5SLokesh Vutla 		free_bit = find_first_zero_bit(res->desc[set].res_map,
2767032a1ec5SLokesh Vutla 					       res->desc[set].num);
2768032a1ec5SLokesh Vutla 		if (free_bit != res->desc[set].num) {
2769032a1ec5SLokesh Vutla 			set_bit(free_bit, res->desc[set].res_map);
2770032a1ec5SLokesh Vutla 			raw_spin_unlock_irqrestore(&res->lock, flags);
2771032a1ec5SLokesh Vutla 			return res->desc[set].start + free_bit;
2772032a1ec5SLokesh Vutla 		}
2773032a1ec5SLokesh Vutla 	}
2774032a1ec5SLokesh Vutla 	raw_spin_unlock_irqrestore(&res->lock, flags);
2775032a1ec5SLokesh Vutla 
2776032a1ec5SLokesh Vutla 	return TI_SCI_RESOURCE_NULL;
2777032a1ec5SLokesh Vutla }
2778032a1ec5SLokesh Vutla EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
2779032a1ec5SLokesh Vutla 
2780032a1ec5SLokesh Vutla /**
2781032a1ec5SLokesh Vutla  * ti_sci_release_resource() - Release a resource from TISCI resource.
2782032a1ec5SLokesh Vutla  * @res:	Pointer to the TISCI resource
2783032a1ec5SLokesh Vutla  * @id:		Resource id to be released.
2784032a1ec5SLokesh Vutla  */
2785032a1ec5SLokesh Vutla void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
2786032a1ec5SLokesh Vutla {
2787032a1ec5SLokesh Vutla 	unsigned long flags;
2788032a1ec5SLokesh Vutla 	u16 set;
2789032a1ec5SLokesh Vutla 
2790032a1ec5SLokesh Vutla 	raw_spin_lock_irqsave(&res->lock, flags);
2791032a1ec5SLokesh Vutla 	for (set = 0; set < res->sets; set++) {
2792032a1ec5SLokesh Vutla 		if (res->desc[set].start <= id &&
2793032a1ec5SLokesh Vutla 		    (res->desc[set].num + res->desc[set].start) > id)
2794032a1ec5SLokesh Vutla 			clear_bit(id - res->desc[set].start,
2795032a1ec5SLokesh Vutla 				  res->desc[set].res_map);
2796032a1ec5SLokesh Vutla 	}
2797032a1ec5SLokesh Vutla 	raw_spin_unlock_irqrestore(&res->lock, flags);
2798032a1ec5SLokesh Vutla }
2799032a1ec5SLokesh Vutla EXPORT_SYMBOL_GPL(ti_sci_release_resource);
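
/*
 * Illustrative sketch (not part of this file): allocate a resource out of the
 * ranges handed to this host, use it, then return it to the pool. Only the
 * helper name is an assumption; the calls mirror the two functions above.
 */
static void example_resource_cycle(struct ti_sci_resource *res)
{
	u16 id = ti_sci_get_free_resource(res);

	if (id == TI_SCI_RESOURCE_NULL)
		return;		/* every range in @res is exhausted */

	/* ... program the hardware with @id ... */

	ti_sci_release_resource(res, id);
}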
2800032a1ec5SLokesh Vutla 
2801032a1ec5SLokesh Vutla /**
2802032a1ec5SLokesh Vutla  * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
2803032a1ec5SLokesh Vutla  * @res:	Pointer to the TISCI resource
2804032a1ec5SLokesh Vutla  *
2805032a1ec5SLokesh Vutla  * Return: Total number of available resources.
2806032a1ec5SLokesh Vutla  */
2807032a1ec5SLokesh Vutla u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
2808032a1ec5SLokesh Vutla {
2809032a1ec5SLokesh Vutla 	u32 set, count = 0;
2810032a1ec5SLokesh Vutla 
2811032a1ec5SLokesh Vutla 	for (set = 0; set < res->sets; set++)
2812032a1ec5SLokesh Vutla 		count += res->desc[set].num;
2813032a1ec5SLokesh Vutla 
2814032a1ec5SLokesh Vutla 	return count;
2815032a1ec5SLokesh Vutla }
2816032a1ec5SLokesh Vutla EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
2817032a1ec5SLokesh Vutla 
2818032a1ec5SLokesh Vutla /**
2819032a1ec5SLokesh Vutla  * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
2820032a1ec5SLokesh Vutla  * @handle:	TISCI handle
2821032a1ec5SLokesh Vutla  * @dev:	Device pointer to which the resource is assigned
2822032a1ec5SLokesh Vutla  * @dev_id:	TISCI device id to which the resource is assigned
2823032a1ec5SLokesh Vutla  * @of_prop:	property name by which the resource are represented
2824032a1ec5SLokesh Vutla  *
2825032a1ec5SLokesh Vutla  * Return: Pointer to ti_sci_resource if all went well else appropriate
2826032a1ec5SLokesh Vutla  *	   error pointer.
2827032a1ec5SLokesh Vutla  */
2828032a1ec5SLokesh Vutla struct ti_sci_resource *
2829032a1ec5SLokesh Vutla devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
2830032a1ec5SLokesh Vutla 			    struct device *dev, u32 dev_id, char *of_prop)
2831032a1ec5SLokesh Vutla {
2832032a1ec5SLokesh Vutla 	struct ti_sci_resource *res;
2833032a1ec5SLokesh Vutla 	u32 resource_subtype;
2834032a1ec5SLokesh Vutla 	int i, ret;
2835032a1ec5SLokesh Vutla 
2836032a1ec5SLokesh Vutla 	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
2837032a1ec5SLokesh Vutla 	if (!res)
2838032a1ec5SLokesh Vutla 		return ERR_PTR(-ENOMEM);
2839032a1ec5SLokesh Vutla 
2840032a1ec5SLokesh Vutla 	ret = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
2841032a1ec5SLokesh Vutla 					      sizeof(u32));
2842032a1ec5SLokesh Vutla 	if (ret < 0) {
2843032a1ec5SLokesh Vutla 		dev_err(dev, "%s resource type ids not available\n", of_prop);
2844032a1ec5SLokesh Vutla 		return ERR_PTR(ret);
2845032a1ec5SLokesh Vutla 	}
2845032a1ec5SLokesh Vutla 	res->sets = ret;
2846032a1ec5SLokesh Vutla 
2847032a1ec5SLokesh Vutla 	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
2848032a1ec5SLokesh Vutla 				 GFP_KERNEL);
2849032a1ec5SLokesh Vutla 	if (!res->desc)
2850032a1ec5SLokesh Vutla 		return ERR_PTR(-ENOMEM);
2851032a1ec5SLokesh Vutla 
2852032a1ec5SLokesh Vutla 	for (i = 0; i < res->sets; i++) {
2853032a1ec5SLokesh Vutla 		ret = of_property_read_u32_index(dev_of_node(dev), of_prop, i,
2854032a1ec5SLokesh Vutla 						 &resource_subtype);
2855032a1ec5SLokesh Vutla 		if (ret)
2856032a1ec5SLokesh Vutla 			return ERR_PTR(-EINVAL);
2857032a1ec5SLokesh Vutla 
2858032a1ec5SLokesh Vutla 		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
2859032a1ec5SLokesh Vutla 							resource_subtype,
2860032a1ec5SLokesh Vutla 							&res->desc[i].start,
2861032a1ec5SLokesh Vutla 							&res->desc[i].num);
2862032a1ec5SLokesh Vutla 		if (ret) {
2863032a1ec5SLokesh Vutla 			dev_err(dev, "dev = %d subtype %d not allocated for this host\n",
2864032a1ec5SLokesh Vutla 				dev_id, resource_subtype);
2865032a1ec5SLokesh Vutla 			return ERR_PTR(ret);
2866032a1ec5SLokesh Vutla 		}
2867032a1ec5SLokesh Vutla 
2868032a1ec5SLokesh Vutla 		dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
2869032a1ec5SLokesh Vutla 			dev_id, resource_subtype, res->desc[i].start,
2870032a1ec5SLokesh Vutla 			res->desc[i].num);
2871032a1ec5SLokesh Vutla 
2872032a1ec5SLokesh Vutla 		res->desc[i].res_map =
2873032a1ec5SLokesh Vutla 			devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
2874032a1ec5SLokesh Vutla 				     sizeof(*res->desc[i].res_map), GFP_KERNEL);
2875032a1ec5SLokesh Vutla 		if (!res->desc[i].res_map)
2876032a1ec5SLokesh Vutla 			return ERR_PTR(-ENOMEM);
2877032a1ec5SLokesh Vutla 	}
2878032a1ec5SLokesh Vutla 	raw_spin_lock_init(&res->lock);
2879032a1ec5SLokesh Vutla 
2880032a1ec5SLokesh Vutla 	return res;
2881032a1ec5SLokesh Vutla }
2882032a1ec5SLokesh Vutla 
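
/*
 * Illustrative sketch (not part of this file): a client typically builds its
 * resource pool once at probe time from a range property on its own DT node.
 * The helper name, the device ID parameter and the property name are
 * assumptions made for this example.
 */
static struct ti_sci_resource *
example_map_resources(struct device *dev, const struct ti_sci_handle *sci,
		      u32 tisci_dev_id)
{
	return devm_ti_sci_get_of_resource(sci, dev, tisci_dev_id,
					   "ti,sci-rm-range-example");
}
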
2883912cffb4SNishanth Menon static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
2884912cffb4SNishanth Menon 				void *cmd)
2885912cffb4SNishanth Menon {
2886912cffb4SNishanth Menon 	struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
2887912cffb4SNishanth Menon 	const struct ti_sci_handle *handle = &info->handle;
2888912cffb4SNishanth Menon 
2889912cffb4SNishanth Menon 	ti_sci_cmd_core_reboot(handle);
2890912cffb4SNishanth Menon 
2891912cffb4SNishanth Menon 	/* whether the call fails or passes, we should not be here in the first place */
2892912cffb4SNishanth Menon 	return NOTIFY_BAD;
2893912cffb4SNishanth Menon }
2894912cffb4SNishanth Menon 
2895aa276781SNishanth Menon /* Description for K2G */
2896aa276781SNishanth Menon static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
2897e69a3553SNishanth Menon 	.default_host_id = 2,
2898aa276781SNishanth Menon 	/* Conservative duration */
2899aa276781SNishanth Menon 	.max_rx_timeout_ms = 1000,
2900aa276781SNishanth Menon 	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
2901aa276781SNishanth Menon 	.max_msgs = 20,
2902aa276781SNishanth Menon 	.max_msg_size = 64,
2903754c9477SPeter Ujfalusi 	.rm_type_map = NULL,
2904754c9477SPeter Ujfalusi };
2905754c9477SPeter Ujfalusi 
2906754c9477SPeter Ujfalusi static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
2907754c9477SPeter Ujfalusi 	{.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */
2908754c9477SPeter Ujfalusi 	{.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */
2909754c9477SPeter Ujfalusi 	{.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */
2910754c9477SPeter Ujfalusi 	{.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */
2911754c9477SPeter Ujfalusi 	{.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */
2912754c9477SPeter Ujfalusi 	{.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */
2913754c9477SPeter Ujfalusi 	{.dev_id = 0, .type = 0x000}, /* end of table */
2914754c9477SPeter Ujfalusi };
2915754c9477SPeter Ujfalusi 
2916754c9477SPeter Ujfalusi /* Description for AM654 */
2917754c9477SPeter Ujfalusi static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
2918754c9477SPeter Ujfalusi 	.default_host_id = 12,
2919754c9477SPeter Ujfalusi 	/* Conservative duration */
2920754c9477SPeter Ujfalusi 	.max_rx_timeout_ms = 10000,
2921754c9477SPeter Ujfalusi 	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
2922754c9477SPeter Ujfalusi 	.max_msgs = 20,
2923754c9477SPeter Ujfalusi 	.max_msg_size = 60,
2924754c9477SPeter Ujfalusi 	.rm_type_map = ti_sci_am654_rm_type_map,
2925aa276781SNishanth Menon };
2926aa276781SNishanth Menon 
2927aa276781SNishanth Menon static const struct of_device_id ti_sci_of_match[] = {
2928aa276781SNishanth Menon 	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
2929754c9477SPeter Ujfalusi 	{.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
2930aa276781SNishanth Menon 	{ /* Sentinel */ },
2931aa276781SNishanth Menon };
2932aa276781SNishanth Menon MODULE_DEVICE_TABLE(of, ti_sci_of_match);
2933aa276781SNishanth Menon 
2934aa276781SNishanth Menon static int ti_sci_probe(struct platform_device *pdev)
2935aa276781SNishanth Menon {
2936aa276781SNishanth Menon 	struct device *dev = &pdev->dev;
2937aa276781SNishanth Menon 	const struct of_device_id *of_id;
2938aa276781SNishanth Menon 	const struct ti_sci_desc *desc;
2939aa276781SNishanth Menon 	struct ti_sci_xfer *xfer;
2940aa276781SNishanth Menon 	struct ti_sci_info *info = NULL;
2941aa276781SNishanth Menon 	struct ti_sci_xfers_info *minfo;
2942aa276781SNishanth Menon 	struct mbox_client *cl;
2943aa276781SNishanth Menon 	int ret = -EINVAL;
2944aa276781SNishanth Menon 	int i;
2945912cffb4SNishanth Menon 	int reboot = 0;
2946e69a3553SNishanth Menon 	u32 h_id;
2947aa276781SNishanth Menon 
2948aa276781SNishanth Menon 	of_id = of_match_device(ti_sci_of_match, dev);
2949aa276781SNishanth Menon 	if (!of_id) {
2950aa276781SNishanth Menon 		dev_err(dev, "OF data missing\n");
2951aa276781SNishanth Menon 		return -EINVAL;
2952aa276781SNishanth Menon 	}
2953aa276781SNishanth Menon 	desc = of_id->data;
2954aa276781SNishanth Menon 
2955aa276781SNishanth Menon 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
2956aa276781SNishanth Menon 	if (!info)
2957aa276781SNishanth Menon 		return -ENOMEM;
2958aa276781SNishanth Menon 
2959aa276781SNishanth Menon 	info->dev = dev;
2960aa276781SNishanth Menon 	info->desc = desc;
2961e69a3553SNishanth Menon 	ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
2962e69a3553SNishanth Menon 	/* if the property is not present in DT, use a default from desc */
2963e69a3553SNishanth Menon 	if (ret < 0) {
2964e69a3553SNishanth Menon 		info->host_id = info->desc->default_host_id;
2965e69a3553SNishanth Menon 	} else {
2966e69a3553SNishanth Menon 		if (!h_id) {
2967e69a3553SNishanth Menon 			dev_warn(dev, "Host ID 0 is reserved for firmware\n");
2968e69a3553SNishanth Menon 			info->host_id = info->desc->default_host_id;
2969e69a3553SNishanth Menon 		} else {
2970e69a3553SNishanth Menon 			info->host_id = h_id;
2971e69a3553SNishanth Menon 		}
2972e69a3553SNishanth Menon 	}
2973e69a3553SNishanth Menon 
2974912cffb4SNishanth Menon 	reboot = of_property_read_bool(dev->of_node,
2975912cffb4SNishanth Menon 				       "ti,system-reboot-controller");
2976aa276781SNishanth Menon 	INIT_LIST_HEAD(&info->node);
2977aa276781SNishanth Menon 	minfo = &info->minfo;
2978aa276781SNishanth Menon 
2979aa276781SNishanth Menon 	/*
2980aa276781SNishanth Menon 	 * Pre-allocate messages
2981aa276781SNishanth Menon 	 * NEVER allocate more than what we can indicate in hdr.seq
2982aa276781SNishanth Menon 	 * If we have a data description bug, force a fix.
2983aa276781SNishanth Menon 	 */
2984aa276781SNishanth Menon 	if (WARN_ON(desc->max_msgs >=
2985aa276781SNishanth Menon 		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
2986aa276781SNishanth Menon 		return -EINVAL;
2987aa276781SNishanth Menon 
2988aa276781SNishanth Menon 	minfo->xfer_block = devm_kcalloc(dev,
2989aa276781SNishanth Menon 					 desc->max_msgs,
2990aa276781SNishanth Menon 					 sizeof(*minfo->xfer_block),
2991aa276781SNishanth Menon 					 GFP_KERNEL);
2992aa276781SNishanth Menon 	if (!minfo->xfer_block)
2993aa276781SNishanth Menon 		return -ENOMEM;
2994aa276781SNishanth Menon 
2995a86854d0SKees Cook 	minfo->xfer_alloc_table = devm_kcalloc(dev,
2996a86854d0SKees Cook 					       BITS_TO_LONGS(desc->max_msgs),
2997a86854d0SKees Cook 					       sizeof(unsigned long),
2998aa276781SNishanth Menon 					       GFP_KERNEL);
2999aa276781SNishanth Menon 	if (!minfo->xfer_alloc_table)
3000aa276781SNishanth Menon 		return -ENOMEM;
3001aa276781SNishanth Menon 	bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);
3002aa276781SNishanth Menon 
3003aa276781SNishanth Menon 	/* Pre-initialize the buffer pointer to pre-allocated buffers */
3004aa276781SNishanth Menon 	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
3005aa276781SNishanth Menon 		xfer->xfer_buf = devm_kzalloc(dev, desc->max_msg_size,
3006aa276781SNishanth Menon 					      GFP_KERNEL);
3007aa276781SNishanth Menon 		if (!xfer->xfer_buf)
3008aa276781SNishanth Menon 			return -ENOMEM;
3009aa276781SNishanth Menon 
3010aa276781SNishanth Menon 		xfer->tx_message.buf = xfer->xfer_buf;
3011aa276781SNishanth Menon 		init_completion(&xfer->done);
3012aa276781SNishanth Menon 	}
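	/*
	 * Each pre-allocated xfer above is handed out by the transfer
	 * allocation helper earlier in this file: a free bit is claimed in
	 * xfer_alloc_table under xfer_lock and its index doubles as the
	 * message sequence number, which is how ti_sci_rx_callback() maps a
	 * response back to the waiting xfer and completes &xfer->done.
	 */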
3013aa276781SNishanth Menon 
3014aa276781SNishanth Menon 	ret = ti_sci_debugfs_create(pdev, info);
3015aa276781SNishanth Menon 	if (ret)
3016aa276781SNishanth Menon 		dev_warn(dev, "Failed to create debug file\n");
3017aa276781SNishanth Menon 
3018aa276781SNishanth Menon 	platform_set_drvdata(pdev, info);
3019aa276781SNishanth Menon 
3020aa276781SNishanth Menon 	cl = &info->cl;
3021aa276781SNishanth Menon 	cl->dev = dev;
3022aa276781SNishanth Menon 	cl->tx_block = false;
3023aa276781SNishanth Menon 	cl->rx_callback = ti_sci_rx_callback;
3024aa276781SNishanth Menon 	cl->knows_txdone = true;
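	/*
	 * The mailbox client neither blocks in mbox_send_message() nor relies
	 * on the controller for TX-done signalling: the transfer path in this
	 * driver waits on the xfer completion signalled by
	 * ti_sci_rx_callback() and reports TX done itself (knows_txdone).
	 */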
3025aa276781SNishanth Menon 
3026aa276781SNishanth Menon 	spin_lock_init(&minfo->xfer_lock);
3027aa276781SNishanth Menon 	sema_init(&minfo->sem_xfer_count, desc->max_msgs);
3028aa276781SNishanth Menon 
3029aa276781SNishanth Menon 	info->chan_rx = mbox_request_channel_byname(cl, "rx");
3030aa276781SNishanth Menon 	if (IS_ERR(info->chan_rx)) {
3031aa276781SNishanth Menon 		ret = PTR_ERR(info->chan_rx);
3032aa276781SNishanth Menon 		goto out;
3033aa276781SNishanth Menon 	}
3034aa276781SNishanth Menon 
3035aa276781SNishanth Menon 	info->chan_tx = mbox_request_channel_byname(cl, "tx");
3036aa276781SNishanth Menon 	if (IS_ERR(info->chan_tx)) {
3037aa276781SNishanth Menon 		ret = PTR_ERR(info->chan_tx);
3038aa276781SNishanth Menon 		goto out;
3039aa276781SNishanth Menon 	}
3040aa276781SNishanth Menon 	ret = ti_sci_cmd_get_revision(info);
3041aa276781SNishanth Menon 	if (ret) {
3042aa276781SNishanth Menon 		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
3043aa276781SNishanth Menon 		goto out;
3044aa276781SNishanth Menon 	}
3045aa276781SNishanth Menon 
30469e7d756dSNishanth Menon 	ti_sci_setup_ops(info);
30479e7d756dSNishanth Menon 
3048912cffb4SNishanth Menon 	if (reboot) {
3049912cffb4SNishanth Menon 		info->nb.notifier_call = tisci_reboot_handler;
3050912cffb4SNishanth Menon 		info->nb.priority = 128;
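		/*
		 * 128 is the conventional default priority for restart
		 * handlers (0 = lowest priority, 255 = highest).
		 */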
3051912cffb4SNishanth Menon 
3052912cffb4SNishanth Menon 		ret = register_restart_handler(&info->nb);
3053912cffb4SNishanth Menon 		if (ret) {
3054912cffb4SNishanth Menon 			dev_err(dev, "reboot registration failed (%d)\n", ret);
3055912cffb4SNishanth Menon 			goto out;
3056912cffb4SNishanth Menon 		}
3057912cffb4SNishanth Menon 	}
3058912cffb4SNishanth Menon 
3059aa276781SNishanth Menon 	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
3060aa276781SNishanth Menon 		 info->handle.version.abi_major, info->handle.version.abi_minor,
3061aa276781SNishanth Menon 		 info->handle.version.firmware_revision,
3062aa276781SNishanth Menon 		 info->handle.version.firmware_description);
3063aa276781SNishanth Menon 
3064aa276781SNishanth Menon 	mutex_lock(&ti_sci_list_mutex);
3065aa276781SNishanth Menon 	list_add_tail(&info->node, &ti_sci_list);
3066aa276781SNishanth Menon 	mutex_unlock(&ti_sci_list_mutex);
3067aa276781SNishanth Menon 
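	/*
	 * Create platform devices for the children of this node (clock,
	 * power-domain, etc. providers) now that the protocol handle they
	 * depend on is ready.
	 */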
3068aa276781SNishanth Menon 	return of_platform_populate(dev->of_node, NULL, NULL, dev);
3069aa276781SNishanth Menon out:
3070aa276781SNishanth Menon 	if (!IS_ERR(info->chan_tx))
3071aa276781SNishanth Menon 		mbox_free_channel(info->chan_tx);
3072aa276781SNishanth Menon 	if (!IS_ERR(info->chan_rx))
3073aa276781SNishanth Menon 		mbox_free_channel(info->chan_rx);
3074aa276781SNishanth Menon 	debugfs_remove(info->d);
3075aa276781SNishanth Menon 	return ret;
3076aa276781SNishanth Menon }
3077aa276781SNishanth Menon 
3078aa276781SNishanth Menon static int ti_sci_remove(struct platform_device *pdev)
3079aa276781SNishanth Menon {
3080aa276781SNishanth Menon 	struct ti_sci_info *info;
3081aa276781SNishanth Menon 	struct device *dev = &pdev->dev;
3082aa276781SNishanth Menon 	int ret = 0;
3083aa276781SNishanth Menon 
3084aa276781SNishanth Menon 	of_platform_depopulate(dev);
3085aa276781SNishanth Menon 
3086aa276781SNishanth Menon 	info = platform_get_drvdata(pdev);
3087aa276781SNishanth Menon 
3088912cffb4SNishanth Menon 	if (info->nb.notifier_call)
3089912cffb4SNishanth Menon 		unregister_restart_handler(&info->nb);
3090912cffb4SNishanth Menon 
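	/*
	 * info->users counts clients that obtained the handle via
	 * ti_sci_get_handle() and have not yet put it back; removal is
	 * refused with -EBUSY while any such reference remains.
	 */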
3091aa276781SNishanth Menon 	mutex_lock(&ti_sci_list_mutex);
3092aa276781SNishanth Menon 	if (info->users)
3093aa276781SNishanth Menon 		ret = -EBUSY;
3094aa276781SNishanth Menon 	else
3095aa276781SNishanth Menon 		list_del(&info->node);
3096aa276781SNishanth Menon 	mutex_unlock(&ti_sci_list_mutex);
3097aa276781SNishanth Menon 
3098aa276781SNishanth Menon 	if (!ret) {
3099aa276781SNishanth Menon 		ti_sci_debugfs_destroy(pdev, info);
3100aa276781SNishanth Menon 
3101aa276781SNishanth Menon 		/* Safe to free channels since no more users */
3102aa276781SNishanth Menon 		mbox_free_channel(info->chan_tx);
3103aa276781SNishanth Menon 		mbox_free_channel(info->chan_rx);
3104aa276781SNishanth Menon 	}
3105aa276781SNishanth Menon 
3106aa276781SNishanth Menon 	return ret;
3107aa276781SNishanth Menon }
3108aa276781SNishanth Menon 
3109aa276781SNishanth Menon static struct platform_driver ti_sci_driver = {
3110aa276781SNishanth Menon 	.probe = ti_sci_probe,
3111aa276781SNishanth Menon 	.remove = ti_sci_remove,
3112aa276781SNishanth Menon 	.driver = {
3113aa276781SNishanth Menon 		   .name = "ti-sci",
3114aa276781SNishanth Menon 		   .of_match_table = of_match_ptr(ti_sci_of_match),
3115aa276781SNishanth Menon 	},
3116aa276781SNishanth Menon };
3117aa276781SNishanth Menon module_platform_driver(ti_sci_driver);
3118aa276781SNishanth Menon 
3119aa276781SNishanth Menon MODULE_LICENSE("GPL v2");
3120aa276781SNishanth Menon MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
3121aa276781SNishanth Menon MODULE_AUTHOR("Nishanth Menon");
3122aa276781SNishanth Menon MODULE_ALIAS("platform:ti-sci");
3123
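/*
 * Illustrative sketch only, not part of this driver: how a consumer could
 * obtain the protocol handle exported above and log the firmware ABI
 * version.  example_client_probe() and its device are hypothetical; the
 * handle API used is the one declared in <linux/soc/ti/ti_sci_protocol.h>.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/ti/ti_sci_protocol.h>

static int __maybe_unused example_client_probe(struct device *dev)
{
	const struct ti_sci_handle *handle;

	/* Managed lookup; the reference is dropped automatically on unbind */
	handle = devm_ti_sci_get_handle(dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	dev_info(dev, "TISCI ABI %u.%u, firmware rev 0x%04x '%s'\n",
		 handle->version.abi_major, handle->version.abi_minor,
		 handle->version.firmware_revision,
		 handle->version.firmware_description);

	return 0;
}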