11e0a6014SLokesh Vutla // SPDX-License-Identifier: GPL-2.0 2aa276781SNishanth Menon /* 3aa276781SNishanth Menon * Texas Instruments System Control Interface Protocol Driver 4aa276781SNishanth Menon * 5aa276781SNishanth Menon * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ 6aa276781SNishanth Menon * Nishanth Menon 7aa276781SNishanth Menon */ 8aa276781SNishanth Menon 9aa276781SNishanth Menon #define pr_fmt(fmt) "%s: " fmt, __func__ 10aa276781SNishanth Menon 11aa276781SNishanth Menon #include <linux/bitmap.h> 12aa276781SNishanth Menon #include <linux/debugfs.h> 13aa276781SNishanth Menon #include <linux/export.h> 14aa276781SNishanth Menon #include <linux/io.h> 15aa276781SNishanth Menon #include <linux/kernel.h> 16aa276781SNishanth Menon #include <linux/mailbox_client.h> 17aa276781SNishanth Menon #include <linux/module.h> 18aa276781SNishanth Menon #include <linux/of_device.h> 19aa276781SNishanth Menon #include <linux/semaphore.h> 20aa276781SNishanth Menon #include <linux/slab.h> 21aa276781SNishanth Menon #include <linux/soc/ti/ti-msgmgr.h> 22aa276781SNishanth Menon #include <linux/soc/ti/ti_sci_protocol.h> 23912cffb4SNishanth Menon #include <linux/reboot.h> 24aa276781SNishanth Menon 25aa276781SNishanth Menon #include "ti_sci.h" 26aa276781SNishanth Menon 27aa276781SNishanth Menon /* List of all TI SCI devices active in system */ 28aa276781SNishanth Menon static LIST_HEAD(ti_sci_list); 29aa276781SNishanth Menon /* Protection for the entire list */ 30aa276781SNishanth Menon static DEFINE_MUTEX(ti_sci_list_mutex); 31aa276781SNishanth Menon 32aa276781SNishanth Menon /** 33aa276781SNishanth Menon * struct ti_sci_xfer - Structure representing a message flow 34aa276781SNishanth Menon * @tx_message: Transmit message 35aa276781SNishanth Menon * @rx_len: Receive message length 36aa276781SNishanth Menon * @xfer_buf: Preallocated buffer to store receive message 37aa276781SNishanth Menon * Since we work with request-ACK protocol, we can 
38aa276781SNishanth Menon * reuse the same buffer for the rx path as we 39aa276781SNishanth Menon * use for the tx path. 40aa276781SNishanth Menon * @done: completion event 41aa276781SNishanth Menon */ 42aa276781SNishanth Menon struct ti_sci_xfer { 43aa276781SNishanth Menon struct ti_msgmgr_message tx_message; 44aa276781SNishanth Menon u8 rx_len; 45aa276781SNishanth Menon u8 *xfer_buf; 46aa276781SNishanth Menon struct completion done; 47aa276781SNishanth Menon }; 48aa276781SNishanth Menon 49aa276781SNishanth Menon /** 50aa276781SNishanth Menon * struct ti_sci_xfers_info - Structure to manage transfer information 51aa276781SNishanth Menon * @sem_xfer_count: Counting Semaphore for managing max simultaneous 52aa276781SNishanth Menon * Messages. 53aa276781SNishanth Menon * @xfer_block: Preallocated Message array 54aa276781SNishanth Menon * @xfer_alloc_table: Bitmap table for allocated messages. 55aa276781SNishanth Menon * Index of this bitmap table is also used for message 56aa276781SNishanth Menon * sequence identifier. 57aa276781SNishanth Menon * @xfer_lock: Protection for message allocation 58aa276781SNishanth Menon */ 59aa276781SNishanth Menon struct ti_sci_xfers_info { 60aa276781SNishanth Menon struct semaphore sem_xfer_count; 61aa276781SNishanth Menon struct ti_sci_xfer *xfer_block; 62aa276781SNishanth Menon unsigned long *xfer_alloc_table; 63aa276781SNishanth Menon /* protect transfer allocation */ 64aa276781SNishanth Menon spinlock_t xfer_lock; 65aa276781SNishanth Menon }; 66aa276781SNishanth Menon 67aa276781SNishanth Menon /** 689c19fb68SLokesh Vutla * struct ti_sci_rm_type_map - Structure representing TISCI Resource 699c19fb68SLokesh Vutla * management representation of dev_ids. 709c19fb68SLokesh Vutla * @dev_id: TISCI device ID 719c19fb68SLokesh Vutla * @type: Corresponding id as identified by TISCI RM. 729c19fb68SLokesh Vutla * 739c19fb68SLokesh Vutla * Note: This is used only as a work around for using RM range apis 749c19fb68SLokesh Vutla * for AM654 SoC. 
For future SoCs dev_id will be used as type 759c19fb68SLokesh Vutla * for RM range APIs. In order to maintain ABI backward compatibility 769c19fb68SLokesh Vutla * type is not being changed for AM654 SoC. 779c19fb68SLokesh Vutla */ 789c19fb68SLokesh Vutla struct ti_sci_rm_type_map { 799c19fb68SLokesh Vutla u32 dev_id; 809c19fb68SLokesh Vutla u16 type; 819c19fb68SLokesh Vutla }; 829c19fb68SLokesh Vutla 839c19fb68SLokesh Vutla /** 84aa276781SNishanth Menon * struct ti_sci_desc - Description of SoC integration 85e69a3553SNishanth Menon * @default_host_id: Host identifier representing the compute entity 86aa276781SNishanth Menon * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) 87aa276781SNishanth Menon * @max_msgs: Maximum number of messages that can be pending 88aa276781SNishanth Menon * simultaneously in the system 89aa276781SNishanth Menon * @max_msg_size: Maximum size of data per message that can be handled. 909c19fb68SLokesh Vutla * @rm_type_map: RM resource type mapping structure. 91aa276781SNishanth Menon */ 92aa276781SNishanth Menon struct ti_sci_desc { 93e69a3553SNishanth Menon u8 default_host_id; 94aa276781SNishanth Menon int max_rx_timeout_ms; 95aa276781SNishanth Menon int max_msgs; 96aa276781SNishanth Menon int max_msg_size; 979c19fb68SLokesh Vutla struct ti_sci_rm_type_map *rm_type_map; 98aa276781SNishanth Menon }; 99aa276781SNishanth Menon 100aa276781SNishanth Menon /** 101aa276781SNishanth Menon * struct ti_sci_info - Structure representing a TI SCI instance 102aa276781SNishanth Menon * @dev: Device pointer 103aa276781SNishanth Menon * @desc: SoC description for this instance 104912cffb4SNishanth Menon * @nb: Reboot Notifier block 105aa276781SNishanth Menon * @d: Debugfs file entry 106aa276781SNishanth Menon * @debug_region: Memory region where the debug message are available 107aa276781SNishanth Menon * @debug_region_size: Debug region size 108aa276781SNishanth Menon * @debug_buffer: Buffer allocated to copy debug messages. 
109aa276781SNishanth Menon * @handle: Instance of TI SCI handle to send to clients. 110aa276781SNishanth Menon * @cl: Mailbox Client 111aa276781SNishanth Menon * @chan_tx: Transmit mailbox channel 112aa276781SNishanth Menon * @chan_rx: Receive mailbox channel 113aa276781SNishanth Menon * @minfo: Message info 114aa276781SNishanth Menon * @node: list head 115e69a3553SNishanth Menon * @host_id: Host ID 116aa276781SNishanth Menon * @users: Number of users of this instance 117aa276781SNishanth Menon */ 118aa276781SNishanth Menon struct ti_sci_info { 119aa276781SNishanth Menon struct device *dev; 120912cffb4SNishanth Menon struct notifier_block nb; 121aa276781SNishanth Menon const struct ti_sci_desc *desc; 122aa276781SNishanth Menon struct dentry *d; 123aa276781SNishanth Menon void __iomem *debug_region; 124aa276781SNishanth Menon char *debug_buffer; 125aa276781SNishanth Menon size_t debug_region_size; 126aa276781SNishanth Menon struct ti_sci_handle handle; 127aa276781SNishanth Menon struct mbox_client cl; 128aa276781SNishanth Menon struct mbox_chan *chan_tx; 129aa276781SNishanth Menon struct mbox_chan *chan_rx; 130aa276781SNishanth Menon struct ti_sci_xfers_info minfo; 131aa276781SNishanth Menon struct list_head node; 132e69a3553SNishanth Menon u8 host_id; 133aa276781SNishanth Menon /* protected by ti_sci_list_mutex */ 134aa276781SNishanth Menon int users; 135912cffb4SNishanth Menon 136aa276781SNishanth Menon }; 137aa276781SNishanth Menon 138aa276781SNishanth Menon #define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl) 139aa276781SNishanth Menon #define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle) 140912cffb4SNishanth Menon #define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb) 141aa276781SNishanth Menon 142aa276781SNishanth Menon #ifdef CONFIG_DEBUG_FS 143aa276781SNishanth Menon 144aa276781SNishanth Menon /** 145aa276781SNishanth Menon * ti_sci_debug_show() - Helper to dump the debug log 146aa276781SNishanth Menon 
* @s: sequence file pointer 147aa276781SNishanth Menon * @unused: unused. 148aa276781SNishanth Menon * 149aa276781SNishanth Menon * Return: 0 150aa276781SNishanth Menon */ 151aa276781SNishanth Menon static int ti_sci_debug_show(struct seq_file *s, void *unused) 152aa276781SNishanth Menon { 153aa276781SNishanth Menon struct ti_sci_info *info = s->private; 154aa276781SNishanth Menon 155aa276781SNishanth Menon memcpy_fromio(info->debug_buffer, info->debug_region, 156aa276781SNishanth Menon info->debug_region_size); 157aa276781SNishanth Menon /* 158aa276781SNishanth Menon * We don't trust firmware to leave NULL terminated last byte (hence 159aa276781SNishanth Menon * we have allocated 1 extra 0 byte). Since we cannot guarantee any 160aa276781SNishanth Menon * specific data format for debug messages, We just present the data 161aa276781SNishanth Menon * in the buffer as is - we expect the messages to be self explanatory. 162aa276781SNishanth Menon */ 163aa276781SNishanth Menon seq_puts(s, info->debug_buffer); 164aa276781SNishanth Menon return 0; 165aa276781SNishanth Menon } 166aa276781SNishanth Menon 1675953c887SYangtao Li /* Provide the log file operations interface*/ 1685953c887SYangtao Li DEFINE_SHOW_ATTRIBUTE(ti_sci_debug); 169aa276781SNishanth Menon 170aa276781SNishanth Menon /** 171aa276781SNishanth Menon * ti_sci_debugfs_create() - Create log debug file 172aa276781SNishanth Menon * @pdev: platform device pointer 173aa276781SNishanth Menon * @info: Pointer to SCI entity information 174aa276781SNishanth Menon * 175aa276781SNishanth Menon * Return: 0 if all went fine, else corresponding error. 
176aa276781SNishanth Menon */ 177aa276781SNishanth Menon static int ti_sci_debugfs_create(struct platform_device *pdev, 178aa276781SNishanth Menon struct ti_sci_info *info) 179aa276781SNishanth Menon { 180aa276781SNishanth Menon struct device *dev = &pdev->dev; 181aa276781SNishanth Menon struct resource *res; 182aa276781SNishanth Menon char debug_name[50] = "ti_sci_debug@"; 183aa276781SNishanth Menon 184aa276781SNishanth Menon /* Debug region is optional */ 185aa276781SNishanth Menon res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 186aa276781SNishanth Menon "debug_messages"); 187aa276781SNishanth Menon info->debug_region = devm_ioremap_resource(dev, res); 188aa276781SNishanth Menon if (IS_ERR(info->debug_region)) 189aa276781SNishanth Menon return 0; 190aa276781SNishanth Menon info->debug_region_size = resource_size(res); 191aa276781SNishanth Menon 192aa276781SNishanth Menon info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1, 193aa276781SNishanth Menon sizeof(char), GFP_KERNEL); 194aa276781SNishanth Menon if (!info->debug_buffer) 195aa276781SNishanth Menon return -ENOMEM; 196aa276781SNishanth Menon /* Setup NULL termination */ 197aa276781SNishanth Menon info->debug_buffer[info->debug_region_size] = 0; 198aa276781SNishanth Menon 199aa276781SNishanth Menon info->d = debugfs_create_file(strncat(debug_name, dev_name(dev), 20076cefef8SArnd Bergmann sizeof(debug_name) - 20176cefef8SArnd Bergmann sizeof("ti_sci_debug@")), 202aa276781SNishanth Menon 0444, NULL, info, &ti_sci_debug_fops); 203aa276781SNishanth Menon if (IS_ERR(info->d)) 204aa276781SNishanth Menon return PTR_ERR(info->d); 205aa276781SNishanth Menon 206aa276781SNishanth Menon dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n", 207aa276781SNishanth Menon info->debug_region, info->debug_region_size, res); 208aa276781SNishanth Menon return 0; 209aa276781SNishanth Menon } 210aa276781SNishanth Menon 211aa276781SNishanth Menon /** 212aa276781SNishanth Menon * 
ti_sci_debugfs_destroy() - clean up log debug file 213aa276781SNishanth Menon * @pdev: platform device pointer 214aa276781SNishanth Menon * @info: Pointer to SCI entity information 215aa276781SNishanth Menon */ 216aa276781SNishanth Menon static void ti_sci_debugfs_destroy(struct platform_device *pdev, 217aa276781SNishanth Menon struct ti_sci_info *info) 218aa276781SNishanth Menon { 219aa276781SNishanth Menon if (IS_ERR(info->debug_region)) 220aa276781SNishanth Menon return; 221aa276781SNishanth Menon 222aa276781SNishanth Menon debugfs_remove(info->d); 223aa276781SNishanth Menon } 224aa276781SNishanth Menon #else /* CONFIG_DEBUG_FS */ 225aa276781SNishanth Menon static inline int ti_sci_debugfs_create(struct platform_device *dev, 226aa276781SNishanth Menon struct ti_sci_info *info) 227aa276781SNishanth Menon { 228aa276781SNishanth Menon return 0; 229aa276781SNishanth Menon } 230aa276781SNishanth Menon 231aa276781SNishanth Menon static inline void ti_sci_debugfs_destroy(struct platform_device *dev, 232aa276781SNishanth Menon struct ti_sci_info *info) 233aa276781SNishanth Menon { 234aa276781SNishanth Menon } 235aa276781SNishanth Menon #endif /* CONFIG_DEBUG_FS */ 236aa276781SNishanth Menon 237aa276781SNishanth Menon /** 238aa276781SNishanth Menon * ti_sci_dump_header_dbg() - Helper to dump a message header. 239aa276781SNishanth Menon * @dev: Device pointer corresponding to the SCI entity 240aa276781SNishanth Menon * @hdr: pointer to header. 
241aa276781SNishanth Menon */ 242aa276781SNishanth Menon static inline void ti_sci_dump_header_dbg(struct device *dev, 243aa276781SNishanth Menon struct ti_sci_msg_hdr *hdr) 244aa276781SNishanth Menon { 245aa276781SNishanth Menon dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n", 246aa276781SNishanth Menon hdr->type, hdr->host, hdr->seq, hdr->flags); 247aa276781SNishanth Menon } 248aa276781SNishanth Menon 249aa276781SNishanth Menon /** 250aa276781SNishanth Menon * ti_sci_rx_callback() - mailbox client callback for receive messages 251aa276781SNishanth Menon * @cl: client pointer 252aa276781SNishanth Menon * @m: mailbox message 253aa276781SNishanth Menon * 254aa276781SNishanth Menon * Processes one received message to appropriate transfer information and 255aa276781SNishanth Menon * signals completion of the transfer. 256aa276781SNishanth Menon * 257aa276781SNishanth Menon * NOTE: This function will be invoked in IRQ context, hence should be 258aa276781SNishanth Menon * as optimal as possible. 259aa276781SNishanth Menon */ 260aa276781SNishanth Menon static void ti_sci_rx_callback(struct mbox_client *cl, void *m) 261aa276781SNishanth Menon { 262aa276781SNishanth Menon struct ti_sci_info *info = cl_to_ti_sci_info(cl); 263aa276781SNishanth Menon struct device *dev = info->dev; 264aa276781SNishanth Menon struct ti_sci_xfers_info *minfo = &info->minfo; 265aa276781SNishanth Menon struct ti_msgmgr_message *mbox_msg = m; 266aa276781SNishanth Menon struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf; 267aa276781SNishanth Menon struct ti_sci_xfer *xfer; 268aa276781SNishanth Menon u8 xfer_id; 269aa276781SNishanth Menon 270aa276781SNishanth Menon xfer_id = hdr->seq; 271aa276781SNishanth Menon 272aa276781SNishanth Menon /* 273aa276781SNishanth Menon * Are we even expecting this? 
274aa276781SNishanth Menon * NOTE: barriers were implicit in locks used for modifying the bitmap 275aa276781SNishanth Menon */ 276aa276781SNishanth Menon if (!test_bit(xfer_id, minfo->xfer_alloc_table)) { 277aa276781SNishanth Menon dev_err(dev, "Message for %d is not expected!\n", xfer_id); 278aa276781SNishanth Menon return; 279aa276781SNishanth Menon } 280aa276781SNishanth Menon 281aa276781SNishanth Menon xfer = &minfo->xfer_block[xfer_id]; 282aa276781SNishanth Menon 283aa276781SNishanth Menon /* Is the message of valid length? */ 284aa276781SNishanth Menon if (mbox_msg->len > info->desc->max_msg_size) { 285bd0fa74eSNishanth Menon dev_err(dev, "Unable to handle %zu xfer(max %d)\n", 286aa276781SNishanth Menon mbox_msg->len, info->desc->max_msg_size); 287aa276781SNishanth Menon ti_sci_dump_header_dbg(dev, hdr); 288aa276781SNishanth Menon return; 289aa276781SNishanth Menon } 290aa276781SNishanth Menon if (mbox_msg->len < xfer->rx_len) { 291bd0fa74eSNishanth Menon dev_err(dev, "Recv xfer %zu < expected %d length\n", 292aa276781SNishanth Menon mbox_msg->len, xfer->rx_len); 293aa276781SNishanth Menon ti_sci_dump_header_dbg(dev, hdr); 294aa276781SNishanth Menon return; 295aa276781SNishanth Menon } 296aa276781SNishanth Menon 297aa276781SNishanth Menon ti_sci_dump_header_dbg(dev, hdr); 298aa276781SNishanth Menon /* Take a copy to the rx buffer.. 
*/ 299aa276781SNishanth Menon memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len); 300aa276781SNishanth Menon complete(&xfer->done); 301aa276781SNishanth Menon } 302aa276781SNishanth Menon 303aa276781SNishanth Menon /** 304aa276781SNishanth Menon * ti_sci_get_one_xfer() - Allocate one message 305aa276781SNishanth Menon * @info: Pointer to SCI entity information 306aa276781SNishanth Menon * @msg_type: Message type 307aa276781SNishanth Menon * @msg_flags: Flag to set for the message 308aa276781SNishanth Menon * @tx_message_size: transmit message size 309aa276781SNishanth Menon * @rx_message_size: receive message size 310aa276781SNishanth Menon * 311aa276781SNishanth Menon * Helper function which is used by various command functions that are 312aa276781SNishanth Menon * exposed to clients of this driver for allocating a message traffic event. 313aa276781SNishanth Menon * 314aa276781SNishanth Menon * This function can sleep depending on pending requests already in the system 315aa276781SNishanth Menon * for the SCI entity. Further, this also holds a spinlock to maintain integrity 316aa276781SNishanth Menon * of internal data structures. 317aa276781SNishanth Menon * 318aa276781SNishanth Menon * Return: 0 if all went fine, else corresponding error. 
319aa276781SNishanth Menon */ 320aa276781SNishanth Menon static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info, 321aa276781SNishanth Menon u16 msg_type, u32 msg_flags, 322aa276781SNishanth Menon size_t tx_message_size, 323aa276781SNishanth Menon size_t rx_message_size) 324aa276781SNishanth Menon { 325aa276781SNishanth Menon struct ti_sci_xfers_info *minfo = &info->minfo; 326aa276781SNishanth Menon struct ti_sci_xfer *xfer; 327aa276781SNishanth Menon struct ti_sci_msg_hdr *hdr; 328aa276781SNishanth Menon unsigned long flags; 329aa276781SNishanth Menon unsigned long bit_pos; 330aa276781SNishanth Menon u8 xfer_id; 331aa276781SNishanth Menon int ret; 332aa276781SNishanth Menon int timeout; 333aa276781SNishanth Menon 334aa276781SNishanth Menon /* Ensure we have sane transfer sizes */ 335aa276781SNishanth Menon if (rx_message_size > info->desc->max_msg_size || 336aa276781SNishanth Menon tx_message_size > info->desc->max_msg_size || 337aa276781SNishanth Menon rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr)) 338aa276781SNishanth Menon return ERR_PTR(-ERANGE); 339aa276781SNishanth Menon 340aa276781SNishanth Menon /* 341aa276781SNishanth Menon * Ensure we have only controlled number of pending messages. 342aa276781SNishanth Menon * Ideally, we might just have to wait a single message, be 343aa276781SNishanth Menon * conservative and wait 5 times that.. 
344aa276781SNishanth Menon */ 345aa276781SNishanth Menon timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5; 346aa276781SNishanth Menon ret = down_timeout(&minfo->sem_xfer_count, timeout); 347aa276781SNishanth Menon if (ret < 0) 348aa276781SNishanth Menon return ERR_PTR(ret); 349aa276781SNishanth Menon 350aa276781SNishanth Menon /* Keep the locked section as small as possible */ 351aa276781SNishanth Menon spin_lock_irqsave(&minfo->xfer_lock, flags); 352aa276781SNishanth Menon bit_pos = find_first_zero_bit(minfo->xfer_alloc_table, 353aa276781SNishanth Menon info->desc->max_msgs); 354aa276781SNishanth Menon set_bit(bit_pos, minfo->xfer_alloc_table); 355aa276781SNishanth Menon spin_unlock_irqrestore(&minfo->xfer_lock, flags); 356aa276781SNishanth Menon 357aa276781SNishanth Menon /* 358aa276781SNishanth Menon * We already ensured in probe that we can have max messages that can 359aa276781SNishanth Menon * fit in hdr.seq - NOTE: this improves access latencies 360aa276781SNishanth Menon * to predictable O(1) access, BUT, it opens us to risk if 361aa276781SNishanth Menon * remote misbehaves with corrupted message sequence responses. 362aa276781SNishanth Menon * If that happens, we are going to be messed up anyways.. 
363aa276781SNishanth Menon */ 364aa276781SNishanth Menon xfer_id = (u8)bit_pos; 365aa276781SNishanth Menon 366aa276781SNishanth Menon xfer = &minfo->xfer_block[xfer_id]; 367aa276781SNishanth Menon 368aa276781SNishanth Menon hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 369aa276781SNishanth Menon xfer->tx_message.len = tx_message_size; 370aa276781SNishanth Menon xfer->rx_len = (u8)rx_message_size; 371aa276781SNishanth Menon 372aa276781SNishanth Menon reinit_completion(&xfer->done); 373aa276781SNishanth Menon 374aa276781SNishanth Menon hdr->seq = xfer_id; 375aa276781SNishanth Menon hdr->type = msg_type; 376e69a3553SNishanth Menon hdr->host = info->host_id; 377aa276781SNishanth Menon hdr->flags = msg_flags; 378aa276781SNishanth Menon 379aa276781SNishanth Menon return xfer; 380aa276781SNishanth Menon } 381aa276781SNishanth Menon 382aa276781SNishanth Menon /** 383aa276781SNishanth Menon * ti_sci_put_one_xfer() - Release a message 384aa276781SNishanth Menon * @minfo: transfer info pointer 385aa276781SNishanth Menon * @xfer: message that was reserved by ti_sci_get_one_xfer 386aa276781SNishanth Menon * 387aa276781SNishanth Menon * This holds a spinlock to maintain integrity of internal data structures. 388aa276781SNishanth Menon */ 389aa276781SNishanth Menon static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo, 390aa276781SNishanth Menon struct ti_sci_xfer *xfer) 391aa276781SNishanth Menon { 392aa276781SNishanth Menon unsigned long flags; 393aa276781SNishanth Menon struct ti_sci_msg_hdr *hdr; 394aa276781SNishanth Menon u8 xfer_id; 395aa276781SNishanth Menon 396aa276781SNishanth Menon hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 397aa276781SNishanth Menon xfer_id = hdr->seq; 398aa276781SNishanth Menon 399aa276781SNishanth Menon /* 400aa276781SNishanth Menon * Keep the locked section as small as possible 401aa276781SNishanth Menon * NOTE: we might escape with smp_mb and no lock here.. 
402aa276781SNishanth Menon * but just be conservative and symmetric. 403aa276781SNishanth Menon */ 404aa276781SNishanth Menon spin_lock_irqsave(&minfo->xfer_lock, flags); 405aa276781SNishanth Menon clear_bit(xfer_id, minfo->xfer_alloc_table); 406aa276781SNishanth Menon spin_unlock_irqrestore(&minfo->xfer_lock, flags); 407aa276781SNishanth Menon 408aa276781SNishanth Menon /* Increment the count for the next user to get through */ 409aa276781SNishanth Menon up(&minfo->sem_xfer_count); 410aa276781SNishanth Menon } 411aa276781SNishanth Menon 412aa276781SNishanth Menon /** 413aa276781SNishanth Menon * ti_sci_do_xfer() - Do one transfer 414aa276781SNishanth Menon * @info: Pointer to SCI entity information 415aa276781SNishanth Menon * @xfer: Transfer to initiate and wait for response 416aa276781SNishanth Menon * 417aa276781SNishanth Menon * Return: -ETIMEDOUT in case of no response, if transmit error, 418aa276781SNishanth Menon * return corresponding error, else if all goes well, 419aa276781SNishanth Menon * return 0. 420aa276781SNishanth Menon */ 421aa276781SNishanth Menon static inline int ti_sci_do_xfer(struct ti_sci_info *info, 422aa276781SNishanth Menon struct ti_sci_xfer *xfer) 423aa276781SNishanth Menon { 424aa276781SNishanth Menon int ret; 425aa276781SNishanth Menon int timeout; 426aa276781SNishanth Menon struct device *dev = info->dev; 427aa276781SNishanth Menon 428aa276781SNishanth Menon ret = mbox_send_message(info->chan_tx, &xfer->tx_message); 429aa276781SNishanth Menon if (ret < 0) 430aa276781SNishanth Menon return ret; 431aa276781SNishanth Menon 432aa276781SNishanth Menon ret = 0; 433aa276781SNishanth Menon 434aa276781SNishanth Menon /* And we wait for the response. 
*/ 435aa276781SNishanth Menon timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); 436aa276781SNishanth Menon if (!wait_for_completion_timeout(&xfer->done, timeout)) { 437595f3a9dSHelge Deller dev_err(dev, "Mbox timedout in resp(caller: %pS)\n", 438aa276781SNishanth Menon (void *)_RET_IP_); 439aa276781SNishanth Menon ret = -ETIMEDOUT; 440aa276781SNishanth Menon } 441aa276781SNishanth Menon /* 442aa276781SNishanth Menon * NOTE: we might prefer not to need the mailbox ticker to manage the 443aa276781SNishanth Menon * transfer queueing since the protocol layer queues things by itself. 444aa276781SNishanth Menon * Unfortunately, we have to kick the mailbox framework after we have 445aa276781SNishanth Menon * received our message. 446aa276781SNishanth Menon */ 447aa276781SNishanth Menon mbox_client_txdone(info->chan_tx, ret); 448aa276781SNishanth Menon 449aa276781SNishanth Menon return ret; 450aa276781SNishanth Menon } 451aa276781SNishanth Menon 452aa276781SNishanth Menon /** 453aa276781SNishanth Menon * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity 454aa276781SNishanth Menon * @info: Pointer to SCI entity information 455aa276781SNishanth Menon * 456aa276781SNishanth Menon * Updates the SCI information in the internal data structure. 457aa276781SNishanth Menon * 458aa276781SNishanth Menon * Return: 0 if all went fine, else return appropriate error. 
459aa276781SNishanth Menon */ 460aa276781SNishanth Menon static int ti_sci_cmd_get_revision(struct ti_sci_info *info) 461aa276781SNishanth Menon { 462aa276781SNishanth Menon struct device *dev = info->dev; 463aa276781SNishanth Menon struct ti_sci_handle *handle = &info->handle; 464aa276781SNishanth Menon struct ti_sci_version_info *ver = &handle->version; 465aa276781SNishanth Menon struct ti_sci_msg_resp_version *rev_info; 466aa276781SNishanth Menon struct ti_sci_xfer *xfer; 467aa276781SNishanth Menon int ret; 468aa276781SNishanth Menon 469aa276781SNishanth Menon xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION, 47066f030eaSAndrew F. Davis TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 47166f030eaSAndrew F. Davis sizeof(struct ti_sci_msg_hdr), 472aa276781SNishanth Menon sizeof(*rev_info)); 473aa276781SNishanth Menon if (IS_ERR(xfer)) { 474aa276781SNishanth Menon ret = PTR_ERR(xfer); 475aa276781SNishanth Menon dev_err(dev, "Message alloc failed(%d)\n", ret); 476aa276781SNishanth Menon return ret; 477aa276781SNishanth Menon } 478aa276781SNishanth Menon 479aa276781SNishanth Menon rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf; 480aa276781SNishanth Menon 481aa276781SNishanth Menon ret = ti_sci_do_xfer(info, xfer); 482aa276781SNishanth Menon if (ret) { 483aa276781SNishanth Menon dev_err(dev, "Mbox send fail %d\n", ret); 484aa276781SNishanth Menon goto fail; 485aa276781SNishanth Menon } 486aa276781SNishanth Menon 487aa276781SNishanth Menon ver->abi_major = rev_info->abi_major; 488aa276781SNishanth Menon ver->abi_minor = rev_info->abi_minor; 489aa276781SNishanth Menon ver->firmware_revision = rev_info->firmware_revision; 490aa276781SNishanth Menon strncpy(ver->firmware_description, rev_info->firmware_description, 491aa276781SNishanth Menon sizeof(ver->firmware_description)); 492aa276781SNishanth Menon 493aa276781SNishanth Menon fail: 494aa276781SNishanth Menon ti_sci_put_one_xfer(&info->minfo, xfer); 495aa276781SNishanth Menon return ret; 496aa276781SNishanth Menon } 
497aa276781SNishanth Menon 498aa276781SNishanth Menon /** 4999e7d756dSNishanth Menon * ti_sci_is_response_ack() - Generic ACK/NACK message checkup 5009e7d756dSNishanth Menon * @r: pointer to response buffer 5019e7d756dSNishanth Menon * 5029e7d756dSNishanth Menon * Return: true if the response was an ACK, else returns false. 5039e7d756dSNishanth Menon */ 5049e7d756dSNishanth Menon static inline bool ti_sci_is_response_ack(void *r) 5059e7d756dSNishanth Menon { 5069e7d756dSNishanth Menon struct ti_sci_msg_hdr *hdr = r; 5079e7d756dSNishanth Menon 5089e7d756dSNishanth Menon return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false; 5099e7d756dSNishanth Menon } 5109e7d756dSNishanth Menon 5119e7d756dSNishanth Menon /** 5129e7d756dSNishanth Menon * ti_sci_set_device_state() - Set device state helper 5139e7d756dSNishanth Menon * @handle: pointer to TI SCI handle 5149e7d756dSNishanth Menon * @id: Device identifier 5159e7d756dSNishanth Menon * @flags: flags to setup for the device 5169e7d756dSNishanth Menon * @state: State to move the device to 5179e7d756dSNishanth Menon * 5189e7d756dSNishanth Menon * Return: 0 if all went well, else returns appropriate error value. 
5199e7d756dSNishanth Menon */ 5209e7d756dSNishanth Menon static int ti_sci_set_device_state(const struct ti_sci_handle *handle, 5219e7d756dSNishanth Menon u32 id, u32 flags, u8 state) 5229e7d756dSNishanth Menon { 5239e7d756dSNishanth Menon struct ti_sci_info *info; 5249e7d756dSNishanth Menon struct ti_sci_msg_req_set_device_state *req; 5259e7d756dSNishanth Menon struct ti_sci_msg_hdr *resp; 5269e7d756dSNishanth Menon struct ti_sci_xfer *xfer; 5279e7d756dSNishanth Menon struct device *dev; 5289e7d756dSNishanth Menon int ret = 0; 5299e7d756dSNishanth Menon 5309e7d756dSNishanth Menon if (IS_ERR(handle)) 5319e7d756dSNishanth Menon return PTR_ERR(handle); 5329e7d756dSNishanth Menon if (!handle) 5339e7d756dSNishanth Menon return -EINVAL; 5349e7d756dSNishanth Menon 5359e7d756dSNishanth Menon info = handle_to_ti_sci_info(handle); 5369e7d756dSNishanth Menon dev = info->dev; 5379e7d756dSNishanth Menon 5389e7d756dSNishanth Menon xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE, 5399e7d756dSNishanth Menon flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 5409e7d756dSNishanth Menon sizeof(*req), sizeof(*resp)); 5419e7d756dSNishanth Menon if (IS_ERR(xfer)) { 5429e7d756dSNishanth Menon ret = PTR_ERR(xfer); 5439e7d756dSNishanth Menon dev_err(dev, "Message alloc failed(%d)\n", ret); 5449e7d756dSNishanth Menon return ret; 5459e7d756dSNishanth Menon } 5469e7d756dSNishanth Menon req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf; 5479e7d756dSNishanth Menon req->id = id; 5489e7d756dSNishanth Menon req->state = state; 5499e7d756dSNishanth Menon 5509e7d756dSNishanth Menon ret = ti_sci_do_xfer(info, xfer); 5519e7d756dSNishanth Menon if (ret) { 5529e7d756dSNishanth Menon dev_err(dev, "Mbox send fail %d\n", ret); 5539e7d756dSNishanth Menon goto fail; 5549e7d756dSNishanth Menon } 5559e7d756dSNishanth Menon 5569e7d756dSNishanth Menon resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 5579e7d756dSNishanth Menon 5589e7d756dSNishanth Menon ret = ti_sci_is_response_ack(resp) ? 
0 : -ENODEV; 5599e7d756dSNishanth Menon 5609e7d756dSNishanth Menon fail: 5619e7d756dSNishanth Menon ti_sci_put_one_xfer(&info->minfo, xfer); 5629e7d756dSNishanth Menon 5639e7d756dSNishanth Menon return ret; 5649e7d756dSNishanth Menon } 5659e7d756dSNishanth Menon 5669e7d756dSNishanth Menon /** 5679e7d756dSNishanth Menon * ti_sci_get_device_state() - Get device state helper 5689e7d756dSNishanth Menon * @handle: Handle to the device 5699e7d756dSNishanth Menon * @id: Device Identifier 5709e7d756dSNishanth Menon * @clcnt: Pointer to Context Loss Count 5719e7d756dSNishanth Menon * @resets: pointer to resets 5729e7d756dSNishanth Menon * @p_state: pointer to p_state 5739e7d756dSNishanth Menon * @c_state: pointer to c_state 5749e7d756dSNishanth Menon * 5759e7d756dSNishanth Menon * Return: 0 if all went fine, else return appropriate error. 5769e7d756dSNishanth Menon */ 5779e7d756dSNishanth Menon static int ti_sci_get_device_state(const struct ti_sci_handle *handle, 5789e7d756dSNishanth Menon u32 id, u32 *clcnt, u32 *resets, 5799e7d756dSNishanth Menon u8 *p_state, u8 *c_state) 5809e7d756dSNishanth Menon { 5819e7d756dSNishanth Menon struct ti_sci_info *info; 5829e7d756dSNishanth Menon struct ti_sci_msg_req_get_device_state *req; 5839e7d756dSNishanth Menon struct ti_sci_msg_resp_get_device_state *resp; 5849e7d756dSNishanth Menon struct ti_sci_xfer *xfer; 5859e7d756dSNishanth Menon struct device *dev; 5869e7d756dSNishanth Menon int ret = 0; 5879e7d756dSNishanth Menon 5889e7d756dSNishanth Menon if (IS_ERR(handle)) 5899e7d756dSNishanth Menon return PTR_ERR(handle); 5909e7d756dSNishanth Menon if (!handle) 5919e7d756dSNishanth Menon return -EINVAL; 5929e7d756dSNishanth Menon 5939e7d756dSNishanth Menon if (!clcnt && !resets && !p_state && !c_state) 5949e7d756dSNishanth Menon return -EINVAL; 5959e7d756dSNishanth Menon 5969e7d756dSNishanth Menon info = handle_to_ti_sci_info(handle); 5979e7d756dSNishanth Menon dev = info->dev; 5989e7d756dSNishanth Menon 5999e7d756dSNishanth Menon xfer = 
ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE, 60066f030eaSAndrew F. Davis TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 60166f030eaSAndrew F. Davis sizeof(*req), sizeof(*resp)); 6029e7d756dSNishanth Menon if (IS_ERR(xfer)) { 6039e7d756dSNishanth Menon ret = PTR_ERR(xfer); 6049e7d756dSNishanth Menon dev_err(dev, "Message alloc failed(%d)\n", ret); 6059e7d756dSNishanth Menon return ret; 6069e7d756dSNishanth Menon } 6079e7d756dSNishanth Menon req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf; 6089e7d756dSNishanth Menon req->id = id; 6099e7d756dSNishanth Menon 6109e7d756dSNishanth Menon ret = ti_sci_do_xfer(info, xfer); 6119e7d756dSNishanth Menon if (ret) { 6129e7d756dSNishanth Menon dev_err(dev, "Mbox send fail %d\n", ret); 6139e7d756dSNishanth Menon goto fail; 6149e7d756dSNishanth Menon } 6159e7d756dSNishanth Menon 6169e7d756dSNishanth Menon resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf; 6179e7d756dSNishanth Menon if (!ti_sci_is_response_ack(resp)) { 6189e7d756dSNishanth Menon ret = -ENODEV; 6199e7d756dSNishanth Menon goto fail; 6209e7d756dSNishanth Menon } 6219e7d756dSNishanth Menon 6229e7d756dSNishanth Menon if (clcnt) 6239e7d756dSNishanth Menon *clcnt = resp->context_loss_count; 6249e7d756dSNishanth Menon if (resets) 6259e7d756dSNishanth Menon *resets = resp->resets; 6269e7d756dSNishanth Menon if (p_state) 6279e7d756dSNishanth Menon *p_state = resp->programmed_state; 6289e7d756dSNishanth Menon if (c_state) 6299e7d756dSNishanth Menon *c_state = resp->current_state; 6309e7d756dSNishanth Menon fail: 6319e7d756dSNishanth Menon ti_sci_put_one_xfer(&info->minfo, xfer); 6329e7d756dSNishanth Menon 6339e7d756dSNishanth Menon return ret; 6349e7d756dSNishanth Menon } 6359e7d756dSNishanth Menon 6369e7d756dSNishanth Menon /** 6379e7d756dSNishanth Menon * ti_sci_cmd_get_device() - command to request for device managed by TISCI 638*45b659eeSLokesh Vutla * that can be shared with other hosts. 
6399e7d756dSNishanth Menon * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 6409e7d756dSNishanth Menon * @id: Device Identifier 6419e7d756dSNishanth Menon * 6429e7d756dSNishanth Menon * Request for the device - NOTE: the client MUST maintain integrity of 6439e7d756dSNishanth Menon * usage count by balancing get_device with put_device. No refcounting is 6449e7d756dSNishanth Menon * managed by driver for that purpose. 6459e7d756dSNishanth Menon * 6469e7d756dSNishanth Menon * Return: 0 if all went fine, else return appropriate error. 6479e7d756dSNishanth Menon */ 6489e7d756dSNishanth Menon static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id) 6499e7d756dSNishanth Menon { 650*45b659eeSLokesh Vutla return ti_sci_set_device_state(handle, id, 0, 651*45b659eeSLokesh Vutla MSG_DEVICE_SW_STATE_ON); 652*45b659eeSLokesh Vutla } 653*45b659eeSLokesh Vutla 654*45b659eeSLokesh Vutla /** 655*45b659eeSLokesh Vutla * ti_sci_cmd_get_device_exclusive() - command to request for device managed by 656*45b659eeSLokesh Vutla * TISCI that is exclusively owned by the 657*45b659eeSLokesh Vutla * requesting host. 658*45b659eeSLokesh Vutla * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 659*45b659eeSLokesh Vutla * @id: Device Identifier 660*45b659eeSLokesh Vutla * 661*45b659eeSLokesh Vutla * Request for the device - NOTE: the client MUST maintain integrity of 662*45b659eeSLokesh Vutla * usage count by balancing get_device with put_device. No refcounting is 663*45b659eeSLokesh Vutla * managed by driver for that purpose. 664*45b659eeSLokesh Vutla * 665*45b659eeSLokesh Vutla * Return: 0 if all went fine, else return appropriate error. 
666*45b659eeSLokesh Vutla */ 667*45b659eeSLokesh Vutla static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle, 668*45b659eeSLokesh Vutla u32 id) 669*45b659eeSLokesh Vutla { 6709e7d756dSNishanth Menon return ti_sci_set_device_state(handle, id, 6719e7d756dSNishanth Menon MSG_FLAG_DEVICE_EXCLUSIVE, 6729e7d756dSNishanth Menon MSG_DEVICE_SW_STATE_ON); 6739e7d756dSNishanth Menon } 6749e7d756dSNishanth Menon 6759e7d756dSNishanth Menon /** 6769e7d756dSNishanth Menon * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI 6779e7d756dSNishanth Menon * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 6789e7d756dSNishanth Menon * @id: Device Identifier 6799e7d756dSNishanth Menon * 6809e7d756dSNishanth Menon * Request for the device - NOTE: the client MUST maintain integrity of 6819e7d756dSNishanth Menon * usage count by balancing get_device with put_device. No refcounting is 6829e7d756dSNishanth Menon * managed by driver for that purpose. 6839e7d756dSNishanth Menon * 6849e7d756dSNishanth Menon * Return: 0 if all went fine, else return appropriate error. 6859e7d756dSNishanth Menon */ 6869e7d756dSNishanth Menon static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id) 6879e7d756dSNishanth Menon { 688*45b659eeSLokesh Vutla return ti_sci_set_device_state(handle, id, 0, 689*45b659eeSLokesh Vutla MSG_DEVICE_SW_STATE_RETENTION); 690*45b659eeSLokesh Vutla } 691*45b659eeSLokesh Vutla 692*45b659eeSLokesh Vutla /** 693*45b659eeSLokesh Vutla * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by 694*45b659eeSLokesh Vutla * TISCI that is exclusively owned by 695*45b659eeSLokesh Vutla * requesting host. 
696*45b659eeSLokesh Vutla * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 697*45b659eeSLokesh Vutla * @id: Device Identifier 698*45b659eeSLokesh Vutla * 699*45b659eeSLokesh Vutla * Request for the device - NOTE: the client MUST maintain integrity of 700*45b659eeSLokesh Vutla * usage count by balancing get_device with put_device. No refcounting is 701*45b659eeSLokesh Vutla * managed by driver for that purpose. 702*45b659eeSLokesh Vutla * 703*45b659eeSLokesh Vutla * Return: 0 if all went fine, else return appropriate error. 704*45b659eeSLokesh Vutla */ 705*45b659eeSLokesh Vutla static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle, 706*45b659eeSLokesh Vutla u32 id) 707*45b659eeSLokesh Vutla { 7089e7d756dSNishanth Menon return ti_sci_set_device_state(handle, id, 7099e7d756dSNishanth Menon MSG_FLAG_DEVICE_EXCLUSIVE, 7109e7d756dSNishanth Menon MSG_DEVICE_SW_STATE_RETENTION); 7119e7d756dSNishanth Menon } 7129e7d756dSNishanth Menon 7139e7d756dSNishanth Menon /** 7149e7d756dSNishanth Menon * ti_sci_cmd_put_device() - command to release a device managed by TISCI 7159e7d756dSNishanth Menon * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 7169e7d756dSNishanth Menon * @id: Device Identifier 7179e7d756dSNishanth Menon * 7189e7d756dSNishanth Menon * Request for the device - NOTE: the client MUST maintain integrity of 7199e7d756dSNishanth Menon * usage count by balancing get_device with put_device. No refcounting is 7209e7d756dSNishanth Menon * managed by driver for that purpose. 7219e7d756dSNishanth Menon * 7229e7d756dSNishanth Menon * Return: 0 if all went fine, else return appropriate error. 
7239e7d756dSNishanth Menon */ 7249e7d756dSNishanth Menon static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id) 7259e7d756dSNishanth Menon { 7269e7d756dSNishanth Menon return ti_sci_set_device_state(handle, id, 7279e7d756dSNishanth Menon 0, MSG_DEVICE_SW_STATE_AUTO_OFF); 7289e7d756dSNishanth Menon } 7299e7d756dSNishanth Menon 7309e7d756dSNishanth Menon /** 7319e7d756dSNishanth Menon * ti_sci_cmd_dev_is_valid() - Is the device valid 7329e7d756dSNishanth Menon * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 7339e7d756dSNishanth Menon * @id: Device Identifier 7349e7d756dSNishanth Menon * 7359e7d756dSNishanth Menon * Return: 0 if all went fine and the device ID is valid, else return 7369e7d756dSNishanth Menon * appropriate error. 7379e7d756dSNishanth Menon */ 7389e7d756dSNishanth Menon static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id) 7399e7d756dSNishanth Menon { 7409e7d756dSNishanth Menon u8 unused; 7419e7d756dSNishanth Menon 7429e7d756dSNishanth Menon /* check the device state which will also tell us if the ID is valid */ 7439e7d756dSNishanth Menon return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused); 7449e7d756dSNishanth Menon } 7459e7d756dSNishanth Menon 7469e7d756dSNishanth Menon /** 7479e7d756dSNishanth Menon * ti_sci_cmd_dev_get_clcnt() - Get context loss counter 7489e7d756dSNishanth Menon * @handle: Pointer to TISCI handle 7499e7d756dSNishanth Menon * @id: Device Identifier 7509e7d756dSNishanth Menon * @count: Pointer to Context Loss counter to populate 7519e7d756dSNishanth Menon * 7529e7d756dSNishanth Menon * Return: 0 if all went fine, else return appropriate error. 
7539e7d756dSNishanth Menon */ 7549e7d756dSNishanth Menon static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id, 7559e7d756dSNishanth Menon u32 *count) 7569e7d756dSNishanth Menon { 7579e7d756dSNishanth Menon return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL); 7589e7d756dSNishanth Menon } 7599e7d756dSNishanth Menon 7609e7d756dSNishanth Menon /** 7619e7d756dSNishanth Menon * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle 7629e7d756dSNishanth Menon * @handle: Pointer to TISCI handle 7639e7d756dSNishanth Menon * @id: Device Identifier 7649e7d756dSNishanth Menon * @r_state: true if requested to be idle 7659e7d756dSNishanth Menon * 7669e7d756dSNishanth Menon * Return: 0 if all went fine, else return appropriate error. 7679e7d756dSNishanth Menon */ 7689e7d756dSNishanth Menon static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id, 7699e7d756dSNishanth Menon bool *r_state) 7709e7d756dSNishanth Menon { 7719e7d756dSNishanth Menon int ret; 7729e7d756dSNishanth Menon u8 state; 7739e7d756dSNishanth Menon 7749e7d756dSNishanth Menon if (!r_state) 7759e7d756dSNishanth Menon return -EINVAL; 7769e7d756dSNishanth Menon 7779e7d756dSNishanth Menon ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL); 7789e7d756dSNishanth Menon if (ret) 7799e7d756dSNishanth Menon return ret; 7809e7d756dSNishanth Menon 7819e7d756dSNishanth Menon *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION); 7829e7d756dSNishanth Menon 7839e7d756dSNishanth Menon return 0; 7849e7d756dSNishanth Menon } 7859e7d756dSNishanth Menon 7869e7d756dSNishanth Menon /** 7879e7d756dSNishanth Menon * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped 7889e7d756dSNishanth Menon * @handle: Pointer to TISCI handle 7899e7d756dSNishanth Menon * @id: Device Identifier 7909e7d756dSNishanth Menon * @r_state: true if requested to be stopped 7919e7d756dSNishanth Menon * @curr_state: true if currently stopped. 
7929e7d756dSNishanth Menon * 7939e7d756dSNishanth Menon * Return: 0 if all went fine, else return appropriate error. 7949e7d756dSNishanth Menon */ 7959e7d756dSNishanth Menon static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id, 7969e7d756dSNishanth Menon bool *r_state, bool *curr_state) 7979e7d756dSNishanth Menon { 7989e7d756dSNishanth Menon int ret; 7999e7d756dSNishanth Menon u8 p_state, c_state; 8009e7d756dSNishanth Menon 8019e7d756dSNishanth Menon if (!r_state && !curr_state) 8029e7d756dSNishanth Menon return -EINVAL; 8039e7d756dSNishanth Menon 8049e7d756dSNishanth Menon ret = 8059e7d756dSNishanth Menon ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state); 8069e7d756dSNishanth Menon if (ret) 8079e7d756dSNishanth Menon return ret; 8089e7d756dSNishanth Menon 8099e7d756dSNishanth Menon if (r_state) 8109e7d756dSNishanth Menon *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF); 8119e7d756dSNishanth Menon if (curr_state) 8129e7d756dSNishanth Menon *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF); 8139e7d756dSNishanth Menon 8149e7d756dSNishanth Menon return 0; 8159e7d756dSNishanth Menon } 8169e7d756dSNishanth Menon 8179e7d756dSNishanth Menon /** 8189e7d756dSNishanth Menon * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON 8199e7d756dSNishanth Menon * @handle: Pointer to TISCI handle 8209e7d756dSNishanth Menon * @id: Device Identifier 8219e7d756dSNishanth Menon * @r_state: true if requested to be ON 8229e7d756dSNishanth Menon * @curr_state: true if currently ON and active 8239e7d756dSNishanth Menon * 8249e7d756dSNishanth Menon * Return: 0 if all went fine, else return appropriate error. 
8259e7d756dSNishanth Menon */ 8269e7d756dSNishanth Menon static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id, 8279e7d756dSNishanth Menon bool *r_state, bool *curr_state) 8289e7d756dSNishanth Menon { 8299e7d756dSNishanth Menon int ret; 8309e7d756dSNishanth Menon u8 p_state, c_state; 8319e7d756dSNishanth Menon 8329e7d756dSNishanth Menon if (!r_state && !curr_state) 8339e7d756dSNishanth Menon return -EINVAL; 8349e7d756dSNishanth Menon 8359e7d756dSNishanth Menon ret = 8369e7d756dSNishanth Menon ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state); 8379e7d756dSNishanth Menon if (ret) 8389e7d756dSNishanth Menon return ret; 8399e7d756dSNishanth Menon 8409e7d756dSNishanth Menon if (r_state) 8419e7d756dSNishanth Menon *r_state = (p_state == MSG_DEVICE_SW_STATE_ON); 8429e7d756dSNishanth Menon if (curr_state) 8439e7d756dSNishanth Menon *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON); 8449e7d756dSNishanth Menon 8459e7d756dSNishanth Menon return 0; 8469e7d756dSNishanth Menon } 8479e7d756dSNishanth Menon 8489e7d756dSNishanth Menon /** 8499e7d756dSNishanth Menon * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning 8509e7d756dSNishanth Menon * @handle: Pointer to TISCI handle 8519e7d756dSNishanth Menon * @id: Device Identifier 8529e7d756dSNishanth Menon * @curr_state: true if currently transitioning. 8539e7d756dSNishanth Menon * 8549e7d756dSNishanth Menon * Return: 0 if all went fine, else return appropriate error. 
8559e7d756dSNishanth Menon */ 8569e7d756dSNishanth Menon static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id, 8579e7d756dSNishanth Menon bool *curr_state) 8589e7d756dSNishanth Menon { 8599e7d756dSNishanth Menon int ret; 8609e7d756dSNishanth Menon u8 state; 8619e7d756dSNishanth Menon 8629e7d756dSNishanth Menon if (!curr_state) 8639e7d756dSNishanth Menon return -EINVAL; 8649e7d756dSNishanth Menon 8659e7d756dSNishanth Menon ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state); 8669e7d756dSNishanth Menon if (ret) 8679e7d756dSNishanth Menon return ret; 8689e7d756dSNishanth Menon 8699e7d756dSNishanth Menon *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS); 8709e7d756dSNishanth Menon 8719e7d756dSNishanth Menon return 0; 8729e7d756dSNishanth Menon } 8739e7d756dSNishanth Menon 8749e7d756dSNishanth Menon /** 8759e7d756dSNishanth Menon * ti_sci_cmd_set_device_resets() - command to set resets for device managed 8769e7d756dSNishanth Menon * by TISCI 8779e7d756dSNishanth Menon * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 8789e7d756dSNishanth Menon * @id: Device Identifier 8799e7d756dSNishanth Menon * @reset_state: Device specific reset bit field 8809e7d756dSNishanth Menon * 8819e7d756dSNishanth Menon * Return: 0 if all went fine, else return appropriate error. 
8829e7d756dSNishanth Menon */ 8839e7d756dSNishanth Menon static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle, 8849e7d756dSNishanth Menon u32 id, u32 reset_state) 8859e7d756dSNishanth Menon { 8869e7d756dSNishanth Menon struct ti_sci_info *info; 8879e7d756dSNishanth Menon struct ti_sci_msg_req_set_device_resets *req; 8889e7d756dSNishanth Menon struct ti_sci_msg_hdr *resp; 8899e7d756dSNishanth Menon struct ti_sci_xfer *xfer; 8909e7d756dSNishanth Menon struct device *dev; 8919e7d756dSNishanth Menon int ret = 0; 8929e7d756dSNishanth Menon 8939e7d756dSNishanth Menon if (IS_ERR(handle)) 8949e7d756dSNishanth Menon return PTR_ERR(handle); 8959e7d756dSNishanth Menon if (!handle) 8969e7d756dSNishanth Menon return -EINVAL; 8979e7d756dSNishanth Menon 8989e7d756dSNishanth Menon info = handle_to_ti_sci_info(handle); 8999e7d756dSNishanth Menon dev = info->dev; 9009e7d756dSNishanth Menon 9019e7d756dSNishanth Menon xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS, 9029e7d756dSNishanth Menon TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 9039e7d756dSNishanth Menon sizeof(*req), sizeof(*resp)); 9049e7d756dSNishanth Menon if (IS_ERR(xfer)) { 9059e7d756dSNishanth Menon ret = PTR_ERR(xfer); 9069e7d756dSNishanth Menon dev_err(dev, "Message alloc failed(%d)\n", ret); 9079e7d756dSNishanth Menon return ret; 9089e7d756dSNishanth Menon } 9099e7d756dSNishanth Menon req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf; 9109e7d756dSNishanth Menon req->id = id; 9119e7d756dSNishanth Menon req->resets = reset_state; 9129e7d756dSNishanth Menon 9139e7d756dSNishanth Menon ret = ti_sci_do_xfer(info, xfer); 9149e7d756dSNishanth Menon if (ret) { 9159e7d756dSNishanth Menon dev_err(dev, "Mbox send fail %d\n", ret); 9169e7d756dSNishanth Menon goto fail; 9179e7d756dSNishanth Menon } 9189e7d756dSNishanth Menon 9199e7d756dSNishanth Menon resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 9209e7d756dSNishanth Menon 9219e7d756dSNishanth Menon ret = ti_sci_is_response_ack(resp) ? 
0 : -ENODEV; 9229e7d756dSNishanth Menon 9239e7d756dSNishanth Menon fail: 9249e7d756dSNishanth Menon ti_sci_put_one_xfer(&info->minfo, xfer); 9259e7d756dSNishanth Menon 9269e7d756dSNishanth Menon return ret; 9279e7d756dSNishanth Menon } 9289e7d756dSNishanth Menon 9299e7d756dSNishanth Menon /** 9309e7d756dSNishanth Menon * ti_sci_cmd_get_device_resets() - Get reset state for device managed 9319e7d756dSNishanth Menon * by TISCI 9329e7d756dSNishanth Menon * @handle: Pointer to TISCI handle 9339e7d756dSNishanth Menon * @id: Device Identifier 9349e7d756dSNishanth Menon * @reset_state: Pointer to reset state to populate 9359e7d756dSNishanth Menon * 9369e7d756dSNishanth Menon * Return: 0 if all went fine, else return appropriate error. 9379e7d756dSNishanth Menon */ 9389e7d756dSNishanth Menon static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle, 9399e7d756dSNishanth Menon u32 id, u32 *reset_state) 9409e7d756dSNishanth Menon { 9419e7d756dSNishanth Menon return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL, 9429e7d756dSNishanth Menon NULL); 9439e7d756dSNishanth Menon } 9449e7d756dSNishanth Menon 9459f723220SNishanth Menon /** 9469f723220SNishanth Menon * ti_sci_set_clock_state() - Set clock state helper 9479f723220SNishanth Menon * @handle: pointer to TI SCI handle 9489f723220SNishanth Menon * @dev_id: Device identifier this request is for 9499f723220SNishanth Menon * @clk_id: Clock identifier for the device for this request. 9509f723220SNishanth Menon * Each device has it's own set of clock inputs. This indexes 9519f723220SNishanth Menon * which clock input to modify. 9529f723220SNishanth Menon * @flags: Header flags as needed 9539f723220SNishanth Menon * @state: State to request for the clock. 9549f723220SNishanth Menon * 9559f723220SNishanth Menon * Return: 0 if all went well, else returns appropriate error value. 
9569f723220SNishanth Menon */ 9579f723220SNishanth Menon static int ti_sci_set_clock_state(const struct ti_sci_handle *handle, 95881f4458cSTero Kristo u32 dev_id, u32 clk_id, 9599f723220SNishanth Menon u32 flags, u8 state) 9609f723220SNishanth Menon { 9619f723220SNishanth Menon struct ti_sci_info *info; 9629f723220SNishanth Menon struct ti_sci_msg_req_set_clock_state *req; 9639f723220SNishanth Menon struct ti_sci_msg_hdr *resp; 9649f723220SNishanth Menon struct ti_sci_xfer *xfer; 9659f723220SNishanth Menon struct device *dev; 9669f723220SNishanth Menon int ret = 0; 9679f723220SNishanth Menon 9689f723220SNishanth Menon if (IS_ERR(handle)) 9699f723220SNishanth Menon return PTR_ERR(handle); 9709f723220SNishanth Menon if (!handle) 9719f723220SNishanth Menon return -EINVAL; 9729f723220SNishanth Menon 9739f723220SNishanth Menon info = handle_to_ti_sci_info(handle); 9749f723220SNishanth Menon dev = info->dev; 9759f723220SNishanth Menon 9769f723220SNishanth Menon xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE, 9779f723220SNishanth Menon flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 9789f723220SNishanth Menon sizeof(*req), sizeof(*resp)); 9799f723220SNishanth Menon if (IS_ERR(xfer)) { 9809f723220SNishanth Menon ret = PTR_ERR(xfer); 9819f723220SNishanth Menon dev_err(dev, "Message alloc failed(%d)\n", ret); 9829f723220SNishanth Menon return ret; 9839f723220SNishanth Menon } 9849f723220SNishanth Menon req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf; 9859f723220SNishanth Menon req->dev_id = dev_id; 98681f4458cSTero Kristo if (clk_id < 255) { 9879f723220SNishanth Menon req->clk_id = clk_id; 98881f4458cSTero Kristo } else { 98981f4458cSTero Kristo req->clk_id = 255; 99081f4458cSTero Kristo req->clk_id_32 = clk_id; 99181f4458cSTero Kristo } 9929f723220SNishanth Menon req->request_state = state; 9939f723220SNishanth Menon 9949f723220SNishanth Menon ret = ti_sci_do_xfer(info, xfer); 9959f723220SNishanth Menon if (ret) { 9969f723220SNishanth Menon dev_err(dev, 
"Mbox send fail %d\n", ret); 9979f723220SNishanth Menon goto fail; 9989f723220SNishanth Menon } 9999f723220SNishanth Menon 10009f723220SNishanth Menon resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 10019f723220SNishanth Menon 10029f723220SNishanth Menon ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 10039f723220SNishanth Menon 10049f723220SNishanth Menon fail: 10059f723220SNishanth Menon ti_sci_put_one_xfer(&info->minfo, xfer); 10069f723220SNishanth Menon 10079f723220SNishanth Menon return ret; 10089f723220SNishanth Menon } 10099f723220SNishanth Menon 10109f723220SNishanth Menon /** 10119f723220SNishanth Menon * ti_sci_cmd_get_clock_state() - Get clock state helper 10129f723220SNishanth Menon * @handle: pointer to TI SCI handle 10139f723220SNishanth Menon * @dev_id: Device identifier this request is for 10149f723220SNishanth Menon * @clk_id: Clock identifier for the device for this request. 10159f723220SNishanth Menon * Each device has it's own set of clock inputs. This indexes 10169f723220SNishanth Menon * which clock input to modify. 10179f723220SNishanth Menon * @programmed_state: State requested for clock to move to 10189f723220SNishanth Menon * @current_state: State that the clock is currently in 10199f723220SNishanth Menon * 10209f723220SNishanth Menon * Return: 0 if all went well, else returns appropriate error value. 
10219f723220SNishanth Menon */ 10229f723220SNishanth Menon static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle, 102381f4458cSTero Kristo u32 dev_id, u32 clk_id, 10249f723220SNishanth Menon u8 *programmed_state, u8 *current_state) 10259f723220SNishanth Menon { 10269f723220SNishanth Menon struct ti_sci_info *info; 10279f723220SNishanth Menon struct ti_sci_msg_req_get_clock_state *req; 10289f723220SNishanth Menon struct ti_sci_msg_resp_get_clock_state *resp; 10299f723220SNishanth Menon struct ti_sci_xfer *xfer; 10309f723220SNishanth Menon struct device *dev; 10319f723220SNishanth Menon int ret = 0; 10329f723220SNishanth Menon 10339f723220SNishanth Menon if (IS_ERR(handle)) 10349f723220SNishanth Menon return PTR_ERR(handle); 10359f723220SNishanth Menon if (!handle) 10369f723220SNishanth Menon return -EINVAL; 10379f723220SNishanth Menon 10389f723220SNishanth Menon if (!programmed_state && !current_state) 10399f723220SNishanth Menon return -EINVAL; 10409f723220SNishanth Menon 10419f723220SNishanth Menon info = handle_to_ti_sci_info(handle); 10429f723220SNishanth Menon dev = info->dev; 10439f723220SNishanth Menon 10449f723220SNishanth Menon xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE, 10459f723220SNishanth Menon TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 10469f723220SNishanth Menon sizeof(*req), sizeof(*resp)); 10479f723220SNishanth Menon if (IS_ERR(xfer)) { 10489f723220SNishanth Menon ret = PTR_ERR(xfer); 10499f723220SNishanth Menon dev_err(dev, "Message alloc failed(%d)\n", ret); 10509f723220SNishanth Menon return ret; 10519f723220SNishanth Menon } 10529f723220SNishanth Menon req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf; 10539f723220SNishanth Menon req->dev_id = dev_id; 105481f4458cSTero Kristo if (clk_id < 255) { 10559f723220SNishanth Menon req->clk_id = clk_id; 105681f4458cSTero Kristo } else { 105781f4458cSTero Kristo req->clk_id = 255; 105881f4458cSTero Kristo req->clk_id_32 = clk_id; 105981f4458cSTero Kristo } 
10609f723220SNishanth Menon 10619f723220SNishanth Menon ret = ti_sci_do_xfer(info, xfer); 10629f723220SNishanth Menon if (ret) { 10639f723220SNishanth Menon dev_err(dev, "Mbox send fail %d\n", ret); 10649f723220SNishanth Menon goto fail; 10659f723220SNishanth Menon } 10669f723220SNishanth Menon 10679f723220SNishanth Menon resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf; 10689f723220SNishanth Menon 10699f723220SNishanth Menon if (!ti_sci_is_response_ack(resp)) { 10709f723220SNishanth Menon ret = -ENODEV; 10719f723220SNishanth Menon goto fail; 10729f723220SNishanth Menon } 10739f723220SNishanth Menon 10749f723220SNishanth Menon if (programmed_state) 10759f723220SNishanth Menon *programmed_state = resp->programmed_state; 10769f723220SNishanth Menon if (current_state) 10779f723220SNishanth Menon *current_state = resp->current_state; 10789f723220SNishanth Menon 10799f723220SNishanth Menon fail: 10809f723220SNishanth Menon ti_sci_put_one_xfer(&info->minfo, xfer); 10819f723220SNishanth Menon 10829f723220SNishanth Menon return ret; 10839f723220SNishanth Menon } 10849f723220SNishanth Menon 10859f723220SNishanth Menon /** 10869f723220SNishanth Menon * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI 10879f723220SNishanth Menon * @handle: pointer to TI SCI handle 10889f723220SNishanth Menon * @dev_id: Device identifier this request is for 10899f723220SNishanth Menon * @clk_id: Clock identifier for the device for this request. 10909f723220SNishanth Menon * Each device has it's own set of clock inputs. This indexes 10919f723220SNishanth Menon * which clock input to modify. 
10929f723220SNishanth Menon * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false' 10939f723220SNishanth Menon * @can_change_freq: 'true' if frequency change is desired, else 'false' 10949f723220SNishanth Menon * @enable_input_term: 'true' if input termination is desired, else 'false' 10959f723220SNishanth Menon * 10969f723220SNishanth Menon * Return: 0 if all went well, else returns appropriate error value. 10979f723220SNishanth Menon */ 10989f723220SNishanth Menon static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id, 109981f4458cSTero Kristo u32 clk_id, bool needs_ssc, 110081f4458cSTero Kristo bool can_change_freq, bool enable_input_term) 11019f723220SNishanth Menon { 11029f723220SNishanth Menon u32 flags = 0; 11039f723220SNishanth Menon 11049f723220SNishanth Menon flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0; 11059f723220SNishanth Menon flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0; 11069f723220SNishanth Menon flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0; 11079f723220SNishanth Menon 11089f723220SNishanth Menon return ti_sci_set_clock_state(handle, dev_id, clk_id, flags, 11099f723220SNishanth Menon MSG_CLOCK_SW_STATE_REQ); 11109f723220SNishanth Menon } 11119f723220SNishanth Menon 11129f723220SNishanth Menon /** 11139f723220SNishanth Menon * ti_sci_cmd_idle_clock() - Idle a clock which is in our control 11149f723220SNishanth Menon * @handle: pointer to TI SCI handle 11159f723220SNishanth Menon * @dev_id: Device identifier this request is for 11169f723220SNishanth Menon * @clk_id: Clock identifier for the device for this request. 11179f723220SNishanth Menon * Each device has it's own set of clock inputs. This indexes 11189f723220SNishanth Menon * which clock input to modify. 11199f723220SNishanth Menon * 11209f723220SNishanth Menon * NOTE: This clock must have been requested by get_clock previously. 
11219f723220SNishanth Menon * 11229f723220SNishanth Menon * Return: 0 if all went well, else returns appropriate error value. 11239f723220SNishanth Menon */ 11249f723220SNishanth Menon static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle, 112581f4458cSTero Kristo u32 dev_id, u32 clk_id) 11269f723220SNishanth Menon { 11279f723220SNishanth Menon return ti_sci_set_clock_state(handle, dev_id, clk_id, 0, 11289f723220SNishanth Menon MSG_CLOCK_SW_STATE_UNREQ); 11299f723220SNishanth Menon } 11309f723220SNishanth Menon 11319f723220SNishanth Menon /** 11329f723220SNishanth Menon * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI 11339f723220SNishanth Menon * @handle: pointer to TI SCI handle 11349f723220SNishanth Menon * @dev_id: Device identifier this request is for 11359f723220SNishanth Menon * @clk_id: Clock identifier for the device for this request. 11369f723220SNishanth Menon * Each device has it's own set of clock inputs. This indexes 11379f723220SNishanth Menon * which clock input to modify. 11389f723220SNishanth Menon * 11399f723220SNishanth Menon * NOTE: This clock must have been requested by get_clock previously. 11409f723220SNishanth Menon * 11419f723220SNishanth Menon * Return: 0 if all went well, else returns appropriate error value. 
11429f723220SNishanth Menon */ 11439f723220SNishanth Menon static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle, 114481f4458cSTero Kristo u32 dev_id, u32 clk_id) 11459f723220SNishanth Menon { 11469f723220SNishanth Menon return ti_sci_set_clock_state(handle, dev_id, clk_id, 0, 11479f723220SNishanth Menon MSG_CLOCK_SW_STATE_AUTO); 11489f723220SNishanth Menon } 11499f723220SNishanth Menon 11509f723220SNishanth Menon /** 11519f723220SNishanth Menon * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed 11529f723220SNishanth Menon * @handle: pointer to TI SCI handle 11539f723220SNishanth Menon * @dev_id: Device identifier this request is for 11549f723220SNishanth Menon * @clk_id: Clock identifier for the device for this request. 11559f723220SNishanth Menon * Each device has it's own set of clock inputs. This indexes 11569f723220SNishanth Menon * which clock input to modify. 11579f723220SNishanth Menon * @req_state: state indicating if the clock is auto managed 11589f723220SNishanth Menon * 11599f723220SNishanth Menon * Return: 0 if all went well, else returns appropriate error value. 
11609f723220SNishanth Menon */ 11619f723220SNishanth Menon static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle, 116281f4458cSTero Kristo u32 dev_id, u32 clk_id, bool *req_state) 11639f723220SNishanth Menon { 11649f723220SNishanth Menon u8 state = 0; 11659f723220SNishanth Menon int ret; 11669f723220SNishanth Menon 11679f723220SNishanth Menon if (!req_state) 11689f723220SNishanth Menon return -EINVAL; 11699f723220SNishanth Menon 11709f723220SNishanth Menon ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL); 11719f723220SNishanth Menon if (ret) 11729f723220SNishanth Menon return ret; 11739f723220SNishanth Menon 11749f723220SNishanth Menon *req_state = (state == MSG_CLOCK_SW_STATE_AUTO); 11759f723220SNishanth Menon return 0; 11769f723220SNishanth Menon } 11779f723220SNishanth Menon 11789f723220SNishanth Menon /** 11799f723220SNishanth Menon * ti_sci_cmd_clk_is_on() - Is the clock ON 11809f723220SNishanth Menon * @handle: pointer to TI SCI handle 11819f723220SNishanth Menon * @dev_id: Device identifier this request is for 11829f723220SNishanth Menon * @clk_id: Clock identifier for the device for this request. 11839f723220SNishanth Menon * Each device has it's own set of clock inputs. This indexes 11849f723220SNishanth Menon * which clock input to modify. 11859f723220SNishanth Menon * @req_state: state indicating if the clock is managed by us and enabled 11869f723220SNishanth Menon * @curr_state: state indicating if the clock is ready for operation 11879f723220SNishanth Menon * 11889f723220SNishanth Menon * Return: 0 if all went well, else returns appropriate error value. 
11899f723220SNishanth Menon */ 11909f723220SNishanth Menon static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id, 119181f4458cSTero Kristo u32 clk_id, bool *req_state, bool *curr_state) 11929f723220SNishanth Menon { 11939f723220SNishanth Menon u8 c_state = 0, r_state = 0; 11949f723220SNishanth Menon int ret; 11959f723220SNishanth Menon 11969f723220SNishanth Menon if (!req_state && !curr_state) 11979f723220SNishanth Menon return -EINVAL; 11989f723220SNishanth Menon 11999f723220SNishanth Menon ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, 12009f723220SNishanth Menon &r_state, &c_state); 12019f723220SNishanth Menon if (ret) 12029f723220SNishanth Menon return ret; 12039f723220SNishanth Menon 12049f723220SNishanth Menon if (req_state) 12059f723220SNishanth Menon *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ); 12069f723220SNishanth Menon if (curr_state) 12079f723220SNishanth Menon *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY); 12089f723220SNishanth Menon return 0; 12099f723220SNishanth Menon } 12109f723220SNishanth Menon 12119f723220SNishanth Menon /** 12129f723220SNishanth Menon * ti_sci_cmd_clk_is_off() - Is the clock OFF 12139f723220SNishanth Menon * @handle: pointer to TI SCI handle 12149f723220SNishanth Menon * @dev_id: Device identifier this request is for 12159f723220SNishanth Menon * @clk_id: Clock identifier for the device for this request. 12169f723220SNishanth Menon * Each device has it's own set of clock inputs. This indexes 12179f723220SNishanth Menon * which clock input to modify. 12189f723220SNishanth Menon * @req_state: state indicating if the clock is managed by us and disabled 12199f723220SNishanth Menon * @curr_state: state indicating if the clock is NOT ready for operation 12209f723220SNishanth Menon * 12219f723220SNishanth Menon * Return: 0 if all went well, else returns appropriate error value. 
12229f723220SNishanth Menon */ 12239f723220SNishanth Menon static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id, 122481f4458cSTero Kristo u32 clk_id, bool *req_state, bool *curr_state) 12259f723220SNishanth Menon { 12269f723220SNishanth Menon u8 c_state = 0, r_state = 0; 12279f723220SNishanth Menon int ret; 12289f723220SNishanth Menon 12299f723220SNishanth Menon if (!req_state && !curr_state) 12309f723220SNishanth Menon return -EINVAL; 12319f723220SNishanth Menon 12329f723220SNishanth Menon ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, 12339f723220SNishanth Menon &r_state, &c_state); 12349f723220SNishanth Menon if (ret) 12359f723220SNishanth Menon return ret; 12369f723220SNishanth Menon 12379f723220SNishanth Menon if (req_state) 12389f723220SNishanth Menon *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ); 12399f723220SNishanth Menon if (curr_state) 12409f723220SNishanth Menon *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY); 12419f723220SNishanth Menon return 0; 12429f723220SNishanth Menon } 12439f723220SNishanth Menon 12449f723220SNishanth Menon /** 12459f723220SNishanth Menon * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock 12469f723220SNishanth Menon * @handle: pointer to TI SCI handle 12479f723220SNishanth Menon * @dev_id: Device identifier this request is for 12489f723220SNishanth Menon * @clk_id: Clock identifier for the device for this request. 12499f723220SNishanth Menon * Each device has it's own set of clock inputs. This indexes 12509f723220SNishanth Menon * which clock input to modify. 12519f723220SNishanth Menon * @parent_id: Parent clock identifier to set 12529f723220SNishanth Menon * 12539f723220SNishanth Menon * Return: 0 if all went well, else returns appropriate error value. 
 */
static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_parent *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	/* The handle itself may encode an error pointer. */
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	/*
	 * IDs that don't fit the legacy 8-bit field are flagged with the
	 * 255 marker and carried in the 32-bit extension field instead.
	 */
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	if (parent_id < 255) {
		req->parent_id = parent_id;
	} else {
		req->parent_id = 255;
		req->parent_id_32 = parent_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	/* The response is read back from the same buffer as the request. */
	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	/* Release the transfer descriptor on every path. */
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has it's own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Current clock parent
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 *parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_parent *req;
	struct ti_sci_msg_resp_get_clock_parent *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	/* The handle itself may encode an error pointer. */
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !parent_id)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	/*
	 * clk_id values that don't fit the legacy 8-bit field are flagged
	 * with the 255 marker and carried in the 32-bit extension field.
	 */
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		/* Same 255 marker convention applies to the response. */
		if (resp->parent_id < 255)
			*parent_id = resp->parent_id;
		else
			*parent_id = resp->parent_id_32;
	}

fail:
	/* Release the transfer descriptor on every path. */
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has it's own set of clock inputs. This indexes
 *		which clock input to modify.
 * @num_parents: Returns the number of parents to the current clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
					  u32 dev_id, u32 clk_id,
					  u32 *num_parents)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_num_parents *req;
	struct ti_sci_msg_resp_get_clock_num_parents *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	/* The handle itself may encode an error pointer. */
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !num_parents)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
	req->dev_id = dev_id;
	/*
	 * clk_id values that don't fit the legacy 8-bit field are flagged
	 * with the 255 marker and carried in the 32-bit extension field.
	 */
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		/* Same 255 marker convention applies to the response. */
		if (resp->num_parents < 255)
			*num_parents = resp->num_parents;
		else
			*num_parents = resp->num_parents_32;
	}

fail:
	/* Release the transfer descriptor on every path. */
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has it's own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz.
 *		This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @match_freq:	Frequency match in Hz response.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
					 u32 dev_id, u32 clk_id, u64 min_freq,
					 u64 target_freq, u64 max_freq,
					 u64 *match_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_query_clock_freq *req;
	struct ti_sci_msg_resp_query_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	/* The handle itself may encode an error pointer. */
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !match_freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	/*
	 * clk_id values that don't fit the legacy 8-bit field are flagged
	 * with the 255 marker and carried in the 32-bit extension field.
	 */
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*match_freq = resp->freq_hz;

fail:
	/* Release the transfer descriptor on every path. */
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has it's own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz.
 *		This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 min_freq,
				   u64 target_freq, u64 max_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_freq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	/* The handle itself may encode an error pointer. */
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	/*
	 * clk_id values that don't fit the legacy 8-bit field are flagged
	 * with the 255 marker and carried in the 32-bit extension field.
	 */
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	/* Only a generic header comes back; success is the ACK flag. */
	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	/* Release the transfer descriptor on every path. */
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_freq() - Get current frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has it's own set of clock inputs. This indexes
 *		which clock input to modify.
 * @freq:	Currently frequency in Hz
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 *freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_freq *req;
	struct ti_sci_msg_resp_get_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	/* The handle itself may encode an error pointer. */
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	/*
	 * clk_id values that don't fit the legacy 8-bit field are flagged
	 * with the 255 marker and carried in the 32-bit extension field.
	 */
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*freq = resp->freq_hz;

fail:
	/* Release the transfer descriptor on every path. */
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_core_reboot() - Request a system reset from the firmware
 * @handle:	pointer to TI SCI handle
 *
 * Sends a TI_SCI_MSG_SYS_RESET request and checks the firmware ACK.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_reboot *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	/* The handle itself may encode an error pointer. */
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	/* Request carries no payload beyond the header. */
	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		ret = 0;

fail:
	/* Release the transfer descriptor on every path. */
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_get_resource_type() - Map a TISCI device ID to its RM resource type
 * @info:	pointer to SCI entity information
 * @dev_id:	TISCI device ID to look up
 * @type:	output: RM type corresponding to @dev_id
 *
 * Walks the descriptor's rm_type_map (terminated by a zero dev_id entry)
 * looking for @dev_id. See struct ti_sci_rm_type_map: this map exists as a
 * workaround for using the RM range APIs on AM654.
 *
 * Return: 0 if a mapping was found (or no map exists, in which case
 * @dev_id itself is used as the type), -EINVAL otherwise.
 */
static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
				    u16 *type)
{
	struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
	bool found = false;
	int i;

	/* If map is not provided then assume dev_id is used as type */
	if (!rm_type_map) {
		*type = dev_id;
		return 0;
	}

	for (i = 0; rm_type_map[i].dev_id; i++) {
		if (rm_type_map[i].dev_id == dev_id) {
			*type = rm_type_map[i].type;
			found = true;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	return 0;
}

/**
 * ti_sci_get_resource_range - Helper to get a range of resources assigned
 *			       to a host. Resource is uniquely identified by
 *			       type and subtype.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @s_host:		Host processor ID to which the resources are allocated
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
17539c19fb68SLokesh Vutla */ 17549c19fb68SLokesh Vutla static int ti_sci_get_resource_range(const struct ti_sci_handle *handle, 17559c19fb68SLokesh Vutla u32 dev_id, u8 subtype, u8 s_host, 17569c19fb68SLokesh Vutla u16 *range_start, u16 *range_num) 17579c19fb68SLokesh Vutla { 17589c19fb68SLokesh Vutla struct ti_sci_msg_resp_get_resource_range *resp; 17599c19fb68SLokesh Vutla struct ti_sci_msg_req_get_resource_range *req; 17609c19fb68SLokesh Vutla struct ti_sci_xfer *xfer; 17619c19fb68SLokesh Vutla struct ti_sci_info *info; 17629c19fb68SLokesh Vutla struct device *dev; 17639c19fb68SLokesh Vutla u16 type; 17649c19fb68SLokesh Vutla int ret = 0; 17659c19fb68SLokesh Vutla 17669c19fb68SLokesh Vutla if (IS_ERR(handle)) 17679c19fb68SLokesh Vutla return PTR_ERR(handle); 17689c19fb68SLokesh Vutla if (!handle) 17699c19fb68SLokesh Vutla return -EINVAL; 17709c19fb68SLokesh Vutla 17719c19fb68SLokesh Vutla info = handle_to_ti_sci_info(handle); 17729c19fb68SLokesh Vutla dev = info->dev; 17739c19fb68SLokesh Vutla 17749c19fb68SLokesh Vutla xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE, 17759c19fb68SLokesh Vutla TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 17769c19fb68SLokesh Vutla sizeof(*req), sizeof(*resp)); 17779c19fb68SLokesh Vutla if (IS_ERR(xfer)) { 17789c19fb68SLokesh Vutla ret = PTR_ERR(xfer); 17799c19fb68SLokesh Vutla dev_err(dev, "Message alloc failed(%d)\n", ret); 17809c19fb68SLokesh Vutla return ret; 17819c19fb68SLokesh Vutla } 17829c19fb68SLokesh Vutla 17839c19fb68SLokesh Vutla ret = ti_sci_get_resource_type(info, dev_id, &type); 17849c19fb68SLokesh Vutla if (ret) { 17859c19fb68SLokesh Vutla dev_err(dev, "rm type lookup failed for %u\n", dev_id); 17869c19fb68SLokesh Vutla goto fail; 17879c19fb68SLokesh Vutla } 17889c19fb68SLokesh Vutla 17899c19fb68SLokesh Vutla req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf; 17909c19fb68SLokesh Vutla req->secondary_host = s_host; 17919c19fb68SLokesh Vutla req->type = type & MSG_RM_RESOURCE_TYPE_MASK; 
17929c19fb68SLokesh Vutla req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK; 17939c19fb68SLokesh Vutla 17949c19fb68SLokesh Vutla ret = ti_sci_do_xfer(info, xfer); 17959c19fb68SLokesh Vutla if (ret) { 17969c19fb68SLokesh Vutla dev_err(dev, "Mbox send fail %d\n", ret); 17979c19fb68SLokesh Vutla goto fail; 17989c19fb68SLokesh Vutla } 17999c19fb68SLokesh Vutla 18009c19fb68SLokesh Vutla resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf; 18019c19fb68SLokesh Vutla 18029c19fb68SLokesh Vutla if (!ti_sci_is_response_ack(resp)) { 18039c19fb68SLokesh Vutla ret = -ENODEV; 18049c19fb68SLokesh Vutla } else if (!resp->range_start && !resp->range_num) { 18059c19fb68SLokesh Vutla ret = -ENODEV; 18069c19fb68SLokesh Vutla } else { 18079c19fb68SLokesh Vutla *range_start = resp->range_start; 18089c19fb68SLokesh Vutla *range_num = resp->range_num; 18099c19fb68SLokesh Vutla }; 18109c19fb68SLokesh Vutla 18119c19fb68SLokesh Vutla fail: 18129c19fb68SLokesh Vutla ti_sci_put_one_xfer(&info->minfo, xfer); 18139c19fb68SLokesh Vutla 18149c19fb68SLokesh Vutla return ret; 18159c19fb68SLokesh Vutla } 18169c19fb68SLokesh Vutla 18179c19fb68SLokesh Vutla /** 18189c19fb68SLokesh Vutla * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host 18199c19fb68SLokesh Vutla * that is same as ti sci interface host. 18209c19fb68SLokesh Vutla * @handle: Pointer to TISCI handle. 18219c19fb68SLokesh Vutla * @dev_id: TISCI device ID. 18229c19fb68SLokesh Vutla * @subtype: Resource assignment subtype that is being requested 18239c19fb68SLokesh Vutla * from the given device. 18249c19fb68SLokesh Vutla * @range_start: Start index of the resource range 18259c19fb68SLokesh Vutla * @range_num: Number of resources in the range 18269c19fb68SLokesh Vutla * 18279c19fb68SLokesh Vutla * Return: 0 if all went fine, else return appropriate error. 
 */
static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
					 u32 dev_id, u8 subtype,
					 u16 *range_start, u16 *range_num)
{
	/*
	 * Delegate with the "invalid" secondary host marker so the query
	 * applies to the current TI SCI interface host.
	 */
	return ti_sci_get_resource_range(handle, dev_id, subtype,
					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
					 range_start, range_num);
}

/**
 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
 *					      assigned to a specified host.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @s_host:		Host processor ID to which the resources are allocated
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
18509c19fb68SLokesh Vutla */ 18519c19fb68SLokesh Vutla static 18529c19fb68SLokesh Vutla int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle, 18539c19fb68SLokesh Vutla u32 dev_id, u8 subtype, u8 s_host, 18549c19fb68SLokesh Vutla u16 *range_start, u16 *range_num) 18559c19fb68SLokesh Vutla { 18569c19fb68SLokesh Vutla return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, 18579c19fb68SLokesh Vutla range_start, range_num); 18589c19fb68SLokesh Vutla } 18599c19fb68SLokesh Vutla 1860997b001fSLokesh Vutla /** 1861997b001fSLokesh Vutla * ti_sci_manage_irq() - Helper api to configure/release the irq route between 1862997b001fSLokesh Vutla * the requested source and destination 1863997b001fSLokesh Vutla * @handle: Pointer to TISCI handle. 1864997b001fSLokesh Vutla * @valid_params: Bit fields defining the validity of certain params 1865997b001fSLokesh Vutla * @src_id: Device ID of the IRQ source 1866997b001fSLokesh Vutla * @src_index: IRQ source index within the source device 1867997b001fSLokesh Vutla * @dst_id: Device ID of the IRQ destination 1868997b001fSLokesh Vutla * @dst_host_irq: IRQ number of the destination device 1869997b001fSLokesh Vutla * @ia_id: Device ID of the IA, if the IRQ flows through this IA 1870997b001fSLokesh Vutla * @vint: Virtual interrupt to be used within the IA 1871997b001fSLokesh Vutla * @global_event: Global event number to be used for the requesting event 1872997b001fSLokesh Vutla * @vint_status_bit: Virtual interrupt status bit to be used for the event 1873997b001fSLokesh Vutla * @s_host: Secondary host ID to which the irq/event is being 1874997b001fSLokesh Vutla * requested for. 1875997b001fSLokesh Vutla * @type: Request type irq set or release. 1876997b001fSLokesh Vutla * 1877997b001fSLokesh Vutla * Return: 0 if all went fine, else return appropriate error. 
1878997b001fSLokesh Vutla */ 1879997b001fSLokesh Vutla static int ti_sci_manage_irq(const struct ti_sci_handle *handle, 1880997b001fSLokesh Vutla u32 valid_params, u16 src_id, u16 src_index, 1881997b001fSLokesh Vutla u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint, 1882997b001fSLokesh Vutla u16 global_event, u8 vint_status_bit, u8 s_host, 1883997b001fSLokesh Vutla u16 type) 1884997b001fSLokesh Vutla { 1885997b001fSLokesh Vutla struct ti_sci_msg_req_manage_irq *req; 1886997b001fSLokesh Vutla struct ti_sci_msg_hdr *resp; 1887997b001fSLokesh Vutla struct ti_sci_xfer *xfer; 1888997b001fSLokesh Vutla struct ti_sci_info *info; 1889997b001fSLokesh Vutla struct device *dev; 1890997b001fSLokesh Vutla int ret = 0; 1891997b001fSLokesh Vutla 1892997b001fSLokesh Vutla if (IS_ERR(handle)) 1893997b001fSLokesh Vutla return PTR_ERR(handle); 1894997b001fSLokesh Vutla if (!handle) 1895997b001fSLokesh Vutla return -EINVAL; 1896997b001fSLokesh Vutla 1897997b001fSLokesh Vutla info = handle_to_ti_sci_info(handle); 1898997b001fSLokesh Vutla dev = info->dev; 1899997b001fSLokesh Vutla 1900997b001fSLokesh Vutla xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1901997b001fSLokesh Vutla sizeof(*req), sizeof(*resp)); 1902997b001fSLokesh Vutla if (IS_ERR(xfer)) { 1903997b001fSLokesh Vutla ret = PTR_ERR(xfer); 1904997b001fSLokesh Vutla dev_err(dev, "Message alloc failed(%d)\n", ret); 1905997b001fSLokesh Vutla return ret; 1906997b001fSLokesh Vutla } 1907997b001fSLokesh Vutla req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf; 1908997b001fSLokesh Vutla req->valid_params = valid_params; 1909997b001fSLokesh Vutla req->src_id = src_id; 1910997b001fSLokesh Vutla req->src_index = src_index; 1911997b001fSLokesh Vutla req->dst_id = dst_id; 1912997b001fSLokesh Vutla req->dst_host_irq = dst_host_irq; 1913997b001fSLokesh Vutla req->ia_id = ia_id; 1914997b001fSLokesh Vutla req->vint = vint; 1915997b001fSLokesh Vutla req->global_event = global_event; 1916997b001fSLokesh Vutla 
req->vint_status_bit = vint_status_bit; 1917997b001fSLokesh Vutla req->secondary_host = s_host; 1918997b001fSLokesh Vutla 1919997b001fSLokesh Vutla ret = ti_sci_do_xfer(info, xfer); 1920997b001fSLokesh Vutla if (ret) { 1921997b001fSLokesh Vutla dev_err(dev, "Mbox send fail %d\n", ret); 1922997b001fSLokesh Vutla goto fail; 1923997b001fSLokesh Vutla } 1924997b001fSLokesh Vutla 1925997b001fSLokesh Vutla resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 1926997b001fSLokesh Vutla 1927997b001fSLokesh Vutla ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 1928997b001fSLokesh Vutla 1929997b001fSLokesh Vutla fail: 1930997b001fSLokesh Vutla ti_sci_put_one_xfer(&info->minfo, xfer); 1931997b001fSLokesh Vutla 1932997b001fSLokesh Vutla return ret; 1933997b001fSLokesh Vutla } 1934997b001fSLokesh Vutla 1935997b001fSLokesh Vutla /** 1936997b001fSLokesh Vutla * ti_sci_set_irq() - Helper api to configure the irq route between the 1937997b001fSLokesh Vutla * requested source and destination 1938997b001fSLokesh Vutla * @handle: Pointer to TISCI handle. 1939997b001fSLokesh Vutla * @valid_params: Bit fields defining the validity of certain params 1940997b001fSLokesh Vutla * @src_id: Device ID of the IRQ source 1941997b001fSLokesh Vutla * @src_index: IRQ source index within the source device 1942997b001fSLokesh Vutla * @dst_id: Device ID of the IRQ destination 1943997b001fSLokesh Vutla * @dst_host_irq: IRQ number of the destination device 1944997b001fSLokesh Vutla * @ia_id: Device ID of the IA, if the IRQ flows through this IA 1945997b001fSLokesh Vutla * @vint: Virtual interrupt to be used within the IA 1946997b001fSLokesh Vutla * @global_event: Global event number to be used for the requesting event 1947997b001fSLokesh Vutla * @vint_status_bit: Virtual interrupt status bit to be used for the event 1948997b001fSLokesh Vutla * @s_host: Secondary host ID to which the irq/event is being 1949997b001fSLokesh Vutla * requested for. 
1950997b001fSLokesh Vutla * 1951997b001fSLokesh Vutla * Return: 0 if all went fine, else return appropriate error. 1952997b001fSLokesh Vutla */ 1953997b001fSLokesh Vutla static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params, 1954997b001fSLokesh Vutla u16 src_id, u16 src_index, u16 dst_id, 1955997b001fSLokesh Vutla u16 dst_host_irq, u16 ia_id, u16 vint, 1956997b001fSLokesh Vutla u16 global_event, u8 vint_status_bit, u8 s_host) 1957997b001fSLokesh Vutla { 1958997b001fSLokesh Vutla pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n", 1959997b001fSLokesh Vutla __func__, valid_params, src_id, src_index, 1960997b001fSLokesh Vutla dst_id, dst_host_irq, ia_id, vint, global_event, 1961997b001fSLokesh Vutla vint_status_bit); 1962997b001fSLokesh Vutla 1963997b001fSLokesh Vutla return ti_sci_manage_irq(handle, valid_params, src_id, src_index, 1964997b001fSLokesh Vutla dst_id, dst_host_irq, ia_id, vint, 1965997b001fSLokesh Vutla global_event, vint_status_bit, s_host, 1966997b001fSLokesh Vutla TI_SCI_MSG_SET_IRQ); 1967997b001fSLokesh Vutla } 1968997b001fSLokesh Vutla 1969997b001fSLokesh Vutla /** 1970997b001fSLokesh Vutla * ti_sci_free_irq() - Helper api to free the irq route between the 1971997b001fSLokesh Vutla * requested source and destination 1972997b001fSLokesh Vutla * @handle: Pointer to TISCI handle. 
1973997b001fSLokesh Vutla * @valid_params: Bit fields defining the validity of certain params 1974997b001fSLokesh Vutla * @src_id: Device ID of the IRQ source 1975997b001fSLokesh Vutla * @src_index: IRQ source index within the source device 1976997b001fSLokesh Vutla * @dst_id: Device ID of the IRQ destination 1977997b001fSLokesh Vutla * @dst_host_irq: IRQ number of the destination device 1978997b001fSLokesh Vutla * @ia_id: Device ID of the IA, if the IRQ flows through this IA 1979997b001fSLokesh Vutla * @vint: Virtual interrupt to be used within the IA 1980997b001fSLokesh Vutla * @global_event: Global event number to be used for the requesting event 1981997b001fSLokesh Vutla * @vint_status_bit: Virtual interrupt status bit to be used for the event 1982997b001fSLokesh Vutla * @s_host: Secondary host ID to which the irq/event is being 1983997b001fSLokesh Vutla * requested for. 1984997b001fSLokesh Vutla * 1985997b001fSLokesh Vutla * Return: 0 if all went fine, else return appropriate error. 
1986997b001fSLokesh Vutla */ 1987997b001fSLokesh Vutla static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params, 1988997b001fSLokesh Vutla u16 src_id, u16 src_index, u16 dst_id, 1989997b001fSLokesh Vutla u16 dst_host_irq, u16 ia_id, u16 vint, 1990997b001fSLokesh Vutla u16 global_event, u8 vint_status_bit, u8 s_host) 1991997b001fSLokesh Vutla { 1992997b001fSLokesh Vutla pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n", 1993997b001fSLokesh Vutla __func__, valid_params, src_id, src_index, 1994997b001fSLokesh Vutla dst_id, dst_host_irq, ia_id, vint, global_event, 1995997b001fSLokesh Vutla vint_status_bit); 1996997b001fSLokesh Vutla 1997997b001fSLokesh Vutla return ti_sci_manage_irq(handle, valid_params, src_id, src_index, 1998997b001fSLokesh Vutla dst_id, dst_host_irq, ia_id, vint, 1999997b001fSLokesh Vutla global_event, vint_status_bit, s_host, 2000997b001fSLokesh Vutla TI_SCI_MSG_FREE_IRQ); 2001997b001fSLokesh Vutla } 2002997b001fSLokesh Vutla 2003997b001fSLokesh Vutla /** 2004997b001fSLokesh Vutla * ti_sci_cmd_set_irq() - Configure a host irq route between the requested 2005997b001fSLokesh Vutla * source and destination. 2006997b001fSLokesh Vutla * @handle: Pointer to TISCI handle. 2007997b001fSLokesh Vutla * @src_id: Device ID of the IRQ source 2008997b001fSLokesh Vutla * @src_index: IRQ source index within the source device 2009997b001fSLokesh Vutla * @dst_id: Device ID of the IRQ destination 2010997b001fSLokesh Vutla * @dst_host_irq: IRQ number of the destination device 2011997b001fSLokesh Vutla * @vint_irq: Boolean specifying if this interrupt belongs to 2012997b001fSLokesh Vutla * Interrupt Aggregator. 2013997b001fSLokesh Vutla * 2014997b001fSLokesh Vutla * Return: 0 if all went fine, else return appropriate error. 
2015997b001fSLokesh Vutla */ 2016997b001fSLokesh Vutla static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id, 2017997b001fSLokesh Vutla u16 src_index, u16 dst_id, u16 dst_host_irq) 2018997b001fSLokesh Vutla { 2019997b001fSLokesh Vutla u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID; 2020997b001fSLokesh Vutla 2021997b001fSLokesh Vutla return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id, 2022997b001fSLokesh Vutla dst_host_irq, 0, 0, 0, 0, 0); 2023997b001fSLokesh Vutla } 2024997b001fSLokesh Vutla 2025997b001fSLokesh Vutla /** 2026997b001fSLokesh Vutla * ti_sci_cmd_set_event_map() - Configure an event based irq route between the 2027997b001fSLokesh Vutla * requested source and Interrupt Aggregator. 2028997b001fSLokesh Vutla * @handle: Pointer to TISCI handle. 2029997b001fSLokesh Vutla * @src_id: Device ID of the IRQ source 2030997b001fSLokesh Vutla * @src_index: IRQ source index within the source device 2031997b001fSLokesh Vutla * @ia_id: Device ID of the IA, if the IRQ flows through this IA 2032997b001fSLokesh Vutla * @vint: Virtual interrupt to be used within the IA 2033997b001fSLokesh Vutla * @global_event: Global event number to be used for the requesting event 2034997b001fSLokesh Vutla * @vint_status_bit: Virtual interrupt status bit to be used for the event 2035997b001fSLokesh Vutla * 2036997b001fSLokesh Vutla * Return: 0 if all went fine, else return appropriate error. 
2037997b001fSLokesh Vutla */ 2038997b001fSLokesh Vutla static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle, 2039997b001fSLokesh Vutla u16 src_id, u16 src_index, u16 ia_id, 2040997b001fSLokesh Vutla u16 vint, u16 global_event, 2041997b001fSLokesh Vutla u8 vint_status_bit) 2042997b001fSLokesh Vutla { 2043997b001fSLokesh Vutla u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID | 2044997b001fSLokesh Vutla MSG_FLAG_GLB_EVNT_VALID | 2045997b001fSLokesh Vutla MSG_FLAG_VINT_STS_BIT_VALID; 2046997b001fSLokesh Vutla 2047997b001fSLokesh Vutla return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0, 2048997b001fSLokesh Vutla ia_id, vint, global_event, vint_status_bit, 0); 2049997b001fSLokesh Vutla } 2050997b001fSLokesh Vutla 2051997b001fSLokesh Vutla /** 2052997b001fSLokesh Vutla * ti_sci_cmd_free_irq() - Free a host irq route between the between the 2053997b001fSLokesh Vutla * requested source and destination. 2054997b001fSLokesh Vutla * @handle: Pointer to TISCI handle. 2055997b001fSLokesh Vutla * @src_id: Device ID of the IRQ source 2056997b001fSLokesh Vutla * @src_index: IRQ source index within the source device 2057997b001fSLokesh Vutla * @dst_id: Device ID of the IRQ destination 2058997b001fSLokesh Vutla * @dst_host_irq: IRQ number of the destination device 2059997b001fSLokesh Vutla * @vint_irq: Boolean specifying if this interrupt belongs to 2060997b001fSLokesh Vutla * Interrupt Aggregator. 2061997b001fSLokesh Vutla * 2062997b001fSLokesh Vutla * Return: 0 if all went fine, else return appropriate error. 
2063997b001fSLokesh Vutla */ 2064997b001fSLokesh Vutla static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id, 2065997b001fSLokesh Vutla u16 src_index, u16 dst_id, u16 dst_host_irq) 2066997b001fSLokesh Vutla { 2067997b001fSLokesh Vutla u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID; 2068997b001fSLokesh Vutla 2069997b001fSLokesh Vutla return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id, 2070997b001fSLokesh Vutla dst_host_irq, 0, 0, 0, 0, 0); 2071997b001fSLokesh Vutla } 2072997b001fSLokesh Vutla 2073997b001fSLokesh Vutla /** 2074997b001fSLokesh Vutla * ti_sci_cmd_free_event_map() - Free an event map between the requested source 2075997b001fSLokesh Vutla * and Interrupt Aggregator. 2076997b001fSLokesh Vutla * @handle: Pointer to TISCI handle. 2077997b001fSLokesh Vutla * @src_id: Device ID of the IRQ source 2078997b001fSLokesh Vutla * @src_index: IRQ source index within the source device 2079997b001fSLokesh Vutla * @ia_id: Device ID of the IA, if the IRQ flows through this IA 2080997b001fSLokesh Vutla * @vint: Virtual interrupt to be used within the IA 2081997b001fSLokesh Vutla * @global_event: Global event number to be used for the requesting event 2082997b001fSLokesh Vutla * @vint_status_bit: Virtual interrupt status bit to be used for the event 2083997b001fSLokesh Vutla * 2084997b001fSLokesh Vutla * Return: 0 if all went fine, else return appropriate error. 
2085997b001fSLokesh Vutla */ 2086997b001fSLokesh Vutla static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle, 2087997b001fSLokesh Vutla u16 src_id, u16 src_index, u16 ia_id, 2088997b001fSLokesh Vutla u16 vint, u16 global_event, 2089997b001fSLokesh Vutla u8 vint_status_bit) 2090997b001fSLokesh Vutla { 2091997b001fSLokesh Vutla u32 valid_params = MSG_FLAG_IA_ID_VALID | 2092997b001fSLokesh Vutla MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID | 2093997b001fSLokesh Vutla MSG_FLAG_VINT_STS_BIT_VALID; 2094997b001fSLokesh Vutla 2095997b001fSLokesh Vutla return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0, 2096997b001fSLokesh Vutla ia_id, vint, global_event, vint_status_bit, 0); 2097997b001fSLokesh Vutla } 2098997b001fSLokesh Vutla 209968608b5eSPeter Ujfalusi /** 210068608b5eSPeter Ujfalusi * ti_sci_cmd_ring_config() - configure RA ring 210168608b5eSPeter Ujfalusi * @handle: Pointer to TI SCI handle. 210268608b5eSPeter Ujfalusi * @valid_params: Bitfield defining validity of ring configuration 210368608b5eSPeter Ujfalusi * parameters 210468608b5eSPeter Ujfalusi * @nav_id: Device ID of Navigator Subsystem from which the ring is 210568608b5eSPeter Ujfalusi * allocated 210668608b5eSPeter Ujfalusi * @index: Ring index 210768608b5eSPeter Ujfalusi * @addr_lo: The ring base address lo 32 bits 210868608b5eSPeter Ujfalusi * @addr_hi: The ring base address hi 32 bits 210968608b5eSPeter Ujfalusi * @count: Number of ring elements 211068608b5eSPeter Ujfalusi * @mode: The mode of the ring 211168608b5eSPeter Ujfalusi * @size: The ring element size. 211268608b5eSPeter Ujfalusi * @order_id: Specifies the ring's bus order ID 211368608b5eSPeter Ujfalusi * 211468608b5eSPeter Ujfalusi * Return: 0 if all went well, else returns appropriate error value. 211568608b5eSPeter Ujfalusi * 211668608b5eSPeter Ujfalusi * See @ti_sci_msg_rm_ring_cfg_req for more info. 
211768608b5eSPeter Ujfalusi */ 211868608b5eSPeter Ujfalusi static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle, 211968608b5eSPeter Ujfalusi u32 valid_params, u16 nav_id, u16 index, 212068608b5eSPeter Ujfalusi u32 addr_lo, u32 addr_hi, u32 count, 212168608b5eSPeter Ujfalusi u8 mode, u8 size, u8 order_id) 212268608b5eSPeter Ujfalusi { 212368608b5eSPeter Ujfalusi struct ti_sci_msg_rm_ring_cfg_req *req; 212468608b5eSPeter Ujfalusi struct ti_sci_msg_hdr *resp; 212568608b5eSPeter Ujfalusi struct ti_sci_xfer *xfer; 212668608b5eSPeter Ujfalusi struct ti_sci_info *info; 212768608b5eSPeter Ujfalusi struct device *dev; 212868608b5eSPeter Ujfalusi int ret = 0; 212968608b5eSPeter Ujfalusi 213068608b5eSPeter Ujfalusi if (IS_ERR_OR_NULL(handle)) 213168608b5eSPeter Ujfalusi return -EINVAL; 213268608b5eSPeter Ujfalusi 213368608b5eSPeter Ujfalusi info = handle_to_ti_sci_info(handle); 213468608b5eSPeter Ujfalusi dev = info->dev; 213568608b5eSPeter Ujfalusi 213668608b5eSPeter Ujfalusi xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG, 213768608b5eSPeter Ujfalusi TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 213868608b5eSPeter Ujfalusi sizeof(*req), sizeof(*resp)); 213968608b5eSPeter Ujfalusi if (IS_ERR(xfer)) { 214068608b5eSPeter Ujfalusi ret = PTR_ERR(xfer); 21414c960505SYueHaibing dev_err(dev, "RM_RA:Message config failed(%d)\n", ret); 214268608b5eSPeter Ujfalusi return ret; 214368608b5eSPeter Ujfalusi } 214468608b5eSPeter Ujfalusi req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf; 214568608b5eSPeter Ujfalusi req->valid_params = valid_params; 214668608b5eSPeter Ujfalusi req->nav_id = nav_id; 214768608b5eSPeter Ujfalusi req->index = index; 214868608b5eSPeter Ujfalusi req->addr_lo = addr_lo; 214968608b5eSPeter Ujfalusi req->addr_hi = addr_hi; 215068608b5eSPeter Ujfalusi req->count = count; 215168608b5eSPeter Ujfalusi req->mode = mode; 215268608b5eSPeter Ujfalusi req->size = size; 215368608b5eSPeter Ujfalusi req->order_id = order_id; 215468608b5eSPeter Ujfalusi 
215568608b5eSPeter Ujfalusi ret = ti_sci_do_xfer(info, xfer); 215668608b5eSPeter Ujfalusi if (ret) { 21574c960505SYueHaibing dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret); 215868608b5eSPeter Ujfalusi goto fail; 215968608b5eSPeter Ujfalusi } 216068608b5eSPeter Ujfalusi 216168608b5eSPeter Ujfalusi resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 216268608b5eSPeter Ujfalusi ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 216368608b5eSPeter Ujfalusi 216468608b5eSPeter Ujfalusi fail: 216568608b5eSPeter Ujfalusi ti_sci_put_one_xfer(&info->minfo, xfer); 21664c960505SYueHaibing dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret); 216768608b5eSPeter Ujfalusi return ret; 216868608b5eSPeter Ujfalusi } 216968608b5eSPeter Ujfalusi 217068608b5eSPeter Ujfalusi /** 217168608b5eSPeter Ujfalusi * ti_sci_cmd_ring_get_config() - get RA ring configuration 217268608b5eSPeter Ujfalusi * @handle: Pointer to TI SCI handle. 217368608b5eSPeter Ujfalusi * @nav_id: Device ID of Navigator Subsystem from which the ring is 217468608b5eSPeter Ujfalusi * allocated 217568608b5eSPeter Ujfalusi * @index: Ring index 217668608b5eSPeter Ujfalusi * @addr_lo: Returns ring's base address lo 32 bits 217768608b5eSPeter Ujfalusi * @addr_hi: Returns ring's base address hi 32 bits 217868608b5eSPeter Ujfalusi * @count: Returns number of ring elements 217968608b5eSPeter Ujfalusi * @mode: Returns mode of the ring 218068608b5eSPeter Ujfalusi * @size: Returns ring element size 218168608b5eSPeter Ujfalusi * @order_id: Returns ring's bus order ID 218268608b5eSPeter Ujfalusi * 218368608b5eSPeter Ujfalusi * Return: 0 if all went well, else returns appropriate error value. 218468608b5eSPeter Ujfalusi * 218568608b5eSPeter Ujfalusi * See @ti_sci_msg_rm_ring_get_cfg_req for more info. 
218668608b5eSPeter Ujfalusi */ 218768608b5eSPeter Ujfalusi static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle, 218868608b5eSPeter Ujfalusi u32 nav_id, u32 index, u8 *mode, 218968608b5eSPeter Ujfalusi u32 *addr_lo, u32 *addr_hi, 219068608b5eSPeter Ujfalusi u32 *count, u8 *size, u8 *order_id) 219168608b5eSPeter Ujfalusi { 219268608b5eSPeter Ujfalusi struct ti_sci_msg_rm_ring_get_cfg_resp *resp; 219368608b5eSPeter Ujfalusi struct ti_sci_msg_rm_ring_get_cfg_req *req; 219468608b5eSPeter Ujfalusi struct ti_sci_xfer *xfer; 219568608b5eSPeter Ujfalusi struct ti_sci_info *info; 219668608b5eSPeter Ujfalusi struct device *dev; 219768608b5eSPeter Ujfalusi int ret = 0; 219868608b5eSPeter Ujfalusi 219968608b5eSPeter Ujfalusi if (IS_ERR_OR_NULL(handle)) 220068608b5eSPeter Ujfalusi return -EINVAL; 220168608b5eSPeter Ujfalusi 220268608b5eSPeter Ujfalusi info = handle_to_ti_sci_info(handle); 220368608b5eSPeter Ujfalusi dev = info->dev; 220468608b5eSPeter Ujfalusi 220568608b5eSPeter Ujfalusi xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG, 220668608b5eSPeter Ujfalusi TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 220768608b5eSPeter Ujfalusi sizeof(*req), sizeof(*resp)); 220868608b5eSPeter Ujfalusi if (IS_ERR(xfer)) { 220968608b5eSPeter Ujfalusi ret = PTR_ERR(xfer); 22104c960505SYueHaibing dev_err(dev, 221168608b5eSPeter Ujfalusi "RM_RA:Message get config failed(%d)\n", ret); 221268608b5eSPeter Ujfalusi return ret; 221368608b5eSPeter Ujfalusi } 221468608b5eSPeter Ujfalusi req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf; 221568608b5eSPeter Ujfalusi req->nav_id = nav_id; 221668608b5eSPeter Ujfalusi req->index = index; 221768608b5eSPeter Ujfalusi 221868608b5eSPeter Ujfalusi ret = ti_sci_do_xfer(info, xfer); 221968608b5eSPeter Ujfalusi if (ret) { 22204c960505SYueHaibing dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret); 222168608b5eSPeter Ujfalusi goto fail; 222268608b5eSPeter Ujfalusi } 222368608b5eSPeter Ujfalusi 222468608b5eSPeter Ujfalusi 
resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf; 222568608b5eSPeter Ujfalusi 222668608b5eSPeter Ujfalusi if (!ti_sci_is_response_ack(resp)) { 222768608b5eSPeter Ujfalusi ret = -ENODEV; 222868608b5eSPeter Ujfalusi } else { 222968608b5eSPeter Ujfalusi if (mode) 223068608b5eSPeter Ujfalusi *mode = resp->mode; 223168608b5eSPeter Ujfalusi if (addr_lo) 223268608b5eSPeter Ujfalusi *addr_lo = resp->addr_lo; 223368608b5eSPeter Ujfalusi if (addr_hi) 223468608b5eSPeter Ujfalusi *addr_hi = resp->addr_hi; 223568608b5eSPeter Ujfalusi if (count) 223668608b5eSPeter Ujfalusi *count = resp->count; 223768608b5eSPeter Ujfalusi if (size) 223868608b5eSPeter Ujfalusi *size = resp->size; 223968608b5eSPeter Ujfalusi if (order_id) 224068608b5eSPeter Ujfalusi *order_id = resp->order_id; 224168608b5eSPeter Ujfalusi }; 224268608b5eSPeter Ujfalusi 224368608b5eSPeter Ujfalusi fail: 224468608b5eSPeter Ujfalusi ti_sci_put_one_xfer(&info->minfo, xfer); 22454c960505SYueHaibing dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret); 224668608b5eSPeter Ujfalusi return ret; 224768608b5eSPeter Ujfalusi } 224868608b5eSPeter Ujfalusi 224968608b5eSPeter Ujfalusi /** 225068608b5eSPeter Ujfalusi * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread 225168608b5eSPeter Ujfalusi * @handle: Pointer to TI SCI handle. 225268608b5eSPeter Ujfalusi * @nav_id: Device ID of Navigator Subsystem which should be used for 225368608b5eSPeter Ujfalusi * pairing 225468608b5eSPeter Ujfalusi * @src_thread: Source PSI-L thread ID 225568608b5eSPeter Ujfalusi * @dst_thread: Destination PSI-L thread ID 225668608b5eSPeter Ujfalusi * 225768608b5eSPeter Ujfalusi * Return: 0 if all went well, else returns appropriate error value. 
225868608b5eSPeter Ujfalusi */ 225968608b5eSPeter Ujfalusi static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle, 226068608b5eSPeter Ujfalusi u32 nav_id, u32 src_thread, u32 dst_thread) 226168608b5eSPeter Ujfalusi { 226268608b5eSPeter Ujfalusi struct ti_sci_msg_psil_pair *req; 226368608b5eSPeter Ujfalusi struct ti_sci_msg_hdr *resp; 226468608b5eSPeter Ujfalusi struct ti_sci_xfer *xfer; 226568608b5eSPeter Ujfalusi struct ti_sci_info *info; 226668608b5eSPeter Ujfalusi struct device *dev; 226768608b5eSPeter Ujfalusi int ret = 0; 226868608b5eSPeter Ujfalusi 226968608b5eSPeter Ujfalusi if (IS_ERR(handle)) 227068608b5eSPeter Ujfalusi return PTR_ERR(handle); 227168608b5eSPeter Ujfalusi if (!handle) 227268608b5eSPeter Ujfalusi return -EINVAL; 227368608b5eSPeter Ujfalusi 227468608b5eSPeter Ujfalusi info = handle_to_ti_sci_info(handle); 227568608b5eSPeter Ujfalusi dev = info->dev; 227668608b5eSPeter Ujfalusi 227768608b5eSPeter Ujfalusi xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR, 227868608b5eSPeter Ujfalusi TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 227968608b5eSPeter Ujfalusi sizeof(*req), sizeof(*resp)); 228068608b5eSPeter Ujfalusi if (IS_ERR(xfer)) { 228168608b5eSPeter Ujfalusi ret = PTR_ERR(xfer); 228268608b5eSPeter Ujfalusi dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret); 228368608b5eSPeter Ujfalusi return ret; 228468608b5eSPeter Ujfalusi } 228568608b5eSPeter Ujfalusi req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf; 228668608b5eSPeter Ujfalusi req->nav_id = nav_id; 228768608b5eSPeter Ujfalusi req->src_thread = src_thread; 228868608b5eSPeter Ujfalusi req->dst_thread = dst_thread; 228968608b5eSPeter Ujfalusi 229068608b5eSPeter Ujfalusi ret = ti_sci_do_xfer(info, xfer); 229168608b5eSPeter Ujfalusi if (ret) { 229268608b5eSPeter Ujfalusi dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret); 229368608b5eSPeter Ujfalusi goto fail; 229468608b5eSPeter Ujfalusi } 229568608b5eSPeter Ujfalusi 229668608b5eSPeter Ujfalusi resp = (struct ti_sci_msg_hdr 
*)xfer->xfer_buf; 229768608b5eSPeter Ujfalusi ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 229868608b5eSPeter Ujfalusi 229968608b5eSPeter Ujfalusi fail: 230068608b5eSPeter Ujfalusi ti_sci_put_one_xfer(&info->minfo, xfer); 230168608b5eSPeter Ujfalusi 230268608b5eSPeter Ujfalusi return ret; 230368608b5eSPeter Ujfalusi } 230468608b5eSPeter Ujfalusi 230568608b5eSPeter Ujfalusi /** 230668608b5eSPeter Ujfalusi * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread 230768608b5eSPeter Ujfalusi * @handle: Pointer to TI SCI handle. 230868608b5eSPeter Ujfalusi * @nav_id: Device ID of Navigator Subsystem which should be used for 230968608b5eSPeter Ujfalusi * unpairing 231068608b5eSPeter Ujfalusi * @src_thread: Source PSI-L thread ID 231168608b5eSPeter Ujfalusi * @dst_thread: Destination PSI-L thread ID 231268608b5eSPeter Ujfalusi * 231368608b5eSPeter Ujfalusi * Return: 0 if all went well, else returns appropriate error value. 231468608b5eSPeter Ujfalusi */ 231568608b5eSPeter Ujfalusi static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle, 231668608b5eSPeter Ujfalusi u32 nav_id, u32 src_thread, u32 dst_thread) 231768608b5eSPeter Ujfalusi { 231868608b5eSPeter Ujfalusi struct ti_sci_msg_psil_unpair *req; 231968608b5eSPeter Ujfalusi struct ti_sci_msg_hdr *resp; 232068608b5eSPeter Ujfalusi struct ti_sci_xfer *xfer; 232168608b5eSPeter Ujfalusi struct ti_sci_info *info; 232268608b5eSPeter Ujfalusi struct device *dev; 232368608b5eSPeter Ujfalusi int ret = 0; 232468608b5eSPeter Ujfalusi 232568608b5eSPeter Ujfalusi if (IS_ERR(handle)) 232668608b5eSPeter Ujfalusi return PTR_ERR(handle); 232768608b5eSPeter Ujfalusi if (!handle) 232868608b5eSPeter Ujfalusi return -EINVAL; 232968608b5eSPeter Ujfalusi 233068608b5eSPeter Ujfalusi info = handle_to_ti_sci_info(handle); 233168608b5eSPeter Ujfalusi dev = info->dev; 233268608b5eSPeter Ujfalusi 233368608b5eSPeter Ujfalusi xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR, 233468608b5eSPeter 
Ujfalusi TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 233568608b5eSPeter Ujfalusi sizeof(*req), sizeof(*resp)); 233668608b5eSPeter Ujfalusi if (IS_ERR(xfer)) { 233768608b5eSPeter Ujfalusi ret = PTR_ERR(xfer); 233868608b5eSPeter Ujfalusi dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret); 233968608b5eSPeter Ujfalusi return ret; 234068608b5eSPeter Ujfalusi } 234168608b5eSPeter Ujfalusi req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf; 234268608b5eSPeter Ujfalusi req->nav_id = nav_id; 234368608b5eSPeter Ujfalusi req->src_thread = src_thread; 234468608b5eSPeter Ujfalusi req->dst_thread = dst_thread; 234568608b5eSPeter Ujfalusi 234668608b5eSPeter Ujfalusi ret = ti_sci_do_xfer(info, xfer); 234768608b5eSPeter Ujfalusi if (ret) { 234868608b5eSPeter Ujfalusi dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret); 234968608b5eSPeter Ujfalusi goto fail; 235068608b5eSPeter Ujfalusi } 235168608b5eSPeter Ujfalusi 235268608b5eSPeter Ujfalusi resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 235368608b5eSPeter Ujfalusi ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 235468608b5eSPeter Ujfalusi 235568608b5eSPeter Ujfalusi fail: 235668608b5eSPeter Ujfalusi ti_sci_put_one_xfer(&info->minfo, xfer); 235768608b5eSPeter Ujfalusi 235868608b5eSPeter Ujfalusi return ret; 235968608b5eSPeter Ujfalusi } 236068608b5eSPeter Ujfalusi 236168608b5eSPeter Ujfalusi /** 236268608b5eSPeter Ujfalusi * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel 236368608b5eSPeter Ujfalusi * @handle: Pointer to TI SCI handle. 236468608b5eSPeter Ujfalusi * @params: Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config 236568608b5eSPeter Ujfalusi * structure 236668608b5eSPeter Ujfalusi * 236768608b5eSPeter Ujfalusi * Return: 0 if all went well, else returns appropriate error value. 236868608b5eSPeter Ujfalusi * 236968608b5eSPeter Ujfalusi * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for 237068608b5eSPeter Ujfalusi * more info. 
237168608b5eSPeter Ujfalusi */ 237268608b5eSPeter Ujfalusi static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle, 237368608b5eSPeter Ujfalusi const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params) 237468608b5eSPeter Ujfalusi { 237568608b5eSPeter Ujfalusi struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req; 237668608b5eSPeter Ujfalusi struct ti_sci_msg_hdr *resp; 237768608b5eSPeter Ujfalusi struct ti_sci_xfer *xfer; 237868608b5eSPeter Ujfalusi struct ti_sci_info *info; 237968608b5eSPeter Ujfalusi struct device *dev; 238068608b5eSPeter Ujfalusi int ret = 0; 238168608b5eSPeter Ujfalusi 238268608b5eSPeter Ujfalusi if (IS_ERR_OR_NULL(handle)) 238368608b5eSPeter Ujfalusi return -EINVAL; 238468608b5eSPeter Ujfalusi 238568608b5eSPeter Ujfalusi info = handle_to_ti_sci_info(handle); 238668608b5eSPeter Ujfalusi dev = info->dev; 238768608b5eSPeter Ujfalusi 238868608b5eSPeter Ujfalusi xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG, 238968608b5eSPeter Ujfalusi TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 239068608b5eSPeter Ujfalusi sizeof(*req), sizeof(*resp)); 239168608b5eSPeter Ujfalusi if (IS_ERR(xfer)) { 239268608b5eSPeter Ujfalusi ret = PTR_ERR(xfer); 23934c960505SYueHaibing dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret); 239468608b5eSPeter Ujfalusi return ret; 239568608b5eSPeter Ujfalusi } 239668608b5eSPeter Ujfalusi req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf; 239768608b5eSPeter Ujfalusi req->valid_params = params->valid_params; 239868608b5eSPeter Ujfalusi req->nav_id = params->nav_id; 239968608b5eSPeter Ujfalusi req->index = params->index; 240068608b5eSPeter Ujfalusi req->tx_pause_on_err = params->tx_pause_on_err; 240168608b5eSPeter Ujfalusi req->tx_filt_einfo = params->tx_filt_einfo; 240268608b5eSPeter Ujfalusi req->tx_filt_pswords = params->tx_filt_pswords; 240368608b5eSPeter Ujfalusi req->tx_atype = params->tx_atype; 240468608b5eSPeter Ujfalusi req->tx_chan_type = params->tx_chan_type; 240568608b5eSPeter Ujfalusi 
req->tx_supr_tdpkt = params->tx_supr_tdpkt; 240668608b5eSPeter Ujfalusi req->tx_fetch_size = params->tx_fetch_size; 240768608b5eSPeter Ujfalusi req->tx_credit_count = params->tx_credit_count; 240868608b5eSPeter Ujfalusi req->txcq_qnum = params->txcq_qnum; 240968608b5eSPeter Ujfalusi req->tx_priority = params->tx_priority; 241068608b5eSPeter Ujfalusi req->tx_qos = params->tx_qos; 241168608b5eSPeter Ujfalusi req->tx_orderid = params->tx_orderid; 241268608b5eSPeter Ujfalusi req->fdepth = params->fdepth; 241368608b5eSPeter Ujfalusi req->tx_sched_priority = params->tx_sched_priority; 241468608b5eSPeter Ujfalusi req->tx_burst_size = params->tx_burst_size; 241568608b5eSPeter Ujfalusi 241668608b5eSPeter Ujfalusi ret = ti_sci_do_xfer(info, xfer); 241768608b5eSPeter Ujfalusi if (ret) { 24184c960505SYueHaibing dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret); 241968608b5eSPeter Ujfalusi goto fail; 242068608b5eSPeter Ujfalusi } 242168608b5eSPeter Ujfalusi 242268608b5eSPeter Ujfalusi resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 242368608b5eSPeter Ujfalusi ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 242468608b5eSPeter Ujfalusi 242568608b5eSPeter Ujfalusi fail: 242668608b5eSPeter Ujfalusi ti_sci_put_one_xfer(&info->minfo, xfer); 24274c960505SYueHaibing dev_dbg(dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret); 242868608b5eSPeter Ujfalusi return ret; 242968608b5eSPeter Ujfalusi } 243068608b5eSPeter Ujfalusi 243168608b5eSPeter Ujfalusi /** 243268608b5eSPeter Ujfalusi * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel 243368608b5eSPeter Ujfalusi * @handle: Pointer to TI SCI handle. 243468608b5eSPeter Ujfalusi * @params: Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config 243568608b5eSPeter Ujfalusi * structure 243668608b5eSPeter Ujfalusi * 243768608b5eSPeter Ujfalusi * Return: 0 if all went well, else returns appropriate error value. 
243868608b5eSPeter Ujfalusi * 243968608b5eSPeter Ujfalusi * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for 244068608b5eSPeter Ujfalusi * more info. 244168608b5eSPeter Ujfalusi */ 244268608b5eSPeter Ujfalusi static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle, 244368608b5eSPeter Ujfalusi const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params) 244468608b5eSPeter Ujfalusi { 244568608b5eSPeter Ujfalusi struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req; 244668608b5eSPeter Ujfalusi struct ti_sci_msg_hdr *resp; 244768608b5eSPeter Ujfalusi struct ti_sci_xfer *xfer; 244868608b5eSPeter Ujfalusi struct ti_sci_info *info; 244968608b5eSPeter Ujfalusi struct device *dev; 245068608b5eSPeter Ujfalusi int ret = 0; 245168608b5eSPeter Ujfalusi 245268608b5eSPeter Ujfalusi if (IS_ERR_OR_NULL(handle)) 245368608b5eSPeter Ujfalusi return -EINVAL; 245468608b5eSPeter Ujfalusi 245568608b5eSPeter Ujfalusi info = handle_to_ti_sci_info(handle); 245668608b5eSPeter Ujfalusi dev = info->dev; 245768608b5eSPeter Ujfalusi 245868608b5eSPeter Ujfalusi xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG, 245968608b5eSPeter Ujfalusi TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 246068608b5eSPeter Ujfalusi sizeof(*req), sizeof(*resp)); 246168608b5eSPeter Ujfalusi if (IS_ERR(xfer)) { 246268608b5eSPeter Ujfalusi ret = PTR_ERR(xfer); 24634c960505SYueHaibing dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret); 246468608b5eSPeter Ujfalusi return ret; 246568608b5eSPeter Ujfalusi } 246668608b5eSPeter Ujfalusi req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf; 246768608b5eSPeter Ujfalusi req->valid_params = params->valid_params; 246868608b5eSPeter Ujfalusi req->nav_id = params->nav_id; 246968608b5eSPeter Ujfalusi req->index = params->index; 247068608b5eSPeter Ujfalusi req->rx_fetch_size = params->rx_fetch_size; 247168608b5eSPeter Ujfalusi req->rxcq_qnum = params->rxcq_qnum; 247268608b5eSPeter Ujfalusi req->rx_priority = params->rx_priority; 
247368608b5eSPeter Ujfalusi req->rx_qos = params->rx_qos; 247468608b5eSPeter Ujfalusi req->rx_orderid = params->rx_orderid; 247568608b5eSPeter Ujfalusi req->rx_sched_priority = params->rx_sched_priority; 247668608b5eSPeter Ujfalusi req->flowid_start = params->flowid_start; 247768608b5eSPeter Ujfalusi req->flowid_cnt = params->flowid_cnt; 247868608b5eSPeter Ujfalusi req->rx_pause_on_err = params->rx_pause_on_err; 247968608b5eSPeter Ujfalusi req->rx_atype = params->rx_atype; 248068608b5eSPeter Ujfalusi req->rx_chan_type = params->rx_chan_type; 248168608b5eSPeter Ujfalusi req->rx_ignore_short = params->rx_ignore_short; 248268608b5eSPeter Ujfalusi req->rx_ignore_long = params->rx_ignore_long; 248368608b5eSPeter Ujfalusi req->rx_burst_size = params->rx_burst_size; 248468608b5eSPeter Ujfalusi 248568608b5eSPeter Ujfalusi ret = ti_sci_do_xfer(info, xfer); 248668608b5eSPeter Ujfalusi if (ret) { 24874c960505SYueHaibing dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret); 248868608b5eSPeter Ujfalusi goto fail; 248968608b5eSPeter Ujfalusi } 249068608b5eSPeter Ujfalusi 249168608b5eSPeter Ujfalusi resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 249268608b5eSPeter Ujfalusi ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 249368608b5eSPeter Ujfalusi 249468608b5eSPeter Ujfalusi fail: 249568608b5eSPeter Ujfalusi ti_sci_put_one_xfer(&info->minfo, xfer); 24964c960505SYueHaibing dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret); 249768608b5eSPeter Ujfalusi return ret; 249868608b5eSPeter Ujfalusi } 249968608b5eSPeter Ujfalusi 250068608b5eSPeter Ujfalusi /** 250168608b5eSPeter Ujfalusi * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW 250268608b5eSPeter Ujfalusi * @handle: Pointer to TI SCI handle. 250368608b5eSPeter Ujfalusi * @params: Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config 250468608b5eSPeter Ujfalusi * structure 250568608b5eSPeter Ujfalusi * 250668608b5eSPeter Ujfalusi * Return: 0 if all went well, else returns appropriate error value. 
250768608b5eSPeter Ujfalusi * 250868608b5eSPeter Ujfalusi * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for 250968608b5eSPeter Ujfalusi * more info. 251068608b5eSPeter Ujfalusi */ 251168608b5eSPeter Ujfalusi static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle, 251268608b5eSPeter Ujfalusi const struct ti_sci_msg_rm_udmap_flow_cfg *params) 251368608b5eSPeter Ujfalusi { 251468608b5eSPeter Ujfalusi struct ti_sci_msg_rm_udmap_flow_cfg_req *req; 251568608b5eSPeter Ujfalusi struct ti_sci_msg_hdr *resp; 251668608b5eSPeter Ujfalusi struct ti_sci_xfer *xfer; 251768608b5eSPeter Ujfalusi struct ti_sci_info *info; 251868608b5eSPeter Ujfalusi struct device *dev; 251968608b5eSPeter Ujfalusi int ret = 0; 252068608b5eSPeter Ujfalusi 252168608b5eSPeter Ujfalusi if (IS_ERR_OR_NULL(handle)) 252268608b5eSPeter Ujfalusi return -EINVAL; 252368608b5eSPeter Ujfalusi 252468608b5eSPeter Ujfalusi info = handle_to_ti_sci_info(handle); 252568608b5eSPeter Ujfalusi dev = info->dev; 252668608b5eSPeter Ujfalusi 252768608b5eSPeter Ujfalusi xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG, 252868608b5eSPeter Ujfalusi TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 252968608b5eSPeter Ujfalusi sizeof(*req), sizeof(*resp)); 253068608b5eSPeter Ujfalusi if (IS_ERR(xfer)) { 253168608b5eSPeter Ujfalusi ret = PTR_ERR(xfer); 253268608b5eSPeter Ujfalusi dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret); 253368608b5eSPeter Ujfalusi return ret; 253468608b5eSPeter Ujfalusi } 253568608b5eSPeter Ujfalusi req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf; 253668608b5eSPeter Ujfalusi req->valid_params = params->valid_params; 253768608b5eSPeter Ujfalusi req->nav_id = params->nav_id; 253868608b5eSPeter Ujfalusi req->flow_index = params->flow_index; 253968608b5eSPeter Ujfalusi req->rx_einfo_present = params->rx_einfo_present; 254068608b5eSPeter Ujfalusi req->rx_psinfo_present = params->rx_psinfo_present; 254168608b5eSPeter Ujfalusi 
req->rx_error_handling = params->rx_error_handling; 254268608b5eSPeter Ujfalusi req->rx_desc_type = params->rx_desc_type; 254368608b5eSPeter Ujfalusi req->rx_sop_offset = params->rx_sop_offset; 254468608b5eSPeter Ujfalusi req->rx_dest_qnum = params->rx_dest_qnum; 254568608b5eSPeter Ujfalusi req->rx_src_tag_hi = params->rx_src_tag_hi; 254668608b5eSPeter Ujfalusi req->rx_src_tag_lo = params->rx_src_tag_lo; 254768608b5eSPeter Ujfalusi req->rx_dest_tag_hi = params->rx_dest_tag_hi; 254868608b5eSPeter Ujfalusi req->rx_dest_tag_lo = params->rx_dest_tag_lo; 254968608b5eSPeter Ujfalusi req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel; 255068608b5eSPeter Ujfalusi req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel; 255168608b5eSPeter Ujfalusi req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel; 255268608b5eSPeter Ujfalusi req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel; 255368608b5eSPeter Ujfalusi req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum; 255468608b5eSPeter Ujfalusi req->rx_fdq1_qnum = params->rx_fdq1_qnum; 255568608b5eSPeter Ujfalusi req->rx_fdq2_qnum = params->rx_fdq2_qnum; 255668608b5eSPeter Ujfalusi req->rx_fdq3_qnum = params->rx_fdq3_qnum; 255768608b5eSPeter Ujfalusi req->rx_ps_location = params->rx_ps_location; 255868608b5eSPeter Ujfalusi 255968608b5eSPeter Ujfalusi ret = ti_sci_do_xfer(info, xfer); 256068608b5eSPeter Ujfalusi if (ret) { 256168608b5eSPeter Ujfalusi dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret); 256268608b5eSPeter Ujfalusi goto fail; 256368608b5eSPeter Ujfalusi } 256468608b5eSPeter Ujfalusi 256568608b5eSPeter Ujfalusi resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 256668608b5eSPeter Ujfalusi ret = ti_sci_is_response_ack(resp) ? 
0 : -EINVAL; 256768608b5eSPeter Ujfalusi 256868608b5eSPeter Ujfalusi fail: 256968608b5eSPeter Ujfalusi ti_sci_put_one_xfer(&info->minfo, xfer); 257068608b5eSPeter Ujfalusi dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret); 257168608b5eSPeter Ujfalusi return ret; 257268608b5eSPeter Ujfalusi } 257368608b5eSPeter Ujfalusi 25741e407f33SSuman Anna /** 25751e407f33SSuman Anna * ti_sci_cmd_proc_request() - Command to request a physical processor control 25761e407f33SSuman Anna * @handle: Pointer to TI SCI handle 25771e407f33SSuman Anna * @proc_id: Processor ID this request is for 25781e407f33SSuman Anna * 25791e407f33SSuman Anna * Return: 0 if all went well, else returns appropriate error value. 25801e407f33SSuman Anna */ 25811e407f33SSuman Anna static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle, 25821e407f33SSuman Anna u8 proc_id) 25831e407f33SSuman Anna { 25841e407f33SSuman Anna struct ti_sci_msg_req_proc_request *req; 25851e407f33SSuman Anna struct ti_sci_msg_hdr *resp; 25861e407f33SSuman Anna struct ti_sci_info *info; 25871e407f33SSuman Anna struct ti_sci_xfer *xfer; 25881e407f33SSuman Anna struct device *dev; 25891e407f33SSuman Anna int ret = 0; 25901e407f33SSuman Anna 25911e407f33SSuman Anna if (!handle) 25921e407f33SSuman Anna return -EINVAL; 25931e407f33SSuman Anna if (IS_ERR(handle)) 25941e407f33SSuman Anna return PTR_ERR(handle); 25951e407f33SSuman Anna 25961e407f33SSuman Anna info = handle_to_ti_sci_info(handle); 25971e407f33SSuman Anna dev = info->dev; 25981e407f33SSuman Anna 25991e407f33SSuman Anna xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST, 26001e407f33SSuman Anna TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 26011e407f33SSuman Anna sizeof(*req), sizeof(*resp)); 26021e407f33SSuman Anna if (IS_ERR(xfer)) { 26031e407f33SSuman Anna ret = PTR_ERR(xfer); 26041e407f33SSuman Anna dev_err(dev, "Message alloc failed(%d)\n", ret); 26051e407f33SSuman Anna return ret; 26061e407f33SSuman Anna } 26071e407f33SSuman Anna req = 
(struct ti_sci_msg_req_proc_request *)xfer->xfer_buf; 26081e407f33SSuman Anna req->processor_id = proc_id; 26091e407f33SSuman Anna 26101e407f33SSuman Anna ret = ti_sci_do_xfer(info, xfer); 26111e407f33SSuman Anna if (ret) { 26121e407f33SSuman Anna dev_err(dev, "Mbox send fail %d\n", ret); 26131e407f33SSuman Anna goto fail; 26141e407f33SSuman Anna } 26151e407f33SSuman Anna 26161e407f33SSuman Anna resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 26171e407f33SSuman Anna 26181e407f33SSuman Anna ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 26191e407f33SSuman Anna 26201e407f33SSuman Anna fail: 26211e407f33SSuman Anna ti_sci_put_one_xfer(&info->minfo, xfer); 26221e407f33SSuman Anna 26231e407f33SSuman Anna return ret; 26241e407f33SSuman Anna } 26251e407f33SSuman Anna 26261e407f33SSuman Anna /** 26271e407f33SSuman Anna * ti_sci_cmd_proc_release() - Command to release a physical processor control 26281e407f33SSuman Anna * @handle: Pointer to TI SCI handle 26291e407f33SSuman Anna * @proc_id: Processor ID this request is for 26301e407f33SSuman Anna * 26311e407f33SSuman Anna * Return: 0 if all went well, else returns appropriate error value. 
26321e407f33SSuman Anna */ 26331e407f33SSuman Anna static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle, 26341e407f33SSuman Anna u8 proc_id) 26351e407f33SSuman Anna { 26361e407f33SSuman Anna struct ti_sci_msg_req_proc_release *req; 26371e407f33SSuman Anna struct ti_sci_msg_hdr *resp; 26381e407f33SSuman Anna struct ti_sci_info *info; 26391e407f33SSuman Anna struct ti_sci_xfer *xfer; 26401e407f33SSuman Anna struct device *dev; 26411e407f33SSuman Anna int ret = 0; 26421e407f33SSuman Anna 26431e407f33SSuman Anna if (!handle) 26441e407f33SSuman Anna return -EINVAL; 26451e407f33SSuman Anna if (IS_ERR(handle)) 26461e407f33SSuman Anna return PTR_ERR(handle); 26471e407f33SSuman Anna 26481e407f33SSuman Anna info = handle_to_ti_sci_info(handle); 26491e407f33SSuman Anna dev = info->dev; 26501e407f33SSuman Anna 26511e407f33SSuman Anna xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE, 26521e407f33SSuman Anna TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 26531e407f33SSuman Anna sizeof(*req), sizeof(*resp)); 26541e407f33SSuman Anna if (IS_ERR(xfer)) { 26551e407f33SSuman Anna ret = PTR_ERR(xfer); 26561e407f33SSuman Anna dev_err(dev, "Message alloc failed(%d)\n", ret); 26571e407f33SSuman Anna return ret; 26581e407f33SSuman Anna } 26591e407f33SSuman Anna req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf; 26601e407f33SSuman Anna req->processor_id = proc_id; 26611e407f33SSuman Anna 26621e407f33SSuman Anna ret = ti_sci_do_xfer(info, xfer); 26631e407f33SSuman Anna if (ret) { 26641e407f33SSuman Anna dev_err(dev, "Mbox send fail %d\n", ret); 26651e407f33SSuman Anna goto fail; 26661e407f33SSuman Anna } 26671e407f33SSuman Anna 26681e407f33SSuman Anna resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 26691e407f33SSuman Anna 26701e407f33SSuman Anna ret = ti_sci_is_response_ack(resp) ? 
0 : -ENODEV; 26711e407f33SSuman Anna 26721e407f33SSuman Anna fail: 26731e407f33SSuman Anna ti_sci_put_one_xfer(&info->minfo, xfer); 26741e407f33SSuman Anna 26751e407f33SSuman Anna return ret; 26761e407f33SSuman Anna } 26771e407f33SSuman Anna 26781e407f33SSuman Anna /** 26791e407f33SSuman Anna * ti_sci_cmd_proc_handover() - Command to handover a physical processor 26801e407f33SSuman Anna * control to a host in the processor's access 26811e407f33SSuman Anna * control list. 26821e407f33SSuman Anna * @handle: Pointer to TI SCI handle 26831e407f33SSuman Anna * @proc_id: Processor ID this request is for 26841e407f33SSuman Anna * @host_id: Host ID to get the control of the processor 26851e407f33SSuman Anna * 26861e407f33SSuman Anna * Return: 0 if all went well, else returns appropriate error value. 26871e407f33SSuman Anna */ 26881e407f33SSuman Anna static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle, 26891e407f33SSuman Anna u8 proc_id, u8 host_id) 26901e407f33SSuman Anna { 26911e407f33SSuman Anna struct ti_sci_msg_req_proc_handover *req; 26921e407f33SSuman Anna struct ti_sci_msg_hdr *resp; 26931e407f33SSuman Anna struct ti_sci_info *info; 26941e407f33SSuman Anna struct ti_sci_xfer *xfer; 26951e407f33SSuman Anna struct device *dev; 26961e407f33SSuman Anna int ret = 0; 26971e407f33SSuman Anna 26981e407f33SSuman Anna if (!handle) 26991e407f33SSuman Anna return -EINVAL; 27001e407f33SSuman Anna if (IS_ERR(handle)) 27011e407f33SSuman Anna return PTR_ERR(handle); 27021e407f33SSuman Anna 27031e407f33SSuman Anna info = handle_to_ti_sci_info(handle); 27041e407f33SSuman Anna dev = info->dev; 27051e407f33SSuman Anna 27061e407f33SSuman Anna xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER, 27071e407f33SSuman Anna TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 27081e407f33SSuman Anna sizeof(*req), sizeof(*resp)); 27091e407f33SSuman Anna if (IS_ERR(xfer)) { 27101e407f33SSuman Anna ret = PTR_ERR(xfer); 27111e407f33SSuman Anna dev_err(dev, "Message alloc failed(%d)\n", 
ret); 27121e407f33SSuman Anna return ret; 27131e407f33SSuman Anna } 27141e407f33SSuman Anna req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf; 27151e407f33SSuman Anna req->processor_id = proc_id; 27161e407f33SSuman Anna req->host_id = host_id; 27171e407f33SSuman Anna 27181e407f33SSuman Anna ret = ti_sci_do_xfer(info, xfer); 27191e407f33SSuman Anna if (ret) { 27201e407f33SSuman Anna dev_err(dev, "Mbox send fail %d\n", ret); 27211e407f33SSuman Anna goto fail; 27221e407f33SSuman Anna } 27231e407f33SSuman Anna 27241e407f33SSuman Anna resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 27251e407f33SSuman Anna 27261e407f33SSuman Anna ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 27271e407f33SSuman Anna 27281e407f33SSuman Anna fail: 27291e407f33SSuman Anna ti_sci_put_one_xfer(&info->minfo, xfer); 27301e407f33SSuman Anna 27311e407f33SSuman Anna return ret; 27321e407f33SSuman Anna } 27331e407f33SSuman Anna 27341e407f33SSuman Anna /** 27351e407f33SSuman Anna * ti_sci_cmd_proc_set_config() - Command to set the processor boot 27361e407f33SSuman Anna * configuration flags 27371e407f33SSuman Anna * @handle: Pointer to TI SCI handle 27381e407f33SSuman Anna * @proc_id: Processor ID this request is for 27391e407f33SSuman Anna * @config_flags_set: Configuration flags to be set 27401e407f33SSuman Anna * @config_flags_clear: Configuration flags to be cleared. 27411e407f33SSuman Anna * 27421e407f33SSuman Anna * Return: 0 if all went well, else returns appropriate error value. 
27431e407f33SSuman Anna */ 27441e407f33SSuman Anna static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle, 27451e407f33SSuman Anna u8 proc_id, u64 bootvector, 27461e407f33SSuman Anna u32 config_flags_set, 27471e407f33SSuman Anna u32 config_flags_clear) 27481e407f33SSuman Anna { 27491e407f33SSuman Anna struct ti_sci_msg_req_set_config *req; 27501e407f33SSuman Anna struct ti_sci_msg_hdr *resp; 27511e407f33SSuman Anna struct ti_sci_info *info; 27521e407f33SSuman Anna struct ti_sci_xfer *xfer; 27531e407f33SSuman Anna struct device *dev; 27541e407f33SSuman Anna int ret = 0; 27551e407f33SSuman Anna 27561e407f33SSuman Anna if (!handle) 27571e407f33SSuman Anna return -EINVAL; 27581e407f33SSuman Anna if (IS_ERR(handle)) 27591e407f33SSuman Anna return PTR_ERR(handle); 27601e407f33SSuman Anna 27611e407f33SSuman Anna info = handle_to_ti_sci_info(handle); 27621e407f33SSuman Anna dev = info->dev; 27631e407f33SSuman Anna 27641e407f33SSuman Anna xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG, 27651e407f33SSuman Anna TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 27661e407f33SSuman Anna sizeof(*req), sizeof(*resp)); 27671e407f33SSuman Anna if (IS_ERR(xfer)) { 27681e407f33SSuman Anna ret = PTR_ERR(xfer); 27691e407f33SSuman Anna dev_err(dev, "Message alloc failed(%d)\n", ret); 27701e407f33SSuman Anna return ret; 27711e407f33SSuman Anna } 27721e407f33SSuman Anna req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf; 27731e407f33SSuman Anna req->processor_id = proc_id; 27741e407f33SSuman Anna req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK; 27751e407f33SSuman Anna req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >> 27761e407f33SSuman Anna TI_SCI_ADDR_HIGH_SHIFT; 27771e407f33SSuman Anna req->config_flags_set = config_flags_set; 27781e407f33SSuman Anna req->config_flags_clear = config_flags_clear; 27791e407f33SSuman Anna 27801e407f33SSuman Anna ret = ti_sci_do_xfer(info, xfer); 27811e407f33SSuman Anna if (ret) { 27821e407f33SSuman Anna dev_err(dev, 
"Mbox send fail %d\n", ret); 27831e407f33SSuman Anna goto fail; 27841e407f33SSuman Anna } 27851e407f33SSuman Anna 27861e407f33SSuman Anna resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 27871e407f33SSuman Anna 27881e407f33SSuman Anna ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 27891e407f33SSuman Anna 27901e407f33SSuman Anna fail: 27911e407f33SSuman Anna ti_sci_put_one_xfer(&info->minfo, xfer); 27921e407f33SSuman Anna 27931e407f33SSuman Anna return ret; 27941e407f33SSuman Anna } 27951e407f33SSuman Anna 27961e407f33SSuman Anna /** 27971e407f33SSuman Anna * ti_sci_cmd_proc_set_control() - Command to set the processor boot 27981e407f33SSuman Anna * control flags 27991e407f33SSuman Anna * @handle: Pointer to TI SCI handle 28001e407f33SSuman Anna * @proc_id: Processor ID this request is for 28011e407f33SSuman Anna * @control_flags_set: Control flags to be set 28021e407f33SSuman Anna * @control_flags_clear: Control flags to be cleared 28031e407f33SSuman Anna * 28041e407f33SSuman Anna * Return: 0 if all went well, else returns appropriate error value. 
28051e407f33SSuman Anna */ 28061e407f33SSuman Anna static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle, 28071e407f33SSuman Anna u8 proc_id, u32 control_flags_set, 28081e407f33SSuman Anna u32 control_flags_clear) 28091e407f33SSuman Anna { 28101e407f33SSuman Anna struct ti_sci_msg_req_set_ctrl *req; 28111e407f33SSuman Anna struct ti_sci_msg_hdr *resp; 28121e407f33SSuman Anna struct ti_sci_info *info; 28131e407f33SSuman Anna struct ti_sci_xfer *xfer; 28141e407f33SSuman Anna struct device *dev; 28151e407f33SSuman Anna int ret = 0; 28161e407f33SSuman Anna 28171e407f33SSuman Anna if (!handle) 28181e407f33SSuman Anna return -EINVAL; 28191e407f33SSuman Anna if (IS_ERR(handle)) 28201e407f33SSuman Anna return PTR_ERR(handle); 28211e407f33SSuman Anna 28221e407f33SSuman Anna info = handle_to_ti_sci_info(handle); 28231e407f33SSuman Anna dev = info->dev; 28241e407f33SSuman Anna 28251e407f33SSuman Anna xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL, 28261e407f33SSuman Anna TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 28271e407f33SSuman Anna sizeof(*req), sizeof(*resp)); 28281e407f33SSuman Anna if (IS_ERR(xfer)) { 28291e407f33SSuman Anna ret = PTR_ERR(xfer); 28301e407f33SSuman Anna dev_err(dev, "Message alloc failed(%d)\n", ret); 28311e407f33SSuman Anna return ret; 28321e407f33SSuman Anna } 28331e407f33SSuman Anna req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf; 28341e407f33SSuman Anna req->processor_id = proc_id; 28351e407f33SSuman Anna req->control_flags_set = control_flags_set; 28361e407f33SSuman Anna req->control_flags_clear = control_flags_clear; 28371e407f33SSuman Anna 28381e407f33SSuman Anna ret = ti_sci_do_xfer(info, xfer); 28391e407f33SSuman Anna if (ret) { 28401e407f33SSuman Anna dev_err(dev, "Mbox send fail %d\n", ret); 28411e407f33SSuman Anna goto fail; 28421e407f33SSuman Anna } 28431e407f33SSuman Anna 28441e407f33SSuman Anna resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 28451e407f33SSuman Anna 28461e407f33SSuman Anna ret = 
ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 28471e407f33SSuman Anna 28481e407f33SSuman Anna fail: 28491e407f33SSuman Anna ti_sci_put_one_xfer(&info->minfo, xfer); 28501e407f33SSuman Anna 28511e407f33SSuman Anna return ret; 28521e407f33SSuman Anna } 28531e407f33SSuman Anna 28541e407f33SSuman Anna /** 28551e407f33SSuman Anna * ti_sci_cmd_get_boot_status() - Command to get the processor boot status 28561e407f33SSuman Anna * @handle: Pointer to TI SCI handle 28571e407f33SSuman Anna * @proc_id: Processor ID this request is for 28581e407f33SSuman Anna * 28591e407f33SSuman Anna * Return: 0 if all went well, else returns appropriate error value. 28601e407f33SSuman Anna */ 28611e407f33SSuman Anna static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle, 28621e407f33SSuman Anna u8 proc_id, u64 *bv, u32 *cfg_flags, 28631e407f33SSuman Anna u32 *ctrl_flags, u32 *sts_flags) 28641e407f33SSuman Anna { 28651e407f33SSuman Anna struct ti_sci_msg_resp_get_status *resp; 28661e407f33SSuman Anna struct ti_sci_msg_req_get_status *req; 28671e407f33SSuman Anna struct ti_sci_info *info; 28681e407f33SSuman Anna struct ti_sci_xfer *xfer; 28691e407f33SSuman Anna struct device *dev; 28701e407f33SSuman Anna int ret = 0; 28711e407f33SSuman Anna 28721e407f33SSuman Anna if (!handle) 28731e407f33SSuman Anna return -EINVAL; 28741e407f33SSuman Anna if (IS_ERR(handle)) 28751e407f33SSuman Anna return PTR_ERR(handle); 28761e407f33SSuman Anna 28771e407f33SSuman Anna info = handle_to_ti_sci_info(handle); 28781e407f33SSuman Anna dev = info->dev; 28791e407f33SSuman Anna 28801e407f33SSuman Anna xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS, 28811e407f33SSuman Anna TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 28821e407f33SSuman Anna sizeof(*req), sizeof(*resp)); 28831e407f33SSuman Anna if (IS_ERR(xfer)) { 28841e407f33SSuman Anna ret = PTR_ERR(xfer); 28851e407f33SSuman Anna dev_err(dev, "Message alloc failed(%d)\n", ret); 28861e407f33SSuman Anna return ret; 28871e407f33SSuman Anna } 
28881e407f33SSuman Anna req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf; 28891e407f33SSuman Anna req->processor_id = proc_id; 28901e407f33SSuman Anna 28911e407f33SSuman Anna ret = ti_sci_do_xfer(info, xfer); 28921e407f33SSuman Anna if (ret) { 28931e407f33SSuman Anna dev_err(dev, "Mbox send fail %d\n", ret); 28941e407f33SSuman Anna goto fail; 28951e407f33SSuman Anna } 28961e407f33SSuman Anna 28971e407f33SSuman Anna resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf; 28981e407f33SSuman Anna 28991e407f33SSuman Anna if (!ti_sci_is_response_ack(resp)) { 29001e407f33SSuman Anna ret = -ENODEV; 29011e407f33SSuman Anna } else { 29021e407f33SSuman Anna *bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) | 29031e407f33SSuman Anna (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) & 29041e407f33SSuman Anna TI_SCI_ADDR_HIGH_MASK); 29051e407f33SSuman Anna *cfg_flags = resp->config_flags; 29061e407f33SSuman Anna *ctrl_flags = resp->control_flags; 29071e407f33SSuman Anna *sts_flags = resp->status_flags; 29081e407f33SSuman Anna } 29091e407f33SSuman Anna 29101e407f33SSuman Anna fail: 29111e407f33SSuman Anna ti_sci_put_one_xfer(&info->minfo, xfer); 29121e407f33SSuman Anna 29131e407f33SSuman Anna return ret; 29141e407f33SSuman Anna } 29151e407f33SSuman Anna 29169e7d756dSNishanth Menon /* 29179e7d756dSNishanth Menon * ti_sci_setup_ops() - Setup the operations structures 29189e7d756dSNishanth Menon * @info: pointer to TISCI pointer 29199e7d756dSNishanth Menon */ 29209e7d756dSNishanth Menon static void ti_sci_setup_ops(struct ti_sci_info *info) 29219e7d756dSNishanth Menon { 29229e7d756dSNishanth Menon struct ti_sci_ops *ops = &info->handle.ops; 2923912cffb4SNishanth Menon struct ti_sci_core_ops *core_ops = &ops->core_ops; 29249e7d756dSNishanth Menon struct ti_sci_dev_ops *dops = &ops->dev_ops; 29259f723220SNishanth Menon struct ti_sci_clk_ops *cops = &ops->clk_ops; 29269c19fb68SLokesh Vutla struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops; 
2927997b001fSLokesh Vutla struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops; 292868608b5eSPeter Ujfalusi struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops; 292968608b5eSPeter Ujfalusi struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops; 293068608b5eSPeter Ujfalusi struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops; 29311e407f33SSuman Anna struct ti_sci_proc_ops *pops = &ops->proc_ops; 29329e7d756dSNishanth Menon 2933912cffb4SNishanth Menon core_ops->reboot_device = ti_sci_cmd_core_reboot; 2934912cffb4SNishanth Menon 29359e7d756dSNishanth Menon dops->get_device = ti_sci_cmd_get_device; 2936*45b659eeSLokesh Vutla dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive; 29379e7d756dSNishanth Menon dops->idle_device = ti_sci_cmd_idle_device; 2938*45b659eeSLokesh Vutla dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive; 29399e7d756dSNishanth Menon dops->put_device = ti_sci_cmd_put_device; 29409e7d756dSNishanth Menon 29419e7d756dSNishanth Menon dops->is_valid = ti_sci_cmd_dev_is_valid; 29429e7d756dSNishanth Menon dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt; 29439e7d756dSNishanth Menon dops->is_idle = ti_sci_cmd_dev_is_idle; 29449e7d756dSNishanth Menon dops->is_stop = ti_sci_cmd_dev_is_stop; 29459e7d756dSNishanth Menon dops->is_on = ti_sci_cmd_dev_is_on; 29469e7d756dSNishanth Menon dops->is_transitioning = ti_sci_cmd_dev_is_trans; 29479e7d756dSNishanth Menon dops->set_device_resets = ti_sci_cmd_set_device_resets; 29489e7d756dSNishanth Menon dops->get_device_resets = ti_sci_cmd_get_device_resets; 29499f723220SNishanth Menon 29509f723220SNishanth Menon cops->get_clock = ti_sci_cmd_get_clock; 29519f723220SNishanth Menon cops->idle_clock = ti_sci_cmd_idle_clock; 29529f723220SNishanth Menon cops->put_clock = ti_sci_cmd_put_clock; 29539f723220SNishanth Menon cops->is_auto = ti_sci_cmd_clk_is_auto; 29549f723220SNishanth Menon cops->is_on = ti_sci_cmd_clk_is_on; 29559f723220SNishanth Menon cops->is_off = ti_sci_cmd_clk_is_off; 
29569f723220SNishanth Menon 29579f723220SNishanth Menon cops->set_parent = ti_sci_cmd_clk_set_parent; 29589f723220SNishanth Menon cops->get_parent = ti_sci_cmd_clk_get_parent; 29599f723220SNishanth Menon cops->get_num_parents = ti_sci_cmd_clk_get_num_parents; 29609f723220SNishanth Menon 29619f723220SNishanth Menon cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq; 29629f723220SNishanth Menon cops->set_freq = ti_sci_cmd_clk_set_freq; 29639f723220SNishanth Menon cops->get_freq = ti_sci_cmd_clk_get_freq; 29649c19fb68SLokesh Vutla 29659c19fb68SLokesh Vutla rm_core_ops->get_range = ti_sci_cmd_get_resource_range; 29669c19fb68SLokesh Vutla rm_core_ops->get_range_from_shost = 29679c19fb68SLokesh Vutla ti_sci_cmd_get_resource_range_from_shost; 2968997b001fSLokesh Vutla 2969997b001fSLokesh Vutla iops->set_irq = ti_sci_cmd_set_irq; 2970997b001fSLokesh Vutla iops->set_event_map = ti_sci_cmd_set_event_map; 2971997b001fSLokesh Vutla iops->free_irq = ti_sci_cmd_free_irq; 2972997b001fSLokesh Vutla iops->free_event_map = ti_sci_cmd_free_event_map; 297368608b5eSPeter Ujfalusi 297468608b5eSPeter Ujfalusi rops->config = ti_sci_cmd_ring_config; 297568608b5eSPeter Ujfalusi rops->get_config = ti_sci_cmd_ring_get_config; 297668608b5eSPeter Ujfalusi 297768608b5eSPeter Ujfalusi psilops->pair = ti_sci_cmd_rm_psil_pair; 297868608b5eSPeter Ujfalusi psilops->unpair = ti_sci_cmd_rm_psil_unpair; 297968608b5eSPeter Ujfalusi 298068608b5eSPeter Ujfalusi udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg; 298168608b5eSPeter Ujfalusi udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg; 298268608b5eSPeter Ujfalusi udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg; 29831e407f33SSuman Anna 29841e407f33SSuman Anna pops->request = ti_sci_cmd_proc_request; 29851e407f33SSuman Anna pops->release = ti_sci_cmd_proc_release; 29861e407f33SSuman Anna pops->handover = ti_sci_cmd_proc_handover; 29871e407f33SSuman Anna pops->set_config = ti_sci_cmd_proc_set_config; 29881e407f33SSuman Anna 
pops->set_control = ti_sci_cmd_proc_set_control; 29891e407f33SSuman Anna pops->get_status = ti_sci_cmd_proc_get_status; 29909e7d756dSNishanth Menon } 29919e7d756dSNishanth Menon 29929e7d756dSNishanth Menon /** 2993aa276781SNishanth Menon * ti_sci_get_handle() - Get the TI SCI handle for a device 2994aa276781SNishanth Menon * @dev: Pointer to device for which we want SCI handle 2995aa276781SNishanth Menon * 2996aa276781SNishanth Menon * NOTE: The function does not track individual clients of the framework 2997aa276781SNishanth Menon * and is expected to be maintained by caller of TI SCI protocol library. 2998aa276781SNishanth Menon * ti_sci_put_handle must be balanced with successful ti_sci_get_handle 2999aa276781SNishanth Menon * Return: pointer to handle if successful, else: 3000aa276781SNishanth Menon * -EPROBE_DEFER if the instance is not ready 3001aa276781SNishanth Menon * -ENODEV if the required node handler is missing 3002aa276781SNishanth Menon * -EINVAL if invalid conditions are encountered. 
3003aa276781SNishanth Menon */ 3004aa276781SNishanth Menon const struct ti_sci_handle *ti_sci_get_handle(struct device *dev) 3005aa276781SNishanth Menon { 3006aa276781SNishanth Menon struct device_node *ti_sci_np; 3007aa276781SNishanth Menon struct list_head *p; 3008aa276781SNishanth Menon struct ti_sci_handle *handle = NULL; 3009aa276781SNishanth Menon struct ti_sci_info *info; 3010aa276781SNishanth Menon 3011aa276781SNishanth Menon if (!dev) { 3012aa276781SNishanth Menon pr_err("I need a device pointer\n"); 3013aa276781SNishanth Menon return ERR_PTR(-EINVAL); 3014aa276781SNishanth Menon } 3015aa276781SNishanth Menon ti_sci_np = of_get_parent(dev->of_node); 3016aa276781SNishanth Menon if (!ti_sci_np) { 3017aa276781SNishanth Menon dev_err(dev, "No OF information\n"); 3018aa276781SNishanth Menon return ERR_PTR(-EINVAL); 3019aa276781SNishanth Menon } 3020aa276781SNishanth Menon 3021aa276781SNishanth Menon mutex_lock(&ti_sci_list_mutex); 3022aa276781SNishanth Menon list_for_each(p, &ti_sci_list) { 3023aa276781SNishanth Menon info = list_entry(p, struct ti_sci_info, node); 3024aa276781SNishanth Menon if (ti_sci_np == info->dev->of_node) { 3025aa276781SNishanth Menon handle = &info->handle; 3026aa276781SNishanth Menon info->users++; 3027aa276781SNishanth Menon break; 3028aa276781SNishanth Menon } 3029aa276781SNishanth Menon } 3030aa276781SNishanth Menon mutex_unlock(&ti_sci_list_mutex); 3031aa276781SNishanth Menon of_node_put(ti_sci_np); 3032aa276781SNishanth Menon 3033aa276781SNishanth Menon if (!handle) 3034aa276781SNishanth Menon return ERR_PTR(-EPROBE_DEFER); 3035aa276781SNishanth Menon 3036aa276781SNishanth Menon return handle; 3037aa276781SNishanth Menon } 3038aa276781SNishanth Menon EXPORT_SYMBOL_GPL(ti_sci_get_handle); 3039aa276781SNishanth Menon 3040aa276781SNishanth Menon /** 3041aa276781SNishanth Menon * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle 3042aa276781SNishanth Menon * @handle: Handle acquired by ti_sci_get_handle 
3043aa276781SNishanth Menon * 3044aa276781SNishanth Menon * NOTE: The function does not track individual clients of the framework 3045aa276781SNishanth Menon * and is expected to be maintained by caller of TI SCI protocol library. 3046aa276781SNishanth Menon * ti_sci_put_handle must be balanced with successful ti_sci_get_handle 3047aa276781SNishanth Menon * 3048aa276781SNishanth Menon * Return: 0 is successfully released 3049aa276781SNishanth Menon * if an error pointer was passed, it returns the error value back, 3050aa276781SNishanth Menon * if null was passed, it returns -EINVAL; 3051aa276781SNishanth Menon */ 3052aa276781SNishanth Menon int ti_sci_put_handle(const struct ti_sci_handle *handle) 3053aa276781SNishanth Menon { 3054aa276781SNishanth Menon struct ti_sci_info *info; 3055aa276781SNishanth Menon 3056aa276781SNishanth Menon if (IS_ERR(handle)) 3057aa276781SNishanth Menon return PTR_ERR(handle); 3058aa276781SNishanth Menon if (!handle) 3059aa276781SNishanth Menon return -EINVAL; 3060aa276781SNishanth Menon 3061aa276781SNishanth Menon info = handle_to_ti_sci_info(handle); 3062aa276781SNishanth Menon mutex_lock(&ti_sci_list_mutex); 3063aa276781SNishanth Menon if (!WARN_ON(!info->users)) 3064aa276781SNishanth Menon info->users--; 3065aa276781SNishanth Menon mutex_unlock(&ti_sci_list_mutex); 3066aa276781SNishanth Menon 3067aa276781SNishanth Menon return 0; 3068aa276781SNishanth Menon } 3069aa276781SNishanth Menon EXPORT_SYMBOL_GPL(ti_sci_put_handle); 3070aa276781SNishanth Menon 3071aa276781SNishanth Menon static void devm_ti_sci_release(struct device *dev, void *res) 3072aa276781SNishanth Menon { 3073aa276781SNishanth Menon const struct ti_sci_handle **ptr = res; 3074aa276781SNishanth Menon const struct ti_sci_handle *handle = *ptr; 3075aa276781SNishanth Menon int ret; 3076aa276781SNishanth Menon 3077aa276781SNishanth Menon ret = ti_sci_put_handle(handle); 3078aa276781SNishanth Menon if (ret) 3079aa276781SNishanth Menon dev_err(dev, "failed to put handle 
%d\n", ret); 3080aa276781SNishanth Menon } 3081aa276781SNishanth Menon 3082aa276781SNishanth Menon /** 3083aa276781SNishanth Menon * devm_ti_sci_get_handle() - Managed get handle 3084aa276781SNishanth Menon * @dev: device for which we want SCI handle for. 3085aa276781SNishanth Menon * 3086aa276781SNishanth Menon * NOTE: This releases the handle once the device resources are 3087aa276781SNishanth Menon * no longer needed. MUST NOT BE released with ti_sci_put_handle. 3088aa276781SNishanth Menon * The function does not track individual clients of the framework 3089aa276781SNishanth Menon * and is expected to be maintained by caller of TI SCI protocol library. 3090aa276781SNishanth Menon * 3091aa276781SNishanth Menon * Return: 0 if all went fine, else corresponding error. 3092aa276781SNishanth Menon */ 3093aa276781SNishanth Menon const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev) 3094aa276781SNishanth Menon { 3095aa276781SNishanth Menon const struct ti_sci_handle **ptr; 3096aa276781SNishanth Menon const struct ti_sci_handle *handle; 3097aa276781SNishanth Menon 3098aa276781SNishanth Menon ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL); 3099aa276781SNishanth Menon if (!ptr) 3100aa276781SNishanth Menon return ERR_PTR(-ENOMEM); 3101aa276781SNishanth Menon handle = ti_sci_get_handle(dev); 3102aa276781SNishanth Menon 3103aa276781SNishanth Menon if (!IS_ERR(handle)) { 3104aa276781SNishanth Menon *ptr = handle; 3105aa276781SNishanth Menon devres_add(dev, ptr); 3106aa276781SNishanth Menon } else { 3107aa276781SNishanth Menon devres_free(ptr); 3108aa276781SNishanth Menon } 3109aa276781SNishanth Menon 3110aa276781SNishanth Menon return handle; 3111aa276781SNishanth Menon } 3112aa276781SNishanth Menon EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle); 3113aa276781SNishanth Menon 3114905c3047SGrygorii Strashko /** 3115905c3047SGrygorii Strashko * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle 3116905c3047SGrygorii Strashko * @np: 
device node 3117905c3047SGrygorii Strashko * @property: property name containing phandle on TISCI node 3118905c3047SGrygorii Strashko * 3119905c3047SGrygorii Strashko * NOTE: The function does not track individual clients of the framework 3120905c3047SGrygorii Strashko * and is expected to be maintained by caller of TI SCI protocol library. 3121905c3047SGrygorii Strashko * ti_sci_put_handle must be balanced with successful ti_sci_get_by_phandle 3122905c3047SGrygorii Strashko * Return: pointer to handle if successful, else: 3123905c3047SGrygorii Strashko * -EPROBE_DEFER if the instance is not ready 3124905c3047SGrygorii Strashko * -ENODEV if the required node handler is missing 3125905c3047SGrygorii Strashko * -EINVAL if invalid conditions are encountered. 3126905c3047SGrygorii Strashko */ 3127905c3047SGrygorii Strashko const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np, 3128905c3047SGrygorii Strashko const char *property) 3129905c3047SGrygorii Strashko { 3130905c3047SGrygorii Strashko struct ti_sci_handle *handle = NULL; 3131905c3047SGrygorii Strashko struct device_node *ti_sci_np; 3132905c3047SGrygorii Strashko struct ti_sci_info *info; 3133905c3047SGrygorii Strashko struct list_head *p; 3134905c3047SGrygorii Strashko 3135905c3047SGrygorii Strashko if (!np) { 3136905c3047SGrygorii Strashko pr_err("I need a device pointer\n"); 3137905c3047SGrygorii Strashko return ERR_PTR(-EINVAL); 3138905c3047SGrygorii Strashko } 3139905c3047SGrygorii Strashko 3140905c3047SGrygorii Strashko ti_sci_np = of_parse_phandle(np, property, 0); 3141905c3047SGrygorii Strashko if (!ti_sci_np) 3142905c3047SGrygorii Strashko return ERR_PTR(-ENODEV); 3143905c3047SGrygorii Strashko 3144905c3047SGrygorii Strashko mutex_lock(&ti_sci_list_mutex); 3145905c3047SGrygorii Strashko list_for_each(p, &ti_sci_list) { 3146905c3047SGrygorii Strashko info = list_entry(p, struct ti_sci_info, node); 3147905c3047SGrygorii Strashko if (ti_sci_np == info->dev->of_node) { 
3148905c3047SGrygorii Strashko handle = &info->handle; 3149905c3047SGrygorii Strashko info->users++; 3150905c3047SGrygorii Strashko break; 3151905c3047SGrygorii Strashko } 3152905c3047SGrygorii Strashko } 3153905c3047SGrygorii Strashko mutex_unlock(&ti_sci_list_mutex); 3154905c3047SGrygorii Strashko of_node_put(ti_sci_np); 3155905c3047SGrygorii Strashko 3156905c3047SGrygorii Strashko if (!handle) 3157905c3047SGrygorii Strashko return ERR_PTR(-EPROBE_DEFER); 3158905c3047SGrygorii Strashko 3159905c3047SGrygorii Strashko return handle; 3160905c3047SGrygorii Strashko } 3161905c3047SGrygorii Strashko EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle); 3162905c3047SGrygorii Strashko 3163905c3047SGrygorii Strashko /** 3164905c3047SGrygorii Strashko * devm_ti_sci_get_by_phandle() - Managed get handle using phandle 3165905c3047SGrygorii Strashko * @dev: Device pointer requesting TISCI handle 3166905c3047SGrygorii Strashko * @property: property name containing phandle on TISCI node 3167905c3047SGrygorii Strashko * 3168905c3047SGrygorii Strashko * NOTE: This releases the handle once the device resources are 3169905c3047SGrygorii Strashko * no longer needed. MUST NOT BE released with ti_sci_put_handle. 3170905c3047SGrygorii Strashko * The function does not track individual clients of the framework 3171905c3047SGrygorii Strashko * and is expected to be maintained by caller of TI SCI protocol library. 3172905c3047SGrygorii Strashko * 3173905c3047SGrygorii Strashko * Return: 0 if all went fine, else corresponding error. 
3174905c3047SGrygorii Strashko */ 3175905c3047SGrygorii Strashko const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev, 3176905c3047SGrygorii Strashko const char *property) 3177905c3047SGrygorii Strashko { 3178905c3047SGrygorii Strashko const struct ti_sci_handle *handle; 3179905c3047SGrygorii Strashko const struct ti_sci_handle **ptr; 3180905c3047SGrygorii Strashko 3181905c3047SGrygorii Strashko ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL); 3182905c3047SGrygorii Strashko if (!ptr) 3183905c3047SGrygorii Strashko return ERR_PTR(-ENOMEM); 3184905c3047SGrygorii Strashko handle = ti_sci_get_by_phandle(dev_of_node(dev), property); 3185905c3047SGrygorii Strashko 3186905c3047SGrygorii Strashko if (!IS_ERR(handle)) { 3187905c3047SGrygorii Strashko *ptr = handle; 3188905c3047SGrygorii Strashko devres_add(dev, ptr); 3189905c3047SGrygorii Strashko } else { 3190905c3047SGrygorii Strashko devres_free(ptr); 3191905c3047SGrygorii Strashko } 3192905c3047SGrygorii Strashko 3193905c3047SGrygorii Strashko return handle; 3194905c3047SGrygorii Strashko } 3195905c3047SGrygorii Strashko EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle); 3196905c3047SGrygorii Strashko 3197032a1ec5SLokesh Vutla /** 3198032a1ec5SLokesh Vutla * ti_sci_get_free_resource() - Get a free resource from TISCI resource. 3199032a1ec5SLokesh Vutla * @res: Pointer to the TISCI resource 3200032a1ec5SLokesh Vutla * 3201032a1ec5SLokesh Vutla * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL. 
3202032a1ec5SLokesh Vutla */ 3203032a1ec5SLokesh Vutla u16 ti_sci_get_free_resource(struct ti_sci_resource *res) 3204032a1ec5SLokesh Vutla { 3205032a1ec5SLokesh Vutla unsigned long flags; 3206032a1ec5SLokesh Vutla u16 set, free_bit; 3207032a1ec5SLokesh Vutla 3208032a1ec5SLokesh Vutla raw_spin_lock_irqsave(&res->lock, flags); 3209032a1ec5SLokesh Vutla for (set = 0; set < res->sets; set++) { 3210032a1ec5SLokesh Vutla free_bit = find_first_zero_bit(res->desc[set].res_map, 3211032a1ec5SLokesh Vutla res->desc[set].num); 3212032a1ec5SLokesh Vutla if (free_bit != res->desc[set].num) { 3213032a1ec5SLokesh Vutla set_bit(free_bit, res->desc[set].res_map); 3214032a1ec5SLokesh Vutla raw_spin_unlock_irqrestore(&res->lock, flags); 3215032a1ec5SLokesh Vutla return res->desc[set].start + free_bit; 3216032a1ec5SLokesh Vutla } 3217032a1ec5SLokesh Vutla } 3218032a1ec5SLokesh Vutla raw_spin_unlock_irqrestore(&res->lock, flags); 3219032a1ec5SLokesh Vutla 3220032a1ec5SLokesh Vutla return TI_SCI_RESOURCE_NULL; 3221032a1ec5SLokesh Vutla } 3222032a1ec5SLokesh Vutla EXPORT_SYMBOL_GPL(ti_sci_get_free_resource); 3223032a1ec5SLokesh Vutla 3224032a1ec5SLokesh Vutla /** 3225032a1ec5SLokesh Vutla * ti_sci_release_resource() - Release a resource from TISCI resource. 3226032a1ec5SLokesh Vutla * @res: Pointer to the TISCI resource 3227032a1ec5SLokesh Vutla * @id: Resource id to be released. 
3228032a1ec5SLokesh Vutla */ 3229032a1ec5SLokesh Vutla void ti_sci_release_resource(struct ti_sci_resource *res, u16 id) 3230032a1ec5SLokesh Vutla { 3231032a1ec5SLokesh Vutla unsigned long flags; 3232032a1ec5SLokesh Vutla u16 set; 3233032a1ec5SLokesh Vutla 3234032a1ec5SLokesh Vutla raw_spin_lock_irqsave(&res->lock, flags); 3235032a1ec5SLokesh Vutla for (set = 0; set < res->sets; set++) { 3236032a1ec5SLokesh Vutla if (res->desc[set].start <= id && 3237032a1ec5SLokesh Vutla (res->desc[set].num + res->desc[set].start) > id) 3238032a1ec5SLokesh Vutla clear_bit(id - res->desc[set].start, 3239032a1ec5SLokesh Vutla res->desc[set].res_map); 3240032a1ec5SLokesh Vutla } 3241032a1ec5SLokesh Vutla raw_spin_unlock_irqrestore(&res->lock, flags); 3242032a1ec5SLokesh Vutla } 3243032a1ec5SLokesh Vutla EXPORT_SYMBOL_GPL(ti_sci_release_resource); 3244032a1ec5SLokesh Vutla 3245032a1ec5SLokesh Vutla /** 3246032a1ec5SLokesh Vutla * ti_sci_get_num_resources() - Get the number of resources in TISCI resource 3247032a1ec5SLokesh Vutla * @res: Pointer to the TISCI resource 3248032a1ec5SLokesh Vutla * 3249032a1ec5SLokesh Vutla * Return: Total number of available resources. 
3250032a1ec5SLokesh Vutla */ 3251032a1ec5SLokesh Vutla u32 ti_sci_get_num_resources(struct ti_sci_resource *res) 3252032a1ec5SLokesh Vutla { 3253032a1ec5SLokesh Vutla u32 set, count = 0; 3254032a1ec5SLokesh Vutla 3255032a1ec5SLokesh Vutla for (set = 0; set < res->sets; set++) 3256032a1ec5SLokesh Vutla count += res->desc[set].num; 3257032a1ec5SLokesh Vutla 3258032a1ec5SLokesh Vutla return count; 3259032a1ec5SLokesh Vutla } 3260032a1ec5SLokesh Vutla EXPORT_SYMBOL_GPL(ti_sci_get_num_resources); 3261032a1ec5SLokesh Vutla 3262032a1ec5SLokesh Vutla /** 3263032a1ec5SLokesh Vutla * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device 3264032a1ec5SLokesh Vutla * @handle: TISCI handle 3265032a1ec5SLokesh Vutla * @dev: Device pointer to which the resource is assigned 3266032a1ec5SLokesh Vutla * @dev_id: TISCI device id to which the resource is assigned 3267032a1ec5SLokesh Vutla * @of_prop: property name by which the resource are represented 3268032a1ec5SLokesh Vutla * 3269032a1ec5SLokesh Vutla * Return: Pointer to ti_sci_resource if all went well else appropriate 3270032a1ec5SLokesh Vutla * error pointer. 
3271032a1ec5SLokesh Vutla */ 3272032a1ec5SLokesh Vutla struct ti_sci_resource * 3273032a1ec5SLokesh Vutla devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, 3274032a1ec5SLokesh Vutla struct device *dev, u32 dev_id, char *of_prop) 3275032a1ec5SLokesh Vutla { 3276032a1ec5SLokesh Vutla struct ti_sci_resource *res; 3277fa42da11SPeter Ujfalusi bool valid_set = false; 3278032a1ec5SLokesh Vutla u32 resource_subtype; 3279032a1ec5SLokesh Vutla int i, ret; 3280032a1ec5SLokesh Vutla 3281032a1ec5SLokesh Vutla res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); 3282032a1ec5SLokesh Vutla if (!res) 3283032a1ec5SLokesh Vutla return ERR_PTR(-ENOMEM); 3284032a1ec5SLokesh Vutla 32850b88bc92SStephen Boyd ret = of_property_count_elems_of_size(dev_of_node(dev), of_prop, 3286032a1ec5SLokesh Vutla sizeof(u32)); 32870b88bc92SStephen Boyd if (ret < 0) { 3288032a1ec5SLokesh Vutla dev_err(dev, "%s resource type ids not available\n", of_prop); 32890b88bc92SStephen Boyd return ERR_PTR(ret); 3290032a1ec5SLokesh Vutla } 32910b88bc92SStephen Boyd res->sets = ret; 3292032a1ec5SLokesh Vutla 3293032a1ec5SLokesh Vutla res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc), 3294032a1ec5SLokesh Vutla GFP_KERNEL); 3295032a1ec5SLokesh Vutla if (!res->desc) 3296032a1ec5SLokesh Vutla return ERR_PTR(-ENOMEM); 3297032a1ec5SLokesh Vutla 3298032a1ec5SLokesh Vutla for (i = 0; i < res->sets; i++) { 3299032a1ec5SLokesh Vutla ret = of_property_read_u32_index(dev_of_node(dev), of_prop, i, 3300032a1ec5SLokesh Vutla &resource_subtype); 3301032a1ec5SLokesh Vutla if (ret) 3302032a1ec5SLokesh Vutla return ERR_PTR(-EINVAL); 3303032a1ec5SLokesh Vutla 3304032a1ec5SLokesh Vutla ret = handle->ops.rm_core_ops.get_range(handle, dev_id, 3305032a1ec5SLokesh Vutla resource_subtype, 3306032a1ec5SLokesh Vutla &res->desc[i].start, 3307032a1ec5SLokesh Vutla &res->desc[i].num); 3308032a1ec5SLokesh Vutla if (ret) { 3309fa42da11SPeter Ujfalusi dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n", 
3310032a1ec5SLokesh Vutla dev_id, resource_subtype); 3311fa42da11SPeter Ujfalusi res->desc[i].start = 0; 3312fa42da11SPeter Ujfalusi res->desc[i].num = 0; 3313fa42da11SPeter Ujfalusi continue; 3314032a1ec5SLokesh Vutla } 3315032a1ec5SLokesh Vutla 3316032a1ec5SLokesh Vutla dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n", 3317032a1ec5SLokesh Vutla dev_id, resource_subtype, res->desc[i].start, 3318032a1ec5SLokesh Vutla res->desc[i].num); 3319032a1ec5SLokesh Vutla 3320fa42da11SPeter Ujfalusi valid_set = true; 3321032a1ec5SLokesh Vutla res->desc[i].res_map = 3322032a1ec5SLokesh Vutla devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) * 3323032a1ec5SLokesh Vutla sizeof(*res->desc[i].res_map), GFP_KERNEL); 3324032a1ec5SLokesh Vutla if (!res->desc[i].res_map) 3325032a1ec5SLokesh Vutla return ERR_PTR(-ENOMEM); 3326032a1ec5SLokesh Vutla } 3327032a1ec5SLokesh Vutla raw_spin_lock_init(&res->lock); 3328032a1ec5SLokesh Vutla 3329fa42da11SPeter Ujfalusi if (valid_set) 3330032a1ec5SLokesh Vutla return res; 3331fa42da11SPeter Ujfalusi 3332fa42da11SPeter Ujfalusi return ERR_PTR(-EINVAL); 3333032a1ec5SLokesh Vutla } 3334032a1ec5SLokesh Vutla 3335912cffb4SNishanth Menon static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode, 3336912cffb4SNishanth Menon void *cmd) 3337912cffb4SNishanth Menon { 3338912cffb4SNishanth Menon struct ti_sci_info *info = reboot_to_ti_sci_info(nb); 3339912cffb4SNishanth Menon const struct ti_sci_handle *handle = &info->handle; 3340912cffb4SNishanth Menon 3341912cffb4SNishanth Menon ti_sci_cmd_core_reboot(handle); 3342912cffb4SNishanth Menon 3343912cffb4SNishanth Menon /* call fail OR pass, we should not be here in the first place */ 3344912cffb4SNishanth Menon return NOTIFY_BAD; 3345912cffb4SNishanth Menon } 3346912cffb4SNishanth Menon 3347aa276781SNishanth Menon /* Description for K2G */ 3348aa276781SNishanth Menon static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = { 3349e69a3553SNishanth Menon .default_host_id = 2, 
3350aa276781SNishanth Menon /* Conservative duration */ 3351aa276781SNishanth Menon .max_rx_timeout_ms = 1000, 3352aa276781SNishanth Menon /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */ 3353aa276781SNishanth Menon .max_msgs = 20, 3354aa276781SNishanth Menon .max_msg_size = 64, 3355754c9477SPeter Ujfalusi .rm_type_map = NULL, 3356754c9477SPeter Ujfalusi }; 3357754c9477SPeter Ujfalusi 3358754c9477SPeter Ujfalusi static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = { 3359754c9477SPeter Ujfalusi {.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */ 3360754c9477SPeter Ujfalusi {.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */ 3361754c9477SPeter Ujfalusi {.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */ 3362754c9477SPeter Ujfalusi {.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */ 3363754c9477SPeter Ujfalusi {.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */ 3364754c9477SPeter Ujfalusi {.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */ 3365754c9477SPeter Ujfalusi {.dev_id = 0, .type = 0x000}, /* end of table */ 3366754c9477SPeter Ujfalusi }; 3367754c9477SPeter Ujfalusi 3368754c9477SPeter Ujfalusi /* Description for AM654 */ 3369754c9477SPeter Ujfalusi static const struct ti_sci_desc ti_sci_pmmc_am654_desc = { 3370754c9477SPeter Ujfalusi .default_host_id = 12, 3371754c9477SPeter Ujfalusi /* Conservative duration */ 3372754c9477SPeter Ujfalusi .max_rx_timeout_ms = 10000, 3373754c9477SPeter Ujfalusi /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! 
*/ 3374754c9477SPeter Ujfalusi .max_msgs = 20, 3375754c9477SPeter Ujfalusi .max_msg_size = 60, 3376754c9477SPeter Ujfalusi .rm_type_map = ti_sci_am654_rm_type_map, 3377aa276781SNishanth Menon }; 3378aa276781SNishanth Menon 3379aa276781SNishanth Menon static const struct of_device_id ti_sci_of_match[] = { 3380aa276781SNishanth Menon {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc}, 3381754c9477SPeter Ujfalusi {.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc}, 3382aa276781SNishanth Menon { /* Sentinel */ }, 3383aa276781SNishanth Menon }; 3384aa276781SNishanth Menon MODULE_DEVICE_TABLE(of, ti_sci_of_match); 3385aa276781SNishanth Menon 3386aa276781SNishanth Menon static int ti_sci_probe(struct platform_device *pdev) 3387aa276781SNishanth Menon { 3388aa276781SNishanth Menon struct device *dev = &pdev->dev; 3389aa276781SNishanth Menon const struct of_device_id *of_id; 3390aa276781SNishanth Menon const struct ti_sci_desc *desc; 3391aa276781SNishanth Menon struct ti_sci_xfer *xfer; 3392aa276781SNishanth Menon struct ti_sci_info *info = NULL; 3393aa276781SNishanth Menon struct ti_sci_xfers_info *minfo; 3394aa276781SNishanth Menon struct mbox_client *cl; 3395aa276781SNishanth Menon int ret = -EINVAL; 3396aa276781SNishanth Menon int i; 3397912cffb4SNishanth Menon int reboot = 0; 3398e69a3553SNishanth Menon u32 h_id; 3399aa276781SNishanth Menon 3400aa276781SNishanth Menon of_id = of_match_device(ti_sci_of_match, dev); 3401aa276781SNishanth Menon if (!of_id) { 3402aa276781SNishanth Menon dev_err(dev, "OF data missing\n"); 3403aa276781SNishanth Menon return -EINVAL; 3404aa276781SNishanth Menon } 3405aa276781SNishanth Menon desc = of_id->data; 3406aa276781SNishanth Menon 3407aa276781SNishanth Menon info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); 3408aa276781SNishanth Menon if (!info) 3409aa276781SNishanth Menon return -ENOMEM; 3410aa276781SNishanth Menon 3411aa276781SNishanth Menon info->dev = dev; 3412aa276781SNishanth Menon info->desc = desc; 
3413e69a3553SNishanth Menon ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id); 3414e69a3553SNishanth Menon /* if the property is not present in DT, use a default from desc */ 3415e69a3553SNishanth Menon if (ret < 0) { 3416e69a3553SNishanth Menon info->host_id = info->desc->default_host_id; 3417e69a3553SNishanth Menon } else { 3418e69a3553SNishanth Menon if (!h_id) { 3419e69a3553SNishanth Menon dev_warn(dev, "Host ID 0 is reserved for firmware\n"); 3420e69a3553SNishanth Menon info->host_id = info->desc->default_host_id; 3421e69a3553SNishanth Menon } else { 3422e69a3553SNishanth Menon info->host_id = h_id; 3423e69a3553SNishanth Menon } 3424e69a3553SNishanth Menon } 3425e69a3553SNishanth Menon 3426912cffb4SNishanth Menon reboot = of_property_read_bool(dev->of_node, 3427912cffb4SNishanth Menon "ti,system-reboot-controller"); 3428aa276781SNishanth Menon INIT_LIST_HEAD(&info->node); 3429aa276781SNishanth Menon minfo = &info->minfo; 3430aa276781SNishanth Menon 3431aa276781SNishanth Menon /* 3432aa276781SNishanth Menon * Pre-allocate messages 3433aa276781SNishanth Menon * NEVER allocate more than what we can indicate in hdr.seq 3434aa276781SNishanth Menon * if we have data description bug, force a fix.. 
3435aa276781SNishanth Menon */ 3436aa276781SNishanth Menon if (WARN_ON(desc->max_msgs >= 3437aa276781SNishanth Menon 1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq))) 3438aa276781SNishanth Menon return -EINVAL; 3439aa276781SNishanth Menon 3440aa276781SNishanth Menon minfo->xfer_block = devm_kcalloc(dev, 3441aa276781SNishanth Menon desc->max_msgs, 3442aa276781SNishanth Menon sizeof(*minfo->xfer_block), 3443aa276781SNishanth Menon GFP_KERNEL); 3444aa276781SNishanth Menon if (!minfo->xfer_block) 3445aa276781SNishanth Menon return -ENOMEM; 3446aa276781SNishanth Menon 3447a86854d0SKees Cook minfo->xfer_alloc_table = devm_kcalloc(dev, 3448a86854d0SKees Cook BITS_TO_LONGS(desc->max_msgs), 3449a86854d0SKees Cook sizeof(unsigned long), 3450aa276781SNishanth Menon GFP_KERNEL); 3451aa276781SNishanth Menon if (!minfo->xfer_alloc_table) 3452aa276781SNishanth Menon return -ENOMEM; 3453aa276781SNishanth Menon bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs); 3454aa276781SNishanth Menon 3455aa276781SNishanth Menon /* Pre-initialize the buffer pointer to pre-allocated buffers */ 3456aa276781SNishanth Menon for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) { 3457aa276781SNishanth Menon xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size, 3458aa276781SNishanth Menon GFP_KERNEL); 3459aa276781SNishanth Menon if (!xfer->xfer_buf) 3460aa276781SNishanth Menon return -ENOMEM; 3461aa276781SNishanth Menon 3462aa276781SNishanth Menon xfer->tx_message.buf = xfer->xfer_buf; 3463aa276781SNishanth Menon init_completion(&xfer->done); 3464aa276781SNishanth Menon } 3465aa276781SNishanth Menon 3466aa276781SNishanth Menon ret = ti_sci_debugfs_create(pdev, info); 3467aa276781SNishanth Menon if (ret) 3468aa276781SNishanth Menon dev_warn(dev, "Failed to create debug file\n"); 3469aa276781SNishanth Menon 3470aa276781SNishanth Menon platform_set_drvdata(pdev, info); 3471aa276781SNishanth Menon 3472aa276781SNishanth Menon cl = &info->cl; 3473aa276781SNishanth Menon cl->dev = 
dev; 3474aa276781SNishanth Menon cl->tx_block = false; 3475aa276781SNishanth Menon cl->rx_callback = ti_sci_rx_callback; 3476aa276781SNishanth Menon cl->knows_txdone = true; 3477aa276781SNishanth Menon 3478aa276781SNishanth Menon spin_lock_init(&minfo->xfer_lock); 3479aa276781SNishanth Menon sema_init(&minfo->sem_xfer_count, desc->max_msgs); 3480aa276781SNishanth Menon 3481aa276781SNishanth Menon info->chan_rx = mbox_request_channel_byname(cl, "rx"); 3482aa276781SNishanth Menon if (IS_ERR(info->chan_rx)) { 3483aa276781SNishanth Menon ret = PTR_ERR(info->chan_rx); 3484aa276781SNishanth Menon goto out; 3485aa276781SNishanth Menon } 3486aa276781SNishanth Menon 3487aa276781SNishanth Menon info->chan_tx = mbox_request_channel_byname(cl, "tx"); 3488aa276781SNishanth Menon if (IS_ERR(info->chan_tx)) { 3489aa276781SNishanth Menon ret = PTR_ERR(info->chan_tx); 3490aa276781SNishanth Menon goto out; 3491aa276781SNishanth Menon } 3492aa276781SNishanth Menon ret = ti_sci_cmd_get_revision(info); 3493aa276781SNishanth Menon if (ret) { 3494aa276781SNishanth Menon dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret); 3495aa276781SNishanth Menon goto out; 3496aa276781SNishanth Menon } 3497aa276781SNishanth Menon 34989e7d756dSNishanth Menon ti_sci_setup_ops(info); 34999e7d756dSNishanth Menon 3500912cffb4SNishanth Menon if (reboot) { 3501912cffb4SNishanth Menon info->nb.notifier_call = tisci_reboot_handler; 3502912cffb4SNishanth Menon info->nb.priority = 128; 3503912cffb4SNishanth Menon 3504912cffb4SNishanth Menon ret = register_restart_handler(&info->nb); 3505912cffb4SNishanth Menon if (ret) { 3506912cffb4SNishanth Menon dev_err(dev, "reboot registration fail(%d)\n", ret); 3507912cffb4SNishanth Menon return ret; 3508912cffb4SNishanth Menon } 3509912cffb4SNishanth Menon } 3510912cffb4SNishanth Menon 3511aa276781SNishanth Menon dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n", 3512aa276781SNishanth Menon info->handle.version.abi_major, info->handle.version.abi_minor, 
3513aa276781SNishanth Menon info->handle.version.firmware_revision, 3514aa276781SNishanth Menon info->handle.version.firmware_description); 3515aa276781SNishanth Menon 3516aa276781SNishanth Menon mutex_lock(&ti_sci_list_mutex); 3517aa276781SNishanth Menon list_add_tail(&info->node, &ti_sci_list); 3518aa276781SNishanth Menon mutex_unlock(&ti_sci_list_mutex); 3519aa276781SNishanth Menon 3520aa276781SNishanth Menon return of_platform_populate(dev->of_node, NULL, NULL, dev); 3521aa276781SNishanth Menon out: 3522aa276781SNishanth Menon if (!IS_ERR(info->chan_tx)) 3523aa276781SNishanth Menon mbox_free_channel(info->chan_tx); 3524aa276781SNishanth Menon if (!IS_ERR(info->chan_rx)) 3525aa276781SNishanth Menon mbox_free_channel(info->chan_rx); 3526aa276781SNishanth Menon debugfs_remove(info->d); 3527aa276781SNishanth Menon return ret; 3528aa276781SNishanth Menon } 3529aa276781SNishanth Menon 3530aa276781SNishanth Menon static int ti_sci_remove(struct platform_device *pdev) 3531aa276781SNishanth Menon { 3532aa276781SNishanth Menon struct ti_sci_info *info; 3533aa276781SNishanth Menon struct device *dev = &pdev->dev; 3534aa276781SNishanth Menon int ret = 0; 3535aa276781SNishanth Menon 3536aa276781SNishanth Menon of_platform_depopulate(dev); 3537aa276781SNishanth Menon 3538aa276781SNishanth Menon info = platform_get_drvdata(pdev); 3539aa276781SNishanth Menon 3540912cffb4SNishanth Menon if (info->nb.notifier_call) 3541912cffb4SNishanth Menon unregister_restart_handler(&info->nb); 3542912cffb4SNishanth Menon 3543aa276781SNishanth Menon mutex_lock(&ti_sci_list_mutex); 3544aa276781SNishanth Menon if (info->users) 3545aa276781SNishanth Menon ret = -EBUSY; 3546aa276781SNishanth Menon else 3547aa276781SNishanth Menon list_del(&info->node); 3548aa276781SNishanth Menon mutex_unlock(&ti_sci_list_mutex); 3549aa276781SNishanth Menon 3550aa276781SNishanth Menon if (!ret) { 3551aa276781SNishanth Menon ti_sci_debugfs_destroy(pdev, info); 3552aa276781SNishanth Menon 3553aa276781SNishanth 
Menon /* Safe to free channels since no more users */ 3554aa276781SNishanth Menon mbox_free_channel(info->chan_tx); 3555aa276781SNishanth Menon mbox_free_channel(info->chan_rx); 3556aa276781SNishanth Menon } 3557aa276781SNishanth Menon 3558aa276781SNishanth Menon return ret; 3559aa276781SNishanth Menon } 3560aa276781SNishanth Menon 3561aa276781SNishanth Menon static struct platform_driver ti_sci_driver = { 3562aa276781SNishanth Menon .probe = ti_sci_probe, 3563aa276781SNishanth Menon .remove = ti_sci_remove, 3564aa276781SNishanth Menon .driver = { 3565aa276781SNishanth Menon .name = "ti-sci", 3566aa276781SNishanth Menon .of_match_table = of_match_ptr(ti_sci_of_match), 3567aa276781SNishanth Menon }, 3568aa276781SNishanth Menon }; 3569aa276781SNishanth Menon module_platform_driver(ti_sci_driver); 3570aa276781SNishanth Menon 3571aa276781SNishanth Menon MODULE_LICENSE("GPL v2"); 3572aa276781SNishanth Menon MODULE_DESCRIPTION("TI System Control Interface(SCI) driver"); 3573aa276781SNishanth Menon MODULE_AUTHOR("Nishanth Menon"); 3574aa276781SNishanth Menon MODULE_ALIAS("platform:ti-sci"); 3575