// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments System Control Interface Protocol Driver
 *
 * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
 *	Nishanth Menon
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/reboot.h>

#include "ti_sci.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
static DEFINE_MUTEX(ti_sci_list_mutex);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 * @xfer_buf:	Preallocated buffer to store receive message
 *		Since we work with request-ACK protocol, we can
 *		reuse the same buffer for the rx path as we
 *		use for the tx path.
 * @done:	completion event
 */
struct ti_sci_xfer {
	struct ti_msgmgr_message tx_message;
	u8 rx_len;
	u8 *xfer_buf;
	struct completion done;
};

/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count:	Counting Semaphore for managing max simultaneous
 *			Messages.
 * @xfer_block:		Preallocated Message array
 * @xfer_alloc_table:	Bitmap table for allocated messages.
 *			Index of this bitmap table is also used for message
 *			sequence identifier.
 * @xfer_lock:		Protection for message allocation
 */
struct ti_sci_xfers_info {
	struct semaphore sem_xfer_count;
	struct ti_sci_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation */
	spinlock_t xfer_lock;
};

/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};

/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @nb:		Reboot Notifier block
 * @d:		Debugfs file entry
 * @debug_region: Memory region where the debug messages are available
 * @debug_region_size: Debug region size
 * @debug_buffer: Buffer allocated to copy debug messages.
 * @handle:	Instance of TI SCI handle to send to clients.
 * @cl:		Mailbox Client
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @minfo:	Message info
 * @node:	list head
 * @host_id:	Host ID
 * @users:	Number of users of this instance
 */
struct ti_sci_info {
	struct device *dev;
	struct notifier_block nb;
	const struct ti_sci_desc *desc;
	struct dentry *d;
	void __iomem *debug_region;
	char *debug_buffer;
	size_t debug_region_size;
	struct ti_sci_handle handle;
	struct mbox_client cl;
	struct mbox_chan *chan_tx;
	struct mbox_chan *chan_rx;
	struct ti_sci_xfers_info minfo;
	struct list_head node;
	u8 host_id;
	/* protected by ti_sci_list_mutex */
	int users;
};

#define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)

#ifdef CONFIG_DEBUG_FS

/**
 * ti_sci_debug_show() - Helper to dump the debug log
 * @s:		sequence file pointer
 * @unused:	unused.
 *
 * Return: 0
 */
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
	struct ti_sci_info *info = s->private;

	memcpy_fromio(info->debug_buffer, info->debug_region,
		      info->debug_region_size);
	/*
	 * We don't trust firmware to leave NULL terminated last byte (hence
	 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
	 * specific data format for debug messages, we just present the data
	 * in the buffer as is - we expect the messages to be self explanatory.
	 */
	seq_puts(s, info->debug_buffer);
	return 0;
}

/* Provide the log file operations interface */
DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);

/**
 * ti_sci_debugfs_create() - Create log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 *
 * Return: 0 if all went fine, else corresponding error.
 */
static int ti_sci_debugfs_create(struct platform_device *pdev,
				 struct ti_sci_info *info)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	char debug_name[50] = "ti_sci_debug@";

	/* Debug region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "debug_messages");
	info->debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(info->debug_region))
		return 0;
	info->debug_region_size = resource_size(res);

	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
					  sizeof(char), GFP_KERNEL);
	if (!info->debug_buffer)
		return -ENOMEM;
	/* Setup NULL termination */
	info->debug_buffer[info->debug_region_size] = 0;

	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
					      sizeof(debug_name) -
					      sizeof("ti_sci_debug@")),
				      0444, NULL, info, &ti_sci_debug_fops);
	if (IS_ERR(info->d))
		return PTR_ERR(info->d);

	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
		info->debug_region, info->debug_region_size, res);
	return 0;
}

/**
 * ti_sci_debugfs_destroy() - clean up log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 */
static void ti_sci_debugfs_destroy(struct platform_device *pdev,
				   struct ti_sci_info *info)
{
	if (IS_ERR(info->debug_region))
		return;

	debugfs_remove(info->d);
}
#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
					struct ti_sci_info *info)
{
	return 0;
}

static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
					  struct ti_sci_info *info)
{
}
#endif /* CONFIG_DEBUG_FS */

/**
 * ti_sci_dump_header_dbg() - Helper to dump a message header.
 * @dev:	Device pointer corresponding to the SCI entity
 * @hdr:	pointer to header.
 */
static inline void ti_sci_dump_header_dbg(struct device *dev,
					  struct ti_sci_msg_hdr *hdr)
{
	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
		hdr->type, hdr->host, hdr->seq, hdr->flags);
}

/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl:	client pointer
 * @m:	mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
	struct device *dev = info->dev;
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_msgmgr_message *mbox_msg = m;
	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
	struct ti_sci_xfer *xfer;
	u8 xfer_id;

	xfer_id = hdr->seq;

	/*
	 * Are we even expecting this?
	 * NOTE: barriers were implicit in locks used for modifying the bitmap
	 */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	/* Is the message of valid length? */
	if (mbox_msg->len > info->desc->max_msg_size) {
		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
			mbox_msg->len, info->desc->max_msg_size);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}
	if (mbox_msg->len < xfer->rx_len) {
		dev_err(dev, "Recv xfer %zu < expected %d length\n",
			mbox_msg->len, xfer->rx_len);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}

	ti_sci_dump_header_dbg(dev, hdr);
	/* Take a copy to the rx buffer.. */
	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
	complete(&xfer->done);
}

/**
 * ti_sci_get_one_xfer() - Allocate one message
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: Pointer to an allocated &struct ti_sci_xfer if all went fine,
 *	   else corresponding error pointer.
 */
static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
					       u16 msg_type, u32 msg_flags,
					       size_t tx_message_size,
					       size_t rx_message_size)
{
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_sci_xfer *xfer;
	struct ti_sci_msg_hdr *hdr;
	unsigned long flags;
	unsigned long bit_pos;
	u8 xfer_id;
	int ret;
	int timeout;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
		return ERR_PTR(-ERANGE);

	/*
	 * Ensure we have only controlled number of pending messages.
	 * Ideally, we might just have to wait for a single message, be
	 * conservative and wait 5 times that..
	 */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
	ret = down_timeout(&minfo->sem_xfer_count, timeout);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msgs);
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/*
	 * We already ensured in probe that we can have max messages that can
	 * fit in hdr.seq - NOTE: this improves access latencies
	 * to predictable O(1) access, BUT, it opens us to risk if
	 * remote misbehaves with corrupted message sequence responses.
	 * If that happens, we are going to be messed up anyways..
	 */
	xfer_id = (u8)bit_pos;

	xfer = &minfo->xfer_block[xfer_id];

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer->tx_message.len = tx_message_size;
	xfer->rx_len = (u8)rx_message_size;

	reinit_completion(&xfer->done);

	hdr->seq = xfer_id;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}

/**
 * ti_sci_put_one_xfer() - Release a message
 * @minfo:	transfer info pointer
 * @xfer:	message that was reserved by ti_sci_get_one_xfer
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
				struct ti_sci_xfer *xfer)
{
	unsigned long flags;
	struct ti_sci_msg_hdr *hdr;
	u8 xfer_id;

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer_id = hdr->seq;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer_id, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* Increment the count for the next user to get through */
	up(&minfo->sem_xfer_count);
}

/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	   return corresponding error, else if all goes well,
 *	   return 0.
 */
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
				 struct ti_sci_xfer *xfer)
{
	int ret;
	int timeout;
	struct device *dev = info->dev;

	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
	if (ret < 0)
		return ret;

	ret = 0;

	/* And we wait for the response. */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
			(void *)_RET_IP_);
		ret = -ETIMEDOUT;
	}
	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(info->chan_tx, ret);

	return ret;
}

/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @info:	Pointer to SCI entity information
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
{
	struct device *dev = info->dev;
	struct ti_sci_handle *handle = &info->handle;
	struct ti_sci_version_info *ver = &handle->version;
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_xfer *xfer;
	int ret;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(struct ti_sci_msg_hdr),
				   sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	strncpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}

/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static inline bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}

/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
	req->id = id;
	req->state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_device_state *req;
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
	req->id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;
fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 *			     that can be shared with other hosts.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_get_device_exclusive() - command to request for device managed by
 *				       TISCI that is exclusively owned by the
 *				       requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
					   u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
 *					TISCI that is exclusively owned by
 *					requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
					    u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}

/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 * appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be ON
 * @curr_state:	true if currently ON and active
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
				bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @curr_state:	true if currently transitioning.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
				   bool *curr_state)
{
	int ret;
	u8 state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}

/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_resets *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
	req->id = id;
	req->resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @reset_state: Pointer to reset state to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 *reset_state)
{
	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
				       NULL);
}

/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @programmed_state:	State requested for clock to move to
 * @current_state:	State that the clock is currently in
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
				      u32 dev_id, u32 clk_id,
				      u8 *programmed_state, u8 *current_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_state *req;
	struct ti_sci_msg_resp_get_clock_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!programmed_state && !current_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (programmed_state)
		*programmed_state = resp->programmed_state;
	if (current_state)
		*current_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
 * @can_change_freq: 'true' if frequency change is desired, else 'false'
 * @enable_input_term: 'true' if input termination is desired, else 'false'
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool needs_ssc,
				bool can_change_freq, bool enable_input_term)
{
	u32 flags = 0;

	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
				      MSG_CLOCK_SW_STATE_REQ);
}

/**
 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
				 u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_UNREQ);
}

/**
 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
				u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_AUTO);
}

/**
 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is auto managed
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id, bool *req_state)
{
	u8 state = 0;
	int ret;

	if (!req_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
	if (ret)
		return ret;

	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_on() - Is the clock ON
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and enabled
 * @curr_state: state indicating if the clock is ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_off() - Is the clock OFF
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and disabled
 * @curr_state: state indicating if the clock is NOT ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
				 u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Parent clock identifier to set
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_parent *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	if (parent_id < 255) {
		req->parent_id = parent_id;
	} else {
		req->parent_id = 255;
		req->parent_id_32 = parent_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Current clock parent
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 *parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_parent *req;
	struct ti_sci_msg_resp_get_clock_parent *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !parent_id)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->parent_id < 255)
			*parent_id = resp->parent_id;
		else
			*parent_id = resp->parent_id_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @num_parents: Returns the number of parents to the current clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
					  u32 dev_id, u32 clk_id,
					  u32 *num_parents)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_num_parents *req;
	struct ti_sci_msg_resp_get_clock_num_parents *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !num_parents)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->num_parents < 255)
			*num_parents = resp->num_parents;
		else
			*num_parents = resp->num_parents_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @match_freq:	Frequency match in Hz response.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
					 u32 dev_id, u32 clk_id, u64 min_freq,
					 u64 target_freq, u64 max_freq,
					 u64 *match_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_query_clock_freq *req;
	struct ti_sci_msg_resp_query_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !match_freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*match_freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 min_freq,
				   u64 target_freq, u64 max_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_freq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_freq() - Get current frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @freq:	Current frequency in Hz
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 *freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_freq *req;
	struct ti_sci_msg_resp_get_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_reboot *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		ret = 0;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_get_resource_range - Helper to get a range of resources assigned
 *			       to a host. Resource is uniquely identified by
 *			       type and subtype.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @s_host:		Host processor ID to which the resources are allocated
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 subtype, u8 s_host,
				     u16 *range_start, u16 *range_num)
{
	struct ti_sci_msg_resp_get_resource_range *resp;
	struct ti_sci_msg_req_get_resource_range *req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
	req->secondary_host = s_host;
	req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else if (!resp->range_start && !resp->range_num) {
		ret = -ENODEV;
	} else {
		*range_start = resp->range_start;
		*range_num = resp->range_num;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
 *				   that is the same as the TI SCI interface
 *				   host.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
					 u32 dev_id, u8 subtype,
					 u16 *range_start, u16 *range_num)
{
	return ti_sci_get_resource_range(handle, dev_id, subtype,
					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
					 range_start, range_num);
}

/**
 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
 *					      assigned to a specified host.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @s_host:		Host processor ID to which the resources are allocated
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static
int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
					     u32 dev_id, u8 subtype, u8 s_host,
					     u16 *range_start, u16 *range_num)
{
	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
					 range_start, range_num);
}

/**
 * ti_sci_manage_irq() - Helper api to configure/release the irq route between
 *			 the requested source and destination
 * @handle:		Pointer to TISCI handle.
 * @valid_params:	Bit fields defining the validity of certain params
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @dst_id:		Device ID of the IRQ destination
 * @dst_host_irq:	IRQ number of the destination device
 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
 * @vint:		Virtual interrupt to be used within the IA
 * @global_event:	Global event number to be used for the requesting event
 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
 * @s_host:		Secondary host ID to which the irq/event is being
 *			requested for.
 * @type:		Request type irq set or release.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
			     u32 valid_params, u16 src_id, u16 src_index,
			     u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
			     u16 global_event, u8 vint_status_bit, u8 s_host,
			     u16 type)
{
	struct ti_sci_msg_req_manage_irq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
	req->valid_params = valid_params;
	req->src_id = src_id;
	req->src_index = src_index;
	req->dst_id = dst_id;
	req->dst_host_irq = dst_host_irq;
	req->ia_id = ia_id;
	req->vint = vint;
	req->global_event = global_event;
	req->vint_status_bit = vint_status_bit;
	req->secondary_host = s_host;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_set_irq() - Helper api to configure the irq route between the
 *		      requested source and destination
 * @handle:		Pointer to TISCI handle.
 * @valid_params:	Bit fields defining the validity of certain params
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @dst_id:		Device ID of the IRQ destination
 * @dst_host_irq:	IRQ number of the destination device
 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
 * @vint:		Virtual interrupt to be used within the IA
 * @global_event:	Global event number to be used for the requesting event
 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
 * @s_host:		Secondary host ID to which the irq/event is being
 *			requested for.
 *
 * Return: 0 if all went fine, else return appropriate error.
1902 */ 1903 static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params, 1904 u16 src_id, u16 src_index, u16 dst_id, 1905 u16 dst_host_irq, u16 ia_id, u16 vint, 1906 u16 global_event, u8 vint_status_bit, u8 s_host) 1907 { 1908 pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n", 1909 __func__, valid_params, src_id, src_index, 1910 dst_id, dst_host_irq, ia_id, vint, global_event, 1911 vint_status_bit); 1912 1913 return ti_sci_manage_irq(handle, valid_params, src_id, src_index, 1914 dst_id, dst_host_irq, ia_id, vint, 1915 global_event, vint_status_bit, s_host, 1916 TI_SCI_MSG_SET_IRQ); 1917 } 1918 1919 /** 1920 * ti_sci_free_irq() - Helper api to free the irq route between the 1921 * requested source and destination 1922 * @handle: Pointer to TISCI handle. 1923 * @valid_params: Bit fields defining the validity of certain params 1924 * @src_id: Device ID of the IRQ source 1925 * @src_index: IRQ source index within the source device 1926 * @dst_id: Device ID of the IRQ destination 1927 * @dst_host_irq: IRQ number of the destination device 1928 * @ia_id: Device ID of the IA, if the IRQ flows through this IA 1929 * @vint: Virtual interrupt to be used within the IA 1930 * @global_event: Global event number to be used for the requesting event 1931 * @vint_status_bit: Virtual interrupt status bit to be used for the event 1932 * @s_host: Secondary host ID to which the irq/event is being 1933 * requested for. 1934 * 1935 * Return: 0 if all went fine, else return appropriate error. 1936 */ 1937 static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params, 1938 u16 src_id, u16 src_index, u16 dst_id, 1939 u16 dst_host_irq, u16 ia_id, u16 vint, 1940 u16 global_event, u8 vint_status_bit, u8 s_host) 1941 { 1942 pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n", 1943 __func__, valid_params, src_id, src_index, 1944 dst_id, dst_host_irq, ia_id, vint, global_event, 1945 vint_status_bit); 1946 1947 return ti_sci_manage_irq(handle, valid_params, src_id, src_index, 1948 dst_id, dst_host_irq, ia_id, vint, 1949 global_event, vint_status_bit, s_host, 1950 TI_SCI_MSG_FREE_IRQ); 1951 } 1952 1953 /** 1954 * ti_sci_cmd_set_irq() - Configure a host irq route between the requested 1955 * source and destination. 1956 * @handle: Pointer to TISCI handle. 1957 * @src_id: Device ID of the IRQ source 1958 * @src_index: IRQ source index within the source device 1959 * @dst_id: Device ID of the IRQ destination 1960 * @dst_host_irq: IRQ number of the destination device 1963 * 1964 * Return: 0 if all went fine, else return appropriate error. 1965 */ 1966 static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id, 1967 u16 src_index, u16 dst_id, u16 dst_host_irq) 1968 { 1969 u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID; 1970 1971 return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id, 1972 dst_host_irq, 0, 0, 0, 0, 0); 1973 } 1974 1975 /** 1976 * ti_sci_cmd_set_event_map() - Configure an event based irq route between the 1977 * requested source and Interrupt Aggregator. 1978 * @handle: Pointer to TISCI handle.
1979 * @src_id: Device ID of the IRQ source 1980 * @src_index: IRQ source index within the source device 1981 * @ia_id: Device ID of the IA, if the IRQ flows through this IA 1982 * @vint: Virtual interrupt to be used within the IA 1983 * @global_event: Global event number to be used for the requesting event 1984 * @vint_status_bit: Virtual interrupt status bit to be used for the event 1985 * 1986 * Return: 0 if all went fine, else return appropriate error. 1987 */ 1988 static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle, 1989 u16 src_id, u16 src_index, u16 ia_id, 1990 u16 vint, u16 global_event, 1991 u8 vint_status_bit) 1992 { 1993 u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID | 1994 MSG_FLAG_GLB_EVNT_VALID | 1995 MSG_FLAG_VINT_STS_BIT_VALID; 1996 1997 return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0, 1998 ia_id, vint, global_event, vint_status_bit, 0); 1999 } 2000 2001 /** 2002 * ti_sci_cmd_free_irq() - Free a host irq route between the 2003 * requested source and destination. 2004 * @handle: Pointer to TISCI handle. 2005 * @src_id: Device ID of the IRQ source 2006 * @src_index: IRQ source index within the source device 2007 * @dst_id: Device ID of the IRQ destination 2008 * @dst_host_irq: IRQ number of the destination device 2011 * 2012 * Return: 0 if all went fine, else return appropriate error. 2013 */ 2014 static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id, 2015 u16 src_index, u16 dst_id, u16 dst_host_irq) 2016 { 2017 u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID; 2018 2019 return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id, 2020 dst_host_irq, 0, 0, 0, 0, 0); 2021 } 2022 2023 /** 2024 * ti_sci_cmd_free_event_map() - Free an event map between the requested source 2025 * and Interrupt Aggregator. 2026 * @handle: Pointer to TISCI handle. 2027 * @src_id: Device ID of the IRQ source 2028 * @src_index: IRQ source index within the source device 2029 * @ia_id: Device ID of the IA, if the IRQ flows through this IA 2030 * @vint: Virtual interrupt to be used within the IA 2031 * @global_event: Global event number to be used for the requesting event 2032 * @vint_status_bit: Virtual interrupt status bit to be used for the event 2033 * 2034 * Return: 0 if all went fine, else return appropriate error. 2035 */ 2036 static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle, 2037 u16 src_id, u16 src_index, u16 ia_id, 2038 u16 vint, u16 global_event, 2039 u8 vint_status_bit) 2040 { 2041 u32 valid_params = MSG_FLAG_IA_ID_VALID | 2042 MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID | 2043 MSG_FLAG_VINT_STS_BIT_VALID; 2044 2045 return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0, 2046 ia_id, vint, global_event, vint_status_bit, 0); 2047 } 2048 2049 /** 2050 * ti_sci_cmd_ring_config() - configure RA ring 2051 * @handle: Pointer to TI SCI handle. 2052 * @valid_params: Bitfield defining validity of ring configuration 2053 * parameters 2054 * @nav_id: Device ID of Navigator Subsystem from which the ring is 2055 * allocated 2056 * @index: Ring index 2057 * @addr_lo: The ring base address lo 32 bits 2058 * @addr_hi: The ring base address hi 32 bits 2059 * @count: Number of ring elements 2060 * @mode: The mode of the ring 2061 * @size: The ring element size.
2062 * @order_id: Specifies the ring's bus order ID 2063 * 2064 * Return: 0 if all went well, else returns appropriate error value. 2065 * 2066 * See @ti_sci_msg_rm_ring_cfg_req for more info. 2067 */ 2068 static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle, 2069 u32 valid_params, u16 nav_id, u16 index, 2070 u32 addr_lo, u32 addr_hi, u32 count, 2071 u8 mode, u8 size, u8 order_id) 2072 { 2073 struct ti_sci_msg_rm_ring_cfg_req *req; 2074 struct ti_sci_msg_hdr *resp; 2075 struct ti_sci_xfer *xfer; 2076 struct ti_sci_info *info; 2077 struct device *dev; 2078 int ret = 0; 2079 2080 if (IS_ERR_OR_NULL(handle)) 2081 return -EINVAL; 2082 2083 info = handle_to_ti_sci_info(handle); 2084 dev = info->dev; 2085 2086 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG, 2087 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2088 sizeof(*req), sizeof(*resp)); 2089 if (IS_ERR(xfer)) { 2090 ret = PTR_ERR(xfer); 2091 dev_err(dev, "RM_RA:Message config failed(%d)\n", ret); 2092 return ret; 2093 } 2094 req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf; 2095 req->valid_params = valid_params; 2096 req->nav_id = nav_id; 2097 req->index = index; 2098 req->addr_lo = addr_lo; 2099 req->addr_hi = addr_hi; 2100 req->count = count; 2101 req->mode = mode; 2102 req->size = size; 2103 req->order_id = order_id; 2104 2105 ret = ti_sci_do_xfer(info, xfer); 2106 if (ret) { 2107 dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret); 2108 goto fail; 2109 } 2110 2111 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2112 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2113 2114 fail: 2115 ti_sci_put_one_xfer(&info->minfo, xfer); 2116 dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret); 2117 return ret; 2118 } 2119 2120 /** 2121 * ti_sci_cmd_ring_get_config() - get RA ring configuration 2122 * @handle: Pointer to TI SCI handle. 2123 * @nav_id: Device ID of Navigator Subsystem from which the ring is 2124 * allocated 2125 * @index: Ring index 2126 * @addr_lo: Returns ring's base address lo 32 bits 2127 * @addr_hi: Returns ring's base address hi 32 bits 2128 * @count: Returns number of ring elements 2129 * @mode: Returns mode of the ring 2130 * @size: Returns ring element size 2131 * @order_id: Returns ring's bus order ID 2132 * 2133 * Return: 0 if all went well, else returns appropriate error value. 2134 * 2135 * See @ti_sci_msg_rm_ring_get_cfg_req for more info. 
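*
* Illustrative client-side sketch (nav_id and index are placeholders; the
* ops member name is the one wired up in ti_sci_setup_ops() further below).
* Output pointers that are not of interest may be passed as NULL, since the
* response is only copied through non-NULL pointers:
*
*   u8 mode, size, order_id;
*   u32 addr_lo, addr_hi, count;
*   int ret;
*
*   ret = handle->ops.rm_ring_ops.get_config(handle, nav_id, index, &mode,
*                                            &addr_lo, &addr_hi, &count,
*                                            &size, &order_id);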
2136 */ 2137 static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle, 2138 u32 nav_id, u32 index, u8 *mode, 2139 u32 *addr_lo, u32 *addr_hi, 2140 u32 *count, u8 *size, u8 *order_id) 2141 { 2142 struct ti_sci_msg_rm_ring_get_cfg_resp *resp; 2143 struct ti_sci_msg_rm_ring_get_cfg_req *req; 2144 struct ti_sci_xfer *xfer; 2145 struct ti_sci_info *info; 2146 struct device *dev; 2147 int ret = 0; 2148 2149 if (IS_ERR_OR_NULL(handle)) 2150 return -EINVAL; 2151 2152 info = handle_to_ti_sci_info(handle); 2153 dev = info->dev; 2154 2155 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG, 2156 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2157 sizeof(*req), sizeof(*resp)); 2158 if (IS_ERR(xfer)) { 2159 ret = PTR_ERR(xfer); 2160 dev_err(dev, 2161 "RM_RA:Message get config failed(%d)\n", ret); 2162 return ret; 2163 } 2164 req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf; 2165 req->nav_id = nav_id; 2166 req->index = index; 2167 2168 ret = ti_sci_do_xfer(info, xfer); 2169 if (ret) { 2170 dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret); 2171 goto fail; 2172 } 2173 2174 resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf; 2175 2176 if (!ti_sci_is_response_ack(resp)) { 2177 ret = -ENODEV; 2178 } else { 2179 if (mode) 2180 *mode = resp->mode; 2181 if (addr_lo) 2182 *addr_lo = resp->addr_lo; 2183 if (addr_hi) 2184 *addr_hi = resp->addr_hi; 2185 if (count) 2186 *count = resp->count; 2187 if (size) 2188 *size = resp->size; 2189 if (order_id) 2190 *order_id = resp->order_id; 2191 } 2192 2193 fail: 2194 ti_sci_put_one_xfer(&info->minfo, xfer); 2195 dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret); 2196 return ret; 2197 } 2198 2199 /** 2200 * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread 2201 * @handle: Pointer to TI SCI handle. 2202 * @nav_id: Device ID of Navigator Subsystem which should be used for 2203 * pairing 2204 * @src_thread: Source PSI-L thread ID 2205 * @dst_thread: Destination PSI-L thread ID 2206 * 2207 * Return: 0 if all went well, else returns appropriate error value. 2208 */ 2209 static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle, 2210 u32 nav_id, u32 src_thread, u32 dst_thread) 2211 { 2212 struct ti_sci_msg_psil_pair *req; 2213 struct ti_sci_msg_hdr *resp; 2214 struct ti_sci_xfer *xfer; 2215 struct ti_sci_info *info; 2216 struct device *dev; 2217 int ret = 0; 2218 2219 if (IS_ERR(handle)) 2220 return PTR_ERR(handle); 2221 if (!handle) 2222 return -EINVAL; 2223 2224 info = handle_to_ti_sci_info(handle); 2225 dev = info->dev; 2226 2227 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR, 2228 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2229 sizeof(*req), sizeof(*resp)); 2230 if (IS_ERR(xfer)) { 2231 ret = PTR_ERR(xfer); 2232 dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret); 2233 return ret; 2234 } 2235 req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf; 2236 req->nav_id = nav_id; 2237 req->src_thread = src_thread; 2238 req->dst_thread = dst_thread; 2239 2240 ret = ti_sci_do_xfer(info, xfer); 2241 if (ret) { 2242 dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret); 2243 goto fail; 2244 } 2245 2246 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2247 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2248 2249 fail: 2250 ti_sci_put_one_xfer(&info->minfo, xfer); 2251 2252 return ret; 2253 } 2254 2255 /** 2256 * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread 2257 * @handle: Pointer to TI SCI handle.
2258 * @nav_id: Device ID of Navigator Subsystem which should be used for 2259 * unpairing 2260 * @src_thread: Source PSI-L thread ID 2261 * @dst_thread: Destination PSI-L thread ID 2262 * 2263 * Return: 0 if all went well, else returns appropriate error value. 2264 */ 2265 static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle, 2266 u32 nav_id, u32 src_thread, u32 dst_thread) 2267 { 2268 struct ti_sci_msg_psil_unpair *req; 2269 struct ti_sci_msg_hdr *resp; 2270 struct ti_sci_xfer *xfer; 2271 struct ti_sci_info *info; 2272 struct device *dev; 2273 int ret = 0; 2274 2275 if (IS_ERR(handle)) 2276 return PTR_ERR(handle); 2277 if (!handle) 2278 return -EINVAL; 2279 2280 info = handle_to_ti_sci_info(handle); 2281 dev = info->dev; 2282 2283 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR, 2284 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2285 sizeof(*req), sizeof(*resp)); 2286 if (IS_ERR(xfer)) { 2287 ret = PTR_ERR(xfer); 2288 dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret); 2289 return ret; 2290 } 2291 req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf; 2292 req->nav_id = nav_id; 2293 req->src_thread = src_thread; 2294 req->dst_thread = dst_thread; 2295 2296 ret = ti_sci_do_xfer(info, xfer); 2297 if (ret) { 2298 dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret); 2299 goto fail; 2300 } 2301 2302 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2303 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2304 2305 fail: 2306 ti_sci_put_one_xfer(&info->minfo, xfer); 2307 2308 return ret; 2309 } 2310 2311 /** 2312 * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel 2313 * @handle: Pointer to TI SCI handle. 2314 * @params: Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config 2315 * structure 2316 * 2317 * Return: 0 if all went well, else returns appropriate error value. 2318 * 2319 * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for 2320 * more info. 
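*
* Illustrative client-side sketch (all values are placeholders; the ops
* member name comes from ti_sci_setup_ops() further below). The
* cfg.valid_params bitfield declares which of the remaining fields carry
* valid data for this request:
*
*   struct ti_sci_msg_rm_udmap_tx_ch_cfg cfg = { 0 };
*   int ret;
*
*   cfg.valid_params = ...;
*   cfg.nav_id = nav_id;
*   cfg.index = tx_ch_index;
*   ret = handle->ops.rm_udmap_ops.tx_ch_cfg(handle, &cfg);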
2321 */ 2322 static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle, 2323 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params) 2324 { 2325 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req; 2326 struct ti_sci_msg_hdr *resp; 2327 struct ti_sci_xfer *xfer; 2328 struct ti_sci_info *info; 2329 struct device *dev; 2330 int ret = 0; 2331 2332 if (IS_ERR_OR_NULL(handle)) 2333 return -EINVAL; 2334 2335 info = handle_to_ti_sci_info(handle); 2336 dev = info->dev; 2337 2338 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG, 2339 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2340 sizeof(*req), sizeof(*resp)); 2341 if (IS_ERR(xfer)) { 2342 ret = PTR_ERR(xfer); 2343 dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret); 2344 return ret; 2345 } 2346 req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf; 2347 req->valid_params = params->valid_params; 2348 req->nav_id = params->nav_id; 2349 req->index = params->index; 2350 req->tx_pause_on_err = params->tx_pause_on_err; 2351 req->tx_filt_einfo = params->tx_filt_einfo; 2352 req->tx_filt_pswords = params->tx_filt_pswords; 2353 req->tx_atype = params->tx_atype; 2354 req->tx_chan_type = params->tx_chan_type; 2355 req->tx_supr_tdpkt = params->tx_supr_tdpkt; 2356 req->tx_fetch_size = params->tx_fetch_size; 2357 req->tx_credit_count = params->tx_credit_count; 2358 req->txcq_qnum = params->txcq_qnum; 2359 req->tx_priority = params->tx_priority; 2360 req->tx_qos = params->tx_qos; 2361 req->tx_orderid = params->tx_orderid; 2362 req->fdepth = params->fdepth; 2363 req->tx_sched_priority = params->tx_sched_priority; 2364 req->tx_burst_size = params->tx_burst_size; 2365 2366 ret = ti_sci_do_xfer(info, xfer); 2367 if (ret) { 2368 dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret); 2369 goto fail; 2370 } 2371 2372 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2373 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2374 2375 fail: 2376 ti_sci_put_one_xfer(&info->minfo, xfer); 2377 dev_dbg(dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret); 2378 return ret; 2379 } 2380 2381 /** 2382 * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel 2383 * @handle: Pointer to TI SCI handle. 2384 * @params: Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config 2385 * structure 2386 * 2387 * Return: 0 if all went well, else returns appropriate error value. 2388 * 2389 * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for 2390 * more info.
2391 */ 2392 static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle, 2393 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params) 2394 { 2395 struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req; 2396 struct ti_sci_msg_hdr *resp; 2397 struct ti_sci_xfer *xfer; 2398 struct ti_sci_info *info; 2399 struct device *dev; 2400 int ret = 0; 2401 2402 if (IS_ERR_OR_NULL(handle)) 2403 return -EINVAL; 2404 2405 info = handle_to_ti_sci_info(handle); 2406 dev = info->dev; 2407 2408 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG, 2409 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2410 sizeof(*req), sizeof(*resp)); 2411 if (IS_ERR(xfer)) { 2412 ret = PTR_ERR(xfer); 2413 dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret); 2414 return ret; 2415 } 2416 req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf; 2417 req->valid_params = params->valid_params; 2418 req->nav_id = params->nav_id; 2419 req->index = params->index; 2420 req->rx_fetch_size = params->rx_fetch_size; 2421 req->rxcq_qnum = params->rxcq_qnum; 2422 req->rx_priority = params->rx_priority; 2423 req->rx_qos = params->rx_qos; 2424 req->rx_orderid = params->rx_orderid; 2425 req->rx_sched_priority = params->rx_sched_priority; 2426 req->flowid_start = params->flowid_start; 2427 req->flowid_cnt = params->flowid_cnt; 2428 req->rx_pause_on_err = params->rx_pause_on_err; 2429 req->rx_atype = params->rx_atype; 2430 req->rx_chan_type = params->rx_chan_type; 2431 req->rx_ignore_short = params->rx_ignore_short; 2432 req->rx_ignore_long = params->rx_ignore_long; 2433 req->rx_burst_size = params->rx_burst_size; 2434 2435 ret = ti_sci_do_xfer(info, xfer); 2436 if (ret) { 2437 dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret); 2438 goto fail; 2439 } 2440 2441 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2442 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2443 2444 fail: 2445 ti_sci_put_one_xfer(&info->minfo, xfer); 2446 dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret); 2447 return ret; 2448 } 2449 2450 /** 2451 * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW 2452 * @handle: Pointer to TI SCI handle. 2453 * @params: Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config 2454 * structure 2455 * 2456 * Return: 0 if all went well, else returns appropriate error value. 2457 * 2458 * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for 2459 * more info. 
2460 */ 2461 static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle, 2462 const struct ti_sci_msg_rm_udmap_flow_cfg *params) 2463 { 2464 struct ti_sci_msg_rm_udmap_flow_cfg_req *req; 2465 struct ti_sci_msg_hdr *resp; 2466 struct ti_sci_xfer *xfer; 2467 struct ti_sci_info *info; 2468 struct device *dev; 2469 int ret = 0; 2470 2471 if (IS_ERR_OR_NULL(handle)) 2472 return -EINVAL; 2473 2474 info = handle_to_ti_sci_info(handle); 2475 dev = info->dev; 2476 2477 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG, 2478 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2479 sizeof(*req), sizeof(*resp)); 2480 if (IS_ERR(xfer)) { 2481 ret = PTR_ERR(xfer); 2482 dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret); 2483 return ret; 2484 } 2485 req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf; 2486 req->valid_params = params->valid_params; 2487 req->nav_id = params->nav_id; 2488 req->flow_index = params->flow_index; 2489 req->rx_einfo_present = params->rx_einfo_present; 2490 req->rx_psinfo_present = params->rx_psinfo_present; 2491 req->rx_error_handling = params->rx_error_handling; 2492 req->rx_desc_type = params->rx_desc_type; 2493 req->rx_sop_offset = params->rx_sop_offset; 2494 req->rx_dest_qnum = params->rx_dest_qnum; 2495 req->rx_src_tag_hi = params->rx_src_tag_hi; 2496 req->rx_src_tag_lo = params->rx_src_tag_lo; 2497 req->rx_dest_tag_hi = params->rx_dest_tag_hi; 2498 req->rx_dest_tag_lo = params->rx_dest_tag_lo; 2499 req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel; 2500 req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel; 2501 req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel; 2502 req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel; 2503 req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum; 2504 req->rx_fdq1_qnum = params->rx_fdq1_qnum; 2505 req->rx_fdq2_qnum = params->rx_fdq2_qnum; 2506 req->rx_fdq3_qnum = params->rx_fdq3_qnum; 2507 req->rx_ps_location = params->rx_ps_location; 2508 2509 ret = ti_sci_do_xfer(info, xfer); 2510 if (ret) { 2511 dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret); 2512 goto fail; 2513 } 2514 2515 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2516 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2517 2518 fail: 2519 ti_sci_put_one_xfer(&info->minfo, xfer); 2520 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret); 2521 return ret; 2522 } 2523 2524 /** 2525 * ti_sci_cmd_proc_request() - Command to request a physical processor control 2526 * @handle: Pointer to TI SCI handle 2527 * @proc_id: Processor ID this request is for 2528 * 2529 * Return: 0 if all went well, else returns appropriate error value. 
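*
* Illustrative client-side sketch (proc_id and host_id are placeholder
* values; the ops member names are wired up in ti_sci_setup_ops() further
* below). A granted request is typically balanced later by release(), or
* control is passed on with handover():
*
*   ret = handle->ops.proc_ops.request(handle, proc_id);
*   if (!ret)
*           ret = handle->ops.proc_ops.handover(handle, proc_id, host_id);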
2530 */ 2531 static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle, 2532 u8 proc_id) 2533 { 2534 struct ti_sci_msg_req_proc_request *req; 2535 struct ti_sci_msg_hdr *resp; 2536 struct ti_sci_info *info; 2537 struct ti_sci_xfer *xfer; 2538 struct device *dev; 2539 int ret = 0; 2540 2541 if (!handle) 2542 return -EINVAL; 2543 if (IS_ERR(handle)) 2544 return PTR_ERR(handle); 2545 2546 info = handle_to_ti_sci_info(handle); 2547 dev = info->dev; 2548 2549 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST, 2550 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2551 sizeof(*req), sizeof(*resp)); 2552 if (IS_ERR(xfer)) { 2553 ret = PTR_ERR(xfer); 2554 dev_err(dev, "Message alloc failed(%d)\n", ret); 2555 return ret; 2556 } 2557 req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf; 2558 req->processor_id = proc_id; 2559 2560 ret = ti_sci_do_xfer(info, xfer); 2561 if (ret) { 2562 dev_err(dev, "Mbox send fail %d\n", ret); 2563 goto fail; 2564 } 2565 2566 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 2567 2568 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2569 2570 fail: 2571 ti_sci_put_one_xfer(&info->minfo, xfer); 2572 2573 return ret; 2574 } 2575 2576 /** 2577 * ti_sci_cmd_proc_release() - Command to release a physical processor control 2578 * @handle: Pointer to TI SCI handle 2579 * @proc_id: Processor ID this request is for 2580 * 2581 * Return: 0 if all went well, else returns appropriate error value. 2582 */ 2583 static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle, 2584 u8 proc_id) 2585 { 2586 struct ti_sci_msg_req_proc_release *req; 2587 struct ti_sci_msg_hdr *resp; 2588 struct ti_sci_info *info; 2589 struct ti_sci_xfer *xfer; 2590 struct device *dev; 2591 int ret = 0; 2592 2593 if (!handle) 2594 return -EINVAL; 2595 if (IS_ERR(handle)) 2596 return PTR_ERR(handle); 2597 2598 info = handle_to_ti_sci_info(handle); 2599 dev = info->dev; 2600 2601 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE, 2602 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2603 sizeof(*req), sizeof(*resp)); 2604 if (IS_ERR(xfer)) { 2605 ret = PTR_ERR(xfer); 2606 dev_err(dev, "Message alloc failed(%d)\n", ret); 2607 return ret; 2608 } 2609 req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf; 2610 req->processor_id = proc_id; 2611 2612 ret = ti_sci_do_xfer(info, xfer); 2613 if (ret) { 2614 dev_err(dev, "Mbox send fail %d\n", ret); 2615 goto fail; 2616 } 2617 2618 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 2619 2620 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2621 2622 fail: 2623 ti_sci_put_one_xfer(&info->minfo, xfer); 2624 2625 return ret; 2626 } 2627 2628 /** 2629 * ti_sci_cmd_proc_handover() - Command to handover a physical processor 2630 * control to a host in the processor's access 2631 * control list. 2632 * @handle: Pointer to TI SCI handle 2633 * @proc_id: Processor ID this request is for 2634 * @host_id: Host ID to get the control of the processor 2635 * 2636 * Return: 0 if all went well, else returns appropriate error value. 
2637 */ 2638 static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle, 2639 u8 proc_id, u8 host_id) 2640 { 2641 struct ti_sci_msg_req_proc_handover *req; 2642 struct ti_sci_msg_hdr *resp; 2643 struct ti_sci_info *info; 2644 struct ti_sci_xfer *xfer; 2645 struct device *dev; 2646 int ret = 0; 2647 2648 if (!handle) 2649 return -EINVAL; 2650 if (IS_ERR(handle)) 2651 return PTR_ERR(handle); 2652 2653 info = handle_to_ti_sci_info(handle); 2654 dev = info->dev; 2655 2656 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER, 2657 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2658 sizeof(*req), sizeof(*resp)); 2659 if (IS_ERR(xfer)) { 2660 ret = PTR_ERR(xfer); 2661 dev_err(dev, "Message alloc failed(%d)\n", ret); 2662 return ret; 2663 } 2664 req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf; 2665 req->processor_id = proc_id; 2666 req->host_id = host_id; 2667 2668 ret = ti_sci_do_xfer(info, xfer); 2669 if (ret) { 2670 dev_err(dev, "Mbox send fail %d\n", ret); 2671 goto fail; 2672 } 2673 2674 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 2675 2676 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2677 2678 fail: 2679 ti_sci_put_one_xfer(&info->minfo, xfer); 2680 2681 return ret; 2682 } 2683 2684 /** 2685 * ti_sci_cmd_proc_set_config() - Command to set the processor boot 2686 * configuration flags 2687 * @handle: Pointer to TI SCI handle 2688 * @proc_id: Processor ID this request is for 2689 * @config_flags_set: Configuration flags to be set 2690 * @config_flags_clear: Configuration flags to be cleared. 2691 * 2692 * Return: 0 if all went well, else returns appropriate error value. 2693 */ 2694 static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle, 2695 u8 proc_id, u64 bootvector, 2696 u32 config_flags_set, 2697 u32 config_flags_clear) 2698 { 2699 struct ti_sci_msg_req_set_config *req; 2700 struct ti_sci_msg_hdr *resp; 2701 struct ti_sci_info *info; 2702 struct ti_sci_xfer *xfer; 2703 struct device *dev; 2704 int ret = 0; 2705 2706 if (!handle) 2707 return -EINVAL; 2708 if (IS_ERR(handle)) 2709 return PTR_ERR(handle); 2710 2711 info = handle_to_ti_sci_info(handle); 2712 dev = info->dev; 2713 2714 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG, 2715 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2716 sizeof(*req), sizeof(*resp)); 2717 if (IS_ERR(xfer)) { 2718 ret = PTR_ERR(xfer); 2719 dev_err(dev, "Message alloc failed(%d)\n", ret); 2720 return ret; 2721 } 2722 req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf; 2723 req->processor_id = proc_id; 2724 req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK; 2725 req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >> 2726 TI_SCI_ADDR_HIGH_SHIFT; 2727 req->config_flags_set = config_flags_set; 2728 req->config_flags_clear = config_flags_clear; 2729 2730 ret = ti_sci_do_xfer(info, xfer); 2731 if (ret) { 2732 dev_err(dev, "Mbox send fail %d\n", ret); 2733 goto fail; 2734 } 2735 2736 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 2737 2738 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2739 2740 fail: 2741 ti_sci_put_one_xfer(&info->minfo, xfer); 2742 2743 return ret; 2744 } 2745 2746 /** 2747 * ti_sci_cmd_proc_set_control() - Command to set the processor boot 2748 * control flags 2749 * @handle: Pointer to TI SCI handle 2750 * @proc_id: Processor ID this request is for 2751 * @control_flags_set: Control flags to be set 2752 * @control_flags_clear: Control flags to be cleared 2753 * 2754 * Return: 0 if all went well, else returns appropriate error value. 
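*
* Illustrative sketch (the flag values are placeholders; the actual control
* flag definitions live in the TI SCI protocol headers, not in this file):
*
*   ret = handle->ops.proc_ops.set_control(handle, proc_id,
*                                          ctrl_flags_to_set,
*                                          ctrl_flags_to_clear);
*
* Bits given in @control_flags_set are set and bits given in
* @control_flags_clear are cleared by the same request.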
2755 */ 2756 static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle, 2757 u8 proc_id, u32 control_flags_set, 2758 u32 control_flags_clear) 2759 { 2760 struct ti_sci_msg_req_set_ctrl *req; 2761 struct ti_sci_msg_hdr *resp; 2762 struct ti_sci_info *info; 2763 struct ti_sci_xfer *xfer; 2764 struct device *dev; 2765 int ret = 0; 2766 2767 if (!handle) 2768 return -EINVAL; 2769 if (IS_ERR(handle)) 2770 return PTR_ERR(handle); 2771 2772 info = handle_to_ti_sci_info(handle); 2773 dev = info->dev; 2774 2775 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL, 2776 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2777 sizeof(*req), sizeof(*resp)); 2778 if (IS_ERR(xfer)) { 2779 ret = PTR_ERR(xfer); 2780 dev_err(dev, "Message alloc failed(%d)\n", ret); 2781 return ret; 2782 } 2783 req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf; 2784 req->processor_id = proc_id; 2785 req->control_flags_set = control_flags_set; 2786 req->control_flags_clear = control_flags_clear; 2787 2788 ret = ti_sci_do_xfer(info, xfer); 2789 if (ret) { 2790 dev_err(dev, "Mbox send fail %d\n", ret); 2791 goto fail; 2792 } 2793 2794 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 2795 2796 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2797 2798 fail: 2799 ti_sci_put_one_xfer(&info->minfo, xfer); 2800 2801 return ret; 2802 } 2803 2804 /** 2805 * ti_sci_cmd_proc_get_status() - Command to get the processor boot status 2806 * @handle: Pointer to TI SCI handle 2807 * @proc_id: Processor ID this request is for * @bv: Processor Boot Vector (boot address) is returned here * @cfg_flags: Processor configuration flags are returned here * @ctrl_flags: Processor control flags are returned here * @sts_flags: Processor status flags are returned here 2808 * 2809 * Return: 0 if all went well, else returns appropriate error value. 2810 */ 2811 static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle, 2812 u8 proc_id, u64 *bv, u32 *cfg_flags, 2813 u32 *ctrl_flags, u32 *sts_flags) 2814 { 2815 struct ti_sci_msg_resp_get_status *resp; 2816 struct ti_sci_msg_req_get_status *req; 2817 struct ti_sci_info *info; 2818 struct ti_sci_xfer *xfer; 2819 struct device *dev; 2820 int ret = 0; 2821 2822 if (!handle) 2823 return -EINVAL; 2824 if (IS_ERR(handle)) 2825 return PTR_ERR(handle); 2826 2827 info = handle_to_ti_sci_info(handle); 2828 dev = info->dev; 2829 2830 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS, 2831 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2832 sizeof(*req), sizeof(*resp)); 2833 if (IS_ERR(xfer)) { 2834 ret = PTR_ERR(xfer); 2835 dev_err(dev, "Message alloc failed(%d)\n", ret); 2836 return ret; 2837 } 2838 req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf; 2839 req->processor_id = proc_id; 2840 2841 ret = ti_sci_do_xfer(info, xfer); 2842 if (ret) { 2843 dev_err(dev, "Mbox send fail %d\n", ret); 2844 goto fail; 2845 } 2846 2847 resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf; 2848 2849 if (!ti_sci_is_response_ack(resp)) { 2850 ret = -ENODEV; 2851 } else { 2852 *bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) | 2853 (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) & 2854 TI_SCI_ADDR_HIGH_MASK); 2855 *cfg_flags = resp->config_flags; 2856 *ctrl_flags = resp->control_flags; 2857 *sts_flags = resp->status_flags; 2858 } 2859 2860 fail: 2861 ti_sci_put_one_xfer(&info->minfo, xfer); 2862 2863 return ret; 2864 } 2865 2866 /* 2867 * ti_sci_setup_ops() - Setup the operations structures 2868 * @info: pointer to TISCI instance 2869 */ 2870 static void ti_sci_setup_ops(struct ti_sci_info *info) 2871 { 2872 struct ti_sci_ops *ops = &info->handle.ops; 2873 struct ti_sci_core_ops *core_ops = &ops->core_ops; 2874 struct ti_sci_dev_ops *dops = &ops->dev_ops; 2875 struct ti_sci_clk_ops *cops = &ops->clk_ops; 2876 struct
ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops; 2877 struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops; 2878 struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops; 2879 struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops; 2880 struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops; 2881 struct ti_sci_proc_ops *pops = &ops->proc_ops; 2882 2883 core_ops->reboot_device = ti_sci_cmd_core_reboot; 2884 2885 dops->get_device = ti_sci_cmd_get_device; 2886 dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive; 2887 dops->idle_device = ti_sci_cmd_idle_device; 2888 dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive; 2889 dops->put_device = ti_sci_cmd_put_device; 2890 2891 dops->is_valid = ti_sci_cmd_dev_is_valid; 2892 dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt; 2893 dops->is_idle = ti_sci_cmd_dev_is_idle; 2894 dops->is_stop = ti_sci_cmd_dev_is_stop; 2895 dops->is_on = ti_sci_cmd_dev_is_on; 2896 dops->is_transitioning = ti_sci_cmd_dev_is_trans; 2897 dops->set_device_resets = ti_sci_cmd_set_device_resets; 2898 dops->get_device_resets = ti_sci_cmd_get_device_resets; 2899 2900 cops->get_clock = ti_sci_cmd_get_clock; 2901 cops->idle_clock = ti_sci_cmd_idle_clock; 2902 cops->put_clock = ti_sci_cmd_put_clock; 2903 cops->is_auto = ti_sci_cmd_clk_is_auto; 2904 cops->is_on = ti_sci_cmd_clk_is_on; 2905 cops->is_off = ti_sci_cmd_clk_is_off; 2906 2907 cops->set_parent = ti_sci_cmd_clk_set_parent; 2908 cops->get_parent = ti_sci_cmd_clk_get_parent; 2909 cops->get_num_parents = ti_sci_cmd_clk_get_num_parents; 2910 2911 cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq; 2912 cops->set_freq = ti_sci_cmd_clk_set_freq; 2913 cops->get_freq = ti_sci_cmd_clk_get_freq; 2914 2915 rm_core_ops->get_range = ti_sci_cmd_get_resource_range; 2916 rm_core_ops->get_range_from_shost = 2917 ti_sci_cmd_get_resource_range_from_shost; 2918 2919 iops->set_irq = ti_sci_cmd_set_irq; 2920 iops->set_event_map = ti_sci_cmd_set_event_map; 2921 iops->free_irq = ti_sci_cmd_free_irq; 2922 iops->free_event_map = ti_sci_cmd_free_event_map; 2923 2924 rops->config = ti_sci_cmd_ring_config; 2925 rops->get_config = ti_sci_cmd_ring_get_config; 2926 2927 psilops->pair = ti_sci_cmd_rm_psil_pair; 2928 psilops->unpair = ti_sci_cmd_rm_psil_unpair; 2929 2930 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg; 2931 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg; 2932 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg; 2933 2934 pops->request = ti_sci_cmd_proc_request; 2935 pops->release = ti_sci_cmd_proc_release; 2936 pops->handover = ti_sci_cmd_proc_handover; 2937 pops->set_config = ti_sci_cmd_proc_set_config; 2938 pops->set_control = ti_sci_cmd_proc_set_control; 2939 pops->get_status = ti_sci_cmd_proc_get_status; 2940 } 2941 2942 /** 2943 * ti_sci_get_handle() - Get the TI SCI handle for a device 2944 * @dev: Pointer to device for which we want SCI handle 2945 * 2946 * NOTE: The function does not track individual clients of the framework 2947 * and is expected to be maintained by caller of TI SCI protocol library. 2948 * ti_sci_put_handle must be balanced with successful ti_sci_get_handle 2949 * Return: pointer to handle if successful, else: 2950 * -EPROBE_DEFER if the instance is not ready 2951 * -ENODEV if the required node handler is missing 2952 * -EINVAL if invalid conditions are encountered. 
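*
* Illustrative client sketch (error handling trimmed; @dev is expected to
* be a child of the TI SCI node in the device tree, since the instance is
* looked up via the parent OF node):
*
*   const struct ti_sci_handle *handle = ti_sci_get_handle(dev);
*
*   if (IS_ERR(handle))
*           return PTR_ERR(handle);
*   ...
*   ti_sci_put_handle(handle);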
2953 */ 2954 const struct ti_sci_handle *ti_sci_get_handle(struct device *dev) 2955 { 2956 struct device_node *ti_sci_np; 2957 struct list_head *p; 2958 struct ti_sci_handle *handle = NULL; 2959 struct ti_sci_info *info; 2960 2961 if (!dev) { 2962 pr_err("I need a device pointer\n"); 2963 return ERR_PTR(-EINVAL); 2964 } 2965 ti_sci_np = of_get_parent(dev->of_node); 2966 if (!ti_sci_np) { 2967 dev_err(dev, "No OF information\n"); 2968 return ERR_PTR(-EINVAL); 2969 } 2970 2971 mutex_lock(&ti_sci_list_mutex); 2972 list_for_each(p, &ti_sci_list) { 2973 info = list_entry(p, struct ti_sci_info, node); 2974 if (ti_sci_np == info->dev->of_node) { 2975 handle = &info->handle; 2976 info->users++; 2977 break; 2978 } 2979 } 2980 mutex_unlock(&ti_sci_list_mutex); 2981 of_node_put(ti_sci_np); 2982 2983 if (!handle) 2984 return ERR_PTR(-EPROBE_DEFER); 2985 2986 return handle; 2987 } 2988 EXPORT_SYMBOL_GPL(ti_sci_get_handle); 2989 2990 /** 2991 * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle 2992 * @handle: Handle acquired by ti_sci_get_handle 2993 * 2994 * NOTE: The function does not track individual clients of the framework 2995 * and is expected to be maintained by caller of TI SCI protocol library. 2996 * ti_sci_put_handle must be balanced with successful ti_sci_get_handle 2997 * 2998 * Return: 0 if successfully released 2999 * if an error pointer was passed, it returns the error value back, 3000 * if null was passed, it returns -EINVAL; 3001 */ 3002 int ti_sci_put_handle(const struct ti_sci_handle *handle) 3003 { 3004 struct ti_sci_info *info; 3005 3006 if (IS_ERR(handle)) 3007 return PTR_ERR(handle); 3008 if (!handle) 3009 return -EINVAL; 3010 3011 info = handle_to_ti_sci_info(handle); 3012 mutex_lock(&ti_sci_list_mutex); 3013 if (!WARN_ON(!info->users)) 3014 info->users--; 3015 mutex_unlock(&ti_sci_list_mutex); 3016 3017 return 0; 3018 } 3019 EXPORT_SYMBOL_GPL(ti_sci_put_handle); 3020 3021 static void devm_ti_sci_release(struct device *dev, void *res) 3022 { 3023 const struct ti_sci_handle **ptr = res; 3024 const struct ti_sci_handle *handle = *ptr; 3025 int ret; 3026 3027 ret = ti_sci_put_handle(handle); 3028 if (ret) 3029 dev_err(dev, "failed to put handle %d\n", ret); 3030 } 3031 3032 /** 3033 * devm_ti_sci_get_handle() - Managed get handle 3034 * @dev: device for which we want SCI handle 3035 * 3036 * NOTE: This releases the handle once the device resources are 3037 * no longer needed. MUST NOT BE released with ti_sci_put_handle. 3038 * The function does not track individual clients of the framework 3039 * and is expected to be maintained by caller of TI SCI protocol library. 3040 * 3041 * Return: pointer to handle if successful, else corresponding error pointer.
3042 */ 3043 const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev) 3044 { 3045 const struct ti_sci_handle **ptr; 3046 const struct ti_sci_handle *handle; 3047 3048 ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL); 3049 if (!ptr) 3050 return ERR_PTR(-ENOMEM); 3051 handle = ti_sci_get_handle(dev); 3052 3053 if (!IS_ERR(handle)) { 3054 *ptr = handle; 3055 devres_add(dev, ptr); 3056 } else { 3057 devres_free(ptr); 3058 } 3059 3060 return handle; 3061 } 3062 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle); 3063 3064 /** 3065 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle 3066 * @np: device node 3067 * @property: property name containing phandle on TISCI node 3068 * 3069 * NOTE: The function does not track individual clients of the framework 3070 * and is expected to be maintained by caller of TI SCI protocol library. 3071 * ti_sci_put_handle must be balanced with successful ti_sci_get_by_phandle 3072 * Return: pointer to handle if successful, else: 3073 * -EPROBE_DEFER if the instance is not ready 3074 * -ENODEV if the required node handler is missing 3075 * -EINVAL if invalid conditions are encountered. 3076 */ 3077 const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np, 3078 const char *property) 3079 { 3080 struct ti_sci_handle *handle = NULL; 3081 struct device_node *ti_sci_np; 3082 struct ti_sci_info *info; 3083 struct list_head *p; 3084 3085 if (!np) { 3086 pr_err("I need a device node pointer\n"); 3087 return ERR_PTR(-EINVAL); 3088 } 3089 3090 ti_sci_np = of_parse_phandle(np, property, 0); 3091 if (!ti_sci_np) 3092 return ERR_PTR(-ENODEV); 3093 3094 mutex_lock(&ti_sci_list_mutex); 3095 list_for_each(p, &ti_sci_list) { 3096 info = list_entry(p, struct ti_sci_info, node); 3097 if (ti_sci_np == info->dev->of_node) { 3098 handle = &info->handle; 3099 info->users++; 3100 break; 3101 } 3102 } 3103 mutex_unlock(&ti_sci_list_mutex); 3104 of_node_put(ti_sci_np); 3105 3106 if (!handle) 3107 return ERR_PTR(-EPROBE_DEFER); 3108 3109 return handle; 3110 } 3111 EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle); 3112 3113 /** 3114 * devm_ti_sci_get_by_phandle() - Managed get handle using phandle 3115 * @dev: Device pointer requesting TISCI handle 3116 * @property: property name containing phandle on TISCI node 3117 * 3118 * NOTE: This releases the handle once the device resources are 3119 * no longer needed. MUST NOT BE released with ti_sci_put_handle. 3120 * The function does not track individual clients of the framework 3121 * and is expected to be maintained by caller of TI SCI protocol library. 3122 * 3123 * Return: pointer to handle if successful, else corresponding error pointer. 3124 */ 3125 const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev, 3126 const char *property) 3127 { 3128 const struct ti_sci_handle *handle; 3129 const struct ti_sci_handle **ptr; 3130 3131 ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL); 3132 if (!ptr) 3133 return ERR_PTR(-ENOMEM); 3134 handle = ti_sci_get_by_phandle(dev_of_node(dev), property); 3135 3136 if (!IS_ERR(handle)) { 3137 *ptr = handle; 3138 devres_add(dev, ptr); 3139 } else { 3140 devres_free(ptr); 3141 } 3142 3143 return handle; 3144 } 3145 EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle); 3146 3147 /** 3148 * ti_sci_get_free_resource() - Get a free resource from TISCI resource. 3149 * @res: Pointer to the TISCI resource 3150 * 3151 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
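*
* Illustrative sketch ('res' is assumed to come from one of the
* devm_ti_sci_get_*resource() helpers below; 'res_idx' and the error code
* are placeholders):
*
*   u16 res_idx = ti_sci_get_free_resource(res);
*
*   if (res_idx == TI_SCI_RESOURCE_NULL)
*           return -ENODEV;
*   ...
*   ti_sci_release_resource(res, res_idx);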
3152 */ 3153 u16 ti_sci_get_free_resource(struct ti_sci_resource *res) 3154 { 3155 unsigned long flags; 3156 u16 set, free_bit; 3157 3158 raw_spin_lock_irqsave(&res->lock, flags); 3159 for (set = 0; set < res->sets; set++) { 3160 free_bit = find_first_zero_bit(res->desc[set].res_map, 3161 res->desc[set].num); 3162 if (free_bit != res->desc[set].num) { 3163 set_bit(free_bit, res->desc[set].res_map); 3164 raw_spin_unlock_irqrestore(&res->lock, flags); 3165 return res->desc[set].start + free_bit; 3166 } 3167 } 3168 raw_spin_unlock_irqrestore(&res->lock, flags); 3169 3170 return TI_SCI_RESOURCE_NULL; 3171 } 3172 EXPORT_SYMBOL_GPL(ti_sci_get_free_resource); 3173 3174 /** 3175 * ti_sci_release_resource() - Release a resource from TISCI resource. 3176 * @res: Pointer to the TISCI resource 3177 * @id: Resource id to be released. 3178 */ 3179 void ti_sci_release_resource(struct ti_sci_resource *res, u16 id) 3180 { 3181 unsigned long flags; 3182 u16 set; 3183 3184 raw_spin_lock_irqsave(&res->lock, flags); 3185 for (set = 0; set < res->sets; set++) { 3186 if (res->desc[set].start <= id && 3187 (res->desc[set].num + res->desc[set].start) > id) 3188 clear_bit(id - res->desc[set].start, 3189 res->desc[set].res_map); 3190 } 3191 raw_spin_unlock_irqrestore(&res->lock, flags); 3192 } 3193 EXPORT_SYMBOL_GPL(ti_sci_release_resource); 3194 3195 /** 3196 * ti_sci_get_num_resources() - Get the number of resources in TISCI resource 3197 * @res: Pointer to the TISCI resource 3198 * 3199 * Return: Total number of available resources. 3200 */ 3201 u32 ti_sci_get_num_resources(struct ti_sci_resource *res) 3202 { 3203 u32 set, count = 0; 3204 3205 for (set = 0; set < res->sets; set++) 3206 count += res->desc[set].num; 3207 3208 return count; 3209 } 3210 EXPORT_SYMBOL_GPL(ti_sci_get_num_resources); 3211 3212 /** 3213 * devm_ti_sci_get_resource_sets() - Get a TISCI resources assigned to a device 3214 * @handle: TISCI handle 3215 * @dev: Device pointer to which the resource is assigned 3216 * @dev_id: TISCI device id to which the resource is assigned 3217 * @sub_types: Array of sub_types assigned corresponding to device 3218 * @sets: Number of sub_types 3219 * 3220 * Return: Pointer to ti_sci_resource if all went well else appropriate 3221 * error pointer. 
3222 */ 3223 static struct ti_sci_resource * 3224 devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle, 3225 struct device *dev, u32 dev_id, u32 *sub_types, 3226 u32 sets) 3227 { 3228 struct ti_sci_resource *res; 3229 bool valid_set = false; 3230 int i, ret; 3231 3232 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); 3233 if (!res) 3234 return ERR_PTR(-ENOMEM); 3235 3236 res->sets = sets; 3237 res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc), 3238 GFP_KERNEL); 3239 if (!res->desc) 3240 return ERR_PTR(-ENOMEM); 3241 3242 for (i = 0; i < res->sets; i++) { 3243 ret = handle->ops.rm_core_ops.get_range(handle, dev_id, 3244 sub_types[i], 3245 &res->desc[i].start, 3246 &res->desc[i].num); 3247 if (ret) { 3248 dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n", 3249 dev_id, sub_types[i]); 3250 res->desc[i].start = 0; 3251 res->desc[i].num = 0; 3252 continue; 3253 } 3254 3255 dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n", 3256 dev_id, sub_types[i], res->desc[i].start, 3257 res->desc[i].num); 3258 3259 valid_set = true; 3260 res->desc[i].res_map = 3261 devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) * 3262 sizeof(*res->desc[i].res_map), GFP_KERNEL); 3263 if (!res->desc[i].res_map) 3264 return ERR_PTR(-ENOMEM); 3265 } 3266 raw_spin_lock_init(&res->lock); 3267 3268 if (valid_set) 3269 return res; 3270 3271 return ERR_PTR(-EINVAL); 3272 } 3273 3274 /** 3275 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device 3276 * @handle: TISCI handle 3277 * @dev: Device pointer to which the resource is assigned 3278 * @dev_id: TISCI device id to which the resource is assigned 3279 * @of_prop: property name by which the resources are represented 3280 * 3281 * Return: Pointer to ti_sci_resource if all went well else appropriate 3282 * error pointer. 3283 */ 3284 struct ti_sci_resource * 3285 devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, 3286 struct device *dev, u32 dev_id, char *of_prop) 3287 { 3288 struct ti_sci_resource *res; 3289 u32 *sub_types; 3290 int sets; 3291 3292 sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop, 3293 sizeof(u32)); 3294 if (sets < 0) { 3295 dev_err(dev, "%s resource type ids not available\n", of_prop); 3296 return ERR_PTR(sets); 3297 } 3298 3299 sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL); 3300 if (!sub_types) 3301 return ERR_PTR(-ENOMEM); 3302 3303 of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets); 3304 res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types, 3305 sets); 3306 3307 kfree(sub_types); 3308 return res; 3309 } 3310 EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource); 3311 3312 /** 3313 * devm_ti_sci_get_resource() - Get a resource range assigned to the device 3314 * @handle: TISCI handle 3315 * @dev: Device pointer to which the resource is assigned 3316 * @dev_id: TISCI device id to which the resource is assigned 3317 * @sub_type: TISCI resource subtype representing the resource. 3318 * 3319 * Return: Pointer to ti_sci_resource if all went well else appropriate 3320 * error pointer.
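*
* Illustrative sketch (dev_id and sub_type values are SoC specific and are
* placeholders here):
*
*   struct ti_sci_resource *res;
*
*   res = devm_ti_sci_get_resource(handle, dev, dev_id, sub_type);
*   if (IS_ERR(res))
*           return PTR_ERR(res);
*
* When the subtypes are described by a device tree property, use
* devm_ti_sci_get_of_resource() above instead.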
3321 */ 3322 struct ti_sci_resource * 3323 devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, 3324 u32 dev_id, u32 sub_type) 3325 { 3326 return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1); 3327 } 3328 EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource); 3329 3330 static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode, 3331 void *cmd) 3332 { 3333 struct ti_sci_info *info = reboot_to_ti_sci_info(nb); 3334 const struct ti_sci_handle *handle = &info->handle; 3335 3336 ti_sci_cmd_core_reboot(handle); 3337 3338 /* call fail OR pass, we should not be here in the first place */ 3339 return NOTIFY_BAD; 3340 } 3341 3342 /* Description for K2G */ 3343 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = { 3344 .default_host_id = 2, 3345 /* Conservative duration */ 3346 .max_rx_timeout_ms = 1000, 3347 /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */ 3348 .max_msgs = 20, 3349 .max_msg_size = 64, 3350 }; 3351 3352 /* Description for AM654 */ 3353 static const struct ti_sci_desc ti_sci_pmmc_am654_desc = { 3354 .default_host_id = 12, 3355 /* Conservative duration */ 3356 .max_rx_timeout_ms = 10000, 3357 /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */ 3358 .max_msgs = 20, 3359 .max_msg_size = 60, 3360 }; 3361 3362 static const struct of_device_id ti_sci_of_match[] = { 3363 {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc}, 3364 {.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc}, 3365 { /* Sentinel */ }, 3366 }; 3367 MODULE_DEVICE_TABLE(of, ti_sci_of_match); 3368 3369 static int ti_sci_probe(struct platform_device *pdev) 3370 { 3371 struct device *dev = &pdev->dev; 3372 const struct of_device_id *of_id; 3373 const struct ti_sci_desc *desc; 3374 struct ti_sci_xfer *xfer; 3375 struct ti_sci_info *info = NULL; 3376 struct ti_sci_xfers_info *minfo; 3377 struct mbox_client *cl; 3378 int ret = -EINVAL; 3379 int i; 3380 int reboot = 0; 3381 u32 h_id; 3382 3383 of_id = of_match_device(ti_sci_of_match, dev); 3384 if (!of_id) { 3385 dev_err(dev, "OF data missing\n"); 3386 return -EINVAL; 3387 } 3388 desc = of_id->data; 3389 3390 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); 3391 if (!info) 3392 return -ENOMEM; 3393 3394 info->dev = dev; 3395 info->desc = desc; 3396 ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id); 3397 /* if the property is not present in DT, use a default from desc */ 3398 if (ret < 0) { 3399 info->host_id = info->desc->default_host_id; 3400 } else { 3401 if (!h_id) { 3402 dev_warn(dev, "Host ID 0 is reserved for firmware\n"); 3403 info->host_id = info->desc->default_host_id; 3404 } else { 3405 info->host_id = h_id; 3406 } 3407 } 3408 3409 reboot = of_property_read_bool(dev->of_node, 3410 "ti,system-reboot-controller"); 3411 INIT_LIST_HEAD(&info->node); 3412 minfo = &info->minfo; 3413 3414 /* 3415 * Pre-allocate messages 3416 * NEVER allocate more than what we can indicate in hdr.seq 3417 * if we have data description bug, force a fix.. 
3418 */ 3419 if (WARN_ON(desc->max_msgs >= 3420 1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq))) 3421 return -EINVAL; 3422 3423 minfo->xfer_block = devm_kcalloc(dev, 3424 desc->max_msgs, 3425 sizeof(*minfo->xfer_block), 3426 GFP_KERNEL); 3427 if (!minfo->xfer_block) 3428 return -ENOMEM; 3429 3430 minfo->xfer_alloc_table = devm_kcalloc(dev, 3431 BITS_TO_LONGS(desc->max_msgs), 3432 sizeof(unsigned long), 3433 GFP_KERNEL); 3434 if (!minfo->xfer_alloc_table) 3435 return -ENOMEM; 3436 bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs); 3437 3438 /* Pre-initialize the buffer pointer to pre-allocated buffers */ 3439 for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) { 3440 xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size, 3441 GFP_KERNEL); 3442 if (!xfer->xfer_buf) 3443 return -ENOMEM; 3444 3445 xfer->tx_message.buf = xfer->xfer_buf; 3446 init_completion(&xfer->done); 3447 } 3448 3449 ret = ti_sci_debugfs_create(pdev, info); 3450 if (ret) 3451 dev_warn(dev, "Failed to create debug file\n"); 3452 3453 platform_set_drvdata(pdev, info); 3454 3455 cl = &info->cl; 3456 cl->dev = dev; 3457 cl->tx_block = false; 3458 cl->rx_callback = ti_sci_rx_callback; 3459 cl->knows_txdone = true; 3460 3461 spin_lock_init(&minfo->xfer_lock); 3462 sema_init(&minfo->sem_xfer_count, desc->max_msgs); 3463 3464 info->chan_rx = mbox_request_channel_byname(cl, "rx"); 3465 if (IS_ERR(info->chan_rx)) { 3466 ret = PTR_ERR(info->chan_rx); 3467 goto out; 3468 } 3469 3470 info->chan_tx = mbox_request_channel_byname(cl, "tx"); 3471 if (IS_ERR(info->chan_tx)) { 3472 ret = PTR_ERR(info->chan_tx); 3473 goto out; 3474 } 3475 ret = ti_sci_cmd_get_revision(info); 3476 if (ret) { 3477 dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret); 3478 goto out; 3479 } 3480 3481 ti_sci_setup_ops(info); 3482 3483 if (reboot) { 3484 info->nb.notifier_call = tisci_reboot_handler; 3485 info->nb.priority = 128; 3486 3487 ret = register_restart_handler(&info->nb); 3488 if (ret) { 3489 dev_err(dev, "reboot registration fail(%d)\n", ret); 3490 return ret; 3491 } 3492 } 3493 3494 dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n", 3495 info->handle.version.abi_major, info->handle.version.abi_minor, 3496 info->handle.version.firmware_revision, 3497 info->handle.version.firmware_description); 3498 3499 mutex_lock(&ti_sci_list_mutex); 3500 list_add_tail(&info->node, &ti_sci_list); 3501 mutex_unlock(&ti_sci_list_mutex); 3502 3503 return of_platform_populate(dev->of_node, NULL, NULL, dev); 3504 out: 3505 if (!IS_ERR(info->chan_tx)) 3506 mbox_free_channel(info->chan_tx); 3507 if (!IS_ERR(info->chan_rx)) 3508 mbox_free_channel(info->chan_rx); 3509 debugfs_remove(info->d); 3510 return ret; 3511 } 3512 3513 static int ti_sci_remove(struct platform_device *pdev) 3514 { 3515 struct ti_sci_info *info; 3516 struct device *dev = &pdev->dev; 3517 int ret = 0; 3518 3519 of_platform_depopulate(dev); 3520 3521 info = platform_get_drvdata(pdev); 3522 3523 if (info->nb.notifier_call) 3524 unregister_restart_handler(&info->nb); 3525 3526 mutex_lock(&ti_sci_list_mutex); 3527 if (info->users) 3528 ret = -EBUSY; 3529 else 3530 list_del(&info->node); 3531 mutex_unlock(&ti_sci_list_mutex); 3532 3533 if (!ret) { 3534 ti_sci_debugfs_destroy(pdev, info); 3535 3536 /* Safe to free channels since no more users */ 3537 mbox_free_channel(info->chan_tx); 3538 mbox_free_channel(info->chan_rx); 3539 } 3540 3541 return ret; 3542 } 3543 3544 static struct platform_driver ti_sci_driver = { 3545 .probe = ti_sci_probe, 3546 .remove = ti_sci_remove, 
3547 .driver = { 3548 .name = "ti-sci", 3549 .of_match_table = of_match_ptr(ti_sci_of_match), 3550 }, 3551 }; 3552 module_platform_driver(ti_sci_driver); 3553 3554 MODULE_LICENSE("GPL v2"); 3555 MODULE_DESCRIPTION("TI System Control Interface(SCI) driver"); 3556 MODULE_AUTHOR("Nishanth Menon"); 3557 MODULE_ALIAS("platform:ti-sci"); 3558