// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments System Control Interface Protocol Driver
 *
 * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
 *	Nishanth Menon
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/reboot.h>

#include "ti_sci.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
static DEFINE_MUTEX(ti_sci_list_mutex);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 * @xfer_buf:	Preallocated buffer to store receive message
 *		Since we work with request-ACK protocol, we can
 *		reuse the same buffer for the rx path as we
 *		use for the tx path.
 * @done:	completion event
 */
struct ti_sci_xfer {
	struct ti_msgmgr_message tx_message;
	u8 rx_len;
	u8 *xfer_buf;
	struct completion done;
};

/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count:	Counting Semaphore for managing max simultaneous
 *			Messages.
 * @xfer_block:		Preallocated Message array
 * @xfer_alloc_table:	Bitmap table for allocated messages.
 *			Index of this bitmap table is also used for message
 *			sequence identifier.
 * @xfer_lock:		Protection for message allocation
 */
struct ti_sci_xfers_info {
	struct semaphore sem_xfer_count;
	struct ti_sci_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation */
	spinlock_t xfer_lock;
};

/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};

/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @nb:		Reboot Notifier block
 * @d:		Debugfs file entry
 * @debug_region: Memory region where the debug messages are available
 * @debug_region_size: Debug region size
 * @debug_buffer: Buffer allocated to copy debug messages.
 * @handle:	Instance of TI SCI handle to send to clients.
 * @cl:		Mailbox Client
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @minfo:	Message info
 * @node:	list head
 * @host_id:	Host ID
 * @users:	Number of users of this instance
 */
struct ti_sci_info {
	struct device *dev;
	struct notifier_block nb;
	const struct ti_sci_desc *desc;
	struct dentry *d;
	void __iomem *debug_region;
	char *debug_buffer;
	size_t debug_region_size;
	struct ti_sci_handle handle;
	struct mbox_client cl;
	struct mbox_chan *chan_tx;
	struct mbox_chan *chan_rx;
	struct ti_sci_xfers_info minfo;
	struct list_head node;
	u8 host_id;
	/* protected by ti_sci_list_mutex */
	int users;
};

#define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)

#ifdef CONFIG_DEBUG_FS

/**
 * ti_sci_debug_show() - Helper to dump the debug log
 * @s:		sequence file pointer
 * @unused:	unused.
 *
 * Return: 0
 */
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
	struct ti_sci_info *info = s->private;

	memcpy_fromio(info->debug_buffer, info->debug_region,
		      info->debug_region_size);
	/*
	 * We don't trust firmware to leave NULL terminated last byte (hence
	 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
	 * specific data format for debug messages, we just present the data
	 * in the buffer as is - we expect the messages to be self explanatory.
	 */
	seq_puts(s, info->debug_buffer);
	return 0;
}

/* Provide the log file operations interface */
DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);

/**
 * ti_sci_debugfs_create() - Create log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 *
 * Return: 0 if all went fine, else corresponding error.
 */
static int ti_sci_debugfs_create(struct platform_device *pdev,
				 struct ti_sci_info *info)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	char debug_name[50] = "ti_sci_debug@";

	/* Debug region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "debug_messages");
	info->debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(info->debug_region))
		return 0;
	info->debug_region_size = resource_size(res);

	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
					  sizeof(char), GFP_KERNEL);
	if (!info->debug_buffer)
		return -ENOMEM;
	/* Setup NULL termination */
	info->debug_buffer[info->debug_region_size] = 0;

	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
					      sizeof(debug_name) -
					      sizeof("ti_sci_debug@")),
				      0444, NULL, info, &ti_sci_debug_fops);
	if (IS_ERR(info->d))
		return PTR_ERR(info->d);

	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
		info->debug_region, info->debug_region_size, res);
	return 0;
}

/**
 * ti_sci_debugfs_destroy() - clean up log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 */
static void ti_sci_debugfs_destroy(struct platform_device *pdev,
				   struct ti_sci_info *info)
{
	if (IS_ERR(info->debug_region))
		return;

	debugfs_remove(info->d);
}
#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
					struct ti_sci_info *info)
{
	return 0;
}

static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
					  struct ti_sci_info *info)
{
}
#endif /* CONFIG_DEBUG_FS */

/**
 * ti_sci_dump_header_dbg() - Helper to dump a message header.
 * @dev:	Device pointer corresponding to the SCI entity
 * @hdr:	pointer to header.
 */
static inline void ti_sci_dump_header_dbg(struct device *dev,
					  struct ti_sci_msg_hdr *hdr)
{
	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
		hdr->type, hdr->host, hdr->seq, hdr->flags);
}

/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl:	client pointer
 * @m:	mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
	struct device *dev = info->dev;
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_msgmgr_message *mbox_msg = m;
	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
	struct ti_sci_xfer *xfer;
	u8 xfer_id;

	xfer_id = hdr->seq;

	/*
	 * Are we even expecting this?
	 * NOTE: barriers were implicit in locks used for modifying the bitmap
	 */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	/* Is the message of valid length? */
	if (mbox_msg->len > info->desc->max_msg_size) {
		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
			mbox_msg->len, info->desc->max_msg_size);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}
	if (mbox_msg->len < xfer->rx_len) {
		dev_err(dev, "Recv xfer %zu < expected %d length\n",
			mbox_msg->len, xfer->rx_len);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}

	ti_sci_dump_header_dbg(dev, hdr);
	/* Take a copy to the rx buffer.. */
	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
	complete(&xfer->done);
}

/**
 * ti_sci_get_one_xfer() - Allocate one message
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: Pointer to an allocated ti_sci_xfer if all went fine,
 *	   else corresponding error pointer.
 */
static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
					       u16 msg_type, u32 msg_flags,
					       size_t tx_message_size,
					       size_t rx_message_size)
{
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_sci_xfer *xfer;
	struct ti_sci_msg_hdr *hdr;
	unsigned long flags;
	unsigned long bit_pos;
	u8 xfer_id;
	int ret;
	int timeout;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
		return ERR_PTR(-ERANGE);

	/*
	 * Ensure we have only controlled number of pending messages.
	 * Ideally, we might just have to wait for a single message; be
	 * conservative and wait 5 times that..
	 */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
	ret = down_timeout(&minfo->sem_xfer_count, timeout);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msgs);
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/*
	 * We already ensured in probe that we can have max messages that can
	 * fit in hdr.seq - NOTE: this improves access latencies
	 * to predictable O(1) access, BUT, it opens us to risk if
	 * remote misbehaves with corrupted message sequence responses.
	 * If that happens, we are going to be messed up anyways..
345 */ 346 xfer_id = (u8)bit_pos; 347 348 xfer = &minfo->xfer_block[xfer_id]; 349 350 hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 351 xfer->tx_message.len = tx_message_size; 352 xfer->tx_message.chan_rx = info->chan_rx; 353 xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms; 354 xfer->rx_len = (u8)rx_message_size; 355 356 reinit_completion(&xfer->done); 357 358 hdr->seq = xfer_id; 359 hdr->type = msg_type; 360 hdr->host = info->host_id; 361 hdr->flags = msg_flags; 362 363 return xfer; 364 } 365 366 /** 367 * ti_sci_put_one_xfer() - Release a message 368 * @minfo: transfer info pointer 369 * @xfer: message that was reserved by ti_sci_get_one_xfer 370 * 371 * This holds a spinlock to maintain integrity of internal data structures. 372 */ 373 static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo, 374 struct ti_sci_xfer *xfer) 375 { 376 unsigned long flags; 377 struct ti_sci_msg_hdr *hdr; 378 u8 xfer_id; 379 380 hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 381 xfer_id = hdr->seq; 382 383 /* 384 * Keep the locked section as small as possible 385 * NOTE: we might escape with smp_mb and no lock here.. 386 * but just be conservative and symmetric. 387 */ 388 spin_lock_irqsave(&minfo->xfer_lock, flags); 389 clear_bit(xfer_id, minfo->xfer_alloc_table); 390 spin_unlock_irqrestore(&minfo->xfer_lock, flags); 391 392 /* Increment the count for the next user to get through */ 393 up(&minfo->sem_xfer_count); 394 } 395 396 /** 397 * ti_sci_do_xfer() - Do one transfer 398 * @info: Pointer to SCI entity information 399 * @xfer: Transfer to initiate and wait for response 400 * 401 * Return: -ETIMEDOUT in case of no response, if transmit error, 402 * return corresponding error, else if all goes well, 403 * return 0. 404 */ 405 static inline int ti_sci_do_xfer(struct ti_sci_info *info, 406 struct ti_sci_xfer *xfer) 407 { 408 int ret; 409 int timeout; 410 struct device *dev = info->dev; 411 bool done_state = true; 412 413 ret = mbox_send_message(info->chan_tx, &xfer->tx_message); 414 if (ret < 0) 415 return ret; 416 417 ret = 0; 418 419 if (system_state <= SYSTEM_RUNNING) { 420 /* And we wait for the response. */ 421 timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); 422 if (!wait_for_completion_timeout(&xfer->done, timeout)) 423 ret = -ETIMEDOUT; 424 } else { 425 /* 426 * If we are !running, we cannot use wait_for_completion_timeout 427 * during noirq phase, so we must manually poll the completion. 428 */ 429 ret = read_poll_timeout_atomic(try_wait_for_completion, done_state, 430 done_state, 1, 431 info->desc->max_rx_timeout_ms * 1000, 432 false, &xfer->done); 433 } 434 435 if (ret == -ETIMEDOUT) 436 dev_err(dev, "Mbox timedout in resp(caller: %pS)\n", 437 (void *)_RET_IP_); 438 439 /* 440 * NOTE: we might prefer not to need the mailbox ticker to manage the 441 * transfer queueing since the protocol layer queues things by itself. 442 * Unfortunately, we have to kick the mailbox framework after we have 443 * received our message. 444 */ 445 mbox_client_txdone(info->chan_tx, ret); 446 447 return ret; 448 } 449 450 /** 451 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity 452 * @info: Pointer to SCI entity information 453 * 454 * Updates the SCI information in the internal data structure. 455 * 456 * Return: 0 if all went fine, else return appropriate error. 

/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @info:	Pointer to SCI entity information
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
{
	struct device *dev = info->dev;
	struct ti_sci_handle *handle = &info->handle;
	struct ti_sci_version_info *ver = &handle->version;
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_xfer *xfer;
	int ret;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(struct ti_sci_msg_hdr),
				   sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	strncpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}

/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static inline bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}

/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
	req->id = id;
	req->state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_device_state *req;
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
	req->id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;
fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 *			     that can be shared with other hosts.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_get_device_exclusive() - command to request for device managed by
 *				       TISCI that is exclusively owned by the
 *				       requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
					   u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}
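
/*
 * Illustrative sketch (not part of the driver): since no reference counting
 * is done on the client's behalf, users of these helpers are expected to
 * balance each get against a put, roughly:
 *
 *	ret = ti_sci_cmd_get_device(handle, dev_id);
 *	if (ret)
 *		return ret;
 *	... use the device ...
 *	ti_sci_cmd_put_device(handle, dev_id);
 */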

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
 *					TISCI that is exclusively owned by
 *					requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
					    u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}

/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 * appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be ON
 * @curr_state:	true if currently ON and active
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
				bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @curr_state:	true if currently transitioning.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
				   bool *curr_state)
{
	int ret;
	u8 state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}

/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_resets *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
	req->id = id;
	req->resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @reset_state: Pointer to reset state to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 *reset_state)
{
	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
				       NULL);
}

/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
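
/*
 * Illustrative note (assumption based on the encoding pattern above): the
 * legacy clock identifier field is 8 bits wide, with 255 acting as a
 * sentinel that tells the firmware to look at the 32-bit clk_id_32 field
 * instead. All the clock helpers below encode clk_id the same way:
 *
 *	if (clk_id < 255) {
 *		req->clk_id = clk_id;
 *	} else {
 *		req->clk_id = 255;
 *		req->clk_id_32 = clk_id;
 *	}
 */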

/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @programmed_state: State requested for clock to move to
 * @current_state: State that the clock is currently in
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
				      u32 dev_id, u32 clk_id,
				      u8 *programmed_state, u8 *current_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_state *req;
	struct ti_sci_msg_resp_get_clock_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!programmed_state && !current_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (programmed_state)
		*programmed_state = resp->programmed_state;
	if (current_state)
		*current_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
 * @can_change_freq: 'true' if frequency change is desired, else 'false'
 * @enable_input_term: 'true' if input termination is desired, else 'false'
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool needs_ssc,
				bool can_change_freq, bool enable_input_term)
{
	u32 flags = 0;

	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
				      MSG_CLOCK_SW_STATE_REQ);
}

/**
 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
				 u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_UNREQ);
}

/**
 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
				u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_AUTO);
}

/**
 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is auto managed
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id, bool *req_state)
{
	u8 state = 0;
	int ret;

	if (!req_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
	if (ret)
		return ret;

	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_on() - Is the clock ON
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and enabled
 * @curr_state: state indicating if the clock is ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_off() - Is the clock OFF
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and disabled
 * @curr_state: state indicating if the clock is NOT ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
				 u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Parent clock identifier to set
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_parent *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	if (parent_id < 255) {
		req->parent_id = parent_id;
	} else {
		req->parent_id = 255;
		req->parent_id_32 = parent_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Current clock parent
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 *parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_parent *req;
	struct ti_sci_msg_resp_get_clock_parent *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !parent_id)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->parent_id < 255)
			*parent_id = resp->parent_id;
		else
			*parent_id = resp->parent_id_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @num_parents: Returns the number of parents to the current clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
					  u32 dev_id, u32 clk_id,
					  u32 *num_parents)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_num_parents *req;
	struct ti_sci_msg_resp_get_clock_num_parents *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !num_parents)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->num_parents < 255)
			*num_parents = resp->num_parents;
		else
			*num_parents = resp->num_parents_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @match_freq:	Frequency match in Hz response.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
					 u32 dev_id, u32 clk_id, u64 min_freq,
					 u64 target_freq, u64 max_freq,
					 u64 *match_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_query_clock_freq *req;
	struct ti_sci_msg_resp_query_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !match_freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*match_freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
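
/*
 * Illustrative sketch (not part of the driver): the query and set calls
 * share the same min/target/max window semantics, so a caller that wants to
 * know what would actually be programmed before committing to it could do
 * something like the following (frequency values are made up):
 *
 *	u64 match;
 *
 *	ret = ti_sci_cmd_clk_get_match_freq(handle, dev_id, clk_id,
 *					    96000000, 100000000, 104000000,
 *					    &match);
 *	if (!ret)
 *		ret = ti_sci_cmd_clk_set_freq(handle, dev_id, clk_id,
 *					      96000000, match, 104000000);
 */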

/**
 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 min_freq,
				   u64 target_freq, u64 max_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_freq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_freq() - Get current frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @freq:	Current frequency in Hz
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 *freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_freq *req;
	struct ti_sci_msg_resp_get_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_reboot *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		ret = 0;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_get_resource_range - Helper to get a range of resources assigned
 *			       to a host. Resource is uniquely identified by
 *			       type and subtype.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @s_host:		Host processor ID to which the resources are allocated
 * @desc:		Pointer to ti_sci_resource_desc to be updated with the
 *			resource range start index and number of resources
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 subtype, u8 s_host,
				     struct ti_sci_resource_desc *desc)
{
	struct ti_sci_msg_resp_get_resource_range *resp;
	struct ti_sci_msg_req_get_resource_range *req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !desc)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
	req->secondary_host = s_host;
	req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else if (!resp->range_num && !resp->range_num_sec) {
		/* Neither of the two resource ranges is valid */
		ret = -ENODEV;
	} else {
		desc->start = resp->range_start;
		desc->num = resp->range_num;
		desc->start_sec = resp->range_start_sec;
		desc->num_sec = resp->range_num_sec;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
 *				   that is same as ti sci interface host.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @desc:		Pointer to ti_sci_resource_desc to be updated with the
 *			resource range start index and number of resources
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
					 u32 dev_id, u8 subtype,
					 struct ti_sci_resource_desc *desc)
{
	return ti_sci_get_resource_range(handle, dev_id, subtype,
					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
					 desc);
}
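
/*
 * Illustrative sketch (not part of the driver): on success the
 * ti_sci_resource_desc carries a primary range (start/num) and an optional
 * secondary range (start_sec/num_sec), which a caller would typically walk
 * as plain index ranges (use_resource() is a hypothetical placeholder):
 *
 *	struct ti_sci_resource_desc desc;
 *	u16 i;
 *
 *	ret = ti_sci_cmd_get_resource_range(handle, dev_id, subtype, &desc);
 *	if (ret)
 *		return ret;
 *	for (i = desc.start; i < desc.start + desc.num; i++)
 *		use_resource(i);
 *	for (i = desc.start_sec; i < desc.start_sec + desc.num_sec; i++)
 *		use_resource(i);
 */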

/**
 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
 *					      assigned to a specified host.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @s_host:		Host processor ID to which the resources are allocated
 * @desc:		Pointer to ti_sci_resource_desc to be updated with the
 *			resource range start index and number of resources
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static
int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
					     u32 dev_id, u8 subtype, u8 s_host,
					     struct ti_sci_resource_desc *desc)
{
	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, desc);
}

/**
 * ti_sci_manage_irq() - Helper api to configure/release the irq route between
 *			 the requested source and destination
 * @handle:		Pointer to TISCI handle.
 * @valid_params:	Bit fields defining the validity of certain params
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @dst_id:		Device ID of the IRQ destination
 * @dst_host_irq:	IRQ number of the destination device
 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
 * @vint:		Virtual interrupt to be used within the IA
 * @global_event:	Global event number to be used for the requesting event
 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
 * @s_host:		Secondary host ID to which the irq/event is being
 *			requested for.
 * @type:		Request type irq set or release.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
			     u32 valid_params, u16 src_id, u16 src_index,
			     u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
			     u16 global_event, u8 vint_status_bit, u8 s_host,
			     u16 type)
{
	struct ti_sci_msg_req_manage_irq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
	req->valid_params = valid_params;
	req->src_id = src_id;
	req->src_index = src_index;
	req->dst_id = dst_id;
	req->dst_host_irq = dst_host_irq;
	req->ia_id = ia_id;
	req->vint = vint;
	req->global_event = global_event;
	req->vint_status_bit = vint_status_bit;
	req->secondary_host = s_host;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_set_irq() - Helper api to configure the irq route between the
 *		      requested source and destination
 * @handle:		Pointer to TISCI handle.
1907 * @valid_params: Bit fields defining the validity of certain params 1908 * @src_id: Device ID of the IRQ source 1909 * @src_index: IRQ source index within the source device 1910 * @dst_id: Device ID of the IRQ destination 1911 * @dst_host_irq: IRQ number of the destination device 1912 * @ia_id: Device ID of the IA, if the IRQ flows through this IA 1913 * @vint: Virtual interrupt to be used within the IA 1914 * @global_event: Global event number to be used for the requesting event 1915 * @vint_status_bit: Virtual interrupt status bit to be used for the event 1916 * @s_host: Secondary host ID to which the irq/event is being 1917 * requested for. 1918 * 1919 * Return: 0 if all went fine, else return appropriate error. 1920 */ 1921 static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params, 1922 u16 src_id, u16 src_index, u16 dst_id, 1923 u16 dst_host_irq, u16 ia_id, u16 vint, 1924 u16 global_event, u8 vint_status_bit, u8 s_host) 1925 { 1926 pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n", 1927 __func__, valid_params, src_id, src_index, 1928 dst_id, dst_host_irq, ia_id, vint, global_event, 1929 vint_status_bit); 1930 1931 return ti_sci_manage_irq(handle, valid_params, src_id, src_index, 1932 dst_id, dst_host_irq, ia_id, vint, 1933 global_event, vint_status_bit, s_host, 1934 TI_SCI_MSG_SET_IRQ); 1935 } 1936 1937 /** 1938 * ti_sci_free_irq() - Helper api to free the irq route between the 1939 * requested source and destination 1940 * @handle: Pointer to TISCI handle. 1941 * @valid_params: Bit fields defining the validity of certain params 1942 * @src_id: Device ID of the IRQ source 1943 * @src_index: IRQ source index within the source device 1944 * @dst_id: Device ID of the IRQ destination 1945 * @dst_host_irq: IRQ number of the destination device 1946 * @ia_id: Device ID of the IA, if the IRQ flows through this IA 1947 * @vint: Virtual interrupt to be used within the IA 1948 * @global_event: Global event number to be used for the requesting event 1949 * @vint_status_bit: Virtual interrupt status bit to be used for the event 1950 * @s_host: Secondary host ID to which the irq/event is being 1951 * requested for. 1952 * 1953 * Return: 0 if all went fine, else return appropriate error. 1954 */ 1955 static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params, 1956 u16 src_id, u16 src_index, u16 dst_id, 1957 u16 dst_host_irq, u16 ia_id, u16 vint, 1958 u16 global_event, u8 vint_status_bit, u8 s_host) 1959 { 1960 pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n", 1961 __func__, valid_params, src_id, src_index, 1962 dst_id, dst_host_irq, ia_id, vint, global_event, 1963 vint_status_bit); 1964 1965 return ti_sci_manage_irq(handle, valid_params, src_id, src_index, 1966 dst_id, dst_host_irq, ia_id, vint, 1967 global_event, vint_status_bit, s_host, 1968 TI_SCI_MSG_FREE_IRQ); 1969 } 1970 1971 /** 1972 * ti_sci_cmd_set_irq() - Configure a host irq route between the requested 1973 * source and destination. 1974 * @handle: Pointer to TISCI handle. 
1975 * @src_id: Device ID of the IRQ source 1976 * @src_index: IRQ source index within the source device 1977 * @dst_id: Device ID of the IRQ destination 1978 * @dst_host_irq: IRQ number of the destination device 1979 * @vint_irq: Boolean specifying if this interrupt belongs to 1980 * Interrupt Aggregator. 1981 * 1982 * Return: 0 if all went fine, else return appropriate error. 1983 */ 1984 static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id, 1985 u16 src_index, u16 dst_id, u16 dst_host_irq) 1986 { 1987 u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID; 1988 1989 return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id, 1990 dst_host_irq, 0, 0, 0, 0, 0); 1991 } 1992 1993 /** 1994 * ti_sci_cmd_set_event_map() - Configure an event based irq route between the 1995 * requested source and Interrupt Aggregator. 1996 * @handle: Pointer to TISCI handle. 1997 * @src_id: Device ID of the IRQ source 1998 * @src_index: IRQ source index within the source device 1999 * @ia_id: Device ID of the IA, if the IRQ flows through this IA 2000 * @vint: Virtual interrupt to be used within the IA 2001 * @global_event: Global event number to be used for the requesting event 2002 * @vint_status_bit: Virtual interrupt status bit to be used for the event 2003 * 2004 * Return: 0 if all went fine, else return appropriate error. 2005 */ 2006 static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle, 2007 u16 src_id, u16 src_index, u16 ia_id, 2008 u16 vint, u16 global_event, 2009 u8 vint_status_bit) 2010 { 2011 u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID | 2012 MSG_FLAG_GLB_EVNT_VALID | 2013 MSG_FLAG_VINT_STS_BIT_VALID; 2014 2015 return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0, 2016 ia_id, vint, global_event, vint_status_bit, 0); 2017 } 2018 2019 /** 2020 * ti_sci_cmd_free_irq() - Free a host irq route between the between the 2021 * requested source and destination. 2022 * @handle: Pointer to TISCI handle. 2023 * @src_id: Device ID of the IRQ source 2024 * @src_index: IRQ source index within the source device 2025 * @dst_id: Device ID of the IRQ destination 2026 * @dst_host_irq: IRQ number of the destination device 2027 * @vint_irq: Boolean specifying if this interrupt belongs to 2028 * Interrupt Aggregator. 2029 * 2030 * Return: 0 if all went fine, else return appropriate error. 2031 */ 2032 static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id, 2033 u16 src_index, u16 dst_id, u16 dst_host_irq) 2034 { 2035 u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID; 2036 2037 return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id, 2038 dst_host_irq, 0, 0, 0, 0, 0); 2039 } 2040 2041 /** 2042 * ti_sci_cmd_free_event_map() - Free an event map between the requested source 2043 * and Interrupt Aggregator. 2044 * @handle: Pointer to TISCI handle. 2045 * @src_id: Device ID of the IRQ source 2046 * @src_index: IRQ source index within the source device 2047 * @ia_id: Device ID of the IA, if the IRQ flows through this IA 2048 * @vint: Virtual interrupt to be used within the IA 2049 * @global_event: Global event number to be used for the requesting event 2050 * @vint_status_bit: Virtual interrupt status bit to be used for the event 2051 * 2052 * Return: 0 if all went fine, else return appropriate error. 
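 *
 * A hypothetical event-map lifecycle sketch (IDs are illustrative), using
 * the client-facing ops these helpers are wired to in ti_sci_setup_ops():
 *
 *	const struct ti_sci_rm_irq_ops *iops = &handle->ops.rm_irq_ops;
 *	int ret;
 *
 *	ret = iops->set_event_map(handle, src_id, src_index, ia_id, vint,
 *				  global_event, vint_status_bit);
 *	if (ret)
 *		return ret;
 *	... use the event ...
 *	iops->free_event_map(handle, src_id, src_index, ia_id, vint,
 *			     global_event, vint_status_bit);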
2053 */ 2054 static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle, 2055 u16 src_id, u16 src_index, u16 ia_id, 2056 u16 vint, u16 global_event, 2057 u8 vint_status_bit) 2058 { 2059 u32 valid_params = MSG_FLAG_IA_ID_VALID | 2060 MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID | 2061 MSG_FLAG_VINT_STS_BIT_VALID; 2062 2063 return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0, 2064 ia_id, vint, global_event, vint_status_bit, 0); 2065 } 2066 2067 /** 2068 * ti_sci_cmd_rm_ring_cfg() - Configure a NAVSS ring 2069 * @handle: Pointer to TI SCI handle. 2070 * @params: Pointer to ti_sci_msg_rm_ring_cfg ring config structure 2071 * 2072 * Return: 0 if all went well, else returns appropriate error value. 2073 * 2074 * See @ti_sci_msg_rm_ring_cfg and @ti_sci_msg_rm_ring_cfg_req for 2075 * more info. 2076 */ 2077 static int ti_sci_cmd_rm_ring_cfg(const struct ti_sci_handle *handle, 2078 const struct ti_sci_msg_rm_ring_cfg *params) 2079 { 2080 struct ti_sci_msg_rm_ring_cfg_req *req; 2081 struct ti_sci_msg_hdr *resp; 2082 struct ti_sci_xfer *xfer; 2083 struct ti_sci_info *info; 2084 struct device *dev; 2085 int ret = 0; 2086 2087 if (IS_ERR_OR_NULL(handle)) 2088 return -EINVAL; 2089 2090 info = handle_to_ti_sci_info(handle); 2091 dev = info->dev; 2092 2093 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG, 2094 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2095 sizeof(*req), sizeof(*resp)); 2096 if (IS_ERR(xfer)) { 2097 ret = PTR_ERR(xfer); 2098 dev_err(dev, "RM_RA:Message config failed(%d)\n", ret); 2099 return ret; 2100 } 2101 req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf; 2102 req->valid_params = params->valid_params; 2103 req->nav_id = params->nav_id; 2104 req->index = params->index; 2105 req->addr_lo = params->addr_lo; 2106 req->addr_hi = params->addr_hi; 2107 req->count = params->count; 2108 req->mode = params->mode; 2109 req->size = params->size; 2110 req->order_id = params->order_id; 2111 req->virtid = params->virtid; 2112 req->asel = params->asel; 2113 2114 ret = ti_sci_do_xfer(info, xfer); 2115 if (ret) { 2116 dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret); 2117 goto fail; 2118 } 2119 2120 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2121 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2122 2123 fail: 2124 ti_sci_put_one_xfer(&info->minfo, xfer); 2125 dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", params->index, ret); 2126 return ret; 2127 } 2128 2129 /** 2130 * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread 2131 * @handle: Pointer to TI SCI handle. 2132 * @nav_id: Device ID of Navigator Subsystem which should be used for 2133 * pairing 2134 * @src_thread: Source PSI-L thread ID 2135 * @dst_thread: Destination PSI-L thread ID 2136 * 2137 * Return: 0 if all went well, else returns appropriate error value. 
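 *
 * A minimal pairing sketch (thread IDs are illustrative); DMA clients reach
 * this through handle->ops.rm_psil_ops:
 *
 *	ret = handle->ops.rm_psil_ops.pair(handle, nav_id,
 *					   src_thread, dst_thread);
 *	if (ret)
 *		dev_err(dev, "PSI-L pair failed: %d\n", ret);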
2138 */ 2139 static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle, 2140 u32 nav_id, u32 src_thread, u32 dst_thread) 2141 { 2142 struct ti_sci_msg_psil_pair *req; 2143 struct ti_sci_msg_hdr *resp; 2144 struct ti_sci_xfer *xfer; 2145 struct ti_sci_info *info; 2146 struct device *dev; 2147 int ret = 0; 2148 2149 if (IS_ERR(handle)) 2150 return PTR_ERR(handle); 2151 if (!handle) 2152 return -EINVAL; 2153 2154 info = handle_to_ti_sci_info(handle); 2155 dev = info->dev; 2156 2157 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR, 2158 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2159 sizeof(*req), sizeof(*resp)); 2160 if (IS_ERR(xfer)) { 2161 ret = PTR_ERR(xfer); 2162 dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret); 2163 return ret; 2164 } 2165 req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf; 2166 req->nav_id = nav_id; 2167 req->src_thread = src_thread; 2168 req->dst_thread = dst_thread; 2169 2170 ret = ti_sci_do_xfer(info, xfer); 2171 if (ret) { 2172 dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret); 2173 goto fail; 2174 } 2175 2176 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2177 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2178 2179 fail: 2180 ti_sci_put_one_xfer(&info->minfo, xfer); 2181 2182 return ret; 2183 } 2184 2185 /** 2186 * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread 2187 * @handle: Pointer to TI SCI handle. 2188 * @nav_id: Device ID of Navigator Subsystem which should be used for 2189 * unpairing 2190 * @src_thread: Source PSI-L thread ID 2191 * @dst_thread: Destination PSI-L thread ID 2192 * 2193 * Return: 0 if all went well, else returns appropriate error value. 2194 */ 2195 static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle, 2196 u32 nav_id, u32 src_thread, u32 dst_thread) 2197 { 2198 struct ti_sci_msg_psil_unpair *req; 2199 struct ti_sci_msg_hdr *resp; 2200 struct ti_sci_xfer *xfer; 2201 struct ti_sci_info *info; 2202 struct device *dev; 2203 int ret = 0; 2204 2205 if (IS_ERR(handle)) 2206 return PTR_ERR(handle); 2207 if (!handle) 2208 return -EINVAL; 2209 2210 info = handle_to_ti_sci_info(handle); 2211 dev = info->dev; 2212 2213 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR, 2214 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2215 sizeof(*req), sizeof(*resp)); 2216 if (IS_ERR(xfer)) { 2217 ret = PTR_ERR(xfer); 2218 dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret); 2219 return ret; 2220 } 2221 req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf; 2222 req->nav_id = nav_id; 2223 req->src_thread = src_thread; 2224 req->dst_thread = dst_thread; 2225 2226 ret = ti_sci_do_xfer(info, xfer); 2227 if (ret) { 2228 dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret); 2229 goto fail; 2230 } 2231 2232 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2233 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2234 2235 fail: 2236 ti_sci_put_one_xfer(&info->minfo, xfer); 2237 2238 return ret; 2239 } 2240 2241 /** 2242 * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel 2243 * @handle: Pointer to TI SCI handle. 2244 * @params: Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config 2245 * structure 2246 * 2247 * Return: 0 if all went well, else returns appropriate error value. 2248 * 2249 * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for 2250 * more info. 
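 *
 * An illustrative, non-exhaustive configuration sketch; firmware only acts
 * on the fields whose corresponding bits are set in @valid_params:
 *
 *	struct ti_sci_msg_rm_udmap_tx_ch_cfg cfg = { 0 };
 *
 *	cfg.nav_id = nav_id;
 *	cfg.index = tx_ch_index;
 *	cfg.tx_chan_type = chan_type;
 *	cfg.valid_params = <OR of the *_VALID flags for the fields set>;
 *	ret = handle->ops.rm_udmap_ops.tx_ch_cfg(handle, &cfg);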
2251 */ 2252 static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle, 2253 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params) 2254 { 2255 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req; 2256 struct ti_sci_msg_hdr *resp; 2257 struct ti_sci_xfer *xfer; 2258 struct ti_sci_info *info; 2259 struct device *dev; 2260 int ret = 0; 2261 2262 if (IS_ERR_OR_NULL(handle)) 2263 return -EINVAL; 2264 2265 info = handle_to_ti_sci_info(handle); 2266 dev = info->dev; 2267 2268 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG, 2269 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2270 sizeof(*req), sizeof(*resp)); 2271 if (IS_ERR(xfer)) { 2272 ret = PTR_ERR(xfer); 2273 dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret); 2274 return ret; 2275 } 2276 req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf; 2277 req->valid_params = params->valid_params; 2278 req->nav_id = params->nav_id; 2279 req->index = params->index; 2280 req->tx_pause_on_err = params->tx_pause_on_err; 2281 req->tx_filt_einfo = params->tx_filt_einfo; 2282 req->tx_filt_pswords = params->tx_filt_pswords; 2283 req->tx_atype = params->tx_atype; 2284 req->tx_chan_type = params->tx_chan_type; 2285 req->tx_supr_tdpkt = params->tx_supr_tdpkt; 2286 req->tx_fetch_size = params->tx_fetch_size; 2287 req->tx_credit_count = params->tx_credit_count; 2288 req->txcq_qnum = params->txcq_qnum; 2289 req->tx_priority = params->tx_priority; 2290 req->tx_qos = params->tx_qos; 2291 req->tx_orderid = params->tx_orderid; 2292 req->fdepth = params->fdepth; 2293 req->tx_sched_priority = params->tx_sched_priority; 2294 req->tx_burst_size = params->tx_burst_size; 2295 req->tx_tdtype = params->tx_tdtype; 2296 req->extended_ch_type = params->extended_ch_type; 2297 2298 ret = ti_sci_do_xfer(info, xfer); 2299 if (ret) { 2300 dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret); 2301 goto fail; 2302 } 2303 2304 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2305 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2306 2307 fail: 2308 ti_sci_put_one_xfer(&info->minfo, xfer); 2309 dev_dbg(dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret); 2310 return ret; 2311 } 2312 2313 /** 2314 * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel 2315 * @handle: Pointer to TI SCI handle. 2316 * @params: Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config 2317 * structure 2318 * 2319 * Return: 0 if all went well, else returns appropriate error value. 2320 * 2321 * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for 2322 * more info. 
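 *
 * An RX-side sketch along the same lines (values illustrative); the flow id
 * range set here pairs with the per-flow setup done via rx_flow_cfg:
 *
 *	struct ti_sci_msg_rm_udmap_rx_ch_cfg cfg = { 0 };
 *
 *	cfg.nav_id = nav_id;
 *	cfg.index = rx_ch_index;
 *	cfg.flowid_start = flow_base;
 *	cfg.flowid_cnt = flow_cnt;
 *	cfg.valid_params = <OR of the *_VALID flags for the fields set>;
 *	ret = handle->ops.rm_udmap_ops.rx_ch_cfg(handle, &cfg);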
2323 */ 2324 static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle, 2325 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params) 2326 { 2327 struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req; 2328 struct ti_sci_msg_hdr *resp; 2329 struct ti_sci_xfer *xfer; 2330 struct ti_sci_info *info; 2331 struct device *dev; 2332 int ret = 0; 2333 2334 if (IS_ERR_OR_NULL(handle)) 2335 return -EINVAL; 2336 2337 info = handle_to_ti_sci_info(handle); 2338 dev = info->dev; 2339 2340 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG, 2341 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2342 sizeof(*req), sizeof(*resp)); 2343 if (IS_ERR(xfer)) { 2344 ret = PTR_ERR(xfer); 2345 dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret); 2346 return ret; 2347 } 2348 req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf; 2349 req->valid_params = params->valid_params; 2350 req->nav_id = params->nav_id; 2351 req->index = params->index; 2352 req->rx_fetch_size = params->rx_fetch_size; 2353 req->rxcq_qnum = params->rxcq_qnum; 2354 req->rx_priority = params->rx_priority; 2355 req->rx_qos = params->rx_qos; 2356 req->rx_orderid = params->rx_orderid; 2357 req->rx_sched_priority = params->rx_sched_priority; 2358 req->flowid_start = params->flowid_start; 2359 req->flowid_cnt = params->flowid_cnt; 2360 req->rx_pause_on_err = params->rx_pause_on_err; 2361 req->rx_atype = params->rx_atype; 2362 req->rx_chan_type = params->rx_chan_type; 2363 req->rx_ignore_short = params->rx_ignore_short; 2364 req->rx_ignore_long = params->rx_ignore_long; 2365 req->rx_burst_size = params->rx_burst_size; 2366 2367 ret = ti_sci_do_xfer(info, xfer); 2368 if (ret) { 2369 dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret); 2370 goto fail; 2371 } 2372 2373 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2374 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2375 2376 fail: 2377 ti_sci_put_one_xfer(&info->minfo, xfer); 2378 dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret); 2379 return ret; 2380 } 2381 2382 /** 2383 * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW 2384 * @handle: Pointer to TI SCI handle. 2385 * @params: Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config 2386 * structure 2387 * 2388 * Return: 0 if all went well, else returns appropriate error value. 2389 * 2390 * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for 2391 * more info. 
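 *
 * A minimal flow sketch (queue numbers illustrative), typically issued after
 * the owning RX channel has been configured:
 *
 *	struct ti_sci_msg_rm_udmap_flow_cfg cfg = { 0 };
 *
 *	cfg.nav_id = nav_id;
 *	cfg.flow_index = flow_index;
 *	cfg.rx_dest_qnum = dst_qnum;
 *	cfg.rx_fdq0_sz0_qnum = fdq_qnum;
 *	cfg.valid_params = <OR of the *_VALID flags for the fields set>;
 *	ret = handle->ops.rm_udmap_ops.rx_flow_cfg(handle, &cfg);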
2392 */ 2393 static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle, 2394 const struct ti_sci_msg_rm_udmap_flow_cfg *params) 2395 { 2396 struct ti_sci_msg_rm_udmap_flow_cfg_req *req; 2397 struct ti_sci_msg_hdr *resp; 2398 struct ti_sci_xfer *xfer; 2399 struct ti_sci_info *info; 2400 struct device *dev; 2401 int ret = 0; 2402 2403 if (IS_ERR_OR_NULL(handle)) 2404 return -EINVAL; 2405 2406 info = handle_to_ti_sci_info(handle); 2407 dev = info->dev; 2408 2409 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG, 2410 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2411 sizeof(*req), sizeof(*resp)); 2412 if (IS_ERR(xfer)) { 2413 ret = PTR_ERR(xfer); 2414 dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret); 2415 return ret; 2416 } 2417 req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf; 2418 req->valid_params = params->valid_params; 2419 req->nav_id = params->nav_id; 2420 req->flow_index = params->flow_index; 2421 req->rx_einfo_present = params->rx_einfo_present; 2422 req->rx_psinfo_present = params->rx_psinfo_present; 2423 req->rx_error_handling = params->rx_error_handling; 2424 req->rx_desc_type = params->rx_desc_type; 2425 req->rx_sop_offset = params->rx_sop_offset; 2426 req->rx_dest_qnum = params->rx_dest_qnum; 2427 req->rx_src_tag_hi = params->rx_src_tag_hi; 2428 req->rx_src_tag_lo = params->rx_src_tag_lo; 2429 req->rx_dest_tag_hi = params->rx_dest_tag_hi; 2430 req->rx_dest_tag_lo = params->rx_dest_tag_lo; 2431 req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel; 2432 req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel; 2433 req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel; 2434 req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel; 2435 req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum; 2436 req->rx_fdq1_qnum = params->rx_fdq1_qnum; 2437 req->rx_fdq2_qnum = params->rx_fdq2_qnum; 2438 req->rx_fdq3_qnum = params->rx_fdq3_qnum; 2439 req->rx_ps_location = params->rx_ps_location; 2440 2441 ret = ti_sci_do_xfer(info, xfer); 2442 if (ret) { 2443 dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret); 2444 goto fail; 2445 } 2446 2447 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2448 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2449 2450 fail: 2451 ti_sci_put_one_xfer(&info->minfo, xfer); 2452 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret); 2453 return ret; 2454 } 2455 2456 /** 2457 * ti_sci_cmd_proc_request() - Command to request a physical processor control 2458 * @handle: Pointer to TI SCI handle 2459 * @proc_id: Processor ID this request is for 2460 * 2461 * Return: 0 if all went well, else returns appropriate error value. 
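 *
 * A remoteproc-style sketch (processor ID illustrative), via the proc_ops
 * this command is exposed through:
 *
 *	const struct ti_sci_proc_ops *pops = &handle->ops.proc_ops;
 *
 *	ret = pops->request(handle, proc_id);
 *	if (ret)
 *		return ret;
 *	... load firmware, set boot vector, start the core ...
 *	pops->release(handle, proc_id);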
2462 */ 2463 static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle, 2464 u8 proc_id) 2465 { 2466 struct ti_sci_msg_req_proc_request *req; 2467 struct ti_sci_msg_hdr *resp; 2468 struct ti_sci_info *info; 2469 struct ti_sci_xfer *xfer; 2470 struct device *dev; 2471 int ret = 0; 2472 2473 if (!handle) 2474 return -EINVAL; 2475 if (IS_ERR(handle)) 2476 return PTR_ERR(handle); 2477 2478 info = handle_to_ti_sci_info(handle); 2479 dev = info->dev; 2480 2481 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST, 2482 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2483 sizeof(*req), sizeof(*resp)); 2484 if (IS_ERR(xfer)) { 2485 ret = PTR_ERR(xfer); 2486 dev_err(dev, "Message alloc failed(%d)\n", ret); 2487 return ret; 2488 } 2489 req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf; 2490 req->processor_id = proc_id; 2491 2492 ret = ti_sci_do_xfer(info, xfer); 2493 if (ret) { 2494 dev_err(dev, "Mbox send fail %d\n", ret); 2495 goto fail; 2496 } 2497 2498 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 2499 2500 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2501 2502 fail: 2503 ti_sci_put_one_xfer(&info->minfo, xfer); 2504 2505 return ret; 2506 } 2507 2508 /** 2509 * ti_sci_cmd_proc_release() - Command to release a physical processor control 2510 * @handle: Pointer to TI SCI handle 2511 * @proc_id: Processor ID this request is for 2512 * 2513 * Return: 0 if all went well, else returns appropriate error value. 2514 */ 2515 static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle, 2516 u8 proc_id) 2517 { 2518 struct ti_sci_msg_req_proc_release *req; 2519 struct ti_sci_msg_hdr *resp; 2520 struct ti_sci_info *info; 2521 struct ti_sci_xfer *xfer; 2522 struct device *dev; 2523 int ret = 0; 2524 2525 if (!handle) 2526 return -EINVAL; 2527 if (IS_ERR(handle)) 2528 return PTR_ERR(handle); 2529 2530 info = handle_to_ti_sci_info(handle); 2531 dev = info->dev; 2532 2533 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE, 2534 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2535 sizeof(*req), sizeof(*resp)); 2536 if (IS_ERR(xfer)) { 2537 ret = PTR_ERR(xfer); 2538 dev_err(dev, "Message alloc failed(%d)\n", ret); 2539 return ret; 2540 } 2541 req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf; 2542 req->processor_id = proc_id; 2543 2544 ret = ti_sci_do_xfer(info, xfer); 2545 if (ret) { 2546 dev_err(dev, "Mbox send fail %d\n", ret); 2547 goto fail; 2548 } 2549 2550 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 2551 2552 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2553 2554 fail: 2555 ti_sci_put_one_xfer(&info->minfo, xfer); 2556 2557 return ret; 2558 } 2559 2560 /** 2561 * ti_sci_cmd_proc_handover() - Command to handover a physical processor 2562 * control to a host in the processor's access 2563 * control list. 2564 * @handle: Pointer to TI SCI handle 2565 * @proc_id: Processor ID this request is for 2566 * @host_id: Host ID to get the control of the processor 2567 * 2568 * Return: 0 if all went well, else returns appropriate error value. 
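 *
 * Sketch (IDs illustrative): instead of releasing, control of a booted core
 * can be handed to another host in its access list:
 *
 *	ret = handle->ops.proc_ops.handover(handle, proc_id, new_host_id);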
2569 */ 2570 static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle, 2571 u8 proc_id, u8 host_id) 2572 { 2573 struct ti_sci_msg_req_proc_handover *req; 2574 struct ti_sci_msg_hdr *resp; 2575 struct ti_sci_info *info; 2576 struct ti_sci_xfer *xfer; 2577 struct device *dev; 2578 int ret = 0; 2579 2580 if (!handle) 2581 return -EINVAL; 2582 if (IS_ERR(handle)) 2583 return PTR_ERR(handle); 2584 2585 info = handle_to_ti_sci_info(handle); 2586 dev = info->dev; 2587 2588 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER, 2589 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2590 sizeof(*req), sizeof(*resp)); 2591 if (IS_ERR(xfer)) { 2592 ret = PTR_ERR(xfer); 2593 dev_err(dev, "Message alloc failed(%d)\n", ret); 2594 return ret; 2595 } 2596 req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf; 2597 req->processor_id = proc_id; 2598 req->host_id = host_id; 2599 2600 ret = ti_sci_do_xfer(info, xfer); 2601 if (ret) { 2602 dev_err(dev, "Mbox send fail %d\n", ret); 2603 goto fail; 2604 } 2605 2606 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 2607 2608 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2609 2610 fail: 2611 ti_sci_put_one_xfer(&info->minfo, xfer); 2612 2613 return ret; 2614 } 2615 2616 /** 2617 * ti_sci_cmd_proc_set_config() - Command to set the processor boot 2618 * configuration flags 2619 * @handle: Pointer to TI SCI handle 2620 * @proc_id: Processor ID this request is for 2621 * @config_flags_set: Configuration flags to be set 2622 * @config_flags_clear: Configuration flags to be cleared. 2623 * 2624 * Return: 0 if all went well, else returns appropriate error value. 2625 */ 2626 static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle, 2627 u8 proc_id, u64 bootvector, 2628 u32 config_flags_set, 2629 u32 config_flags_clear) 2630 { 2631 struct ti_sci_msg_req_set_config *req; 2632 struct ti_sci_msg_hdr *resp; 2633 struct ti_sci_info *info; 2634 struct ti_sci_xfer *xfer; 2635 struct device *dev; 2636 int ret = 0; 2637 2638 if (!handle) 2639 return -EINVAL; 2640 if (IS_ERR(handle)) 2641 return PTR_ERR(handle); 2642 2643 info = handle_to_ti_sci_info(handle); 2644 dev = info->dev; 2645 2646 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG, 2647 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2648 sizeof(*req), sizeof(*resp)); 2649 if (IS_ERR(xfer)) { 2650 ret = PTR_ERR(xfer); 2651 dev_err(dev, "Message alloc failed(%d)\n", ret); 2652 return ret; 2653 } 2654 req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf; 2655 req->processor_id = proc_id; 2656 req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK; 2657 req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >> 2658 TI_SCI_ADDR_HIGH_SHIFT; 2659 req->config_flags_set = config_flags_set; 2660 req->config_flags_clear = config_flags_clear; 2661 2662 ret = ti_sci_do_xfer(info, xfer); 2663 if (ret) { 2664 dev_err(dev, "Mbox send fail %d\n", ret); 2665 goto fail; 2666 } 2667 2668 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 2669 2670 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2671 2672 fail: 2673 ti_sci_put_one_xfer(&info->minfo, xfer); 2674 2675 return ret; 2676 } 2677 2678 /** 2679 * ti_sci_cmd_proc_set_control() - Command to set the processor boot 2680 * control flags 2681 * @handle: Pointer to TI SCI handle 2682 * @proc_id: Processor ID this request is for 2683 * @control_flags_set: Control flags to be set 2684 * @control_flags_clear: Control flags to be cleared 2685 * 2686 * Return: 0 if all went well, else returns appropriate error value. 
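 *
 * A combined boot-setup sketch (flag values illustrative; real clients use
 * the SoC-appropriate flag definitions):
 *
 *	ret = handle->ops.proc_ops.set_config(handle, proc_id, boot_vector,
 *					      cfg_set, cfg_clear);
 *	if (!ret)
 *		ret = handle->ops.proc_ops.set_control(handle, proc_id,
 *						       ctrl_set, ctrl_clear);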
2687 */ 2688 static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle, 2689 u8 proc_id, u32 control_flags_set, 2690 u32 control_flags_clear) 2691 { 2692 struct ti_sci_msg_req_set_ctrl *req; 2693 struct ti_sci_msg_hdr *resp; 2694 struct ti_sci_info *info; 2695 struct ti_sci_xfer *xfer; 2696 struct device *dev; 2697 int ret = 0; 2698 2699 if (!handle) 2700 return -EINVAL; 2701 if (IS_ERR(handle)) 2702 return PTR_ERR(handle); 2703 2704 info = handle_to_ti_sci_info(handle); 2705 dev = info->dev; 2706 2707 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL, 2708 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2709 sizeof(*req), sizeof(*resp)); 2710 if (IS_ERR(xfer)) { 2711 ret = PTR_ERR(xfer); 2712 dev_err(dev, "Message alloc failed(%d)\n", ret); 2713 return ret; 2714 } 2715 req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf; 2716 req->processor_id = proc_id; 2717 req->control_flags_set = control_flags_set; 2718 req->control_flags_clear = control_flags_clear; 2719 2720 ret = ti_sci_do_xfer(info, xfer); 2721 if (ret) { 2722 dev_err(dev, "Mbox send fail %d\n", ret); 2723 goto fail; 2724 } 2725 2726 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 2727 2728 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2729 2730 fail: 2731 ti_sci_put_one_xfer(&info->minfo, xfer); 2732 2733 return ret; 2734 } 2735 2736 /** 2737 * ti_sci_cmd_get_boot_status() - Command to get the processor boot status 2738 * @handle: Pointer to TI SCI handle 2739 * @proc_id: Processor ID this request is for 2740 * 2741 * Return: 0 if all went well, else returns appropriate error value. 2742 */ 2743 static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle, 2744 u8 proc_id, u64 *bv, u32 *cfg_flags, 2745 u32 *ctrl_flags, u32 *sts_flags) 2746 { 2747 struct ti_sci_msg_resp_get_status *resp; 2748 struct ti_sci_msg_req_get_status *req; 2749 struct ti_sci_info *info; 2750 struct ti_sci_xfer *xfer; 2751 struct device *dev; 2752 int ret = 0; 2753 2754 if (!handle) 2755 return -EINVAL; 2756 if (IS_ERR(handle)) 2757 return PTR_ERR(handle); 2758 2759 info = handle_to_ti_sci_info(handle); 2760 dev = info->dev; 2761 2762 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS, 2763 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2764 sizeof(*req), sizeof(*resp)); 2765 if (IS_ERR(xfer)) { 2766 ret = PTR_ERR(xfer); 2767 dev_err(dev, "Message alloc failed(%d)\n", ret); 2768 return ret; 2769 } 2770 req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf; 2771 req->processor_id = proc_id; 2772 2773 ret = ti_sci_do_xfer(info, xfer); 2774 if (ret) { 2775 dev_err(dev, "Mbox send fail %d\n", ret); 2776 goto fail; 2777 } 2778 2779 resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf; 2780 2781 if (!ti_sci_is_response_ack(resp)) { 2782 ret = -ENODEV; 2783 } else { 2784 *bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) | 2785 (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) & 2786 TI_SCI_ADDR_HIGH_MASK); 2787 *cfg_flags = resp->config_flags; 2788 *ctrl_flags = resp->control_flags; 2789 *sts_flags = resp->status_flags; 2790 } 2791 2792 fail: 2793 ti_sci_put_one_xfer(&info->minfo, xfer); 2794 2795 return ret; 2796 } 2797 2798 /* 2799 * ti_sci_setup_ops() - Setup the operations structures 2800 * @info: pointer to TISCI pointer 2801 */ 2802 static void ti_sci_setup_ops(struct ti_sci_info *info) 2803 { 2804 struct ti_sci_ops *ops = &info->handle.ops; 2805 struct ti_sci_core_ops *core_ops = &ops->core_ops; 2806 struct ti_sci_dev_ops *dops = &ops->dev_ops; 2807 struct ti_sci_clk_ops *cops = &ops->clk_ops; 2808 struct 
ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops; 2809 struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops; 2810 struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops; 2811 struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops; 2812 struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops; 2813 struct ti_sci_proc_ops *pops = &ops->proc_ops; 2814 2815 core_ops->reboot_device = ti_sci_cmd_core_reboot; 2816 2817 dops->get_device = ti_sci_cmd_get_device; 2818 dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive; 2819 dops->idle_device = ti_sci_cmd_idle_device; 2820 dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive; 2821 dops->put_device = ti_sci_cmd_put_device; 2822 2823 dops->is_valid = ti_sci_cmd_dev_is_valid; 2824 dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt; 2825 dops->is_idle = ti_sci_cmd_dev_is_idle; 2826 dops->is_stop = ti_sci_cmd_dev_is_stop; 2827 dops->is_on = ti_sci_cmd_dev_is_on; 2828 dops->is_transitioning = ti_sci_cmd_dev_is_trans; 2829 dops->set_device_resets = ti_sci_cmd_set_device_resets; 2830 dops->get_device_resets = ti_sci_cmd_get_device_resets; 2831 2832 cops->get_clock = ti_sci_cmd_get_clock; 2833 cops->idle_clock = ti_sci_cmd_idle_clock; 2834 cops->put_clock = ti_sci_cmd_put_clock; 2835 cops->is_auto = ti_sci_cmd_clk_is_auto; 2836 cops->is_on = ti_sci_cmd_clk_is_on; 2837 cops->is_off = ti_sci_cmd_clk_is_off; 2838 2839 cops->set_parent = ti_sci_cmd_clk_set_parent; 2840 cops->get_parent = ti_sci_cmd_clk_get_parent; 2841 cops->get_num_parents = ti_sci_cmd_clk_get_num_parents; 2842 2843 cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq; 2844 cops->set_freq = ti_sci_cmd_clk_set_freq; 2845 cops->get_freq = ti_sci_cmd_clk_get_freq; 2846 2847 rm_core_ops->get_range = ti_sci_cmd_get_resource_range; 2848 rm_core_ops->get_range_from_shost = 2849 ti_sci_cmd_get_resource_range_from_shost; 2850 2851 iops->set_irq = ti_sci_cmd_set_irq; 2852 iops->set_event_map = ti_sci_cmd_set_event_map; 2853 iops->free_irq = ti_sci_cmd_free_irq; 2854 iops->free_event_map = ti_sci_cmd_free_event_map; 2855 2856 rops->set_cfg = ti_sci_cmd_rm_ring_cfg; 2857 2858 psilops->pair = ti_sci_cmd_rm_psil_pair; 2859 psilops->unpair = ti_sci_cmd_rm_psil_unpair; 2860 2861 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg; 2862 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg; 2863 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg; 2864 2865 pops->request = ti_sci_cmd_proc_request; 2866 pops->release = ti_sci_cmd_proc_release; 2867 pops->handover = ti_sci_cmd_proc_handover; 2868 pops->set_config = ti_sci_cmd_proc_set_config; 2869 pops->set_control = ti_sci_cmd_proc_set_control; 2870 pops->get_status = ti_sci_cmd_proc_get_status; 2871 } 2872 2873 /** 2874 * ti_sci_get_handle() - Get the TI SCI handle for a device 2875 * @dev: Pointer to device for which we want SCI handle 2876 * 2877 * NOTE: The function does not track individual clients of the framework 2878 * and is expected to be maintained by caller of TI SCI protocol library. 2879 * ti_sci_put_handle must be balanced with successful ti_sci_get_handle 2880 * Return: pointer to handle if successful, else: 2881 * -EPROBE_DEFER if the instance is not ready 2882 * -ENODEV if the required node handler is missing 2883 * -EINVAL if invalid conditions are encountered. 
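 *
 * A minimal client sketch (assumes @dev is a child of the TISCI node, as
 * required by the parent-node lookup below); the put must balance the get:
 *
 *	const struct ti_sci_handle *handle = ti_sci_get_handle(dev);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	... use handle->ops ...
 *	ti_sci_put_handle(handle);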
2884 */ 2885 const struct ti_sci_handle *ti_sci_get_handle(struct device *dev) 2886 { 2887 struct device_node *ti_sci_np; 2888 struct list_head *p; 2889 struct ti_sci_handle *handle = NULL; 2890 struct ti_sci_info *info; 2891 2892 if (!dev) { 2893 pr_err("I need a device pointer\n"); 2894 return ERR_PTR(-EINVAL); 2895 } 2896 ti_sci_np = of_get_parent(dev->of_node); 2897 if (!ti_sci_np) { 2898 dev_err(dev, "No OF information\n"); 2899 return ERR_PTR(-EINVAL); 2900 } 2901 2902 mutex_lock(&ti_sci_list_mutex); 2903 list_for_each(p, &ti_sci_list) { 2904 info = list_entry(p, struct ti_sci_info, node); 2905 if (ti_sci_np == info->dev->of_node) { 2906 handle = &info->handle; 2907 info->users++; 2908 break; 2909 } 2910 } 2911 mutex_unlock(&ti_sci_list_mutex); 2912 of_node_put(ti_sci_np); 2913 2914 if (!handle) 2915 return ERR_PTR(-EPROBE_DEFER); 2916 2917 return handle; 2918 } 2919 EXPORT_SYMBOL_GPL(ti_sci_get_handle); 2920 2921 /** 2922 * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle 2923 * @handle: Handle acquired by ti_sci_get_handle 2924 * 2925 * NOTE: The function does not track individual clients of the framework 2926 * and is expected to be maintained by caller of TI SCI protocol library. 2927 * ti_sci_put_handle must be balanced with successful ti_sci_get_handle 2928 * 2929 * Return: 0 is successfully released 2930 * if an error pointer was passed, it returns the error value back, 2931 * if null was passed, it returns -EINVAL; 2932 */ 2933 int ti_sci_put_handle(const struct ti_sci_handle *handle) 2934 { 2935 struct ti_sci_info *info; 2936 2937 if (IS_ERR(handle)) 2938 return PTR_ERR(handle); 2939 if (!handle) 2940 return -EINVAL; 2941 2942 info = handle_to_ti_sci_info(handle); 2943 mutex_lock(&ti_sci_list_mutex); 2944 if (!WARN_ON(!info->users)) 2945 info->users--; 2946 mutex_unlock(&ti_sci_list_mutex); 2947 2948 return 0; 2949 } 2950 EXPORT_SYMBOL_GPL(ti_sci_put_handle); 2951 2952 static void devm_ti_sci_release(struct device *dev, void *res) 2953 { 2954 const struct ti_sci_handle **ptr = res; 2955 const struct ti_sci_handle *handle = *ptr; 2956 int ret; 2957 2958 ret = ti_sci_put_handle(handle); 2959 if (ret) 2960 dev_err(dev, "failed to put handle %d\n", ret); 2961 } 2962 2963 /** 2964 * devm_ti_sci_get_handle() - Managed get handle 2965 * @dev: device for which we want SCI handle for. 2966 * 2967 * NOTE: This releases the handle once the device resources are 2968 * no longer needed. MUST NOT BE released with ti_sci_put_handle. 2969 * The function does not track individual clients of the framework 2970 * and is expected to be maintained by caller of TI SCI protocol library. 2971 * 2972 * Return: 0 if all went fine, else corresponding error. 
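 *
 * A managed-client sketch: the call hands back the handle or an ERR_PTR, and
 * no explicit put is needed since the devres release path invokes
 * ti_sci_put_handle() when @dev goes away:
 *
 *	const struct ti_sci_handle *handle = devm_ti_sci_get_handle(dev);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);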
2973 */ 2974 const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev) 2975 { 2976 const struct ti_sci_handle **ptr; 2977 const struct ti_sci_handle *handle; 2978 2979 ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL); 2980 if (!ptr) 2981 return ERR_PTR(-ENOMEM); 2982 handle = ti_sci_get_handle(dev); 2983 2984 if (!IS_ERR(handle)) { 2985 *ptr = handle; 2986 devres_add(dev, ptr); 2987 } else { 2988 devres_free(ptr); 2989 } 2990 2991 return handle; 2992 } 2993 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle); 2994 2995 /** 2996 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle 2997 * @np: device node 2998 * @property: property name containing phandle on TISCI node 2999 * 3000 * NOTE: The function does not track individual clients of the framework 3001 * and is expected to be maintained by caller of TI SCI protocol library. 3002 * ti_sci_put_handle must be balanced with successful ti_sci_get_by_phandle 3003 * Return: pointer to handle if successful, else: 3004 * -EPROBE_DEFER if the instance is not ready 3005 * -ENODEV if the required node handler is missing 3006 * -EINVAL if invalid conditions are encountered. 3007 */ 3008 const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np, 3009 const char *property) 3010 { 3011 struct ti_sci_handle *handle = NULL; 3012 struct device_node *ti_sci_np; 3013 struct ti_sci_info *info; 3014 struct list_head *p; 3015 3016 if (!np) { 3017 pr_err("I need a device pointer\n"); 3018 return ERR_PTR(-EINVAL); 3019 } 3020 3021 ti_sci_np = of_parse_phandle(np, property, 0); 3022 if (!ti_sci_np) 3023 return ERR_PTR(-ENODEV); 3024 3025 mutex_lock(&ti_sci_list_mutex); 3026 list_for_each(p, &ti_sci_list) { 3027 info = list_entry(p, struct ti_sci_info, node); 3028 if (ti_sci_np == info->dev->of_node) { 3029 handle = &info->handle; 3030 info->users++; 3031 break; 3032 } 3033 } 3034 mutex_unlock(&ti_sci_list_mutex); 3035 of_node_put(ti_sci_np); 3036 3037 if (!handle) 3038 return ERR_PTR(-EPROBE_DEFER); 3039 3040 return handle; 3041 } 3042 EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle); 3043 3044 /** 3045 * devm_ti_sci_get_by_phandle() - Managed get handle using phandle 3046 * @dev: Device pointer requesting TISCI handle 3047 * @property: property name containing phandle on TISCI node 3048 * 3049 * NOTE: This releases the handle once the device resources are 3050 * no longer needed. MUST NOT BE released with ti_sci_put_handle. 3051 * The function does not track individual clients of the framework 3052 * and is expected to be maintained by caller of TI SCI protocol library. 3053 * 3054 * Return: 0 if all went fine, else corresponding error. 3055 */ 3056 const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev, 3057 const char *property) 3058 { 3059 const struct ti_sci_handle *handle; 3060 const struct ti_sci_handle **ptr; 3061 3062 ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL); 3063 if (!ptr) 3064 return ERR_PTR(-ENOMEM); 3065 handle = ti_sci_get_by_phandle(dev_of_node(dev), property); 3066 3067 if (!IS_ERR(handle)) { 3068 *ptr = handle; 3069 devres_add(dev, ptr); 3070 } else { 3071 devres_free(ptr); 3072 } 3073 3074 return handle; 3075 } 3076 EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle); 3077 3078 /** 3079 * ti_sci_get_free_resource() - Get a free resource from TISCI resource. 3080 * @res: Pointer to the TISCI resource 3081 * 3082 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL. 
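 *
 * Allocation sketch (assumes @res was obtained earlier through
 * devm_ti_sci_get_of_resource() or devm_ti_sci_get_resource()):
 *
 *	u16 id = ti_sci_get_free_resource(res);
 *
 *	if (id == TI_SCI_RESOURCE_NULL)
 *		return -ENOSPC;
 *	... program the resource ...
 *	ti_sci_release_resource(res, id);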
3083 */ 3084 u16 ti_sci_get_free_resource(struct ti_sci_resource *res) 3085 { 3086 unsigned long flags; 3087 u16 set, free_bit; 3088 3089 raw_spin_lock_irqsave(&res->lock, flags); 3090 for (set = 0; set < res->sets; set++) { 3091 struct ti_sci_resource_desc *desc = &res->desc[set]; 3092 int res_count = desc->num + desc->num_sec; 3093 3094 free_bit = find_first_zero_bit(desc->res_map, res_count); 3095 if (free_bit != res_count) { 3096 __set_bit(free_bit, desc->res_map); 3097 raw_spin_unlock_irqrestore(&res->lock, flags); 3098 3099 if (desc->num && free_bit < desc->num) 3100 return desc->start + free_bit; 3101 else 3102 return desc->start_sec + free_bit; 3103 } 3104 } 3105 raw_spin_unlock_irqrestore(&res->lock, flags); 3106 3107 return TI_SCI_RESOURCE_NULL; 3108 } 3109 EXPORT_SYMBOL_GPL(ti_sci_get_free_resource); 3110 3111 /** 3112 * ti_sci_release_resource() - Release a resource from TISCI resource. 3113 * @res: Pointer to the TISCI resource 3114 * @id: Resource id to be released. 3115 */ 3116 void ti_sci_release_resource(struct ti_sci_resource *res, u16 id) 3117 { 3118 unsigned long flags; 3119 u16 set; 3120 3121 raw_spin_lock_irqsave(&res->lock, flags); 3122 for (set = 0; set < res->sets; set++) { 3123 struct ti_sci_resource_desc *desc = &res->desc[set]; 3124 3125 if (desc->num && desc->start <= id && 3126 (desc->start + desc->num) > id) 3127 __clear_bit(id - desc->start, desc->res_map); 3128 else if (desc->num_sec && desc->start_sec <= id && 3129 (desc->start_sec + desc->num_sec) > id) 3130 __clear_bit(id - desc->start_sec, desc->res_map); 3131 } 3132 raw_spin_unlock_irqrestore(&res->lock, flags); 3133 } 3134 EXPORT_SYMBOL_GPL(ti_sci_release_resource); 3135 3136 /** 3137 * ti_sci_get_num_resources() - Get the number of resources in TISCI resource 3138 * @res: Pointer to the TISCI resource 3139 * 3140 * Return: Total number of available resources. 3141 */ 3142 u32 ti_sci_get_num_resources(struct ti_sci_resource *res) 3143 { 3144 u32 set, count = 0; 3145 3146 for (set = 0; set < res->sets; set++) 3147 count += res->desc[set].num + res->desc[set].num_sec; 3148 3149 return count; 3150 } 3151 EXPORT_SYMBOL_GPL(ti_sci_get_num_resources); 3152 3153 /** 3154 * devm_ti_sci_get_resource_sets() - Get a TISCI resources assigned to a device 3155 * @handle: TISCI handle 3156 * @dev: Device pointer to which the resource is assigned 3157 * @dev_id: TISCI device id to which the resource is assigned 3158 * @sub_types: Array of sub_types assigned corresponding to device 3159 * @sets: Number of sub_types 3160 * 3161 * Return: Pointer to ti_sci_resource if all went well else appropriate 3162 * error pointer. 
3163 */ 3164 static struct ti_sci_resource * 3165 devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle, 3166 struct device *dev, u32 dev_id, u32 *sub_types, 3167 u32 sets) 3168 { 3169 struct ti_sci_resource *res; 3170 bool valid_set = false; 3171 int i, ret, res_count; 3172 3173 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); 3174 if (!res) 3175 return ERR_PTR(-ENOMEM); 3176 3177 res->sets = sets; 3178 res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc), 3179 GFP_KERNEL); 3180 if (!res->desc) 3181 return ERR_PTR(-ENOMEM); 3182 3183 for (i = 0; i < res->sets; i++) { 3184 ret = handle->ops.rm_core_ops.get_range(handle, dev_id, 3185 sub_types[i], 3186 &res->desc[i]); 3187 if (ret) { 3188 dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n", 3189 dev_id, sub_types[i]); 3190 memset(&res->desc[i], 0, sizeof(res->desc[i])); 3191 continue; 3192 } 3193 3194 dev_dbg(dev, "dev/sub_type: %d/%d, start/num: %d/%d | %d/%d\n", 3195 dev_id, sub_types[i], res->desc[i].start, 3196 res->desc[i].num, res->desc[i].start_sec, 3197 res->desc[i].num_sec); 3198 3199 valid_set = true; 3200 res_count = res->desc[i].num + res->desc[i].num_sec; 3201 res->desc[i].res_map = devm_bitmap_zalloc(dev, res_count, 3202 GFP_KERNEL); 3203 if (!res->desc[i].res_map) 3204 return ERR_PTR(-ENOMEM); 3205 } 3206 raw_spin_lock_init(&res->lock); 3207 3208 if (valid_set) 3209 return res; 3210 3211 return ERR_PTR(-EINVAL); 3212 } 3213 3214 /** 3215 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device 3216 * @handle: TISCI handle 3217 * @dev: Device pointer to which the resource is assigned 3218 * @dev_id: TISCI device id to which the resource is assigned 3219 * @of_prop: property name by which the resource are represented 3220 * 3221 * Return: Pointer to ti_sci_resource if all went well else appropriate 3222 * error pointer. 3223 */ 3224 struct ti_sci_resource * 3225 devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, 3226 struct device *dev, u32 dev_id, char *of_prop) 3227 { 3228 struct ti_sci_resource *res; 3229 u32 *sub_types; 3230 int sets; 3231 3232 sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop, 3233 sizeof(u32)); 3234 if (sets < 0) { 3235 dev_err(dev, "%s resource type ids not available\n", of_prop); 3236 return ERR_PTR(sets); 3237 } 3238 3239 sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL); 3240 if (!sub_types) 3241 return ERR_PTR(-ENOMEM); 3242 3243 of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets); 3244 res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types, 3245 sets); 3246 3247 kfree(sub_types); 3248 return res; 3249 } 3250 EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource); 3251 3252 /** 3253 * devm_ti_sci_get_resource() - Get a resource range assigned to the device 3254 * @handle: TISCI handle 3255 * @dev: Device pointer to which the resource is assigned 3256 * @dev_id: TISCI device id to which the resource is assigned 3257 * @suub_type: TISCI resource subytpe representing the resource. 3258 * 3259 * Return: Pointer to ti_sci_resource if all went well else appropriate 3260 * error pointer. 
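 *
 * A single-subtype sketch (dev_id and sub_type illustrative), typically used
 * together with ti_sci_get_free_resource():
 *
 *	res = devm_ti_sci_get_resource(handle, dev, dev_id, sub_type);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	irq_id = ti_sci_get_free_resource(res);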
3261 */ 3262 struct ti_sci_resource * 3263 devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, 3264 u32 dev_id, u32 sub_type) 3265 { 3266 return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1); 3267 } 3268 EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource); 3269 3270 static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode, 3271 void *cmd) 3272 { 3273 struct ti_sci_info *info = reboot_to_ti_sci_info(nb); 3274 const struct ti_sci_handle *handle = &info->handle; 3275 3276 ti_sci_cmd_core_reboot(handle); 3277 3278 /* call fail OR pass, we should not be here in the first place */ 3279 return NOTIFY_BAD; 3280 } 3281 3282 /* Description for K2G */ 3283 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = { 3284 .default_host_id = 2, 3285 /* Conservative duration */ 3286 .max_rx_timeout_ms = 1000, 3287 /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */ 3288 .max_msgs = 20, 3289 .max_msg_size = 64, 3290 }; 3291 3292 /* Description for AM654 */ 3293 static const struct ti_sci_desc ti_sci_pmmc_am654_desc = { 3294 .default_host_id = 12, 3295 /* Conservative duration */ 3296 .max_rx_timeout_ms = 10000, 3297 /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */ 3298 .max_msgs = 20, 3299 .max_msg_size = 60, 3300 }; 3301 3302 static const struct of_device_id ti_sci_of_match[] = { 3303 {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc}, 3304 {.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc}, 3305 { /* Sentinel */ }, 3306 }; 3307 MODULE_DEVICE_TABLE(of, ti_sci_of_match); 3308 3309 static int ti_sci_probe(struct platform_device *pdev) 3310 { 3311 struct device *dev = &pdev->dev; 3312 const struct of_device_id *of_id; 3313 const struct ti_sci_desc *desc; 3314 struct ti_sci_xfer *xfer; 3315 struct ti_sci_info *info = NULL; 3316 struct ti_sci_xfers_info *minfo; 3317 struct mbox_client *cl; 3318 int ret = -EINVAL; 3319 int i; 3320 int reboot = 0; 3321 u32 h_id; 3322 3323 of_id = of_match_device(ti_sci_of_match, dev); 3324 if (!of_id) { 3325 dev_err(dev, "OF data missing\n"); 3326 return -EINVAL; 3327 } 3328 desc = of_id->data; 3329 3330 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); 3331 if (!info) 3332 return -ENOMEM; 3333 3334 info->dev = dev; 3335 info->desc = desc; 3336 ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id); 3337 /* if the property is not present in DT, use a default from desc */ 3338 if (ret < 0) { 3339 info->host_id = info->desc->default_host_id; 3340 } else { 3341 if (!h_id) { 3342 dev_warn(dev, "Host ID 0 is reserved for firmware\n"); 3343 info->host_id = info->desc->default_host_id; 3344 } else { 3345 info->host_id = h_id; 3346 } 3347 } 3348 3349 reboot = of_property_read_bool(dev->of_node, 3350 "ti,system-reboot-controller"); 3351 INIT_LIST_HEAD(&info->node); 3352 minfo = &info->minfo; 3353 3354 /* 3355 * Pre-allocate messages 3356 * NEVER allocate more than what we can indicate in hdr.seq 3357 * if we have data description bug, force a fix.. 
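 * (The WARN_ON below caps max_msgs at the number of distinct values hdr.seq
 * can carry, since the sequence number is what matches a response back to
 * its preallocated xfer slot.)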
3358 */ 3359 if (WARN_ON(desc->max_msgs >= 3360 1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq))) 3361 return -EINVAL; 3362 3363 minfo->xfer_block = devm_kcalloc(dev, 3364 desc->max_msgs, 3365 sizeof(*minfo->xfer_block), 3366 GFP_KERNEL); 3367 if (!minfo->xfer_block) 3368 return -ENOMEM; 3369 3370 minfo->xfer_alloc_table = devm_bitmap_zalloc(dev, 3371 desc->max_msgs, 3372 GFP_KERNEL); 3373 if (!minfo->xfer_alloc_table) 3374 return -ENOMEM; 3375 3376 /* Pre-initialize the buffer pointer to pre-allocated buffers */ 3377 for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) { 3378 xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size, 3379 GFP_KERNEL); 3380 if (!xfer->xfer_buf) 3381 return -ENOMEM; 3382 3383 xfer->tx_message.buf = xfer->xfer_buf; 3384 init_completion(&xfer->done); 3385 } 3386 3387 ret = ti_sci_debugfs_create(pdev, info); 3388 if (ret) 3389 dev_warn(dev, "Failed to create debug file\n"); 3390 3391 platform_set_drvdata(pdev, info); 3392 3393 cl = &info->cl; 3394 cl->dev = dev; 3395 cl->tx_block = false; 3396 cl->rx_callback = ti_sci_rx_callback; 3397 cl->knows_txdone = true; 3398 3399 spin_lock_init(&minfo->xfer_lock); 3400 sema_init(&minfo->sem_xfer_count, desc->max_msgs); 3401 3402 info->chan_rx = mbox_request_channel_byname(cl, "rx"); 3403 if (IS_ERR(info->chan_rx)) { 3404 ret = PTR_ERR(info->chan_rx); 3405 goto out; 3406 } 3407 3408 info->chan_tx = mbox_request_channel_byname(cl, "tx"); 3409 if (IS_ERR(info->chan_tx)) { 3410 ret = PTR_ERR(info->chan_tx); 3411 goto out; 3412 } 3413 ret = ti_sci_cmd_get_revision(info); 3414 if (ret) { 3415 dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret); 3416 goto out; 3417 } 3418 3419 ti_sci_setup_ops(info); 3420 3421 if (reboot) { 3422 info->nb.notifier_call = tisci_reboot_handler; 3423 info->nb.priority = 128; 3424 3425 ret = register_restart_handler(&info->nb); 3426 if (ret) { 3427 dev_err(dev, "reboot registration fail(%d)\n", ret); 3428 goto out; 3429 } 3430 } 3431 3432 dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n", 3433 info->handle.version.abi_major, info->handle.version.abi_minor, 3434 info->handle.version.firmware_revision, 3435 info->handle.version.firmware_description); 3436 3437 mutex_lock(&ti_sci_list_mutex); 3438 list_add_tail(&info->node, &ti_sci_list); 3439 mutex_unlock(&ti_sci_list_mutex); 3440 3441 return of_platform_populate(dev->of_node, NULL, NULL, dev); 3442 out: 3443 if (!IS_ERR(info->chan_tx)) 3444 mbox_free_channel(info->chan_tx); 3445 if (!IS_ERR(info->chan_rx)) 3446 mbox_free_channel(info->chan_rx); 3447 debugfs_remove(info->d); 3448 return ret; 3449 } 3450 3451 static int ti_sci_remove(struct platform_device *pdev) 3452 { 3453 struct ti_sci_info *info; 3454 struct device *dev = &pdev->dev; 3455 int ret = 0; 3456 3457 of_platform_depopulate(dev); 3458 3459 info = platform_get_drvdata(pdev); 3460 3461 if (info->nb.notifier_call) 3462 unregister_restart_handler(&info->nb); 3463 3464 mutex_lock(&ti_sci_list_mutex); 3465 if (info->users) 3466 ret = -EBUSY; 3467 else 3468 list_del(&info->node); 3469 mutex_unlock(&ti_sci_list_mutex); 3470 3471 if (!ret) { 3472 ti_sci_debugfs_destroy(pdev, info); 3473 3474 /* Safe to free channels since no more users */ 3475 mbox_free_channel(info->chan_tx); 3476 mbox_free_channel(info->chan_rx); 3477 } 3478 3479 return ret; 3480 } 3481 3482 static struct platform_driver ti_sci_driver = { 3483 .probe = ti_sci_probe, 3484 .remove = ti_sci_remove, 3485 .driver = { 3486 .name = "ti-sci", 3487 .of_match_table = of_match_ptr(ti_sci_of_match), 3488 
}, 3489 }; 3490 module_platform_driver(ti_sci_driver); 3491 3492 MODULE_LICENSE("GPL v2"); 3493 MODULE_DESCRIPTION("TI System Control Interface (SCI) driver"); 3494 MODULE_AUTHOR("Nishanth Menon"); 3495 MODULE_ALIAS("platform:ti-sci"); 3496