1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Texas Instruments System Control Interface Protocol Driver 4 * 5 * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ 6 * Nishanth Menon 7 */ 8 9 #define pr_fmt(fmt) "%s: " fmt, __func__ 10 11 #include <linux/bitmap.h> 12 #include <linux/debugfs.h> 13 #include <linux/export.h> 14 #include <linux/io.h> 15 #include <linux/kernel.h> 16 #include <linux/mailbox_client.h> 17 #include <linux/module.h> 18 #include <linux/of_device.h> 19 #include <linux/semaphore.h> 20 #include <linux/slab.h> 21 #include <linux/soc/ti/ti-msgmgr.h> 22 #include <linux/soc/ti/ti_sci_protocol.h> 23 #include <linux/reboot.h> 24 25 #include "ti_sci.h" 26 27 /* List of all TI SCI devices active in system */ 28 static LIST_HEAD(ti_sci_list); 29 /* Protection for the entire list */ 30 static DEFINE_MUTEX(ti_sci_list_mutex); 31 32 /** 33 * struct ti_sci_xfer - Structure representing a message flow 34 * @tx_message: Transmit message 35 * @rx_len: Receive message length 36 * @xfer_buf: Preallocated buffer to store receive message 37 * Since we work with request-ACK protocol, we can 38 * reuse the same buffer for the rx path as we 39 * use for the tx path. 40 * @done: completion event 41 */ 42 struct ti_sci_xfer { 43 struct ti_msgmgr_message tx_message; 44 u8 rx_len; 45 u8 *xfer_buf; 46 struct completion done; 47 }; 48 49 /** 50 * struct ti_sci_xfers_info - Structure to manage transfer information 51 * @sem_xfer_count: Counting Semaphore for managing max simultaneous 52 * Messages. 53 * @xfer_block: Preallocated Message array 54 * @xfer_alloc_table: Bitmap table for allocated messages. 55 * Index of this bitmap table is also used for message 56 * sequence identifier. 57 * @xfer_lock: Protection for message allocation 58 */ 59 struct ti_sci_xfers_info { 60 struct semaphore sem_xfer_count; 61 struct ti_sci_xfer *xfer_block; 62 unsigned long *xfer_alloc_table; 63 /* protect transfer allocation */ 64 spinlock_t xfer_lock; 65 }; 66 67 /** 68 * struct ti_sci_desc - Description of SoC integration 69 * @host_id: Host identifier representing the compute entity 70 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) 71 * @max_msgs: Maximum number of messages that can be pending 72 * simultaneously in the system 73 * @max_msg_size: Maximum size of data per message that can be handled. 74 */ 75 struct ti_sci_desc { 76 u8 host_id; 77 int max_rx_timeout_ms; 78 int max_msgs; 79 int max_msg_size; 80 }; 81 82 /** 83 * struct ti_sci_info - Structure representing a TI SCI instance 84 * @dev: Device pointer 85 * @desc: SoC description for this instance 86 * @nb: Reboot Notifier block 87 * @d: Debugfs file entry 88 * @debug_region: Memory region where the debug message are available 89 * @debug_region_size: Debug region size 90 * @debug_buffer: Buffer allocated to copy debug messages. 91 * @handle: Instance of TI SCI handle to send to clients. 92 * @cl: Mailbox Client 93 * @chan_tx: Transmit mailbox channel 94 * @chan_rx: Receive mailbox channel 95 * @minfo: Message info 96 * @node: list head 97 * @users: Number of users of this instance 98 */ 99 struct ti_sci_info { 100 struct device *dev; 101 struct notifier_block nb; 102 const struct ti_sci_desc *desc; 103 struct dentry *d; 104 void __iomem *debug_region; 105 char *debug_buffer; 106 size_t debug_region_size; 107 struct ti_sci_handle handle; 108 struct mbox_client cl; 109 struct mbox_chan *chan_tx; 110 struct mbox_chan *chan_rx; 111 struct ti_sci_xfers_info minfo; 112 struct list_head node; 113 /* protected by ti_sci_list_mutex */ 114 int users; 115 116 }; 117 118 #define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl) 119 #define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle) 120 #define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb) 121 122 #ifdef CONFIG_DEBUG_FS 123 124 /** 125 * ti_sci_debug_show() - Helper to dump the debug log 126 * @s: sequence file pointer 127 * @unused: unused. 128 * 129 * Return: 0 130 */ 131 static int ti_sci_debug_show(struct seq_file *s, void *unused) 132 { 133 struct ti_sci_info *info = s->private; 134 135 memcpy_fromio(info->debug_buffer, info->debug_region, 136 info->debug_region_size); 137 /* 138 * We don't trust firmware to leave NULL terminated last byte (hence 139 * we have allocated 1 extra 0 byte). Since we cannot guarantee any 140 * specific data format for debug messages, We just present the data 141 * in the buffer as is - we expect the messages to be self explanatory. 142 */ 143 seq_puts(s, info->debug_buffer); 144 return 0; 145 } 146 147 /** 148 * ti_sci_debug_open() - debug file open 149 * @inode: inode pointer 150 * @file: file pointer 151 * 152 * Return: result of single_open 153 */ 154 static int ti_sci_debug_open(struct inode *inode, struct file *file) 155 { 156 return single_open(file, ti_sci_debug_show, inode->i_private); 157 } 158 159 /* log file operations */ 160 static const struct file_operations ti_sci_debug_fops = { 161 .open = ti_sci_debug_open, 162 .read = seq_read, 163 .llseek = seq_lseek, 164 .release = single_release, 165 }; 166 167 /** 168 * ti_sci_debugfs_create() - Create log debug file 169 * @pdev: platform device pointer 170 * @info: Pointer to SCI entity information 171 * 172 * Return: 0 if all went fine, else corresponding error. 173 */ 174 static int ti_sci_debugfs_create(struct platform_device *pdev, 175 struct ti_sci_info *info) 176 { 177 struct device *dev = &pdev->dev; 178 struct resource *res; 179 char debug_name[50] = "ti_sci_debug@"; 180 181 /* Debug region is optional */ 182 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 183 "debug_messages"); 184 info->debug_region = devm_ioremap_resource(dev, res); 185 if (IS_ERR(info->debug_region)) 186 return 0; 187 info->debug_region_size = resource_size(res); 188 189 info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1, 190 sizeof(char), GFP_KERNEL); 191 if (!info->debug_buffer) 192 return -ENOMEM; 193 /* Setup NULL termination */ 194 info->debug_buffer[info->debug_region_size] = 0; 195 196 info->d = debugfs_create_file(strncat(debug_name, dev_name(dev), 197 sizeof(debug_name) - 198 sizeof("ti_sci_debug@")), 199 0444, NULL, info, &ti_sci_debug_fops); 200 if (IS_ERR(info->d)) 201 return PTR_ERR(info->d); 202 203 dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n", 204 info->debug_region, info->debug_region_size, res); 205 return 0; 206 } 207 208 /** 209 * ti_sci_debugfs_destroy() - clean up log debug file 210 * @pdev: platform device pointer 211 * @info: Pointer to SCI entity information 212 */ 213 static void ti_sci_debugfs_destroy(struct platform_device *pdev, 214 struct ti_sci_info *info) 215 { 216 if (IS_ERR(info->debug_region)) 217 return; 218 219 debugfs_remove(info->d); 220 } 221 #else /* CONFIG_DEBUG_FS */ 222 static inline int ti_sci_debugfs_create(struct platform_device *dev, 223 struct ti_sci_info *info) 224 { 225 return 0; 226 } 227 228 static inline void ti_sci_debugfs_destroy(struct platform_device *dev, 229 struct ti_sci_info *info) 230 { 231 } 232 #endif /* CONFIG_DEBUG_FS */ 233 234 /** 235 * ti_sci_dump_header_dbg() - Helper to dump a message header. 236 * @dev: Device pointer corresponding to the SCI entity 237 * @hdr: pointer to header. 238 */ 239 static inline void ti_sci_dump_header_dbg(struct device *dev, 240 struct ti_sci_msg_hdr *hdr) 241 { 242 dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n", 243 hdr->type, hdr->host, hdr->seq, hdr->flags); 244 } 245 246 /** 247 * ti_sci_rx_callback() - mailbox client callback for receive messages 248 * @cl: client pointer 249 * @m: mailbox message 250 * 251 * Processes one received message to appropriate transfer information and 252 * signals completion of the transfer. 253 * 254 * NOTE: This function will be invoked in IRQ context, hence should be 255 * as optimal as possible. 256 */ 257 static void ti_sci_rx_callback(struct mbox_client *cl, void *m) 258 { 259 struct ti_sci_info *info = cl_to_ti_sci_info(cl); 260 struct device *dev = info->dev; 261 struct ti_sci_xfers_info *minfo = &info->minfo; 262 struct ti_msgmgr_message *mbox_msg = m; 263 struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf; 264 struct ti_sci_xfer *xfer; 265 u8 xfer_id; 266 267 xfer_id = hdr->seq; 268 269 /* 270 * Are we even expecting this? 271 * NOTE: barriers were implicit in locks used for modifying the bitmap 272 */ 273 if (!test_bit(xfer_id, minfo->xfer_alloc_table)) { 274 dev_err(dev, "Message for %d is not expected!\n", xfer_id); 275 return; 276 } 277 278 xfer = &minfo->xfer_block[xfer_id]; 279 280 /* Is the message of valid length? */ 281 if (mbox_msg->len > info->desc->max_msg_size) { 282 dev_err(dev, "Unable to handle %zu xfer(max %d)\n", 283 mbox_msg->len, info->desc->max_msg_size); 284 ti_sci_dump_header_dbg(dev, hdr); 285 return; 286 } 287 if (mbox_msg->len < xfer->rx_len) { 288 dev_err(dev, "Recv xfer %zu < expected %d length\n", 289 mbox_msg->len, xfer->rx_len); 290 ti_sci_dump_header_dbg(dev, hdr); 291 return; 292 } 293 294 ti_sci_dump_header_dbg(dev, hdr); 295 /* Take a copy to the rx buffer.. */ 296 memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len); 297 complete(&xfer->done); 298 } 299 300 /** 301 * ti_sci_get_one_xfer() - Allocate one message 302 * @info: Pointer to SCI entity information 303 * @msg_type: Message type 304 * @msg_flags: Flag to set for the message 305 * @tx_message_size: transmit message size 306 * @rx_message_size: receive message size 307 * 308 * Helper function which is used by various command functions that are 309 * exposed to clients of this driver for allocating a message traffic event. 310 * 311 * This function can sleep depending on pending requests already in the system 312 * for the SCI entity. Further, this also holds a spinlock to maintain integrity 313 * of internal data structures. 314 * 315 * Return: 0 if all went fine, else corresponding error. 316 */ 317 static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info, 318 u16 msg_type, u32 msg_flags, 319 size_t tx_message_size, 320 size_t rx_message_size) 321 { 322 struct ti_sci_xfers_info *minfo = &info->minfo; 323 struct ti_sci_xfer *xfer; 324 struct ti_sci_msg_hdr *hdr; 325 unsigned long flags; 326 unsigned long bit_pos; 327 u8 xfer_id; 328 int ret; 329 int timeout; 330 331 /* Ensure we have sane transfer sizes */ 332 if (rx_message_size > info->desc->max_msg_size || 333 tx_message_size > info->desc->max_msg_size || 334 rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr)) 335 return ERR_PTR(-ERANGE); 336 337 /* 338 * Ensure we have only controlled number of pending messages. 339 * Ideally, we might just have to wait a single message, be 340 * conservative and wait 5 times that.. 341 */ 342 timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5; 343 ret = down_timeout(&minfo->sem_xfer_count, timeout); 344 if (ret < 0) 345 return ERR_PTR(ret); 346 347 /* Keep the locked section as small as possible */ 348 spin_lock_irqsave(&minfo->xfer_lock, flags); 349 bit_pos = find_first_zero_bit(minfo->xfer_alloc_table, 350 info->desc->max_msgs); 351 set_bit(bit_pos, minfo->xfer_alloc_table); 352 spin_unlock_irqrestore(&minfo->xfer_lock, flags); 353 354 /* 355 * We already ensured in probe that we can have max messages that can 356 * fit in hdr.seq - NOTE: this improves access latencies 357 * to predictable O(1) access, BUT, it opens us to risk if 358 * remote misbehaves with corrupted message sequence responses. 359 * If that happens, we are going to be messed up anyways.. 360 */ 361 xfer_id = (u8)bit_pos; 362 363 xfer = &minfo->xfer_block[xfer_id]; 364 365 hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 366 xfer->tx_message.len = tx_message_size; 367 xfer->rx_len = (u8)rx_message_size; 368 369 reinit_completion(&xfer->done); 370 371 hdr->seq = xfer_id; 372 hdr->type = msg_type; 373 hdr->host = info->desc->host_id; 374 hdr->flags = msg_flags; 375 376 return xfer; 377 } 378 379 /** 380 * ti_sci_put_one_xfer() - Release a message 381 * @minfo: transfer info pointer 382 * @xfer: message that was reserved by ti_sci_get_one_xfer 383 * 384 * This holds a spinlock to maintain integrity of internal data structures. 385 */ 386 static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo, 387 struct ti_sci_xfer *xfer) 388 { 389 unsigned long flags; 390 struct ti_sci_msg_hdr *hdr; 391 u8 xfer_id; 392 393 hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 394 xfer_id = hdr->seq; 395 396 /* 397 * Keep the locked section as small as possible 398 * NOTE: we might escape with smp_mb and no lock here.. 399 * but just be conservative and symmetric. 400 */ 401 spin_lock_irqsave(&minfo->xfer_lock, flags); 402 clear_bit(xfer_id, minfo->xfer_alloc_table); 403 spin_unlock_irqrestore(&minfo->xfer_lock, flags); 404 405 /* Increment the count for the next user to get through */ 406 up(&minfo->sem_xfer_count); 407 } 408 409 /** 410 * ti_sci_do_xfer() - Do one transfer 411 * @info: Pointer to SCI entity information 412 * @xfer: Transfer to initiate and wait for response 413 * 414 * Return: -ETIMEDOUT in case of no response, if transmit error, 415 * return corresponding error, else if all goes well, 416 * return 0. 417 */ 418 static inline int ti_sci_do_xfer(struct ti_sci_info *info, 419 struct ti_sci_xfer *xfer) 420 { 421 int ret; 422 int timeout; 423 struct device *dev = info->dev; 424 425 ret = mbox_send_message(info->chan_tx, &xfer->tx_message); 426 if (ret < 0) 427 return ret; 428 429 ret = 0; 430 431 /* And we wait for the response. */ 432 timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); 433 if (!wait_for_completion_timeout(&xfer->done, timeout)) { 434 dev_err(dev, "Mbox timedout in resp(caller: %pS)\n", 435 (void *)_RET_IP_); 436 ret = -ETIMEDOUT; 437 } 438 /* 439 * NOTE: we might prefer not to need the mailbox ticker to manage the 440 * transfer queueing since the protocol layer queues things by itself. 441 * Unfortunately, we have to kick the mailbox framework after we have 442 * received our message. 443 */ 444 mbox_client_txdone(info->chan_tx, ret); 445 446 return ret; 447 } 448 449 /** 450 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity 451 * @info: Pointer to SCI entity information 452 * 453 * Updates the SCI information in the internal data structure. 454 * 455 * Return: 0 if all went fine, else return appropriate error. 456 */ 457 static int ti_sci_cmd_get_revision(struct ti_sci_info *info) 458 { 459 struct device *dev = info->dev; 460 struct ti_sci_handle *handle = &info->handle; 461 struct ti_sci_version_info *ver = &handle->version; 462 struct ti_sci_msg_resp_version *rev_info; 463 struct ti_sci_xfer *xfer; 464 int ret; 465 466 /* No need to setup flags since it is expected to respond */ 467 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION, 468 0x0, sizeof(struct ti_sci_msg_hdr), 469 sizeof(*rev_info)); 470 if (IS_ERR(xfer)) { 471 ret = PTR_ERR(xfer); 472 dev_err(dev, "Message alloc failed(%d)\n", ret); 473 return ret; 474 } 475 476 rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf; 477 478 ret = ti_sci_do_xfer(info, xfer); 479 if (ret) { 480 dev_err(dev, "Mbox send fail %d\n", ret); 481 goto fail; 482 } 483 484 ver->abi_major = rev_info->abi_major; 485 ver->abi_minor = rev_info->abi_minor; 486 ver->firmware_revision = rev_info->firmware_revision; 487 strncpy(ver->firmware_description, rev_info->firmware_description, 488 sizeof(ver->firmware_description)); 489 490 fail: 491 ti_sci_put_one_xfer(&info->minfo, xfer); 492 return ret; 493 } 494 495 /** 496 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup 497 * @r: pointer to response buffer 498 * 499 * Return: true if the response was an ACK, else returns false. 500 */ 501 static inline bool ti_sci_is_response_ack(void *r) 502 { 503 struct ti_sci_msg_hdr *hdr = r; 504 505 return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false; 506 } 507 508 /** 509 * ti_sci_set_device_state() - Set device state helper 510 * @handle: pointer to TI SCI handle 511 * @id: Device identifier 512 * @flags: flags to setup for the device 513 * @state: State to move the device to 514 * 515 * Return: 0 if all went well, else returns appropriate error value. 516 */ 517 static int ti_sci_set_device_state(const struct ti_sci_handle *handle, 518 u32 id, u32 flags, u8 state) 519 { 520 struct ti_sci_info *info; 521 struct ti_sci_msg_req_set_device_state *req; 522 struct ti_sci_msg_hdr *resp; 523 struct ti_sci_xfer *xfer; 524 struct device *dev; 525 int ret = 0; 526 527 if (IS_ERR(handle)) 528 return PTR_ERR(handle); 529 if (!handle) 530 return -EINVAL; 531 532 info = handle_to_ti_sci_info(handle); 533 dev = info->dev; 534 535 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE, 536 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 537 sizeof(*req), sizeof(*resp)); 538 if (IS_ERR(xfer)) { 539 ret = PTR_ERR(xfer); 540 dev_err(dev, "Message alloc failed(%d)\n", ret); 541 return ret; 542 } 543 req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf; 544 req->id = id; 545 req->state = state; 546 547 ret = ti_sci_do_xfer(info, xfer); 548 if (ret) { 549 dev_err(dev, "Mbox send fail %d\n", ret); 550 goto fail; 551 } 552 553 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 554 555 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 556 557 fail: 558 ti_sci_put_one_xfer(&info->minfo, xfer); 559 560 return ret; 561 } 562 563 /** 564 * ti_sci_get_device_state() - Get device state helper 565 * @handle: Handle to the device 566 * @id: Device Identifier 567 * @clcnt: Pointer to Context Loss Count 568 * @resets: pointer to resets 569 * @p_state: pointer to p_state 570 * @c_state: pointer to c_state 571 * 572 * Return: 0 if all went fine, else return appropriate error. 573 */ 574 static int ti_sci_get_device_state(const struct ti_sci_handle *handle, 575 u32 id, u32 *clcnt, u32 *resets, 576 u8 *p_state, u8 *c_state) 577 { 578 struct ti_sci_info *info; 579 struct ti_sci_msg_req_get_device_state *req; 580 struct ti_sci_msg_resp_get_device_state *resp; 581 struct ti_sci_xfer *xfer; 582 struct device *dev; 583 int ret = 0; 584 585 if (IS_ERR(handle)) 586 return PTR_ERR(handle); 587 if (!handle) 588 return -EINVAL; 589 590 if (!clcnt && !resets && !p_state && !c_state) 591 return -EINVAL; 592 593 info = handle_to_ti_sci_info(handle); 594 dev = info->dev; 595 596 /* Response is expected, so need of any flags */ 597 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE, 598 0, sizeof(*req), sizeof(*resp)); 599 if (IS_ERR(xfer)) { 600 ret = PTR_ERR(xfer); 601 dev_err(dev, "Message alloc failed(%d)\n", ret); 602 return ret; 603 } 604 req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf; 605 req->id = id; 606 607 ret = ti_sci_do_xfer(info, xfer); 608 if (ret) { 609 dev_err(dev, "Mbox send fail %d\n", ret); 610 goto fail; 611 } 612 613 resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf; 614 if (!ti_sci_is_response_ack(resp)) { 615 ret = -ENODEV; 616 goto fail; 617 } 618 619 if (clcnt) 620 *clcnt = resp->context_loss_count; 621 if (resets) 622 *resets = resp->resets; 623 if (p_state) 624 *p_state = resp->programmed_state; 625 if (c_state) 626 *c_state = resp->current_state; 627 fail: 628 ti_sci_put_one_xfer(&info->minfo, xfer); 629 630 return ret; 631 } 632 633 /** 634 * ti_sci_cmd_get_device() - command to request for device managed by TISCI 635 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 636 * @id: Device Identifier 637 * 638 * Request for the device - NOTE: the client MUST maintain integrity of 639 * usage count by balancing get_device with put_device. No refcounting is 640 * managed by driver for that purpose. 641 * 642 * NOTE: The request is for exclusive access for the processor. 643 * 644 * Return: 0 if all went fine, else return appropriate error. 645 */ 646 static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id) 647 { 648 return ti_sci_set_device_state(handle, id, 649 MSG_FLAG_DEVICE_EXCLUSIVE, 650 MSG_DEVICE_SW_STATE_ON); 651 } 652 653 /** 654 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI 655 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 656 * @id: Device Identifier 657 * 658 * Request for the device - NOTE: the client MUST maintain integrity of 659 * usage count by balancing get_device with put_device. No refcounting is 660 * managed by driver for that purpose. 661 * 662 * Return: 0 if all went fine, else return appropriate error. 663 */ 664 static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id) 665 { 666 return ti_sci_set_device_state(handle, id, 667 MSG_FLAG_DEVICE_EXCLUSIVE, 668 MSG_DEVICE_SW_STATE_RETENTION); 669 } 670 671 /** 672 * ti_sci_cmd_put_device() - command to release a device managed by TISCI 673 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 674 * @id: Device Identifier 675 * 676 * Request for the device - NOTE: the client MUST maintain integrity of 677 * usage count by balancing get_device with put_device. No refcounting is 678 * managed by driver for that purpose. 679 * 680 * Return: 0 if all went fine, else return appropriate error. 681 */ 682 static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id) 683 { 684 return ti_sci_set_device_state(handle, id, 685 0, MSG_DEVICE_SW_STATE_AUTO_OFF); 686 } 687 688 /** 689 * ti_sci_cmd_dev_is_valid() - Is the device valid 690 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 691 * @id: Device Identifier 692 * 693 * Return: 0 if all went fine and the device ID is valid, else return 694 * appropriate error. 695 */ 696 static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id) 697 { 698 u8 unused; 699 700 /* check the device state which will also tell us if the ID is valid */ 701 return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused); 702 } 703 704 /** 705 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter 706 * @handle: Pointer to TISCI handle 707 * @id: Device Identifier 708 * @count: Pointer to Context Loss counter to populate 709 * 710 * Return: 0 if all went fine, else return appropriate error. 711 */ 712 static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id, 713 u32 *count) 714 { 715 return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL); 716 } 717 718 /** 719 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle 720 * @handle: Pointer to TISCI handle 721 * @id: Device Identifier 722 * @r_state: true if requested to be idle 723 * 724 * Return: 0 if all went fine, else return appropriate error. 725 */ 726 static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id, 727 bool *r_state) 728 { 729 int ret; 730 u8 state; 731 732 if (!r_state) 733 return -EINVAL; 734 735 ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL); 736 if (ret) 737 return ret; 738 739 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION); 740 741 return 0; 742 } 743 744 /** 745 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped 746 * @handle: Pointer to TISCI handle 747 * @id: Device Identifier 748 * @r_state: true if requested to be stopped 749 * @curr_state: true if currently stopped. 750 * 751 * Return: 0 if all went fine, else return appropriate error. 752 */ 753 static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id, 754 bool *r_state, bool *curr_state) 755 { 756 int ret; 757 u8 p_state, c_state; 758 759 if (!r_state && !curr_state) 760 return -EINVAL; 761 762 ret = 763 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state); 764 if (ret) 765 return ret; 766 767 if (r_state) 768 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF); 769 if (curr_state) 770 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF); 771 772 return 0; 773 } 774 775 /** 776 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON 777 * @handle: Pointer to TISCI handle 778 * @id: Device Identifier 779 * @r_state: true if requested to be ON 780 * @curr_state: true if currently ON and active 781 * 782 * Return: 0 if all went fine, else return appropriate error. 783 */ 784 static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id, 785 bool *r_state, bool *curr_state) 786 { 787 int ret; 788 u8 p_state, c_state; 789 790 if (!r_state && !curr_state) 791 return -EINVAL; 792 793 ret = 794 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state); 795 if (ret) 796 return ret; 797 798 if (r_state) 799 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON); 800 if (curr_state) 801 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON); 802 803 return 0; 804 } 805 806 /** 807 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning 808 * @handle: Pointer to TISCI handle 809 * @id: Device Identifier 810 * @curr_state: true if currently transitioning. 811 * 812 * Return: 0 if all went fine, else return appropriate error. 813 */ 814 static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id, 815 bool *curr_state) 816 { 817 int ret; 818 u8 state; 819 820 if (!curr_state) 821 return -EINVAL; 822 823 ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state); 824 if (ret) 825 return ret; 826 827 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS); 828 829 return 0; 830 } 831 832 /** 833 * ti_sci_cmd_set_device_resets() - command to set resets for device managed 834 * by TISCI 835 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 836 * @id: Device Identifier 837 * @reset_state: Device specific reset bit field 838 * 839 * Return: 0 if all went fine, else return appropriate error. 840 */ 841 static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle, 842 u32 id, u32 reset_state) 843 { 844 struct ti_sci_info *info; 845 struct ti_sci_msg_req_set_device_resets *req; 846 struct ti_sci_msg_hdr *resp; 847 struct ti_sci_xfer *xfer; 848 struct device *dev; 849 int ret = 0; 850 851 if (IS_ERR(handle)) 852 return PTR_ERR(handle); 853 if (!handle) 854 return -EINVAL; 855 856 info = handle_to_ti_sci_info(handle); 857 dev = info->dev; 858 859 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS, 860 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 861 sizeof(*req), sizeof(*resp)); 862 if (IS_ERR(xfer)) { 863 ret = PTR_ERR(xfer); 864 dev_err(dev, "Message alloc failed(%d)\n", ret); 865 return ret; 866 } 867 req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf; 868 req->id = id; 869 req->resets = reset_state; 870 871 ret = ti_sci_do_xfer(info, xfer); 872 if (ret) { 873 dev_err(dev, "Mbox send fail %d\n", ret); 874 goto fail; 875 } 876 877 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 878 879 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 880 881 fail: 882 ti_sci_put_one_xfer(&info->minfo, xfer); 883 884 return ret; 885 } 886 887 /** 888 * ti_sci_cmd_get_device_resets() - Get reset state for device managed 889 * by TISCI 890 * @handle: Pointer to TISCI handle 891 * @id: Device Identifier 892 * @reset_state: Pointer to reset state to populate 893 * 894 * Return: 0 if all went fine, else return appropriate error. 895 */ 896 static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle, 897 u32 id, u32 *reset_state) 898 { 899 return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL, 900 NULL); 901 } 902 903 /** 904 * ti_sci_set_clock_state() - Set clock state helper 905 * @handle: pointer to TI SCI handle 906 * @dev_id: Device identifier this request is for 907 * @clk_id: Clock identifier for the device for this request. 908 * Each device has it's own set of clock inputs. This indexes 909 * which clock input to modify. 910 * @flags: Header flags as needed 911 * @state: State to request for the clock. 912 * 913 * Return: 0 if all went well, else returns appropriate error value. 914 */ 915 static int ti_sci_set_clock_state(const struct ti_sci_handle *handle, 916 u32 dev_id, u8 clk_id, 917 u32 flags, u8 state) 918 { 919 struct ti_sci_info *info; 920 struct ti_sci_msg_req_set_clock_state *req; 921 struct ti_sci_msg_hdr *resp; 922 struct ti_sci_xfer *xfer; 923 struct device *dev; 924 int ret = 0; 925 926 if (IS_ERR(handle)) 927 return PTR_ERR(handle); 928 if (!handle) 929 return -EINVAL; 930 931 info = handle_to_ti_sci_info(handle); 932 dev = info->dev; 933 934 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE, 935 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 936 sizeof(*req), sizeof(*resp)); 937 if (IS_ERR(xfer)) { 938 ret = PTR_ERR(xfer); 939 dev_err(dev, "Message alloc failed(%d)\n", ret); 940 return ret; 941 } 942 req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf; 943 req->dev_id = dev_id; 944 req->clk_id = clk_id; 945 req->request_state = state; 946 947 ret = ti_sci_do_xfer(info, xfer); 948 if (ret) { 949 dev_err(dev, "Mbox send fail %d\n", ret); 950 goto fail; 951 } 952 953 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 954 955 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 956 957 fail: 958 ti_sci_put_one_xfer(&info->minfo, xfer); 959 960 return ret; 961 } 962 963 /** 964 * ti_sci_cmd_get_clock_state() - Get clock state helper 965 * @handle: pointer to TI SCI handle 966 * @dev_id: Device identifier this request is for 967 * @clk_id: Clock identifier for the device for this request. 968 * Each device has it's own set of clock inputs. This indexes 969 * which clock input to modify. 970 * @programmed_state: State requested for clock to move to 971 * @current_state: State that the clock is currently in 972 * 973 * Return: 0 if all went well, else returns appropriate error value. 974 */ 975 static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle, 976 u32 dev_id, u8 clk_id, 977 u8 *programmed_state, u8 *current_state) 978 { 979 struct ti_sci_info *info; 980 struct ti_sci_msg_req_get_clock_state *req; 981 struct ti_sci_msg_resp_get_clock_state *resp; 982 struct ti_sci_xfer *xfer; 983 struct device *dev; 984 int ret = 0; 985 986 if (IS_ERR(handle)) 987 return PTR_ERR(handle); 988 if (!handle) 989 return -EINVAL; 990 991 if (!programmed_state && !current_state) 992 return -EINVAL; 993 994 info = handle_to_ti_sci_info(handle); 995 dev = info->dev; 996 997 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE, 998 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 999 sizeof(*req), sizeof(*resp)); 1000 if (IS_ERR(xfer)) { 1001 ret = PTR_ERR(xfer); 1002 dev_err(dev, "Message alloc failed(%d)\n", ret); 1003 return ret; 1004 } 1005 req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf; 1006 req->dev_id = dev_id; 1007 req->clk_id = clk_id; 1008 1009 ret = ti_sci_do_xfer(info, xfer); 1010 if (ret) { 1011 dev_err(dev, "Mbox send fail %d\n", ret); 1012 goto fail; 1013 } 1014 1015 resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf; 1016 1017 if (!ti_sci_is_response_ack(resp)) { 1018 ret = -ENODEV; 1019 goto fail; 1020 } 1021 1022 if (programmed_state) 1023 *programmed_state = resp->programmed_state; 1024 if (current_state) 1025 *current_state = resp->current_state; 1026 1027 fail: 1028 ti_sci_put_one_xfer(&info->minfo, xfer); 1029 1030 return ret; 1031 } 1032 1033 /** 1034 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI 1035 * @handle: pointer to TI SCI handle 1036 * @dev_id: Device identifier this request is for 1037 * @clk_id: Clock identifier for the device for this request. 1038 * Each device has it's own set of clock inputs. This indexes 1039 * which clock input to modify. 1040 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false' 1041 * @can_change_freq: 'true' if frequency change is desired, else 'false' 1042 * @enable_input_term: 'true' if input termination is desired, else 'false' 1043 * 1044 * Return: 0 if all went well, else returns appropriate error value. 1045 */ 1046 static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id, 1047 u8 clk_id, bool needs_ssc, bool can_change_freq, 1048 bool enable_input_term) 1049 { 1050 u32 flags = 0; 1051 1052 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0; 1053 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0; 1054 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0; 1055 1056 return ti_sci_set_clock_state(handle, dev_id, clk_id, flags, 1057 MSG_CLOCK_SW_STATE_REQ); 1058 } 1059 1060 /** 1061 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control 1062 * @handle: pointer to TI SCI handle 1063 * @dev_id: Device identifier this request is for 1064 * @clk_id: Clock identifier for the device for this request. 1065 * Each device has it's own set of clock inputs. This indexes 1066 * which clock input to modify. 1067 * 1068 * NOTE: This clock must have been requested by get_clock previously. 1069 * 1070 * Return: 0 if all went well, else returns appropriate error value. 1071 */ 1072 static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle, 1073 u32 dev_id, u8 clk_id) 1074 { 1075 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0, 1076 MSG_CLOCK_SW_STATE_UNREQ); 1077 } 1078 1079 /** 1080 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI 1081 * @handle: pointer to TI SCI handle 1082 * @dev_id: Device identifier this request is for 1083 * @clk_id: Clock identifier for the device for this request. 1084 * Each device has it's own set of clock inputs. This indexes 1085 * which clock input to modify. 1086 * 1087 * NOTE: This clock must have been requested by get_clock previously. 1088 * 1089 * Return: 0 if all went well, else returns appropriate error value. 1090 */ 1091 static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle, 1092 u32 dev_id, u8 clk_id) 1093 { 1094 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0, 1095 MSG_CLOCK_SW_STATE_AUTO); 1096 } 1097 1098 /** 1099 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed 1100 * @handle: pointer to TI SCI handle 1101 * @dev_id: Device identifier this request is for 1102 * @clk_id: Clock identifier for the device for this request. 1103 * Each device has it's own set of clock inputs. This indexes 1104 * which clock input to modify. 1105 * @req_state: state indicating if the clock is auto managed 1106 * 1107 * Return: 0 if all went well, else returns appropriate error value. 1108 */ 1109 static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle, 1110 u32 dev_id, u8 clk_id, bool *req_state) 1111 { 1112 u8 state = 0; 1113 int ret; 1114 1115 if (!req_state) 1116 return -EINVAL; 1117 1118 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL); 1119 if (ret) 1120 return ret; 1121 1122 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO); 1123 return 0; 1124 } 1125 1126 /** 1127 * ti_sci_cmd_clk_is_on() - Is the clock ON 1128 * @handle: pointer to TI SCI handle 1129 * @dev_id: Device identifier this request is for 1130 * @clk_id: Clock identifier for the device for this request. 1131 * Each device has it's own set of clock inputs. This indexes 1132 * which clock input to modify. 1133 * @req_state: state indicating if the clock is managed by us and enabled 1134 * @curr_state: state indicating if the clock is ready for operation 1135 * 1136 * Return: 0 if all went well, else returns appropriate error value. 1137 */ 1138 static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id, 1139 u8 clk_id, bool *req_state, bool *curr_state) 1140 { 1141 u8 c_state = 0, r_state = 0; 1142 int ret; 1143 1144 if (!req_state && !curr_state) 1145 return -EINVAL; 1146 1147 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, 1148 &r_state, &c_state); 1149 if (ret) 1150 return ret; 1151 1152 if (req_state) 1153 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ); 1154 if (curr_state) 1155 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY); 1156 return 0; 1157 } 1158 1159 /** 1160 * ti_sci_cmd_clk_is_off() - Is the clock OFF 1161 * @handle: pointer to TI SCI handle 1162 * @dev_id: Device identifier this request is for 1163 * @clk_id: Clock identifier for the device for this request. 1164 * Each device has it's own set of clock inputs. This indexes 1165 * which clock input to modify. 1166 * @req_state: state indicating if the clock is managed by us and disabled 1167 * @curr_state: state indicating if the clock is NOT ready for operation 1168 * 1169 * Return: 0 if all went well, else returns appropriate error value. 1170 */ 1171 static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id, 1172 u8 clk_id, bool *req_state, bool *curr_state) 1173 { 1174 u8 c_state = 0, r_state = 0; 1175 int ret; 1176 1177 if (!req_state && !curr_state) 1178 return -EINVAL; 1179 1180 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, 1181 &r_state, &c_state); 1182 if (ret) 1183 return ret; 1184 1185 if (req_state) 1186 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ); 1187 if (curr_state) 1188 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY); 1189 return 0; 1190 } 1191 1192 /** 1193 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock 1194 * @handle: pointer to TI SCI handle 1195 * @dev_id: Device identifier this request is for 1196 * @clk_id: Clock identifier for the device for this request. 1197 * Each device has it's own set of clock inputs. This indexes 1198 * which clock input to modify. 1199 * @parent_id: Parent clock identifier to set 1200 * 1201 * Return: 0 if all went well, else returns appropriate error value. 1202 */ 1203 static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle, 1204 u32 dev_id, u8 clk_id, u8 parent_id) 1205 { 1206 struct ti_sci_info *info; 1207 struct ti_sci_msg_req_set_clock_parent *req; 1208 struct ti_sci_msg_hdr *resp; 1209 struct ti_sci_xfer *xfer; 1210 struct device *dev; 1211 int ret = 0; 1212 1213 if (IS_ERR(handle)) 1214 return PTR_ERR(handle); 1215 if (!handle) 1216 return -EINVAL; 1217 1218 info = handle_to_ti_sci_info(handle); 1219 dev = info->dev; 1220 1221 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT, 1222 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1223 sizeof(*req), sizeof(*resp)); 1224 if (IS_ERR(xfer)) { 1225 ret = PTR_ERR(xfer); 1226 dev_err(dev, "Message alloc failed(%d)\n", ret); 1227 return ret; 1228 } 1229 req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf; 1230 req->dev_id = dev_id; 1231 req->clk_id = clk_id; 1232 req->parent_id = parent_id; 1233 1234 ret = ti_sci_do_xfer(info, xfer); 1235 if (ret) { 1236 dev_err(dev, "Mbox send fail %d\n", ret); 1237 goto fail; 1238 } 1239 1240 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 1241 1242 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 1243 1244 fail: 1245 ti_sci_put_one_xfer(&info->minfo, xfer); 1246 1247 return ret; 1248 } 1249 1250 /** 1251 * ti_sci_cmd_clk_get_parent() - Get current parent clock source 1252 * @handle: pointer to TI SCI handle 1253 * @dev_id: Device identifier this request is for 1254 * @clk_id: Clock identifier for the device for this request. 1255 * Each device has it's own set of clock inputs. This indexes 1256 * which clock input to modify. 1257 * @parent_id: Current clock parent 1258 * 1259 * Return: 0 if all went well, else returns appropriate error value. 1260 */ 1261 static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle, 1262 u32 dev_id, u8 clk_id, u8 *parent_id) 1263 { 1264 struct ti_sci_info *info; 1265 struct ti_sci_msg_req_get_clock_parent *req; 1266 struct ti_sci_msg_resp_get_clock_parent *resp; 1267 struct ti_sci_xfer *xfer; 1268 struct device *dev; 1269 int ret = 0; 1270 1271 if (IS_ERR(handle)) 1272 return PTR_ERR(handle); 1273 if (!handle || !parent_id) 1274 return -EINVAL; 1275 1276 info = handle_to_ti_sci_info(handle); 1277 dev = info->dev; 1278 1279 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT, 1280 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1281 sizeof(*req), sizeof(*resp)); 1282 if (IS_ERR(xfer)) { 1283 ret = PTR_ERR(xfer); 1284 dev_err(dev, "Message alloc failed(%d)\n", ret); 1285 return ret; 1286 } 1287 req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf; 1288 req->dev_id = dev_id; 1289 req->clk_id = clk_id; 1290 1291 ret = ti_sci_do_xfer(info, xfer); 1292 if (ret) { 1293 dev_err(dev, "Mbox send fail %d\n", ret); 1294 goto fail; 1295 } 1296 1297 resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf; 1298 1299 if (!ti_sci_is_response_ack(resp)) 1300 ret = -ENODEV; 1301 else 1302 *parent_id = resp->parent_id; 1303 1304 fail: 1305 ti_sci_put_one_xfer(&info->minfo, xfer); 1306 1307 return ret; 1308 } 1309 1310 /** 1311 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source 1312 * @handle: pointer to TI SCI handle 1313 * @dev_id: Device identifier this request is for 1314 * @clk_id: Clock identifier for the device for this request. 1315 * Each device has it's own set of clock inputs. This indexes 1316 * which clock input to modify. 1317 * @num_parents: Returns he number of parents to the current clock. 1318 * 1319 * Return: 0 if all went well, else returns appropriate error value. 1320 */ 1321 static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle, 1322 u32 dev_id, u8 clk_id, 1323 u8 *num_parents) 1324 { 1325 struct ti_sci_info *info; 1326 struct ti_sci_msg_req_get_clock_num_parents *req; 1327 struct ti_sci_msg_resp_get_clock_num_parents *resp; 1328 struct ti_sci_xfer *xfer; 1329 struct device *dev; 1330 int ret = 0; 1331 1332 if (IS_ERR(handle)) 1333 return PTR_ERR(handle); 1334 if (!handle || !num_parents) 1335 return -EINVAL; 1336 1337 info = handle_to_ti_sci_info(handle); 1338 dev = info->dev; 1339 1340 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, 1341 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1342 sizeof(*req), sizeof(*resp)); 1343 if (IS_ERR(xfer)) { 1344 ret = PTR_ERR(xfer); 1345 dev_err(dev, "Message alloc failed(%d)\n", ret); 1346 return ret; 1347 } 1348 req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf; 1349 req->dev_id = dev_id; 1350 req->clk_id = clk_id; 1351 1352 ret = ti_sci_do_xfer(info, xfer); 1353 if (ret) { 1354 dev_err(dev, "Mbox send fail %d\n", ret); 1355 goto fail; 1356 } 1357 1358 resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf; 1359 1360 if (!ti_sci_is_response_ack(resp)) 1361 ret = -ENODEV; 1362 else 1363 *num_parents = resp->num_parents; 1364 1365 fail: 1366 ti_sci_put_one_xfer(&info->minfo, xfer); 1367 1368 return ret; 1369 } 1370 1371 /** 1372 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency 1373 * @handle: pointer to TI SCI handle 1374 * @dev_id: Device identifier this request is for 1375 * @clk_id: Clock identifier for the device for this request. 1376 * Each device has it's own set of clock inputs. This indexes 1377 * which clock input to modify. 1378 * @min_freq: The minimum allowable frequency in Hz. This is the minimum 1379 * allowable programmed frequency and does not account for clock 1380 * tolerances and jitter. 1381 * @target_freq: The target clock frequency in Hz. A frequency will be 1382 * processed as close to this target frequency as possible. 1383 * @max_freq: The maximum allowable frequency in Hz. This is the maximum 1384 * allowable programmed frequency and does not account for clock 1385 * tolerances and jitter. 1386 * @match_freq: Frequency match in Hz response. 1387 * 1388 * Return: 0 if all went well, else returns appropriate error value. 1389 */ 1390 static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle, 1391 u32 dev_id, u8 clk_id, u64 min_freq, 1392 u64 target_freq, u64 max_freq, 1393 u64 *match_freq) 1394 { 1395 struct ti_sci_info *info; 1396 struct ti_sci_msg_req_query_clock_freq *req; 1397 struct ti_sci_msg_resp_query_clock_freq *resp; 1398 struct ti_sci_xfer *xfer; 1399 struct device *dev; 1400 int ret = 0; 1401 1402 if (IS_ERR(handle)) 1403 return PTR_ERR(handle); 1404 if (!handle || !match_freq) 1405 return -EINVAL; 1406 1407 info = handle_to_ti_sci_info(handle); 1408 dev = info->dev; 1409 1410 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ, 1411 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1412 sizeof(*req), sizeof(*resp)); 1413 if (IS_ERR(xfer)) { 1414 ret = PTR_ERR(xfer); 1415 dev_err(dev, "Message alloc failed(%d)\n", ret); 1416 return ret; 1417 } 1418 req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf; 1419 req->dev_id = dev_id; 1420 req->clk_id = clk_id; 1421 req->min_freq_hz = min_freq; 1422 req->target_freq_hz = target_freq; 1423 req->max_freq_hz = max_freq; 1424 1425 ret = ti_sci_do_xfer(info, xfer); 1426 if (ret) { 1427 dev_err(dev, "Mbox send fail %d\n", ret); 1428 goto fail; 1429 } 1430 1431 resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf; 1432 1433 if (!ti_sci_is_response_ack(resp)) 1434 ret = -ENODEV; 1435 else 1436 *match_freq = resp->freq_hz; 1437 1438 fail: 1439 ti_sci_put_one_xfer(&info->minfo, xfer); 1440 1441 return ret; 1442 } 1443 1444 /** 1445 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock 1446 * @handle: pointer to TI SCI handle 1447 * @dev_id: Device identifier this request is for 1448 * @clk_id: Clock identifier for the device for this request. 1449 * Each device has it's own set of clock inputs. This indexes 1450 * which clock input to modify. 1451 * @min_freq: The minimum allowable frequency in Hz. This is the minimum 1452 * allowable programmed frequency and does not account for clock 1453 * tolerances and jitter. 1454 * @target_freq: The target clock frequency in Hz. A frequency will be 1455 * processed as close to this target frequency as possible. 1456 * @max_freq: The maximum allowable frequency in Hz. This is the maximum 1457 * allowable programmed frequency and does not account for clock 1458 * tolerances and jitter. 1459 * 1460 * Return: 0 if all went well, else returns appropriate error value. 1461 */ 1462 static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle, 1463 u32 dev_id, u8 clk_id, u64 min_freq, 1464 u64 target_freq, u64 max_freq) 1465 { 1466 struct ti_sci_info *info; 1467 struct ti_sci_msg_req_set_clock_freq *req; 1468 struct ti_sci_msg_hdr *resp; 1469 struct ti_sci_xfer *xfer; 1470 struct device *dev; 1471 int ret = 0; 1472 1473 if (IS_ERR(handle)) 1474 return PTR_ERR(handle); 1475 if (!handle) 1476 return -EINVAL; 1477 1478 info = handle_to_ti_sci_info(handle); 1479 dev = info->dev; 1480 1481 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ, 1482 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1483 sizeof(*req), sizeof(*resp)); 1484 if (IS_ERR(xfer)) { 1485 ret = PTR_ERR(xfer); 1486 dev_err(dev, "Message alloc failed(%d)\n", ret); 1487 return ret; 1488 } 1489 req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf; 1490 req->dev_id = dev_id; 1491 req->clk_id = clk_id; 1492 req->min_freq_hz = min_freq; 1493 req->target_freq_hz = target_freq; 1494 req->max_freq_hz = max_freq; 1495 1496 ret = ti_sci_do_xfer(info, xfer); 1497 if (ret) { 1498 dev_err(dev, "Mbox send fail %d\n", ret); 1499 goto fail; 1500 } 1501 1502 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 1503 1504 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 1505 1506 fail: 1507 ti_sci_put_one_xfer(&info->minfo, xfer); 1508 1509 return ret; 1510 } 1511 1512 /** 1513 * ti_sci_cmd_clk_get_freq() - Get current frequency 1514 * @handle: pointer to TI SCI handle 1515 * @dev_id: Device identifier this request is for 1516 * @clk_id: Clock identifier for the device for this request. 1517 * Each device has it's own set of clock inputs. This indexes 1518 * which clock input to modify. 1519 * @freq: Currently frequency in Hz 1520 * 1521 * Return: 0 if all went well, else returns appropriate error value. 1522 */ 1523 static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle, 1524 u32 dev_id, u8 clk_id, u64 *freq) 1525 { 1526 struct ti_sci_info *info; 1527 struct ti_sci_msg_req_get_clock_freq *req; 1528 struct ti_sci_msg_resp_get_clock_freq *resp; 1529 struct ti_sci_xfer *xfer; 1530 struct device *dev; 1531 int ret = 0; 1532 1533 if (IS_ERR(handle)) 1534 return PTR_ERR(handle); 1535 if (!handle || !freq) 1536 return -EINVAL; 1537 1538 info = handle_to_ti_sci_info(handle); 1539 dev = info->dev; 1540 1541 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ, 1542 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1543 sizeof(*req), sizeof(*resp)); 1544 if (IS_ERR(xfer)) { 1545 ret = PTR_ERR(xfer); 1546 dev_err(dev, "Message alloc failed(%d)\n", ret); 1547 return ret; 1548 } 1549 req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf; 1550 req->dev_id = dev_id; 1551 req->clk_id = clk_id; 1552 1553 ret = ti_sci_do_xfer(info, xfer); 1554 if (ret) { 1555 dev_err(dev, "Mbox send fail %d\n", ret); 1556 goto fail; 1557 } 1558 1559 resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf; 1560 1561 if (!ti_sci_is_response_ack(resp)) 1562 ret = -ENODEV; 1563 else 1564 *freq = resp->freq_hz; 1565 1566 fail: 1567 ti_sci_put_one_xfer(&info->minfo, xfer); 1568 1569 return ret; 1570 } 1571 1572 static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle) 1573 { 1574 struct ti_sci_info *info; 1575 struct ti_sci_msg_req_reboot *req; 1576 struct ti_sci_msg_hdr *resp; 1577 struct ti_sci_xfer *xfer; 1578 struct device *dev; 1579 int ret = 0; 1580 1581 if (IS_ERR(handle)) 1582 return PTR_ERR(handle); 1583 if (!handle) 1584 return -EINVAL; 1585 1586 info = handle_to_ti_sci_info(handle); 1587 dev = info->dev; 1588 1589 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET, 1590 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1591 sizeof(*req), sizeof(*resp)); 1592 if (IS_ERR(xfer)) { 1593 ret = PTR_ERR(xfer); 1594 dev_err(dev, "Message alloc failed(%d)\n", ret); 1595 return ret; 1596 } 1597 req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf; 1598 1599 ret = ti_sci_do_xfer(info, xfer); 1600 if (ret) { 1601 dev_err(dev, "Mbox send fail %d\n", ret); 1602 goto fail; 1603 } 1604 1605 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 1606 1607 if (!ti_sci_is_response_ack(resp)) 1608 ret = -ENODEV; 1609 else 1610 ret = 0; 1611 1612 fail: 1613 ti_sci_put_one_xfer(&info->minfo, xfer); 1614 1615 return ret; 1616 } 1617 1618 /* 1619 * ti_sci_setup_ops() - Setup the operations structures 1620 * @info: pointer to TISCI pointer 1621 */ 1622 static void ti_sci_setup_ops(struct ti_sci_info *info) 1623 { 1624 struct ti_sci_ops *ops = &info->handle.ops; 1625 struct ti_sci_core_ops *core_ops = &ops->core_ops; 1626 struct ti_sci_dev_ops *dops = &ops->dev_ops; 1627 struct ti_sci_clk_ops *cops = &ops->clk_ops; 1628 1629 core_ops->reboot_device = ti_sci_cmd_core_reboot; 1630 1631 dops->get_device = ti_sci_cmd_get_device; 1632 dops->idle_device = ti_sci_cmd_idle_device; 1633 dops->put_device = ti_sci_cmd_put_device; 1634 1635 dops->is_valid = ti_sci_cmd_dev_is_valid; 1636 dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt; 1637 dops->is_idle = ti_sci_cmd_dev_is_idle; 1638 dops->is_stop = ti_sci_cmd_dev_is_stop; 1639 dops->is_on = ti_sci_cmd_dev_is_on; 1640 dops->is_transitioning = ti_sci_cmd_dev_is_trans; 1641 dops->set_device_resets = ti_sci_cmd_set_device_resets; 1642 dops->get_device_resets = ti_sci_cmd_get_device_resets; 1643 1644 cops->get_clock = ti_sci_cmd_get_clock; 1645 cops->idle_clock = ti_sci_cmd_idle_clock; 1646 cops->put_clock = ti_sci_cmd_put_clock; 1647 cops->is_auto = ti_sci_cmd_clk_is_auto; 1648 cops->is_on = ti_sci_cmd_clk_is_on; 1649 cops->is_off = ti_sci_cmd_clk_is_off; 1650 1651 cops->set_parent = ti_sci_cmd_clk_set_parent; 1652 cops->get_parent = ti_sci_cmd_clk_get_parent; 1653 cops->get_num_parents = ti_sci_cmd_clk_get_num_parents; 1654 1655 cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq; 1656 cops->set_freq = ti_sci_cmd_clk_set_freq; 1657 cops->get_freq = ti_sci_cmd_clk_get_freq; 1658 } 1659 1660 /** 1661 * ti_sci_get_handle() - Get the TI SCI handle for a device 1662 * @dev: Pointer to device for which we want SCI handle 1663 * 1664 * NOTE: The function does not track individual clients of the framework 1665 * and is expected to be maintained by caller of TI SCI protocol library. 1666 * ti_sci_put_handle must be balanced with successful ti_sci_get_handle 1667 * Return: pointer to handle if successful, else: 1668 * -EPROBE_DEFER if the instance is not ready 1669 * -ENODEV if the required node handler is missing 1670 * -EINVAL if invalid conditions are encountered. 1671 */ 1672 const struct ti_sci_handle *ti_sci_get_handle(struct device *dev) 1673 { 1674 struct device_node *ti_sci_np; 1675 struct list_head *p; 1676 struct ti_sci_handle *handle = NULL; 1677 struct ti_sci_info *info; 1678 1679 if (!dev) { 1680 pr_err("I need a device pointer\n"); 1681 return ERR_PTR(-EINVAL); 1682 } 1683 ti_sci_np = of_get_parent(dev->of_node); 1684 if (!ti_sci_np) { 1685 dev_err(dev, "No OF information\n"); 1686 return ERR_PTR(-EINVAL); 1687 } 1688 1689 mutex_lock(&ti_sci_list_mutex); 1690 list_for_each(p, &ti_sci_list) { 1691 info = list_entry(p, struct ti_sci_info, node); 1692 if (ti_sci_np == info->dev->of_node) { 1693 handle = &info->handle; 1694 info->users++; 1695 break; 1696 } 1697 } 1698 mutex_unlock(&ti_sci_list_mutex); 1699 of_node_put(ti_sci_np); 1700 1701 if (!handle) 1702 return ERR_PTR(-EPROBE_DEFER); 1703 1704 return handle; 1705 } 1706 EXPORT_SYMBOL_GPL(ti_sci_get_handle); 1707 1708 /** 1709 * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle 1710 * @handle: Handle acquired by ti_sci_get_handle 1711 * 1712 * NOTE: The function does not track individual clients of the framework 1713 * and is expected to be maintained by caller of TI SCI protocol library. 1714 * ti_sci_put_handle must be balanced with successful ti_sci_get_handle 1715 * 1716 * Return: 0 is successfully released 1717 * if an error pointer was passed, it returns the error value back, 1718 * if null was passed, it returns -EINVAL; 1719 */ 1720 int ti_sci_put_handle(const struct ti_sci_handle *handle) 1721 { 1722 struct ti_sci_info *info; 1723 1724 if (IS_ERR(handle)) 1725 return PTR_ERR(handle); 1726 if (!handle) 1727 return -EINVAL; 1728 1729 info = handle_to_ti_sci_info(handle); 1730 mutex_lock(&ti_sci_list_mutex); 1731 if (!WARN_ON(!info->users)) 1732 info->users--; 1733 mutex_unlock(&ti_sci_list_mutex); 1734 1735 return 0; 1736 } 1737 EXPORT_SYMBOL_GPL(ti_sci_put_handle); 1738 1739 static void devm_ti_sci_release(struct device *dev, void *res) 1740 { 1741 const struct ti_sci_handle **ptr = res; 1742 const struct ti_sci_handle *handle = *ptr; 1743 int ret; 1744 1745 ret = ti_sci_put_handle(handle); 1746 if (ret) 1747 dev_err(dev, "failed to put handle %d\n", ret); 1748 } 1749 1750 /** 1751 * devm_ti_sci_get_handle() - Managed get handle 1752 * @dev: device for which we want SCI handle for. 1753 * 1754 * NOTE: This releases the handle once the device resources are 1755 * no longer needed. MUST NOT BE released with ti_sci_put_handle. 1756 * The function does not track individual clients of the framework 1757 * and is expected to be maintained by caller of TI SCI protocol library. 1758 * 1759 * Return: 0 if all went fine, else corresponding error. 1760 */ 1761 const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev) 1762 { 1763 const struct ti_sci_handle **ptr; 1764 const struct ti_sci_handle *handle; 1765 1766 ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL); 1767 if (!ptr) 1768 return ERR_PTR(-ENOMEM); 1769 handle = ti_sci_get_handle(dev); 1770 1771 if (!IS_ERR(handle)) { 1772 *ptr = handle; 1773 devres_add(dev, ptr); 1774 } else { 1775 devres_free(ptr); 1776 } 1777 1778 return handle; 1779 } 1780 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle); 1781 1782 static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode, 1783 void *cmd) 1784 { 1785 struct ti_sci_info *info = reboot_to_ti_sci_info(nb); 1786 const struct ti_sci_handle *handle = &info->handle; 1787 1788 ti_sci_cmd_core_reboot(handle); 1789 1790 /* call fail OR pass, we should not be here in the first place */ 1791 return NOTIFY_BAD; 1792 } 1793 1794 /* Description for K2G */ 1795 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = { 1796 .host_id = 2, 1797 /* Conservative duration */ 1798 .max_rx_timeout_ms = 1000, 1799 /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */ 1800 .max_msgs = 20, 1801 .max_msg_size = 64, 1802 }; 1803 1804 static const struct of_device_id ti_sci_of_match[] = { 1805 {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc}, 1806 { /* Sentinel */ }, 1807 }; 1808 MODULE_DEVICE_TABLE(of, ti_sci_of_match); 1809 1810 static int ti_sci_probe(struct platform_device *pdev) 1811 { 1812 struct device *dev = &pdev->dev; 1813 const struct of_device_id *of_id; 1814 const struct ti_sci_desc *desc; 1815 struct ti_sci_xfer *xfer; 1816 struct ti_sci_info *info = NULL; 1817 struct ti_sci_xfers_info *minfo; 1818 struct mbox_client *cl; 1819 int ret = -EINVAL; 1820 int i; 1821 int reboot = 0; 1822 1823 of_id = of_match_device(ti_sci_of_match, dev); 1824 if (!of_id) { 1825 dev_err(dev, "OF data missing\n"); 1826 return -EINVAL; 1827 } 1828 desc = of_id->data; 1829 1830 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); 1831 if (!info) 1832 return -ENOMEM; 1833 1834 info->dev = dev; 1835 info->desc = desc; 1836 reboot = of_property_read_bool(dev->of_node, 1837 "ti,system-reboot-controller"); 1838 INIT_LIST_HEAD(&info->node); 1839 minfo = &info->minfo; 1840 1841 /* 1842 * Pre-allocate messages 1843 * NEVER allocate more than what we can indicate in hdr.seq 1844 * if we have data description bug, force a fix.. 1845 */ 1846 if (WARN_ON(desc->max_msgs >= 1847 1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq))) 1848 return -EINVAL; 1849 1850 minfo->xfer_block = devm_kcalloc(dev, 1851 desc->max_msgs, 1852 sizeof(*minfo->xfer_block), 1853 GFP_KERNEL); 1854 if (!minfo->xfer_block) 1855 return -ENOMEM; 1856 1857 minfo->xfer_alloc_table = devm_kcalloc(dev, 1858 BITS_TO_LONGS(desc->max_msgs), 1859 sizeof(unsigned long), 1860 GFP_KERNEL); 1861 if (!minfo->xfer_alloc_table) 1862 return -ENOMEM; 1863 bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs); 1864 1865 /* Pre-initialize the buffer pointer to pre-allocated buffers */ 1866 for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) { 1867 xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size, 1868 GFP_KERNEL); 1869 if (!xfer->xfer_buf) 1870 return -ENOMEM; 1871 1872 xfer->tx_message.buf = xfer->xfer_buf; 1873 init_completion(&xfer->done); 1874 } 1875 1876 ret = ti_sci_debugfs_create(pdev, info); 1877 if (ret) 1878 dev_warn(dev, "Failed to create debug file\n"); 1879 1880 platform_set_drvdata(pdev, info); 1881 1882 cl = &info->cl; 1883 cl->dev = dev; 1884 cl->tx_block = false; 1885 cl->rx_callback = ti_sci_rx_callback; 1886 cl->knows_txdone = true; 1887 1888 spin_lock_init(&minfo->xfer_lock); 1889 sema_init(&minfo->sem_xfer_count, desc->max_msgs); 1890 1891 info->chan_rx = mbox_request_channel_byname(cl, "rx"); 1892 if (IS_ERR(info->chan_rx)) { 1893 ret = PTR_ERR(info->chan_rx); 1894 goto out; 1895 } 1896 1897 info->chan_tx = mbox_request_channel_byname(cl, "tx"); 1898 if (IS_ERR(info->chan_tx)) { 1899 ret = PTR_ERR(info->chan_tx); 1900 goto out; 1901 } 1902 ret = ti_sci_cmd_get_revision(info); 1903 if (ret) { 1904 dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret); 1905 goto out; 1906 } 1907 1908 ti_sci_setup_ops(info); 1909 1910 if (reboot) { 1911 info->nb.notifier_call = tisci_reboot_handler; 1912 info->nb.priority = 128; 1913 1914 ret = register_restart_handler(&info->nb); 1915 if (ret) { 1916 dev_err(dev, "reboot registration fail(%d)\n", ret); 1917 return ret; 1918 } 1919 } 1920 1921 dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n", 1922 info->handle.version.abi_major, info->handle.version.abi_minor, 1923 info->handle.version.firmware_revision, 1924 info->handle.version.firmware_description); 1925 1926 mutex_lock(&ti_sci_list_mutex); 1927 list_add_tail(&info->node, &ti_sci_list); 1928 mutex_unlock(&ti_sci_list_mutex); 1929 1930 return of_platform_populate(dev->of_node, NULL, NULL, dev); 1931 out: 1932 if (!IS_ERR(info->chan_tx)) 1933 mbox_free_channel(info->chan_tx); 1934 if (!IS_ERR(info->chan_rx)) 1935 mbox_free_channel(info->chan_rx); 1936 debugfs_remove(info->d); 1937 return ret; 1938 } 1939 1940 static int ti_sci_remove(struct platform_device *pdev) 1941 { 1942 struct ti_sci_info *info; 1943 struct device *dev = &pdev->dev; 1944 int ret = 0; 1945 1946 of_platform_depopulate(dev); 1947 1948 info = platform_get_drvdata(pdev); 1949 1950 if (info->nb.notifier_call) 1951 unregister_restart_handler(&info->nb); 1952 1953 mutex_lock(&ti_sci_list_mutex); 1954 if (info->users) 1955 ret = -EBUSY; 1956 else 1957 list_del(&info->node); 1958 mutex_unlock(&ti_sci_list_mutex); 1959 1960 if (!ret) { 1961 ti_sci_debugfs_destroy(pdev, info); 1962 1963 /* Safe to free channels since no more users */ 1964 mbox_free_channel(info->chan_tx); 1965 mbox_free_channel(info->chan_rx); 1966 } 1967 1968 return ret; 1969 } 1970 1971 static struct platform_driver ti_sci_driver = { 1972 .probe = ti_sci_probe, 1973 .remove = ti_sci_remove, 1974 .driver = { 1975 .name = "ti-sci", 1976 .of_match_table = of_match_ptr(ti_sci_of_match), 1977 }, 1978 }; 1979 module_platform_driver(ti_sci_driver); 1980 1981 MODULE_LICENSE("GPL v2"); 1982 MODULE_DESCRIPTION("TI System Control Interface(SCI) driver"); 1983 MODULE_AUTHOR("Nishanth Menon"); 1984 MODULE_ALIAS("platform:ti-sci"); 1985