// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "common.h"
#include "notify.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

/* SCMI status codes returned by the platform firmware in msg headers */
enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
	SCMI_ERR_MAX
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;
/* Device requests recorded per protocol, protected by the mutex below */
static DEFINE_IDR(scmi_requested_devices);
static DEFINE_MUTEX(scmi_requested_devices_mtx);

/* One recorded request for an SCMI device against a protocol */
struct scmi_requested_dev {
	const struct scmi_device_id *id_table;
	struct list_head node;
};

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};

/**
 * struct scmi_protocol_instance - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres management.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *	initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform in
 * which is defined by DT and implemented by the SCMI server fw.
 */
struct scmi_protocol_instance {
	const struct scmi_handle *handle;
	const struct scmi_protocol *proto;
	void *gid;
	refcount_t users;
	void *priv;
	struct scmi_protocol_handle ph;
};

#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
114 * @handle: Instance of SCMI handle to send to clients 115 * @tx_minfo: Universal Transmit Message management info 116 * @rx_minfo: Universal Receive Message management info 117 * @tx_idr: IDR object to map protocol id to Tx channel info pointer 118 * @rx_idr: IDR object to map protocol id to Rx channel info pointer 119 * @protocols: IDR for protocols' instance descriptors initialized for 120 * this SCMI instance: populated on protocol's first attempted 121 * usage. 122 * @protocols_mtx: A mutex to protect protocols instances initialization. 123 * @protocols_imp: List of protocols implemented, currently maximum of 124 * MAX_PROTOCOLS_IMP elements allocated by the base protocol 125 * @active_protocols: IDR storing device_nodes for protocols actually defined 126 * in the DT and confirmed as implemented by fw. 127 * @notify_priv: Pointer to private data structure specific to notifications. 128 * @node: List head 129 * @users: Number of users of this instance 130 */ 131 struct scmi_info { 132 struct device *dev; 133 const struct scmi_desc *desc; 134 struct scmi_revision_info version; 135 struct scmi_handle handle; 136 struct scmi_xfers_info tx_minfo; 137 struct scmi_xfers_info rx_minfo; 138 struct idr tx_idr; 139 struct idr rx_idr; 140 struct idr protocols; 141 /* Ensure mutual exclusive access to protocols instance array */ 142 struct mutex protocols_mtx; 143 u8 *protocols_imp; 144 struct idr active_protocols; 145 void *notify_priv; 146 struct list_head node; 147 int users; 148 }; 149 150 #define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle) 151 152 static const int scmi_linux_errmap[] = { 153 /* better than switch case as long as return value is continuous */ 154 0, /* SCMI_SUCCESS */ 155 -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */ 156 -EINVAL, /* SCMI_ERR_PARAM */ 157 -EACCES, /* SCMI_ERR_ACCESS */ 158 -ENOENT, /* SCMI_ERR_ENTRY */ 159 -ERANGE, /* SCMI_ERR_RANGE */ 160 -EBUSY, /* SCMI_ERR_BUSY */ 161 -ECOMM, /* SCMI_ERR_COMMS */ 162 -EIO, /* 
SCMI_ERR_GENERIC */ 163 -EREMOTEIO, /* SCMI_ERR_HARDWARE */ 164 -EPROTO, /* SCMI_ERR_PROTOCOL */ 165 }; 166 167 static inline int scmi_to_linux_errno(int errno) 168 { 169 if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX) 170 return scmi_linux_errmap[-errno]; 171 return -EIO; 172 } 173 174 /** 175 * scmi_dump_header_dbg() - Helper to dump a message header. 176 * 177 * @dev: Device pointer corresponding to the SCMI entity 178 * @hdr: pointer to header. 179 */ 180 static inline void scmi_dump_header_dbg(struct device *dev, 181 struct scmi_msg_hdr *hdr) 182 { 183 dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n", 184 hdr->id, hdr->seq, hdr->protocol_id); 185 } 186 187 void scmi_notification_instance_data_set(const struct scmi_handle *handle, 188 void *priv) 189 { 190 struct scmi_info *info = handle_to_scmi_info(handle); 191 192 info->notify_priv = priv; 193 /* Ensure updated protocol private date are visible */ 194 smp_wmb(); 195 } 196 197 void *scmi_notification_instance_data_get(const struct scmi_handle *handle) 198 { 199 struct scmi_info *info = handle_to_scmi_info(handle); 200 201 /* Ensure protocols_private_data has been updated */ 202 smp_rmb(); 203 return info->notify_priv; 204 } 205 206 /** 207 * scmi_xfer_get() - Allocate one message 208 * 209 * @handle: Pointer to SCMI entity handle 210 * @minfo: Pointer to Tx/Rx Message management info based on channel type 211 * 212 * Helper function which is used by various message functions that are 213 * exposed to clients of this driver for allocating a message traffic event. 214 * 215 * This function can sleep depending on pending requests already in the system 216 * for the SCMI entity. Further, this also holds a spinlock to maintain 217 * integrity of internal data structures. 218 * 219 * Return: 0 if all went fine, else corresponding error. 
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		/* All pre-allocated slots busy */
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* The bitmap index doubles as the message sequence number */
	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);
	/* Globally unique id used only for tracing/profiling */
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	return xfer;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

/*
 * Handle an unsolicited platform notification: grab a transient xfer from
 * the Rx pool, fetch the payload, dispatch it to the notification core and
 * immediately release both the xfer and the channel.
 */
static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	/* Timestamp taken as early as possible, before any processing */
	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	scmi_dump_header_dbg(dev, &xfer->hdr);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);
	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	__scmi_xfer_put(minfo, xfer);

	info->desc->ops->clear_channel(cinfo);
}

/*
 * Handle an (immediate or delayed) response to a command previously sent on
 * the Tx channel: validate it against the pending xfer identified by
 * @xfer_id, fetch the payload and complete the waiter.
 */
static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u16 xfer_id, u8 msg_type)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
		dev_err(dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer_id);
		info->desc->ops->clear_channel(cinfo);
		/* It was unexpected, so nobody will clear the xfer if not us */
		__scmi_xfer_put(minfo, xfer);
		return;
	}

	scmi_dump_header_dbg(dev, &xfer->hdr);

	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   msg_type);

	if (msg_type == MSG_TYPE_DELAYED_RESP) {
		/* Delayed responses own the channel until cleared here */
		info->desc->ops->clear_channel(cinfo);
		complete(xfer->async_done);
	} else {
		complete(&xfer->done);
	}
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
365 */ 366 void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr) 367 { 368 u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr); 369 u8 msg_type = MSG_XTRACT_TYPE(msg_hdr); 370 371 switch (msg_type) { 372 case MSG_TYPE_NOTIFICATION: 373 scmi_handle_notification(cinfo, msg_hdr); 374 break; 375 case MSG_TYPE_COMMAND: 376 case MSG_TYPE_DELAYED_RESP: 377 scmi_handle_response(cinfo, xfer_id, msg_type); 378 break; 379 default: 380 WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type); 381 break; 382 } 383 } 384 385 /** 386 * xfer_put() - Release a transmit message 387 * 388 * @ph: Pointer to SCMI protocol handle 389 * @xfer: message that was reserved by scmi_xfer_get 390 */ 391 static void xfer_put(const struct scmi_protocol_handle *ph, 392 struct scmi_xfer *xfer) 393 { 394 const struct scmi_protocol_instance *pi = ph_to_pi(ph); 395 struct scmi_info *info = handle_to_scmi_info(pi->handle); 396 397 __scmi_xfer_put(&info->tx_minfo, xfer); 398 } 399 400 #define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC) 401 402 static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, 403 struct scmi_xfer *xfer, ktime_t stop) 404 { 405 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); 406 407 return info->desc->ops->poll_done(cinfo, xfer) || 408 ktime_after(ktime_get(), stop); 409 } 410 411 /** 412 * do_xfer() - Do one transfer 413 * 414 * @ph: Pointer to SCMI protocol handle 415 * @xfer: Transfer to initiate and wait for response 416 * 417 * Return: -ETIMEDOUT in case of no response, if transmit error, 418 * return corresponding error, else if all goes well, 419 * return 0. 
 */
static int do_xfer(const struct scmi_protocol_handle *ph,
		   struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	/*
	 * Re-instate protocol id here from protocol handle so that cannot be
	 * overridden by mistake (or malice) by the protocol code mangling with
	 * the scmi_xfer structure.
	 */
	xfer->hdr.protocol_id = pi->proto->id;

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		return ret;
	}

	if (xfer->hdr.poll_completion) {
		/* Busy-wait on the transport up to SCMI_MAX_POLL_TO_NS */
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		/* Only fetch if we came out of the spin before the deadline */
		if (ktime_before(ktime_get(), stop))
			info->desc->ops->fetch_response(cinfo, xfer);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	/* Transport-level success: translate the firmware status, if any */
	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}

/* Restore the receive buffer length to the transport maximum for reuse */
static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
			      struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	xfer->rx.len = info->desc->max_msg_size;
}

#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
 *	   return corresponding error, else if all goes well, return 0.
 */
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
				 struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->hdr.protocol_id = pi->proto->id;

	/* A non-NULL async_done marks this xfer as expecting a delayed resp */
	xfer->async_done = &async_response;

	ret = do_xfer(ph, xfer);
	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
		ret = -ETIMEDOUT;

	xfer->async_done = NULL;
	return ret;
}

/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @ph: Pointer to SCMI protocol handle
 * @msg_id: Message identifier
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialise the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
static int xfer_get_init(const struct scmi_protocol_handle *ph,
			 u8 msg_id, size_t tx_size, size_t rx_size,
			 struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(pi->handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	/* A zero rx_size means "expect up to the transport maximum" */
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.protocol_id = pi->proto->id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

/**
 * version_get() - command to get the revision of the SCMI entity
 *
 * @ph: Pointer to SCMI protocol handle
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
	if (ret)
		return ret;

	ret = do_xfer(ph, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	xfer_put(ph, t);
	return ret;
}

/**
 * scmi_set_protocol_priv - Set protocol specific data at init time
 *
 * @ph: A reference to the protocol handle.
 * @priv: The private data to set.
 *
 * Return: 0 on Success
 */
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
				  void *priv)
{
	struct scmi_protocol_instance *pi = ph_to_pi(ph);

	pi->priv = priv;

	return 0;
}

/**
 * scmi_get_protocol_priv - Get protocol specific data set at init time
 *
 * @ph: A reference to the protocol handle.
 *
 * Return: Protocol private data if any was set.
626 */ 627 static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph) 628 { 629 const struct scmi_protocol_instance *pi = ph_to_pi(ph); 630 631 return pi->priv; 632 } 633 634 static const struct scmi_xfer_ops xfer_ops = { 635 .version_get = version_get, 636 .xfer_get_init = xfer_get_init, 637 .reset_rx_to_maxsz = reset_rx_to_maxsz, 638 .do_xfer = do_xfer, 639 .do_xfer_with_response = do_xfer_with_response, 640 .xfer_put = xfer_put, 641 }; 642 643 /** 644 * scmi_revision_area_get - Retrieve version memory area. 645 * 646 * @ph: A reference to the protocol handle. 647 * 648 * A helper to grab the version memory area reference during SCMI Base protocol 649 * initialization. 650 * 651 * Return: A reference to the version memory area associated to the SCMI 652 * instance underlying this protocol handle. 653 */ 654 struct scmi_revision_info * 655 scmi_revision_area_get(const struct scmi_protocol_handle *ph) 656 { 657 const struct scmi_protocol_instance *pi = ph_to_pi(ph); 658 659 return pi->handle->version; 660 } 661 662 /** 663 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol 664 * instance descriptor. 665 * @info: The reference to the related SCMI instance. 666 * @proto: The protocol descriptor. 667 * 668 * Allocate a new protocol instance descriptor, using the provided @proto 669 * description, against the specified SCMI instance @info, and initialize it; 670 * all resources management is handled via a dedicated per-protocol devres 671 * group. 672 * 673 * Context: Assumes to be called with @protocols_mtx already acquired. 674 * Return: A reference to a freshly allocated and initialized protocol instance 675 * or ERR_PTR on failure. On failure the @proto reference is at first 676 * put using @scmi_protocol_put() before releasing all the devres group. 
 */
static struct scmi_protocol_instance *
scmi_alloc_init_protocol_instance(struct scmi_info *info,
				  const struct scmi_protocol *proto)
{
	int ret = -ENOMEM;
	void *gid;
	struct scmi_protocol_instance *pi;
	const struct scmi_handle *handle = &info->handle;

	/* Protocol specific devres group */
	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
	if (!gid) {
		scmi_protocol_put(proto->id);
		goto out;
	}

	pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
	if (!pi)
		goto clean;

	pi->gid = gid;
	pi->proto = proto;
	pi->handle = handle;
	pi->ph.dev = handle->dev;
	pi->ph.xops = &xfer_ops;
	pi->ph.set_priv = scmi_set_protocol_priv;
	pi->ph.get_priv = scmi_get_protocol_priv;
	refcount_set(&pi->users, 1);
	/* proto->init is assured NON NULL by scmi_protocol_register */
	ret = pi->proto->instance_init(&pi->ph);
	if (ret)
		goto clean;

	/* Slot must land exactly on proto->id or the instance is unusable */
	ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
			GFP_KERNEL);
	if (ret != proto->id)
		goto clean;
	/*
	 * NOTE(review): on this idr_alloc failure path instance_init() has
	 * already run but instance_deinit() is not called before the devres
	 * group is released — confirm protocols tolerate this teardown order.
	 */

	/*
	 * Warn but ignore events registration errors since we do not want
	 * to skip whole protocols if their notifications are messed up.
	 */
	if (pi->proto->events) {
		ret = scmi_register_protocol_events(handle, pi->proto->id,
						    &pi->ph,
						    pi->proto->events);
		if (ret)
			dev_warn(handle->dev,
				 "Protocol:%X - Events Registration Failed - err:%d\n",
				 pi->proto->id, ret);
	}

	devres_close_group(handle->dev, pi->gid);
	dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);

	return pi;

clean:
	/* Take care to put the protocol module's owner before releasing all */
	scmi_protocol_put(proto->id);
	devres_release_group(handle->dev, gid);
out:
	return ERR_PTR(ret);
}

/**
 * scmi_get_protocol_instance - Protocol initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * In case the required protocol has never been requested before for this
 * instance, allocate and initialize all the needed structures while handling
 * resource allocation with a dedicated per-protocol devres subgroup.
 *
 * Return: A reference to an initialized protocol instance or error on failure:
 *	   in particular returns -EPROBE_DEFER when the desired protocol could
 *	   NOT be found.
 */
static struct scmi_protocol_instance * __must_check
scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_protocol_instance *pi;
	struct scmi_info *info = handle_to_scmi_info(handle);

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);

	if (pi) {
		/* Already initialized: just account for one more user */
		refcount_inc(&pi->users);
	} else {
		const struct scmi_protocol *proto;

		/* Fails if protocol not registered on bus */
		proto = scmi_protocol_get(protocol_id);
		if (proto)
			pi = scmi_alloc_init_protocol_instance(info, proto);
		else
			pi = ERR_PTR(-EPROBE_DEFER);
	}
	mutex_unlock(&info->protocols_mtx);

	return pi;
}

/**
 * scmi_protocol_acquire - Protocol acquire
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Register a new user for the requested protocol on the specified SCMI
 * platform instance, possibly triggering its initialization on first user.
 *
 * Return: 0 if protocol was acquired successfully.
 */
int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
{
	return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
}

/**
 * scmi_protocol_release - Protocol de-initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Remove one user for the specified protocol and triggers de-initialization
 * and resources de-allocation once the last user has gone.
 */
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_protocol_instance *pi;

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);
	if (WARN_ON(!pi))
		goto out;

	if (refcount_dec_and_test(&pi->users)) {
		/* pi itself lives in the devres group: save gid first */
		void *gid = pi->gid;

		if (pi->proto->events)
			scmi_deregister_protocol_events(handle, protocol_id);

		if (pi->proto->instance_deinit)
			pi->proto->instance_deinit(&pi->ph);

		idr_remove(&info->protocols, protocol_id);

		scmi_protocol_put(protocol_id);

		/* Frees pi and every devm allocation made by the protocol */
		devres_release_group(handle->dev, gid);
		dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
			protocol_id);
	}

out:
	mutex_unlock(&info->protocols_mtx);
}

/*
 * Record the list of implemented protocols reported by the Base protocol;
 * @prot_imp is expected to hold up to MAX_PROTOCOLS_IMP entries.
 */
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
				     u8 *prot_imp)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	info->protocols_imp = prot_imp;
}

/* Linear scan of the implemented-protocols list filled in by Base protocol */
static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

/* Devres payload tracking one protocol hold taken on behalf of a driver */
struct scmi_protocol_devres {
	const struct scmi_handle *handle;
	u8 protocol_id;
};

/* Devres release hook: drop the protocol hold recorded in @res */
static void scmi_devm_release_protocol(struct device *dev, void *res)
{
	struct scmi_protocol_devres *dres = res;

	scmi_protocol_release(dres->handle, dres->protocol_id);
}

/**
 * scmi_devm_protocol_get - Devres managed get
protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 * @ph: A pointer reference used to pass back the associated protocol handle.
 *
 * Get hold of a protocol accounting for its usage, eventually triggering its
 * initialization, and returning the protocol specific operations and related
 * protocol handle which will be used as first argument in most of the
 * protocols operations methods.
 * Being a devres based managed method, protocol hold will be automatically
 * released, and possibly de-initialized on last user, once the SCMI driver
 * owning the scmi_device is unbound from it.
 *
 * Return: A reference to the requested protocol operations or error.
 *	   Must be checked for errors by caller.
 */
static const void __must_check *
scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
		       struct scmi_protocol_handle **ph)
{
	struct scmi_protocol_instance *pi;
	struct scmi_protocol_devres *dres;
	struct scmi_handle *handle = sdev->handle;

	if (!ph)
		return ERR_PTR(-EINVAL);

	/* Allocate the devres record first so failure cannot leak the hold */
	dres = devres_alloc(scmi_devm_release_protocol,
			    sizeof(*dres), GFP_KERNEL);
	if (!dres)
		return ERR_PTR(-ENOMEM);

	pi = scmi_get_protocol_instance(handle, protocol_id);
	if (IS_ERR(pi)) {
		devres_free(dres);
		return pi;
	}

	dres->handle = handle;
	dres->protocol_id = protocol_id;
	devres_add(&sdev->dev, dres);

	*ph = &pi->ph;

	return pi->proto->ops;
}

/* Devres match: select the record holding @data's protocol_id */
static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
{
	struct scmi_protocol_devres *dres = res;

	if (WARN_ON(!dres || !data))
		return 0;

	return dres->protocol_id == *((u8 *)data);
}

/**
 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
 * @sdev: A reference
to an scmi_device whose embedded struct device is to 935 * be used for devres accounting. 936 * @protocol_id: The protocol being requested. 937 * 938 * Explicitly release a protocol hold previously obtained calling the above 939 * @scmi_devm_protocol_get. 940 */ 941 static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id) 942 { 943 int ret; 944 945 ret = devres_release(&sdev->dev, scmi_devm_release_protocol, 946 scmi_devm_protocol_match, &protocol_id); 947 WARN_ON(ret); 948 } 949 950 static inline 951 struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info) 952 { 953 info->users++; 954 return &info->handle; 955 } 956 957 /** 958 * scmi_handle_get() - Get the SCMI handle for a device 959 * 960 * @dev: pointer to device for which we want SCMI handle 961 * 962 * NOTE: The function does not track individual clients of the framework 963 * and is expected to be maintained by caller of SCMI protocol library. 964 * scmi_handle_put must be balanced with successful scmi_handle_get 965 * 966 * Return: pointer to handle if successful, NULL on error 967 */ 968 struct scmi_handle *scmi_handle_get(struct device *dev) 969 { 970 struct list_head *p; 971 struct scmi_info *info; 972 struct scmi_handle *handle = NULL; 973 974 mutex_lock(&scmi_list_mutex); 975 list_for_each(p, &scmi_list) { 976 info = list_entry(p, struct scmi_info, node); 977 if (dev->parent == info->dev) { 978 handle = scmi_handle_get_from_info_unlocked(info); 979 break; 980 } 981 } 982 mutex_unlock(&scmi_list_mutex); 983 984 return handle; 985 } 986 987 /** 988 * scmi_handle_put() - Release the handle acquired by scmi_handle_get 989 * 990 * @handle: handle acquired by scmi_handle_get 991 * 992 * NOTE: The function does not track individual clients of the framework 993 * and is expected to be maintained by caller of SCMI protocol library. 
994 * scmi_handle_put must be balanced with successful scmi_handle_get 995 * 996 * Return: 0 is successfully released 997 * if null was passed, it returns -EINVAL; 998 */ 999 int scmi_handle_put(const struct scmi_handle *handle) 1000 { 1001 struct scmi_info *info; 1002 1003 if (!handle) 1004 return -EINVAL; 1005 1006 info = handle_to_scmi_info(handle); 1007 mutex_lock(&scmi_list_mutex); 1008 if (!WARN_ON(!info->users)) 1009 info->users--; 1010 mutex_unlock(&scmi_list_mutex); 1011 1012 return 0; 1013 } 1014 1015 static int __scmi_xfer_info_init(struct scmi_info *sinfo, 1016 struct scmi_xfers_info *info) 1017 { 1018 int i; 1019 struct scmi_xfer *xfer; 1020 struct device *dev = sinfo->dev; 1021 const struct scmi_desc *desc = sinfo->desc; 1022 1023 /* Pre-allocated messages, no more than what hdr.seq can support */ 1024 if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) { 1025 dev_err(dev, "Maximum message of %d exceeds supported %ld\n", 1026 desc->max_msg, MSG_TOKEN_MAX); 1027 return -EINVAL; 1028 } 1029 1030 info->xfer_block = devm_kcalloc(dev, desc->max_msg, 1031 sizeof(*info->xfer_block), GFP_KERNEL); 1032 if (!info->xfer_block) 1033 return -ENOMEM; 1034 1035 info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg), 1036 sizeof(long), GFP_KERNEL); 1037 if (!info->xfer_alloc_table) 1038 return -ENOMEM; 1039 1040 /* Pre-initialize the buffer pointer to pre-allocated buffers */ 1041 for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) { 1042 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size, 1043 GFP_KERNEL); 1044 if (!xfer->rx.buf) 1045 return -ENOMEM; 1046 1047 xfer->tx.buf = xfer->rx.buf; 1048 init_completion(&xfer->done); 1049 } 1050 1051 spin_lock_init(&info->xfer_lock); 1052 1053 return 0; 1054 } 1055 1056 static int scmi_xfer_info_init(struct scmi_info *sinfo) 1057 { 1058 int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo); 1059 1060 if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE)) 1061 ret = 
__scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);

	return ret;
}

/*
 * scmi_chan_setup() - Find or create the channel for a protocol/direction
 *
 * @info: The SCMI instance descriptor
 * @dev: The device node for which the channel is requested
 * @prot_id: The protocol ID the channel will serve
 * @tx: True for the transmit channel, false for receive
 *
 * If a channel for @prot_id already exists in the proper IDR it is reused;
 * if the transport exposes no dedicated channel for this protocol, the Base
 * protocol channel is shared instead. Newly allocated channel descriptors
 * are devres-managed on the SCMI platform device.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
			   int prot_id, bool tx)
{
	int ret, idx;
	struct scmi_chan_info *cinfo;
	struct idr *idr;

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	/* check if already allocated, used for multiple device per protocol */
	cinfo = idr_find(idr, prot_id);
	if (cinfo)
		return 0;

	if (!info->desc->ops->chan_available(dev, idx)) {
		/* No dedicated channel: fall back to the Base channel */
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
	if (ret)
		return ret;

idr_alloc:
	/* Register (possibly shared) channel under this protocol's slot */
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

/*
 * scmi_txrx_setup() - Set up Tx and, when available, Rx channels for
 * @prot_id. Only Tx failures are reported since Rx is optional.
 */
static inline int
scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret = scmi_chan_setup(info, dev, prot_id, true);

	if (!ret) /* Rx is optional, hence no error check */
		scmi_chan_setup(info, dev, prot_id, false);

	return ret;
}

/**
 * scmi_get_protocol_device - Helper to get/create an SCMI device.
 *
 * @np: A device node representing a valid active protocols for the referred
 * SCMI instance.
 * @info: The referred SCMI instance for which we are getting/creating this
 * device.
 * @prot_id: The protocol ID.
 * @name: The device name.
 *
 * Referring to the specific SCMI instance identified by @info, this helper
 * takes care to return a properly initialized device matching the requested
 * @proto_id and @name: if device was still not existent it is created as a
 * child of the specified SCMI instance @info and its transport properly
 * initialized as usual.
 */
static inline struct scmi_device *
scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
			 int prot_id, const char *name)
{
	struct scmi_device *sdev;

	/* Already created for this parent SCMI instance ? */
	sdev = scmi_child_dev_find(info->dev, prot_id, name);
	if (sdev)
		return sdev;

	pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);

	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return NULL;
	}

	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		/* Destroy the half-initialized device on transport failure */
		scmi_device_destroy(sdev);
		return NULL;
	}

	return sdev;
}

/*
 * scmi_create_protocol_device() - Get/create the named device for
 * @prot_id and, once its transport is ready, attach the SCMI handle.
 */
static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id, const char *name)
{
	struct scmi_device *sdev;

	sdev = scmi_get_protocol_device(np, info, prot_id, name);
	if (!sdev)
		return;

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

/**
 * scmi_create_protocol_devices - Create devices for all pending requests for
 * this SCMI instance.
 *
 * @np: The device node describing the protocol
 * @info: The SCMI instance descriptor
 * @prot_id: The protocol ID
 *
 * All devices previously requested for this instance (if any) are found and
 * created by scanning the proper @&scmi_requested_devices entry.
 */
static void scmi_create_protocol_devices(struct device_node *np,
					 struct scmi_info *info, int prot_id)
{
	struct list_head *phead;

	/* Walk the list of requests registered against this protocol */
	mutex_lock(&scmi_requested_devices_mtx);
	phead = idr_find(&scmi_requested_devices, prot_id);
	if (phead) {
		struct scmi_requested_dev *rdev;

		list_for_each_entry(rdev, phead, node)
			scmi_create_protocol_device(np, info, prot_id,
						    rdev->id_table->name);
	}
	mutex_unlock(&scmi_requested_devices_mtx);
}

/**
 * scmi_protocol_device_request - Helper to request a device
 *
 * @id_table: A protocol/name pair descriptor for the device to be created.
 *
 * This helper let an SCMI driver request specific devices identified by the
 * @id_table to be created for each active SCMI instance.
 *
 * The requested device name MUST NOT be already existent for any protocol;
 * at first the freshly requested @id_table is annotated in the IDR table
 * @scmi_requested_devices, then a matching device is created for each already
 * active SCMI instance. (if any)
 *
 * This way the requested device is created straight-away for all the already
 * initialized(probed) SCMI instances (handles) and it remains also annotated
 * as pending creation if the requesting SCMI driver was loaded before some
 * SCMI instance and related transports were available: when such late instance
 * is probed, its probe will take care to scan the list of pending requested
 * devices and create those on its own (see @scmi_create_protocol_devices and
 * its enclosing loop)
 *
 * Return: 0 on Success
 */
int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
	int ret = 0;
	unsigned int id = 0;
	struct list_head *head, *phead = NULL;
	struct scmi_requested_dev *rdev;
	struct scmi_info *info;

	pr_debug("Requesting SCMI device (%s) for protocol %x\n",
		 id_table->name, id_table->protocol_id);

	/*
	 * Search for the matching protocol rdev list and then search
	 * of any existent equally named device...fails if any duplicate found.
	 */
	mutex_lock(&scmi_requested_devices_mtx);
	idr_for_each_entry(&scmi_requested_devices, head, id) {
		if (!phead) {
			/* A list found registered in the IDR is never empty */
			rdev = list_first_entry(head, struct scmi_requested_dev,
						node);
			if (rdev->id_table->protocol_id ==
			    id_table->protocol_id)
				phead = head;
		}
		/* Names must be unique across ALL protocols, hence full scan */
		list_for_each_entry(rdev, head, node) {
			if (!strcmp(rdev->id_table->name, id_table->name)) {
				pr_err("Ignoring duplicate request [%d] %s\n",
				       rdev->id_table->protocol_id,
				       rdev->id_table->name);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/*
	 * No duplicate found for requested id_table, so let's create a new
	 * requested device entry for this new valid request.
	 */
	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		ret = -ENOMEM;
		goto out;
	}
	rdev->id_table = id_table;

	/*
	 * Append the new requested device table descriptor to the head of the
	 * related protocol list, eventually creating such head if not already
	 * there.
	 */
	if (!phead) {
		phead = kzalloc(sizeof(*phead), GFP_KERNEL);
		if (!phead) {
			kfree(rdev);
			ret = -ENOMEM;
			goto out;
		}
		INIT_LIST_HEAD(phead);

		ret = idr_alloc(&scmi_requested_devices, (void *)phead,
				id_table->protocol_id,
				id_table->protocol_id + 1, GFP_KERNEL);
		if (ret != id_table->protocol_id) {
			pr_err("Failed to save SCMI device - ret:%d\n", ret);
			kfree(rdev);
			kfree(phead);
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
	}
	list_add(&rdev->node, phead);

	/*
	 * Now effectively create and initialize the requested device for every
	 * already initialized SCMI instance which has registered the requested
	 * protocol as a valid active one: i.e. defined in DT and supported by
	 * current platform FW.
	 */
	mutex_lock(&scmi_list_mutex);
	list_for_each_entry(info, &scmi_list, node) {
		struct device_node *child;

		child = idr_find(&info->active_protocols,
				 id_table->protocol_id);
		if (child) {
			struct scmi_device *sdev;

			sdev = scmi_get_protocol_device(child, info,
							id_table->protocol_id,
							id_table->name);
			/* Set handle if not already set: device existed */
			if (sdev && !sdev->handle)
				sdev->handle =
					scmi_handle_get_from_info_unlocked(info);
		} else {
			dev_err(info->dev,
				"Failed. SCMI protocol %d not active.\n",
				id_table->protocol_id);
		}
	}
	mutex_unlock(&scmi_list_mutex);

out:
	mutex_unlock(&scmi_requested_devices_mtx);

	return ret;
}

/**
 * scmi_protocol_device_unrequest - Helper to unrequest a device
 *
 * @id_table: A protocol/name pair descriptor for the device to be unrequested.
 *
 * An helper to let an SCMI driver release its request about devices; note that
 * devices are created and initialized once the first SCMI driver request them
 * but they destroyed only on SCMI core unloading/unbinding.
 *
 * The current SCMI transport layer uses such devices as internal references and
 * as such they could be shared as same transport between multiple drivers so
 * that cannot be safely destroyed till the whole SCMI stack is removed.
 * (unless adding further burden of refcounting.)
 */
void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
{
	struct list_head *phead;

	pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
		 id_table->name, id_table->protocol_id);

	mutex_lock(&scmi_requested_devices_mtx);
	phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
	if (phead) {
		struct scmi_requested_dev *victim, *tmp;

		list_for_each_entry_safe(victim, tmp, phead, node) {
			if (!strcmp(victim->id_table->name, id_table->name)) {
				list_del(&victim->node);
				kfree(victim);
				break;
			}
		}

		/* Drop the per-protocol list head once its last entry goes */
		if (list_empty(phead)) {
			idr_remove(&scmi_requested_devices,
				   id_table->protocol_id);
			kfree(phead);
		}
	}
	mutex_unlock(&scmi_requested_devices_mtx);
}

/*
 * scmi_probe() - Probe an SCMI instance: set up transport channels, the
 * transfer pools, notifications and the mandatory Base protocol, then
 * activate every protocol node found in DT that the platform implements.
 */
static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np =
dev->of_node;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);
	idr_init(&info->protocols);
	mutex_init(&info->protocols_mtx);
	idr_init(&info->active_protocols);

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);
	idr_init(&info->rx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;
	handle->devm_protocol_get = scmi_devm_protocol_get;
	handle->devm_protocol_put = scmi_devm_protocol_put;

	/* Base protocol channels must exist before any message can be sent */
	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	/* Notifications are best-effort: failure is logged, not fatal */
	if (scmi_notification_init(handle))
		dev_err(dev, "SCMI Notifications NOT available.\n");

	/*
	 * Trigger SCMI Base protocol initialization.
	 * It's mandatory and won't be ever released/deinit until the
	 * SCMI stack is shutdown/unloaded as a whole.
	 */
	ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI\n");
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		/*
		 * NOTE(review): out-of-range protocol ids are only reported,
		 * not skipped - execution falls through to the checks below.
		 * Looks intentional (matches upstream), but confirm.
		 */
		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		/*
		 * Save this valid DT protocol descriptor amongst
		 * @active_protocols for this SCMI instance.
		 */
		ret = idr_alloc(&info->active_protocols, child,
				prot_id, prot_id + 1, GFP_KERNEL);
		if (ret != prot_id) {
			dev_err(dev, "SCMI protocol %d already activated. Skip\n",
				prot_id);
			continue;
		}

		/* Hold the node while it lives in @active_protocols */
		of_node_get(child);
		scmi_create_protocol_devices(child, info, prot_id);
	}

	return 0;
}

/*
 * scmi_free_channel() - Drop a channel's IDR slot; the channel descriptor
 * itself is devres-managed and freed with the SCMI platform device.
 */
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
	idr_remove(idr, id);
}

/*
 * scmi_remove() - Tear down an SCMI instance. Refused with -EBUSY while
 * any user still holds the handle; otherwise releases notifications,
 * protocol bookkeeping, DT node references and all transport channels.
 */
static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0, id;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;
	struct device_node *child;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	scmi_notification_exit(&info->handle);

	mutex_lock(&info->protocols_mtx);
	idr_destroy(&info->protocols);
	mutex_unlock(&info->protocols_mtx);

	/* Balance the of_node_get() taken at probe time for each protocol */
	idr_for_each_entry(&info->active_protocols, child, id)
		of_node_put(child);
	idr_destroy(&info->active_protocols);

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->tx_idr);

	idr = &info->rx_idr;
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->rx_idr);

	return ret;
}

/* sysfs: SCMI protocol version negotiated with the platform firmware */
static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%u.%u\n", info->version.major_ver,
		       info->version.minor_ver);
}
static DEVICE_ATTR_RO(protocol_version);

/* sysfs: vendor-specific firmware implementation version */
static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", info->version.impl_ver);
}
static DEVICE_ATTR_RO(firmware_version);

/* sysfs: vendor identifier reported by the Base protocol */
static ssize_t vendor_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.vendor_id);
}
static DEVICE_ATTR_RO(vendor_id);

/* sysfs: sub-vendor identifier reported by the Base protocol */
static ssize_t sub_vendor_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
}
static DEVICE_ATTR_RO(sub_vendor_id);

/* Version/identity attributes exposed on the SCMI platform device */
static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_sub_vendor_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);

/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);

static struct platform_driver scmi_driver = {
	.driver = {
		   .name = "arm-scmi",
		   .of_match_table = scmi_of_match,
		   .dev_groups = versions_groups,
		   },
	.probe = scmi_probe,
	.remove = scmi_remove,
};

/*
 * Register the SCMI bus, every standard protocol and finally the platform
 * driver itself. Runs at subsys_initcall time so protocol drivers probing
 * later find the bus and protocols already in place.
 */
static int __init scmi_driver_init(void)
{
	scmi_bus_init();

	scmi_base_register();

	scmi_clock_register();
	scmi_perf_register();
	scmi_power_register();
	scmi_reset_register();
	scmi_sensors_register();
	scmi_voltage_register();
	scmi_system_register();

	return platform_driver_register(&scmi_driver);
}
subsys_initcall(scmi_driver_init);

/* Tear down in reverse: protocols, bus, then the platform driver */
static void __exit scmi_driver_exit(void)
{
	scmi_base_unregister();

	scmi_clock_unregister();
	scmi_perf_unregister();
	scmi_power_unregister();
	scmi_reset_unregister();
	scmi_sensors_unregister();
	scmi_voltage_unregister();
	scmi_system_unregister();

	scmi_bus_exit();

	platform_driver_unregister(&scmi_driver);
}
module_exit(scmi_driver_exit);

/*
 * NOTE(review): the conventional platform alias format is "platform:<name>"
 * with no space; the space here likely defeats module autoloading via
 * uevent MODALIAS matching - confirm against the platform bus uevent format
 * before changing, since altering the string changes runtime behavior.
 */
MODULE_ALIAS("platform: arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");