// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio Transport driver for Arm System Control and Management Interface
 * (SCMI).
 *
 * Copyright (C) 2020-2022 OpenSynergy.
 * Copyright (C) 2021-2022 ARM Ltd.
 */

/**
 * DOC: Theory of Operation
 *
 * The scmi-virtio transport implements a driver for the virtio SCMI device.
 *
 * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
 * channel (virtio eventq, P2A channel). Each channel is implemented through a
 * virtqueue. Access to each virtqueue is protected by spinlocks.
 */

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_scmi.h>

#include "common.h"

#define VIRTIO_MAX_RX_TIMEOUT_MS	60000
#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
#define VIRTIO_SCMI_MAX_PDU_SIZE \
	(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
#define DESCRIPTORS_PER_TX_MSG 2

/**
 * struct scmi_vio_channel - Transport channel information
 *
 * @vqueue: Associated virtqueue
 * @cinfo: SCMI Tx or Rx channel
 * @free_lock: Protects access to the @free_list.
 * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
 * @deferred_tx_work: Worker for TX deferred replies processing
 * @deferred_tx_wq: Workqueue for TX deferred replies
 * @pending_lock: Protects access to the @pending_cmds_list.
 * @pending_cmds_list: List of pre-fetched commands queued for later processing
 * @is_rx: Whether channel is an Rx channel
 * @max_msg: Maximum number of pending messages for this channel.
 * @lock: Protects access to all members except users, free_list and
 *	  pending_cmds_list.
 * @shutdown_done: A reference to a completion used when freeing this channel.
 * @users: A reference count to currently active users of this channel.
 */
struct scmi_vio_channel {
	struct virtqueue *vqueue;
	struct scmi_chan_info *cinfo;
	/* lock to protect access to the free list. */
	spinlock_t free_lock;
	struct list_head free_list;
	/* lock to protect access to the pending list. */
	spinlock_t pending_lock;
	struct list_head pending_cmds_list;
	struct work_struct deferred_tx_work;
	struct workqueue_struct *deferred_tx_wq;
	bool is_rx;
	unsigned int max_msg;
	/*
	 * Lock to protect access to all members except users, free_list and
	 * pending_cmds_list
	 */
	spinlock_t lock;
	struct completion *shutdown_done;
	refcount_t users;
};

enum poll_states {
	VIO_MSG_NOT_POLLED,
	VIO_MSG_POLL_TIMEOUT,
	VIO_MSG_POLLING,
	VIO_MSG_POLL_DONE,
};
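
/*
 * Summary of the poll-state transitions as implemented below:
 *
 *  - VIO_MSG_NOT_POLLED: default state of any message picked from the free
 *    list (scmi_virtio_get_free_msg()).
 *  - VIO_MSG_POLLING: set by virtio_send_message() when the xfer requested
 *    poll_completion, together with an extra message refcount.
 *  - VIO_MSG_POLL_DONE: set by virtio_poll_done() on whichever CPU happens to
 *    dequeue the message while polling.
 *  - VIO_MSG_POLL_TIMEOUT: set by virtio_mark_txdone() for a polled message
 *    that timed out while still in flight; such a message is freed later by
 *    the IRQ path or by the TX deferred worker.
 */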

/**
 * struct scmi_vio_msg - Transport PDU information
 *
 * @request: SDU used for commands
 * @input: SDU used for (delayed) responses and notifications
 * @list: List which scmi_vio_msg may be part of
 * @rx_len: Input SDU size in bytes, once input has been received
 * @poll_idx: Last used index registered for polling purposes if this message
 *	      transaction reply was configured for polling.
 * @poll_status: Polling state for this message.
 * @poll_lock: A lock to protect @poll_status
 * @users: A reference count to track this message's users and avoid premature
 *	   freeing (and reuse) when polling and IRQ execution paths interleave.
 */
struct scmi_vio_msg {
	struct scmi_msg_payld *request;
	struct scmi_msg_payld *input;
	struct list_head list;
	unsigned int rx_len;
	unsigned int poll_idx;
	enum poll_states poll_status;
	/* Lock to protect access to poll_status */
	spinlock_t poll_lock;
	refcount_t users;
};

/* Only one SCMI VirtIO device can possibly exist */
static struct virtio_device *scmi_vdev;

static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
				   struct scmi_chan_info *cinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&vioch->lock, flags);
	cinfo->transport_info = vioch;
	/* Indirectly mark this channel as no longer available */
	vioch->cinfo = cinfo;
	spin_unlock_irqrestore(&vioch->lock, flags);

	refcount_set(&vioch->users, 1);
}

static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
{
	return refcount_inc_not_zero(&vioch->users);
}

static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
{
	if (refcount_dec_and_test(&vioch->users)) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->lock, flags);
		if (vioch->shutdown_done) {
			vioch->cinfo = NULL;
			complete(vioch->shutdown_done);
		}
		spin_unlock_irqrestore(&vioch->lock, flags);
	}
}

static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);

	/*
	 * Prepare to wait for the last release if not already released
	 * or in progress.
	 */
	spin_lock_irqsave(&vioch->lock, flags);
	if (!vioch->cinfo || vioch->shutdown_done) {
		spin_unlock_irqrestore(&vioch->lock, flags);
		return;
	}

	vioch->shutdown_done = &vioch_shutdown_done;
	virtio_break_device(vioch->vqueue->vdev);
	if (!vioch->is_rx && vioch->deferred_tx_wq)
		/* Cannot be kicked anymore after this... */
		vioch->deferred_tx_wq = NULL;
	spin_unlock_irqrestore(&vioch->lock, flags);

	scmi_vio_channel_release(vioch);

	/* Let any possibly concurrent RX path release the channel */
	wait_for_completion(vioch->shutdown_done);
}

/* Assumed to be called with the vio channel already acquired */
static struct scmi_vio_msg *
scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	struct scmi_vio_msg *msg;

	spin_lock_irqsave(&vioch->free_lock, flags);
	if (list_empty(&vioch->free_list)) {
		spin_unlock_irqrestore(&vioch->free_lock, flags);
		return NULL;
	}

	msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
	list_del_init(&msg->list);
	spin_unlock_irqrestore(&vioch->free_lock, flags);

	/* Still no users, no need to acquire poll_lock */
	msg->poll_status = VIO_MSG_NOT_POLLED;
	refcount_set(&msg->users, 1);

	return msg;
}

static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg)
{
	return refcount_inc_not_zero(&msg->users);
}

/* Assumed to be called with the vio channel already acquired */
static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
					struct scmi_vio_msg *msg)
{
	bool ret;

	ret = refcount_dec_and_test(&msg->users);
	if (ret) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->free_lock, flags);
		list_add_tail(&msg->list, &vioch->free_list);
		spin_unlock_irqrestore(&vioch->free_lock, flags);
	}

	return ret;
}
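
/*
 * Message and channel reference counting in a nutshell: a TX message starts
 * with a single user when taken from the free list; the polling path grabs
 * one more reference in virtio_send_message() so that IRQ and polling
 * completions can race safely, and whoever drops the last reference via
 * scmi_vio_msg_release() returns the message to the free list. Channels are
 * similarly acquired around every access path and fully released only during
 * scmi_vio_channel_cleanup_sync().
 */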

static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
}

static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
			       struct scmi_vio_msg *msg)
{
	struct scatterlist sg_in;
	int rc;
	unsigned long flags;
	struct device *dev = &vioch->vqueue->vdev->dev;

	sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);

	spin_lock_irqsave(&vioch->lock, flags);

	rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	return rc;
}

/*
 * Assumed to be called with the channel already acquired, or not ready at all;
 * vioch->lock MUST NOT already be held.
 */
static void scmi_finalize_message(struct scmi_vio_channel *vioch,
				  struct scmi_vio_msg *msg)
{
	if (vioch->is_rx)
		scmi_vio_feed_vq_rx(vioch, msg);
	else
		scmi_vio_msg_release(vioch, msg);
}

static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{
	unsigned long flags;
	unsigned int length;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg;
	bool cb_enabled = true;

	if (WARN_ON_ONCE(!vqueue->vdev->priv))
		return;
	vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];

	for (;;) {
		if (!scmi_vio_channel_acquire(vioch))
			return;

		spin_lock_irqsave(&vioch->lock, flags);
		if (cb_enabled) {
			virtqueue_disable_cb(vqueue);
			cb_enabled = false;
		}

		msg = virtqueue_get_buf(vqueue, &length);
		if (!msg) {
			if (virtqueue_enable_cb(vqueue)) {
				spin_unlock_irqrestore(&vioch->lock, flags);
				scmi_vio_channel_release(vioch);
				return;
			}
			cb_enabled = true;
		}
		spin_unlock_irqrestore(&vioch->lock, flags);

		if (msg) {
			msg->rx_len = length;
			scmi_rx_callback(vioch->cinfo,
					 msg_read_header(msg->input), msg);

			scmi_finalize_message(vioch, msg);
		}

		/*
		 * Release vio channel between loop iterations to allow
		 * virtio_chan_free() to eventually fully release it when
		 * shutting down; in such a case, any outstanding message will
		 * be ignored since this loop will bail out at the next
		 * iteration.
		 */
		scmi_vio_channel_release(vioch);
	}
}
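
/*
 * Deferred TX worker: kicked by virtio_poll_done() whenever the polling loop
 * prefetches buffers it was not waiting for, or leaves the cmdq seemingly
 * non-empty. It delivers any prefetched non-polled replies to the SCMI core,
 * frees every prefetched message and then drains whatever is still pending on
 * the virtqueue through scmi_vio_complete_cb().
 */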

static void scmi_vio_deferred_tx_worker(struct work_struct *work)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg, *tmp;

	vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work);

	if (!scmi_vio_channel_acquire(vioch))
		return;

	/*
	 * Process pre-fetched messages: these could be non-polled messages or
	 * late timed-out replies to polled messages dequeued by chance while
	 * polling for some other messages: this worker is in charge of
	 * processing the valid, non-expired messages and, in any case, of
	 * finally freeing all of them.
	 */
	spin_lock_irqsave(&vioch->pending_lock, flags);

	/* Scan the list of possibly pre-fetched messages during polling. */
	list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) {
		list_del(&msg->list);

		/*
		 * Channel is acquired here (cannot vanish) and this message
		 * is no longer processed elsewhere, so no poll_lock needed.
		 */
		if (msg->poll_status == VIO_MSG_NOT_POLLED)
			scmi_rx_callback(vioch->cinfo,
					 msg_read_header(msg->input), msg);

		/* Free the processed message once done */
		scmi_vio_msg_release(vioch, msg);
	}

	spin_unlock_irqrestore(&vioch->pending_lock, flags);

	/* Process possibly still pending messages */
	scmi_vio_complete_cb(vioch->vqueue);

	scmi_vio_channel_release(vioch);
}

static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };

static vq_callback_t *scmi_vio_complete_callbacks[] = {
	scmi_vio_complete_cb,
	scmi_vio_complete_cb
};

static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
{
	struct scmi_vio_channel *vioch = base_cinfo->transport_info;

	return vioch->max_msg;
}

static int virtio_link_supplier(struct device *dev)
{
	if (!scmi_vdev) {
		dev_notice(dev,
			   "Deferring probe after not finding a bound scmi-virtio device\n");
		return -EPROBE_DEFER;
	}

	if (!device_link_add(dev, &scmi_vdev->dev,
			     DL_FLAG_AUTOREMOVE_CONSUMER)) {
		dev_err(dev, "Adding link to supplier virtio device failed\n");
		return -ECANCELED;
	}

	return 0;
}

static bool virtio_chan_available(struct device *dev, int idx)
{
	struct scmi_vio_channel *channels, *vioch = NULL;

	if (WARN_ON_ONCE(!scmi_vdev))
		return false;

	channels = (struct scmi_vio_channel *)scmi_vdev->priv;

	switch (idx) {
	case VIRTIO_SCMI_VQ_TX:
		vioch = &channels[VIRTIO_SCMI_VQ_TX];
		break;
	case VIRTIO_SCMI_VQ_RX:
		if (scmi_vio_have_vq_rx(scmi_vdev))
			vioch = &channels[VIRTIO_SCMI_VQ_RX];
		break;
	default:
		return false;
	}

	return vioch && !vioch->cinfo;
}

static void scmi_destroy_tx_workqueue(void *deferred_tx_wq)
{
	destroy_workqueue(deferred_tx_wq);
}
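
/*
 * Channel setup pre-allocates all the scmi_vio_msg buffers for a channel:
 * each message gets an input buffer of VIRTIO_SCMI_MAX_PDU_SIZE and, on the
 * TX channel only, a request buffer of the same size plus its poll_lock and
 * refcount. RX messages are immediately queued on the eventq via
 * scmi_finalize_message(), while TX messages land on the free list.
 */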

static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			     bool tx)
{
	struct scmi_vio_channel *vioch;
	int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
	int i;

	if (!scmi_vdev)
		return -EPROBE_DEFER;

	vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];

	/* Setup a deferred worker for polling. */
	if (tx && !vioch->deferred_tx_wq) {
		int ret;

		vioch->deferred_tx_wq =
			alloc_workqueue(dev_name(&scmi_vdev->dev),
					WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
					0);
		if (!vioch->deferred_tx_wq)
			return -ENOMEM;

		ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue,
					       vioch->deferred_tx_wq);
		if (ret)
			return ret;

		INIT_WORK(&vioch->deferred_tx_work,
			  scmi_vio_deferred_tx_worker);
	}

	for (i = 0; i < vioch->max_msg; i++) {
		struct scmi_vio_msg *msg;

		msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		if (tx) {
			msg->request = devm_kzalloc(dev,
						    VIRTIO_SCMI_MAX_PDU_SIZE,
						    GFP_KERNEL);
			if (!msg->request)
				return -ENOMEM;
			spin_lock_init(&msg->poll_lock);
			refcount_set(&msg->users, 1);
		}

		msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE,
					  GFP_KERNEL);
		if (!msg->input)
			return -ENOMEM;

		scmi_finalize_message(vioch, msg);
	}

	scmi_vio_channel_ready(vioch, cinfo);

	return 0;
}

static int virtio_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	scmi_vio_channel_cleanup_sync(vioch);

	scmi_free_channel(cinfo, data, id);

	return 0;
}

static int virtio_send_message(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer)
{
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scatterlist sg_out;
	struct scatterlist sg_in;
	struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
	unsigned long flags;
	int rc;
	struct scmi_vio_msg *msg;

	if (!scmi_vio_channel_acquire(vioch))
		return -EINVAL;

	msg = scmi_virtio_get_free_msg(vioch);
	if (!msg) {
		scmi_vio_channel_release(vioch);
		return -EBUSY;
	}

	msg_tx_prepare(msg->request, xfer);

	sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
	sg_init_one(&sg_in, msg->input, msg_response_size(xfer));

	spin_lock_irqsave(&vioch->lock, flags);

	/*
	 * If polling was requested for this transaction:
	 *  - retrieve last used index (will be used as polling reference)
	 *  - bind the polled message to the xfer via .priv
	 *  - grab an additional msg refcount for the poll-path
	 */
	if (xfer->hdr.poll_completion) {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		/* Still no users, no need to acquire poll_lock */
		msg->poll_status = VIO_MSG_POLLING;
		scmi_vio_msg_acquire(msg);
		/* Ensure initialized msg is visibly bound to xfer */
		smp_store_mb(xfer->priv, msg);
	}

	rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(vioch->cinfo->dev,
			"failed to add to TX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	if (rc) {
		/* Ensure order between xfer->priv clear and vq feeding */
		smp_store_mb(xfer->priv, NULL);
		if (xfer->hdr.poll_completion)
			scmi_vio_msg_release(vioch, msg);
		scmi_vio_msg_release(vioch, msg);
	}

	scmi_vio_channel_release(vioch);

	return rc;
}
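
/*
 * The two fetch helpers below expect the transport message to be bound to
 * xfer->priv: virtio_send_message() sets the binding for polled transfers,
 * while for IRQ-delivered replies the SCMI core is expected to re-bind the
 * priv pointer handed over through scmi_rx_callback(). The binding is torn
 * down again in virtio_mark_txdone().
 */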

static void virtio_fetch_response(struct scmi_chan_info *cinfo,
				  struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		msg_fetch_response(msg->input, msg->rx_len, xfer);
}

static void
virtio_fetch_notification(struct scmi_chan_info *cinfo,
			  size_t max_len, struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
}

/**
 * virtio_mark_txdone - Mark transmission done
 *
 * Free only completed polling transfer messages.
 *
 * Note that in the SCMI VirtIO transport we never explicitly release still
 * outstanding but timed-out messages by forcibly re-adding them to the
 * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the
 * TX deferred worker, eventually clean up such messages once, finally, a late
 * reply is received and discarded (if ever).
 *
 * This approach was deemed preferable since those pending timed-out buffers
 * are still effectively owned by the SCMI platform VirtIO device even after
 * timeout expiration: forcibly freeing and reusing them before they have been
 * returned explicitly by the SCMI platform could lead to subtle bugs due to
 * message corruption.
 * An SCMI platform VirtIO device which never returns message buffers is
 * anyway broken and it will quickly lead to exhaustion of available messages.
 *
 * For this same reason, here, we take care to free only the polled messages
 * that have somehow been replied to (only if not already processed by chance
 * on the IRQ path - the initial scmi_vio_msg_release() takes care of this)
 * and also any timed-out polled message if that indeed appears to have been
 * at least dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is needed
 * since such messages won't be freed elsewhere. Any other polled message is
 * marked as VIO_MSG_POLL_TIMEOUT.
 *
 * Possible late replies to timed-out polled messages will be eventually freed
 * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if
 * dequeued on some other polling path.
 *
 * @cinfo: SCMI channel info
 * @ret: Transmission return code
 * @xfer: Transfer descriptor
 */
static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret,
			       struct scmi_xfer *xfer)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scmi_vio_msg *msg = xfer->priv;

	if (!msg || !scmi_vio_channel_acquire(vioch))
		return;

	/* Ensure msg is unbound from xfer anyway at this point */
	smp_store_mb(xfer->priv, NULL);

	/* Must be a polled xfer and not already freed on the IRQ path */
	if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
		scmi_vio_channel_release(vioch);
		return;
	}

	spin_lock_irqsave(&msg->poll_lock, flags);
	/* Do not free timed-out polled messages while still in flight */
	if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE)
		scmi_vio_msg_release(vioch, msg);
	else if (msg->poll_status == VIO_MSG_POLLING)
		msg->poll_status = VIO_MSG_POLL_TIMEOUT;
	spin_unlock_irqrestore(&msg->poll_lock, flags);

	scmi_vio_channel_release(vioch);
}
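
/*
 * Who frees what, at a glance:
 *
 *  - non-polled replies and notifications: handled and freed (or re-queued on
 *    the eventq) by scmi_vio_complete_cb(), or by the TX deferred worker when
 *    prefetched by a polling loop;
 *  - completed polled replies, and timed-out polled replies already dequeued
 *    (VIO_MSG_POLL_DONE): freed in virtio_mark_txdone();
 *  - timed-out polled replies still in flight: freed by the IRQ path or by
 *    the TX deferred worker if and when the platform finally returns them.
 */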

/**
 * virtio_poll_done - Provide polling support for VirtIO transport
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being polled for.
 *
 * VirtIO core provides a polling mechanism based only on last used indexes:
 * this means that it is possible to poll the virtqueues waiting for something
 * new to arrive from the host side, but the only way to check if the freshly
 * arrived buffer was indeed what we were waiting for is to compare the newly
 * arrived message descriptor with the one we are polling on.
 *
 * As a consequence we may dequeue something different from the buffer we were
 * poll-waiting for: if that is the case, such early fetched buffers are then
 * added to the @pending_cmds_list for later processing by a dedicated
 * deferred worker.
 *
 * So, basically, once something new is spotted we proceed to de-queue all the
 * freshly received used buffers until we find the one we were polling on, or
 * until we have 'seemingly' emptied the virtqueue; if some buffers are still
 * pending in the vqueue at the end of the polling loop (possible due to
 * inherent races in virtqueues handling mechanisms), we similarly kick the
 * deferred worker and let it process those, to avoid indefinitely looping in
 * the .poll_done busy-waiting helper.
 *
 * Finally, we also delegate to the deferred worker the final freeing of any
 * timed-out reply to a polled message that we happen to dequeue.
 *
 * Note that, since we do NOT have a per-message suppress-notification
 * mechanism, the message we are polling for could alternatively be delivered
 * via the usual IRQ callbacks on another core which happened to have IRQs
 * enabled while we are actively polling for it here: in such a case it will
 * be handled as such by scmi_rx_callback() and the polling loop in the SCMI
 * Core TX path will be transparently terminated anyway.
 *
 * Return: True once polling has successfully completed.
 */
static bool virtio_poll_done(struct scmi_chan_info *cinfo,
			     struct scmi_xfer *xfer)
{
	bool pending, found = false;
	unsigned int length, any_prefetched = 0;
	unsigned long flags;
	struct scmi_vio_msg *next_msg, *msg = xfer->priv;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	if (!msg)
		return true;

	/*
	 * Processed already by another polling loop on another CPU ?
	 *
	 * Note that this message is acquired on the poll path so cannot vanish
	 * while inside this loop iteration even if concurrently processed on
	 * the IRQ path.
	 *
	 * Avoid acquiring poll_lock since poll_status can be changed
	 * in a relevant manner only later in this same thread of execution:
	 * any other possible changes made concurrently by other polling loops
	 * or by a reply delivered on the IRQ path have no meaningful impact on
	 * this loop iteration: in other words it is harmless to allow this
	 * possible race, but let us avoid spinlocking with irqs off in this
	 * initial part of the polling loop.
	 */
	if (msg->poll_status == VIO_MSG_POLL_DONE)
		return true;

	if (!scmi_vio_channel_acquire(vioch))
		return true;

	/* Has cmdq index moved at all ? */
	pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	if (!pending) {
		scmi_vio_channel_release(vioch);
		return false;
	}

	spin_lock_irqsave(&vioch->lock, flags);
	virtqueue_disable_cb(vioch->vqueue);

	/*
	 * Process all new messages till the polled-for message is found OR
	 * the vqueue is empty.
	 */
	while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
		bool next_msg_done = false;

		/*
		 * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so
		 * that it can be properly freed even on timeout in
		 * mark_txdone.
		 */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_POLLING) {
			next_msg->poll_status = VIO_MSG_POLL_DONE;
			next_msg_done = true;
		}
		spin_unlock(&next_msg->poll_lock);

		next_msg->rx_len = length;
		/* Is this the message we were polling for ? */
		if (next_msg == msg) {
			found = true;
			break;
		} else if (next_msg_done) {
			/* Skip the rest if this was another polled msg */
			continue;
		}

		/*
		 * Enqueue for later processing any non-polled message and any
		 * timed-out polled one that we happen to have dequeued.
		 */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_NOT_POLLED ||
		    next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) {
			spin_unlock(&next_msg->poll_lock);

			any_prefetched++;
			spin_lock(&vioch->pending_lock);
			list_add_tail(&next_msg->list,
				      &vioch->pending_cmds_list);
			spin_unlock(&vioch->pending_lock);
		} else {
			spin_unlock(&next_msg->poll_lock);
		}
	}

	/*
	 * When the polling loop has successfully terminated, anything that was
	 * queued in the meantime will be served by the deferred worker OR by
	 * the normal IRQ callback OR by other poll loops.
	 *
	 * If we are still looking for the polled reply, the polling index has
	 * to be updated to the current vqueue last used index.
	 */
	if (found) {
		pending = !virtqueue_enable_cb(vioch->vqueue);
	} else {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	}

	if (vioch->deferred_tx_wq && (any_prefetched || pending))
		queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);

	spin_unlock_irqrestore(&vioch->lock, flags);

	scmi_vio_channel_release(vioch);

	return found;
}

static const struct scmi_transport_ops scmi_virtio_ops = {
	.link_supplier = virtio_link_supplier,
	.chan_available = virtio_chan_available,
	.chan_setup = virtio_chan_setup,
	.chan_free = virtio_chan_free,
	.get_max_msg = virtio_get_max_msg,
	.send_message = virtio_send_message,
	.fetch_response = virtio_fetch_response,
	.fetch_notification = virtio_fetch_notification,
	.mark_txdone = virtio_mark_txdone,
	.poll_done = virtio_poll_done,
};
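
/*
 * Roughly how the SCMI core is expected to drive these ops for a polled
 * transfer (illustrative sketch only, not the actual SCMI core code; calls
 * go through the ops table above):
 *
 *	virtio_send_message(cinfo, xfer);     // xfer->hdr.poll_completion set
 *	while (!virtio_poll_done(cinfo, xfer))
 *		;                             // spin, subject to a timeout
 *	virtio_fetch_response(cinfo, xfer);
 *	virtio_mark_txdone(cinfo, ret, xfer); // unbind and free the message
 *
 * Non-polled transfers instead complete through scmi_vio_complete_cb() and
 * scmi_rx_callback() on the IRQ path.
 */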

static int scmi_vio_probe(struct virtio_device *vdev)
{
	struct device *dev = &vdev->dev;
	struct scmi_vio_channel *channels;
	bool have_vq_rx;
	int vq_cnt;
	int i;
	int ret;
	struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];

	/* Only one SCMI VirtIO device allowed */
	if (scmi_vdev) {
		dev_err(dev,
			"One SCMI Virtio device was already initialized: only one allowed.\n");
		return -EBUSY;
	}

	have_vq_rx = scmi_vio_have_vq_rx(vdev);
	vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;

	channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	if (have_vq_rx)
		channels[VIRTIO_SCMI_VQ_RX].is_rx = true;

	ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks,
			      scmi_vio_vqueue_names, NULL);
	if (ret) {
		dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt);
		return ret;
	}

	for (i = 0; i < vq_cnt; i++) {
		unsigned int sz;

		spin_lock_init(&channels[i].lock);
		spin_lock_init(&channels[i].free_lock);
		INIT_LIST_HEAD(&channels[i].free_list);
		spin_lock_init(&channels[i].pending_lock);
		INIT_LIST_HEAD(&channels[i].pending_cmds_list);
		channels[i].vqueue = vqs[i];

		sz = virtqueue_get_vring_size(channels[i].vqueue);
		/* Tx messages need multiple descriptors. */
		if (!channels[i].is_rx)
			sz /= DESCRIPTORS_PER_TX_MSG;

		if (sz > MSG_TOKEN_MAX) {
			dev_info(dev,
				 "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
				 channels[i].is_rx ? "rx" : "tx",
				 sz, MSG_TOKEN_MAX);
			sz = MSG_TOKEN_MAX;
		}
		channels[i].max_msg = sz;
	}

	vdev->priv = channels;
	/* Ensure initialized scmi_vdev is visible */
	smp_store_mb(scmi_vdev, vdev);

	return 0;
}

static void scmi_vio_remove(struct virtio_device *vdev)
{
	/*
	 * Once we get here, virtio_chan_free() will have already been called
	 * by the SCMI core for any existing channel and, as a consequence,
	 * all the virtio channels will have been already marked NOT ready,
	 * causing any outstanding message on any vqueue to be ignored by
	 * complete_cb: now we can just stop processing buffers and destroy
	 * the vqueues.
	 */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
	/* Ensure scmi_vdev is visible as NULL */
	smp_store_mb(scmi_vdev, NULL);
}

static int scmi_vio_validate(struct virtio_device *vdev)
{
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev,
			"device does not comply with spec version 1.x\n");
		return -EINVAL;
	}
#endif
	return 0;
}

static unsigned int features[] = {
	VIRTIO_SCMI_F_P2A_CHANNELS,
};

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
	{ 0 }
};

static struct virtio_driver virtio_scmi_driver = {
	.driver.name = "scmi-virtio",
	.driver.owner = THIS_MODULE,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = scmi_vio_probe,
	.remove = scmi_vio_remove,
	.validate = scmi_vio_validate,
};

static int __init virtio_scmi_init(void)
{
	return register_virtio_driver(&virtio_scmi_driver);
}

static void virtio_scmi_exit(void)
{
	unregister_virtio_driver(&virtio_scmi_driver);
}

const struct scmi_desc scmi_virtio_desc = {
	.transport_init = virtio_scmi_init,
	.transport_exit = virtio_scmi_exit,
	.ops = &scmi_virtio_ops,
	/* for non-realtime virtio devices */
	.max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
	.max_msg = 0, /* overridden by virtio_get_max_msg() */
	.max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
	.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE),
};