Lines matching "+full:p2a +full:-control" — excerpts from the Linux SCMI virtio transport driver, drivers/firmware/arm_scmi/virtio.c.

// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio Transport driver for Arm System Control and Management Interface
 * (SCMI).
 *
 * Copyright (C) 2020-2022 OpenSynergy.
 * Copyright (C) 2021-2022 ARM Ltd.
 */

/*
 * The scmi-virtio transport implements a driver for the virtio SCMI device.
 *
 * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
 * channel (virtio eventq, P2A channel). Each channel is implemented through a
 * virtqueue. Access to each virtqueue is protected by spinlocks.
 */
/**
 * struct scmi_vio_channel - Transport channel information
 * ...
 * @pending_cmds_list: List of pre-fetched commands queued for later processing
 * ...
 */

/**
 * struct scmi_vio_msg - Transport PDU information
 * ...
 */
In scmi_vio_channel_ready():

	spin_lock_irqsave(&vioch->lock, flags);
	cinfo->transport_info = vioch;
	...
	vioch->cinfo = cinfo;
	spin_unlock_irqrestore(&vioch->lock, flags);

	refcount_set(&vioch->users, 1);
In scmi_vio_channel_acquire():

	return refcount_inc_not_zero(&vioch->users);
In scmi_vio_channel_release():

	if (refcount_dec_and_test(&vioch->users)) {
		...
		spin_lock_irqsave(&vioch->lock, flags);
		if (vioch->shutdown_done) {
			vioch->cinfo = NULL;
			complete(vioch->shutdown_done);
		}
		spin_unlock_irqrestore(&vioch->lock, flags);
	}
In scmi_vio_channel_cleanup_sync():

	spin_lock_irqsave(&vioch->lock, flags);
	if (!vioch->cinfo || vioch->shutdown_done) {
		spin_unlock_irqrestore(&vioch->lock, flags);
		return;
	}

	vioch->shutdown_done = &vioch_shutdown_done;
	if (!vioch->is_rx && vioch->deferred_tx_wq)
		vioch->deferred_tx_wq = NULL;
	spin_unlock_irqrestore(&vioch->lock, flags);
	...
	wait_for_completion(vioch->shutdown_done);
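The three helpers above form a small lifetime protocol: the channel starts with one self-reference, hot paths take opportunistic references, and teardown drops the self-reference and then blocks until the last concurrent user completes the shutdown. Below is a minimal user-space sketch of that pattern, assuming C11 atomics and pthreads; all names (struct channel, channel_acquire, channel_cleanup_sync) are illustrative stand-ins, not the kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct channel {
	atomic_int users;		/* like vioch->users, starts at 1 */
	pthread_mutex_t lock;		/* init with PTHREAD_MUTEX_INITIALIZER */
	pthread_cond_t shutdown_done;	/* init with PTHREAD_COND_INITIALIZER */
	bool shutting_down;
};

/* Like refcount_inc_not_zero(): succeed only while a reference lives. */
static bool channel_acquire(struct channel *ch)
{
	int v = atomic_load(&ch->users);

	while (v != 0)
		if (atomic_compare_exchange_weak(&ch->users, &v, v + 1))
			return true;
	return false;
}

static void channel_release(struct channel *ch)
{
	if (atomic_fetch_sub(&ch->users, 1) == 1) {
		/* Last reference gone: wake the teardown waiter, if any. */
		pthread_mutex_lock(&ch->lock);
		if (ch->shutting_down)
			pthread_cond_signal(&ch->shutdown_done);
		pthread_mutex_unlock(&ch->lock);
	}
}

static void channel_cleanup_sync(struct channel *ch)
{
	pthread_mutex_lock(&ch->lock);
	ch->shutting_down = true;
	pthread_mutex_unlock(&ch->lock);

	channel_release(ch);		/* drop the initial self-reference */

	/* Block until any concurrent user drops the final reference. */
	pthread_mutex_lock(&ch->lock);
	while (atomic_load(&ch->users) != 0)
		pthread_cond_wait(&ch->shutdown_done, &ch->lock);
	pthread_mutex_unlock(&ch->lock);
}

The re-check of users under the mutex is what makes the wait race-free: the final release signals only after taking the same mutex, so the waiter cannot miss the wakeup.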
In scmi_virtio_get_free_msg():

	spin_lock_irqsave(&vioch->free_lock, flags);
	if (list_empty(&vioch->free_list)) {
		spin_unlock_irqrestore(&vioch->free_lock, flags);
		return NULL;
	}

	msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
	list_del_init(&msg->list);
	spin_unlock_irqrestore(&vioch->free_lock, flags);

	msg->poll_status = VIO_MSG_NOT_POLLED;
	refcount_set(&msg->users, 1);
In scmi_vio_msg_acquire():

	return refcount_inc_not_zero(&msg->users);
In scmi_vio_msg_release():

	ret = refcount_dec_and_test(&msg->users);
	...
	spin_lock_irqsave(&vioch->free_lock, flags);
	list_add_tail(&msg->list, &vioch->free_list);
	spin_unlock_irqrestore(&vioch->free_lock, flags);
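Messages follow the same get/put discipline over a lock-protected free list: a fresh message leaves the list with a single owner, and only the last put returns it to the pool. A minimal user-space sketch of that free-list pattern follows, assuming pthreads and C11 atomics; struct vio_msg, pool_get and pool_put are illustrative stand-ins for the kernel's list_head/refcount_t machinery.

#include <pthread.h>
#include <stdatomic.h>

struct vio_msg {
	struct vio_msg *next;	/* free-list linkage */
	atomic_int users;
	char input[128];	/* stands in for the PDU buffer */
};

struct msg_pool {
	pthread_mutex_t free_lock;
	struct vio_msg *free_list;
};

/* Like scmi_virtio_get_free_msg(): NULL when the pool is exhausted. */
static struct vio_msg *pool_get(struct msg_pool *p)
{
	struct vio_msg *msg;

	pthread_mutex_lock(&p->free_lock);
	msg = p->free_list;
	if (msg)
		p->free_list = msg->next;
	pthread_mutex_unlock(&p->free_lock);

	if (msg)
		atomic_store(&msg->users, 1);	/* fresh single owner */
	return msg;
}

/* Like scmi_vio_msg_release(): recycle only on the last put. */
static void pool_put(struct msg_pool *p, struct vio_msg *msg)
{
	if (atomic_fetch_sub(&msg->users, 1) != 1)
		return;

	pthread_mutex_lock(&p->free_lock);
	msg->next = p->free_list;
	p->free_list = msg;
	pthread_mutex_unlock(&p->free_lock);
}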
In scmi_vio_feed_vq_rx():

	struct device *dev = &vioch->vqueue->vdev->dev;

	sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);

	spin_lock_irqsave(&vioch->lock, flags);

	rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
	...
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);
/* vioch->lock MUST NOT have been already acquired. */
In scmi_finalize_message():

	if (vioch->is_rx)
		scmi_vio_feed_vq_rx(vioch, msg);
	else
		scmi_vio_msg_release(vioch, msg);
In scmi_vio_complete_cb():

	if (WARN_ON_ONCE(!vqueue->vdev->priv))
		return;
	vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];
	...
	spin_lock_irqsave(&vioch->lock, flags);
	...
	spin_unlock_irqrestore(&vioch->lock, flags);
	...
	spin_unlock_irqrestore(&vioch->lock, flags);

	msg->rx_len = length;
	scmi_rx_callback(vioch->cinfo,
			 msg_read_header(msg->input), msg);
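The callback drains the virtqueue using the usual disable/drain/re-enable idiom: callbacks are disabled while used buffers are popped, then re-enabled with a final re-check so a buffer that races in just before re-enabling is not lost. A minimal single-threaded sketch of the idiom, using illustrative queue stubs (q_pop, q_enable_notify) instead of the real virtqueue API:

#include <stdbool.h>
#include <stddef.h>

struct item { struct item *next; };
struct queue {
	struct item *head;
	bool notify_enabled;
};

static struct item *q_pop(struct queue *q)	/* like virtqueue_get_buf() */
{
	struct item *i = q->head;

	if (i)
		q->head = i->next;
	return i;
}

/* Re-enable notifications and report whether the queue is truly empty;
 * mirrors virtqueue_enable_cb(): false means "more work already there". */
static bool q_enable_notify(struct queue *q)
{
	q->notify_enabled = true;
	return q->head == NULL;
}

static void on_notify(struct queue *q, void (*handle)(struct item *))
{
	for (;;) {
		struct item *i;

		q->notify_enabled = false;	/* like virtqueue_disable_cb() */
		while ((i = q_pop(q)))
			handle(i);

		/* Close the race: if something slipped in, go around again. */
		if (q_enable_notify(q))
			break;
	}
}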
In scmi_vio_deferred_tx_worker():

	/*
	 * Process pre-fetched messages: these could be non-polled messages or
	 * late timed-out replies to polled messages dequeued by chance while
	 * polling for some other message: this worker is in charge of
	 * processing the valid non-expired messages and will finally free all
	 * of them.
	 */
	spin_lock_irqsave(&vioch->pending_lock, flags);

	/* Scan the list of possibly pre-fetched messages during polling. */
	list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) {
		list_del(&msg->list);
		...
		if (msg->poll_status == VIO_MSG_NOT_POLLED)
			scmi_rx_callback(vioch->cinfo,
					 msg_read_header(msg->input), msg);
		...
	}

	spin_unlock_irqrestore(&vioch->pending_lock, flags);
	...
	/* Process possibly still pending messages */
	scmi_vio_complete_cb(vioch->vqueue);
In virtio_get_max_msg():

	struct scmi_vio_channel *vioch = base_cinfo->transport_info;

	return vioch->max_msg;
375 "Deferring probe after not finding a bound scmi-virtio device\n"); in virtio_link_supplier()
376 return -EPROBE_DEFER; in virtio_link_supplier()
379 if (!device_link_add(dev, &scmi_vdev->dev, in virtio_link_supplier()
382 return -ECANCELED; in virtio_link_supplier()
In virtio_chan_available():

	channels = (struct scmi_vio_channel *)scmi_vdev->priv;
	...
	return vioch && !vioch->cinfo;
In virtio_chan_setup():

	if (!scmi_vdev)
		return -EPROBE_DEFER;

	vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];

	/* Setup a deferred worker for polling. */
	if (tx && !vioch->deferred_tx_wq) {
		...
		vioch->deferred_tx_wq =
			alloc_workqueue(dev_name(&scmi_vdev->dev),
					...);
		if (!vioch->deferred_tx_wq)
			return -ENOMEM;

		ret = devm_add_action_or_reset(dev, scmi_vio_deferred_tx_wq_release,
					       vioch->deferred_tx_wq);
		...
		INIT_WORK(&vioch->deferred_tx_work,
			  scmi_vio_deferred_tx_worker);
	}

	for (i = 0; i < vioch->max_msg; i++) {
		struct scmi_vio_msg *msg;

		msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		if (tx) {
			msg->request = devm_kzalloc(dev,
						    VIRTIO_SCMI_MAX_PDU_SIZE,
						    GFP_KERNEL);
			if (!msg->request)
				return -ENOMEM;
			spin_lock_init(&msg->poll_lock);
			refcount_set(&msg->users, 1);
		}

		msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE,
					  GFP_KERNEL);
		if (!msg->input)
			return -ENOMEM;
		...
	}
In virtio_chan_free():

	struct scmi_vio_channel *vioch = cinfo->transport_info;

	/*
	 * Break the device to inhibit further traffic while shutting down
	 * the channels: doing it later holding vioch->lock creates unsafe
	 * locking dependency chains as reported by LOCKDEP.
	 */
	virtio_break_device(vioch->vqueue->vdev);
In virtio_send_message():

	struct scmi_vio_channel *vioch = cinfo->transport_info;

	if (!scmi_vio_channel_acquire(vioch))
		return -EINVAL;

	msg = scmi_virtio_get_free_msg(vioch);
	if (!msg) {
		scmi_vio_channel_release(vioch);
		return -EBUSY;
	}

	msg_tx_prepare(msg->request, xfer);

	sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
	sg_init_one(&sg_in, msg->input, msg_response_size(xfer));

	spin_lock_irqsave(&vioch->lock, flags);

	/*
	 * If polling was requested, set up the polling-related data before
	 * virtqueue_add_sgs():
	 * - retrieve last used index (will be used as polling reference)
	 * - bind the polled message to the xfer via .priv
	 * - grab an additional msg refcount for the poll-path
	 */
	if (xfer->hdr.poll_completion) {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		msg->poll_status = VIO_MSG_POLLING;
		scmi_vio_msg_acquire(msg);
		smp_store_mb(xfer->priv, msg);
	}

	rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(vioch->cinfo->dev,
			"failed to add to TX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	if (rc) {
		/* Ensure order between xfer->priv clear and vq feeding */
		smp_store_mb(xfer->priv, NULL);
		if (xfer->hdr.poll_completion)
			scmi_vio_msg_release(vioch, msg);
		...
	}
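The polled path publishes the in-flight message through xfer->priv with smp_store_mb(), a store plus full barrier, so a completion running on another CPU either sees a fully initialized message or NULL; on submission failure the same primitive unpublishes it before the buffers are recycled. Below is a minimal C11 sketch of that publish/unpublish ordering; the names (submit_polled, enqueue_hw) are illustrative, not the driver's API.

#include <stdatomic.h>
#include <stddef.h>

struct vio_msg { int poll_status; };
struct xfer { _Atomic(struct vio_msg *) priv; };

enum { VIO_MSG_POLLING = 1, VIO_MSG_POLL_DONE = 2 };

/* Stub for the actual submission (virtqueue_add_sgs() plus kick). */
static int enqueue_hw(struct vio_msg *m) { (void)m; return 0; }

static int submit_polled(struct xfer *x, struct vio_msg *m)
{
	m->poll_status = VIO_MSG_POLLING;
	/* A seq_cst store acts like smp_store_mb(): the message is fully
	 * initialized before any completion path can observe it. */
	atomic_store(&x->priv, m);

	int rc = enqueue_hw(m);
	if (rc)
		atomic_store(&x->priv, NULL);	/* unpublish on failure */
	return rc;
}

/* Poll/completion side: a non-NULL priv is safe to dereference. */
static int xfer_poll_done(struct xfer *x)
{
	struct vio_msg *m = atomic_load(&x->priv);

	return m && m->poll_status == VIO_MSG_POLL_DONE;
}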
In virtio_fetch_response():

	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		msg_fetch_response(msg->input, msg->rx_len, xfer);

In virtio_fetch_notification():

	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
/**
 * virtio_mark_txdone - Mark transmission done
 * ...
 * Note that we never explicitly release still outstanding but timed-out
 * messages by forcibly re-adding them to the free-list inside the TX code
 * path; we instead let the IRQ/RX callbacks, or the deferred TX worker,
 * eventually clean them up once a late reply is finally received.
 *
 * This approach was deemed preferable since those pending timed-out buffers
 * are still effectively owned by the SCMI platform VirtIO device even after
 * the timeout expires.
 * ...
 * Here we free only the polled messages that were actually replied to (and
 * not by chance already processed on the IRQ path - the initial
 * scmi_vio_msg_release() takes care of this), and also any timed-out polled
 * message that appears to have been at least dequeued from the virtqueues
 * (VIO_MSG_POLL_DONE), since such messages won't be freed elsewhere.
 * ...
 * Possible late replies to timed-out polled messages will be eventually
 * freed by the RX callbacks, if delivered on the IRQ path, or by the
 * deferred TX worker, if dequeued on some other polling path.
 */
In virtio_mark_txdone():

	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scmi_vio_msg *msg = xfer->priv;
	...
	/* Ensure msg is unbound from xfer at this point */
	smp_store_mb(xfer->priv, NULL);

	/* Must be a polled xfer and not already freed on the IRQ path */
	if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
		scmi_vio_channel_release(vioch);
		return;
	}

	spin_lock_irqsave(&msg->poll_lock, flags);
	/* Do not free a timed-out polled message while still in flight */
	if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE)
		scmi_vio_msg_release(vioch, msg);
	else if (msg->poll_status == VIO_MSG_POLLING)
		msg->poll_status = VIO_MSG_POLL_TIMEOUT;
	spin_unlock_irqrestore(&msg->poll_lock, flags);
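The decision at the heart of this function can be isolated: free now only if the reply actually arrived (or the buffer was at least dequeued), otherwise just mark the message and leave the cleanup to whoever dequeues it later. A hedged sketch of that decision, using an illustrative enum and helper name rather than the driver's types:

enum poll_status { POLLING, POLL_DONE, POLL_TIMEOUT, NOT_POLLED };

/* Returns 1 if the caller should recycle the message now, 0 if ownership
 * effectively stays with the device-side buffer still in flight. */
static int txdone_should_free(int ret_was_timeout, enum poll_status *st)
{
	if (!ret_was_timeout || *st == POLL_DONE)
		return 1;		/* replied, or already dequeued */
	if (*st == POLLING)
		*st = POLL_TIMEOUT;	/* leave for IRQ/deferred worker */
	return 0;
}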
/**
 * virtio_poll_done - Provide polling support for VirtIO transport
 * ...
 * Dequeued buffers can belong to messages other than the one
 * we were poll-waiting for: if that is the case, such early-fetched buffers
 * are stashed on the pending list for later processing.
 *
 * So, basically, once something new is spotted we proceed to de-queue all the
 * freshly received used buffers until we find the polled-for one, or the
 * virtqueue looks empty; anything left over is handed to the deferred worker,
 * to avoid looping indefinitely inside this busy-waiting helper.
 * ...
 * Note that, since we do NOT have a per-message suppress-notification
 * mechanism, the polled-for message could alternatively be delivered on the
 * usual IRQ path on another CPU; in that case it is handled there and the
 * polling loop terminates transparently.
 */
In virtio_poll_done():

	struct scmi_vio_msg *next_msg, *msg = xfer->priv;
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	...
	/* Processed already by another polling loop on another CPU? */
	if (msg->poll_status == VIO_MSG_POLL_DONE)
		return true;
	...
	/* Has the cmdq index moved at all? */
	pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	...
	spin_lock_irqsave(&vioch->lock, flags);
	virtqueue_disable_cb(vioch->vqueue);

	/*
	 * Process all new messages till the polled-for message is found OR
	 * the vqueue is empty.
	 */
	while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
		...
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_POLLING) {
			next_msg->poll_status = VIO_MSG_POLL_DONE;
			...
		}
		spin_unlock(&next_msg->poll_lock);

		next_msg->rx_len = length;
		/* Is this the message we were polling for? */
		if (next_msg == msg) {
			found = true;
			break;
		}
		...
		/*
		 * Enqueue for later processing any non-polled message and any
		 * timed-out polled one that we happen to have dequeued.
		 */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_NOT_POLLED ||
		    next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) {
			spin_unlock(&next_msg->poll_lock);

			any_prefetched++;
			spin_lock(&vioch->pending_lock);
			list_add_tail(&next_msg->list,
				      &vioch->pending_cmds_list);
			spin_unlock(&vioch->pending_lock);
		} else {
			spin_unlock(&next_msg->poll_lock);
		}
	}
	...
	if (found) {
		pending = !virtqueue_enable_cb(vioch->vqueue);
	} else {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	}

	if (vioch->deferred_tx_wq && (any_prefetched || pending))
		queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);

	spin_unlock_irqrestore(&vioch->lock, flags);
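To make the drain-until-found structure concrete, here is a minimal single-threaded user-space sketch of the same loop, assuming toy stand-ins (cq_pop, pending_add) for the virtqueue and the pending list; it illustrates the control flow only, not the driver's locking.

#include <stdbool.h>
#include <stdio.h>

struct msg { struct msg *next; int id; };

/* Tiny LIFO stacks standing in for the virtqueue used-ring and the
 * pending_cmds_list; no locking since this sketch is single-threaded. */
static struct msg *cq_head, *pending_head;

static struct msg *cq_pop(void)		/* like virtqueue_get_buf() */
{
	struct msg *m = cq_head;

	if (m)
		cq_head = m->next;
	return m;
}

static void pending_add(struct msg *m)	/* like the pending list */
{
	m->next = pending_head;
	pending_head = m;
}

/* Drain completions until 'mine' shows up; stash foreign ones. */
static bool poll_done(struct msg *mine)
{
	struct msg *m;

	while ((m = cq_pop())) {
		if (m == mine)
			return true;	/* our polled-for reply */
		pending_add(m);		/* defer to the worker */
	}
	return false;			/* not completed yet */
}

int main(void)
{
	struct msg a = { .id = 1 }, b = { .id = 2 };

	/* Simulate completions arriving out of order: a pushed, then b. */
	a.next = NULL;	cq_head = &a;
	b.next = &a;	cq_head = &b;

	printf("a done: %d\n", poll_done(&a));		/* 1; b gets stashed */
	printf("b stashed: %d\n", pending_head == &b);	/* 1 */
	return 0;
}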
In scmi_vio_probe():

	struct device *dev = &vdev->dev;
	...
	/* Only one SCMI VirtIO device is allowed */
	if (scmi_vdev) {
		...
		return -EBUSY;
	}
	...
	if (!channels)
		return -ENOMEM;
	...
	vdev->priv = channels;
In scmi_vio_remove():
	...
	vdev->config->del_vqs(vdev);
In scmi_vio_validate():

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev,
			"device does not comply with spec version 1.x\n");
		return -EINVAL;
	}
In the virtio_driver definition:

	.driver.name = "scmi-virtio",
In the scmi_virtio_desc transport descriptor:

	/* For non-realtime virtio devices */
	.max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,