1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Virtio Transport driver for Arm System Control and Management Interface 4 * (SCMI). 5 * 6 * Copyright (C) 2020-2021 OpenSynergy. 7 * Copyright (C) 2021 ARM Ltd. 8 */ 9 10 /** 11 * DOC: Theory of Operation 12 * 13 * The scmi-virtio transport implements a driver for the virtio SCMI device. 14 * 15 * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx 16 * channel (virtio eventq, P2A channel). Each channel is implemented through a 17 * virtqueue. Access to each virtqueue is protected by spinlocks. 18 */ 19 20 #include <linux/errno.h> 21 #include <linux/slab.h> 22 #include <linux/virtio.h> 23 #include <linux/virtio_config.h> 24 25 #include <uapi/linux/virtio_ids.h> 26 #include <uapi/linux/virtio_scmi.h> 27 28 #include "common.h" 29 30 #define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */ 31 #define VIRTIO_SCMI_MAX_PDU_SIZE \ 32 (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD) 33 #define DESCRIPTORS_PER_TX_MSG 2 34 35 /** 36 * struct scmi_vio_channel - Transport channel information 37 * 38 * @vqueue: Associated virtqueue 39 * @cinfo: SCMI Tx or Rx channel 40 * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only 41 * @is_rx: Whether channel is an Rx channel 42 * @ready: Whether transport user is ready to hear about channel 43 * @max_msg: Maximum number of pending messages for this channel. 44 * @lock: Protects access to all members except ready. 45 * @ready_lock: Protects access to ready. If required, it must be taken before 46 * lock. 47 */ 48 struct scmi_vio_channel { 49 struct virtqueue *vqueue; 50 struct scmi_chan_info *cinfo; 51 struct list_head free_list; 52 bool is_rx; 53 bool ready; 54 unsigned int max_msg; 55 /* lock to protect access to all members except ready. */ 56 spinlock_t lock; 57 /* lock to rotects access to ready flag. */ 58 spinlock_t ready_lock; 59 }; 60 61 /** 62 * struct scmi_vio_msg - Transport PDU information 63 * 64 * @request: SDU used for commands 65 * @input: SDU used for (delayed) responses and notifications 66 * @list: List which scmi_vio_msg may be part of 67 * @rx_len: Input SDU size in bytes, once input has been received 68 */ 69 struct scmi_vio_msg { 70 struct scmi_msg_payld *request; 71 struct scmi_msg_payld *input; 72 struct list_head list; 73 unsigned int rx_len; 74 }; 75 76 /* Only one SCMI VirtIO device can possibly exist */ 77 static struct virtio_device *scmi_vdev; 78 79 static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) 80 { 81 return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS); 82 } 83 84 static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, 85 struct scmi_vio_msg *msg) 86 { 87 struct scatterlist sg_in; 88 int rc; 89 unsigned long flags; 90 91 sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE); 92 93 spin_lock_irqsave(&vioch->lock, flags); 94 95 rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); 96 if (rc) 97 dev_err_once(vioch->cinfo->dev, 98 "failed to add to virtqueue (%d)\n", rc); 99 else 100 virtqueue_kick(vioch->vqueue); 101 102 spin_unlock_irqrestore(&vioch->lock, flags); 103 104 return rc; 105 } 106 107 static void scmi_finalize_message(struct scmi_vio_channel *vioch, 108 struct scmi_vio_msg *msg) 109 { 110 if (vioch->is_rx) { 111 scmi_vio_feed_vq_rx(vioch, msg); 112 } else { 113 unsigned long flags; 114 115 spin_lock_irqsave(&vioch->lock, flags); 116 list_add(&msg->list, &vioch->free_list); 117 spin_unlock_irqrestore(&vioch->lock, flags); 118 } 119 } 120 121 static void scmi_vio_complete_cb(struct virtqueue *vqueue) 122 { 123 unsigned long ready_flags; 124 unsigned long flags; 125 unsigned int length; 126 struct scmi_vio_channel *vioch; 127 struct scmi_vio_msg *msg; 128 bool cb_enabled = true; 129 130 if (WARN_ON_ONCE(!vqueue->vdev->priv)) 131 return; 132 vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index]; 133 134 for (;;) { 135 spin_lock_irqsave(&vioch->ready_lock, ready_flags); 136 137 if (!vioch->ready) { 138 if (!cb_enabled) 139 (void)virtqueue_enable_cb(vqueue); 140 goto unlock_ready_out; 141 } 142 143 spin_lock_irqsave(&vioch->lock, flags); 144 if (cb_enabled) { 145 virtqueue_disable_cb(vqueue); 146 cb_enabled = false; 147 } 148 msg = virtqueue_get_buf(vqueue, &length); 149 if (!msg) { 150 if (virtqueue_enable_cb(vqueue)) 151 goto unlock_out; 152 cb_enabled = true; 153 } 154 spin_unlock_irqrestore(&vioch->lock, flags); 155 156 if (msg) { 157 msg->rx_len = length; 158 scmi_rx_callback(vioch->cinfo, 159 msg_read_header(msg->input), msg); 160 161 scmi_finalize_message(vioch, msg); 162 } 163 164 spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); 165 } 166 167 unlock_out: 168 spin_unlock_irqrestore(&vioch->lock, flags); 169 unlock_ready_out: 170 spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); 171 } 172 173 static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" }; 174 175 static vq_callback_t *scmi_vio_complete_callbacks[] = { 176 scmi_vio_complete_cb, 177 scmi_vio_complete_cb 178 }; 179 180 static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo) 181 { 182 struct scmi_vio_channel *vioch = base_cinfo->transport_info; 183 184 return vioch->max_msg; 185 } 186 187 static int virtio_link_supplier(struct device *dev) 188 { 189 if (!scmi_vdev) { 190 dev_notice_once(dev, 191 "Deferring probe after not finding a bound scmi-virtio device\n"); 192 return -EPROBE_DEFER; 193 } 194 195 if (!device_link_add(dev, &scmi_vdev->dev, 196 DL_FLAG_AUTOREMOVE_CONSUMER)) { 197 dev_err(dev, "Adding link to supplier virtio device failed\n"); 198 return -ECANCELED; 199 } 200 201 return 0; 202 } 203 204 static bool virtio_chan_available(struct device *dev, int idx) 205 { 206 struct scmi_vio_channel *channels, *vioch = NULL; 207 208 if (WARN_ON_ONCE(!scmi_vdev)) 209 return false; 210 211 channels = (struct scmi_vio_channel *)scmi_vdev->priv; 212 213 switch (idx) { 214 case VIRTIO_SCMI_VQ_TX: 215 vioch = &channels[VIRTIO_SCMI_VQ_TX]; 216 break; 217 case VIRTIO_SCMI_VQ_RX: 218 if (scmi_vio_have_vq_rx(scmi_vdev)) 219 vioch = &channels[VIRTIO_SCMI_VQ_RX]; 220 break; 221 default: 222 return false; 223 } 224 225 return vioch && !vioch->cinfo; 226 } 227 228 static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, 229 bool tx) 230 { 231 unsigned long flags; 232 struct scmi_vio_channel *vioch; 233 int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX; 234 int i; 235 236 if (!scmi_vdev) 237 return -EPROBE_DEFER; 238 239 vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index]; 240 241 for (i = 0; i < vioch->max_msg; i++) { 242 struct scmi_vio_msg *msg; 243 244 msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL); 245 if (!msg) 246 return -ENOMEM; 247 248 if (tx) { 249 msg->request = devm_kzalloc(cinfo->dev, 250 VIRTIO_SCMI_MAX_PDU_SIZE, 251 GFP_KERNEL); 252 if (!msg->request) 253 return -ENOMEM; 254 } 255 256 msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE, 257 GFP_KERNEL); 258 if (!msg->input) 259 return -ENOMEM; 260 261 if (tx) { 262 spin_lock_irqsave(&vioch->lock, flags); 263 list_add_tail(&msg->list, &vioch->free_list); 264 spin_unlock_irqrestore(&vioch->lock, flags); 265 } else { 266 scmi_vio_feed_vq_rx(vioch, msg); 267 } 268 } 269 270 spin_lock_irqsave(&vioch->lock, flags); 271 cinfo->transport_info = vioch; 272 /* Indirectly setting channel not available any more */ 273 vioch->cinfo = cinfo; 274 spin_unlock_irqrestore(&vioch->lock, flags); 275 276 spin_lock_irqsave(&vioch->ready_lock, flags); 277 vioch->ready = true; 278 spin_unlock_irqrestore(&vioch->ready_lock, flags); 279 280 return 0; 281 } 282 283 static int virtio_chan_free(int id, void *p, void *data) 284 { 285 unsigned long flags; 286 struct scmi_chan_info *cinfo = p; 287 struct scmi_vio_channel *vioch = cinfo->transport_info; 288 289 spin_lock_irqsave(&vioch->ready_lock, flags); 290 vioch->ready = false; 291 spin_unlock_irqrestore(&vioch->ready_lock, flags); 292 293 scmi_free_channel(cinfo, data, id); 294 295 spin_lock_irqsave(&vioch->lock, flags); 296 vioch->cinfo = NULL; 297 spin_unlock_irqrestore(&vioch->lock, flags); 298 299 return 0; 300 } 301 302 static int virtio_send_message(struct scmi_chan_info *cinfo, 303 struct scmi_xfer *xfer) 304 { 305 struct scmi_vio_channel *vioch = cinfo->transport_info; 306 struct scatterlist sg_out; 307 struct scatterlist sg_in; 308 struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in }; 309 unsigned long flags; 310 int rc; 311 struct scmi_vio_msg *msg; 312 313 spin_lock_irqsave(&vioch->lock, flags); 314 315 if (list_empty(&vioch->free_list)) { 316 spin_unlock_irqrestore(&vioch->lock, flags); 317 return -EBUSY; 318 } 319 320 msg = list_first_entry(&vioch->free_list, typeof(*msg), list); 321 list_del(&msg->list); 322 323 msg_tx_prepare(msg->request, xfer); 324 325 sg_init_one(&sg_out, msg->request, msg_command_size(xfer)); 326 sg_init_one(&sg_in, msg->input, msg_response_size(xfer)); 327 328 rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); 329 if (rc) { 330 list_add(&msg->list, &vioch->free_list); 331 dev_err_once(vioch->cinfo->dev, 332 "%s() failed to add to virtqueue (%d)\n", __func__, 333 rc); 334 } else { 335 virtqueue_kick(vioch->vqueue); 336 } 337 338 spin_unlock_irqrestore(&vioch->lock, flags); 339 340 return rc; 341 } 342 343 static void virtio_fetch_response(struct scmi_chan_info *cinfo, 344 struct scmi_xfer *xfer) 345 { 346 struct scmi_vio_msg *msg = xfer->priv; 347 348 if (msg) { 349 msg_fetch_response(msg->input, msg->rx_len, xfer); 350 xfer->priv = NULL; 351 } 352 } 353 354 static void virtio_fetch_notification(struct scmi_chan_info *cinfo, 355 size_t max_len, struct scmi_xfer *xfer) 356 { 357 struct scmi_vio_msg *msg = xfer->priv; 358 359 if (msg) { 360 msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer); 361 xfer->priv = NULL; 362 } 363 } 364 365 static const struct scmi_transport_ops scmi_virtio_ops = { 366 .link_supplier = virtio_link_supplier, 367 .chan_available = virtio_chan_available, 368 .chan_setup = virtio_chan_setup, 369 .chan_free = virtio_chan_free, 370 .get_max_msg = virtio_get_max_msg, 371 .send_message = virtio_send_message, 372 .fetch_response = virtio_fetch_response, 373 .fetch_notification = virtio_fetch_notification, 374 }; 375 376 static int scmi_vio_probe(struct virtio_device *vdev) 377 { 378 struct device *dev = &vdev->dev; 379 struct scmi_vio_channel *channels; 380 bool have_vq_rx; 381 int vq_cnt; 382 int i; 383 int ret; 384 struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT]; 385 386 /* Only one SCMI VirtiO device allowed */ 387 if (scmi_vdev) 388 return -EINVAL; 389 390 have_vq_rx = scmi_vio_have_vq_rx(vdev); 391 vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1; 392 393 channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL); 394 if (!channels) 395 return -ENOMEM; 396 397 if (have_vq_rx) 398 channels[VIRTIO_SCMI_VQ_RX].is_rx = true; 399 400 ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks, 401 scmi_vio_vqueue_names, NULL); 402 if (ret) { 403 dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt); 404 return ret; 405 } 406 407 for (i = 0; i < vq_cnt; i++) { 408 unsigned int sz; 409 410 spin_lock_init(&channels[i].lock); 411 spin_lock_init(&channels[i].ready_lock); 412 INIT_LIST_HEAD(&channels[i].free_list); 413 channels[i].vqueue = vqs[i]; 414 415 sz = virtqueue_get_vring_size(channels[i].vqueue); 416 /* Tx messages need multiple descriptors. */ 417 if (!channels[i].is_rx) 418 sz /= DESCRIPTORS_PER_TX_MSG; 419 420 if (sz > MSG_TOKEN_MAX) { 421 dev_info_once(dev, 422 "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n", 423 channels[i].is_rx ? "rx" : "tx", 424 sz, MSG_TOKEN_MAX); 425 sz = MSG_TOKEN_MAX; 426 } 427 channels[i].max_msg = sz; 428 } 429 430 vdev->priv = channels; 431 scmi_vdev = vdev; 432 433 return 0; 434 } 435 436 static void scmi_vio_remove(struct virtio_device *vdev) 437 { 438 vdev->config->reset(vdev); 439 vdev->config->del_vqs(vdev); 440 scmi_vdev = NULL; 441 } 442 443 static int scmi_vio_validate(struct virtio_device *vdev) 444 { 445 if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { 446 dev_err(&vdev->dev, 447 "device does not comply with spec version 1.x\n"); 448 return -EINVAL; 449 } 450 451 return 0; 452 } 453 454 static unsigned int features[] = { 455 VIRTIO_SCMI_F_P2A_CHANNELS, 456 }; 457 458 static const struct virtio_device_id id_table[] = { 459 { VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID }, 460 { 0 } 461 }; 462 463 static struct virtio_driver virtio_scmi_driver = { 464 .driver.name = "scmi-virtio", 465 .driver.owner = THIS_MODULE, 466 .feature_table = features, 467 .feature_table_size = ARRAY_SIZE(features), 468 .id_table = id_table, 469 .probe = scmi_vio_probe, 470 .remove = scmi_vio_remove, 471 .validate = scmi_vio_validate, 472 }; 473 474 static int __init virtio_scmi_init(void) 475 { 476 return register_virtio_driver(&virtio_scmi_driver); 477 } 478 479 static void __exit virtio_scmi_exit(void) 480 { 481 unregister_virtio_driver(&virtio_scmi_driver); 482 } 483 484 const struct scmi_desc scmi_virtio_desc = { 485 .transport_init = virtio_scmi_init, 486 .transport_exit = virtio_scmi_exit, 487 .ops = &scmi_virtio_ops, 488 .max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */ 489 .max_msg = 0, /* overridden by virtio_get_max_msg() */ 490 .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE, 491 }; 492