1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* Driver for Virtio crypto device. 3 * 4 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD. 5 */ 6 7 #include <linux/err.h> 8 #include <linux/module.h> 9 #include <linux/virtio_config.h> 10 #include <linux/cpu.h> 11 12 #include <uapi/linux/virtio_crypto.h> 13 #include "virtio_crypto_common.h" 14 15 16 void 17 virtcrypto_clear_request(struct virtio_crypto_request *vc_req) 18 { 19 if (vc_req) { 20 kfree_sensitive(vc_req->req_data); 21 kfree(vc_req->sgs); 22 } 23 } 24 25 static void virtcrypto_dataq_callback(struct virtqueue *vq) 26 { 27 struct virtio_crypto *vcrypto = vq->vdev->priv; 28 struct virtio_crypto_request *vc_req; 29 unsigned long flags; 30 unsigned int len; 31 unsigned int qid = vq->index; 32 33 spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags); 34 do { 35 virtqueue_disable_cb(vq); 36 while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) { 37 spin_unlock_irqrestore( 38 &vcrypto->data_vq[qid].lock, flags); 39 if (vc_req->alg_cb) 40 vc_req->alg_cb(vc_req, len); 41 spin_lock_irqsave( 42 &vcrypto->data_vq[qid].lock, flags); 43 } 44 } while (!virtqueue_enable_cb(vq)); 45 spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags); 46 } 47 48 static int virtcrypto_find_vqs(struct virtio_crypto *vi) 49 { 50 vq_callback_t **callbacks; 51 struct virtqueue **vqs; 52 int ret = -ENOMEM; 53 int i, total_vqs; 54 const char **names; 55 struct device *dev = &vi->vdev->dev; 56 57 /* 58 * We expect 1 data virtqueue, followed by 59 * possible N-1 data queues used in multiqueue mode, 60 * followed by control vq. 61 */ 62 total_vqs = vi->max_data_queues + 1; 63 64 /* Allocate space for find_vqs parameters */ 65 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); 66 if (!vqs) 67 goto err_vq; 68 callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL); 69 if (!callbacks) 70 goto err_callback; 71 names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL); 72 if (!names) 73 goto err_names; 74 75 /* Parameters for control virtqueue */ 76 callbacks[total_vqs - 1] = NULL; 77 names[total_vqs - 1] = "controlq"; 78 79 /* Allocate/initialize parameters for data virtqueues */ 80 for (i = 0; i < vi->max_data_queues; i++) { 81 callbacks[i] = virtcrypto_dataq_callback; 82 snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name), 83 "dataq.%d", i); 84 names[i] = vi->data_vq[i].name; 85 } 86 87 ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL); 88 if (ret) 89 goto err_find; 90 91 vi->ctrl_vq = vqs[total_vqs - 1]; 92 93 for (i = 0; i < vi->max_data_queues; i++) { 94 spin_lock_init(&vi->data_vq[i].lock); 95 vi->data_vq[i].vq = vqs[i]; 96 /* Initialize crypto engine */ 97 vi->data_vq[i].engine = crypto_engine_alloc_init(dev, 1); 98 if (!vi->data_vq[i].engine) { 99 ret = -ENOMEM; 100 goto err_engine; 101 } 102 } 103 104 kfree(names); 105 kfree(callbacks); 106 kfree(vqs); 107 108 return 0; 109 110 err_engine: 111 err_find: 112 kfree(names); 113 err_names: 114 kfree(callbacks); 115 err_callback: 116 kfree(vqs); 117 err_vq: 118 return ret; 119 } 120 121 static int virtcrypto_alloc_queues(struct virtio_crypto *vi) 122 { 123 vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq), 124 GFP_KERNEL); 125 if (!vi->data_vq) 126 return -ENOMEM; 127 128 return 0; 129 } 130 131 static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu) 132 { 133 int i; 134 135 if (vi->affinity_hint_set) { 136 for (i = 0; i < vi->max_data_queues; i++) 137 virtqueue_set_affinity(vi->data_vq[i].vq, NULL); 138 139 vi->affinity_hint_set = false; 140 } 141 } 142 143 static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto) 144 { 145 int i = 0; 146 int cpu; 147 148 /* 149 * In single queue mode, we don't set the cpu affinity. 150 */ 151 if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) { 152 virtcrypto_clean_affinity(vcrypto, -1); 153 return; 154 } 155 156 /* 157 * In multiqueue mode, we let the queue to be private to one cpu 158 * by setting the affinity hint to eliminate the contention. 159 * 160 * TODO: adds cpu hotplug support by register cpu notifier. 161 * 162 */ 163 for_each_online_cpu(cpu) { 164 virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu)); 165 if (++i >= vcrypto->max_data_queues) 166 break; 167 } 168 169 vcrypto->affinity_hint_set = true; 170 } 171 172 static void virtcrypto_free_queues(struct virtio_crypto *vi) 173 { 174 kfree(vi->data_vq); 175 } 176 177 static int virtcrypto_init_vqs(struct virtio_crypto *vi) 178 { 179 int ret; 180 181 /* Allocate send & receive queues */ 182 ret = virtcrypto_alloc_queues(vi); 183 if (ret) 184 goto err; 185 186 ret = virtcrypto_find_vqs(vi); 187 if (ret) 188 goto err_free; 189 190 cpus_read_lock(); 191 virtcrypto_set_affinity(vi); 192 cpus_read_unlock(); 193 194 return 0; 195 196 err_free: 197 virtcrypto_free_queues(vi); 198 err: 199 return ret; 200 } 201 202 static int virtcrypto_update_status(struct virtio_crypto *vcrypto) 203 { 204 u32 status; 205 int err; 206 207 virtio_cread_le(vcrypto->vdev, 208 struct virtio_crypto_config, status, &status); 209 210 /* 211 * Unknown status bits would be a host error and the driver 212 * should consider the device to be broken. 213 */ 214 if (status & (~VIRTIO_CRYPTO_S_HW_READY)) { 215 dev_warn(&vcrypto->vdev->dev, 216 "Unknown status bits: 0x%x\n", status); 217 218 virtio_break_device(vcrypto->vdev); 219 return -EPERM; 220 } 221 222 if (vcrypto->status == status) 223 return 0; 224 225 vcrypto->status = status; 226 227 if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) { 228 err = virtcrypto_dev_start(vcrypto); 229 if (err) { 230 dev_err(&vcrypto->vdev->dev, 231 "Failed to start virtio crypto device.\n"); 232 233 return -EPERM; 234 } 235 dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n"); 236 } else { 237 virtcrypto_dev_stop(vcrypto); 238 dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n"); 239 } 240 241 return 0; 242 } 243 244 static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto) 245 { 246 int32_t i; 247 int ret; 248 249 for (i = 0; i < vcrypto->max_data_queues; i++) { 250 if (vcrypto->data_vq[i].engine) { 251 ret = crypto_engine_start(vcrypto->data_vq[i].engine); 252 if (ret) 253 goto err; 254 } 255 } 256 257 return 0; 258 259 err: 260 while (--i >= 0) 261 if (vcrypto->data_vq[i].engine) 262 crypto_engine_exit(vcrypto->data_vq[i].engine); 263 264 return ret; 265 } 266 267 static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto) 268 { 269 u32 i; 270 271 for (i = 0; i < vcrypto->max_data_queues; i++) 272 if (vcrypto->data_vq[i].engine) 273 crypto_engine_exit(vcrypto->data_vq[i].engine); 274 } 275 276 static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto) 277 { 278 struct virtio_device *vdev = vcrypto->vdev; 279 280 virtcrypto_clean_affinity(vcrypto, -1); 281 282 vdev->config->del_vqs(vdev); 283 284 virtcrypto_free_queues(vcrypto); 285 } 286 287 static int virtcrypto_probe(struct virtio_device *vdev) 288 { 289 int err = -EFAULT; 290 struct virtio_crypto *vcrypto; 291 u32 max_data_queues = 0, max_cipher_key_len = 0; 292 u32 max_auth_key_len = 0; 293 u64 max_size = 0; 294 u32 cipher_algo_l = 0; 295 u32 cipher_algo_h = 0; 296 u32 hash_algo = 0; 297 u32 mac_algo_l = 0; 298 u32 mac_algo_h = 0; 299 u32 aead_algo = 0; 300 u32 akcipher_algo = 0; 301 u32 crypto_services = 0; 302 303 if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 304 return -ENODEV; 305 306 if (!vdev->config->get) { 307 dev_err(&vdev->dev, "%s failure: config access disabled\n", 308 __func__); 309 return -EINVAL; 310 } 311 312 if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) { 313 /* 314 * If the accelerator is connected to a node with no memory 315 * there is no point in using the accelerator since the remote 316 * memory transaction will be very slow. 317 */ 318 dev_err(&vdev->dev, "Invalid NUMA configuration.\n"); 319 return -EINVAL; 320 } 321 322 vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL, 323 dev_to_node(&vdev->dev)); 324 if (!vcrypto) 325 return -ENOMEM; 326 327 virtio_cread_le(vdev, struct virtio_crypto_config, 328 max_dataqueues, &max_data_queues); 329 if (max_data_queues < 1) 330 max_data_queues = 1; 331 332 virtio_cread_le(vdev, struct virtio_crypto_config, 333 max_cipher_key_len, &max_cipher_key_len); 334 virtio_cread_le(vdev, struct virtio_crypto_config, 335 max_auth_key_len, &max_auth_key_len); 336 virtio_cread_le(vdev, struct virtio_crypto_config, 337 max_size, &max_size); 338 virtio_cread_le(vdev, struct virtio_crypto_config, 339 crypto_services, &crypto_services); 340 virtio_cread_le(vdev, struct virtio_crypto_config, 341 cipher_algo_l, &cipher_algo_l); 342 virtio_cread_le(vdev, struct virtio_crypto_config, 343 cipher_algo_h, &cipher_algo_h); 344 virtio_cread_le(vdev, struct virtio_crypto_config, 345 hash_algo, &hash_algo); 346 virtio_cread_le(vdev, struct virtio_crypto_config, 347 mac_algo_l, &mac_algo_l); 348 virtio_cread_le(vdev, struct virtio_crypto_config, 349 mac_algo_h, &mac_algo_h); 350 virtio_cread_le(vdev, struct virtio_crypto_config, 351 aead_algo, &aead_algo); 352 if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER)) 353 virtio_cread_le(vdev, struct virtio_crypto_config, 354 akcipher_algo, &akcipher_algo); 355 356 /* Add virtio crypto device to global table */ 357 err = virtcrypto_devmgr_add_dev(vcrypto); 358 if (err) { 359 dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n"); 360 goto free; 361 } 362 vcrypto->owner = THIS_MODULE; 363 vcrypto = vdev->priv = vcrypto; 364 vcrypto->vdev = vdev; 365 366 spin_lock_init(&vcrypto->ctrl_lock); 367 368 /* Use single data queue as default */ 369 vcrypto->curr_queue = 1; 370 vcrypto->max_data_queues = max_data_queues; 371 vcrypto->max_cipher_key_len = max_cipher_key_len; 372 vcrypto->max_auth_key_len = max_auth_key_len; 373 vcrypto->max_size = max_size; 374 vcrypto->crypto_services = crypto_services; 375 vcrypto->cipher_algo_l = cipher_algo_l; 376 vcrypto->cipher_algo_h = cipher_algo_h; 377 vcrypto->mac_algo_l = mac_algo_l; 378 vcrypto->mac_algo_h = mac_algo_h; 379 vcrypto->hash_algo = hash_algo; 380 vcrypto->aead_algo = aead_algo; 381 vcrypto->akcipher_algo = akcipher_algo; 382 383 dev_info(&vdev->dev, 384 "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n", 385 vcrypto->max_data_queues, 386 vcrypto->max_cipher_key_len, 387 vcrypto->max_auth_key_len, 388 vcrypto->max_size); 389 390 err = virtcrypto_init_vqs(vcrypto); 391 if (err) { 392 dev_err(&vdev->dev, "Failed to initialize vqs.\n"); 393 goto free_dev; 394 } 395 396 err = virtcrypto_start_crypto_engines(vcrypto); 397 if (err) 398 goto free_vqs; 399 400 virtio_device_ready(vdev); 401 402 err = virtcrypto_update_status(vcrypto); 403 if (err) 404 goto free_engines; 405 406 return 0; 407 408 free_engines: 409 virtcrypto_clear_crypto_engines(vcrypto); 410 free_vqs: 411 virtio_reset_device(vdev); 412 virtcrypto_del_vqs(vcrypto); 413 free_dev: 414 virtcrypto_devmgr_rm_dev(vcrypto); 415 free: 416 kfree(vcrypto); 417 return err; 418 } 419 420 static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto) 421 { 422 struct virtio_crypto_request *vc_req; 423 int i; 424 struct virtqueue *vq; 425 426 for (i = 0; i < vcrypto->max_data_queues; i++) { 427 vq = vcrypto->data_vq[i].vq; 428 while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) { 429 kfree(vc_req->req_data); 430 kfree(vc_req->sgs); 431 } 432 } 433 } 434 435 static void virtcrypto_remove(struct virtio_device *vdev) 436 { 437 struct virtio_crypto *vcrypto = vdev->priv; 438 439 dev_info(&vdev->dev, "Start virtcrypto_remove.\n"); 440 441 if (virtcrypto_dev_started(vcrypto)) 442 virtcrypto_dev_stop(vcrypto); 443 virtio_reset_device(vdev); 444 virtcrypto_free_unused_reqs(vcrypto); 445 virtcrypto_clear_crypto_engines(vcrypto); 446 virtcrypto_del_vqs(vcrypto); 447 virtcrypto_devmgr_rm_dev(vcrypto); 448 kfree(vcrypto); 449 } 450 451 static void virtcrypto_config_changed(struct virtio_device *vdev) 452 { 453 struct virtio_crypto *vcrypto = vdev->priv; 454 455 virtcrypto_update_status(vcrypto); 456 } 457 458 #ifdef CONFIG_PM_SLEEP 459 static int virtcrypto_freeze(struct virtio_device *vdev) 460 { 461 struct virtio_crypto *vcrypto = vdev->priv; 462 463 virtio_reset_device(vdev); 464 virtcrypto_free_unused_reqs(vcrypto); 465 if (virtcrypto_dev_started(vcrypto)) 466 virtcrypto_dev_stop(vcrypto); 467 468 virtcrypto_clear_crypto_engines(vcrypto); 469 virtcrypto_del_vqs(vcrypto); 470 return 0; 471 } 472 473 static int virtcrypto_restore(struct virtio_device *vdev) 474 { 475 struct virtio_crypto *vcrypto = vdev->priv; 476 int err; 477 478 err = virtcrypto_init_vqs(vcrypto); 479 if (err) 480 return err; 481 482 err = virtcrypto_start_crypto_engines(vcrypto); 483 if (err) 484 goto free_vqs; 485 486 virtio_device_ready(vdev); 487 488 err = virtcrypto_dev_start(vcrypto); 489 if (err) { 490 dev_err(&vdev->dev, "Failed to start virtio crypto device.\n"); 491 goto free_engines; 492 } 493 494 return 0; 495 496 free_engines: 497 virtcrypto_clear_crypto_engines(vcrypto); 498 free_vqs: 499 virtio_reset_device(vdev); 500 virtcrypto_del_vqs(vcrypto); 501 return err; 502 } 503 #endif 504 505 static const unsigned int features[] = { 506 /* none */ 507 }; 508 509 static const struct virtio_device_id id_table[] = { 510 { VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID }, 511 { 0 }, 512 }; 513 514 static struct virtio_driver virtio_crypto_driver = { 515 .driver.name = KBUILD_MODNAME, 516 .driver.owner = THIS_MODULE, 517 .feature_table = features, 518 .feature_table_size = ARRAY_SIZE(features), 519 .id_table = id_table, 520 .probe = virtcrypto_probe, 521 .remove = virtcrypto_remove, 522 .config_changed = virtcrypto_config_changed, 523 #ifdef CONFIG_PM_SLEEP 524 .freeze = virtcrypto_freeze, 525 .restore = virtcrypto_restore, 526 #endif 527 }; 528 529 module_virtio_driver(virtio_crypto_driver); 530 531 MODULE_DEVICE_TABLE(virtio, id_table); 532 MODULE_DESCRIPTION("virtio crypto device driver"); 533 MODULE_LICENSE("GPL"); 534 MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>"); 535