1 /* Driver for Virtio crypto device. 2 * 3 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD. 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, see <http://www.gnu.org/licenses/>. 17 */ 18 19 #include <linux/err.h> 20 #include <linux/module.h> 21 #include <linux/virtio_config.h> 22 #include <linux/cpu.h> 23 24 #include <uapi/linux/virtio_crypto.h> 25 #include "virtio_crypto_common.h" 26 27 28 void 29 virtcrypto_clear_request(struct virtio_crypto_request *vc_req) 30 { 31 if (vc_req) { 32 kzfree(vc_req->req_data); 33 kfree(vc_req->sgs); 34 } 35 } 36 37 static void virtcrypto_dataq_callback(struct virtqueue *vq) 38 { 39 struct virtio_crypto *vcrypto = vq->vdev->priv; 40 struct virtio_crypto_request *vc_req; 41 unsigned long flags; 42 unsigned int len; 43 unsigned int qid = vq->index; 44 45 spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags); 46 do { 47 virtqueue_disable_cb(vq); 48 while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) { 49 spin_unlock_irqrestore( 50 &vcrypto->data_vq[qid].lock, flags); 51 if (vc_req->alg_cb) 52 vc_req->alg_cb(vc_req, len); 53 spin_lock_irqsave( 54 &vcrypto->data_vq[qid].lock, flags); 55 } 56 } while (!virtqueue_enable_cb(vq)); 57 spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags); 58 } 59 60 static int virtcrypto_find_vqs(struct virtio_crypto *vi) 61 { 62 vq_callback_t **callbacks; 63 struct virtqueue **vqs; 64 int ret = -ENOMEM; 65 int i, total_vqs; 66 const char **names; 67 struct device *dev = &vi->vdev->dev; 68 69 /* 70 * We expect 1 data virtqueue, followed by 71 * possible N-1 data queues used in multiqueue mode, 72 * followed by control vq. 73 */ 74 total_vqs = vi->max_data_queues + 1; 75 76 /* Allocate space for find_vqs parameters */ 77 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); 78 if (!vqs) 79 goto err_vq; 80 callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL); 81 if (!callbacks) 82 goto err_callback; 83 names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL); 84 if (!names) 85 goto err_names; 86 87 /* Parameters for control virtqueue */ 88 callbacks[total_vqs - 1] = NULL; 89 names[total_vqs - 1] = "controlq"; 90 91 /* Allocate/initialize parameters for data virtqueues */ 92 for (i = 0; i < vi->max_data_queues; i++) { 93 callbacks[i] = virtcrypto_dataq_callback; 94 snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name), 95 "dataq.%d", i); 96 names[i] = vi->data_vq[i].name; 97 } 98 99 ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL); 100 if (ret) 101 goto err_find; 102 103 vi->ctrl_vq = vqs[total_vqs - 1]; 104 105 for (i = 0; i < vi->max_data_queues; i++) { 106 spin_lock_init(&vi->data_vq[i].lock); 107 vi->data_vq[i].vq = vqs[i]; 108 /* Initialize crypto engine */ 109 vi->data_vq[i].engine = crypto_engine_alloc_init(dev, 1); 110 if (!vi->data_vq[i].engine) { 111 ret = -ENOMEM; 112 goto err_engine; 113 } 114 } 115 116 kfree(names); 117 kfree(callbacks); 118 kfree(vqs); 119 120 return 0; 121 122 err_engine: 123 err_find: 124 kfree(names); 125 err_names: 126 kfree(callbacks); 127 err_callback: 128 kfree(vqs); 129 err_vq: 130 return ret; 131 } 132 133 static int virtcrypto_alloc_queues(struct virtio_crypto *vi) 134 { 135 vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq), 136 GFP_KERNEL); 137 if (!vi->data_vq) 138 return -ENOMEM; 139 140 return 0; 141 } 142 143 static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu) 144 { 145 int i; 146 147 if (vi->affinity_hint_set) { 148 for (i = 0; i < vi->max_data_queues; i++) 149 virtqueue_set_affinity(vi->data_vq[i].vq, NULL); 150 151 vi->affinity_hint_set = false; 152 } 153 } 154 155 static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto) 156 { 157 int i = 0; 158 int cpu; 159 160 /* 161 * In single queue mode, we don't set the cpu affinity. 162 */ 163 if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) { 164 virtcrypto_clean_affinity(vcrypto, -1); 165 return; 166 } 167 168 /* 169 * In multiqueue mode, we let the queue to be private to one cpu 170 * by setting the affinity hint to eliminate the contention. 171 * 172 * TODO: adds cpu hotplug support by register cpu notifier. 173 * 174 */ 175 for_each_online_cpu(cpu) { 176 virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu)); 177 if (++i >= vcrypto->max_data_queues) 178 break; 179 } 180 181 vcrypto->affinity_hint_set = true; 182 } 183 184 static void virtcrypto_free_queues(struct virtio_crypto *vi) 185 { 186 kfree(vi->data_vq); 187 } 188 189 static int virtcrypto_init_vqs(struct virtio_crypto *vi) 190 { 191 int ret; 192 193 /* Allocate send & receive queues */ 194 ret = virtcrypto_alloc_queues(vi); 195 if (ret) 196 goto err; 197 198 ret = virtcrypto_find_vqs(vi); 199 if (ret) 200 goto err_free; 201 202 get_online_cpus(); 203 virtcrypto_set_affinity(vi); 204 put_online_cpus(); 205 206 return 0; 207 208 err_free: 209 virtcrypto_free_queues(vi); 210 err: 211 return ret; 212 } 213 214 static int virtcrypto_update_status(struct virtio_crypto *vcrypto) 215 { 216 u32 status; 217 int err; 218 219 virtio_cread(vcrypto->vdev, 220 struct virtio_crypto_config, status, &status); 221 222 /* 223 * Unknown status bits would be a host error and the driver 224 * should consider the device to be broken. 225 */ 226 if (status & (~VIRTIO_CRYPTO_S_HW_READY)) { 227 dev_warn(&vcrypto->vdev->dev, 228 "Unknown status bits: 0x%x\n", status); 229 230 virtio_break_device(vcrypto->vdev); 231 return -EPERM; 232 } 233 234 if (vcrypto->status == status) 235 return 0; 236 237 vcrypto->status = status; 238 239 if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) { 240 err = virtcrypto_dev_start(vcrypto); 241 if (err) { 242 dev_err(&vcrypto->vdev->dev, 243 "Failed to start virtio crypto device.\n"); 244 245 return -EPERM; 246 } 247 dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n"); 248 } else { 249 virtcrypto_dev_stop(vcrypto); 250 dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n"); 251 } 252 253 return 0; 254 } 255 256 static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto) 257 { 258 int32_t i; 259 int ret; 260 261 for (i = 0; i < vcrypto->max_data_queues; i++) { 262 if (vcrypto->data_vq[i].engine) { 263 ret = crypto_engine_start(vcrypto->data_vq[i].engine); 264 if (ret) 265 goto err; 266 } 267 } 268 269 return 0; 270 271 err: 272 while (--i >= 0) 273 if (vcrypto->data_vq[i].engine) 274 crypto_engine_exit(vcrypto->data_vq[i].engine); 275 276 return ret; 277 } 278 279 static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto) 280 { 281 u32 i; 282 283 for (i = 0; i < vcrypto->max_data_queues; i++) 284 if (vcrypto->data_vq[i].engine) 285 crypto_engine_exit(vcrypto->data_vq[i].engine); 286 } 287 288 static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto) 289 { 290 struct virtio_device *vdev = vcrypto->vdev; 291 292 virtcrypto_clean_affinity(vcrypto, -1); 293 294 vdev->config->del_vqs(vdev); 295 296 virtcrypto_free_queues(vcrypto); 297 } 298 299 static int virtcrypto_probe(struct virtio_device *vdev) 300 { 301 int err = -EFAULT; 302 struct virtio_crypto *vcrypto; 303 u32 max_data_queues = 0, max_cipher_key_len = 0; 304 u32 max_auth_key_len = 0; 305 u64 max_size = 0; 306 u32 cipher_algo_l = 0; 307 u32 cipher_algo_h = 0; 308 u32 hash_algo = 0; 309 u32 mac_algo_l = 0; 310 u32 mac_algo_h = 0; 311 u32 aead_algo = 0; 312 u32 crypto_services = 0; 313 314 if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 315 return -ENODEV; 316 317 if (!vdev->config->get) { 318 dev_err(&vdev->dev, "%s failure: config access disabled\n", 319 __func__); 320 return -EINVAL; 321 } 322 323 if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) { 324 /* 325 * If the accelerator is connected to a node with no memory 326 * there is no point in using the accelerator since the remote 327 * memory transaction will be very slow. 328 */ 329 dev_err(&vdev->dev, "Invalid NUMA configuration.\n"); 330 return -EINVAL; 331 } 332 333 vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL, 334 dev_to_node(&vdev->dev)); 335 if (!vcrypto) 336 return -ENOMEM; 337 338 virtio_cread(vdev, struct virtio_crypto_config, 339 max_dataqueues, &max_data_queues); 340 if (max_data_queues < 1) 341 max_data_queues = 1; 342 343 virtio_cread(vdev, struct virtio_crypto_config, 344 max_cipher_key_len, &max_cipher_key_len); 345 virtio_cread(vdev, struct virtio_crypto_config, 346 max_auth_key_len, &max_auth_key_len); 347 virtio_cread(vdev, struct virtio_crypto_config, 348 max_size, &max_size); 349 virtio_cread(vdev, struct virtio_crypto_config, 350 crypto_services, &crypto_services); 351 virtio_cread(vdev, struct virtio_crypto_config, 352 cipher_algo_l, &cipher_algo_l); 353 virtio_cread(vdev, struct virtio_crypto_config, 354 cipher_algo_h, &cipher_algo_h); 355 virtio_cread(vdev, struct virtio_crypto_config, 356 hash_algo, &hash_algo); 357 virtio_cread(vdev, struct virtio_crypto_config, 358 mac_algo_l, &mac_algo_l); 359 virtio_cread(vdev, struct virtio_crypto_config, 360 mac_algo_h, &mac_algo_h); 361 virtio_cread(vdev, struct virtio_crypto_config, 362 aead_algo, &aead_algo); 363 364 /* Add virtio crypto device to global table */ 365 err = virtcrypto_devmgr_add_dev(vcrypto); 366 if (err) { 367 dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n"); 368 goto free; 369 } 370 vcrypto->owner = THIS_MODULE; 371 vcrypto = vdev->priv = vcrypto; 372 vcrypto->vdev = vdev; 373 374 spin_lock_init(&vcrypto->ctrl_lock); 375 376 /* Use single data queue as default */ 377 vcrypto->curr_queue = 1; 378 vcrypto->max_data_queues = max_data_queues; 379 vcrypto->max_cipher_key_len = max_cipher_key_len; 380 vcrypto->max_auth_key_len = max_auth_key_len; 381 vcrypto->max_size = max_size; 382 vcrypto->crypto_services = crypto_services; 383 vcrypto->cipher_algo_l = cipher_algo_l; 384 vcrypto->cipher_algo_h = cipher_algo_h; 385 vcrypto->mac_algo_l = mac_algo_l; 386 vcrypto->mac_algo_h = mac_algo_h; 387 vcrypto->hash_algo = hash_algo; 388 vcrypto->aead_algo = aead_algo; 389 390 391 dev_info(&vdev->dev, 392 "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n", 393 vcrypto->max_data_queues, 394 vcrypto->max_cipher_key_len, 395 vcrypto->max_auth_key_len, 396 vcrypto->max_size); 397 398 err = virtcrypto_init_vqs(vcrypto); 399 if (err) { 400 dev_err(&vdev->dev, "Failed to initialize vqs.\n"); 401 goto free_dev; 402 } 403 404 err = virtcrypto_start_crypto_engines(vcrypto); 405 if (err) 406 goto free_vqs; 407 408 virtio_device_ready(vdev); 409 410 err = virtcrypto_update_status(vcrypto); 411 if (err) 412 goto free_engines; 413 414 return 0; 415 416 free_engines: 417 virtcrypto_clear_crypto_engines(vcrypto); 418 free_vqs: 419 vcrypto->vdev->config->reset(vdev); 420 virtcrypto_del_vqs(vcrypto); 421 free_dev: 422 virtcrypto_devmgr_rm_dev(vcrypto); 423 free: 424 kfree(vcrypto); 425 return err; 426 } 427 428 static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto) 429 { 430 struct virtio_crypto_request *vc_req; 431 int i; 432 struct virtqueue *vq; 433 434 for (i = 0; i < vcrypto->max_data_queues; i++) { 435 vq = vcrypto->data_vq[i].vq; 436 while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) { 437 kfree(vc_req->req_data); 438 kfree(vc_req->sgs); 439 } 440 } 441 } 442 443 static void virtcrypto_remove(struct virtio_device *vdev) 444 { 445 struct virtio_crypto *vcrypto = vdev->priv; 446 447 dev_info(&vdev->dev, "Start virtcrypto_remove.\n"); 448 449 if (virtcrypto_dev_started(vcrypto)) 450 virtcrypto_dev_stop(vcrypto); 451 vdev->config->reset(vdev); 452 virtcrypto_free_unused_reqs(vcrypto); 453 virtcrypto_clear_crypto_engines(vcrypto); 454 virtcrypto_del_vqs(vcrypto); 455 virtcrypto_devmgr_rm_dev(vcrypto); 456 kfree(vcrypto); 457 } 458 459 static void virtcrypto_config_changed(struct virtio_device *vdev) 460 { 461 struct virtio_crypto *vcrypto = vdev->priv; 462 463 virtcrypto_update_status(vcrypto); 464 } 465 466 #ifdef CONFIG_PM_SLEEP 467 static int virtcrypto_freeze(struct virtio_device *vdev) 468 { 469 struct virtio_crypto *vcrypto = vdev->priv; 470 471 vdev->config->reset(vdev); 472 virtcrypto_free_unused_reqs(vcrypto); 473 if (virtcrypto_dev_started(vcrypto)) 474 virtcrypto_dev_stop(vcrypto); 475 476 virtcrypto_clear_crypto_engines(vcrypto); 477 virtcrypto_del_vqs(vcrypto); 478 return 0; 479 } 480 481 static int virtcrypto_restore(struct virtio_device *vdev) 482 { 483 struct virtio_crypto *vcrypto = vdev->priv; 484 int err; 485 486 err = virtcrypto_init_vqs(vcrypto); 487 if (err) 488 return err; 489 490 err = virtcrypto_start_crypto_engines(vcrypto); 491 if (err) 492 goto free_vqs; 493 494 virtio_device_ready(vdev); 495 496 err = virtcrypto_dev_start(vcrypto); 497 if (err) { 498 dev_err(&vdev->dev, "Failed to start virtio crypto device.\n"); 499 goto free_engines; 500 } 501 502 return 0; 503 504 free_engines: 505 virtcrypto_clear_crypto_engines(vcrypto); 506 free_vqs: 507 vcrypto->vdev->config->reset(vdev); 508 virtcrypto_del_vqs(vcrypto); 509 return err; 510 } 511 #endif 512 513 static unsigned int features[] = { 514 /* none */ 515 }; 516 517 static struct virtio_device_id id_table[] = { 518 { VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID }, 519 { 0 }, 520 }; 521 522 static struct virtio_driver virtio_crypto_driver = { 523 .driver.name = KBUILD_MODNAME, 524 .driver.owner = THIS_MODULE, 525 .feature_table = features, 526 .feature_table_size = ARRAY_SIZE(features), 527 .id_table = id_table, 528 .probe = virtcrypto_probe, 529 .remove = virtcrypto_remove, 530 .config_changed = virtcrypto_config_changed, 531 #ifdef CONFIG_PM_SLEEP 532 .freeze = virtcrypto_freeze, 533 .restore = virtcrypto_restore, 534 #endif 535 }; 536 537 module_virtio_driver(virtio_crypto_driver); 538 539 MODULE_DEVICE_TABLE(virtio, id_table); 540 MODULE_DESCRIPTION("virtio crypto device driver"); 541 MODULE_LICENSE("GPL"); 542 MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>"); 543