1 /* Driver for Virtio crypto device. 2 * 3 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD. 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, see <http://www.gnu.org/licenses/>. 17 */ 18 19 #include <linux/err.h> 20 #include <linux/module.h> 21 #include <linux/virtio_config.h> 22 #include <linux/cpu.h> 23 24 #include <uapi/linux/virtio_crypto.h> 25 #include "virtio_crypto_common.h" 26 27 28 void 29 virtcrypto_clear_request(struct virtio_crypto_request *vc_req) 30 { 31 if (vc_req) { 32 kzfree(vc_req->iv); 33 kzfree(vc_req->req_data); 34 kfree(vc_req->sgs); 35 } 36 } 37 38 static void virtcrypto_dataq_callback(struct virtqueue *vq) 39 { 40 struct virtio_crypto *vcrypto = vq->vdev->priv; 41 struct virtio_crypto_request *vc_req; 42 unsigned long flags; 43 unsigned int len; 44 struct ablkcipher_request *ablk_req; 45 int error; 46 unsigned int qid = vq->index; 47 48 spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags); 49 do { 50 virtqueue_disable_cb(vq); 51 while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) { 52 if (vc_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) { 53 switch (vc_req->status) { 54 case VIRTIO_CRYPTO_OK: 55 error = 0; 56 break; 57 case VIRTIO_CRYPTO_INVSESS: 58 case VIRTIO_CRYPTO_ERR: 59 error = -EINVAL; 60 break; 61 case VIRTIO_CRYPTO_BADMSG: 62 error = -EBADMSG; 63 break; 64 default: 65 error = -EIO; 66 break; 67 } 68 ablk_req = vc_req->ablkcipher_req; 69 70 spin_unlock_irqrestore( 71 &vcrypto->data_vq[qid].lock, flags); 72 /* Finish the encrypt or decrypt process */ 73 virtio_crypto_ablkcipher_finalize_req(vc_req, 74 ablk_req, error); 75 spin_lock_irqsave( 76 &vcrypto->data_vq[qid].lock, flags); 77 } 78 } 79 } while (!virtqueue_enable_cb(vq)); 80 spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags); 81 } 82 83 static int virtcrypto_find_vqs(struct virtio_crypto *vi) 84 { 85 vq_callback_t **callbacks; 86 struct virtqueue **vqs; 87 int ret = -ENOMEM; 88 int i, total_vqs; 89 const char **names; 90 struct device *dev = &vi->vdev->dev; 91 92 /* 93 * We expect 1 data virtqueue, followed by 94 * possible N-1 data queues used in multiqueue mode, 95 * followed by control vq. 96 */ 97 total_vqs = vi->max_data_queues + 1; 98 99 /* Allocate space for find_vqs parameters */ 100 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); 101 if (!vqs) 102 goto err_vq; 103 callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL); 104 if (!callbacks) 105 goto err_callback; 106 names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL); 107 if (!names) 108 goto err_names; 109 110 /* Parameters for control virtqueue */ 111 callbacks[total_vqs - 1] = NULL; 112 names[total_vqs - 1] = "controlq"; 113 114 /* Allocate/initialize parameters for data virtqueues */ 115 for (i = 0; i < vi->max_data_queues; i++) { 116 callbacks[i] = virtcrypto_dataq_callback; 117 snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name), 118 "dataq.%d", i); 119 names[i] = vi->data_vq[i].name; 120 } 121 122 ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, 123 names); 124 if (ret) 125 goto err_find; 126 127 vi->ctrl_vq = vqs[total_vqs - 1]; 128 129 for (i = 0; i < vi->max_data_queues; i++) { 130 spin_lock_init(&vi->data_vq[i].lock); 131 vi->data_vq[i].vq = vqs[i]; 132 /* Initialize crypto engine */ 133 vi->data_vq[i].engine = crypto_engine_alloc_init(dev, 1); 134 if (!vi->data_vq[i].engine) { 135 ret = -ENOMEM; 136 goto err_engine; 137 } 138 139 vi->data_vq[i].engine->cipher_one_request = 140 virtio_crypto_ablkcipher_crypt_req; 141 } 142 143 kfree(names); 144 kfree(callbacks); 145 kfree(vqs); 146 147 return 0; 148 149 err_engine: 150 err_find: 151 kfree(names); 152 err_names: 153 kfree(callbacks); 154 err_callback: 155 kfree(vqs); 156 err_vq: 157 return ret; 158 } 159 160 static int virtcrypto_alloc_queues(struct virtio_crypto *vi) 161 { 162 vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq), 163 GFP_KERNEL); 164 if (!vi->data_vq) 165 return -ENOMEM; 166 167 return 0; 168 } 169 170 static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu) 171 { 172 int i; 173 174 if (vi->affinity_hint_set) { 175 for (i = 0; i < vi->max_data_queues; i++) 176 virtqueue_set_affinity(vi->data_vq[i].vq, -1); 177 178 vi->affinity_hint_set = false; 179 } 180 } 181 182 static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto) 183 { 184 int i = 0; 185 int cpu; 186 187 /* 188 * In single queue mode, we don't set the cpu affinity. 189 */ 190 if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) { 191 virtcrypto_clean_affinity(vcrypto, -1); 192 return; 193 } 194 195 /* 196 * In multiqueue mode, we let the queue to be private to one cpu 197 * by setting the affinity hint to eliminate the contention. 198 * 199 * TODO: adds cpu hotplug support by register cpu notifier. 200 * 201 */ 202 for_each_online_cpu(cpu) { 203 virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu); 204 if (++i >= vcrypto->max_data_queues) 205 break; 206 } 207 208 vcrypto->affinity_hint_set = true; 209 } 210 211 static void virtcrypto_free_queues(struct virtio_crypto *vi) 212 { 213 kfree(vi->data_vq); 214 } 215 216 static int virtcrypto_init_vqs(struct virtio_crypto *vi) 217 { 218 int ret; 219 220 /* Allocate send & receive queues */ 221 ret = virtcrypto_alloc_queues(vi); 222 if (ret) 223 goto err; 224 225 ret = virtcrypto_find_vqs(vi); 226 if (ret) 227 goto err_free; 228 229 get_online_cpus(); 230 virtcrypto_set_affinity(vi); 231 put_online_cpus(); 232 233 return 0; 234 235 err_free: 236 virtcrypto_free_queues(vi); 237 err: 238 return ret; 239 } 240 241 static int virtcrypto_update_status(struct virtio_crypto *vcrypto) 242 { 243 u32 status; 244 int err; 245 246 virtio_cread(vcrypto->vdev, 247 struct virtio_crypto_config, status, &status); 248 249 /* 250 * Unknown status bits would be a host error and the driver 251 * should consider the device to be broken. 252 */ 253 if (status & (~VIRTIO_CRYPTO_S_HW_READY)) { 254 dev_warn(&vcrypto->vdev->dev, 255 "Unknown status bits: 0x%x\n", status); 256 257 virtio_break_device(vcrypto->vdev); 258 return -EPERM; 259 } 260 261 if (vcrypto->status == status) 262 return 0; 263 264 vcrypto->status = status; 265 266 if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) { 267 err = virtcrypto_dev_start(vcrypto); 268 if (err) { 269 dev_err(&vcrypto->vdev->dev, 270 "Failed to start virtio crypto device.\n"); 271 272 return -EPERM; 273 } 274 dev_info(&vcrypto->vdev->dev, "Accelerator is ready\n"); 275 } else { 276 virtcrypto_dev_stop(vcrypto); 277 dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n"); 278 } 279 280 return 0; 281 } 282 283 static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto) 284 { 285 int32_t i; 286 int ret; 287 288 for (i = 0; i < vcrypto->max_data_queues; i++) { 289 if (vcrypto->data_vq[i].engine) { 290 ret = crypto_engine_start(vcrypto->data_vq[i].engine); 291 if (ret) 292 goto err; 293 } 294 } 295 296 return 0; 297 298 err: 299 while (--i >= 0) 300 if (vcrypto->data_vq[i].engine) 301 crypto_engine_exit(vcrypto->data_vq[i].engine); 302 303 return ret; 304 } 305 306 static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto) 307 { 308 u32 i; 309 310 for (i = 0; i < vcrypto->max_data_queues; i++) 311 if (vcrypto->data_vq[i].engine) 312 crypto_engine_exit(vcrypto->data_vq[i].engine); 313 } 314 315 static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto) 316 { 317 struct virtio_device *vdev = vcrypto->vdev; 318 319 virtcrypto_clean_affinity(vcrypto, -1); 320 321 vdev->config->del_vqs(vdev); 322 323 virtcrypto_free_queues(vcrypto); 324 } 325 326 static int virtcrypto_probe(struct virtio_device *vdev) 327 { 328 int err = -EFAULT; 329 struct virtio_crypto *vcrypto; 330 u32 max_data_queues = 0, max_cipher_key_len = 0; 331 u32 max_auth_key_len = 0; 332 u64 max_size = 0; 333 334 if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 335 return -ENODEV; 336 337 if (!vdev->config->get) { 338 dev_err(&vdev->dev, "%s failure: config access disabled\n", 339 __func__); 340 return -EINVAL; 341 } 342 343 if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) { 344 /* 345 * If the accelerator is connected to a node with no memory 346 * there is no point in using the accelerator since the remote 347 * memory transaction will be very slow. 348 */ 349 dev_err(&vdev->dev, "Invalid NUMA configuration.\n"); 350 return -EINVAL; 351 } 352 353 vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL, 354 dev_to_node(&vdev->dev)); 355 if (!vcrypto) 356 return -ENOMEM; 357 358 virtio_cread(vdev, struct virtio_crypto_config, 359 max_dataqueues, &max_data_queues); 360 if (max_data_queues < 1) 361 max_data_queues = 1; 362 363 virtio_cread(vdev, struct virtio_crypto_config, 364 max_cipher_key_len, &max_cipher_key_len); 365 virtio_cread(vdev, struct virtio_crypto_config, 366 max_auth_key_len, &max_auth_key_len); 367 virtio_cread(vdev, struct virtio_crypto_config, 368 max_size, &max_size); 369 370 /* Add virtio crypto device to global table */ 371 err = virtcrypto_devmgr_add_dev(vcrypto); 372 if (err) { 373 dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n"); 374 goto free; 375 } 376 vcrypto->owner = THIS_MODULE; 377 vcrypto = vdev->priv = vcrypto; 378 vcrypto->vdev = vdev; 379 380 spin_lock_init(&vcrypto->ctrl_lock); 381 382 /* Use single data queue as default */ 383 vcrypto->curr_queue = 1; 384 vcrypto->max_data_queues = max_data_queues; 385 vcrypto->max_cipher_key_len = max_cipher_key_len; 386 vcrypto->max_auth_key_len = max_auth_key_len; 387 vcrypto->max_size = max_size; 388 389 dev_info(&vdev->dev, 390 "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n", 391 vcrypto->max_data_queues, 392 vcrypto->max_cipher_key_len, 393 vcrypto->max_auth_key_len, 394 vcrypto->max_size); 395 396 err = virtcrypto_init_vqs(vcrypto); 397 if (err) { 398 dev_err(&vdev->dev, "Failed to initialize vqs.\n"); 399 goto free_dev; 400 } 401 402 err = virtcrypto_start_crypto_engines(vcrypto); 403 if (err) 404 goto free_vqs; 405 406 virtio_device_ready(vdev); 407 408 err = virtcrypto_update_status(vcrypto); 409 if (err) 410 goto free_engines; 411 412 return 0; 413 414 free_engines: 415 virtcrypto_clear_crypto_engines(vcrypto); 416 free_vqs: 417 vcrypto->vdev->config->reset(vdev); 418 virtcrypto_del_vqs(vcrypto); 419 free_dev: 420 virtcrypto_devmgr_rm_dev(vcrypto); 421 free: 422 kfree(vcrypto); 423 return err; 424 } 425 426 static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto) 427 { 428 struct virtio_crypto_request *vc_req; 429 int i; 430 struct virtqueue *vq; 431 432 for (i = 0; i < vcrypto->max_data_queues; i++) { 433 vq = vcrypto->data_vq[i].vq; 434 while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) { 435 kfree(vc_req->req_data); 436 kfree(vc_req->sgs); 437 } 438 } 439 } 440 441 static void virtcrypto_remove(struct virtio_device *vdev) 442 { 443 struct virtio_crypto *vcrypto = vdev->priv; 444 445 dev_info(&vdev->dev, "Start virtcrypto_remove.\n"); 446 447 if (virtcrypto_dev_started(vcrypto)) 448 virtcrypto_dev_stop(vcrypto); 449 vdev->config->reset(vdev); 450 virtcrypto_free_unused_reqs(vcrypto); 451 virtcrypto_clear_crypto_engines(vcrypto); 452 virtcrypto_del_vqs(vcrypto); 453 virtcrypto_devmgr_rm_dev(vcrypto); 454 kfree(vcrypto); 455 } 456 457 static void virtcrypto_config_changed(struct virtio_device *vdev) 458 { 459 struct virtio_crypto *vcrypto = vdev->priv; 460 461 virtcrypto_update_status(vcrypto); 462 } 463 464 #ifdef CONFIG_PM_SLEEP 465 static int virtcrypto_freeze(struct virtio_device *vdev) 466 { 467 struct virtio_crypto *vcrypto = vdev->priv; 468 469 vdev->config->reset(vdev); 470 virtcrypto_free_unused_reqs(vcrypto); 471 if (virtcrypto_dev_started(vcrypto)) 472 virtcrypto_dev_stop(vcrypto); 473 474 virtcrypto_clear_crypto_engines(vcrypto); 475 virtcrypto_del_vqs(vcrypto); 476 return 0; 477 } 478 479 static int virtcrypto_restore(struct virtio_device *vdev) 480 { 481 struct virtio_crypto *vcrypto = vdev->priv; 482 int err; 483 484 err = virtcrypto_init_vqs(vcrypto); 485 if (err) 486 return err; 487 488 err = virtcrypto_start_crypto_engines(vcrypto); 489 if (err) 490 goto free_vqs; 491 492 virtio_device_ready(vdev); 493 494 err = virtcrypto_dev_start(vcrypto); 495 if (err) { 496 dev_err(&vdev->dev, "Failed to start virtio crypto device.\n"); 497 goto free_engines; 498 } 499 500 return 0; 501 502 free_engines: 503 virtcrypto_clear_crypto_engines(vcrypto); 504 free_vqs: 505 vcrypto->vdev->config->reset(vdev); 506 virtcrypto_del_vqs(vcrypto); 507 return err; 508 } 509 #endif 510 511 static unsigned int features[] = { 512 /* none */ 513 }; 514 515 static struct virtio_device_id id_table[] = { 516 { VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID }, 517 { 0 }, 518 }; 519 520 static struct virtio_driver virtio_crypto_driver = { 521 .driver.name = KBUILD_MODNAME, 522 .driver.owner = THIS_MODULE, 523 .feature_table = features, 524 .feature_table_size = ARRAY_SIZE(features), 525 .id_table = id_table, 526 .probe = virtcrypto_probe, 527 .remove = virtcrypto_remove, 528 .config_changed = virtcrypto_config_changed, 529 #ifdef CONFIG_PM_SLEEP 530 .freeze = virtcrypto_freeze, 531 .restore = virtcrypto_restore, 532 #endif 533 }; 534 535 module_virtio_driver(virtio_crypto_driver); 536 537 MODULE_DEVICE_TABLE(virtio, id_table); 538 MODULE_DESCRIPTION("virtio crypto device driver"); 539 MODULE_LICENSE("GPL"); 540 MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>"); 541