1  /* Driver for Virtio crypto device.
2   *
3   * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
4   *
5   * This program is free software; you can redistribute it and/or modify
6   * it under the terms of the GNU General Public License as published by
7   * the Free Software Foundation; either version 2 of the License, or
8   * (at your option) any later version.
9   *
10   * This program is distributed in the hope that it will be useful,
11   * but WITHOUT ANY WARRANTY; without even the implied warranty of
12   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13   * GNU General Public License for more details.
14   *
15   * You should have received a copy of the GNU General Public License
16   * along with this program; if not, see <http://www.gnu.org/licenses/>.
17   */
18 
19 #include <linux/err.h>
20 #include <linux/module.h>
21 #include <linux/virtio_config.h>
22 #include <linux/cpu.h>
23 
24 #include <uapi/linux/virtio_crypto.h>
25 #include "virtio_crypto_common.h"
26 
27 
28 void
29 virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
30 {
31 	if (vc_req) {
32 		kzfree(vc_req->req_data);
33 		kfree(vc_req->sgs);
34 	}
35 }
36 
/*
 * Completion callback for one data virtqueue.
 *
 * Drains all completed buffers from @vq and invokes each request's own
 * completion hook (alg_cb) with the number of bytes the device wrote.
 * The per-queue lock is released around the hook invocation -- presumably
 * so the hook can interact with the same queue (e.g. resubmit work)
 * without deadlocking; NOTE(review): assumes alg_cb tolerates running
 * without the queue lock held.
 *
 * The disable_cb / get_buf / enable_cb loop is the standard virtio
 * pattern: virtqueue_enable_cb() returns false if more buffers arrived
 * while callbacks were disabled, in which case we drain again.
 */
static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
	struct virtio_crypto *vcrypto = vq->vdev->priv;
	struct virtio_crypto_request *vc_req;
	unsigned long flags;
	unsigned int len;	/* bytes written by the device for this buffer */
	unsigned int qid = vq->index;

	spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
			/* Drop the lock while running the completion hook. */
			spin_unlock_irqrestore(
				&vcrypto->data_vq[qid].lock, flags);
			if (vc_req->alg_cb)
				vc_req->alg_cb(vc_req, len);
			spin_lock_irqsave(
				&vcrypto->data_vq[qid].lock, flags);
		}
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
}
59 
60 static int virtcrypto_find_vqs(struct virtio_crypto *vi)
61 {
62 	vq_callback_t **callbacks;
63 	struct virtqueue **vqs;
64 	int ret = -ENOMEM;
65 	int i, total_vqs;
66 	const char **names;
67 	struct device *dev = &vi->vdev->dev;
68 
69 	/*
70 	 * We expect 1 data virtqueue, followed by
71 	 * possible N-1 data queues used in multiqueue mode,
72 	 * followed by control vq.
73 	 */
74 	total_vqs = vi->max_data_queues + 1;
75 
76 	/* Allocate space for find_vqs parameters */
77 	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
78 	if (!vqs)
79 		goto err_vq;
80 	callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
81 	if (!callbacks)
82 		goto err_callback;
83 	names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
84 	if (!names)
85 		goto err_names;
86 
87 	/* Parameters for control virtqueue */
88 	callbacks[total_vqs - 1] = NULL;
89 	names[total_vqs - 1] = "controlq";
90 
91 	/* Allocate/initialize parameters for data virtqueues */
92 	for (i = 0; i < vi->max_data_queues; i++) {
93 		callbacks[i] = virtcrypto_dataq_callback;
94 		snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
95 				"dataq.%d", i);
96 		names[i] = vi->data_vq[i].name;
97 	}
98 
99 	ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
100 	if (ret)
101 		goto err_find;
102 
103 	vi->ctrl_vq = vqs[total_vqs - 1];
104 
105 	for (i = 0; i < vi->max_data_queues; i++) {
106 		spin_lock_init(&vi->data_vq[i].lock);
107 		vi->data_vq[i].vq = vqs[i];
108 		/* Initialize crypto engine */
109 		vi->data_vq[i].engine = crypto_engine_alloc_init(dev, 1);
110 		if (!vi->data_vq[i].engine) {
111 			ret = -ENOMEM;
112 			goto err_engine;
113 		}
114 
115 		vi->data_vq[i].engine->cipher_one_request =
116 			virtio_crypto_ablkcipher_crypt_req;
117 	}
118 
119 	kfree(names);
120 	kfree(callbacks);
121 	kfree(vqs);
122 
123 	return 0;
124 
125 err_engine:
126 err_find:
127 	kfree(names);
128 err_names:
129 	kfree(callbacks);
130 err_callback:
131 	kfree(vqs);
132 err_vq:
133 	return ret;
134 }
135 
136 static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
137 {
138 	vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
139 				GFP_KERNEL);
140 	if (!vi->data_vq)
141 		return -ENOMEM;
142 
143 	return 0;
144 }
145 
146 static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
147 {
148 	int i;
149 
150 	if (vi->affinity_hint_set) {
151 		for (i = 0; i < vi->max_data_queues; i++)
152 			virtqueue_set_affinity(vi->data_vq[i].vq, -1);
153 
154 		vi->affinity_hint_set = false;
155 	}
156 }
157 
158 static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
159 {
160 	int i = 0;
161 	int cpu;
162 
163 	/*
164 	 * In single queue mode, we don't set the cpu affinity.
165 	 */
166 	if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
167 		virtcrypto_clean_affinity(vcrypto, -1);
168 		return;
169 	}
170 
171 	/*
172 	 * In multiqueue mode, we let the queue to be private to one cpu
173 	 * by setting the affinity hint to eliminate the contention.
174 	 *
175 	 * TODO: adds cpu hotplug support by register cpu notifier.
176 	 *
177 	 */
178 	for_each_online_cpu(cpu) {
179 		virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu);
180 		if (++i >= vcrypto->max_data_queues)
181 			break;
182 	}
183 
184 	vcrypto->affinity_hint_set = true;
185 }
186 
/*
 * Free the per-data-queue bookkeeping array allocated by
 * virtcrypto_alloc_queues().  Does not touch the virtqueues themselves.
 */
static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
	kfree(vi->data_vq);
}
191 
/*
 * Top-level virtqueue bring-up: allocate the per-queue array, find all
 * virtqueues (data + control), then spread queue affinity over the
 * online CPUs.
 *
 * Return: 0 on success, negative errno on failure (the per-queue array
 * is freed again in that case).
 */
static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
	int err;

	err = virtcrypto_alloc_queues(vi);
	if (err)
		return err;

	err = virtcrypto_find_vqs(vi);
	if (err) {
		virtcrypto_free_queues(vi);
		return err;
	}

	/* Hold off CPU hotplug while distributing affinity hints. */
	get_online_cpus();
	virtcrypto_set_affinity(vi);
	put_online_cpus();

	return 0;
}
216 
217 static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
218 {
219 	u32 status;
220 	int err;
221 
222 	virtio_cread(vcrypto->vdev,
223 	    struct virtio_crypto_config, status, &status);
224 
225 	/*
226 	 * Unknown status bits would be a host error and the driver
227 	 * should consider the device to be broken.
228 	 */
229 	if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
230 		dev_warn(&vcrypto->vdev->dev,
231 				"Unknown status bits: 0x%x\n", status);
232 
233 		virtio_break_device(vcrypto->vdev);
234 		return -EPERM;
235 	}
236 
237 	if (vcrypto->status == status)
238 		return 0;
239 
240 	vcrypto->status = status;
241 
242 	if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
243 		err = virtcrypto_dev_start(vcrypto);
244 		if (err) {
245 			dev_err(&vcrypto->vdev->dev,
246 				"Failed to start virtio crypto device.\n");
247 
248 			return -EPERM;
249 		}
250 		dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
251 	} else {
252 		virtcrypto_dev_stop(vcrypto);
253 		dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
254 	}
255 
256 	return 0;
257 }
258 
259 static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
260 {
261 	int32_t i;
262 	int ret;
263 
264 	for (i = 0; i < vcrypto->max_data_queues; i++) {
265 		if (vcrypto->data_vq[i].engine) {
266 			ret = crypto_engine_start(vcrypto->data_vq[i].engine);
267 			if (ret)
268 				goto err;
269 		}
270 	}
271 
272 	return 0;
273 
274 err:
275 	while (--i >= 0)
276 		if (vcrypto->data_vq[i].engine)
277 			crypto_engine_exit(vcrypto->data_vq[i].engine);
278 
279 	return ret;
280 }
281 
282 static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
283 {
284 	u32 i;
285 
286 	for (i = 0; i < vcrypto->max_data_queues; i++)
287 		if (vcrypto->data_vq[i].engine)
288 			crypto_engine_exit(vcrypto->data_vq[i].engine);
289 }
290 
/*
 * Tear down all virtqueues: clear CPU affinity hints, have virtio core
 * delete the queues, then free the per-queue bookkeeping array.
 * NOTE(review): all callers in this file reset the device before
 * calling this -- presumably required so the device stops using the
 * queues; keep that ordering.
 */
static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
	struct virtio_device *vdev = vcrypto->vdev;

	virtcrypto_clean_affinity(vcrypto, -1);

	vdev->config->del_vqs(vdev);

	virtcrypto_free_queues(vcrypto);
}
301 
302 static int virtcrypto_probe(struct virtio_device *vdev)
303 {
304 	int err = -EFAULT;
305 	struct virtio_crypto *vcrypto;
306 	u32 max_data_queues = 0, max_cipher_key_len = 0;
307 	u32 max_auth_key_len = 0;
308 	u64 max_size = 0;
309 
310 	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
311 		return -ENODEV;
312 
313 	if (!vdev->config->get) {
314 		dev_err(&vdev->dev, "%s failure: config access disabled\n",
315 			__func__);
316 		return -EINVAL;
317 	}
318 
319 	if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
320 		/*
321 		 * If the accelerator is connected to a node with no memory
322 		 * there is no point in using the accelerator since the remote
323 		 * memory transaction will be very slow.
324 		 */
325 		dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
326 		return -EINVAL;
327 	}
328 
329 	vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
330 					dev_to_node(&vdev->dev));
331 	if (!vcrypto)
332 		return -ENOMEM;
333 
334 	virtio_cread(vdev, struct virtio_crypto_config,
335 			max_dataqueues, &max_data_queues);
336 	if (max_data_queues < 1)
337 		max_data_queues = 1;
338 
339 	virtio_cread(vdev, struct virtio_crypto_config,
340 		max_cipher_key_len, &max_cipher_key_len);
341 	virtio_cread(vdev, struct virtio_crypto_config,
342 		max_auth_key_len, &max_auth_key_len);
343 	virtio_cread(vdev, struct virtio_crypto_config,
344 		max_size, &max_size);
345 
346 	/* Add virtio crypto device to global table */
347 	err = virtcrypto_devmgr_add_dev(vcrypto);
348 	if (err) {
349 		dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
350 		goto free;
351 	}
352 	vcrypto->owner = THIS_MODULE;
353 	vcrypto = vdev->priv = vcrypto;
354 	vcrypto->vdev = vdev;
355 
356 	spin_lock_init(&vcrypto->ctrl_lock);
357 
358 	/* Use single data queue as default */
359 	vcrypto->curr_queue = 1;
360 	vcrypto->max_data_queues = max_data_queues;
361 	vcrypto->max_cipher_key_len = max_cipher_key_len;
362 	vcrypto->max_auth_key_len = max_auth_key_len;
363 	vcrypto->max_size = max_size;
364 
365 	dev_info(&vdev->dev,
366 		"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
367 		vcrypto->max_data_queues,
368 		vcrypto->max_cipher_key_len,
369 		vcrypto->max_auth_key_len,
370 		vcrypto->max_size);
371 
372 	err = virtcrypto_init_vqs(vcrypto);
373 	if (err) {
374 		dev_err(&vdev->dev, "Failed to initialize vqs.\n");
375 		goto free_dev;
376 	}
377 
378 	err = virtcrypto_start_crypto_engines(vcrypto);
379 	if (err)
380 		goto free_vqs;
381 
382 	virtio_device_ready(vdev);
383 
384 	err = virtcrypto_update_status(vcrypto);
385 	if (err)
386 		goto free_engines;
387 
388 	return 0;
389 
390 free_engines:
391 	virtcrypto_clear_crypto_engines(vcrypto);
392 free_vqs:
393 	vcrypto->vdev->config->reset(vdev);
394 	virtcrypto_del_vqs(vcrypto);
395 free_dev:
396 	virtcrypto_devmgr_rm_dev(vcrypto);
397 free:
398 	kfree(vcrypto);
399 	return err;
400 }
401 
402 static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
403 {
404 	struct virtio_crypto_request *vc_req;
405 	int i;
406 	struct virtqueue *vq;
407 
408 	for (i = 0; i < vcrypto->max_data_queues; i++) {
409 		vq = vcrypto->data_vq[i].vq;
410 		while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
411 			kfree(vc_req->req_data);
412 			kfree(vc_req->sgs);
413 		}
414 	}
415 }
416 
/*
 * Device teardown: stop the accelerator if running, reset the device
 * so it no longer touches the queues, then reclaim leftover requests
 * and release engines, virtqueues, the device-manager entry and the
 * device state itself -- in that order.
 */
static void virtcrypto_remove(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);
	vdev->config->reset(vdev);	/* quiesce before freeing queue buffers */
	virtcrypto_free_unused_reqs(vcrypto);
	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	virtcrypto_devmgr_rm_dev(vcrypto);
	kfree(vcrypto);
}
432 
/*
 * Config-space change notification: re-read the status field and
 * start/stop the accelerator as needed.
 */
static void virtcrypto_config_changed(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	virtcrypto_update_status(vcrypto);
}
439 
440 #ifdef CONFIG_PM_SLEEP
/*
 * PM suspend path: reset the device, reclaim in-flight buffers, stop
 * the accelerator and release engines and virtqueues so
 * virtcrypto_restore() can rebuild them.
 *
 * NOTE(review): unused requests are freed *before* dev_stop() here,
 * while virtcrypto_remove() stops the device first -- presumably both
 * orders are safe once the device has been reset; confirm.
 */
static int virtcrypto_freeze(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	vdev->config->reset(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);

	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	return 0;
}
454 
/*
 * PM resume path: rebuild the virtqueues and crypto engines that
 * virtcrypto_freeze() tore down, mark the device ready, and restart
 * the accelerator.  On failure, unwind in reverse order.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int virtcrypto_restore(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
	int err;

	err = virtcrypto_init_vqs(vcrypto);
	if (err)
		return err;

	err = virtcrypto_start_crypto_engines(vcrypto);
	if (err)
		goto free_vqs;

	virtio_device_ready(vdev);

	err = virtcrypto_dev_start(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
		goto free_engines;
	}

	return 0;

free_engines:
	virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
	/* Reset before deleting the vqs so the device stops using them. */
	vcrypto->vdev->config->reset(vdev);
	virtcrypto_del_vqs(vcrypto);
	return err;
}
485 #endif
486 
/* No optional virtio feature bits are negotiated; VIRTIO_F_VERSION_1
 * is checked explicitly in virtcrypto_probe().
 */
static unsigned int features[] = {
	/* none */
};

/* Match any revision of the virtio crypto device ID. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_crypto_driver = {
	.driver.name         = KBUILD_MODNAME,
	.driver.owner        = THIS_MODULE,
	.feature_table       = features,
	.feature_table_size  = ARRAY_SIZE(features),
	.id_table            = id_table,
	.probe               = virtcrypto_probe,
	.remove              = virtcrypto_remove,
	.config_changed = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtcrypto_freeze,
	.restore = virtcrypto_restore,
#endif
};

module_virtio_driver(virtio_crypto_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");
517