/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Common header for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#ifndef _VIRTIO_CRYPTO_COMMON_H
#define _VIRTIO_CRYPTO_COMMON_H

#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <linux/topology.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>

/* Internal representation of a data virtqueue */
struct data_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* To protect the vq operations for the dataq */
	spinlock_t lock;

	/* Name of the tx queue: dataq.$index */
	char name[32];

	struct crypto_engine *engine;
};
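
/*
 * Illustrative sketch (not part of this header): the per-queue lock is
 * meant to serialize all operations on the data virtqueue. A hypothetical
 * submit path could queue a request roughly like this:
 *
 *	unsigned long flags;
 *	int err;
 *
 *	spin_lock_irqsave(&data_vq->lock, flags);
 *	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out, num_in,
 *				vc_req, GFP_ATOMIC);
 *	virtqueue_kick(data_vq->vq);
 *	spin_unlock_irqrestore(&data_vq->lock, flags);
 */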

struct virtio_crypto {
	struct virtio_device *vdev;
	struct virtqueue *ctrl_vq;
	struct data_queue *data_vq;

	/* To protect the vq operations for the controlq */
	spinlock_t ctrl_lock;

	/* Maximum number of data queues supported by the device */
	u32 max_data_queues;

	/* Number of data queues currently used by the driver */
	u32 curr_queue;

	/*
	 * Specifies the mask of services that the device supports,
	 * see VIRTIO_CRYPTO_SERVICE_*
	 */
	u32 crypto_services;

	/* Detailed algorithms mask */
	u32 cipher_algo_l;
	u32 cipher_algo_h;
	u32 hash_algo;
	u32 mac_algo_l;
	u32 mac_algo_h;
	u32 aead_algo;

	/* Maximum length of a cipher key */
	u32 max_cipher_key_len;
	/* Maximum length of an authentication key */
	u32 max_auth_key_len;
	/* Maximum size of each request */
	u64 max_size;

	/* Control VQ buffers: protected by the ctrl_lock */
	struct virtio_crypto_op_ctrl_req ctrl;
	struct virtio_crypto_session_input input;
	struct virtio_crypto_inhdr ctrl_status;

	unsigned long status;
	atomic_t ref_count;
	struct list_head list;
	struct module *owner;
	uint8_t dev_id;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;
};
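
/*
 * Illustrative sketch (not part of this header): crypto_services is a
 * bitmask indexed by the VIRTIO_CRYPTO_SERVICE_* constants, so a caller
 * could test for a service roughly the way virtcrypto_algo_is_supported()
 * is assumed to:
 *
 *	if (!(vcrypto->crypto_services & (1u << VIRTIO_CRYPTO_SERVICE_CIPHER)))
 *		return false;
 */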

struct virtio_crypto_sym_session_info {
	/* Backend session id, which comes from the host side */
	__u64 session_id;
};

struct virtio_crypto_request;
typedef void (*virtio_crypto_data_callback)
		(struct virtio_crypto_request *vc_req, int len);

struct virtio_crypto_request {
	uint8_t status;
	struct virtio_crypto_op_data_req *req_data;
	struct scatterlist **sgs;
	struct data_queue *dataq;
	virtio_crypto_data_callback alg_cb;
};
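
/*
 * Illustrative sketch (not part of this header): alg_cb is invoked when
 * the device completes a request. A hypothetical algorithm-layer callback
 * might translate the device status and release the request like this:
 *
 *	static void virtio_crypto_example_finalize(
 *			struct virtio_crypto_request *vc_req, int len)
 *	{
 *		int error = vc_req->status == VIRTIO_CRYPTO_OK ? 0 : -EIO;
 *
 *		virtcrypto_clear_request(vc_req);
 *		... complete the crypto API request with 'error' ...
 *	}
 */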

int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_devmgr_get_first(void);
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto_dev,
				  uint32_t service,
				  uint32_t algo);
struct virtio_crypto *virtcrypto_get_dev_node(int node,
					      uint32_t service,
					      uint32_t algo);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
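
/*
 * Illustrative sketch (not part of this header): the devmgr calls above
 * are assumed to form a simple lifecycle, e.g. in a probe/remove pair:
 *
 *	err = virtcrypto_devmgr_add_dev(vcrypto);
 *	if (err)
 *		goto free;
 *	err = virtcrypto_dev_start(vcrypto);
 *	...
 *	virtcrypto_dev_stop(vcrypto);
 *	virtcrypto_devmgr_rm_dev(vcrypto);
 */
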
int virtio_crypto_ablkcipher_crypt_req(
	struct crypto_engine *engine, void *vreq);

void virtcrypto_clear_request(struct virtio_crypto_request *vc_req);

static inline int virtio_crypto_get_current_node(void)
{
	int cpu, node;

	cpu = get_cpu();
	node = topology_physical_package_id(cpu);
	put_cpu();

	return node;
}
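
/*
 * Illustrative sketch (not part of this header): a transformation setup
 * path could combine the helper above with virtcrypto_get_dev_node() to
 * pick a device close to the current CPU and pin it with a reference:
 *
 *	struct virtio_crypto *vcrypto =
 *		virtcrypto_get_dev_node(virtio_crypto_get_current_node(),
 *					VIRTIO_CRYPTO_SERVICE_CIPHER,
 *					VIRTIO_CRYPTO_CIPHER_AES_CBC);
 *	if (!vcrypto)
 *		return -ENODEV;
 *	...
 *	virtcrypto_dev_put(vcrypto);
 */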

int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);

#endif /* _VIRTIO_CRYPTO_COMMON_H */