/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Common header for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#ifndef _VIRTIO_CRYPTO_COMMON_H
#define _VIRTIO_CRYPTO_COMMON_H

#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <linux/topology.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>

/* Internal representation of a data virtqueue */
struct data_queue {
	/* Virtqueue associated with this data queue */
	struct virtqueue *vq;

	/* To protect the vq operations for the dataq */
	spinlock_t lock;

	/* Name of the data queue: dataq.$index */
	char name[32];

	struct crypto_engine *engine;
};

struct virtio_crypto {
	struct virtio_device *vdev;
	struct virtqueue *ctrl_vq;
	struct data_queue *data_vq;

	/* To protect the vq operations for the controlq */
	spinlock_t ctrl_lock;

	/* Maximum number of data queues supported by the device */
	u32 max_data_queues;

	/* Number of data queues currently used by the driver */
	u32 curr_queue;

	/*
	 * Mask of the services supported by the device;
	 * see VIRTIO_CRYPTO_SERVICE_*
	 */
	u32 crypto_services;

	/* Detailed algorithm masks */
	u32 cipher_algo_l;
	u32 cipher_algo_h;
	u32 hash_algo;
	u32 mac_algo_l;
	u32 mac_algo_h;
	u32 aead_algo;
	u32 akcipher_algo;

	/* Maximum length of a cipher key */
	u32 max_cipher_key_len;
	/* Maximum length of an authentication key */
	u32 max_auth_key_len;
	/* Maximum size of a single request */
	u64 max_size;

	/* Control VQ buffers: protected by the ctrl_lock */
	struct virtio_crypto_op_ctrl_req ctrl;
	struct virtio_crypto_session_input input;
	struct virtio_crypto_inhdr ctrl_status;

	unsigned long status;
	atomic_t ref_count;
	struct list_head list;
	struct module *owner;
	uint8_t dev_id;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;
};
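/*
 * Illustrative sketch only, not part of this header: a caller submitting
 * work on a data queue is expected to hold dataq->lock around the
 * virtqueue operations, roughly as below. The function name and the
 * scatterlist layout are hypothetical; virtqueue_add_sgs() and
 * virtqueue_kick() are the standard virtio core APIs.
 *
 *	static int example_submit(struct virtio_crypto_request *vc_req,
 *				  struct scatterlist *sgs[],
 *				  unsigned int out_sgs, unsigned int in_sgs)
 *	{
 *		struct data_queue *dataq = vc_req->dataq;
 *		unsigned long flags;
 *		int err;
 *
 *		spin_lock_irqsave(&dataq->lock, flags);
 *		err = virtqueue_add_sgs(dataq->vq, sgs, out_sgs, in_sgs,
 *					vc_req, GFP_ATOMIC);
 *		if (!err)
 *			virtqueue_kick(dataq->vq);
 *		spin_unlock_irqrestore(&dataq->lock, flags);
 *
 *		return err;
 *	}
 */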
struct virtio_crypto_sym_session_info {
	/* Backend session id, which comes from the host side */
	__u64 session_id;
};

struct virtio_crypto_request;
typedef void (*virtio_crypto_data_callback)
		(struct virtio_crypto_request *vc_req, int len);

struct virtio_crypto_request {
	uint8_t status;
	struct virtio_crypto_op_data_req *req_data;
	struct scatterlist **sgs;
	struct data_queue *dataq;
	virtio_crypto_data_callback alg_cb;
};

int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_devmgr_get_first(void);
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto_dev,
				  uint32_t service,
				  uint32_t algo);
struct virtio_crypto *virtcrypto_get_dev_node(int node,
					      uint32_t service,
					      uint32_t algo);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_skcipher_crypt_req(
	struct crypto_engine *engine, void *vreq);

void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req);

static inline int virtio_crypto_get_current_node(void)
{
	int cpu, node;

	cpu = get_cpu();
	node = topology_physical_package_id(cpu);
	put_cpu();

	return node;
}

int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto);
int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);

#endif /* _VIRTIO_CRYPTO_COMMON_H */
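/*
 * Illustrative sketch only, not part of this header: a typical consumer
 * picks a device supporting the required service/algorithm pair near the
 * current NUMA node, then drops the reference with virtcrypto_dev_put()
 * when done. The VIRTIO_CRYPTO_SERVICE_CIPHER and
 * VIRTIO_CRYPTO_CIPHER_AES_CBC constants come from the virtio crypto
 * uapi header.
 *
 *	struct virtio_crypto *vcrypto;
 *
 *	vcrypto = virtcrypto_get_dev_node(virtio_crypto_get_current_node(),
 *					  VIRTIO_CRYPTO_SERVICE_CIPHER,
 *					  VIRTIO_CRYPTO_CIPHER_AES_CBC);
 *	if (!vcrypto)
 *		return -ENODEV;
 *
 *	... submit requests on the device's data queues ...
 *
 *	virtcrypto_dev_put(vcrypto);
 */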