/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */

#ifndef __HISI_SEC_V2_H
#define __HISI_SEC_V2_H

#include <linux/hisi_acc_qm.h>
#include "sec_crypto.h"

/* Algorithm resource per hardware SEC queue */
struct sec_alg_res {
	u8 *pbuf;
	dma_addr_t pbuf_dma;
	u8 *c_ivin;
	dma_addr_t c_ivin_dma;
	u8 *a_ivin;
	dma_addr_t a_ivin_dma;
	u8 *out_mac;
	dma_addr_t out_mac_dma;
	u16 depth;
};

/* SEC private cipher request */
struct sec_cipher_req {
	struct hisi_acc_hw_sgl *c_out;
	dma_addr_t c_out_dma;
	u8 *c_ivin;
	dma_addr_t c_ivin_dma;
	struct skcipher_request *sk_req;
	u32 c_len;
	bool encrypt;
};

struct sec_aead_req {
	u8 *out_mac;
	dma_addr_t out_mac_dma;
	u8 *a_ivin;
	dma_addr_t a_ivin_dma;
	struct aead_request *aead_req;
};

/* SEC crypto request */
struct sec_req {
	union {
		struct sec_sqe sec_sqe;
		struct sec_sqe3 sec_sqe3;
	};
	struct sec_ctx *ctx;
	struct sec_qp_ctx *qp_ctx;

	/* Common parameters of the SEC request */
	struct hisi_acc_hw_sgl *in;
	dma_addr_t in_dma;
	struct sec_cipher_req c_req;
	struct sec_aead_req aead_req;
	struct list_head backlog_head;

	int err_type;
	int req_id;
	u32 flag;

	/* Status of the SEC request */
	bool fake_busy;
	bool use_pbuf;
};

/**
 * struct sec_req_op - Operations for SEC request
 * @buf_map: DMA map the SGL buffers of the request
 * @buf_unmap: DMA unmap the SGL buffers of the request
 * @do_transfer: Copy request data (such as the cipher IV) before the BD is filled
 * @bd_fill: Fill the SEC queue BD
 * @bd_send: Send the SEC BD into the hardware queue
 * @callback: Call back for the request
 * @process: Main processing logic of skcipher
 */
struct sec_req_op {
	int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
	void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
	void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
	void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
	int (*process)(struct sec_ctx *ctx, struct sec_req *req);
};

/* SEC auth context */
struct sec_auth_ctx {
	dma_addr_t a_key_dma;
	u8 *a_key;
	u8 a_key_len;
	u8 a_alg;
	struct crypto_shash *hash_tfm;
	struct crypto_aead *fallback_aead_tfm;
};

/* SEC cipher context which defines the cipher's related attributes */
struct sec_cipher_ctx {
	u8 *c_key;
	dma_addr_t c_key_dma;
	sector_t iv_offset;
	u32 c_gran_size;
	u32 ivsize;
	u8 c_mode;
	u8 c_alg;
	u8 c_key_len;

	/* Software fallback support */
	bool fallback;
	struct crypto_sync_skcipher *fbtfm;
};

/* SEC queue context which defines the queue's related attributes */
struct sec_qp_ctx {
	struct hisi_qp *qp;
	struct sec_req **req_list;
	struct idr req_idr;
	struct sec_alg_res *res;
	struct sec_ctx *ctx;
	spinlock_t req_lock;
	struct list_head backlog;
	struct hisi_acc_sgl_pool *c_in_pool;
	struct hisi_acc_sgl_pool *c_out_pool;
};

enum sec_alg_type {
	SEC_SKCIPHER,
	SEC_AEAD
};

/* SEC crypto TFM context which defines the queue, cipher and other related attributes */
struct sec_ctx {
	struct sec_qp_ctx *qp_ctx;
	struct sec_dev *sec;
	const struct sec_req_op *req_op;
	struct hisi_qp **qps;

	/* Half of the queues are used for encryption, and half for decryption */
	u32 hlf_q_num;

	/* Threshold for fake busy: once reached, -EBUSY is returned to the user */
	u32 fake_req_limit;

	/* Current cyclic index to select a queue for encryption */
	atomic_t enc_qcyclic;

	/* Current cyclic index to select a queue for decryption */
	atomic_t dec_qcyclic;

	enum sec_alg_type alg_type;
	bool pbuf_supported;
	struct sec_cipher_ctx c_ctx;
	struct sec_auth_ctx a_ctx;
	u8 type_supported;
	struct device *dev;
};

enum sec_debug_file_index {
	SEC_CLEAR_ENABLE,
	SEC_DEBUG_FILE_NUM,
};

struct sec_debug_file {
	enum sec_debug_file_index index;
	spinlock_t lock;
	struct hisi_qm *qm;
};

struct sec_dfx {
	atomic64_t send_cnt;
	atomic64_t recv_cnt;
	atomic64_t send_busy_cnt;
	atomic64_t recv_busy_cnt;
	atomic64_t err_bd_cnt;
	atomic64_t invalid_req_cnt;
	atomic64_t done_flag_cnt;
};

struct sec_debug {
	struct sec_dfx dfx;
	struct sec_debug_file files[SEC_DEBUG_FILE_NUM];
};

struct sec_dev {
	struct hisi_qm qm;
	struct sec_debug debug;
	u32 ctx_q_num;
	bool iommu_used;
};

enum sec_cap_type {
	SEC_QM_NFE_MASK_CAP = 0x0,
	SEC_QM_RESET_MASK_CAP,
	SEC_QM_OOO_SHUTDOWN_MASK_CAP,
	SEC_QM_CE_MASK_CAP,
	SEC_NFE_MASK_CAP,
	SEC_RESET_MASK_CAP,
	SEC_OOO_SHUTDOWN_MASK_CAP,
	SEC_CE_MASK_CAP,
	SEC_CLUSTER_NUM_CAP,
	SEC_CORE_TYPE_NUM_CAP,
	SEC_CORE_NUM_CAP,
	SEC_CORES_PER_CLUSTER_NUM_CAP,
	SEC_CORE_ENABLE_BITMAP,
	SEC_DRV_ALG_BITMAP_LOW,
	SEC_DRV_ALG_BITMAP_HIGH,
	SEC_DEV_ALG_BITMAP_LOW,
	SEC_DEV_ALG_BITMAP_HIGH,
	SEC_CORE1_ALG_BITMAP_LOW,
	SEC_CORE1_ALG_BITMAP_HIGH,
	SEC_CORE2_ALG_BITMAP_LOW,
	SEC_CORE2_ALG_BITMAP_HIGH,
	SEC_CORE3_ALG_BITMAP_LOW,
	SEC_CORE3_ALG_BITMAP_HIGH,
	SEC_CORE4_ALG_BITMAP_LOW,
	SEC_CORE4_ALG_BITMAP_HIGH,
};

enum sec_cap_reg_record_idx {
	SEC_DRV_ALG_BITMAP_LOW_IDX = 0x0,
	SEC_DRV_ALG_BITMAP_HIGH_IDX,
	SEC_DEV_ALG_BITMAP_LOW_IDX,
	SEC_DEV_ALG_BITMAP_HIGH_IDX,
};

/* Release the queue pairs previously obtained from the QM */
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
/* Allocate queue pairs from an available SEC device */
struct hisi_qp **sec_create_qps(void);
/* Register/unregister the SEC algorithms with the kernel crypto API */
int sec_register_to_crypto(struct hisi_qm *qm);
void sec_unregister_from_crypto(struct hisi_qm *qm);
/* Combine the high/low 32-bit capability values into a 64-bit algorithm bitmap */
u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low);
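
/*
 * Illustrative sketch (not part of the driver): one way the capability
 * helpers above could be combined to test driver algorithm support,
 * assuming the sec_cap_reg_record_idx values are the intended arguments
 * of sec_get_alg_bitmap(). The helper name and the mask check are
 * hypothetical, so the block is kept under "#if 0" and never built.
 */
#if 0
static inline bool sec_example_alg_supported(struct hisi_qm *qm, u64 alg_mask)
{
	/* Merge the HIGH/LOW 32-bit capability words into one 64-bit bitmap */
	u64 bitmap = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
					SEC_DRV_ALG_BITMAP_LOW_IDX);

	return (bitmap & alg_mask) == alg_mask;
}
#endif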

#endif