/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */

#ifndef __HISI_SEC_V2_H
#define __HISI_SEC_V2_H

#include <linux/hisi_acc_qm.h>
#include "sec_crypto.h"

/* Algorithm resource per hardware SEC queue */
struct sec_alg_res {
	u8 *pbuf;
	dma_addr_t pbuf_dma;
	u8 *c_ivin;
	dma_addr_t c_ivin_dma;
	u8 *a_ivin;
	dma_addr_t a_ivin_dma;
	u8 *out_mac;
	dma_addr_t out_mac_dma;
	u16 depth;
};

/* Private cipher request of the SEC driver */
struct sec_cipher_req {
	struct hisi_acc_hw_sgl *c_out;
	dma_addr_t c_out_dma;
	u8 *c_ivin;
	dma_addr_t c_ivin_dma;
	struct skcipher_request *sk_req;
	u32 c_len;
	bool encrypt;
};

struct sec_aead_req {
	u8 *out_mac;
	dma_addr_t out_mac_dma;
	u8 *a_ivin;
	dma_addr_t a_ivin_dma;
	struct aead_request *aead_req;
	bool fallback;
};

/* SEC request of the crypto layer */
struct sec_req {
	union {
		struct sec_sqe sec_sqe;
		struct sec_sqe3 sec_sqe3;
	};
	struct sec_ctx *ctx;
	struct sec_qp_ctx *qp_ctx;

	/* Common parameters of the SEC request */
	struct hisi_acc_hw_sgl *in;
	dma_addr_t in_dma;
	struct sec_cipher_req c_req;
	struct sec_aead_req aead_req;
	struct list_head backlog_head;

	int err_type;
	int req_id;
	u32 flag;

	/* Status of the SEC request */
	bool fake_busy;
	bool use_pbuf;
};

/**
 * struct sec_req_op - Operations for SEC request
 * @buf_map: DMA map the SGL buffers of the request
 * @buf_unmap: DMA unmap the SGL buffers of the request
 * @do_transfer: Copy request data (such as the IV) into the queue resources
 * @bd_fill: Fill the SEC queue BD
 * @bd_send: Send the SEC BD into the hardware queue
 * @callback: Completion callback for the request
 * @process: Main processing logic of the request
 */
struct sec_req_op {
	int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
	void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
	void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
	void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
	int (*process)(struct sec_ctx *ctx, struct sec_req *req);
};

/* SEC auth context */
struct sec_auth_ctx {
	dma_addr_t a_key_dma;
	u8 *a_key;
	u8 a_key_len;
	u8 a_alg;
	struct crypto_shash *hash_tfm;
	struct crypto_aead *fallback_aead_tfm;
};

/* SEC cipher context which holds the cipher-related data */
struct sec_cipher_ctx {
	u8 *c_key;
	dma_addr_t c_key_dma;
	sector_t iv_offset;
	u32 c_gran_size;
	u32 ivsize;
	u8 c_mode;
	u8 c_alg;
	u8 c_key_len;

	/* Software fallback support */
	bool fallback;
	struct crypto_sync_skcipher *fbtfm;
};

/* SEC queue context which holds the queue-related data */
struct sec_qp_ctx {
	struct hisi_qp *qp;
	struct sec_req **req_list;
	struct idr req_idr;
	struct sec_alg_res *res;
	struct sec_ctx *ctx;
	spinlock_t req_lock;
	struct list_head backlog;
	struct hisi_acc_sgl_pool *c_in_pool;
	struct hisi_acc_sgl_pool *c_out_pool;
};

enum sec_alg_type {
	SEC_SKCIPHER,
	SEC_AEAD
};

/* SEC crypto TFM context which holds the queue, cipher and auth contexts */
struct sec_ctx {
	struct sec_qp_ctx *qp_ctx;
	struct sec_dev *sec;
	const struct sec_req_op *req_op;
	struct hisi_qp **qps;

	/* Half of the queues are used for encryption, half for decryption */
	u32 hlf_q_num;

	/* Threshold for fake busy: beyond it, return -EBUSY to the user */
	u32 fake_req_limit;

	/* Current cyclic index to select a queue for encryption */
	atomic_t enc_qcyclic;

	/* Current cyclic index to select a queue for decryption */
	atomic_t dec_qcyclic;

	enum sec_alg_type alg_type;
	bool pbuf_supported;
	struct sec_cipher_ctx c_ctx;
	struct sec_auth_ctx a_ctx;
	u8 type_supported;
	struct device *dev;
};

enum sec_debug_file_index {
	SEC_CLEAR_ENABLE,
	SEC_DEBUG_FILE_NUM,
};

struct sec_debug_file {
	enum sec_debug_file_index index;
	spinlock_t lock;
	struct hisi_qm *qm;
};

struct sec_dfx {
	atomic64_t send_cnt;
	atomic64_t recv_cnt;
	atomic64_t send_busy_cnt;
	atomic64_t recv_busy_cnt;
	atomic64_t err_bd_cnt;
	atomic64_t invalid_req_cnt;
	atomic64_t done_flag_cnt;
};

struct sec_debug {
	struct sec_dfx dfx;
	struct sec_debug_file files[SEC_DEBUG_FILE_NUM];
};

struct sec_dev {
	struct hisi_qm qm;
	struct sec_debug debug;
	u32 ctx_q_num;
	bool iommu_used;
};

enum sec_cap_type {
	SEC_QM_NFE_MASK_CAP = 0x0,
	SEC_QM_RESET_MASK_CAP,
	SEC_QM_OOO_SHUTDOWN_MASK_CAP,
	SEC_QM_CE_MASK_CAP,
	SEC_NFE_MASK_CAP,
	SEC_RESET_MASK_CAP,
	SEC_OOO_SHUTDOWN_MASK_CAP,
	SEC_CE_MASK_CAP,
	SEC_CLUSTER_NUM_CAP,
	SEC_CORE_TYPE_NUM_CAP,
	SEC_CORE_NUM_CAP,
	SEC_CORES_PER_CLUSTER_NUM_CAP,
	SEC_CORE_ENABLE_BITMAP,
	SEC_DRV_ALG_BITMAP_LOW,
	SEC_DRV_ALG_BITMAP_HIGH,
	SEC_DEV_ALG_BITMAP_LOW,
	SEC_DEV_ALG_BITMAP_HIGH,
	SEC_CORE1_ALG_BITMAP_LOW,
	SEC_CORE1_ALG_BITMAP_HIGH,
	SEC_CORE2_ALG_BITMAP_LOW,
	SEC_CORE2_ALG_BITMAP_HIGH,
	SEC_CORE3_ALG_BITMAP_LOW,
	SEC_CORE3_ALG_BITMAP_HIGH,
	SEC_CORE4_ALG_BITMAP_LOW,
	SEC_CORE4_ALG_BITMAP_HIGH,
};

enum sec_cap_reg_record_idx {
	SEC_DRV_ALG_BITMAP_LOW_IDX = 0x0,
	SEC_DRV_ALG_BITMAP_HIGH_IDX,
	SEC_DEV_ALG_BITMAP_LOW_IDX,
	SEC_DEV_ALG_BITMAP_HIGH_IDX,
};

void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
int sec_register_to_crypto(struct hisi_qm *qm);
void sec_unregister_from_crypto(struct hisi_qm *qm);
u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low);
#endif
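
/*
 * Illustrative usage sketch (kept as a comment; not part of the upstream
 * header): a caller would typically combine two 32-bit capability registers
 * into one 64-bit algorithm bitmap via sec_get_alg_bitmap() and test a
 * feature bit before registering the matching algorithm.  This assumes the
 * high/low arguments take the sec_cap_reg_record_idx values declared above;
 * SEC_EXAMPLE_ALG_BIT is a made-up placeholder, not a real capability bit
 * defined by this driver.
 *
 *	#define SEC_EXAMPLE_ALG_BIT	BIT_ULL(0)
 *
 *	u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
 *					  SEC_DRV_ALG_BITMAP_LOW_IDX);
 *
 *	if (alg_mask & SEC_EXAMPLE_ALG_BIT)
 *		pr_debug("SEC: example algorithm supported by this device\n");
 */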