1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 
4 #ifndef __HISI_SEC_V2_H
5 #define __HISI_SEC_V2_H
6 
7 #include "../qm.h"
8 #include "sec_crypto.h"
9 
/*
 * Algorithm resource per hardware SEC queue: DMA buffers pre-allocated
 * per queue slot, handed to requests via sec_qp_ctx.res[].
 */
struct sec_alg_res {
	u8 *pbuf;		/* packet buffer (used when sec_req.use_pbuf is set) */
	dma_addr_t pbuf_dma;	/* bus address of @pbuf */
	u8 *c_ivin;		/* cipher IV input (cf. sec_cipher_req) */
	dma_addr_t c_ivin_dma;	/* bus address of @c_ivin */
	u8 *a_ivin;		/* auth IV input (cf. sec_aead_req) */
	dma_addr_t a_ivin_dma;	/* bus address of @a_ivin */
	u8 *out_mac;		/* output MAC buffer (cf. sec_aead_req) */
	dma_addr_t out_mac_dma;	/* bus address of @out_mac */
};
21 
/* Cipher request of SEC private: per-request cipher state */
struct sec_cipher_req {
	struct hisi_acc_hw_sgl *c_out;	/* hardware SGL for cipher output */
	dma_addr_t c_out_dma;		/* bus address of @c_out */
	u8 *c_ivin;			/* cipher IV input for this request */
	dma_addr_t c_ivin_dma;		/* bus address of @c_ivin */
	struct skcipher_request *sk_req; /* originating crypto API request */
	u32 c_len;			/* cipher operation length */
	bool encrypt;			/* true: encrypt, false: decrypt */
};
32 
/* AEAD request of SEC private: per-request authenticated-cipher state */
struct sec_aead_req {
	u8 *out_mac;			/* output MAC buffer */
	dma_addr_t out_mac_dma;		/* bus address of @out_mac */
	u8 *a_ivin;			/* auth IV input for this request */
	dma_addr_t a_ivin_dma;		/* bus address of @a_ivin */
	struct aead_request *aead_req;	/* originating crypto API request */
};
40 
/* SEC request of Crypto: one in-flight operation on a hardware queue */
struct sec_req {
	/* Hardware BD; sec_sqe3 is presumably the newer BD format — verify */
	union {
		struct sec_sqe sec_sqe;
		struct sec_sqe3 sec_sqe3;
	};
	struct sec_ctx *ctx;		/* owning TFM context */
	struct sec_qp_ctx *qp_ctx;	/* queue pair handling this request */

	/*
	 * Common parameter of the SEC request.
	 */
	struct hisi_acc_hw_sgl *in;	/* hardware SGL for input data */
	dma_addr_t in_dma;		/* bus address of @in */
	struct sec_cipher_req c_req;	/* cipher-specific part */
	struct sec_aead_req aead_req;	/* AEAD-specific part */
	struct list_head backlog_head;	/* node on sec_qp_ctx.backlog */

	int err_type;			/* error type recorded for this request */
	int req_id;			/* id from qp_ctx req_idr; presumably indexes req_list/res — verify */
	u32 flag;			/* request flags from the crypto API — verify */

	/* Status of the SEC request */
	bool fake_busy;			/* -EBUSY was returned (cf. sec_ctx.fake_req_limit) */
	bool use_pbuf;			/* data carried in sec_alg_res.pbuf */
};
67 
/**
 * struct sec_req_op - Operations for SEC request
 * @buf_map: DMA map the SGL buffers of the request
 * @buf_unmap: DMA unmap the SGL buffers of the request
 * @do_transfer: Stage request data (presumably the IV) before @bd_fill —
 *               verify against the implementation
 * @bd_fill: Fill the SEC queue BD
 * @bd_send: Send the SEC BD into the hardware queue
 * @callback: Call back for the request
 * @process: Main processing logic of Skcipher
 */
struct sec_req_op {
	int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
	void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
	void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
	void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
	int (*process)(struct sec_ctx *ctx, struct sec_req *req);
};
86 
/* SEC auth context: per-TFM authentication state */
struct sec_auth_ctx {
	dma_addr_t a_key_dma;	/* bus address of @a_key */
	u8 *a_key;		/* authentication key buffer */
	u8 a_key_len;		/* auth key length */
	u8 mac_len;		/* MAC length */
	u8 a_alg;		/* auth algorithm selector — verify encoding */
	bool fallback;		/* use the software fallback tfm */
	struct crypto_shash *hash_tfm;		/* shash tfm; role not visible here — verify */
	struct crypto_aead *fallback_aead_tfm;	/* software AEAD fallback */
};
98 
/* SEC cipher context: per-TFM cipher-related state */
struct sec_cipher_ctx {
	u8 *c_key;		/* cipher key buffer */
	dma_addr_t c_key_dma;	/* bus address of @c_key */
	sector_t iv_offset;	/* IV offset (sector-based — verify use) */
	u32 c_gran_size;	/* granularity size — verify semantics */
	u32 ivsize;		/* IV size */
	u8 c_mode;		/* cipher mode selector — verify encoding */
	u8 c_alg;		/* cipher algorithm selector — verify encoding */
	u8 c_key_len;		/* cipher key length selector */

	/* add software support */
	bool fallback;		/* use the software fallback tfm */
	struct crypto_sync_skcipher *fbtfm;	/* software skcipher fallback */
};
114 
/* SEC queue context: per-hardware-queue state */
struct sec_qp_ctx {
	struct hisi_qp *qp;			/* underlying QM queue pair */
	struct sec_req *req_list[QM_Q_DEPTH];	/* in-flight requests, presumably indexed by req_id — verify */
	struct idr req_idr;			/* allocator for request ids */
	struct sec_alg_res res[QM_Q_DEPTH];	/* per-slot DMA resources */
	struct sec_ctx *ctx;			/* owning TFM context */
	struct mutex req_lock;			/* NOTE(review): presumably protects req_idr/backlog — verify scope */
	struct list_head backlog;		/* backlogged requests (cf. sec_req.backlog_head) */
	struct hisi_acc_sgl_pool *c_in_pool;	/* SGL pool for input */
	struct hisi_acc_sgl_pool *c_out_pool;	/* SGL pool for cipher output */
};
127 
/* Which crypto API personality a context serves (cf. sec_ctx.alg_type) */
enum sec_alg_type {
	SEC_SKCIPHER,
	SEC_AEAD
};
132 
/* SEC Crypto TFM context which holds queue, cipher and auth state */
struct sec_ctx {
	struct sec_qp_ctx *qp_ctx;		/* queue pair contexts (presumably one per queue — verify) */
	struct sec_dev *sec;			/* owning SEC device */
	const struct sec_req_op *req_op;	/* request operations for this alg type */
	struct hisi_qp **qps;			/* hardware queues backing @qp_ctx */

	/* Half queues for encipher, and half for decipher */
	u32 hlf_q_num;

	/* Threshold for fake busy, trigger to return -EBUSY to user */
	u32 fake_req_limit;

	/* Current cyclic index to select a queue for encipher */
	atomic_t enc_qcyclic;

	/* Current cyclic index to select a queue for decipher */
	atomic_t dec_qcyclic;

	enum sec_alg_type alg_type;	/* skcipher or AEAD */
	bool pbuf_supported;		/* pbuf path available (cf. sec_alg_res.pbuf) */
	struct sec_cipher_ctx c_ctx;	/* cipher state */
	struct sec_auth_ctx a_ctx;	/* auth state */
	u8 type_supported;		/* supported BD/queue type — verify semantics */
	struct device *dev;		/* device, presumably for DMA mapping — verify */
};
159 
/* Hardware endianness configuration */
enum sec_endian {
	SEC_LE = 0,	/* little endian */
	SEC_32BE,	/* 32-bit big endian */
	SEC_64BE	/* 64-bit big endian */
};
165 
/* Indices of the driver's debugfs files (cf. sec_debug.files) */
enum sec_debug_file_index {
	SEC_CLEAR_ENABLE,
	SEC_DEBUG_FILE_NUM,	/* count sentinel, not a real file */
};
170 
/* One debugfs file exposed by the driver */
struct sec_debug_file {
	enum sec_debug_file_index index;	/* which file this entry is */
	spinlock_t lock;			/* NOTE(review): presumably serializes file accessors — verify */
	struct hisi_qm *qm;			/* QM instance the file reports on */
};
176 
/*
 * Debug/statistics (dfx) counters.
 * NOTE(review): per-counter semantics inferred from names — verify
 * against the sites that increment them.
 */
struct sec_dfx {
	atomic64_t send_cnt;		/* BDs sent */
	atomic64_t recv_cnt;		/* completions received */
	atomic64_t send_busy_cnt;	/* sends that hit a busy condition */
	atomic64_t recv_busy_cnt;	/* receives that hit a busy condition */
	atomic64_t err_bd_cnt;		/* BDs completed with error */
	atomic64_t invalid_req_cnt;	/* completions with no valid request */
	atomic64_t done_flag_cnt;	/* abnormal done-flag occurrences */
};
186 
/* Per-device debug state: statistics plus debugfs files */
struct sec_debug {
	struct sec_dfx dfx;				/* statistics counters */
	struct sec_debug_file files[SEC_DEBUG_FILE_NUM]; /* debugfs files */
};
191 
/* One SEC accelerator device */
struct sec_dev {
	struct hisi_qm qm;	/* embedded QM instance; must stay first if code containerof's it — verify */
	struct sec_debug debug;	/* debug/statistics state */
	u32 ctx_q_num;		/* number of queues per crypto context */
	bool iommu_used;	/* device operates behind an IOMMU */
};
198 
/*
 * Driver entry points shared between the main and crypto parts.
 * NOTE(review): descriptions inferred from names — verify against
 * the implementations.
 */
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);	/* release queues from sec_create_qps() */
struct hisi_qp **sec_create_qps(void);			/* allocate hardware queue pairs */
int sec_register_to_crypto(struct hisi_qm *qm);		/* register algorithms */
void sec_unregister_from_crypto(struct hisi_qm *qm);	/* unregister algorithms */
#endif /* __HISI_SEC_V2_H */
204