// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "qat_algs_send.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
#include "qat_bl.h"

#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

#define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

#define HW_CAP_AES_V2(accel_dev) \
	(GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
	 ICP_ACCEL_CAPABILITIES_AES_V2)
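
/*
 * Algorithm registration is reference counted across accelerator devices:
 * the first device to come up registers the algorithms with the crypto API
 * and the last one to go away unregisters them. algs_lock serialises
 * updates to active_devs.
 */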
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
	union {
		struct sha1_state sha1;
		struct sha256_state sha256;
		struct sha512_state sha512;
	};
	char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
	char opad[SHA512_BLOCK_SIZE];
};

struct qat_alg_skcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_skcipher *ftfm;
	struct crypto_cipher *tweak;
	bool fallback;
	int mode;
};
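
/*
 * Return the size in bytes of the hardware intermediate (partial) hash
 * state for the given algorithm, or -EFAULT if the algorithm is not
 * supported.
 */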
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}
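
/*
 * Precompute the HMAC inner and outer partial hashes and store them in the
 * content descriptor so the hardware does not have to rehash the key pads
 * on every request. Per HMAC (RFC 2104), keys longer than the block size
 * are first reduced with a plain digest; the exported ipad/opad states are
 * written out in the byte order expected by the firmware.
 */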
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const u8 *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ctx->ipad, 0, block_size);
	memset(ctx->opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ctx->ipad);
		if (ret)
			return ret;

		memcpy(ctx->opad, ctx->ipad, digest_size);
	} else {
		memcpy(ctx->ipad, auth_key, auth_keylen);
		memcpy(ctx->opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ctx->ipad + i;
		char *opad_ptr = ctx->opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	if (offset < 0)
		return -EFAULT;

	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ctx->ipad, block_size);
	memzero_explicit(ctx->opad, block_size);
	return 0;
}
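
/* Initialise the request header fields common to all lookaside (LA) requests */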
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
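
/*
 * Build the encrypt-direction session: a cipher-then-hash content
 * descriptor (cipher config and key followed by the auth setup and the
 * precomputed HMAC states) plus a firmware request template that chains
 * the cipher slice into the auth slice.
 */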
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}
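
/*
 * Build the decrypt-direction session. The content descriptor layout is
 * the reverse of the encrypt case: the auth setup and HMAC states come
 * first, the auth slice is chained into the cipher slice, and the firmware
 * is asked to compare the authentication result instead of returning it.
 */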
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}
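
/*
 * Fill in the parts of the skcipher content descriptor and request
 * template that are common to both directions, including key placement.
 * On AES-v2 capable devices the UCS slice is selected for XTS and CTR;
 * for XTS both key halves are kept in the descriptor even though only the
 * first half is consumed by the cipher slice.
 */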
static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
				      struct icp_qat_fw_la_bulk_req *req,
				      struct icp_qat_hw_cipher_algo_blk *cd,
				      const u8 *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
	int mode = ctx->mode;

	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;

	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);

		/* Store both XTS keys in CD, only the first key is sent
		 * to the HW, the second key is used for tweak calculation
		 */
		memcpy(cd->ucs_aes.key, key, keylen);
		keylen = keylen / 2;
	} else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
		memcpy(cd->ucs_aes.key, key, keylen);
		keylen = round_up(keylen, 16);
	} else {
		memcpy(cd->aes.key, key, keylen);
	}

	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
				      int alg, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}
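
/*
 * Derive the AES decryption ("reversed") key from the forward key by
 * expanding the key schedule and copying out the final round key(s).
 * Used for XTS decryption when the hardware cannot convert the key
 * itself.
 */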
static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
				    u8 *key_reverse)
{
	struct crypto_aes_ctx aes_expanded;
	int nrounds;
	u8 *key;

	aes_expandkey(&aes_expanded, key_forward, keylen);
	if (keylen == AES_KEYSIZE_128) {
		nrounds = 10;
		key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
		memcpy(key_reverse, key, AES_BLOCK_SIZE);
	} else {
		/* AES_KEYSIZE_256 */
		nrounds = 14;
		key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
		memcpy(key_reverse, key, AES_BLOCK_SIZE);
		memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
		       AES_BLOCK_SIZE);
	}
}

static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
				      int alg, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);

	qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
		/* Key reversing not supported, set no convert */
		dec_cd->aes.cipher_config.val =
				QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);

		/* In-place key reversal */
		qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
					dec_cd->ucs_aes.key);
	} else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_DEC(alg, mode);
	} else {
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_ENC(alg, mode);
	}
}
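
/*
 * Map a key length to a hardware algorithm id. XTS keys are twice the AES
 * key size because they carry both the data and the tweak key; note that
 * 2 * AES_KEYSIZE_192 is deliberately absent, as XTS-AES-192 is handled by
 * the software fallback.
 */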
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_192:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
			break;
		case AES_KEYSIZE_256:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (key_len) {
		case AES_KEYSIZE_128 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_256 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
		goto error;

	memzero_explicit(&keys, sizeof(keys));
	return 0;
bad_key:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
error:
	memzero_explicit(&keys, sizeof(keys));
	return -EFAULT;
}

static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
					  const u8 *key,
					  unsigned int keylen,
					  int mode)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg, mode))
		return -EINVAL;

	qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
	qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
	return 0;
}

static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_aead_init_sessions(tfm, key, keylen,
					  ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = NULL;
	int node = numa_node_id();
	struct device *dev;
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_inst;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_aead_init_sessions(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_inst:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}

static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (ctx->enc_cd)
		return qat_alg_aead_rekey(tfm, key, keylen);
	else
		return qat_alg_aead_newkey(tfm, key, keylen);
}

static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	u8 stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	aead_request_complete(areq, res);
}
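
/*
 * Advance the counter IV by the number of blocks just processed, treating
 * the 16-byte IV as a 128-bit big-endian counter held in two 64-bit
 * halves with a manual carry.
 */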
static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
{
	struct skcipher_request *sreq = qat_req->skcipher_req;
	u64 iv_lo_prev;
	u64 iv_lo;
	u64 iv_hi;

	memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);

	iv_lo = be64_to_cpu(qat_req->iv_lo);
	iv_hi = be64_to_cpu(qat_req->iv_hi);

	iv_lo_prev = iv_lo;
	iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
	if (iv_lo < iv_lo_prev)
		iv_hi++;

	qat_req->iv_lo = cpu_to_be64(iv_lo);
	qat_req->iv_hi = cpu_to_be64(iv_hi);
}
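
/*
 * For CBC the next IV is the last ciphertext block: copy it from the
 * destination when encrypting and from the source when decrypting.
 */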
static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
{
	struct skcipher_request *sreq = qat_req->skcipher_req;
	int offset = sreq->cryptlen - AES_BLOCK_SIZE;
	struct scatterlist *sgl;

	if (qat_req->encryption)
		sgl = sreq->dst;
	else
		sgl = sreq->src;

	scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
}

static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
{
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	switch (ctx->mode) {
	case ICP_QAT_HW_CIPHER_CTR_MODE:
		qat_alg_update_iv_ctr_mode(qat_req);
		break;
	case ICP_QAT_HW_CIPHER_CBC_MODE:
		qat_alg_update_iv_cbc_mode(qat_req);
		break;
	case ICP_QAT_HW_CIPHER_XTS_MODE:
		break;
	default:
		dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
			 ctx->mode);
	}
}

static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				      struct qat_crypto_request *qat_req)
{
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct skcipher_request *sreq = qat_req->skcipher_req;
	u8 stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;

	if (qat_req->encryption)
		qat_alg_update_iv(qat_req);

	memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);

	skcipher_request_complete(sreq, res);
}
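
/*
 * Common response handler: the opaque data carried in the firmware
 * response points back at the originating request, whose completion
 * callback is invoked before any backlogged requests are resubmitted.
 */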
void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;
	struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;

	qat_req->cb(qat_resp, qat_req);

	qat_alg_send_backlog(backlog);
}

static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
				    struct qat_crypto_instance *inst,
				    struct crypto_async_request *base)
{
	struct qat_alg_req *alg_req = &qat_req->alg_req;

	alg_req->fw_req = (u32 *)&qat_req->req;
	alg_req->tx_ring = inst->sym_tx;
	alg_req->base = base;
	alg_req->backlog = &inst->backlog;

	return qat_alg_send_message(alg_req);
}

static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_authsize(aead_tfm);
	gfp_t f = qat_algs_alloc_flags(&areq->base);
	int ret;
	u32 cipher_len;

	cipher_len = areq->cryptlen - digst_size;
	if (cipher_len % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
				 &qat_req->buf, NULL, f);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = cipher_len;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;

	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	gfp_t f = qat_algs_alloc_flags(&areq->base);
	struct icp_qat_fw_la_bulk_req *msg;
	u8 *iv = areq->iv;
	int ret;

	if (areq->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
				 &qat_req->buf, NULL, f);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
				  const u8 *key, unsigned int keylen,
				  int mode)
{
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
}

static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
				   const u8 *key, unsigned int keylen,
				   int mode)
{
	struct qat_crypto_instance *inst = NULL;
	struct device *dev;
	int node = numa_node_id();
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_instance;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_instance:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}

static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   int mode)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->mode = mode;

	if (ctx->enc_cd)
		return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
	else
		return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
}

static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CTR_MODE);
}
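
/*
 * XTS setkey: the hardware does not support 192-bit AES keys, so those are
 * routed to the software fallback tfm. On AES-v2 capable devices the
 * second key half is also programmed into a software AES cipher that is
 * used to precompute the initial tweak.
 */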
static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	if (keylen >> 1 == AES_KEYSIZE_192) {
		ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
		if (ret)
			return ret;

		ctx->fallback = true;

		return 0;
	}

	ctx->fallback = false;

	ret = qat_alg_skcipher_setkey(tfm, key, keylen,
				      ICP_QAT_HW_CIPHER_XTS_MODE);
	if (ret)
		return ret;

	if (HW_CAP_AES_V2(ctx->inst->accel_dev))
		ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
					   keylen / 2);

	return ret;
}
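
/*
 * Load the request IV into the firmware message. For XTS on AES-v2
 * capable devices the IV is first encrypted with the tweak key, since the
 * UCS slice expects the precomputed initial tweak rather than the raw
 * sector IV.
 */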
static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
	u8 *iv = qat_req->skcipher_req->iv;

	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;

	if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
		crypto_cipher_encrypt_one(ctx->tweak,
					  (u8 *)cipher_param->u.cipher_IV_array,
					  iv);
	else
		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
}

static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	gfp_t f = qat_algs_alloc_flags(&req->base);
	struct icp_qat_fw_la_bulk_req *msg;
	int ret;

	if (req->cryptlen == 0)
		return 0;

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
				 &qat_req->buf, NULL, f);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->skcipher_ctx = ctx;
	qat_req->skcipher_req = req;
	qat_req->cb = qat_skcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	qat_req->encryption = true;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->cryptlen;
	cipher_param->cipher_offset = 0;

	qat_alg_set_req_iv(qat_req);

	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
{
	if (req->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_skcipher_encrypt(req);
}

static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct skcipher_request *nreq = skcipher_request_ctx(req);

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	if (ctx->fallback) {
		memcpy(nreq, req, sizeof(*req));
		skcipher_request_set_tfm(nreq, ctx->ftfm);
		return crypto_skcipher_encrypt(nreq);
	}

	return qat_alg_skcipher_encrypt(req);
}

static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	gfp_t f = qat_algs_alloc_flags(&req->base);
	struct icp_qat_fw_la_bulk_req *msg;
	int ret;

	if (req->cryptlen == 0)
		return 0;

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
				 &qat_req->buf, NULL, f);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->skcipher_ctx = ctx;
	qat_req->skcipher_req = req;
	qat_req->cb = qat_skcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	qat_req->encryption = false;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->cryptlen;
	cipher_param->cipher_offset = 0;

	qat_alg_set_req_iv(qat_req);
	qat_alg_update_iv(qat_req);

	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
{
	if (req->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_skcipher_decrypt(req);
}

static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct skcipher_request *nreq = skcipher_request_ctx(req);

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	if (ctx->fallback) {
		memcpy(nreq, req, sizeof(*req));
		skcipher_request_set_tfm(nreq, ctx->ftfm);
		return crypto_skcipher_decrypt(nreq);
	}

	return qat_alg_skcipher_decrypt(req);
}

static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	int reqsize;

	ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->ftfm))
		return PTR_ERR(ctx->ftfm);

	ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(ctx->tweak)) {
		crypto_free_skcipher(ctx->ftfm);
		return PTR_ERR(ctx->tweak);
	}

	reqsize = max(sizeof(struct qat_crypto_request),
		      sizeof(struct skcipher_request) +
		      crypto_skcipher_reqsize(ctx->ftfm));
	crypto_skcipher_set_reqsize(tfm, reqsize);

	return 0;
}

static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->ftfm)
		crypto_free_skcipher(ctx->ftfm);

	if (ctx->tweak)
		crypto_free_cipher(ctx->tweak);

	qat_alg_skcipher_exit_tfm(tfm);
}

static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };

static struct skcipher_alg qat_skciphers[] = { {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "qat_aes_cbc",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_tfm,
	.exit = qat_alg_skcipher_exit_tfm,
	.setkey = qat_alg_skcipher_cbc_setkey,
	.decrypt = qat_alg_skcipher_blk_decrypt,
	.encrypt = qat_alg_skcipher_blk_encrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
}, {
	.base.cra_name = "ctr(aes)",
	.base.cra_driver_name = "qat_aes_ctr",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_tfm,
	.exit = qat_alg_skcipher_exit_tfm,
	.setkey = qat_alg_skcipher_ctr_setkey,
	.decrypt = qat_alg_skcipher_decrypt,
	.encrypt = qat_alg_skcipher_encrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
}, {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "qat_aes_xts",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_xts_tfm,
	.exit = qat_alg_skcipher_exit_xts_tfm,
	.setkey = qat_alg_skcipher_xts_setkey,
	.decrypt = qat_alg_skcipher_xts_decrypt,
	.encrypt = qat_alg_skcipher_xts_encrypt,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
} };
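
/*
 * Once registered, these implementations are reachable through the normal
 * kernel crypto API by algorithm name. Illustrative sketch (not part of
 * this driver):
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_256);
 *	... build an skcipher_request, then crypto_skcipher_encrypt() ...
 *	crypto_free_skcipher(tfm);
 *
 * The high cra_priority (4001) makes these the preferred implementations
 * of "cbc(aes)", "ctr(aes)", "xts(aes)" and the authenc combinations when
 * a QAT device is present.
 */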
int qat_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(qat_skciphers,
					ARRAY_SIZE(qat_skciphers));
	if (ret)
		goto unlock;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
	goto unlock;
}

void qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));

unlock:
	mutex_unlock(&algs_lock);
}